+/*
+ * load_code_signature() handles an LC_CODE_SIGNATURE load command: it
+ * reads the code signature blob for this Mach-O slice from the vnode
+ * and registers it with the UBC layer, reusing an existing blob if one
+ * has already been registered for this vnode and CPU type.
+ */
+static load_return_t
+load_code_signature(
+ struct linkedit_data_command *lcp,
+ struct vnode *vp,
+ off_t macho_offset,
+ off_t macho_size,
+ cpu_type_t cputype,
+ load_result_t *result)
+{
+ int ret;
+ kern_return_t kr;
+ vm_offset_t addr;
+ int resid;
+ struct cs_blob *blob;
+ int error;
+ vm_size_t blob_size;
+
+ addr = 0;
+ blob = NULL;
+
+ if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
+ lcp->dataoff + lcp->datasize > macho_size) {
+ ret = LOAD_BADMACHO;
+ goto out;
+ }
+
+ blob = ubc_cs_blob_get(vp, cputype, -1);
+ if (blob != NULL) {
+ /* we already have a blob for this vnode and cputype */
+ if (blob->csb_cpu_type == cputype &&
+ blob->csb_base_offset == macho_offset &&
+ blob->csb_mem_size == lcp->datasize) {
+ /* it matches the blob we want here: we're done */
+ ret = LOAD_SUCCESS;
+ } else {
+ /* the blob has changed for this vnode: fail ! */
+ ret = LOAD_BADMACHO;
+ }
+ goto out;
+ }
+
+ blob_size = lcp->datasize;
+ kr = ubc_cs_blob_allocate(&addr, &blob_size);
+ if (kr != KERN_SUCCESS) {
+ ret = LOAD_NOSPACE;
+ goto out;
+ }
+
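+ /*
+ * Read the code signature blob from the file, at the data offset
+ * given by the LC_CODE_SIGNATURE load command within this slice.
+ */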
+ resid = 0;
+ error = vn_rdwr(UIO_READ,
+ vp,
+ (caddr_t) addr,
+ lcp->datasize,
+ macho_offset + lcp->dataoff,
+ UIO_SYSSPACE,
+ 0,
+ kauth_cred_get(),
+ &resid,
+ current_proc());
+ if (error || resid != 0) {
+ ret = LOAD_IOERROR;
+ goto out;
+ }
+
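+ /*
+ * Hand the blob to the UBC layer for this vnode; on success it
+ * takes ownership of the buffer at "addr".
+ */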
+ if (ubc_cs_blob_add(vp,
+ cputype,
+ macho_offset,
+ addr,
+ lcp->datasize)) {
+ ret = LOAD_FAILURE;
+ goto out;
+ } else {
+ /* ubc_cs_blob_add() has consumed "addr" */
+ addr = 0;
+ }
+
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
+
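+ /* re-fetch the blob we just registered so its flags can be reported below */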
+ blob = ubc_cs_blob_get(vp, cputype, -1);
+
+ ret = LOAD_SUCCESS;
+out:
+ if (result && ret == LOAD_SUCCESS) {
+ result->csflags |= blob->csb_flags;
+ }
+ if (addr != 0) {
+ ubc_cs_blob_deallocate(addr, blob_size);
+ addr = 0;
+ }
+
+ return ret;
+}
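+
+/*
+ * Illustrative sketch only, not part of this change: the Mach-O parser
+ * is expected to call load_code_signature() when it encounters an
+ * LC_CODE_SIGNATURE load command, roughly along these lines (the
+ * surrounding variable names here are placeholders):
+ *
+ *     case LC_CODE_SIGNATURE:
+ *         ret = load_code_signature(
+ *                 (struct linkedit_data_command *) lcp,
+ *                 vp,
+ *                 file_offset,
+ *                 macho_size,
+ *                 header->cputype,
+ *                 result);
+ *         break;
+ */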
+
+
+#if CONFIG_CODE_DECRYPTION
+
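+/*
+ * set_code_unprotect() handles an LC_ENCRYPTION_INFO load command: it
+ * creates a text decrypter for the requested crypt method, locates the
+ * segment covering the encrypted file range, and remaps that range in
+ * the target map through vm_map_apple_protected() using the decrypter.
+ */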
+static load_return_t
+set_code_unprotect(
+ struct encryption_info_command *eip,
+ caddr_t addr,
+ vm_map_t map,
+ int64_t slide,
+ struct vnode *vp)
+{
+ int result, len;
+ pager_crypt_info_t crypt_info;
+ const char * cryptname = 0;
+ char *vpath;
+
+ size_t offset;
+ struct segment_command_64 *seg64;
+ struct segment_command *seg32;
+ vm_map_offset_t map_offset, map_size;
+ kern_return_t kr;
+
+ if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;
+
+ switch(eip->cryptid) {
+ case 0:
+ /* not encrypted, just an empty load command */
+ return LOAD_SUCCESS;
+ case 1:
+ cryptname="com.apple.unfree";
+ break;
+ case 0x10:
+ /* some random cryptid that you could manually put into
+ * your binary if you want the null decrypter */
+ cryptname="com.apple.null";
+ break;
+ default:
+ return LOAD_BADMACHO;
+ }
+
+ if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
+ if (NULL == text_crypter_create) return LOAD_FAILURE;
+
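+ /* get the vnode's path; it is handed to the decrypter to identify the file */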
+ MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ if(vpath == NULL) return LOAD_FAILURE;
+
+ len = MAXPATHLEN;
+ result = vn_getpath(vp, vpath, &len);
+ if(result) {
+ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
+ return LOAD_FAILURE;
+ }
+
+ /* set up decrypter first */
+ kr=text_crypter_create(&crypt_info, cryptname, (void*)vpath);
+ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
+
+ if(kr) {
+ printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
+ cryptname, kr);
+ return LOAD_RESOURCE;
+ }
+
+ /* this is terrible, but we have to rescan the load commands to find the
+ * virtual address of this encrypted stuff. This code is gonna look like
+ * the dyld source one day... */
+ struct mach_header *header = (struct mach_header *)addr;
+ size_t mach_header_sz = sizeof(struct mach_header);
+ if (header->magic == MH_MAGIC_64 ||
+ header->magic == MH_CIGAM_64) {
+ mach_header_sz = sizeof(struct mach_header_64);
+ }
+ offset = mach_header_sz;
+ uint32_t ncmds = header->ncmds;
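+ /* walk the load commands to find the segment covering the encrypted file range */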
+ while (ncmds--) {
+ /*
+ * Get a pointer to the command.
+ */
+ struct load_command *lcp = (struct load_command *)(addr + offset);
+ offset += lcp->cmdsize;
+
+ switch(lcp->cmd) {
+ case LC_SEGMENT_64:
+ seg64 = (struct segment_command_64 *)lcp;
+ if ((seg64->fileoff <= eip->cryptoff) &&
+ (seg64->fileoff+seg64->filesize >=
+ eip->cryptoff+eip->cryptsize)) {
+ map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
+ map_size = eip->cryptsize;
+ goto remap_now;
+ }
+ break;
+ case LC_SEGMENT:
+ seg32 = (struct segment_command *)lcp;
+ if ((seg32->fileoff <= eip->cryptoff) &&
+ (seg32->fileoff+seg32->filesize >=
+ eip->cryptoff+eip->cryptsize)) {
+ map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
+ map_size = eip->cryptsize;
+ goto remap_now;
+ }
+ break;
+ }
+ }
+
+ /* if we get here, we did not find a segment covering the encrypted range */
+ return LOAD_BADMACHO;
+
+remap_now:
+ /* now remap using the decrypter */
+ kr = vm_map_apple_protected(map, map_offset, map_offset+map_size, &crypt_info);
+ if(kr) {
+ printf("set_code_unprotect(): mapping failed with %x\n", kr);
+ crypt_info.crypt_end(crypt_info.crypt_ops);
+ return LOAD_PROTECT;
+ }
+
+ return LOAD_SUCCESS;
+}
+
+#endif
+
+/*
+ * This routine exists to support load_dylinker().
+ *
+ * This routine has its own, separate understanding of the FAT file format,
+ * which is terrifically unfortunate.
+ */