apple/xnu (xnu-4903.241.1) - bsd/kern/kern_mman.c
1/*
2 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1988 University of Utah.
30 * Copyright (c) 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * the Systems Programming Group of the University of Utah Computer
35 * Science Department.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
66 *
67 * @(#)vm_mmap.c 8.10 (Berkeley) 2/19/95
68 */
69/*
70 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71 * support for mandatory and extensible security protections. This notice
72 * is included in support of clause 2.2 (b) of the Apple Public License,
73 * Version 2.0.
74 */
75
76/*
77 * Mapped file (mmap) interface to VM
78 */
79
80#include <sys/param.h>
81#include <sys/systm.h>
82#include <sys/filedesc.h>
83#include <sys/proc_internal.h>
84#include <sys/kauth.h>
85#include <sys/resourcevar.h>
86#include <sys/vnode_internal.h>
87#include <sys/acct.h>
88#include <sys/wait.h>
89#include <sys/file_internal.h>
90#include <sys/vadvise.h>
91#include <sys/trace.h>
92#include <sys/mman.h>
93#include <sys/conf.h>
94#include <sys/stat.h>
95#include <sys/ubc.h>
96#include <sys/ubc_internal.h>
97#include <sys/sysproto.h>
98
99#include <sys/syscall.h>
100#include <sys/kdebug.h>
101#include <sys/bsdtask_info.h>
102
103#include <security/audit/audit.h>
104#include <bsm/audit_kevents.h>
105
106#include <mach/mach_types.h>
107#include <mach/mach_traps.h>
108#include <mach/vm_sync.h>
109#include <mach/vm_behavior.h>
110#include <mach/vm_inherit.h>
111#include <mach/vm_statistics.h>
112#include <mach/mach_vm.h>
113#include <mach/vm_map.h>
114#include <mach/host_priv.h>
115#include <mach/sdt.h>
116
117#include <machine/machine_routines.h>
118
119#include <kern/cpu_number.h>
120#include <kern/host.h>
121#include <kern/task.h>
122#include <kern/page_decrypt.h>
123
124#include <IOKit/IOReturn.h>
125
126#include <vm/vm_map.h>
127#include <vm/vm_kern.h>
128#include <vm/vm_pager.h>
129#include <vm/vm_protos.h>
130
131#if CONFIG_MACF
132#include <security/mac_framework.h>
133#endif
134
135/*
136 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
137 * XXX usage is PROT_* from an interface perspective. Thus the values of
138 * XXX VM_PROT_* and PROT_* need to correspond.
139 */
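/*
 * For reference, a sketch of the values as defined in <sys/mman.h> and
 * <mach/vm_prot.h>, which is what the interchangeable use below relies on:
 *
 *	PROT_NONE  == VM_PROT_NONE    == 0x00
 *	PROT_READ  == VM_PROT_READ    == 0x01
 *	PROT_WRITE == VM_PROT_WRITE   == 0x02
 *	PROT_EXEC  == VM_PROT_EXECUTE == 0x04
 */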
140int
141mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
142{
143 /*
144 * Map in special device (must be SHARED) or file
145 */
146 struct fileproc *fp;
147 struct vnode *vp;
148 int flags;
149 int prot;
150 int err=0;
151 vm_map_t user_map;
152 kern_return_t result;
153 vm_map_offset_t user_addr;
154 vm_map_size_t user_size;
155 vm_object_offset_t pageoff;
156 vm_object_offset_t file_pos;
157 int alloc_flags = 0;
158 vm_tag_t tag = VM_KERN_MEMORY_NONE;
159 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
160 boolean_t docow;
161 vm_prot_t maxprot;
162 void *handle;
163 memory_object_t pager = MEMORY_OBJECT_NULL;
164 memory_object_control_t control;
165 int mapanon=0;
166 int fpref=0;
167 int error =0;
168 int fd = uap->fd;
169 int num_retries = 0;
170
171 /*
 172 * Note that for UNIX03 conformance, additional parameter checking for the
 173 * mmap() system call is done in libsyscall prior to entering the kernel.
 174 * The sanity checks and argument validation done in this function are
 175 * therefore not the only sources of the errnos returned to the caller.
176 */
177
178 user_map = current_map();
179 user_addr = (vm_map_offset_t)uap->addr;
180 user_size = (vm_map_size_t) uap->len;
181
182 AUDIT_ARG(addr, user_addr);
183 AUDIT_ARG(len, user_size);
184 AUDIT_ARG(fd, uap->fd);
185
186 prot = (uap->prot & VM_PROT_ALL);
187#if 3777787
188 /*
189 * Since the hardware currently does not support writing without
190 * read-before-write, or execution-without-read, if the request is
191 * for write or execute access, we must imply read access as well;
192 * otherwise programs expecting this to work will fail to operate.
193 */
194 if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
195 prot |= VM_PROT_READ;
196#endif /* radar 3777787 */
197
198 flags = uap->flags;
199 vp = NULLVP;
200
201 /*
 202 * The vm code does not have prototypes and the compiler doesn't do the
 203 * right thing when you cast a 64-bit value and pass it in a function
 204 * call. So here it is.
205 */
206 file_pos = (vm_object_offset_t)uap->pos;
207
208
209 /* make sure mapping fits into numeric range etc */
210 if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
211 return (EINVAL);
212
213 /*
214 * Align the file position to a page boundary,
215 * and save its page offset component.
216 */
217 pageoff = (file_pos & vm_map_page_mask(user_map));
218 file_pos -= (vm_object_offset_t)pageoff;
219
220
221 /* Adjust size for rounding (on both ends). */
222 user_size += pageoff; /* low end... */
223 user_size = vm_map_round_page(user_size,
224 vm_map_page_mask(user_map)); /* hi end */
225
226 if (flags & MAP_JIT) {
227 if ((flags & MAP_FIXED) ||
228 (flags & MAP_SHARED) ||
229 !(flags & MAP_ANON) ||
230 (flags & MAP_RESILIENT_CODESIGN) ||
231 (flags & MAP_RESILIENT_MEDIA)) {
232 return EINVAL;
233 }
234 }
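	/*
	 * Illustrative sketch: given the restrictions above, a typical
	 * userspace MAP_JIT request (from a suitably entitled process)
	 * looks like
	 *
	 *	void *jit = mmap(NULL, size,
	 *	    PROT_READ | PROT_WRITE | PROT_EXEC,
	 *	    MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
	 *
	 * i.e. anonymous, private, and not at a fixed address.
	 */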
235
236 if ((flags & MAP_RESILIENT_CODESIGN) ||
237 (flags & MAP_RESILIENT_MEDIA)) {
238 if ((flags & MAP_ANON) ||
239 (flags & MAP_JIT)) {
240 return EINVAL;
241 }
242 if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
243 return EPERM;
244 }
245 }
246
247 /*
248 * Check for illegal addresses. Watch out for address wrap... Note
249 * that VM_*_ADDRESS are not constants due to casts (argh).
250 */
251 if (flags & MAP_FIXED) {
252 /*
253 * The specified address must have the same remainder
254 * as the file offset taken modulo PAGE_SIZE, so it
255 * should be aligned after adjustment by pageoff.
256 */
257 user_addr -= pageoff;
258 if (user_addr & vm_map_page_mask(user_map))
259 return (EINVAL);
260 }
261#ifdef notyet
 262 /* Do not have APIs to get this info; need to wait till then */
263 /*
264 * XXX for non-fixed mappings where no hint is provided or
265 * the hint would fall in the potential heap space,
266 * place it after the end of the largest possible heap.
267 *
268 * There should really be a pmap call to determine a reasonable
269 * location.
270 */
271 else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
272 vm_map_page_mask(user_map)))
273 addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
274 vm_map_page_mask(user_map));
275
276#endif
277
278 alloc_flags = 0;
279
280 if (flags & MAP_ANON) {
281
282 maxprot = VM_PROT_ALL;
283#if CONFIG_MACF
284 /*
285 * Entitlement check.
286 */
287 error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
288 if (error) {
289 return EINVAL;
290 }
291#endif /* MAC */
292
293 /*
294 * Mapping blank space is trivial. Use positive fds as the alias
295 * value for memory tracking.
296 */
297 if (fd != -1) {
298 /*
299 * Use "fd" to pass (some) Mach VM allocation flags,
300 * (see the VM_FLAGS_* definitions).
301 */
302 alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
303 VM_FLAGS_SUPERPAGE_MASK |
304 VM_FLAGS_PURGABLE |
305 VM_FLAGS_4GB_CHUNK);
306 if (alloc_flags != fd) {
307 /* reject if there are any extra flags */
308 return EINVAL;
309 }
310 VM_GET_FLAGS_ALIAS(alloc_flags, tag);
311 alloc_flags &= ~VM_FLAGS_ALIAS_MASK;
312 }
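		/*
		 * Illustrative sketch: with MAP_ANON, userspace can tag the
		 * region for vmmap/footprint accounting by encoding a VM tag
		 * from <mach/vm_statistics.h> in the "fd" argument, e.g.
		 *
		 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 *	    MAP_PRIVATE | MAP_ANON,
		 *	    VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1), 0);
		 *
		 * Any bits outside the masks accepted above are rejected
		 * with EINVAL.
		 */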
313
314 handle = NULL;
315 file_pos = 0;
316 mapanon = 1;
317 } else {
318 struct vnode_attr va;
319 vfs_context_t ctx = vfs_context_current();
320
321 if (flags & MAP_JIT)
322 return EINVAL;
323
324 /*
325 * Mapping file, get fp for validation. Obtain vnode and make
326 * sure it is of appropriate type.
327 */
328 err = fp_lookup(p, fd, &fp, 0);
329 if (err)
330 return(err);
331 fpref = 1;
332 switch (FILEGLOB_DTYPE(fp->f_fglob)) {
333 case DTYPE_PSXSHM:
334 uap->addr = (user_addr_t)user_addr;
335 uap->len = (user_size_t)user_size;
336 uap->prot = prot;
337 uap->flags = flags;
338 uap->pos = file_pos;
339 error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
340 goto bad;
341 case DTYPE_VNODE:
342 break;
343 default:
344 error = EINVAL;
345 goto bad;
346 }
347 vp = (struct vnode *)fp->f_fglob->fg_data;
348 error = vnode_getwithref(vp);
349 if(error != 0)
350 goto bad;
351
352 if (vp->v_type != VREG && vp->v_type != VCHR) {
353 (void)vnode_put(vp);
354 error = EINVAL;
355 goto bad;
356 }
357
358 AUDIT_ARG(vnpath, vp, ARG_VNODE1);
359
360 /*
361 * POSIX: mmap needs to update access time for mapped files
362 */
363 if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
364 VATTR_INIT(&va);
365 nanotime(&va.va_access_time);
366 VATTR_SET_ACTIVE(&va, va_access_time);
367 vnode_setattr(vp, &va, ctx);
368 }
369
370 /*
 371 * XXX hack to handle use of /dev/zero to map anon memory (a la
372 * SunOS).
373 */
374 if (vp->v_type == VCHR || vp->v_type == VSTR) {
375 (void)vnode_put(vp);
376 error = ENODEV;
377 goto bad;
378 } else {
379 /*
380 * Ensure that file and memory protections are
381 * compatible. Note that we only worry about
382 * writability if mapping is shared; in this case,
383 * current and max prot are dictated by the open file.
384 * XXX use the vnode instead? Problem is: what
385 * credentials do we use for determination? What if
386 * proc does a setuid?
387 */
388 maxprot = VM_PROT_EXECUTE; /* ??? */
389 if (fp->f_fglob->fg_flag & FREAD)
390 maxprot |= VM_PROT_READ;
391 else if (prot & PROT_READ) {
392 (void)vnode_put(vp);
393 error = EACCES;
394 goto bad;
395 }
396 /*
397 * If we are sharing potential changes (either via
398 * MAP_SHARED or via the implicit sharing of character
399 * device mappings), and we are trying to get write
400 * permission although we opened it without asking
401 * for it, bail out.
402 */
403
404 if ((flags & MAP_SHARED) != 0) {
405 if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
406 /*
407 * Do not allow writable mappings of
408 * swap files (see vm_swapfile_pager.c).
409 */
410 !vnode_isswap(vp)) {
411 /*
412 * check for write access
413 *
414 * Note that we already made this check when granting FWRITE
415 * against the file, so it seems redundant here.
416 */
417 error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);
418
419 /* if not granted for any reason, but we wanted it, bad */
420 if ((prot & PROT_WRITE) && (error != 0)) {
421 vnode_put(vp);
422 goto bad;
423 }
424
425 /* if writable, remember */
426 if (error == 0)
427 maxprot |= VM_PROT_WRITE;
428
429 } else if ((prot & PROT_WRITE) != 0) {
430 (void)vnode_put(vp);
431 error = EACCES;
432 goto bad;
433 }
434 } else
435 maxprot |= VM_PROT_WRITE;
436
437 handle = (void *)vp;
438#if CONFIG_MACF
439 error = mac_file_check_mmap(vfs_context_ucred(ctx),
440 fp->f_fglob, prot, flags, file_pos, &maxprot);
441 if (error) {
442 (void)vnode_put(vp);
443 goto bad;
444 }
445#endif /* MAC */
446 }
447 }
448
449 if (user_size == 0) {
450 if (!mapanon)
451 (void)vnode_put(vp);
452 error = 0;
453 goto bad;
454 }
455
456 /*
457 * We bend a little - round the start and end addresses
458 * to the nearest page boundary.
459 */
460 user_size = vm_map_round_page(user_size,
461 vm_map_page_mask(user_map));
462
463 if (file_pos & vm_map_page_mask(user_map)) {
464 if (!mapanon)
465 (void)vnode_put(vp);
466 error = EINVAL;
467 goto bad;
468 }
469
470 if ((flags & MAP_FIXED) == 0) {
471 alloc_flags |= VM_FLAGS_ANYWHERE;
472 user_addr = vm_map_round_page(user_addr,
473 vm_map_page_mask(user_map));
474 } else {
475 if (user_addr != vm_map_trunc_page(user_addr,
476 vm_map_page_mask(user_map))) {
477 if (!mapanon)
478 (void)vnode_put(vp);
479 error = EINVAL;
480 goto bad;
481 }
482 /*
483 * mmap(MAP_FIXED) will replace any existing mappings in the
484 * specified range, if the new mapping is successful.
485 * If we just deallocate the specified address range here,
486 * another thread might jump in and allocate memory in that
487 * range before we get a chance to establish the new mapping,
488 * and we won't have a chance to restore the old mappings.
489 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
490 * has to deallocate the existing mappings and establish the
491 * new ones atomically.
492 */
493 alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
494 }
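	/*
	 * Illustrative sketch: because of VM_FLAGS_OVERWRITE, a call such as
	 *
	 *	void *p = mmap(target, size, PROT_READ,
	 *	    MAP_FIXED | MAP_PRIVATE, fd, 0);
	 *
	 * (with "target" and "fd" standing in for a page-aligned address and
	 * an open file) atomically replaces whatever was mapped in
	 * [target, target + size) instead of leaving a window where the
	 * range is unmapped.
	 */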
495
496 if (flags & MAP_NOCACHE)
497 alloc_flags |= VM_FLAGS_NO_CACHE;
498
499 if (flags & MAP_JIT) {
500 vmk_flags.vmkf_map_jit = TRUE;
501 }
502
503 if (flags & MAP_RESILIENT_CODESIGN) {
504 alloc_flags |= VM_FLAGS_RESILIENT_CODESIGN;
505 }
506
507 /*
508 * Lookup/allocate object.
509 */
510 if (handle == NULL) {
511 control = NULL;
512#ifdef notyet
513/* Hmm .. */
514#if defined(VM_PROT_READ_IS_EXEC)
515 if (prot & VM_PROT_READ)
516 prot |= VM_PROT_EXECUTE;
517 if (maxprot & VM_PROT_READ)
518 maxprot |= VM_PROT_EXECUTE;
519#endif
520#endif
521
522#if 3777787
523 if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
524 prot |= VM_PROT_READ;
525 if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
526 maxprot |= VM_PROT_READ;
527#endif /* radar 3777787 */
528map_anon_retry:
529 result = vm_map_enter_mem_object(user_map,
530 &user_addr, user_size,
531 0, alloc_flags, vmk_flags,
532 tag,
533 IPC_PORT_NULL, 0, FALSE,
534 prot, maxprot,
535 (flags & MAP_SHARED) ?
536 VM_INHERIT_SHARE :
537 VM_INHERIT_DEFAULT);
538
539 /* If a non-binding address was specified for this anonymous
540 * mapping, retry the mapping with a zero base
541 * in the event the mapping operation failed due to
542 * lack of space between the address and the map's maximum.
543 */
544 if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
545 user_addr = vm_map_page_size(user_map);
546 goto map_anon_retry;
547 }
548 } else {
549 if (vnode_isswap(vp)) {
550 /*
551 * Map swap files with a special pager
552 * that returns obfuscated contents.
553 */
554 control = NULL;
555 pager = swapfile_pager_setup(vp);
556 if (pager != MEMORY_OBJECT_NULL) {
557 control = swapfile_pager_control(pager);
558 }
559 } else {
560 control = ubc_getobject(vp, UBC_FLAGS_NONE);
561 }
562
563 if (control == NULL) {
564 (void)vnode_put(vp);
565 error = ENOMEM;
566 goto bad;
567 }
568
569 /*
570 * Set credentials:
571 * FIXME: if we're writing the file we need a way to
572 * ensure that someone doesn't replace our R/W creds
573 * with ones that only work for read.
574 */
575
576 ubc_setthreadcred(vp, p, current_thread());
577 docow = FALSE;
578 if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
579 docow = TRUE;
580 }
581
582#ifdef notyet
583/* Hmm .. */
584#if defined(VM_PROT_READ_IS_EXEC)
585 if (prot & VM_PROT_READ)
586 prot |= VM_PROT_EXECUTE;
587 if (maxprot & VM_PROT_READ)
588 maxprot |= VM_PROT_EXECUTE;
589#endif
590#endif /* notyet */
591
592#if 3777787
593 if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
594 prot |= VM_PROT_READ;
595 if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
596 maxprot |= VM_PROT_READ;
597#endif /* radar 3777787 */
598
599map_file_retry:
600 if ((flags & MAP_RESILIENT_CODESIGN) ||
601 (flags & MAP_RESILIENT_MEDIA)) {
602 if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
603 assert(!mapanon);
604 vnode_put(vp);
605 error = EPERM;
606 goto bad;
607 }
608 /* strictly limit access to "prot" */
609 maxprot &= prot;
610 }
611
612 vm_object_offset_t end_pos = 0;
613 if (os_add_overflow(user_size, file_pos, &end_pos)) {
614 vnode_put(vp);
615 error = EINVAL;
616 goto bad;
617 }
618
619 result = vm_map_enter_mem_object_control(user_map,
620 &user_addr, user_size,
621 0, alloc_flags, vmk_flags,
622 tag,
623 control, file_pos,
624 docow, prot, maxprot,
625 (flags & MAP_SHARED) ?
626 VM_INHERIT_SHARE :
627 VM_INHERIT_DEFAULT);
628
629 /* If a non-binding address was specified for this file backed
630 * mapping, retry the mapping with a zero base
631 * in the event the mapping operation failed due to
632 * lack of space between the address and the map's maximum.
633 */
634 if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
635 user_addr = vm_map_page_size(user_map);
636 goto map_file_retry;
637 }
638 }
639
640 if (!mapanon) {
641 (void)vnode_put(vp);
642 }
643
644 switch (result) {
645 case KERN_SUCCESS:
646 *retval = user_addr + pageoff;
647 error = 0;
648 break;
649 case KERN_INVALID_ADDRESS:
650 case KERN_NO_SPACE:
651 error = ENOMEM;
652 break;
653 case KERN_PROTECTION_FAILURE:
654 error = EACCES;
655 break;
656 default:
657 error = EINVAL;
658 break;
659 }
660bad:
661 if (pager != MEMORY_OBJECT_NULL) {
662 /*
663 * Release the reference on the pager.
664 * If the mapping was successful, it now holds
665 * an extra reference.
666 */
667 memory_object_deallocate(pager);
668 }
669 if (fpref)
670 fp_drop(p, fd, fp, 0);
671
672 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
673#ifndef CONFIG_EMBEDDED
674 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
675 (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
676#endif
677 return(error);
678}
679
680int
681msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
682{
683 __pthread_testcancel(1);
684 return(msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
685}
686
687int
688msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int32_t *retval)
689{
690 mach_vm_offset_t addr;
691 mach_vm_size_t size;
692 int flags;
693 vm_map_t user_map;
694 int rv;
695 vm_sync_t sync_flags=0;
696
697 user_map = current_map();
698 addr = (mach_vm_offset_t) uap->addr;
699 size = (mach_vm_size_t)uap->len;
700#ifndef CONFIG_EMBEDDED
701 KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
702#endif
703 if (addr & vm_map_page_mask(user_map)) {
704 /* UNIX SPEC: user address is not page-aligned, return EINVAL */
705 return EINVAL;
706 }
707 if (size == 0) {
708 /*
 709 * We cannot support this properly without maintaining a
 710 * list of all mmaps done. We cannot use vm_map_entry as entries could be
 711 * split or coalesced by independent actions. So instead of returning
 712 * inaccurate results, let's just return an error for an invalid size
 713 * specified.
714 */
715 return (EINVAL); /* XXX breaks posix apps */
716 }
717
718 flags = uap->flags;
719 /* disallow contradictory flags */
720 if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
721 return (EINVAL);
722
723 if (flags & MS_KILLPAGES)
724 sync_flags |= VM_SYNC_KILLPAGES;
725 if (flags & MS_DEACTIVATE)
726 sync_flags |= VM_SYNC_DEACTIVATE;
727 if (flags & MS_INVALIDATE)
728 sync_flags |= VM_SYNC_INVALIDATE;
729
730 if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
731 if (flags & MS_ASYNC)
732 sync_flags |= VM_SYNC_ASYNCHRONOUS;
733 else
734 sync_flags |= VM_SYNC_SYNCHRONOUS;
735 }
736
737 sync_flags |= VM_SYNC_CONTIGUOUS; /* complain if holes */
738
739 rv = mach_vm_msync(user_map, addr, size, sync_flags);
740
741 switch (rv) {
742 case KERN_SUCCESS:
743 break;
744 case KERN_INVALID_ADDRESS: /* hole in region being sync'ed */
745 return (ENOMEM);
746 case KERN_FAILURE:
747 return (EIO);
748 default:
749 return (EINVAL);
750 }
751 return (0);
752}
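
/*
 * Illustrative sketch: because VM_SYNC_CONTIGUOUS is always set above,
 * a userspace call like
 *
 *	if (msync(addr, len, MS_SYNC) == -1 && errno == ENOMEM)
 *		handle_hole_in_region();
 *
 * sees holes in [addr, addr + len) reported as ENOMEM rather than
 * silently skipped; handle_hole_in_region() is a hypothetical handler.
 */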
753
754
755int
756munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
757{
758 mach_vm_offset_t user_addr;
759 mach_vm_size_t user_size;
760 kern_return_t result;
761 vm_map_t user_map;
762
763 user_map = current_map();
764 user_addr = (mach_vm_offset_t) uap->addr;
765 user_size = (mach_vm_size_t) uap->len;
766
767 AUDIT_ARG(addr, user_addr);
768 AUDIT_ARG(len, user_size);
769
770 if (user_addr & vm_map_page_mask(user_map)) {
771 /* UNIX SPEC: user address is not page-aligned, return EINVAL */
772 return EINVAL;
773 }
774
775 if (user_addr + user_size < user_addr)
776 return(EINVAL);
777
778 if (user_size == 0) {
779 /* UNIX SPEC: size is 0, return EINVAL */
780 return EINVAL;
781 }
782
783 result = mach_vm_deallocate(user_map, user_addr, user_size);
784 if (result != KERN_SUCCESS) {
785 return(EINVAL);
786 }
787 return(0);
788}
789
790int
791mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
792{
793 vm_prot_t prot;
794 mach_vm_offset_t user_addr;
795 mach_vm_size_t user_size;
796 kern_return_t result;
797 vm_map_t user_map;
798#if CONFIG_MACF
799 int error;
800#endif
801
802 AUDIT_ARG(addr, uap->addr);
803 AUDIT_ARG(len, uap->len);
804 AUDIT_ARG(value32, uap->prot);
805
806 user_map = current_map();
807 user_addr = (mach_vm_offset_t) uap->addr;
808 user_size = (mach_vm_size_t) uap->len;
809 prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ));
810
811 if (user_addr & vm_map_page_mask(user_map)) {
812 /* UNIX SPEC: user address is not page-aligned, return EINVAL */
813 return EINVAL;
814 }
815
816#ifdef notyet
817/* Hmm .. */
818#if defined(VM_PROT_READ_IS_EXEC)
819 if (prot & VM_PROT_READ)
820 prot |= VM_PROT_EXECUTE;
821#endif
822#endif /* notyet */
823
824#if 3936456
825 if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
826 prot |= VM_PROT_READ;
827#endif /* 3936456 */
828
829#if defined(__arm64__)
830 if (prot & VM_PROT_STRIP_READ)
831 prot &= ~(VM_PROT_READ | VM_PROT_STRIP_READ);
832#endif
833
834#if CONFIG_MACF
835 /*
 836 * The MAC check for mprotect is of limited use for two reasons:
 837 * Without mmap revocation, the caller could have asked for the max
 838 * protections initially instead of a reduced set, so an mprotect
 839 * check would offer no new security.
840 * It is not possible to extract the vnode from the pager object(s)
841 * of the target memory range.
842 * However, the MAC check may be used to prevent a process from,
843 * e.g., making the stack executable.
844 */
845 error = mac_proc_check_mprotect(p, user_addr,
846 user_size, prot);
847 if (error)
848 return (error);
849#endif
850
851 if(prot & VM_PROT_TRUSTED) {
852#if CONFIG_DYNAMIC_CODE_SIGNING
853 /* CODE SIGNING ENFORCEMENT - JIT support */
854 /* The special protection value VM_PROT_TRUSTED requests that we treat
855 * this page as if it had a valid code signature.
856 * If this is enabled, there MUST be a MAC policy implementing the
 857 * mac_proc_check_mprotect() hook above. Otherwise, code signing will be
 858 * compromised because the check would always succeed and thus any
 859 * process could sign dynamically. */
860 result = vm_map_sign(
861 user_map,
862 vm_map_trunc_page(user_addr,
863 vm_map_page_mask(user_map)),
864 vm_map_round_page(user_addr+user_size,
865 vm_map_page_mask(user_map)));
866 switch (result) {
867 case KERN_SUCCESS:
868 break;
869 case KERN_INVALID_ADDRESS:
870 /* UNIX SPEC: for an invalid address range, return ENOMEM */
871 return ENOMEM;
872 default:
873 return EINVAL;
874 }
875#else
876 return ENOTSUP;
877#endif
878 }
879 prot &= ~VM_PROT_TRUSTED;
880
881 result = mach_vm_protect(user_map, user_addr, user_size,
882 FALSE, prot);
883 switch (result) {
884 case KERN_SUCCESS:
885 return (0);
886 case KERN_PROTECTION_FAILURE:
887 return (EACCES);
888 case KERN_INVALID_ADDRESS:
889 /* UNIX SPEC: for an invalid address range, return ENOMEM */
890 return ENOMEM;
891 }
892 return (EINVAL);
893}
894
895
896int
897minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval)
898{
899 mach_vm_offset_t addr;
900 mach_vm_size_t size;
901 vm_inherit_t inherit;
902 vm_map_t user_map;
903 kern_return_t result;
904
905 AUDIT_ARG(addr, uap->addr);
906 AUDIT_ARG(len, uap->len);
907 AUDIT_ARG(value32, uap->inherit);
908
909 addr = (mach_vm_offset_t)uap->addr;
910 size = (mach_vm_size_t)uap->len;
911 inherit = uap->inherit;
912
913 user_map = current_map();
914 result = mach_vm_inherit(user_map, addr, size,
915 inherit);
916 switch (result) {
917 case KERN_SUCCESS:
918 return (0);
919 case KERN_PROTECTION_FAILURE:
920 return (EACCES);
921 }
922 return (EINVAL);
923}
924
925int
926madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval)
927{
928 vm_map_t user_map;
929 mach_vm_offset_t start;
930 mach_vm_size_t size;
931 vm_behavior_t new_behavior;
932 kern_return_t result;
933
934 /*
935 * Since this routine is only advisory, we default to conservative
936 * behavior.
937 */
938 switch (uap->behav) {
939 case MADV_RANDOM:
940 new_behavior = VM_BEHAVIOR_RANDOM;
941 break;
942 case MADV_SEQUENTIAL:
943 new_behavior = VM_BEHAVIOR_SEQUENTIAL;
944 break;
945 case MADV_NORMAL:
946 new_behavior = VM_BEHAVIOR_DEFAULT;
947 break;
948 case MADV_WILLNEED:
949 new_behavior = VM_BEHAVIOR_WILLNEED;
950 break;
951 case MADV_DONTNEED:
952 new_behavior = VM_BEHAVIOR_DONTNEED;
953 break;
954 case MADV_FREE:
955 new_behavior = VM_BEHAVIOR_FREE;
956 break;
957 case MADV_ZERO_WIRED_PAGES:
958 new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
959 break;
960 case MADV_FREE_REUSABLE:
961 new_behavior = VM_BEHAVIOR_REUSABLE;
962 break;
963 case MADV_FREE_REUSE:
964 new_behavior = VM_BEHAVIOR_REUSE;
965 break;
966 case MADV_CAN_REUSE:
967 new_behavior = VM_BEHAVIOR_CAN_REUSE;
968 break;
969 case MADV_PAGEOUT:
970#if MACH_ASSERT
971 new_behavior = VM_BEHAVIOR_PAGEOUT;
972 break;
973#else /* MACH_ASSERT */
974 return ENOTSUP;
975#endif /* MACH_ASSERT */
976 default:
977 return(EINVAL);
978 }
979
980 start = (mach_vm_offset_t) uap->addr;
981 size = (mach_vm_size_t) uap->len;
982
983#if __arm64__
984 if (start == 0 &&
985 size != 0 &&
986 (uap->behav == MADV_FREE ||
987 uap->behav == MADV_FREE_REUSABLE)) {
988 printf("** FOURK_COMPAT: %d[%s] "
989 "failing madvise(0x%llx,0x%llx,%s)\n",
990 p->p_pid, p->p_comm, start, size,
991 ((uap->behav == MADV_FREE_REUSABLE)
992 ? "MADV_FREE_REUSABLE"
993 : "MADV_FREE"));
994 DTRACE_VM3(fourk_compat_madvise,
995 uint64_t, start,
996 uint64_t, size,
997 int, uap->behav);
998 return EINVAL;
999 }
1000#endif /* __arm64__ */
1001
1002 user_map = current_map();
1003
1004 result = mach_vm_behavior_set(user_map, start, size, new_behavior);
1005 switch (result) {
1006 case KERN_SUCCESS:
1007 return 0;
1008 case KERN_INVALID_ADDRESS:
1009 return EINVAL;
1010 case KERN_NO_SPACE:
1011 return ENOMEM;
1012 }
1013
1014 return EINVAL;
1015}
1016
1017int
1018mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
1019{
1020 mach_vm_offset_t addr = 0, first_addr = 0, end = 0, cur_end = 0;
1021 vm_map_t map = VM_MAP_NULL;
1022 user_addr_t vec = 0;
1023 int error = 0;
1024 int lastvecindex = 0;
1025 int mincoreinfo=0;
1026 int pqueryinfo = 0;
1027 unsigned int pqueryinfo_vec_size = 0;
1028 vm_page_info_basic_t info = NULL;
1029 mach_msg_type_number_t count = 0;
1030 char *kernel_vec = NULL;
1031 unsigned int req_vec_size_pages = 0, cur_vec_size_pages = 0, vecindex = 0;
1032 kern_return_t kr = KERN_SUCCESS;
1033
1034 map = current_map();
1035
1036 /*
1037 * Make sure that the addresses presented are valid for user
1038 * mode.
1039 */
1040 first_addr = addr = vm_map_trunc_page(uap->addr,
1041 vm_map_page_mask(map));
1042 end = vm_map_round_page(uap->addr + uap->len,
1043 vm_map_page_mask(map));
1044
1045 if (end < addr)
1046 return (EINVAL);
1047
1048 if (end == addr)
1049 return (0);
1050
1051 /*
1052 * We are going to loop through the whole 'req_vec_size' pages
1053 * range in chunks of 'cur_vec_size'.
1054 */
1055
1056 req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
1057 cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));
1058
1059 kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK | M_ZERO);
1060
1061 if (kernel_vec == NULL) {
1062 return (ENOMEM);
1063 }
1064
1065 /*
1066 * Address of byte vector
1067 */
1068 vec = uap->vec;
1069
1070 pqueryinfo_vec_size = cur_vec_size_pages * sizeof(struct vm_page_info_basic);
1071 info = (void*) _MALLOC(pqueryinfo_vec_size, M_TEMP, M_WAITOK);
1072
1073 if (info == NULL) {
1074 FREE(kernel_vec, M_TEMP);
1075 return (ENOMEM);
1076 }
1077
1078 while (addr < end) {
1079
1080 cur_end = addr + (cur_vec_size_pages * PAGE_SIZE_64);
1081
1082 count = VM_PAGE_INFO_BASIC_COUNT;
1083 kr = vm_map_page_range_info_internal(map,
1084 addr,
1085 cur_end,
1086 VM_PAGE_INFO_BASIC,
1087 (vm_page_info_t) info,
1088 &count);
1089
1090 assert(kr == KERN_SUCCESS);
1091
1092 /*
1093 * Do this on a map entry basis so that if the pages are not
 1094 * in the current process's address space, we can easily look
1095 * up the pages elsewhere.
1096 */
1097 lastvecindex = -1;
1098 for( ; addr < cur_end; addr += PAGE_SIZE ) {
1099
1100 pqueryinfo = info[lastvecindex + 1].disposition;
1101
1102 mincoreinfo = 0;
1103
1104 if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
1105 mincoreinfo |= MINCORE_INCORE;
1106 if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
1107 mincoreinfo |= MINCORE_REFERENCED;
1108 if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
1109 mincoreinfo |= MINCORE_MODIFIED;
1110 if (pqueryinfo & VM_PAGE_QUERY_PAGE_PAGED_OUT)
1111 mincoreinfo |= MINCORE_PAGED_OUT;
1112 if (pqueryinfo & VM_PAGE_QUERY_PAGE_COPIED)
1113 mincoreinfo |= MINCORE_COPIED;
1114 if ((pqueryinfo & VM_PAGE_QUERY_PAGE_EXTERNAL) == 0)
1115 mincoreinfo |= MINCORE_ANONYMOUS;
1116 /*
1117 * calculate index into user supplied byte vector
1118 */
1119 vecindex = (addr - first_addr)>> PAGE_SHIFT;
1120 kernel_vec[vecindex] = (char)mincoreinfo;
1121 lastvecindex = vecindex;
1122 }
1123
1124
1125 assert(vecindex == (cur_vec_size_pages - 1));
1126
1127 error = copyout(kernel_vec, vec, cur_vec_size_pages * sizeof(char) /* a char per page */);
1128
1129 if (error) {
1130 break;
1131 }
1132
1133 /*
 1134 * For the next chunk, we'll need to:
 1135 * - bump the location in the user buffer for our next disposition,
 1136 * - compute the new length,
 1137 * - compute the new starting address.
1138 */
1139 vec += cur_vec_size_pages * sizeof(char);
1140 req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
1141 cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));
1142
1143 first_addr = addr;
1144 }
1145
1146 FREE(kernel_vec, M_TEMP);
1147 FREE(info, M_TEMP);
1148
1149 if (error) {
1150 return (EFAULT);
1151 }
1152
1153 return (0);
1154}
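
/*
 * Illustrative sketch: one disposition byte is reported per page, so a
 * caller sizes its vector accordingly, e.g.
 *
 *	size_t pgsz = (size_t)getpagesize();
 *	char *vec = malloc((len + pgsz - 1) / pgsz);
 *	if (mincore(addr, len, vec) == 0 && (vec[0] & MINCORE_INCORE))
 *		first_page_is_resident();
 *
 * where first_page_is_resident() is a hypothetical stand-in for the
 * caller's own handling.
 */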
1155
1156int
1157mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retvalval)
1158{
1159 vm_map_t user_map;
1160 vm_map_offset_t addr;
1161 vm_map_size_t size, pageoff;
1162 kern_return_t result;
1163
1164 AUDIT_ARG(addr, uap->addr);
1165 AUDIT_ARG(len, uap->len);
1166
1167 addr = (vm_map_offset_t) uap->addr;
1168 size = (vm_map_size_t)uap->len;
1169
1170 /* disable wrap around */
1171 if (addr + size < addr)
1172 return (EINVAL);
1173
1174 if (size == 0)
1175 return (0);
1176
1177 user_map = current_map();
1178 pageoff = (addr & vm_map_page_mask(user_map));
1179 addr -= pageoff;
1180 size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));
1181
1182 /* have to call vm_map_wire directly to pass "I don't know" protections */
1183 result = vm_map_wire_kernel(user_map, addr, addr+size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK, TRUE);
1184
1185 if (result == KERN_RESOURCE_SHORTAGE)
1186 return EAGAIN;
1187 else if (result == KERN_PROTECTION_FAILURE)
1188 return EACCES;
1189 else if (result != KERN_SUCCESS)
1190 return ENOMEM;
1191
1192 return 0; /* KERN_SUCCESS */
1193}
1194
1195int
1196munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval)
1197{
1198 mach_vm_offset_t addr;
1199 mach_vm_size_t size;
1200 vm_map_t user_map;
1201 kern_return_t result;
1202
1203 AUDIT_ARG(addr, uap->addr);
1204 AUDIT_ARG(addr, uap->len);
1205
1206 addr = (mach_vm_offset_t) uap->addr;
1207 size = (mach_vm_size_t)uap->len;
1208 user_map = current_map();
1209
1210 /* JMM - need to remove all wirings by spec - this just removes one */
1211 result = mach_vm_wire_kernel(host_priv_self(), user_map, addr, size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK);
1212 return (result == KERN_SUCCESS ? 0 : ENOMEM);
1213}
1214
1215
1216int
1217mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
1218{
1219 return (ENOSYS);
1220}
1221
1222int
1223munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
1224{
1225 return(ENOSYS);
1226}
1227
1228#if CONFIG_CODE_DECRYPTION
1229int
1230mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __unused int32_t *retval)
1231{
1232 mach_vm_offset_t user_addr;
1233 mach_vm_size_t user_size;
1234 kern_return_t result;
1235 vm_map_t user_map;
1236 uint32_t cryptid;
1237 cpu_type_t cputype;
1238 cpu_subtype_t cpusubtype;
1239 pager_crypt_info_t crypt_info;
1240 const char * cryptname = 0;
1241 char *vpath;
1242 int len, ret;
1243 struct proc_regioninfo_internal pinfo;
1244 vnode_t vp;
1245 uintptr_t vnodeaddr;
1246 uint32_t vid;
1247
1248 AUDIT_ARG(addr, uap->addr);
1249 AUDIT_ARG(len, uap->len);
1250
1251 user_map = current_map();
1252 user_addr = (mach_vm_offset_t) uap->addr;
1253 user_size = (mach_vm_size_t) uap->len;
1254
1255 cryptid = uap->cryptid;
1256 cputype = uap->cputype;
1257 cpusubtype = uap->cpusubtype;
1258
1259 if (user_addr & vm_map_page_mask(user_map)) {
1260 /* UNIX SPEC: user address is not page-aligned, return EINVAL */
1261 return EINVAL;
1262 }
1263
1264 switch(cryptid) {
1265 case 0:
1266 /* not encrypted, just an empty load command */
1267 return 0;
1268 case 1:
1269 cryptname="com.apple.unfree";
1270 break;
1271 case 0x10:
1272 /* some random cryptid that you could manually put into
1273 * your binary if you want NULL */
1274 cryptname="com.apple.null";
1275 break;
1276 default:
1277 return EINVAL;
1278 }
1279
1280 if (NULL == text_crypter_create) return ENOTSUP;
1281
1282 ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid);
1283 if (ret == 0 || !vnodeaddr) {
1284 /* No really, this returns 0 if the memory address is not backed by a file */
1285 return (EINVAL);
1286 }
1287
1288 vp = (vnode_t)vnodeaddr;
1289 if ((vnode_getwithvid(vp, vid)) == 0) {
1290 MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
1291 if(vpath == NULL) {
1292 vnode_put(vp);
1293 return (ENOMEM);
1294 }
1295
1296 len = MAXPATHLEN;
1297 ret = vn_getpath(vp, vpath, &len);
1298 if(ret) {
1299 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
1300 vnode_put(vp);
1301 return (ret);
1302 }
1303
1304 vnode_put(vp);
1305 } else {
1306 return (EINVAL);
1307 }
1308
1309#if 0
1310 kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n",
1311 __FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size);
1312#endif
1313
1314 /* set up decrypter first */
1315 crypt_file_data_t crypt_data = {
1316 .filename = vpath,
1317 .cputype = cputype,
1318 .cpusubtype = cpusubtype };
1319 result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
1320#if VM_MAP_DEBUG_APPLE_PROTECT
1321 if (vm_map_debug_apple_protect) {
1322 printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n",
1323 p->p_pid, p->p_comm,
1324 user_map,
1325 (uint64_t) user_addr,
1326 (uint64_t) (user_addr + user_size),
1327 __FUNCTION__, vpath, result);
1328 }
1329#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
1330 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
1331
1332 if(result) {
1333 printf("%s: unable to create decrypter %s, kr=%d\n",
1334 __FUNCTION__, cryptname, result);
1335 if (result == kIOReturnNotPrivileged) {
1336 /* text encryption returned decryption failure */
1337 return (EPERM);
1338 } else {
1339 return (ENOMEM);
1340 }
1341 }
1342
1343 /* now remap using the decrypter */
1344 vm_object_offset_t crypto_backing_offset;
1345 crypto_backing_offset = -1; /* i.e. use map entry's offset */
1346 result = vm_map_apple_protected(user_map,
1347 user_addr,
1348 user_addr+user_size,
1349 crypto_backing_offset,
1350 &crypt_info);
1351 if (result) {
1352 printf("%s: mapping failed with %d\n", __FUNCTION__, result);
1353 }
1354
1355 if (result) {
1356 return (EPERM);
1357 }
1358 return 0;
1359}
1360#endif /* CONFIG_CODE_DECRYPTION */