/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ubc.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>

#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/bsdtask_info.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/host_priv.h>
#include <mach/sdt.h>

#include <machine/machine_routines.h>

#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/page_decrypt.h>

#include <IOKit/IOReturn.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_protos.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	struct vnode *vp;
	int flags;
	int prot;
	int err = 0;
	vm_map_t user_map;
	kern_return_t result;
	vm_map_offset_t user_addr;
	vm_map_size_t user_size;
	vm_object_offset_t pageoff;
	vm_object_offset_t file_pos;
	int alloc_flags = 0;
	vm_tag_t tag = VM_KERN_MEMORY_NONE;
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	boolean_t docow;
	vm_prot_t maxprot;
	void *handle;
	memory_object_t pager = MEMORY_OBJECT_NULL;
	memory_object_control_t control;
	int mapanon = 0;
	int fpref = 0;
	int error = 0;
	int fd = uap->fd;
	int num_retries = 0;

	/*
	 * Note that for UNIX03 conformance, there is additional parameter
	 * checking for the mmap() system call in libsyscall prior to entering
	 * the kernel.  The sanity checks and argument validation done in this
	 * function are not the only places one can get returned errnos.
	 */

	user_map = current_map();
	user_addr = (vm_map_offset_t)uap->addr;
	user_size = (vm_map_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */
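	/*
	 * For example, a plain mmap(..., PROT_WRITE, ...) request reaches
	 * the VM layer as VM_PROT_READ | VM_PROT_WRITE because of the
	 * promotion above.
	 */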

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call.  So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & vm_map_page_mask(user_map));
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;	/* low end... */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map)); /* hi end */

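	/*
	 * For example, with 4 KB pages, a request of len 0x100 at file
	 * offset 0x1234 yields pageoff 0x234: file_pos is truncated to
	 * 0x1000 and user_size grows to 0x334, then rounds up to 0x1000.
	 */
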
	if (flags & MAP_JIT) {
		if ((flags & MAP_FIXED) ||
		    (flags & MAP_SHARED) ||
		    !(flags & MAP_ANON) ||
		    (flags & MAP_RESILIENT_CODESIGN) ||
		    (flags & MAP_RESILIENT_MEDIA)) {
			return EINVAL;
		}
	}

	if ((flags & MAP_RESILIENT_CODESIGN) ||
	    (flags & MAP_RESILIENT_MEDIA)) {
		if ((flags & MAP_ANON) ||
		    (flags & MAP_JIT)) {
			return EINVAL;
		}
		if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
			return EPERM;
		}
	}

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & vm_map_page_mask(user_map))
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info; need to wait till then */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					  vm_map_page_mask(user_map)))
		addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					 vm_map_page_mask(user_map));

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {

		maxprot = VM_PROT_ALL;
#if CONFIG_MACF
		/*
		 * Entitlement check.
		 */
		error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
		if (error) {
			return EINVAL;
		}
#endif /* MAC */

		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking.
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_SUPERPAGE_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
			VM_GET_FLAGS_ALIAS(alloc_flags, tag);
			alloc_flags &= ~VM_FLAGS_ALIAS_MASK;
		}

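		/*
		 * For example, userspace can tag an anonymous mapping by
		 * passing a Mach VM alias in place of a file descriptor:
		 *
		 *	mmap(NULL, size, PROT_READ | PROT_WRITE,
		 *	    MAP_ANON | MAP_PRIVATE,
		 *	    VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);
		 */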
		handle = NULL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		if (flags & MAP_JIT)
			return EINVAL;

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		switch (FILEGLOB_DTYPE(fp->f_fglob)) {
		case DTYPE_PSXSHM:
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		case DTYPE_VNODE:
			break;
		default:
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);

		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
				    /*
				     * Do not allow writable mappings of
				     * swap files (see vm_swapfile_pager.c).
				     */
				    !vnode_isswap(vp)) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, file_pos, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */
		}
	}

	if (user_size == 0) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map));

	if (file_pos & vm_map_page_mask(user_map)) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
					      vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_trunc_page(user_addr,
						   vm_map_page_mask(user_map))) {
			if (!mapanon)
				(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

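	/*
	 * For example, a caller that remaps over an existing region,
	 *	mmap(addr, size, prot, MAP_FIXED | MAP_PRIVATE, fd, off);
	 * depends on the atomic deallocate-and-map behavior selected above.
	 */
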
	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	if (flags & MAP_JIT) {
		vmk_flags.vmkf_map_jit = TRUE;
	}

	if (flags & MAP_RESILIENT_CODESIGN) {
		alloc_flags |= VM_FLAGS_RESILIENT_CODESIGN;
	}

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		control = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
map_anon_retry:
		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags, vmk_flags,
						 tag,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this anonymous
		 * mapping, retry the mapping with a near-zero base (one page,
		 * so that address 0 is avoided) in the event the mapping
		 * operation failed due to lack of space between the address
		 * and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_anon_retry;
		}
	} else {
		if (vnode_isswap(vp)) {
			/*
			 * Map swap files with a special pager
			 * that returns obfuscated contents.
			 */
			control = NULL;
			pager = swapfile_pager_setup(vp);
			if (pager != MEMORY_OBJECT_NULL) {
				control = swapfile_pager_control(pager);
			}
		} else {
			control = ubc_getobject(vp, UBC_FLAGS_NONE);
		}

		if (control == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 * Set credentials:
		 * FIXME: if we're writing the file we need a way to
		 * ensure that someone doesn't replace our R/W creds
		 * with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

map_file_retry:
		if ((flags & MAP_RESILIENT_CODESIGN) ||
		    (flags & MAP_RESILIENT_MEDIA)) {
			if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
				assert(!mapanon);
				vnode_put(vp);
				error = EPERM;
				goto bad;
			}
			/* strictly limit access to "prot" */
			maxprot &= prot;
		}
		result = vm_map_enter_mem_object_control(user_map,
							 &user_addr, user_size,
							 0, alloc_flags, vmk_flags,
							 tag,
							 control, file_pos,
							 docow, prot, maxprot,
							 (flags & MAP_SHARED) ?
							 VM_INHERIT_SHARE :
							 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this file backed
		 * mapping, retry the mapping with a near-zero base (one page,
		 * so that address 0 is avoided) in the event the mapping
		 * operation failed due to lack of space between the address
		 * and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_file_retry;
		}
	}

	if (!mapanon) {
		(void)vnode_put(vp);
	}

	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
bad:
	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the reference on the pager.
		 * If the mapping was successful, it now holds
		 * an extra reference.
		 */
		memory_object_deallocate(pager);
	}
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
#ifndef CONFIG_EMBEDDED
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
#endif
	return (error);
}

int
msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return(msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
}

int
msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	int flags;
	vm_map_t user_map;
	int rv;
	vm_sync_t sync_flags = 0;

	user_map = current_map();
	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
#ifndef CONFIG_EMBEDDED
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
#endif
	if (addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining a
		 * list of all mmaps done.  We cannot use vm_map_entry, as
		 * entries could be split or coalesced by independent
		 * actions.  So instead of returning inaccurate results,
		 * just return an error for a zero size.
		 */
		return (EINVAL); /* XXX breaks posix apps */
	}

	flags = uap->flags;
	/* disallow contradictory flags */
	if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
		return (EINVAL);

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}
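	/*
	 * For example, msync(addr, len, MS_SYNC | MS_INVALIDATE) arrives
	 * here as VM_SYNC_SYNCHRONOUS | VM_SYNC_INVALIDATE, plus the
	 * VM_SYNC_CONTIGUOUS bit added below.
	 */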

	sync_flags |= VM_SYNC_CONTIGUOUS;	/* complain if holes */

	rv = mach_vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:	/* hole in region being sync'ed */
		return (ENOMEM);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
	return (0);
}


int
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	if (user_addr + user_size < user_addr)
		return(EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return EINVAL;
	}

	result = mach_vm_deallocate(user_map, user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return(EINVAL);
	}
	return(0);
}

int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
	vm_prot_t prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
#if CONFIG_MACF
	int error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->prot);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ));

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3936456
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif /* 3936456 */

#if defined(__arm64__)
	if (prot & VM_PROT_STRIP_READ)
		prot &= ~(VM_PROT_READ | VM_PROT_STRIP_READ);
#endif
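	/*
	 * For example, on arm64 a caller can request execute-only memory:
	 * mprotect(addr, len, PROT_EXEC | VM_PROT_STRIP_READ) drops the
	 * implied read permission that the 3936456 block above added.
	 */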

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so a mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr,
	    user_size, prot);
	if (error)
		return (error);
#endif

	if (prot & VM_PROT_TRUSTED) {
#if CONFIG_DYNAMIC_CODE_SIGNING
		/* CODE SIGNING ENFORCEMENT - JIT support */
		/* The special protection value VM_PROT_TRUSTED requests that we treat
		 * this page as if it had a valid code signature.
		 * If this is enabled, there MUST be a MAC policy implementing the
		 * mac_proc_check_mprotect() hook above.  Otherwise, code signing will
		 * be compromised because the check would always succeed and thus any
		 * process could sign dynamically. */
		result = vm_map_sign(
			user_map,
			vm_map_trunc_page(user_addr,
					  vm_map_page_mask(user_map)),
			vm_map_round_page(user_addr+user_size,
					  vm_map_page_mask(user_map)));
		switch (result) {
		case KERN_SUCCESS:
			break;
		case KERN_INVALID_ADDRESS:
			/* UNIX SPEC: for an invalid address range, return ENOMEM */
			return ENOMEM;
		default:
			return EINVAL;
		}
#else
		return ENOTSUP;
#endif
	}
	prot &= ~VM_PROT_TRUSTED;

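	/*
	 * For example, with CONFIG_DYNAMIC_CODE_SIGNING a JIT can pass
	 * PROT_READ | PROT_EXEC | VM_PROT_TRUSTED to have freshly generated
	 * code treated as validly signed by the block above; the bit is
	 * stripped before the actual protection change below.
	 */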
	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}


int
minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_inherit_t inherit;
	vm_map_t	user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size,
				 inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

int
madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	mach_vm_offset_t start;
	mach_vm_size_t size;
	vm_behavior_t new_behavior;
	kern_return_t	result;

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	switch (uap->behav) {
	case MADV_RANDOM:
		new_behavior = VM_BEHAVIOR_RANDOM;
		break;
	case MADV_SEQUENTIAL:
		new_behavior = VM_BEHAVIOR_SEQUENTIAL;
		break;
	case MADV_NORMAL:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	case MADV_WILLNEED:
		new_behavior = VM_BEHAVIOR_WILLNEED;
		break;
	case MADV_DONTNEED:
		new_behavior = VM_BEHAVIOR_DONTNEED;
		break;
	case MADV_FREE:
		new_behavior = VM_BEHAVIOR_FREE;
		break;
	case MADV_ZERO_WIRED_PAGES:
		new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
		break;
	case MADV_FREE_REUSABLE:
		new_behavior = VM_BEHAVIOR_REUSABLE;
		break;
	case MADV_FREE_REUSE:
		new_behavior = VM_BEHAVIOR_REUSE;
		break;
	case MADV_CAN_REUSE:
		new_behavior = VM_BEHAVIOR_CAN_REUSE;
		break;
	case MADV_PAGEOUT:
#if MACH_ASSERT
		new_behavior = VM_BEHAVIOR_PAGEOUT;
		break;
#else /* MACH_ASSERT */
		return ENOTSUP;
#endif /* MACH_ASSERT */
	default:
		return(EINVAL);
	}

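	/*
	 * For example, madvise(addr, len, MADV_FREE) selects
	 * VM_BEHAVIOR_FREE above and is applied to the address range via
	 * mach_vm_behavior_set() below.
	 */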
	start = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t) uap->len;

#if __arm64__
	if (start == 0 &&
	    size != 0 &&
	    (uap->behav == MADV_FREE ||
	     uap->behav == MADV_FREE_REUSABLE)) {
		printf("** FOURK_COMPAT: %d[%s] "
		       "failing madvise(0x%llx,0x%llx,%s)\n",
		       p->p_pid, p->p_comm, start, size,
		       ((uap->behav == MADV_FREE_REUSABLE)
			? "MADV_FREE_REUSABLE"
			: "MADV_FREE"));
		DTRACE_VM3(fourk_compat_madvise,
			   uint64_t, start,
			   uint64_t, size,
			   int, uap->behav);
		return EINVAL;
	}
#endif /* __arm64__ */

	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	switch (result) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:
		return EINVAL;
	case KERN_NO_SPACE:
		return ENOMEM;
	}

	return EINVAL;
}

int
mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr = 0, first_addr = 0, end = 0, cur_end = 0;
	vm_map_t map = VM_MAP_NULL;
	user_addr_t vec = 0;
	int error = 0;
	int vecindex = 0, lastvecindex = 0;
	int mincoreinfo = 0;
	int pqueryinfo = 0;
	unsigned int pqueryinfo_vec_size = 0;
	vm_page_info_basic_t info = NULL;
	mach_msg_type_number_t count = 0;
	char *kernel_vec = NULL;
	int req_vec_size_pages = 0, cur_vec_size_pages = 0;
	kern_return_t kr = KERN_SUCCESS;

	map = current_map();

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = vm_map_trunc_page(uap->addr,
					      vm_map_page_mask(map));
	end = vm_map_round_page(uap->addr + uap->len,
				vm_map_page_mask(map));

	if (end < addr)
		return (EINVAL);

	if (end == addr)
		return (0);

	/*
	 * We are going to loop through the whole 'req_vec_size' pages
	 * range in chunks of 'cur_vec_size'.
	 */

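	/*
	 * For example, a request spanning more than MAX_PAGE_RANGE_QUERY
	 * bytes is processed in several passes of cur_vec_size_pages pages
	 * each, with the per-page status bytes copied out after every pass.
	 */
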
	req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
	cur_vec_size_pages = MIN(req_vec_size_pages, (int)(MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));

	kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK);

	if (kernel_vec == NULL) {
		return (ENOMEM);
	}

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pqueryinfo_vec_size = cur_vec_size_pages * sizeof(struct vm_page_info_basic);
	info = (void*) _MALLOC(pqueryinfo_vec_size, M_TEMP, M_WAITOK);

	if (info == NULL) {
		FREE(kernel_vec, M_TEMP);
		return (ENOMEM);
	}

	while (addr < end) {

		cur_end = addr + (cur_vec_size_pages * PAGE_SIZE_64);

		count = VM_PAGE_INFO_BASIC_COUNT;
		kr = vm_map_page_range_info_internal(map,
						     addr,
						     cur_end,
						     VM_PAGE_INFO_BASIC,
						     (vm_page_info_t) info,
						     &count);

		assert(kr == KERN_SUCCESS);

		/*
		 * Do this on a map entry basis so that if the pages are not
		 * in the current process's address space, we can easily look
		 * up the pages elsewhere.
		 */
		lastvecindex = -1;
		for( ; addr < cur_end; addr += PAGE_SIZE ) {

			pqueryinfo = info[lastvecindex + 1].disposition;

			mincoreinfo = 0;

			if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
				mincoreinfo |= MINCORE_INCORE;
			if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
				mincoreinfo |= MINCORE_REFERENCED;
			if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
				mincoreinfo |= MINCORE_MODIFIED;
			if (pqueryinfo & VM_PAGE_QUERY_PAGE_PAGED_OUT)
				mincoreinfo |= MINCORE_PAGED_OUT;
			if (pqueryinfo & VM_PAGE_QUERY_PAGE_COPIED)
				mincoreinfo |= MINCORE_COPIED;
			if ((pqueryinfo & VM_PAGE_QUERY_PAGE_EXTERNAL) == 0)
				mincoreinfo |= MINCORE_ANONYMOUS;
			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = (addr - first_addr)>> PAGE_SHIFT;
			kernel_vec[vecindex] = (char)mincoreinfo;
			lastvecindex = vecindex;
		}


		assert(vecindex == (cur_vec_size_pages - 1));

		error = copyout(kernel_vec, vec, cur_vec_size_pages * sizeof(char) /* a char per page */);

		if (error) {
			break;
		}

		/*
		 * For the next chunk, we'll need:
		 * - bump the location in the user buffer for our next disposition.
		 * - new length
		 * - starting address
		 */
		vec += cur_vec_size_pages * sizeof(char);
		req_vec_size_pages = (end - addr) >> PAGE_SHIFT;
		cur_vec_size_pages = MIN(req_vec_size_pages, (int)(MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT));

		first_addr = addr;
	}

	FREE(kernel_vec, M_TEMP);
	FREE(info, M_TEMP);

	if (error) {
		return (EFAULT);
	}

	return (0);
}

int
mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	vm_map_offset_t addr;
	vm_map_size_t size, pageoff;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	user_map = current_map();
	pageoff = (addr & vm_map_page_mask(user_map));
	addr -= pageoff;
	size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));

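	/*
	 * For example, with 4 KB pages, mlock(0x1234, 0x100) wires the
	 * single page at 0x1000: pageoff = 0x234, addr becomes 0x1000,
	 * and size rounds up from 0x334 to 0x1000.
	 */
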
	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire_kernel(user_map, addr, addr+size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK, TRUE);

	if (result == KERN_RESOURCE_SHORTAGE)
		return EAGAIN;
	else if (result == KERN_PROTECTION_FAILURE)
		return EACCES;
	else if (result != KERN_SUCCESS)
		return ENOMEM;

	return 0;	/* KERN_SUCCESS */
}

int
munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_map_t user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire_kernel(host_priv_self(), user_map, addr, size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}


int
mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
{
	return (ENOSYS);
}

int
munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
{
	return(ENOSYS);
}

#if CONFIG_CODE_DECRYPTION
int
mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
	uint32_t	cryptid;
	cpu_type_t	cputype;
	cpu_subtype_t	cpusubtype;
	pager_crypt_info_t	crypt_info;
	const char * cryptname = 0;
	char *vpath;
	int len, ret;
	struct proc_regioninfo_internal pinfo;
	vnode_t vp;
	uintptr_t vnodeaddr;
	uint32_t vid;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	cryptid = uap->cryptid;
	cputype = uap->cputype;
	cpusubtype = uap->cpusubtype;

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	switch (cryptid) {
	case 0:
		/* not encrypted, just an empty load command */
		return 0;
	case 1:
		cryptname = "com.apple.unfree";
		break;
	case 0x10:
		/* some random cryptid that you could manually put into
		 * your binary if you want NULL */
		cryptname = "com.apple.null";
		break;
	default:
		return EINVAL;
	}

	if (NULL == text_crypter_create)
		return ENOTSUP;

	ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid);
	if (ret == 0 || !vnodeaddr) {
		/* No really, this returns 0 if the memory address is not backed by a file */
		return (EINVAL);
	}

	vp = (vnode_t)vnodeaddr;
	if ((vnode_getwithvid(vp, vid)) == 0) {
		MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (vpath == NULL) {
			vnode_put(vp);
			return (ENOMEM);
		}

		len = MAXPATHLEN;
		ret = vn_getpath(vp, vpath, &len);
		if (ret) {
			FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
			vnode_put(vp);
			return (ret);
		}

		vnode_put(vp);
	} else {
		return (EINVAL);
	}

#if 0
	kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n",
		__FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size);
#endif

	/* set up decrypter first */
	crypt_file_data_t crypt_data = {
		.filename = vpath,
		.cputype = cputype,
		.cpusubtype = cpusubtype };
	result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
#if VM_MAP_DEBUG_APPLE_PROTECT
	if (vm_map_debug_apple_protect) {
		printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n",
		       p->p_pid, p->p_comm,
		       user_map,
		       (uint64_t) user_addr,
		       (uint64_t) (user_addr + user_size),
		       __FUNCTION__, vpath, result);
	}
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);

	if (result) {
		printf("%s: unable to create decrypter %s, kr=%d\n",
		       __FUNCTION__, cryptname, result);
		if (result == kIOReturnNotPrivileged) {
			/* text encryption returned decryption failure */
			return (EPERM);
		} else {
			return (ENOMEM);
		}
	}

	/* now remap using the decrypter */
	vm_object_offset_t crypto_backing_offset;
	crypto_backing_offset = -1;	/* i.e. use map entry's offset */
	result = vm_map_apple_protected(user_map,
					user_addr,
					user_addr+user_size,
					crypto_backing_offset,
					&crypt_info);
	if (result) {
		printf("%s: mapping failed with %d\n", __FUNCTION__, result);
		return (EPERM);
	}

	return 0;
}
#endif /* CONFIG_CODE_DECRYPTION */