/* bsd/kern/kern_mman.c (apple/xnu, tag xnu-2422.110.17) */
/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/resourcevar.h>
#include <sys/vnode_internal.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file_internal.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ubc.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>
#if CONFIG_PROTECT
#include <sys/cprotect.h>
#endif

#include <sys/syscall.h>
#include <sys/kdebug.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/host_priv.h>

#include <machine/machine_routines.h>

#include <kern/cpu_number.h>
#include <kern/host.h>
#include <kern/task.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_protos.h>

/* XXX the following function should probably be static */
kern_return_t map_fd_funneled(int, vm_object_offset_t, vm_offset_t *,
				boolean_t, vm_size_t);

/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	register struct vnode *vp;
	int			flags;
	int			prot;
	int			err = 0;
	vm_map_t		user_map;
	kern_return_t		result;
	vm_map_offset_t		user_addr;
	vm_map_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags = 0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void			*handle;
	memory_object_t		pager = MEMORY_OBJECT_NULL;
	memory_object_control_t	control;
	int			mapanon = 0;
	int			fpref = 0;
	int			error = 0;
	int			fd = uap->fd;
	int			num_retries = 0;

	user_map = current_map();
	user_addr = (vm_map_offset_t)uap->addr;
	user_size = (vm_map_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64bit value and pass it in a
	 * function call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;

	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 *	Align the file position to a page boundary,
	 *	and save its page offset component.
	 */
	pageoff = (file_pos & vm_map_page_mask(user_map));
	file_pos -= (vm_object_offset_t)pageoff;

	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;			/* low end... */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map)); /* hi end */

	if ((flags & MAP_JIT) && ((flags & MAP_FIXED) || (flags & MAP_SHARED) || !(flags & MAP_ANON))) {
		return EINVAL;
	}
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & vm_map_page_mask(user_map))
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info, need to wait till then */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					  vm_map_page_mask(user_map)))
		addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					 vm_map_page_mask(user_map));

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {

		maxprot = VM_PROT_ALL;
#if CONFIG_MACF
		/*
		 * Entitlement check.
		 */
		error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
		if (error) {
			return EINVAL;
		}
#endif /* MAC */

		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking.
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK | VM_FLAGS_SUPERPAGE_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}
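		/*
		 * Illustrative userland sketch (not part of this file): a
		 * caller can tag an anonymous mapping for memory accounting
		 * by encoding a VM_FLAGS_* alias in the fd argument, e.g.
		 *
		 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *	    MAP_ANON | MAP_PRIVATE,
		 *	    VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1), 0);
		 *
		 * Any fd bit outside the masks accepted above is rejected
		 * with EINVAL.
		 */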

		handle = NULL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		if (flags & MAP_JIT)
			return EINVAL;

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		switch (FILEGLOB_DTYPE(fp->f_fglob)) {
		case DTYPE_PSXSHM:
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		case DTYPE_VNODE:
			break;
		default:
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);

		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
				    /*
				     * Do not allow writable mappings of
				     * swap files (see vm_swapfile_pager.c).
				     */
				    !vnode_isswap(vp)) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */

#if CONFIG_PROTECT
			{
				error = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
				if (error) {
					(void) vnode_put(vp);
					goto bad;
				}
			}
#endif /* CONFIG_PROTECT */

		}
	}

	if (user_size == 0) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map));

	if (file_pos & vm_map_page_mask(user_map)) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
					      vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_trunc_page(user_addr,
						   vm_map_page_mask(user_map))) {
			if (!mapanon)
				(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	if (flags & MAP_JIT) {
		alloc_flags |= VM_FLAGS_MAP_JIT;
	}
	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		control = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
map_anon_retry:
		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this anonymous
		 * mapping, retry the mapping with a zero base
		 * in the event the mapping operation failed due to
		 * lack of space between the address and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_anon_retry;
		}
	} else {
		if (vnode_isswap(vp)) {
			/*
			 * Map swap files with a special pager
			 * that returns obfuscated contents.
			 */
			control = NULL;
			pager = swapfile_pager_setup(vp);
			if (pager != MEMORY_OBJECT_NULL) {
				control = swapfile_pager_control(pager);
			}
		} else {
			control = ubc_getobject(vp, UBC_FLAGS_NONE);
		}

		if (control == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *	ensure that someone doesn't replace our R/W creds
		 *	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
map_file_retry:
		result = vm_map_enter_mem_object_control(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 control, file_pos,
						 docow, prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE :
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this file backed
		 * mapping, retry the mapping with a zero base
		 * in the event the mapping operation failed due to
		 * lack of space between the address and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_file_retry;
		}
	}

	if (!mapanon) {
		(void)vnode_put(vp);
	}

	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
bad:
	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the reference on the pager.
		 * If the mapping was successful, it now holds
		 * an extra reference.
		 */
		memory_object_deallocate(pager);
	}
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
	return(error);
}

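/*
 * Illustrative userland usage (not kernel code): the syscall above backs
 * the familiar sequence
 *
 *	int fd = open(path, O_RDONLY);
 *	void *base = mmap(NULL, len, PROT_READ, MAP_FILE | MAP_SHARED, fd, 0);
 *	if (base == MAP_FAILED)
 *		err(1, "mmap");
 *	...
 *	munmap(base, len);
 *
 * Note that, per the radar 3777787 blocks above, asking for PROT_WRITE or
 * PROT_EXEC implicitly grants PROT_READ as well.
 */
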
int
msync(__unused proc_t p, struct msync_args *uap, int32_t *retval)
{
	__pthread_testcancel(1);
	return(msync_nocancel(p, (struct msync_nocancel_args *)uap, retval));
}

int
msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	int flags;
	vm_map_t user_map;
	int rv;
	vm_sync_t sync_flags = 0;

	user_map = current_map();
	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0);
	if (addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining a
		 * list of all mmaps done.  We cannot use vm_map_entry as
		 * they could be split or coalesced by independent actions.
		 * So instead of returning inaccurate results, just return
		 * an error for an invalid size.
		 */
		return (EINVAL); /* XXX breaks posix apps */
	}

	flags = uap->flags;
	/* disallow contradictory flags */
	if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC))
		return (EINVAL);

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}

	sync_flags |= VM_SYNC_CONTIGUOUS;	/* complain if holes */

	rv = mach_vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:	/* hole in region being sync'ed */
		return (ENOMEM);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
	return (0);
}

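/*
 * Illustrative userland usage (not kernel code): flushing a dirty shared
 * file mapping back to disk synchronously:
 *
 *	if (msync(base, len, MS_SYNC) == -1)
 *		err(1, "msync");
 *
 * Per the checks above: base must be page-aligned, len must be non-zero,
 * combining MS_SYNC with MS_ASYNC yields EINVAL, and a hole in the range
 * reports ENOMEM because VM_SYNC_CONTIGUOUS is always set.
 */
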
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	kern_return_t		result;
	vm_map_t		user_map;

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	if (user_addr + user_size < user_addr)
		return(EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return EINVAL;
	}

	result = mach_vm_deallocate(user_map, user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return(EINVAL);
	}
	return(0);
}

int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
	register vm_prot_t prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
#if CONFIG_MACF
	int error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->prot);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED));

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3936456
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif /* 3936456 */

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so a mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr,
			user_size, prot);
	if (error)
		return (error);
#endif

	if (prot & VM_PROT_TRUSTED) {
#if CONFIG_DYNAMIC_CODE_SIGNING
		/* CODE SIGNING ENFORCEMENT - JIT support */
		/* The special protection value VM_PROT_TRUSTED requests that we treat
		 * this page as if it had a valid code signature.
		 * If this is enabled, there MUST be a MAC policy implementing the
		 * mac_proc_check_mprotect() hook above. Otherwise, code signing will
		 * be compromised because the check would always succeed and thus any
		 * process could sign dynamically. */
		result = vm_map_sign(
			user_map,
			vm_map_trunc_page(user_addr,
					  vm_map_page_mask(user_map)),
			vm_map_round_page(user_addr+user_size,
					  vm_map_page_mask(user_map)));
		switch (result) {
			case KERN_SUCCESS:
				break;
			case KERN_INVALID_ADDRESS:
				/* UNIX SPEC: for an invalid address range, return ENOMEM */
				return ENOMEM;
			default:
				return EINVAL;
		}
#else
		return ENOTSUP;
#endif
	}
	prot &= ~VM_PROT_TRUSTED;

	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}


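/*
 * Illustrative userland usage (not kernel code): dropping write access on
 * a page-aligned region, e.g. to seal a buffer of generated code:
 *
 *	if (mprotect(base, len, PROT_READ | PROT_EXEC) == -1)
 *		err(1, "mprotect");
 *
 * As in mmap() above, PROT_WRITE and PROT_EXEC imply PROT_READ (radar
 * 3936456 here), and an invalid address range reports ENOMEM per the
 * UNIX spec.
 */
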
int
minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	register vm_inherit_t inherit;
	vm_map_t	user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size,
				inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

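/*
 * Illustrative userland usage (not kernel code): keeping a region out of
 * fork() children by marking it VM_INHERIT_NONE (the value consumed by
 * mach_vm_inherit() above; the userland header may spell the constant
 * differently):
 *
 *	if (minherit(base, len, VM_INHERIT_NONE) == -1)
 *		err(1, "minherit");
 */
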
int
madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	mach_vm_offset_t start;
	mach_vm_size_t size;
	vm_behavior_t new_behavior;
	kern_return_t	result;

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	switch (uap->behav) {
		case MADV_RANDOM:
			new_behavior = VM_BEHAVIOR_RANDOM;
			break;
		case MADV_SEQUENTIAL:
			new_behavior = VM_BEHAVIOR_SEQUENTIAL;
			break;
		case MADV_NORMAL:
			new_behavior = VM_BEHAVIOR_DEFAULT;
			break;
		case MADV_WILLNEED:
			new_behavior = VM_BEHAVIOR_WILLNEED;
			break;
		case MADV_DONTNEED:
			new_behavior = VM_BEHAVIOR_DONTNEED;
			break;
		case MADV_FREE:
			new_behavior = VM_BEHAVIOR_FREE;
			break;
		case MADV_ZERO_WIRED_PAGES:
			new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES;
			break;
		case MADV_FREE_REUSABLE:
			new_behavior = VM_BEHAVIOR_REUSABLE;
			break;
		case MADV_FREE_REUSE:
			new_behavior = VM_BEHAVIOR_REUSE;
			break;
		case MADV_CAN_REUSE:
			new_behavior = VM_BEHAVIOR_CAN_REUSE;
			break;
		default:
			return(EINVAL);
	}

	start = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t) uap->len;

	user_map = current_map();

	result = mach_vm_behavior_set(user_map, start, size, new_behavior);
	switch (result) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:
		return EINVAL;
	case KERN_NO_SPACE:
		return ENOMEM;
	}

	return EINVAL;
}

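/*
 * Illustrative userland usage (not kernel code): hinting sequential access
 * before a scan, then releasing the pages when done:
 *
 *	(void) madvise(base, len, MADV_SEQUENTIAL);
 *	...
 *	(void) madvise(base, len, MADV_FREE);
 *
 * The call is purely advisory; unknown behaviors fail with EINVAL and, per
 * the mapping above, KERN_NO_SPACE reports ENOMEM.
 */
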
int
mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr, first_addr, end;
	vm_map_t map;
	user_addr_t vec;
	int error;
	int vecindex, lastvecindex;
	int mincoreinfo = 0;
	int pqueryinfo;
	kern_return_t	ret;
	int numref;

	char c;

	map = current_map();

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = vm_map_trunc_page(uap->addr,
					      vm_map_page_mask(map));
	end = addr + vm_map_round_page(uap->len,
				       vm_map_page_mask(map));

	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = current_map();

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current processes address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for( ; addr < end; addr += PAGE_SIZE ) {
		pqueryinfo = 0;
		ret = mach_vm_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			pqueryinfo = 0;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;

		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr)>> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while((lastvecindex + 1) < vecindex) {
			c = 0;
			error = copyout(&c, vec + lastvecindex, 1);
			if (error) {
				return (EFAULT);
			}
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		c = (char)mincoreinfo;
		error = copyout(&c, vec + vecindex, 1);
		if (error) {
			return (EFAULT);
		}
		lastvecindex = vecindex;
	}

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while((lastvecindex + 1) < vecindex) {
		c = 0;
		error = copyout(&c, vec + lastvecindex, 1);
		if (error) {
			return (EFAULT);
		}
		++lastvecindex;
	}

	return (0);
}

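/*
 * Illustrative userland usage (not kernel code): checking residency of a
 * mapping; the vector needs one byte per page of the range:
 *
 *	char *vec = malloc((len + PAGE_SIZE - 1) / PAGE_SIZE);
 *	if (mincore(base, len, vec) == -1)
 *		err(1, "mincore");
 *	if (vec[0] & MINCORE_INCORE)
 *		printf("first page is resident\n");
 *
 * Pages the query cannot resolve are simply reported as zero, per the
 * KERN_SUCCESS check above.
 */
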
int
mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	vm_map_offset_t addr;
	vm_map_size_t size, pageoff;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	user_map = current_map();
	pageoff = (addr & vm_map_page_mask(user_map));
	addr -= pageoff;
	size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));

	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE, TRUE);

	if (result == KERN_RESOURCE_SHORTAGE)
		return EAGAIN;
	else if (result != KERN_SUCCESS)
		return ENOMEM;

	return 0;	/* KERN_SUCCESS */
}

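/*
 * Illustrative userland usage (not kernel code): wiring a buffer that must
 * not be paged out, e.g. one holding key material; EAGAIN means wired
 * memory is short:
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");
 *	...
 *	munlock(buf, buflen);
 *
 * Note the JMM comment in munlock() below: it currently removes only one
 * wiring, though the spec asks for all of them.
 */
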
int
munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_map_t user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}


int
mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval)
{
	return (ENOSYS);
}

int
munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval)
{
	return(ENOSYS);
}

/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
{
	int		fd = args->fd;
	vm_offset_t	offset = args->offset;
	vm_offset_t	*va = args->va;
	boolean_t	findspace = args->findspace;
	vm_size_t	size = args->size;
	kern_return_t	ret;

	AUDIT_MACH_SYSCALL_ENTER(AUE_MAPFD);
	AUDIT_ARG(addr, CAST_DOWN(user_addr_t, args->va));
	AUDIT_ARG(fd, fd);

	ret = map_fd_funneled( fd, (vm_object_offset_t)offset, va, findspace, size);

	AUDIT_MACH_SYSCALL_EXIT(ret);
	return ret;
}

kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void		*pager;
	vm_offset_t	map_addr = 0;
	vm_size_t	map_size;
	int		err = 0;
	vm_prot_t	maxprot = VM_PROT_ALL;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	my_map = current_map();

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);

	if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if (err != 0)
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

#if CONFIG_MACF
	err = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()),
			fp->f_fglob, VM_PROT_DEFAULT, MAP_FILE, &maxprot);
	if (err) {
		(void)vnode_put(vp);
		goto bad;
	}
#endif /* MAC */

#if CONFIG_PROTECT
	/* check for content protection access */
	{
		err = cp_handle_vnop(vp, CP_READ_ACCESS | CP_WRITE_ACCESS, 0);
		if (err != 0) {
			(void) vnode_put(vp);
			goto bad;
		}
	}
#endif /* CONFIG_PROTECT */

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}

	if (offset & vm_map_page_mask(my_map)) {
		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = vm_map_round_page(size, vm_map_page_mask(my_map));

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0,
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, maxprot,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		//K64todo fix for 64bit user?
		uint32_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {

			(void) vm_map_remove(
					my_map,
					vm_map_trunc_page(map_addr,
							  vm_map_page_mask(my_map)),
					vm_map_round_page(map_addr + map_size,
							  vm_map_page_mask(my_map)),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		// K64todo bug compatible now, should fix for 64bit user
		uint32_t user_map_addr = CAST_DOWN_EXPLICIT(uint32_t, map_addr);
		if (copyout(&user_map_addr, CAST_USER_ADDR_T(va), sizeof (user_map_addr))) {
			(void) vm_map_remove(
					my_map,
					vm_map_trunc_page(map_addr,
							  vm_map_page_mask(my_map)),
					vm_map_round_page(map_addr + map_size,
							  vm_map_page_mask(my_map)),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}