/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.10 (Berkeley) 2/19/95
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <sys/vadvise.h>
#include <sys/trace.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>

#include <mach/vm_sync.h>
#include <mach/vm_behavior.h>
#include <mach/vm_inherit.h>
#include <mach/vm_statistics.h>

struct sbrk_args {
	int	incr;
};

/* ARGSUSED */
int
sbrk(p, uap, retval)
	struct proc *p;
	struct sbrk_args *uap;
	register_t *retval;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct sstk_args {
	int	incr;
};

/* ARGSUSED */
int
sstk(p, uap, retval)
	struct proc *p;
	struct sstk_args *uap;
	register_t *retval;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if COMPAT_43
/* ARGSUSED */
int
ogetpagesize(p, uap, retval)
	struct proc *p;
	void *uap;
	register_t *retval;
{

	*retval = PAGE_SIZE;
	return (0);
}
#endif	/* COMPAT_43 */

struct osmmap_args {
	caddr_t	addr;
	int	len;
	int	prot;
	int	share;
	int	fd;
	long	pos;
};

int
osmmap(curp, uap, retval)
	struct proc *curp;
	register struct osmmap_args *uap;
	register_t *retval;
{
	struct mmap_args {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
#ifdef DOUBLE_ALIGN_PARAMS
		long pad;
#endif
		off_t pos;
	} newargs;

	if ((uap->share == MAP_SHARED) || (uap->share == MAP_PRIVATE)) {
		newargs.addr = uap->addr;
		newargs.len = (size_t)uap->len;
		newargs.prot = uap->prot;
		newargs.flags = uap->share;
		newargs.fd = uap->fd;
		newargs.pos = (off_t)uap->pos;
		return (mmap(curp, &newargs, retval));
	} else
		return (EINVAL);
}
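
/*
 * Illustrative note (not in the original source): the shim above only
 * repackages the old argument layout.  An old-style caller passing
 * share = MAP_PRIVATE and a long pos reaches mmap() with
 * flags = MAP_PRIVATE and pos widened to off_t; any other share value
 * is rejected with EINVAL before mmap() runs.
 */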

struct mmap_args {
	caddr_t addr;
	size_t len;
	int prot;
	int flags;
	int fd;
#ifdef DOUBLE_ALIGN_PARAMS
	long pad;
#endif
	off_t pos;
};
int
mmap(p, uap, retval)
	struct proc *p;
	struct mmap_args *uap;
	register_t *retval;
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct file *fp;
	register struct vnode *vp;
	int flags;
	int prot;
	int err = 0;
	vm_map_t user_map;
	kern_return_t result;
	vm_offset_t user_addr;
	vm_size_t user_size;
	vm_offset_t pageoff;
	vm_object_offset_t file_pos;
	boolean_t find_space, docow;
	vm_prot_t maxprot;
	void *handle;
	vm_pager_t pager;
	int mapanon = 0;

	user_addr = (vm_offset_t)uap->addr;
	user_size = (vm_size_t)uap->len;
	prot = (uap->prot & VM_PROT_ALL);
	flags = uap->flags;

	/*
	 * The vm code does not have prototypes, and the compiler does not
	 * do the right thing when you cast a 64-bit value and pass it in
	 * a function call.  So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if ((file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) ||
	    ((ssize_t)uap->len < 0) ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = ((vm_offset_t)file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;				/* low end... */
	user_size = (vm_size_t)round_page_32(user_size);	/* hi end */

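	/*
	 * Worked example (illustrative, assuming 4KB pages): for
	 * uap->pos = 0x12345 and uap->len = 0x100,
	 *
	 *	pageoff   = 0x12345 & 0xfff              = 0x345
	 *	file_pos  = 0x12345 - 0x345              = 0x12000
	 *	user_size = round_page_32(0x100 + 0x345) = 0x1000
	 *
	 * so one whole page is mapped at the page-aligned file offset,
	 * and the address returned to the caller is adjusted by pageoff
	 * at the end of this routine.
	 */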
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS))
			return (EINVAL);
		if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS)
			return (EINVAL);
		if (user_addr + user_size < user_addr)
			return (EINVAL);
	}
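
	/*
	 * Illustrative example (4KB pages): a MAP_FIXED request with
	 * addr = 0x20345 for pos = 0x12345 is accepted, since after
	 * subtracting pageoff (0x345) the address 0x20000 is page
	 * aligned; addr = 0x20400 for the same pos fails the
	 * (user_addr & PAGE_MASK) test above and returns EINVAL.
	 */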
#ifdef notyet
	/* Do not have APIs to get this info; need to wait till then. */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = round_page_32(p->p_vmspace->vm_daddr + MAXDSIZ);

#endif

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		file_pos = 0;
		mapanon = 1;
	} else {
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fdgetf(p, uap->fd, &fp);
		if (err)
			return (err);
		if (fp->f_type == DTYPE_PSXSHM) {
			uap->addr = user_addr;
			uap->len = user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			return (pshm_mmap(p, uap, retval, fp, pageoff));
		}

		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *)fp->f_data;

		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			return (EOPNOTSUPP);
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ)
				return (EACCES);
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((err =
					    VOP_GETATTR(vp, &va,
							p->p_ucred, p)))
						return (err);
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0)
						maxprot |= VM_PROT_WRITE;
					else if (prot & PROT_WRITE)
						return (EPERM);
				} else if ((prot & PROT_WRITE) != 0)
					return (EACCES);
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
		}
	}
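
	/*
	 * Summary of the maxprot rules above (illustrative): a descriptor
	 * opened read-only and mapped MAP_SHARED ends up with
	 * maxprot = VM_PROT_EXECUTE|VM_PROT_READ, so a later mprotect()
	 * cannot add write access; the same descriptor mapped MAP_PRIVATE
	 * gets VM_PROT_WRITE as well, because private writes go to
	 * copy-on-write anonymous memory, never back to the file.
	 */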

	if (user_size == 0)
		return (0);

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = round_page_32(user_size);

	if (file_pos & PAGE_MASK_64)
		return (EINVAL);

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		find_space = TRUE;
		user_addr = round_page_32(user_addr);
	} else {
		if (user_addr != trunc_page_32(user_addr))
			return (EINVAL);
		find_space = FALSE;
		(void) vm_deallocate(user_map, user_addr, user_size);
	}


	/*
	 *	Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			file_pos = 0;
	}

	if (handle == NULL) {
		pager = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;

		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif
		result = vm_allocate(user_map, &user_addr, user_size, find_space);
		if (result != KERN_SUCCESS)
			goto out;

	} else {
		UBCINFOCHECK("mmap", vp);
		pager = ubc_getpager(vp);

		if (pager == NULL)
			return (ENOMEM);

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *	ensure that someone doesn't replace our R/W creds
		 *	with ones that only work for read.
		 */

		ubc_setcred(vp, p);
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;

		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

		result = vm_map_64(user_map, &user_addr, user_size,
				0, find_space, pager, file_pos, docow,
				prot, maxprot,
				VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS)
			goto out;

		ubc_map(vp);
	}

	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		result = vm_inherit(user_map, user_addr, user_size,
				VM_INHERIT_SHARE);
		if (result != KERN_SUCCESS) {
			(void) vm_deallocate(user_map, user_addr, user_size);
			goto out;
		}
	}

out:
	switch (result) {
	case KERN_SUCCESS:
		if (!mapanon)
			*fdflags(p, uap->fd) |= UF_MAPPED;
		*retval = (register_t)(user_addr + pageoff);
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
	/*NOTREACHED*/
}
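
/*
 * Illustrative userland sketch (hypothetical file and error handling,
 * not part of this file) of the common path through mmap() above:
 *
 *	int fd = open("example.dat", O_RDONLY);
 *	char *m = mmap(0, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (m == (char *)-1)
 *		err(1, "mmap");
 *	...
 *	munmap(m, 4096);
 *	close(fd);
 *
 * With no MAP_FIXED, find_space is TRUE and the kernel chooses the
 * address; the chosen address (plus pageoff) comes back in *retval.
 */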

struct msync_args {
	caddr_t addr;
	int len;
	int flags;
};
int
msync(p, uap, retval)
	struct proc *p;
	struct msync_args *uap;
	register_t *retval;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t user_map;
	int rv;
	vm_sync_t sync_flags = 0;

	addr = (vm_offset_t) uap->addr;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size = uap->len;
	size = (vm_size_t) round_page_32(size);
	flags = uap->flags;

	if (addr + size < addr)
		return (EINVAL);

	user_map = current_map();

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	if (size == 0) {
		/*
		 * We cannot support this properly without maintaining a
		 * list of all mmaps done.  vm_map entries cannot be used,
		 * as they could be split or coalesced by independent
		 * actions.  So rather than return inaccurate results,
		 * just return EINVAL for a zero size.
		 */
		return (EINVAL);
	}

	if (flags & MS_KILLPAGES)
		sync_flags |= VM_SYNC_KILLPAGES;
	if (flags & MS_DEACTIVATE)
		sync_flags |= VM_SYNC_DEACTIVATE;
	if (flags & MS_INVALIDATE)
		sync_flags |= VM_SYNC_INVALIDATE;

	if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) {
		if (flags & MS_ASYNC)
			sync_flags |= VM_SYNC_ASYNCHRONOUS;
		else
			sync_flags |= VM_SYNC_SYNCHRONOUS;
	}
	rv = vm_msync(user_map, addr, size, sync_flags);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);

}
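
/*
 * Illustrative usage sketch (hypothetical mapping "m"): dirty a page
 * of a MAP_SHARED file mapping, then push it back to the file:
 *
 *	m[0] = 'x';
 *	if (msync(m, 4096, 0) == -1)
 *		err(1, "msync");
 *
 * In this version flags of 0 select VM_SYNC_SYNCHRONOUS; MS_ASYNC
 * queues the pageout and returns immediately, and MS_ASYNC together
 * with MS_INVALIDATE is rejected with EINVAL.
 */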


int
mremap()
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

struct munmap_args {
	caddr_t	addr;
	int	len;
};
int
munmap(p, uap, retval)
	struct proc *p;
	struct munmap_args *uap;
	register_t *retval;
{
	vm_offset_t	user_addr;
	vm_size_t	user_size, pageoff;
	kern_return_t	result;

	user_addr = (vm_offset_t) uap->addr;
	user_size = (vm_size_t) uap->len;

	pageoff = (user_addr & PAGE_MASK);

	user_addr -= pageoff;
	user_size += pageoff;
	user_size = round_page_32(user_size);
	if (user_addr + user_size < user_addr)
		return (EINVAL);

	if (user_size == 0)
		return (0);

	/* Address range must be all in user VM space. */
	if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS))
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS)
		return (EINVAL);


	result = vm_deallocate(current_map(), user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return (EINVAL);
	}
	return (0);
}

void
munmapfd(p, fd)
	struct proc *p;
	int fd;
{
	/*
	 * XXX should vm_deallocate any regions mapped to this file
	 */
	*fdflags(p, fd) &= ~UF_MAPPED;
}

struct mprotect_args {
	caddr_t addr;
	int len;
	int prot;
};
int
mprotect(p, uap, retval)
	struct proc *p;
	struct mprotect_args *uap;
	register_t *retval;
{
	register vm_prot_t prot;
	vm_offset_t	user_addr;
	vm_size_t	user_size, pageoff;
	kern_return_t	result;
	vm_map_t	user_map;

	user_addr = (vm_offset_t) uap->addr;
	user_size = (vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

	pageoff = (user_addr & PAGE_MASK);
	user_addr -= pageoff;
	user_size += pageoff;
	user_size = round_page_32(user_size);
	if (user_addr + user_size < user_addr)
		return (EINVAL);

	user_map = current_map();

	result = vm_map_protect(user_map, user_addr, user_addr+user_size, prot,
				FALSE);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
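
/*
 * Illustrative sketch (hypothetical mapping "m"): drop write access
 * after initializing a private mapping:
 *
 *	if (mprotect(m, 4096, PROT_READ) == -1)
 *		err(1, "mprotect");
 *
 * Raising protections beyond the region's maximum (for example
 * PROT_WRITE on a MAP_SHARED mapping of a read-only descriptor) makes
 * vm_map_protect() return KERN_PROTECTION_FAILURE, reported here as
 * EACCES.
 */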


struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};

int
minherit(p, uap, retval)
	struct proc *p;
	struct minherit_args *uap;
	register_t *retval;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	register vm_inherit_t inherit;
	vm_map_t user_map;
	kern_return_t result;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page_32(size);
	if (addr + size < addr)
		return (EINVAL);

	user_map = current_map();
	result = vm_inherit(user_map, addr, size,
				inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

struct madvise_args {
	caddr_t addr;
	int len;
	int behav;
};
/* ARGSUSED */
int
madvise(p, uap, retval)
	struct proc *p;
	struct madvise_args *uap;
	register_t *retval;
{
	vm_map_t user_map;
	vm_offset_t start, end;
	vm_behavior_t new_behavior;
	kern_return_t result;

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAX_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAX_ADDRESS)
		return (ENOMEM);
	if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
		return (ENOMEM);

	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (ENOMEM);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page_32((vm_offset_t) uap->addr);
	end = round_page_32((vm_offset_t) uap->addr + uap->len);

	user_map = current_map();

	switch (uap->behav) {
	case MADV_RANDOM:
		new_behavior = VM_BEHAVIOR_RANDOM;
		break;
	case MADV_SEQUENTIAL:
		new_behavior = VM_BEHAVIOR_SEQUENTIAL;
		break;
	case MADV_NORMAL:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	case MADV_WILLNEED:
		new_behavior = VM_BEHAVIOR_WILLNEED;
		break;
	case MADV_DONTNEED:
		new_behavior = VM_BEHAVIOR_DONTNEED;
		break;
	default:
		return (EINVAL);
	}

	result = vm_behavior_set(user_map, start, end, new_behavior);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);
	}

	return (EINVAL);
}
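
/*
 * Illustrative sketch: advise the VM that a mapping will be read
 * front to back, enabling more aggressive read-ahead:
 *
 *	(void) madvise(m, len, MADV_SEQUENTIAL);
 *
 * This becomes vm_behavior_set(..., VM_BEHAVIOR_SEQUENTIAL).  The
 * call is purely advisory, so callers must remain correct if it
 * fails or has no effect.
 */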

struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
/* ARGSUSED */
int
mincore(p, uap, retval)
	struct proc *p;
	struct mincore_args *uap;
	register_t *retval;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	int mincoreinfo = 0;
	int pqueryinfo;
	kern_return_t ret;
	int numref;

	map = current_map();

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page_32((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page_32(uap->len);

	if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS)
		return (EINVAL);
	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current processes address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (; addr < end; addr += PAGE_SIZE) {
		pqueryinfo = 0;
		ret = vm_map_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			pqueryinfo = 0;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;

		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr) >> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while ((lastvecindex + 1) < vecindex) {
			++lastvecindex;
			error = subyte(vec + lastvecindex, 0);
			if (error) {
				return (EFAULT);
			}
		}

		/*
		 * Pass the page information to the user
		 */
		error = subyte(vec + vecindex, mincoreinfo);
		if (error) {
			return (EFAULT);
		}
		lastvecindex = vecindex;
	}


	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			return (EFAULT);
		}
	}

	return (0);
}
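
/*
 * Illustrative sketch (hypothetical mapping "m" of "len" bytes with
 * 4KB pages): ask which pages are resident; vec needs one byte per
 * page of the range.
 *
 *	size_t npages = (len + 4095) / 4096;
 *	char *vec = malloc(npages);
 *	if (mincore(m, len, vec) == 0 && (vec[0] & MINCORE_INCORE))
 *		printf("first page is resident\n");
 *
 * Each byte may also carry MINCORE_REFERENCED and MINCORE_MODIFIED,
 * filled in above from vm_map_page_query().
 */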

struct mlock_args {
	caddr_t addr;
	size_t len;
};

int
mlock(p, uap, retval)
	struct proc *p;
	struct mlock_args *uap;
	register_t *retval;
{
	vm_map_t user_map;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;
	kern_return_t	result;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page_32(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);
#ifdef notyet
/* Hmm.. What am I going to do with this? */
	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif
#endif /* notyet */

	user_map = current_map();

	/* vm_wire */
	result = vm_map_wire(user_map, addr, (vm_offset_t)(addr+size), VM_PROT_NONE, TRUE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}

struct munlock_args {
	caddr_t addr;
	size_t len;
};
int
munlock(p, uap, retval)
	struct proc *p;
	struct munlock_args *uap;
	register_t *retval;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;
	vm_map_t user_map;
	kern_return_t	result;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page_32(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifdef notyet
/* Hmm.. What am I going to do with this? */
#ifndef pmap_wired_count
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif
#endif /* notyet */

	user_map = current_map();

	/* vm_wire */
	result = vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
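
/*
 * Illustrative sketch: wire a buffer so it cannot be paged out (for
 * example one holding key material), then unwire it when done:
 *
 *	if (mlock(buf, 4096) == -1)
 *		err(1, "mlock");
 *	...
 *	munlock(buf, 4096);
 *
 * Note the asymmetry above: mlock() wires through vm_map_wire() on
 * the current map, while munlock() goes through vm_wire() with
 * VM_PROT_NONE, which requests unwiring.
 */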


struct mlockall_args {
	int	how;
};

int
mlockall(p, uap)
	struct proc *p;
	struct mlockall_args *uap;
{
	return (ENOSYS);
}

struct munlockall_args {
	int	how;
};

int
munlockall(p, uap)
	struct proc *p;
	struct munlockall_args *uap;
{
	return (ENOSYS);
}


/* BEGIN DEFUNCT */
struct obreak_args {
	char *nsiz;
};
int
obreak(p, uap, retval)
	struct proc *p;
	struct obreak_args *uap;
	register_t *retval;
{
	/* Not implemented, obsolete */
	return (ENOMEM);
}

int	both;

int
ovadvise()
{

#ifdef lint
	both = 0;
#endif
	return (0);
}
/* END DEFUNCT */
#if 1
int print_map_addr = 0;
#endif /* 1 */

/* CDY need to fix interface to allow user to map above 32 bits */
kern_return_t map_fd(
	int		fd,
	vm_offset_t	offset,
	vm_offset_t	*va,
	boolean_t	findspace,
	vm_size_t	size)
{
	kern_return_t ret;
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	ret = map_fd_funneled(fd, (vm_object_offset_t)offset,
							va, findspace, size);

	(void) thread_funnel_set(kernel_flock, FALSE);

	return ret;
}
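
/*
 * The wrapper above follows the kernel-funnel pattern of this era:
 * thread_funnel_set() returns whether the funnel was already held,
 * the funneled body runs under kernel_flock, and the funnel is
 * dropped unconditionally on exit, so map_fd() is written for callers
 * that enter without holding the funnel.
 */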

kern_return_t map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct file	*fp;
	struct vnode	*vp;
	void		*pager;
	vm_offset_t	map_addr = 0;
	vm_size_t	map_size;
	int		err = 0;
	vm_map_t	my_map;
	struct proc	*p = (struct proc *)current_proc();
#if 0
	extern int print_map_addr;
#endif /* 0 */

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fdgetf(p, fd, &fp);
	if (err)
		return (err);

	if (fp->f_type != DTYPE_VNODE)
		return (KERN_INVALID_ARGUMENT);

	if (!(fp->f_flag & FREAD))
		return (KERN_PROTECTION_FAILURE);

	vp = (struct vnode *)fp->f_data;

	if (vp->v_type != VREG)
		return (KERN_INVALID_ARGUMENT);

	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n", p->p_pid, p->p_comm);
		return (KERN_INVALID_ARGUMENT);
	}
	map_size = round_page_32(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0)
		return (KERN_SUCCESS);
	/*
	 *	Map in the file.
	 */
	UBCINFOCHECK("map_fd_funneled", vp);
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL)
		return (KERN_FAILURE);


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, TRUE,
			pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS)
		return (result);


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(va, &dst_addr, sizeof (dst_addr)) ||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			return (KERN_INVALID_ADDRESS);
		}

		result = vm_map_copyin(
				my_map,
				map_addr, map_size, TRUE,
				&tmp);
		if (result != KERN_SUCCESS) {

			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			return (result);
		}

		result = vm_map_copy_overwrite(
				my_map,
				dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			return (result);
		}
	} else {
		if (copyout(&map_addr, va, sizeof (map_addr))) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			return (KERN_INVALID_ADDRESS);
		}
	}

	ubc_setcred(vp, current_proc());
	ubc_map(vp);

	return (KERN_SUCCESS);
}