/* osfmk/ppc/io_map.c (xnu-344.23) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#include <debug.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/io_map_entries.h>
#include <ppc/Firmware.h>
#include <ppc/mappings.h>
#include <ppc/proc_reg.h>

extern vm_offset_t virtual_avail;

/*
 * Allocate and map memory for devices that may need to be mapped
 * outside the usual physical memory.  If phys_addr is zero, steal
 * the appropriate number of physical pages from the VM system and
 * map them.
 */
vm_offset_t
io_map(vm_offset_t phys_addr, vm_size_t size)
{
	vm_offset_t	start;
	int		i;
	vm_page_t	m;

#if DEBUG
	assert (kernel_map != VM_MAP_NULL);	/* VM must be initialised */
#endif

	if (phys_addr != 0) {
		/* make sure we map full contents of all the pages concerned */
		size = round_page(size + (phys_addr & PAGE_MASK));

		/* Steal some free virtual addresses */
		(void) kmem_alloc_pageable(kernel_map, &start, size);

		/*
		 * Set up a single block mapping covering the whole range, with
		 * cache-inhibited (PTE_WIMG_IO) attributes for device space.
		 */
		pmap_map_block(kernel_pmap, start, phys_addr, size,
			       VM_PROT_READ|VM_PROT_WRITE, PTE_WIMG_IO, 0);

		return (start + (phys_addr & PAGE_MASK));

	} else {

		/* Steal some free virtual addresses */
		(void) kmem_alloc_pageable(kernel_map, &start, size);

		mapping_prealloc(size);	/* Make sure there are enough free mappings */

		/* Steal some physical pages and map them one by one */
		for (i = 0; i < size; i += PAGE_SIZE) {
			m = VM_PAGE_NULL;
			while ((m = vm_page_grab()) == VM_PAGE_NULL)
				VM_PAGE_WAIT();
			vm_page_gobble(m);	/* Page is taken from the VM system for good */
			(void) pmap_map_bd(start + i,
					   m->phys_addr,
					   m->phys_addr + PAGE_SIZE,
					   VM_PROT_READ|VM_PROT_WRITE);
		}

		mapping_relpre();	/* Allow mapping release */
		return start;
	}
}
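
/*
 * Illustrative sketch (not part of the original file): two hypothetical
 * callers of io_map().  The device physical address, register layout and
 * function names are assumptions chosen purely for illustration, so the
 * fragment is kept out of the build with #if 0.
 */
#if 0
struct example_regs {				/* hypothetical device register block */
	volatile unsigned int	status;
	volatile unsigned int	control;
};

static void
example_map_device(void)
{
	struct example_regs	*regs;

	/*
	 * Map a fixed physical device address.  io_map() rounds the request
	 * up to whole pages and returns a kernel virtual address adjusted by
	 * the page offset of the physical address.
	 */
	regs = (struct example_regs *)io_map((vm_offset_t)0xF3000000,	/* assumed physical base */
					     sizeof(struct example_regs));

	regs->control = 1;		/* store reaches the device directly (PTE_WIMG_IO mapping) */
}

static vm_offset_t
example_grab_wired_buffer(void)
{
	/*
	 * A zero physical address asks io_map() to steal free physical pages
	 * from the VM system and map them page by page; the memory is never
	 * returned to the VM system.
	 */
	return io_map((vm_offset_t)0, 4 * PAGE_SIZE);
}
#endif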