/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <kern/host.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <mach/kern_return.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/thread.h>
#include <device/device_port.h>

#include <libkern/OSAtomic.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
int	device_pager_workaround;

typedef int device_port_t;

typedef struct device_pager {
	int			*pager;		/* pager workaround pointer */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;		/* size of the memory object */
	int			flags;		/* DEVICE_PAGER_* option flags */
} *device_pager_t;


void
device_pager_bootstrap(
	void);


memory_object_t
device_pager_setup(
	memory_object_t,
	int,
	vm_size_t,
	int);

device_pager_t
device_pager_lookup(
	memory_object_t);

kern_return_t
device_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);


kern_return_t
device_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
device_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

void
device_pager_reference(
	memory_object_t);

void
device_pager_deallocate(
	memory_object_t);

kern_return_t
device_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

kern_return_t
device_pager_data_unlock(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
device_pager_terminate(
	memory_object_t);

kern_return_t
device_pager_synchronize(
	memory_object_t,
	memory_object_offset_t,
	vm_offset_t,
	vm_sync_t);

kern_return_t
device_pager_unmap(
	memory_object_t);

device_pager_t
device_object_create(void);

zone_t	device_pager_zone;


#define	DEVICE_PAGER_NULL	((device_pager_t) 0)


#define	MAX_DNODE	10000


/*
 * Initialize the zone from which device pager structures are allocated.
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * Create and initialize a device pager for the given device handle,
 * size and flags.  The device argument is not used by this implementation.
 */
memory_object_t
device_pager_setup(
	memory_object_t	device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = 0;
	if (flags & DEVICE_PAGER_CONTIGUOUS) {
		device_object->flags |= DEVICE_PAGER_CONTIGUOUS;
	}
	if (flags & DEVICE_PAGER_NOPHYSCACHE) {
		device_object->flags |= DEVICE_PAGER_NOPHYSCACHE;
	}

	return ((memory_object_t)device_object);
}
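
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * in-kernel client that wants device memory backed by this pager could
 * create one roughly as follows.  The names dev_handle and map_size are
 * assumptions for the sketch, not defined here.
 *
 *	memory_object_t	pager;
 *
 *	pager = device_pager_setup((memory_object_t) NULL,	// unused arg
 *				dev_handle,			// opaque int handle
 *				round_page(map_size),		// window size
 *				DEVICE_PAGER_CONTIGUOUS);
 *
 * The returned memory_object_t can then be entered into a VM map; the VM
 * system calls back into device_pager_init() and the other interfaces below.
 */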

/*
 * Enter the given range of physical addresses into the VM object that
 * backs this device pager, creating private (device-backed) pages.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	vm_offset_t		phys_addr,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, phys_addr, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		int null_size = 0;
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL);
		upl_deallocate(upl);
	}

	return kr;
}
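
/*
 * Illustrative sketch only (not part of the original file): once a pager
 * has been set up and its memory object initialized, a hypothetical driver
 * could wire a physical range into the backing object one page at a time.
 * The names pager, phys_base and obj_size are assumptions for the sketch.
 *
 *	memory_object_offset_t	off;
 *	kern_return_t		kr;
 *
 *	for (off = 0; off < obj_size; off += PAGE_SIZE) {
 *		kr = device_pager_populate_object(pager, off,
 *					phys_base + off, PAGE_SIZE);
 *		if (kr != KERN_SUCCESS)
 *			break;
 *	}
 */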

/*
 * Convert a memory_object_t handed to us by the VM system back into the
 * device_pager_t it actually points to.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager == &device_pager_workaround);
	return (device_object);
}

/*
 * Attach the pager to its memory object control handle and set the
 * attributes the device pager requires on the backing VM object.
 */
kern_return_t
device_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
		vm_size_t pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;


	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return (KERN_SUCCESS);
}

/*
 * Called by the VM system to push data back to the device; forward the
 * request to the device via device_data_action().
 */
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle, device_object,
			VM_PROT_READ | VM_PROT_WRITE, offset, data_cnt);
}

/*
 * Called by the VM system when data is needed to satisfy a fault; ask the
 * device to supply the pages via device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle, device_object,
			VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}

/*
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	unsigned int	prev_ref_count;

	device_object = device_pager_lookup(mem_obj);
	prev_ref_count = OSIncrementAtomic((UInt32 *)&device_object->ref_count);
	assert(prev_ref_count > 0);
}

/*
 * Drop a reference on the pager; on the last release, close the device
 * and free the pager structure.
 */
void
device_pager_deallocate(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (OSDecrementAtomic((UInt32 *)&device_object->ref_count) == 1) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
		}
		zfree(device_pager_zone, (vm_offset_t) device_object);
	}
	return;
}

kern_return_t
device_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * Report the synchronize request back to the VM system as complete;
 * the device pager has nothing of its own to flush.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	vm_sync_t		sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * Called when the memory object is no longer mapped; no action is
 * required for the device pager.
 */
kern_return_t
device_pager_unmap(
	memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * Allocate and initialize a new device pager structure from the zone.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return (DEVICE_PAGER_NULL);
	device_object->pager = &device_pager_workaround;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;


	return (device_object);
}