/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>          /* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#if defined(__arm__) || defined(__arm64__)
#include <arm/pmap.h>
#endif
#include <IOKit/IOKitServer.h>

#define EXTERN
#define MIGEXTERN

/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
static io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
    io_object_t obj;

    if (!IP_VALID(port)) {
        return NULL;
    }

    iokit_lock_port(port);
    if (ip_active(port) && (ip_kotype(port) == type)) {
        obj = (io_object_t) port->ip_kobject;
        iokit_add_reference( obj, type );
    } else {
        obj = NULL;
    }

    iokit_unlock_port(port);

    return obj;
}

MIGEXTERN io_object_t
iokit_lookup_object_port(
    ipc_port_t port)
{
    return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
}

MIGEXTERN io_object_t
iokit_lookup_connect_port(
    ipc_port_t port)
{
    return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
}

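/*
 * Look up an IOKit object by port name within a given IPC space.
 * The name is translated to a port; if the port is active and carries the
 * expected kobject type, the object is returned with an added reference.
 * Returns NULL on any failure. Does not consume the caller's port right.
 */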
static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
    io_object_t obj = NULL;

    if (name && MACH_PORT_VALID(name)) {
        ipc_port_t port;
        kern_return_t kr;

        kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

        if (kr == KERN_SUCCESS) {
            assert(IP_VALID(port));

            ip_reference(port);
            ip_unlock(port);

            iokit_lock_port(port);
            if (ip_active(port) && (ip_kotype(port) == type)) {
                obj = (io_object_t) port->ip_kobject;
                iokit_add_reference(obj, type);
            }
            iokit_unlock_port(port);

            ip_release(port);
        }
    }

    return obj;
}

EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
{
    return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)
{
    return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
}

EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
    ipc_port_release_send( port );
}

extern lck_mtx_t iokit_obj_to_port_binding_lock;

EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */

static ipc_port_t
iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
{
    ipc_port_t port;
    ipc_port_t sendPort;

    if (obj == NULL) {
        return IP_NULL;
    }

    port = iokit_port_for_object( obj, type );
    if (port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else {
        sendPort = IP_NULL;
    }

    iokit_remove_reference( obj );

    return sendPort;
}

MIGEXTERN ipc_port_t
iokit_make_object_port(
    io_object_t obj )
{
    return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port(
    io_object_t obj )
{
    return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
}

int gIOKitPortCount;

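/*
 * Allocate a kernel port for an IOKit object and bind the object to it
 * with the given kobject type. A no-senders notification is armed on the
 * port itself, so the loss of the last send right is reported back to the
 * kernel (see iokit_no_senders below).
 */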
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t notify;
    ipc_port_t port;

    do {
        /* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if (port == IP_NULL) {
            continue;
        }

        /* set kobject & type */
        ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
        /* port unlocked */
        assert( notify == IP_NULL);
        gIOKitPortCount++;
    } while (FALSE);

    return port;
}

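/*
 * Tear down a kernel port created by iokit_alloc_object_port(): clear the
 * kobject binding under the binding lock, then deallocate the port.
 */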
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//  iokit_remove_reference( obj );
    iokit_unlock_port(port);
    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return KERN_SUCCESS;
}

EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return KERN_SUCCESS;
}

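/*
 * Create a send right for an IOKit object directly in the given task's IPC
 * space and return its name there. Unlike iokit_make_port_of_type(), this
 * does not consume a reference on the object. Returns MACH_PORT_NULL if no
 * right could be made, or MACH_PORT_DEAD if the underlying port is dead.
 */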
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t port;
    ipc_port_t sendPort;
    mach_port_name_t name = 0;

    if (obj == NULL) {
        return MACH_PORT_NULL;
    }

    port = iokit_port_for_object( obj, type );
    if (port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else {
        sendPort = IP_NULL;
    }

    if (IP_VALID( sendPort )) {
        kern_return_t kr;
        kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
            MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
        if (kr != KERN_SUCCESS) {
            ipc_port_release_send( sendPort );
            name = MACH_PORT_NULL;
        }
    } else if (sendPort == IP_NULL) {
        name = MACH_PORT_NULL;
    } else if (sendPort == IP_DEAD) {
        name = MACH_PORT_DEAD;
    }

    return name;
}

EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
    return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
}

/*
 * Handle the no-more-senders notification generated when the last send
 * right to a device port is destroyed. Since no task still holds a send
 * right to this device port, a no-senders (NMS) notification has been
 * generated.
 */

static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t port;
    io_object_t obj = NULL;
    ipc_kobject_type_t type = IKOT_NONE;
    ipc_port_t notify;

    port = (ipc_port_t) notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if (IP_VALID(port)) {
        iokit_lock_port(port);
        if (ip_active(port)) {
            obj = (io_object_t) port->ip_kobject;
            type = ip_kotype( port );
            if ((IKOT_IOKIT_OBJECT == type)
                || (IKOT_IOKIT_CONNECT == type)
                || (IKOT_IOKIT_IDENT == type)) {
                iokit_add_reference( obj, IKOT_IOKIT_OBJECT );
            } else {
                obj = NULL;
            }
        }
        iokit_unlock_port(port);

        if (obj) {
            mach_port_mscount_t mscount = notification->not_count;

            if (KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) {
                /* Re-request no-senders notifications on the port (if still active) */
                ip_lock(port);
                if (ip_active(port)) {
                    notify = ipc_port_make_sonce_locked(port);
                    ipc_port_nsrequest( port, mscount + 1, notify, &notify);
                    /* port unlocked */
                    if (notify != IP_NULL) {
                        ipc_port_release_sonce(notify);
                    }
                } else {
                    ip_unlock(port);
                }
            }
            iokit_remove_reference( obj );
        }
    }
}

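/*
 * Dispatch Mach notification messages sent to IOKit kernel ports. Only
 * no-senders notifications are handled; all other notification IDs are
 * logged and ignored.
 */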
EXTERN
boolean_t
iokit_notify( mach_msg_header_t * msg )
{
    switch (msg->msgh_id) {
    case MACH_NOTIFY_NO_SENDERS:
        iokit_no_senders((mach_no_senders_notification_t *) msg);
        return TRUE;

    case MACH_NOTIFY_PORT_DELETED:
    case MACH_NOTIFY_PORT_DESTROYED:
    case MACH_NOTIFY_SEND_ONCE:
    case MACH_NOTIFY_DEAD_NAME:
    default:
        printf("iokit_notify: strange notification %d\n", msg->msgh_id);
        return FALSE;
    }
}

/* need to create a pmap function to generalize */
unsigned int
IODefaultCacheBits(addr64_t pa)
{
    return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
}

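/*
 * Map a physically contiguous range into the target map's pmap as a block
 * mapping, using the cache mode selected by the kIOMapCache* option bits
 * (or the page's default cache attributes for kIOMapDefaultCache).
 */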
kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
    vm_prot_t prot;
    unsigned int flags;
    ppnum_t pagenum;
    pmap_t pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch (options & kIOMapCacheMask) {    /* What cache mode do we need? */
    case kIOMapDefaultCache:
    default:
        flags = IODefaultCacheBits(pa);
        break;

    case kIOMapInhibitCache:
        flags = VM_WIMG_IO;
        break;

    case kIOMapWriteThruCache:
        flags = VM_WIMG_WTHRU;
        break;

    case kIOMapWriteCombineCache:
        flags = VM_WIMG_WCOMB;
        break;

    case kIOMapCopybackCache:
        flags = VM_WIMG_COPYBACK;
        break;

    case kIOMapCopybackInnerCache:
        flags = VM_WIMG_INNERWBACK;
        break;

    case kIOMapPostedWrite:
        flags = VM_WIMG_POSTED;
        break;
    }

    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);


    // Set up a block mapped area
    return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}

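/* Remove all mappings in the given VA range from the target map's pmap. */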
kern_return_t
IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return KERN_SUCCESS;
}

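/*
 * Re-enter the existing mappings in a VA range with new cache attributes,
 * page by page, batching any needed flushes through a pmap_flush_context.
 * kIOMapDefaultCache is rejected with KERN_INVALID_ARGUMENT.
 */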
kern_return_t
IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
    mach_vm_size_t __unused length, unsigned int __unused options)
{
    mach_vm_size_t off;
    vm_prot_t prot;
    unsigned int flags;
    pmap_t pmap = map->pmap;
    pmap_flush_context pmap_flush_context_storage;
    boolean_t delayed_pmap_flush = FALSE;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

    switch (options & kIOMapCacheMask) {
    // what cache mode do we need?
    case kIOMapDefaultCache:
    default:
        return KERN_INVALID_ARGUMENT;

    case kIOMapInhibitCache:
        flags = VM_WIMG_IO;
        break;

    case kIOMapWriteThruCache:
        flags = VM_WIMG_WTHRU;
        break;

    case kIOMapWriteCombineCache:
        flags = VM_WIMG_WCOMB;
        break;

    case kIOMapCopybackCache:
        flags = VM_WIMG_COPYBACK;
        break;

    case kIOMapCopybackInnerCache:
        flags = VM_WIMG_INNERWBACK;
        break;

    case kIOMapPostedWrite:
        flags = VM_WIMG_POSTED;
        break;
    }

    pmap_flush_context_init(&pmap_flush_context_storage);
    delayed_pmap_flush = FALSE;

    // enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size) {
        ppnum_t ppnum = pmap_find_phys(pmap, va + off);
        if (ppnum) {
            pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
                PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
            delayed_pmap_flush = TRUE;
        }
    }
    if (delayed_pmap_flush == TRUE) {
        pmap_flush(&pmap_flush_context_storage);
    }

    return KERN_SUCCESS;
}

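/*
 * Return the highest physical page number found in the pmap memory regions
 * on x86; the arm/arm64 implementation simply returns 0.
 */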
ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
    ppnum_t lastPage, highest = 0;
    unsigned int idx;

    for (idx = 0; idx < pmap_memory_region_count; idx++) {
        lastPage = pmap_memory_regions[idx].end - 1;
        if (lastPage > highest) {
            highest = lastPage;
        }
    }
    return highest;
#elif __arm__ || __arm64__
    return 0;
#else
#error unknown arch
#endif
}

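/*
 * Report the system clock (clock_get_system_nanotime) as a mach_timespec_t.
 */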
void IOGetTime( mach_timespec_t * clock_time);
void
IOGetTime( mach_timespec_t * clock_time)
{
    clock_sec_t sec;
    clock_nsec_t nsec;
    clock_get_system_nanotime(&sec, &nsec);
    clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
    clock_time->tv_nsec = nsec;
}