/* osfmk/device/iokit_rpc.c — apple/xnu, tag xnu-3789.21.4 */
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <zone_debug.h>
29 #include <mach/boolean.h>
30 #include <mach/kern_return.h>
31 #include <mach/mig_errors.h>
32 #include <mach/port.h>
33 #include <mach/vm_param.h>
34 #include <mach/notify.h>
35 //#include <mach/mach_host_server.h>
36 #include <mach/mach_types.h>
37
38 #include <machine/machparam.h> /* spl definitions */
39
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
42
43 #include <kern/clock.h>
44 #include <kern/spl.h>
45 #include <kern/counters.h>
46 #include <kern/queue.h>
47 #include <kern/zalloc.h>
48 #include <kern/thread.h>
49 #include <kern/task.h>
50 #include <kern/sched_prim.h>
51 #include <kern/misc_protos.h>
52
53 #include <vm/pmap.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
56
57 #include <device/device_types.h>
58 #include <device/device_port.h>
59 #include <device/device_server.h>
60
61 #include <machine/machparam.h>
62
63 #if defined(__i386__) || defined(__x86_64__)
64 #include <i386/pmap.h>
65 #endif
66 #include <IOKit/IOTypes.h>
67
68 #define EXTERN
69 #define MIGEXTERN
70
/*
 * Functions in iokit:IOUserClient.cpp
 * (implemented in C++ land; this file calls into them)
 */

/* Take an extra retain on an IOKit object / user-client connection. */
extern void iokit_add_reference( io_object_t obj );
extern void iokit_add_connect_reference( io_object_t obj );

/* Return the (retained) port bound to obj for the given kobject type. */
extern ipc_port_t iokit_port_for_object( io_object_t obj,
			ipc_kobject_type_t type );

/* Notify the object that all senders of its port have died. */
extern kern_return_t iokit_client_died( io_object_t obj,
			ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );

extern kern_return_t
iokit_client_memory_for_type(
	io_object_t	connect,
	unsigned int	type,
	unsigned int *	flags,
	vm_address_t *	address,
	vm_size_t    *	size );


extern ppnum_t IOGetLastPageNumber(void);

/*
 * Functions imported by iokit:IOUserClient.cpp
 * (implemented below; prototypes repeated here for the C++ side)
 */

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
			ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
				io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

/* Single global binding mutex — the port argument is advisory only. */
extern void iokit_lock_port(ipc_port_t port);
extern void iokit_unlock_port(ipc_port_t port);

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

/*
 * Functions imported by iokit:IOMemoryDescriptor.cpp
 */

extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
			mach_vm_size_t length, unsigned int mapFlags);

extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);

extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
					mach_vm_size_t length, unsigned int options);

extern unsigned int IODefaultCacheBits(addr64_t pa);
136 /*
137 * Lookup a device by its port.
138 * Doesn't consume the naked send right; produces a device reference.
139 */
140 MIGEXTERN io_object_t
141 iokit_lookup_object_port(
142 ipc_port_t port)
143 {
144 io_object_t obj;
145
146 if (!IP_VALID(port))
147 return (NULL);
148
149 iokit_lock_port(port);
150 if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
151 obj = (io_object_t) port->ip_kobject;
152 iokit_add_reference( obj );
153 }
154 else
155 obj = NULL;
156
157 iokit_unlock_port(port);
158
159 return( obj );
160 }
161
162 MIGEXTERN io_object_t
163 iokit_lookup_connect_port(
164 ipc_port_t port)
165 {
166 io_object_t obj;
167
168 if (!IP_VALID(port))
169 return (NULL);
170
171 iokit_lock_port(port);
172 if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
173 obj = (io_object_t) port->ip_kobject;
174 iokit_add_connect_reference( obj );
175 }
176 else
177 obj = NULL;
178
179 iokit_unlock_port(port);
180
181 return( obj );
182 }
183
/*
 * Translate a user-space connect handle (a mach port name cast into an
 * io_object_t) within "space" into the user-client object bound to the
 * underlying port.  On success the returned object carries an extra
 * reference taken via iokit_add_connect_reference(); nothing is consumed.
 * Returns NULL if the name does not denote a valid send right, or the
 * port is not an active IKOT_IOKIT_CONNECT port.
 */
EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
		ipc_port_t port;
		kern_return_t kr;

		/* On success, returns the port locked with a send right extant. */
		kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));

			/* Keep the port alive across the unlock below; the IOKit
			 * binding lock must not be taken with the port locked. */
			ip_reference(port);
			ip_unlock(port);

			/* Re-check activity and kobject type under the binding lock. */
			iokit_lock_port(port);
			if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_connect_reference(obj);
			}
			iokit_unlock_port(port);

			ip_release(port);
		}
	}

	return obj;
}
214
215 EXTERN io_object_t
216 iokit_lookup_connect_ref_current_task(io_object_t connectRef)
217 {
218 return iokit_lookup_connect_ref(connectRef, current_space());
219 }
220
221 EXTERN void
222 iokit_retain_port( ipc_port_t port )
223 {
224 ipc_port_reference( port );
225 }
226
227 EXTERN void
228 iokit_release_port( ipc_port_t port )
229 {
230 ipc_port_release( port );
231 }
232
233 EXTERN void
234 iokit_release_port_send( ipc_port_t port )
235 {
236 ipc_port_release_send( port );
237 }
238
/* Mutex (defined in IOUserClient.cpp) guarding object<->port bindings. */
extern lck_mtx_t iokit_obj_to_port_binding_lock;

/*
 * Lock the binding mutex.  The port argument is unused: a single
 * global mutex covers all IOKit ports.
 */
EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
	lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

/* Unlock the binding mutex taken by iokit_lock_port(). */
EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
	lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}
252
253 /*
254 * Get the port for a device.
255 * Consumes a device reference; produces a naked send right.
256 */
257 MIGEXTERN ipc_port_t
258 iokit_make_object_port(
259 io_object_t obj )
260 {
261 ipc_port_t port;
262 ipc_port_t sendPort;
263
264 if( obj == NULL)
265 return IP_NULL;
266
267 port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
268 if( port) {
269 sendPort = ipc_port_make_send( port);
270 iokit_release_port( port );
271 } else
272 sendPort = IP_NULL;
273
274 iokit_remove_reference( obj );
275
276 return( sendPort);
277 }
278
279 MIGEXTERN ipc_port_t
280 iokit_make_connect_port(
281 io_object_t obj )
282 {
283 ipc_port_t port;
284 ipc_port_t sendPort;
285
286 if( obj == NULL)
287 return IP_NULL;
288
289 port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
290 if( port) {
291 sendPort = ipc_port_make_send( port);
292 iokit_release_port( port );
293 } else
294 sendPort = IP_NULL;
295
296 iokit_remove_reference( obj );
297
298 return( sendPort);
299 }
300
301 int gIOKitPortCount;
302
303 EXTERN ipc_port_t
304 iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
305 {
306 ipc_port_t notify;
307 ipc_port_t port;
308
309 do {
310
311 /* Allocate port, keeping a reference for it. */
312 port = ipc_port_alloc_kernel();
313 if( port == IP_NULL)
314 continue;
315
316 /* set kobject & type */
317 // iokit_add_reference( obj );
318 ipc_kobject_set( port, (ipc_kobject_t) obj, type);
319
320 /* Request no-senders notifications on the port. */
321 ip_lock( port);
322 notify = ipc_port_make_sonce_locked( port);
323 ipc_port_nsrequest( port, 1, notify, &notify);
324 /* port unlocked */
325 assert( notify == IP_NULL);
326 gIOKitPortCount++;
327
328 } while( FALSE);
329
330 return( port );
331 }
332
333
/*
 * Tear down an IOKit object port: detach the kobject binding under the
 * binding lock so concurrent lookups see no object, then destroy the
 * kernel port.  The associated object reference is dropped by the
 * caller, not here.
 */
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{

	iokit_lock_port(port);
	ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

	// iokit_remove_reference( obj );
	iokit_unlock_port(port);
	ipc_port_dealloc_kernel( port);
	gIOKitPortCount--;

	return( KERN_SUCCESS);
}
348
349 EXTERN kern_return_t
350 iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
351 {
352 iokit_lock_port(port);
353 ipc_kobject_set( port, (ipc_kobject_t) obj, type);
354 iokit_unlock_port(port);
355
356 return( KERN_SUCCESS);
357 }
358
359 EXTERN mach_port_name_t
360 iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
361 {
362 ipc_port_t port;
363 ipc_port_t sendPort;
364 mach_port_name_t name = 0;
365
366 if( obj == NULL)
367 return MACH_PORT_NULL;
368
369 port = iokit_port_for_object( obj, type );
370 if( port) {
371 sendPort = ipc_port_make_send( port);
372 iokit_release_port( port );
373 } else
374 sendPort = IP_NULL;
375
376 if (IP_VALID( sendPort )) {
377 kern_return_t kr;
378 kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
379 MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
380 if ( kr != KERN_SUCCESS) {
381 ipc_port_release_send( sendPort );
382 name = MACH_PORT_NULL;
383 }
384 } else if ( sendPort == IP_NULL)
385 name = MACH_PORT_NULL;
386 else if ( sendPort == IP_DEAD)
387 name = MACH_PORT_DEAD;
388
389 iokit_remove_reference( obj );
390
391 return( name );
392 }
393
394 EXTERN kern_return_t
395 iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
396 {
397 return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
398 }
399
/*
 * Handle the No-More_Senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
 * port a NMS notification has been generated.
 */

static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
	ipc_port_t		port;
	io_object_t		obj = NULL;
	ipc_kobject_type_t	type = IKOT_NONE;
	ipc_port_t		notify;

	port = (ipc_port_t) notification->not_header.msgh_remote_port;

	// convert a port to io_object_t.
	if( IP_VALID(port)) {
		iokit_lock_port(port);
		if( ip_active(port)) {
			obj = (io_object_t) port->ip_kobject;
			type = ip_kotype( port );
			/* Only object and connect ports carry an IOKit object;
			 * anything else yields no work below. */
			if( (IKOT_IOKIT_OBJECT  == type)
			 || (IKOT_IOKIT_CONNECT == type))
				iokit_add_reference( obj );
			else
				obj = NULL;
		}
		iokit_unlock_port(port);

		if( obj ) {

			/* make-send count at the time the notification fired */
			mach_port_mscount_t mscount = notification->not_count;

			/* Non-success means the object declined to die (new send
			 * rights were made since); re-arm the notification. */
			if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
			{
				/* Re-request no-senders notifications on the port (if still active) */
				ip_lock(port);
				if (ip_active(port)) {
					notify = ipc_port_make_sonce_locked(port);
					ipc_port_nsrequest( port, mscount + 1, notify, &notify);
					/* port unlocked */
					/* A previous notify request was outstanding; drop it. */
					if ( notify != IP_NULL)
						ipc_port_release_sonce(notify);
				} else {
					ip_unlock(port);
				}
			}
			/* Drop the reference taken above. */
			iokit_remove_reference( obj );
		}
	}
}
452
453
454 EXTERN
455 boolean_t
456 iokit_notify( mach_msg_header_t * msg )
457 {
458 switch (msg->msgh_id) {
459 case MACH_NOTIFY_NO_SENDERS:
460 iokit_no_senders((mach_no_senders_notification_t *) msg);
461 return TRUE;
462
463 case MACH_NOTIFY_PORT_DELETED:
464 case MACH_NOTIFY_PORT_DESTROYED:
465 case MACH_NOTIFY_SEND_ONCE:
466 case MACH_NOTIFY_DEAD_NAME:
467 default:
468 printf("iokit_notify: strange notification %d\n", msg->msgh_id);
469 return FALSE;
470 }
471 }
472
473 /* need to create a pmap function to generalize */
474 unsigned int IODefaultCacheBits(addr64_t pa)
475 {
476 return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
477 }
478
479 kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
480 mach_vm_size_t length, unsigned int options)
481 {
482 vm_prot_t prot;
483 unsigned int flags;
484 ppnum_t pagenum;
485 pmap_t pmap = map->pmap;
486
487 prot = (options & kIOMapReadOnly)
488 ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
489
490 pagenum = (ppnum_t)atop_64(pa);
491
492 switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */
493
494 case kIOMapDefaultCache:
495 default:
496 flags = IODefaultCacheBits(pa);
497 break;
498
499 case kIOMapInhibitCache:
500 flags = VM_WIMG_IO;
501 break;
502
503 case kIOMapWriteThruCache:
504 flags = VM_WIMG_WTHRU;
505 break;
506
507 case kIOMapWriteCombineCache:
508 flags = VM_WIMG_WCOMB;
509 break;
510
511 case kIOMapCopybackCache:
512 flags = VM_WIMG_COPYBACK;
513 break;
514 case kIOMapCopybackInnerCache:
515 flags = VM_WIMG_INNERWBACK;
516 break;
517 }
518
519 pmap_set_cache_attributes(pagenum, flags);
520
521 vm_map_set_cache_attr(map, (vm_map_offset_t)va);
522
523
524 // Set up a block mapped area
525 pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
526
527 return( KERN_SUCCESS );
528 }
529
530 kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
531 {
532 pmap_t pmap = map->pmap;
533
534 pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
535
536 return( KERN_SUCCESS );
537 }
538
539 kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
540 mach_vm_size_t __unused length, unsigned int __unused options)
541 {
542 mach_vm_size_t off;
543 vm_prot_t prot;
544 unsigned int flags;
545 pmap_t pmap = map->pmap;
546 pmap_flush_context pmap_flush_context_storage;
547 boolean_t delayed_pmap_flush = FALSE;
548
549 prot = (options & kIOMapReadOnly)
550 ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
551
552 switch (options & kIOMapCacheMask)
553 {
554 // what cache mode do we need?
555 case kIOMapDefaultCache:
556 default:
557 return (KERN_INVALID_ARGUMENT);
558
559 case kIOMapInhibitCache:
560 flags = VM_WIMG_IO;
561 break;
562
563 case kIOMapWriteThruCache:
564 flags = VM_WIMG_WTHRU;
565 break;
566
567 case kIOMapWriteCombineCache:
568 flags = VM_WIMG_WCOMB;
569 break;
570
571 case kIOMapCopybackCache:
572 flags = VM_WIMG_COPYBACK;
573 break;
574 }
575
576 pmap_flush_context_init(&pmap_flush_context_storage);
577 delayed_pmap_flush = FALSE;
578
579 // enter each page's physical address in the target map
580 for (off = 0; off < length; off += page_size)
581 {
582 ppnum_t ppnum = pmap_find_phys(pmap, va + off);
583 if (ppnum) {
584 pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
585 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
586 delayed_pmap_flush = TRUE;
587 }
588 }
589 if (delayed_pmap_flush == TRUE)
590 pmap_flush(&pmap_flush_context_storage);
591
592 return (KERN_SUCCESS);
593 }
594
595 ppnum_t IOGetLastPageNumber(void)
596 {
597 #if __i386__ || __x86_64__
598 ppnum_t lastPage, highest = 0;
599 unsigned int idx;
600
601 for (idx = 0; idx < pmap_memory_region_count; idx++)
602 {
603 lastPage = pmap_memory_regions[idx].end - 1;
604 if (lastPage > highest)
605 highest = lastPage;
606 }
607 return (highest);
608 #else
609 #error unknown arch
610 #endif
611 }
612
613
614 void IOGetTime( mach_timespec_t * clock_time);
615 void IOGetTime( mach_timespec_t * clock_time)
616 {
617 clock_sec_t sec;
618 clock_nsec_t nsec;
619 clock_get_system_nanotime(&sec, &nsec);
620 clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
621 clock_time->tv_nsec = nsec;
622 }
623