]> git.saurik.com Git - apple/xnu.git/blob - osfmk/device/iokit_rpc.c
xnu-4570.1.46.tar.gz
[apple/xnu.git] / osfmk / device / iokit_rpc.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <zone_debug.h>
29 #include <mach/boolean.h>
30 #include <mach/kern_return.h>
31 #include <mach/mig_errors.h>
32 #include <mach/port.h>
33 #include <mach/vm_param.h>
34 #include <mach/notify.h>
35 //#include <mach/mach_host_server.h>
36 #include <mach/mach_types.h>
37
38 #include <machine/machparam.h> /* spl definitions */
39
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
42
43 #include <kern/clock.h>
44 #include <kern/spl.h>
45 #include <kern/counters.h>
46 #include <kern/queue.h>
47 #include <kern/zalloc.h>
48 #include <kern/thread.h>
49 #include <kern/task.h>
50 #include <kern/sched_prim.h>
51 #include <kern/misc_protos.h>
52
53 #include <vm/pmap.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
56
57 #include <device/device_types.h>
58 #include <device/device_port.h>
59 #include <device/device_server.h>
60
61 #include <machine/machparam.h>
62
63 #if defined(__i386__) || defined(__x86_64__)
64 #include <i386/pmap.h>
65 #endif
66 #if defined(__arm__) || defined(__arm64__)
67 #include <arm/pmap.h>
68 #endif
69 #include <IOKit/IOTypes.h>
70
71 #define EXTERN
72 #define MIGEXTERN
73
74 /*
75 * Functions in iokit:IOUserClient.cpp
76 */
77
78 extern void iokit_add_reference( io_object_t obj );
79 extern void iokit_add_connect_reference( io_object_t obj );
80
81 extern ipc_port_t iokit_port_for_object( io_object_t obj,
82 ipc_kobject_type_t type );
83
84 extern kern_return_t iokit_client_died( io_object_t obj,
85 ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );
86
87 extern kern_return_t
88 iokit_client_memory_for_type(
89 io_object_t connect,
90 unsigned int type,
91 unsigned int * flags,
92 vm_address_t * address,
93 vm_size_t * size );
94
95
96 extern ppnum_t IOGetLastPageNumber(void);
97
98 /*
99 * Functions imported by iokit:IOUserClient.cpp
100 */
101
102 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
103 ipc_kobject_type_t type );
104
105 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
106
107 extern mach_port_name_t iokit_make_send_right( task_t task,
108 io_object_t obj, ipc_kobject_type_t type );
109
110 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
111
112 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
113
114 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
115
116 extern void iokit_retain_port( ipc_port_t port );
117 extern void iokit_release_port( ipc_port_t port );
118 extern void iokit_release_port_send( ipc_port_t port );
119
120 extern void iokit_lock_port(ipc_port_t port);
121 extern void iokit_unlock_port(ipc_port_t port);
122
123 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
124
125 /*
126 * Functions imported by iokit:IOMemoryDescriptor.cpp
127 */
128
129 extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
130 mach_vm_size_t length, unsigned int mapFlags);
131
132 extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);
133
134 extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
135 mach_vm_size_t length, unsigned int options);
136
137 extern unsigned int IODefaultCacheBits(addr64_t pa);
138
139 /*
140 * Lookup a device by its port.
141 * Doesn't consume the naked send right; produces a device reference.
142 */
143 MIGEXTERN io_object_t
144 iokit_lookup_object_port(
145 ipc_port_t port)
146 {
147 io_object_t obj;
148
149 if (!IP_VALID(port))
150 return (NULL);
151
152 iokit_lock_port(port);
153 if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
154 obj = (io_object_t) port->ip_kobject;
155 iokit_add_reference( obj );
156 }
157 else
158 obj = NULL;
159
160 iokit_unlock_port(port);
161
162 return( obj );
163 }
164
165 MIGEXTERN io_object_t
166 iokit_lookup_connect_port(
167 ipc_port_t port)
168 {
169 io_object_t obj;
170
171 if (!IP_VALID(port))
172 return (NULL);
173
174 iokit_lock_port(port);
175 if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
176 obj = (io_object_t) port->ip_kobject;
177 iokit_add_connect_reference( obj );
178 }
179 else
180 obj = NULL;
181
182 iokit_unlock_port(port);
183
184 return( obj );
185 }
186
/*
 * Look up a connect object by port *name* in the given IPC space
 * (connectRef is really a mach_port_name_t smuggled in an io_object_t).
 * Produces a connect reference on success; returns NULL otherwise.
 */
EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
    io_object_t obj = NULL;

    if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
        ipc_port_t port;
        kern_return_t kr;

        /* Resolve the name to a port; on success the port is returned locked. */
        kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

        if (kr == KERN_SUCCESS) {
            assert(IP_VALID(port));

            /* Pin the port with our own reference before dropping its lock,
             * so it cannot go away while we take the binding lock. */
            ip_reference(port);
            ip_unlock(port);

            iokit_lock_port(port);
            if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
                obj = (io_object_t) port->ip_kobject;
                iokit_add_connect_reference(obj);
            }
            iokit_unlock_port(port);

            /* Drop the reference taken above. */
            ip_release(port);
        }
    }

    return obj;
}
217
218 EXTERN io_object_t
219 iokit_lookup_connect_ref_current_task(io_object_t connectRef)
220 {
221 return iokit_lookup_connect_ref(connectRef, current_space());
222 }
223
/* Take an additional reference on an IOKit kobject port. */
EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}
229
/* Drop a port reference taken by iokit_retain_port() or port allocation. */
EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}
235
/* Release a naked send right (deallocates the right, not a port reference). */
EXTERN void
iokit_release_port_send( ipc_port_t port )
{
    ipc_port_release_send( port );
}
241
242 extern lck_mtx_t iokit_obj_to_port_binding_lock;
243
/*
 * Serialize access to the object<->port binding.  A single global mutex
 * covers all IOKit ports, so the port argument is intentionally unused.
 */
EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}
249
/* Release the global object<->port binding mutex (see iokit_lock_port). */
EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}
255
256 /*
257 * Get the port for a device.
258 * Consumes a device reference; produces a naked send right.
259 */
260 MIGEXTERN ipc_port_t
261 iokit_make_object_port(
262 io_object_t obj )
263 {
264 ipc_port_t port;
265 ipc_port_t sendPort;
266
267 if( obj == NULL)
268 return IP_NULL;
269
270 port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
271 if( port) {
272 sendPort = ipc_port_make_send( port);
273 iokit_release_port( port );
274 } else
275 sendPort = IP_NULL;
276
277 iokit_remove_reference( obj );
278
279 return( sendPort);
280 }
281
282 MIGEXTERN ipc_port_t
283 iokit_make_connect_port(
284 io_object_t obj )
285 {
286 ipc_port_t port;
287 ipc_port_t sendPort;
288
289 if( obj == NULL)
290 return IP_NULL;
291
292 port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
293 if( port) {
294 sendPort = ipc_port_make_send( port);
295 iokit_release_port( port );
296 } else
297 sendPort = IP_NULL;
298
299 iokit_remove_reference( obj );
300
301 return( sendPort);
302 }
303
304 int gIOKitPortCount;
305
/*
 * Allocate a kernel port, bind it to 'obj' with kobject type 'type', and
 * arm a no-senders notification so iokit_no_senders() fires when the last
 * send right disappears.  Returns IP_NULL if port allocation fails.
 */
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		notify;
    ipc_port_t		port;

    do {

        /* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;	/* 'continue' exits the do/while(FALSE), returning IP_NULL */

        /* set kobject & type */
//	iokit_add_reference( obj );
	ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
	/* port unlocked */
        /* No previous notification port should have been registered. */
        assert( notify == IP_NULL);
	gIOKitPortCount++;

    } while( FALSE);

    return( port );
}
335
336
/*
 * Tear down a port created by iokit_alloc_object_port(): unbind the
 * kobject under the binding lock, then deallocate the kernel port.
 */
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{

    iokit_lock_port(port);
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//    iokit_remove_reference( obj );
    iokit_unlock_port(port);
    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return( KERN_SUCCESS);
}
351
/*
 * Rebind an existing port to a (possibly different) kobject and type,
 * under the binding lock.  Always returns KERN_SUCCESS.
 */
EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return( KERN_SUCCESS);
}
361
/*
 * Create a send right for 'obj's port of the given type and copy it out
 * into 'task's IPC space, returning the resulting port name.
 *
 * Consumes one reference on 'obj'.  Returns MACH_PORT_NULL when the
 * object has no port or the copyout fails, MACH_PORT_DEAD when the
 * port is dead.
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		port;
    ipc_port_t		sendPort;
    mach_port_name_t	name = 0;

    if( obj == NULL)
        return MACH_PORT_NULL;

    port = iokit_port_for_object( obj, type );
    if( port) {
        sendPort = ipc_port_make_send( port);
        /* Drop the port reference returned by iokit_port_for_object(). */
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    if (IP_VALID( sendPort )) {
    	kern_return_t	kr;
        /* ipc_object_copyout() consumes the send right on success ... */
    	kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
				MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
	if ( kr != KERN_SUCCESS) {
	    /* ... but on failure we must release it ourselves. */
	    ipc_port_release_send( sendPort );
	    name = MACH_PORT_NULL;
	}
    } else if ( sendPort == IP_NULL)
        name = MACH_PORT_NULL;
    else if ( sendPort == IP_DEAD)
    	name = MACH_PORT_DEAD;

    iokit_remove_reference( obj );

    return( name );
}
396
397 EXTERN kern_return_t
398 iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
399 {
400 return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
401 }
402
403 /*
404 * Handle the No-More_Senders notification generated from a device port destroy.
405 * Since there are no longer any tasks which hold a send right to this device
406 * port a NMS notification has been generated.
407 */
408
static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t		port;
    io_object_t		obj = NULL;
    ipc_kobject_type_t	type = IKOT_NONE;
    ipc_port_t		notify;

    port = (ipc_port_t) notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if( IP_VALID(port)) {
        iokit_lock_port(port);
        if( ip_active(port)) {
            obj = (io_object_t) port->ip_kobject;
	    type = ip_kotype( port );
            if( (IKOT_IOKIT_OBJECT == type)
	     || (IKOT_IOKIT_CONNECT == type))
                iokit_add_reference( obj );
            else
                obj = NULL;
	}
        iokit_unlock_port(port);

        if( obj ) {

	    mach_port_mscount_t mscount = notification->not_count;

	    /* iokit_client_died() refuses when the make-send count has
	     * advanced past the notification's count, i.e. a new send
	     * right was created after the notification was generated. */
            if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
	    {
		/* Re-request no-senders notifications on the port (if still active) */
		ip_lock(port);
		if (ip_active(port)) {
		    notify = ipc_port_make_sonce_locked(port);
		    ipc_port_nsrequest( port, mscount + 1, notify, &notify);
		    /* port unlocked */
		    /* nsrequest returned a previous notification right; drop it. */
		    if ( notify != IP_NULL)
			ipc_port_release_sonce(notify);
		} else {
		    ip_unlock(port);
		}
	    }
            iokit_remove_reference( obj );
        }
    }
}
455
456
457 EXTERN
458 boolean_t
459 iokit_notify( mach_msg_header_t * msg )
460 {
461 switch (msg->msgh_id) {
462 case MACH_NOTIFY_NO_SENDERS:
463 iokit_no_senders((mach_no_senders_notification_t *) msg);
464 return TRUE;
465
466 case MACH_NOTIFY_PORT_DELETED:
467 case MACH_NOTIFY_PORT_DESTROYED:
468 case MACH_NOTIFY_SEND_ONCE:
469 case MACH_NOTIFY_DEAD_NAME:
470 default:
471 printf("iokit_notify: strange notification %d\n", msg->msgh_id);
472 return FALSE;
473 }
474 }
475
476 /* need to create a pmap function to generalize */
unsigned int IODefaultCacheBits(addr64_t pa)
{
    /* Ask the pmap layer for the default cache attributes of this physical page. */
    return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
}
481
/*
 * Map 'length' bytes of physical memory starting at 'pa' into 'map' at
 * virtual address 'va'.  Protection comes from kIOMapReadOnly and the
 * cache mode from the kIOMapCacheMask bits of 'options'.  Returns the
 * result of pmap_map_block().
 */
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
			mach_vm_size_t length, unsigned int options)
{
    vm_prot_t	 prot;
    unsigned int flags;
    ppnum_t	 pagenum;
    pmap_t 	 pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch(options & kIOMapCacheMask ) {			/* What cache mode do we need? */

	case kIOMapDefaultCache:
	default:
	    flags = IODefaultCacheBits(pa);
	    break;

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;

	case kIOMapCopybackInnerCache:
	    flags = VM_WIMG_INNERWBACK;
	    break;

	case kIOMapPostedWrite:
	    flags = VM_WIMG_POSTED;
	    break;
    }

    /* Record the chosen cache attributes against the physical page ... */
    pmap_set_cache_attributes(pagenum, flags);

    /* ... and mark the map as containing non-default cacheability. */
    vm_map_set_cache_attr(map, (vm_map_offset_t)va);


    // Set up a block mapped area
    return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
535
536 kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
537 {
538 pmap_t pmap = map->pmap;
539
540 pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
541
542 return( KERN_SUCCESS );
543 }
544
545 kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
546 mach_vm_size_t __unused length, unsigned int __unused options)
547 {
548 mach_vm_size_t off;
549 vm_prot_t prot;
550 unsigned int flags;
551 pmap_t pmap = map->pmap;
552 pmap_flush_context pmap_flush_context_storage;
553 boolean_t delayed_pmap_flush = FALSE;
554
555 prot = (options & kIOMapReadOnly)
556 ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
557
558 switch (options & kIOMapCacheMask)
559 {
560 // what cache mode do we need?
561 case kIOMapDefaultCache:
562 default:
563 return (KERN_INVALID_ARGUMENT);
564
565 case kIOMapInhibitCache:
566 flags = VM_WIMG_IO;
567 break;
568
569 case kIOMapWriteThruCache:
570 flags = VM_WIMG_WTHRU;
571 break;
572
573 case kIOMapWriteCombineCache:
574 flags = VM_WIMG_WCOMB;
575 break;
576
577 case kIOMapCopybackCache:
578 flags = VM_WIMG_COPYBACK;
579 break;
580
581 case kIOMapCopybackInnerCache:
582 flags = VM_WIMG_INNERWBACK;
583 break;
584
585 case kIOMapPostedWrite:
586 flags = VM_WIMG_POSTED;
587 break;
588 }
589
590 pmap_flush_context_init(&pmap_flush_context_storage);
591 delayed_pmap_flush = FALSE;
592
593 // enter each page's physical address in the target map
594 for (off = 0; off < length; off += page_size)
595 {
596 ppnum_t ppnum = pmap_find_phys(pmap, va + off);
597 if (ppnum) {
598 pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
599 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
600 delayed_pmap_flush = TRUE;
601 }
602 }
603 if (delayed_pmap_flush == TRUE)
604 pmap_flush(&pmap_flush_context_storage);
605
606 return (KERN_SUCCESS);
607 }
608
/*
 * Return the highest physical page number present in the system.
 * x86: scan the pmap memory-region table for the largest end page.
 * ARM: reports 0 (unsupported on that path).
 */
ppnum_t IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
    ppnum_t	 lastPage, highest = 0;
    unsigned int idx;

    for (idx = 0; idx < pmap_memory_region_count; idx++)
    {
	lastPage = pmap_memory_regions[idx].end - 1;
	if (lastPage > highest)
	    highest = lastPage;
    }
    return (highest);
#elif __arm__ || __arm64__
    return 0;
#else
#error unknown arch
#endif
}
628
629
630 void IOGetTime( mach_timespec_t * clock_time);
631 void IOGetTime( mach_timespec_t * clock_time)
632 {
633 clock_sec_t sec;
634 clock_nsec_t nsec;
635 clock_get_system_nanotime(&sec, &nsec);
636 clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
637 clock_time->tv_nsec = nsec;
638 }
639