/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>  /* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>

#define EXTERN
#define MIGEXTERN

/*
 * Functions in iokit:IOUserClient.cpp
 */

extern void iokit_add_reference( io_object_t obj );

extern ipc_port_t iokit_port_for_object( io_object_t obj,
            ipc_kobject_type_t type );

extern kern_return_t iokit_client_died( io_object_t obj,
            ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );

extern kern_return_t
iokit_client_memory_for_type(
    io_object_t     connect,
    unsigned int    type,
    unsigned int *  flags,
    vm_address_t *  address,
    vm_size_t *     size );


extern ppnum_t IOGetLastPageNumber(void);

/*
 * Functions imported by iokit:IOUserClient.cpp
 */

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
            ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
            io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

extern void iokit_lock_port(ipc_port_t port);
extern void iokit_unlock_port(ipc_port_t port);

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

/*
 * Functions imported by iokit:IOMemoryDescriptor.cpp
 */

extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
            mach_vm_size_t length, unsigned int mapFlags);

extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);

extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
            mach_vm_size_t length, unsigned int options);

extern unsigned int IODefaultCacheBits(addr64_t pa);

/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
MIGEXTERN io_object_t
iokit_lookup_object_port(
    ipc_port_t port)
{
    register io_object_t obj;

    if (!IP_VALID(port))
        return (NULL);

    iokit_lock_port(port);
    if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
        obj = (io_object_t) port->ip_kobject;
        iokit_add_reference( obj );
    }
    else
        obj = NULL;

    iokit_unlock_port(port);

    return( obj );
}

MIGEXTERN io_object_t
iokit_lookup_connect_port(
    ipc_port_t port)
{
    register io_object_t obj;

    if (!IP_VALID(port))
        return (NULL);

    iokit_lock_port(port);
    if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
        obj = (io_object_t) port->ip_kobject;
        iokit_add_reference( obj );
    }
    else
        obj = NULL;

    iokit_unlock_port(port);

    return( obj );
}

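/*
 * Lookup a connection by the send-right name it holds in the given IPC space.
 * Doesn't consume any right; produces a connection reference.
 */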
EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
    io_object_t obj = NULL;

    if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
        ipc_port_t port;
        kern_return_t kr;

        kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

        if (kr == KERN_SUCCESS) {
            assert(IP_VALID(port));

            ip_reference(port);
            ip_unlock(port);

            iokit_lock_port(port);
            if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
                obj = (io_object_t) port->ip_kobject;
                iokit_add_reference(obj);
            }
            iokit_unlock_port(port);

            ip_release(port);
        }
    }

    return obj;
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(io_object_t connectRef)
{
    return iokit_lookup_connect_ref(connectRef, current_space());
}

EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
    ipc_port_release_send( port );
}

extern lck_mtx_t iokit_obj_to_port_binding_lock;

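/*
 * The object/port binding is protected by a single global mutex,
 * so the port argument is ignored here.
 */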
EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */
MIGEXTERN ipc_port_t
iokit_make_object_port(
    io_object_t obj )
{
    register ipc_port_t port;
    register ipc_port_t sendPort;

    if( obj == NULL)
        return IP_NULL;

    port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
    if( port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    iokit_remove_reference( obj );

    return( sendPort);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port(
    io_object_t obj )
{
    register ipc_port_t port;
    register ipc_port_t sendPort;

    if( obj == NULL)
        return IP_NULL;

    port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
    if( port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    iokit_remove_reference( obj );

    return( sendPort);
}

int gIOKitPortCount;

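/*
 * Allocate a kernel port bound to the object and arm a no-senders
 * notification on it; the allocation reference is kept for the port.
 */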
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t notify;
    ipc_port_t port;

    do {

        /* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;

        /* set kobject & type */
//      iokit_add_reference( obj );
        ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
        /* port unlocked */
        assert( notify == IP_NULL);
        gIOKitPortCount++;

    } while( FALSE);

    return( port );
}


EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//  iokit_remove_reference( obj );

    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return( KERN_SUCCESS);
}

EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return( KERN_SUCCESS);
}

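/*
 * Make a send right for the object's port and copy it out into the
 * task's IPC space. Consumes a device reference; returns the new name,
 * or MACH_PORT_NULL / MACH_PORT_DEAD on failure.
 */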
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t port;
    ipc_port_t sendPort;
    mach_port_name_t name;

    if( obj == NULL)
        return MACH_PORT_NULL;

    port = iokit_port_for_object( obj, type );
    if( port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    if (IP_VALID( sendPort )) {
        kern_return_t kr;
        kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
                MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
        if ( kr != KERN_SUCCESS)
            name = MACH_PORT_NULL;
    } else if ( sendPort == IP_NULL)
        name = MACH_PORT_NULL;
    else if ( sendPort == IP_DEAD)
        name = MACH_PORT_DEAD;

    iokit_remove_reference( obj );

    return( name );
}

EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
    return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
}

/*
 * Handle the No-More-Senders notification generated when a device port is
 * destroyed. Since no task holds a send right to this device port any
 * longer, an NMS notification has been generated.
 */

static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t port;
    io_object_t obj = NULL;
    ipc_kobject_type_t type = IKOT_NONE;
    ipc_port_t notify;

    port = (ipc_port_t) notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if( IP_VALID(port)) {
        iokit_lock_port(port);
        if( ip_active(port)) {
            obj = (io_object_t) port->ip_kobject;
            type = ip_kotype( port );
            if( (IKOT_IOKIT_OBJECT == type)
             || (IKOT_IOKIT_CONNECT == type))
                iokit_add_reference( obj );
            else
                obj = NULL;
        }
        iokit_unlock_port(port);

        if( obj ) {

            mach_port_mscount_t mscount = notification->not_count;

            if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
            {
                /* Re-request no-senders notifications on the port (if still active) */
                ip_lock(port);
                if (ip_active(port)) {
                    notify = ipc_port_make_sonce_locked(port);
                    ipc_port_nsrequest( port, mscount + 1, notify, &notify);
                    /* port unlocked */
                    if ( notify != IP_NULL)
                        ipc_port_release_sonce(notify);
                }
            }
            iokit_remove_reference( obj );
        }
    }
}

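/*
 * Dispatch a Mach notification delivered to an IOKit kernel object port;
 * only no-senders notifications are expected here.
 */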
EXTERN
boolean_t
iokit_notify( mach_msg_header_t * msg )
{
    switch (msg->msgh_id) {
        case MACH_NOTIFY_NO_SENDERS:
            iokit_no_senders((mach_no_senders_notification_t *) msg);
            return TRUE;

        case MACH_NOTIFY_PORT_DELETED:
        case MACH_NOTIFY_PORT_DESTROYED:
        case MACH_NOTIFY_SEND_ONCE:
        case MACH_NOTIFY_DEAD_NAME:
        default:
            printf("iokit_notify: strange notification %d\n", msg->msgh_id);
            return FALSE;
    }
}

/* need to create a pmap function to generalize */
unsigned int IODefaultCacheBits(addr64_t pa)
{
    return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
}

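/*
 * Enter a physically contiguous range into the map's pmap as a block
 * mapping with the requested protection and cache mode.
 */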
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
            mach_vm_size_t length, unsigned int options)
{
    vm_prot_t prot;
    unsigned int flags;
    ppnum_t pagenum;
    pmap_t pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch(options & kIOMapCacheMask ) {    /* What cache mode do we need? */

        case kIOMapDefaultCache:
        default:
            flags = IODefaultCacheBits(pa);
            break;

        case kIOMapInhibitCache:
            flags = VM_WIMG_IO;
            break;

        case kIOMapWriteThruCache:
            flags = VM_WIMG_WTHRU;
            break;

        case kIOMapWriteCombineCache:
            flags = VM_WIMG_WCOMB;
            break;

        case kIOMapCopybackCache:
            flags = VM_WIMG_COPYBACK;
            break;

        case kIOMapCopybackInnerCache:
            flags = VM_WIMG_INNERWBACK;
            break;
    }

    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);


    // Set up a block mapped area
    pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);

    return( KERN_SUCCESS );
}

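/* Remove any mappings previously entered in the given range. */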
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return( KERN_SUCCESS );
}

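/*
 * Change the cache mode of an existing mapping by re-entering each
 * resident page with the new WIMG attributes; kIOMapDefaultCache is
 * rejected as invalid.
 */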
kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
            mach_vm_size_t length, unsigned int options)
{
    mach_vm_size_t off;
    vm_prot_t prot;
    unsigned int flags;
    pmap_t pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    switch (options & kIOMapCacheMask)
    {
        // what cache mode do we need?
        case kIOMapDefaultCache:
        default:
            return (KERN_INVALID_ARGUMENT);

        case kIOMapInhibitCache:
            flags = VM_WIMG_IO;
            break;

        case kIOMapWriteThruCache:
            flags = VM_WIMG_WTHRU;
            break;

        case kIOMapWriteCombineCache:
            flags = VM_WIMG_WCOMB;
            break;

        case kIOMapCopybackCache:
            flags = VM_WIMG_COPYBACK;
            break;
    }

    // enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size)
    {
        ppnum_t ppnum = pmap_find_phys(pmap, va + off);
        if (ppnum)
            pmap_enter(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE);
    }

    return (KERN_SUCCESS);
}

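/*
 * Return the highest physical page number covered by the pmap's
 * memory regions (x86 only in this file).
 */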
ppnum_t IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
    ppnum_t lastPage, highest = 0;
    unsigned int idx;

    for (idx = 0; idx < pmap_memory_region_count; idx++)
    {
        lastPage = pmap_memory_regions[idx].end - 1;
        if (lastPage > highest)
            highest = lastPage;
    }
    return (highest);
#else
#error unknown arch
#endif
}

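/* Return the current system uptime as a mach_timespec_t. */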
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
    clock_sec_t sec;
    clock_nsec_t nsec;
    clock_get_system_nanotime(&sec, &nsec);
    clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
    clock_time->tv_nsec = nsec;
}