/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_kdb.h>
#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>          /* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#ifdef __ppc__
#include <ppc/mappings.h>
#endif
#ifdef __i386__
#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>

#define EXTERN
#define MIGEXTERN

/*
 * Functions in iokit:IOUserClient.cpp
 */

extern void iokit_add_reference( io_object_t obj );

extern ipc_port_t iokit_port_for_object( io_object_t obj,
            ipc_kobject_type_t type );

extern kern_return_t iokit_client_died( io_object_t obj,
            ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );

extern kern_return_t
iokit_client_memory_for_type(
    io_object_t     connect,
    unsigned int    type,
    unsigned int *  flags,
    vm_address_t *  address,
    vm_size_t *     size );


extern ppnum_t IOGetLastPageNumber(void);

/*
 * Functions imported by iokit:IOUserClient.cpp
 */

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
            ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
            io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

/*
 * Functions imported by iokit:IOMemoryDescriptor.cpp
 */

extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
            mach_vm_size_t length, unsigned int mapFlags);

extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);

extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
            mach_vm_size_t length, unsigned int options);

extern unsigned int IODefaultCacheBits(addr64_t pa);

/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
MIGEXTERN io_object_t
iokit_lookup_object_port(
    ipc_port_t port)
{
    register io_object_t obj;

    if (!IP_VALID(port))
        return (NULL);

    ip_lock(port);
    if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
        obj = (io_object_t) port->ip_kobject;
        iokit_add_reference( obj );
    }
    else
        obj = NULL;

    ip_unlock(port);

    return( obj );
}

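/*
 * Usage sketch (an assumption, not part of the original file): a MIG server
 * routine that receives an IOKit object port would typically balance the
 * reference taken by this lookup, e.g.
 *
 *      io_object_t obj = iokit_lookup_object_port(port);
 *      if (obj == NULL)
 *          return (kIOReturnBadArgument);
 *      // ... use obj ...
 *      iokit_remove_reference( obj );  // drop the reference the lookup produced
 *
 * The naked send right carried by the port itself is left untouched, per the
 * comment above.
 */
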
MIGEXTERN io_object_t
iokit_lookup_connect_port(
    ipc_port_t port)
{
    register io_object_t obj;

    if (!IP_VALID(port))
        return (NULL);

    ip_lock(port);
    if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
        obj = (io_object_t) port->ip_kobject;
        iokit_add_reference( obj );
    }
    else
        obj = NULL;

    ip_unlock(port);

    return( obj );
}

EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
    io_object_t obj = NULL;

    if (connectRef && MACH_PORT_VALID((mach_port_name_t)connectRef)) {
        ipc_port_t port;
        kern_return_t kr;

        kr = ipc_object_translate(space, (mach_port_name_t)connectRef, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

        if (kr == KERN_SUCCESS) {
            assert(IP_VALID(port));

            if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
                obj = (io_object_t) port->ip_kobject;
                iokit_add_reference(obj);
            }

            ip_unlock(port);
        }
    }

    return obj;
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(io_object_t connectRef)
{
    return iokit_lookup_connect_ref(connectRef, current_space());
}

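/*
 * Usage sketch (an assumption, not part of the original file): despite its
 * io_object_t type, connectRef is really a Mach port name from the caller's
 * IPC space, so a trap-style entry point might do something like:
 *
 *      io_object_t connect =
 *          iokit_lookup_connect_ref_current_task((io_object_t)(uintptr_t) userName);
 *      if (connect == NULL)
 *          return (kIOReturnBadArgument);
 *
 * where userName is a hypothetical mach_port_name_t taken from user space.
 */
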
EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */
MIGEXTERN ipc_port_t
iokit_make_object_port(
    io_object_t obj )
{
    register ipc_port_t port;
    register ipc_port_t sendPort;

    if( obj == NULL)
        return IP_NULL;

    port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
    if( port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    iokit_remove_reference( obj );

    return( sendPort);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port(
    io_object_t obj )
{
    register ipc_port_t port;
    register ipc_port_t sendPort;

    if( obj == NULL)
        return IP_NULL;

    port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
    if( port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    iokit_remove_reference( obj );

    return( sendPort);
}

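/*
 * Usage sketch (an assumption, not part of the original file): these
 * converters are the outgoing half of the lookups above. A routine that
 * returns an object to user space gives up one device reference and hands
 * MIG a naked send right, e.g.
 *
 *      iokit_add_reference( obj );                 // consumed below
 *      replyPort = iokit_make_object_port( obj );  // naked send right or IP_NULL
 *
 * where replyPort is a hypothetical ipc_port_t destined for the reply message.
 */
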
int gIOKitPortCount;

EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t notify;
    ipc_port_t port;

    do {

        /* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;

        /* set kobject & type */
//      iokit_add_reference( obj );
        ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        notify = ipc_port_make_sonce( port);
        ip_lock( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
        assert( notify == IP_NULL);
        gIOKitPortCount++;

    } while( FALSE);

    return( port );
}


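/*
 * Lifecycle sketch (an assumption, not part of the original file): a port
 * allocated above is expected to be paired with iokit_destroy_object_port()
 * once the backing object goes away, e.g.
 *
 *      ipc_port_t port = iokit_alloc_object_port( obj, IKOT_IOKIT_OBJECT );
 *      // ... hand out send rights, service requests ...
 *      iokit_destroy_object_port( port );
 */
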
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//  iokit_remove_reference( obj );

    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return( KERN_SUCCESS);
}

EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);

    return( KERN_SUCCESS);
}

EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t port;
    ipc_port_t sendPort;
    mach_port_name_t name;

    if( obj == NULL)
        return MACH_PORT_NULL;

    port = iokit_port_for_object( obj, type );
    if( port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else
        sendPort = IP_NULL;

    if (IP_VALID( sendPort )) {
        kern_return_t kr;
        kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
                MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
        if ( kr != KERN_SUCCESS)
            name = MACH_PORT_NULL;
    } else if ( sendPort == IP_NULL)
        name = MACH_PORT_NULL;
    else if ( sendPort == IP_DEAD)
        name = MACH_PORT_DEAD;

    iokit_remove_reference( obj );

    return( name );
}

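/*
 * Usage sketch (an assumption, not part of the original file): a caller that
 * wants to publish a connection to a user task might do:
 *
 *      mach_port_name_t name =
 *          iokit_make_send_right( task, connect, IKOT_IOKIT_CONNECT );
 *      if (!MACH_PORT_VALID( name ))
 *          // ... handle failure ...
 *
 * Note that the object reference passed in is consumed even when no valid
 * name is returned.
 */
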
EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
    return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
}

/*
 * Handle the no-more-senders notification generated from a device port destroy.
 * Since no tasks hold a send right to this device port any longer, a
 * no-more-senders notification has been generated.
 */

static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t port;
    io_object_t obj = NULL;
    ipc_kobject_type_t type = IKOT_NONE;
    ipc_port_t notify;

    port = (ipc_port_t) notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if( IP_VALID(port)) {
        ip_lock(port);
        if( ip_active(port)) {
            obj = (io_object_t) port->ip_kobject;
            type = ip_kotype( port );
            if( (IKOT_IOKIT_OBJECT  == type)
             || (IKOT_IOKIT_CONNECT == type))
                iokit_add_reference( obj );
            else
                obj = NULL;
        }
        ip_unlock(port);

        if( obj ) {

            mach_port_mscount_t mscount = notification->not_count;

            if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
            {
                /* Re-request no-senders notifications on the port. */
                notify = ipc_port_make_sonce( port);
                ip_lock( port);
                ipc_port_nsrequest( port, mscount + 1, notify, &notify);
                assert( notify == IP_NULL);
            }
            iokit_remove_reference( obj );
        }
    }
}


EXTERN
boolean_t
iokit_notify( mach_msg_header_t * msg )
{
    switch (msg->msgh_id) {
    case MACH_NOTIFY_NO_SENDERS:
        iokit_no_senders((mach_no_senders_notification_t *) msg);
        return TRUE;

    case MACH_NOTIFY_PORT_DELETED:
    case MACH_NOTIFY_PORT_DESTROYED:
    case MACH_NOTIFY_SEND_ONCE:
    case MACH_NOTIFY_DEAD_NAME:
    default:
        printf("iokit_notify: strange notification %d\n", msg->msgh_id);
        return FALSE;
    }
}

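/*
 * Dispatch sketch (an assumption, not part of the original file): the kernel
 * object notification path is expected to hand Mach notification messages for
 * IKOT_IOKIT_* ports to this routine; only no-senders messages are consumed:
 *
 *      if (!iokit_notify( msg ))
 *          // message was not a no-senders notification; nothing to do
 */
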
/* need to create a pmap function to generalize */
unsigned int IODefaultCacheBits(addr64_t pa)
{
    return(pmap_cache_attributes(pa >> PAGE_SHIFT));
}

kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
            mach_vm_size_t length, unsigned int options)
{
    vm_prot_t    prot;
    unsigned int flags;
    pmap_t       pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    switch(options & kIOMapCacheMask ) {        /* What cache mode do we need? */

    case kIOMapDefaultCache:
    default:
        flags = IODefaultCacheBits(pa);
        break;

    case kIOMapInhibitCache:
        flags = VM_WIMG_IO;
        break;

    case kIOMapWriteThruCache:
        flags = VM_WIMG_WTHRU;
        break;

    case kIOMapWriteCombineCache:
        flags = VM_WIMG_WCOMB;
        break;

    case kIOMapCopybackCache:
        flags = VM_WIMG_COPYBACK;
        break;
    }

    // Set up a block mapped area
    pmap_map_block(pmap, va, (ppnum_t)atop_64(pa), (uint32_t) atop_64(round_page_64(length)), prot, flags, 0);

    return( KERN_SUCCESS );
}

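/*
 * Usage sketch (an assumption, not part of the original file): a memory
 * descriptor style caller that has reserved a VA range and knows the physical
 * address of a device aperture might map it write-combined like this:
 *
 *      kr = IOMapPages( kernel_map, vaddr, paddr,
 *                       round_page_64( len ), kIOMapWriteCombineCache );
 *
 * where vaddr, paddr and len are hypothetical; kIOMapDefaultCache falls back
 * to IODefaultCacheBits() for the physical address.
 */
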
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return( KERN_SUCCESS );
}

kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
            mach_vm_size_t __unused length, unsigned int __unused options)
{
#if __ppc__
    // can't remap block mappings, but ppc doesn't speculatively read from WC
#else

    mach_vm_size_t off;
    vm_prot_t      prot;
    unsigned int   flags;
    pmap_t         pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    switch (options & kIOMapCacheMask)
    {
        // what cache mode do we need?
        case kIOMapDefaultCache:
        default:
            return (KERN_INVALID_ARGUMENT);

        case kIOMapInhibitCache:
            flags = VM_WIMG_IO;
            break;

        case kIOMapWriteThruCache:
            flags = VM_WIMG_WTHRU;
            break;

        case kIOMapWriteCombineCache:
            flags = VM_WIMG_WCOMB;
            break;

        case kIOMapCopybackCache:
            flags = VM_WIMG_COPYBACK;
            break;
    }

    // enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size)
    {
        ppnum_t ppnum = pmap_find_phys(pmap, va + off);
        if (ppnum)
            pmap_enter(pmap, va + off, ppnum, prot, flags, TRUE);
    }

#endif

    return (KERN_SUCCESS);
}

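/*
 * Usage sketch (an assumption, not part of the original file): a caller that
 * has already wired and mapped a buffer might switch it to write-combined:
 *
 *      kr = IOProtectCacheMode( map, vaddr, round_page_64( len ),
 *                               kIOMapWriteCombineCache );
 *
 * where vaddr and len are hypothetical; note that kIOMapDefaultCache is
 * rejected by this routine with KERN_INVALID_ARGUMENT.
 */
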
ppnum_t IOGetLastPageNumber(void)
{
    ppnum_t lastPage, highest = 0;
    unsigned int idx;

#if __ppc__
    for (idx = 0; idx < pmap_mem_regions_count; idx++)
    {
        lastPage = pmap_mem_regions[idx].mrEnd;
#elif __i386__
    for (idx = 0; idx < pmap_memory_region_count; idx++)
    {
        lastPage = pmap_memory_regions[idx].end - 1;
#else
#error arch
#endif
        if (lastPage > highest)
            highest = lastPage;
    }
    return (highest);
}


void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
    clock_get_system_nanotime(&clock_time->tv_sec, (uint32_t *) &clock_time->tv_nsec);
}