]> git.saurik.com Git - apple/xnu.git/blob - osfmk/device/iokit_rpc.c
xnu-6153.11.26.tar.gz
[apple/xnu.git] / osfmk / device / iokit_rpc.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <zone_debug.h>
29 #include <mach/boolean.h>
30 #include <mach/kern_return.h>
31 #include <mach/mig_errors.h>
32 #include <mach/port.h>
33 #include <mach/vm_param.h>
34 #include <mach/notify.h>
35 //#include <mach/mach_host_server.h>
36 #include <mach/mach_types.h>
37
38 #include <machine/machparam.h> /* spl definitions */
39
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
42
43 #include <kern/clock.h>
44 #include <kern/spl.h>
45 #include <kern/counters.h>
46 #include <kern/queue.h>
47 #include <kern/zalloc.h>
48 #include <kern/thread.h>
49 #include <kern/task.h>
50 #include <kern/sched_prim.h>
51 #include <kern/misc_protos.h>
52
53 #include <vm/pmap.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
56
57 #include <device/device_types.h>
58 #include <device/device_port.h>
59 #include <device/device_server.h>
60
61 #include <machine/machparam.h>
62
63 #if defined(__i386__) || defined(__x86_64__)
64 #include <i386/pmap.h>
65 #endif
66 #if defined(__arm__) || defined(__arm64__)
67 #include <arm/pmap.h>
68 #endif
69 #include <IOKit/IOKitServer.h>
70
/*
 * Empty linkage markers: EXTERN tags routines exported to the rest of the
 * kernel, MIGEXTERN tags routines called from MIG-generated stubs.  Both
 * expand to nothing; they exist purely as documentation at the definition.
 */
#define EXTERN
#define MIGEXTERN

/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
	io_object_t obj;

	/* Reject null or dead ports outright. */
	if (!IP_VALID(port)) {
		return NULL;
	}

	/*
	 * Hold the object-to-port binding lock while reading ip_kobject and
	 * taking the object reference, so the binding cannot change underneath
	 * us (see iokit_lock_port / iokit_unlock_port).
	 */
	iokit_lock_port(port);
	if (ip_active(port) && (ip_kotype(port) == type)) {
		obj = (io_object_t) port->ip_kobject;
		iokit_add_reference( obj, type );
	} else {
		/* Inactive port, or a kobject of some other type. */
		obj = NULL;
	}

	iokit_unlock_port(port);

	return obj;
}
99
100 MIGEXTERN io_object_t
101 iokit_lookup_object_port(
102 ipc_port_t port)
103 {
104 return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
105 }
106
107 MIGEXTERN io_object_t
108 iokit_lookup_connect_port(
109 ipc_port_t port)
110 {
111 return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
112 }
113
114 MIGEXTERN io_object_t
115 iokit_lookup_uext_object_port(
116 ipc_port_t port)
117 {
118 return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
119 }
120
/*
 * Translate a port name in 'space' to the IOKit object of kobject 'type'
 * it is bound to.  Returns NULL when the name is invalid, does not
 * translate, or names a port bound to a different kobject type; otherwise
 * returns the object with a reference added via iokit_add_reference
 * (caller is responsible for dropping it).
 */
static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (name && MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		/* On success, 'port' comes back locked and active. */
		kr = ipc_port_translate_send(space, name, &port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			require_ip_active(port);
			/*
			 * Take a port reference so the port stays alive after
			 * we drop the port lock; the binding lock taken below
			 * is a separate mutex (presumably to respect lock
			 * ordering — it is never taken under the port lock
			 * anywhere in this file).
			 */
			ip_reference(port);
			ip_unlock(port);

			iokit_lock_port(port);
			if (ip_kotype(port) == type) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_reference(obj, type);
			}
			iokit_unlock_port(port);

			/* Drop the port reference taken above. */
			ip_release(port);
		}
	}

	return obj;
}
151
152 EXTERN io_object_t
153 iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
154 {
155 return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
156 }
157
158 EXTERN io_object_t
159 iokit_lookup_connect_ref_current_task(mach_port_name_t name)
160 {
161 return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
162 }
163
164 EXTERN io_object_t
165 iokit_lookup_uext_ref_current_task(mach_port_name_t name)
166 {
167 return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
168 }
169
170 EXTERN void
171 iokit_retain_port( ipc_port_t port )
172 {
173 ipc_port_reference( port );
174 }
175
176 EXTERN void
177 iokit_release_port( ipc_port_t port )
178 {
179 ipc_port_release( port );
180 }
181
182 EXTERN void
183 iokit_release_port_send( ipc_port_t port )
184 {
185 ipc_port_release_send( port );
186 }
187
/* Mutex guarding all object-to-port bindings; defined elsewhere. */
extern lck_mtx_t iokit_obj_to_port_binding_lock;

/*
 * Lock / unlock the object-to-port binding.  The port argument is unused:
 * one global mutex serializes binding changes for every port.
 */
EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
	lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
	lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}
201
202 /*
203 * Get the port for a device.
204 * Consumes a device reference; produces a naked send right.
205 */
206
207 static ipc_port_t
208 iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
209 {
210 ipc_port_t port;
211 ipc_port_t sendPort;
212
213 if (obj == NULL) {
214 return IP_NULL;
215 }
216
217 port = iokit_port_for_object( obj, type );
218 if (port) {
219 sendPort = ipc_port_make_send( port);
220 iokit_release_port( port );
221 } else {
222 sendPort = IP_NULL;
223 }
224
225 iokit_remove_reference( obj );
226
227 return sendPort;
228 }
229
230 MIGEXTERN ipc_port_t
231 iokit_make_object_port(
232 io_object_t obj )
233 {
234 return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
235 }
236
237 MIGEXTERN ipc_port_t
238 iokit_make_connect_port(
239 io_object_t obj )
240 {
241 return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
242 }
243
244 int gIOKitPortCount;
245
246 EXTERN ipc_port_t
247 iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
248 {
249 /* Allocate port, keeping a reference for it. */
250 gIOKitPortCount++;
251 ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST;
252 if (type == IKOT_IOKIT_CONNECT) {
253 options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
254 }
255 return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options);
256 }
257
/*
 * Tear down a kobject port allocated by iokit_alloc_object_port: detach
 * the kobject under the binding lock so concurrent lookups can no longer
 * reach it, then destroy the kernel-allocated port.  Always succeeds.
 */
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
	iokit_lock_port(port);
	/* Clear the binding first; lookups then see IKOT_NONE. */
	ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

	// iokit_remove_reference( obj );
	iokit_unlock_port(port);
	ipc_port_dealloc_kernel( port);
	gIOKitPortCount--;

	return KERN_SUCCESS;
}
271
/*
 * Rebind an existing port to a (possibly different) object and kobject
 * type, under the object-to-port binding lock.  Always succeeds.
 */
EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
	iokit_lock_port(port);
	ipc_kobject_set( port, (ipc_kobject_t) obj, type);
	iokit_unlock_port(port);

	return KERN_SUCCESS;
}
281
/*
 * Create a send right for 'obj' (kobject 'type') directly in 'task''s IPC
 * space and return the resulting port name.  Returns MACH_PORT_NULL when
 * obj is NULL, no port exists, or the copyout fails; MACH_PORT_DEAD when
 * the port died underneath us.
 *
 * Note: unlike iokit_make_port_of_type, this does NOT consume the
 * caller's object reference (no iokit_remove_reference here).
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t port;
	ipc_port_t sendPort;
	mach_port_name_t name = 0;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	port = iokit_port_for_object( obj, type );
	if (port) {
		sendPort = ipc_port_make_send( port);
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	if (IP_VALID( sendPort )) {
		kern_return_t kr;
		// Remove once <rdar://problem/45522961> is fixed.
		// We need to make ith_knote NULL as ipc_object_copyout() uses
		// thread-argument-passing and its value should not be garbage
		current_thread()->ith_knote = ITH_KNOTE_NULL;
		kr = ipc_object_copyout( task->itk_space, ip_to_object(sendPort),
		    MACH_MSG_TYPE_PORT_SEND, NULL, NULL, &name);
		if (kr != KERN_SUCCESS) {
			/* Copyout failed: drop the send right we made above. */
			ipc_port_release_send( sendPort );
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		/* ipc_port_make_send can yield IP_DEAD; report a dead name. */
		name = MACH_PORT_DEAD;
	}

	return name;
}
321
322 EXTERN kern_return_t
323 iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
324 {
325 return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
326 }
327
328 /*
329 * Handle the No-More_Senders notification generated from a device port destroy.
330 * Since there are no longer any tasks which hold a send right to this device
331 * port a NMS notification has been generated.
332 */
333
/*
 * Handle a no-senders notification on an IOKit kobject port: resolve the
 * port back to its object, tell IOKit the last client went away, and — if
 * iokit_client_died() declines (returns non-KERN_SUCCESS) — re-arm the
 * no-senders request so a later drop to zero senders notifies again.
 */
static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
	ipc_port_t port;
	io_object_t obj = NULL;
	ipc_kobject_type_t type = IKOT_NONE;
	ipc_port_t notify;

	port = notification->not_header.msgh_remote_port;

	// convert a port to io_object_t.
	if (IP_VALID(port)) {
		iokit_lock_port(port);
		if (ip_active(port)) {
			obj = (io_object_t) port->ip_kobject;
			type = ip_kotype( port );
			if ((IKOT_IOKIT_OBJECT == type)
			    || (IKOT_IOKIT_CONNECT == type)
			    || (IKOT_IOKIT_IDENT == type)
			    || (IKOT_UEXT_OBJECT == type)) {
				/*
				 * NOTE(review): the reference is taken with
				 * IKOT_IOKIT_OBJECT even for the other three
				 * kobject types, while iokit_client_died()
				 * below receives the real 'type' — confirm
				 * against iokit_add_reference()'s contract.
				 */
				iokit_add_reference( obj, IKOT_IOKIT_OBJECT );
			} else {
				obj = NULL;
			}
		}
		iokit_unlock_port(port);

		if (obj) {
			mach_port_mscount_t mscount = notification->not_count;

			if (KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) {
				/* Re-request no-senders notifications on the port (if still active) */
				ip_lock(port);
				if (ip_active(port)) {
					notify = ipc_port_make_sonce_locked(port);
					ipc_port_nsrequest( port, mscount + 1, notify, &notify);
					/* port unlocked */
					if (notify != IP_NULL) {
						ipc_port_release_sonce(notify);
					}
				} else {
					ip_unlock(port);
				}
			}
			/* Drop the reference taken under the binding lock. */
			iokit_remove_reference( obj );
		}
	}
}
382
383
384 EXTERN
385 boolean_t
386 iokit_notify( mach_msg_header_t * msg )
387 {
388 switch (msg->msgh_id) {
389 case MACH_NOTIFY_NO_SENDERS:
390 iokit_no_senders((mach_no_senders_notification_t *) msg);
391 return TRUE;
392
393 case MACH_NOTIFY_PORT_DELETED:
394 case MACH_NOTIFY_PORT_DESTROYED:
395 case MACH_NOTIFY_SEND_ONCE:
396 case MACH_NOTIFY_DEAD_NAME:
397 default:
398 printf("iokit_notify: strange notification %d\n", msg->msgh_id);
399 return FALSE;
400 }
401 }
402
403 /* need to create a pmap function to generalize */
404 unsigned int
405 IODefaultCacheBits(addr64_t pa)
406 {
407 return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
408 }
409
/*
 * Enter a block mapping of physically-contiguous memory [pa, pa + length)
 * at virtual address va in 'map', with protection and cache mode derived
 * from IOKit mapping 'options'.  Returns the result of pmap_map_block().
 */
kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
	vm_prot_t prot;
	unsigned int flags;
	ppnum_t pagenum;
	pmap_t pmap = map->pmap;

	/* kIOMapReadOnly drops write permission; otherwise read/write. */
	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	pagenum = (ppnum_t)atop_64(pa);

	switch (options & kIOMapCacheMask) { /* What cache mode do we need? */
	case kIOMapDefaultCache:
	default:
		/* No explicit mode: inherit the page's existing attributes. */
		flags = IODefaultCacheBits(pa);
		break;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;

	case kIOMapRealTimeCache:
		flags = VM_WIMG_RT;
		break;
	}

	/* Record the chosen attribute against the starting physical page. */
	pmap_set_cache_attributes(pagenum, flags);

	vm_map_set_cache_attr(map, (vm_map_offset_t)va);


	// Set up a block mapped area
	return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
467
468 kern_return_t
469 IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
470 {
471 pmap_t pmap = map->pmap;
472
473 pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
474
475 return KERN_SUCCESS;
476 }
477
478 kern_return_t
479 IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
480 mach_vm_size_t __unused length, unsigned int __unused options)
481 {
482 mach_vm_size_t off;
483 vm_prot_t prot;
484 unsigned int flags;
485 pmap_t pmap = map->pmap;
486 pmap_flush_context pmap_flush_context_storage;
487 boolean_t delayed_pmap_flush = FALSE;
488
489 prot = (options & kIOMapReadOnly)
490 ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
491
492 switch (options & kIOMapCacheMask) {
493 // what cache mode do we need?
494 case kIOMapDefaultCache:
495 default:
496 return KERN_INVALID_ARGUMENT;
497
498 case kIOMapInhibitCache:
499 flags = VM_WIMG_IO;
500 break;
501
502 case kIOMapWriteThruCache:
503 flags = VM_WIMG_WTHRU;
504 break;
505
506 case kIOMapWriteCombineCache:
507 flags = VM_WIMG_WCOMB;
508 break;
509
510 case kIOMapCopybackCache:
511 flags = VM_WIMG_COPYBACK;
512 break;
513
514 case kIOMapCopybackInnerCache:
515 flags = VM_WIMG_INNERWBACK;
516 break;
517
518 case kIOMapPostedWrite:
519 flags = VM_WIMG_POSTED;
520 break;
521
522 case kIOMapRealTimeCache:
523 flags = VM_WIMG_RT;
524 break;
525 }
526
527 pmap_flush_context_init(&pmap_flush_context_storage);
528 delayed_pmap_flush = FALSE;
529
530 // enter each page's physical address in the target map
531 for (off = 0; off < length; off += page_size) {
532 ppnum_t ppnum = pmap_find_phys(pmap, va + off);
533 if (ppnum) {
534 pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
535 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
536 delayed_pmap_flush = TRUE;
537 }
538 }
539 if (delayed_pmap_flush == TRUE) {
540 pmap_flush(&pmap_flush_context_storage);
541 }
542
543 return KERN_SUCCESS;
544 }
545
546 ppnum_t
547 IOGetLastPageNumber(void)
548 {
549 #if __i386__ || __x86_64__
550 ppnum_t lastPage, highest = 0;
551 unsigned int idx;
552
553 for (idx = 0; idx < pmap_memory_region_count; idx++) {
554 lastPage = pmap_memory_regions[idx].end - 1;
555 if (lastPage > highest) {
556 highest = lastPage;
557 }
558 }
559 return highest;
560 #elif __arm__ || __arm64__
561 return 0;
562 #else
563 #error unknown arch
564 #endif
565 }
566
567
568 void IOGetTime( mach_timespec_t * clock_time);
569 void
570 IOGetTime( mach_timespec_t * clock_time)
571 {
572 clock_sec_t sec;
573 clock_nsec_t nsec;
574 clock_get_system_nanotime(&sec, &nsec);
575 clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
576 clock_time->tv_nsec = nsec;
577 }