/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h> /* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#if defined(__arm__) || defined(__arm64__)
#include <arm/pmap.h>
#endif
#include <IOKit/IOKitServer.h>

#define EXTERN
#define MIGEXTERN

LCK_GRP_DECLARE(dev_lck_grp, "device");
LCK_MTX_DECLARE(iokit_obj_to_port_binding_lock, &dev_lck_grp);

/*
 * Look up a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
    io_object_t obj;

    if (!IP_VALID(port)) {
        return NULL;
    }

    iokit_lock_port(port);
    if (ip_active(port) && (ip_kotype(port) == type)) {
        obj = (io_object_t) ip_get_kobject(port);
        iokit_add_reference( obj, type );
    } else {
        obj = NULL;
    }

    iokit_unlock_port(port);

    return obj;
}
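
/*
 * Usage sketch (illustrative only; the surrounding caller is hypothetical):
 * a kernel-side consumer holding a naked send right resolves it to an
 * object reference, and must drop that reference when done with it.
 *
 *     io_object_t obj = iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
 *     if (obj != NULL) {
 *         // ... use the object ...
 *         iokit_remove_reference(obj);   // balance the reference taken above
 *     }
 */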

MIGEXTERN io_object_t
iokit_lookup_object_port(
    ipc_port_t port)
{
    return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
}

MIGEXTERN io_object_t
iokit_lookup_connect_port(
    ipc_port_t port)
{
    return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
}

MIGEXTERN io_object_t
iokit_lookup_uext_object_port(
    ipc_port_t port)
{
    return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT);
}

static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
    io_object_t obj = NULL;

    if (name && MACH_PORT_VALID(name)) {
        ipc_port_t port;
        kern_return_t kr;

        kr = ipc_port_translate_send(space, name, &port);

        if (kr == KERN_SUCCESS) {
            assert(IP_VALID(port));
            require_ip_active(port);
            ip_reference(port);
            ip_unlock(port);

            iokit_lock_port(port);
            if (ip_kotype(port) == type) {
                obj = (io_object_t) ip_get_kobject(port);
                iokit_add_reference(obj, type);
            }
            iokit_unlock_port(port);

            ip_release(port);
        }
    }

    return obj;
}

EXTERN io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
{
    return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)
{
    return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
}

EXTERN io_object_t
iokit_lookup_uext_ref_current_task(mach_port_name_t name)
{
    return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space());
}
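
/*
 * Usage sketch (illustrative; kIOReturnBadArgument is assumed from the
 * IOKit headers, not this file): translating a user-supplied port name
 * in the current task into a connection reference.
 *
 *     io_object_t conn = iokit_lookup_connect_ref_current_task(name);
 *     if (conn == NULL) {
 *         return kIOReturnBadArgument;   // stale name or wrong kobject type
 *     }
 *     // ... use the connection object ...
 *     iokit_remove_reference(conn);
 */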

EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
    ipc_port_release_send( port );
}

EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */

static ipc_port_t
iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
{
    ipc_port_t port;
    ipc_port_t sendPort;

    if (obj == NULL) {
        return IP_NULL;
    }

    port = iokit_port_for_object( obj, type );
    if (port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else {
        sendPort = IP_NULL;
    }

    iokit_remove_reference( obj );

    return sendPort;
}

MIGEXTERN ipc_port_t
iokit_make_object_port(
    io_object_t obj )
{
    return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port(
    io_object_t obj )
{
    return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
}

int gIOKitPortCount;

EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    /* Allocate port, keeping a reference for it. */
    gIOKitPortCount++;
    ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST;
    if (type == IKOT_IOKIT_CONNECT) {
        options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
    }
    if (type == IKOT_UEXT_OBJECT) {
        ipc_label_t label = IPC_LABEL_DEXT;
        return ipc_kobject_alloc_labeled_port((ipc_kobject_t) obj, type, label, options);
    } else {
        return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options);
    }
}
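
/*
 * Lifecycle sketch (illustrative only): a port allocated here stays bound
 * to its object until iokit_destroy_object_port() tears the binding down.
 *
 *     ipc_port_t port = iokit_alloc_object_port(obj, IKOT_IOKIT_OBJECT);
 *     // ... publish send rights via ipc_port_make_send(port) ...
 *     iokit_destroy_object_port(port);   // when the object terminates
 */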

EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//  iokit_remove_reference( obj );
    iokit_unlock_port(port);
    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return KERN_SUCCESS;
}

EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return KERN_SUCCESS;
}

EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t port;
    ipc_port_t sendPort;
    mach_port_name_t name = 0;

    if (obj == NULL) {
        return MACH_PORT_NULL;
    }

    port = iokit_port_for_object( obj, type );
    if (port) {
        sendPort = ipc_port_make_send( port);
        iokit_release_port( port );
    } else {
        sendPort = IP_NULL;
    }

    if (IP_VALID( sendPort )) {
        kern_return_t kr;
        // Remove once <rdar://problem/45522961> is fixed.
        // We need to make ith_knote NULL as ipc_object_copyout() uses
        // thread-argument-passing and its value should not be garbage
        current_thread()->ith_knote = ITH_KNOTE_NULL;
        kr = ipc_object_copyout( task->itk_space, ip_to_object(sendPort),
            MACH_MSG_TYPE_PORT_SEND, NULL, NULL, &name);
        if (kr != KERN_SUCCESS) {
            ipc_port_release_send( sendPort );
            name = MACH_PORT_NULL;
        }
    } else if (sendPort == IP_NULL) {
        name = MACH_PORT_NULL;
    } else if (sendPort == IP_DEAD) {
        name = MACH_PORT_DEAD;
    }

    return name;
}
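
/*
 * Usage sketch (illustrative; error handling abbreviated): publishing a
 * send right directly into a task's IPC space. Note that, unlike
 * iokit_make_port_of_type(), this routine does not consume the caller's
 * device reference.
 *
 *     mach_port_name_t name = iokit_make_send_right(task, obj, IKOT_IOKIT_OBJECT);
 *     if (!MACH_PORT_VALID(name)) {
 *         // MACH_PORT_NULL: no port for the object; MACH_PORT_DEAD: port died
 *     }
 */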

EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
    return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
}

/*
 * Handle the no-more-senders notification generated when the last send
 * right to a device port goes away. Since no task holds a send right to
 * this device port any longer, an NMS notification has been generated.
 */

static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t port;
    io_object_t obj = NULL;
    ipc_kobject_type_t type = IKOT_NONE;
    ipc_port_t notify;

    port = notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if (IP_VALID(port)) {
        iokit_lock_port(port);
        if (ip_active(port)) {
            obj = (io_object_t) ip_get_kobject(port);
            type = ip_kotype( port );
            if ((IKOT_IOKIT_OBJECT == type)
                || (IKOT_IOKIT_CONNECT == type)
                || (IKOT_IOKIT_IDENT == type)
                || (IKOT_UEXT_OBJECT == type)) {
                iokit_add_reference( obj, IKOT_IOKIT_OBJECT );
            } else {
                obj = NULL;
            }
        }
        iokit_unlock_port(port);

        if (obj) {
            mach_port_mscount_t mscount = notification->not_count;

            if (KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) {
                /* Re-request no-senders notifications on the port (if still active) */
                ip_lock(port);
                if (ip_active(port)) {
                    notify = ipc_port_make_sonce_locked(port);
                    ipc_port_nsrequest( port, mscount + 1, notify, &notify);
                    /* port unlocked */
                    if (notify != IP_NULL) {
                        ipc_port_release_sonce(notify);
                    }
                } else {
                    ip_unlock(port);
                }
            }
            iokit_remove_reference( obj );
        }
    }
}

EXTERN
boolean_t
iokit_notify( mach_msg_header_t * msg )
{
    switch (msg->msgh_id) {
    case MACH_NOTIFY_NO_SENDERS:
        iokit_no_senders((mach_no_senders_notification_t *) msg);
        return TRUE;

    case MACH_NOTIFY_PORT_DELETED:
    case MACH_NOTIFY_PORT_DESTROYED:
    case MACH_NOTIFY_SEND_ONCE:
    case MACH_NOTIFY_DEAD_NAME:
    default:
        printf("iokit_notify: strange notification %d\n", msg->msgh_id);
        return FALSE;
    }
}
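
/*
 * Dispatch sketch (illustrative; the receive path shown is hypothetical):
 * the kernel IPC layer hands kobject notification messages to
 * iokit_notify(), which claims no-senders notifications and rejects
 * everything else.
 *
 *     // hdr: a notification message destined for an IOKit kobject port
 *     if (!iokit_notify(hdr)) {
 *         // not a no-senders notification; it was logged and dropped
 *     }
 */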

kern_return_t
iokit_label_dext_task(task_t task)
{
    return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT);
}

/* need to create a pmap function to generalize */
unsigned int
IODefaultCacheBits(addr64_t pa)
{
    return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
}

kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
    vm_prot_t prot;
    unsigned int flags;
    ppnum_t pagenum;
    pmap_t pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch (options & kIOMapCacheMask) { /* What cache mode do we need? */
    case kIOMapDefaultCache:
    default:
        flags = IODefaultCacheBits(pa);
        break;

    case kIOMapInhibitCache:
        flags = VM_WIMG_IO;
        break;

    case kIOMapWriteThruCache:
        flags = VM_WIMG_WTHRU;
        break;

    case kIOMapWriteCombineCache:
        flags = VM_WIMG_WCOMB;
        break;

    case kIOMapCopybackCache:
        flags = VM_WIMG_COPYBACK;
        break;

    case kIOMapCopybackInnerCache:
        flags = VM_WIMG_INNERWBACK;
        break;

    case kIOMapPostedWrite:
        flags = VM_WIMG_POSTED;
        break;

    case kIOMapRealTimeCache:
        flags = VM_WIMG_RT;
        break;
    }

    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);

    // Set up a block mapped area
    return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
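
/*
 * Usage sketch (illustrative; the map, address, and size are hypothetical):
 * mapping one page of device registers uncached, then tearing it down.
 *
 *     kern_return_t kr = IOMapPages(map, va, regs_pa, PAGE_SIZE,
 *         kIOMapInhibitCache);
 *     // ... device register accesses through va ...
 *     IOUnmapPages(map, va, PAGE_SIZE);
 */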

kern_return_t
IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return KERN_SUCCESS;
}

kern_return_t
IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
    mach_vm_size_t length, unsigned int options)
{
    mach_vm_size_t off;
    vm_prot_t prot;
    unsigned int flags;
    pmap_t pmap = map->pmap;
    pmap_flush_context pmap_flush_context_storage;
    boolean_t delayed_pmap_flush = FALSE;

    prot = (options & kIOMapReadOnly)
        ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

    switch (options & kIOMapCacheMask) {
    // what cache mode do we need?
    case kIOMapDefaultCache:
    default:
        return KERN_INVALID_ARGUMENT;

    case kIOMapInhibitCache:
        flags = VM_WIMG_IO;
        break;

    case kIOMapWriteThruCache:
        flags = VM_WIMG_WTHRU;
        break;

    case kIOMapWriteCombineCache:
        flags = VM_WIMG_WCOMB;
        break;

    case kIOMapCopybackCache:
        flags = VM_WIMG_COPYBACK;
        break;

    case kIOMapCopybackInnerCache:
        flags = VM_WIMG_INNERWBACK;
        break;

    case kIOMapPostedWrite:
        flags = VM_WIMG_POSTED;
        break;

    case kIOMapRealTimeCache:
        flags = VM_WIMG_RT;
        break;
    }

    pmap_flush_context_init(&pmap_flush_context_storage);
    delayed_pmap_flush = FALSE;

    // enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size) {
        ppnum_t ppnum = pmap_find_phys(pmap, va + off);
        if (ppnum) {
            pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
                PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
            delayed_pmap_flush = TRUE;
        }
    }
    if (delayed_pmap_flush == TRUE) {
        pmap_flush(&pmap_flush_context_storage);
    }

    return KERN_SUCCESS;
}
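
/*
 * Usage sketch (illustrative): switching an existing mapping to
 * write-combined, e.g. for a frame buffer. kIOMapDefaultCache is rejected
 * with KERN_INVALID_ARGUMENT because an explicit target mode is required.
 *
 *     kern_return_t kr = IOProtectCacheMode(map, va, length,
 *         kIOMapWriteCombineCache);
 */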

ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
    ppnum_t lastPage, highest = 0;
    unsigned int idx;

    for (idx = 0; idx < pmap_memory_region_count; idx++) {
        lastPage = pmap_memory_regions[idx].end - 1;
        if (lastPage > highest) {
            highest = lastPage;
        }
    }
    return highest;
#elif __arm__ || __arm64__
    return 0;
#else
#error unknown arch
#endif
}

void IOGetTime( mach_timespec_t * clock_time);
void
IOGetTime( mach_timespec_t * clock_time)
{
    clock_sec_t sec;
    clock_nsec_t nsec;
    clock_get_system_nanotime(&sec, &nsec);
    clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
    clock_time->tv_nsec = nsec;
}