/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 #include <zone_debug.h>
29 #include <mach/boolean.h>
30 #include <mach/kern_return.h>
31 #include <mach/mig_errors.h>
32 #include <mach/port.h>
33 #include <mach/vm_param.h>
34 #include <mach/notify.h>
35 //#include <mach/mach_host_server.h>
36 #include <mach/mach_types.h>
38 #include <machine/machparam.h> /* spl definitions */
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
43 #include <kern/clock.h>
45 #include <kern/counters.h>
46 #include <kern/queue.h>
47 #include <kern/zalloc.h>
48 #include <kern/thread.h>
49 #include <kern/task.h>
50 #include <kern/sched_prim.h>
51 #include <kern/misc_protos.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
57 #include <device/device_types.h>
58 #include <device/device_port.h>
59 #include <device/device_server.h>
61 #include <machine/machparam.h>
63 #if defined(__i386__) || defined(__x86_64__)
64 #include <i386/pmap.h>
66 #if defined(__arm__) || defined(__arm64__)
69 #include <IOKit/IOKitServer.h>
75 * Lookup a device by its port.
76 * Doesn't consume the naked send right; produces a device reference.
79 iokit_lookup_io_object(ipc_port_t port
, ipc_kobject_type_t type
)
83 if (!IP_VALID(port
)) {
87 iokit_lock_port(port
);
88 if (ip_active(port
) && (ip_kotype(port
) == type
)) {
89 obj
= (io_object_t
) port
->ip_kobject
;
90 iokit_add_reference( obj
, type
);
95 iokit_unlock_port(port
);
100 MIGEXTERN io_object_t
101 iokit_lookup_object_port(
104 return iokit_lookup_io_object(port
, IKOT_IOKIT_OBJECT
);
107 MIGEXTERN io_object_t
108 iokit_lookup_connect_port(
111 return iokit_lookup_io_object(port
, IKOT_IOKIT_CONNECT
);
114 MIGEXTERN io_object_t
115 iokit_lookup_uext_object_port(
118 return iokit_lookup_io_object(port
, IKOT_UEXT_OBJECT
);
122 iokit_lookup_object_in_space_with_port_name(mach_port_name_t name
, ipc_kobject_type_t type
, ipc_space_t space
)
124 io_object_t obj
= NULL
;
126 if (name
&& MACH_PORT_VALID(name
)) {
130 kr
= ipc_port_translate_send(space
, name
, &port
);
132 if (kr
== KERN_SUCCESS
) {
133 assert(IP_VALID(port
));
134 require_ip_active(port
);
138 iokit_lock_port(port
);
139 if (ip_kotype(port
) == type
) {
140 obj
= (io_object_t
) port
->ip_kobject
;
141 iokit_add_reference(obj
, type
);
143 iokit_unlock_port(port
);
153 iokit_lookup_object_with_port_name(mach_port_name_t name
, ipc_kobject_type_t type
, task_t task
)
155 return iokit_lookup_object_in_space_with_port_name(name
, type
, task
->itk_space
);
159 iokit_lookup_connect_ref_current_task(mach_port_name_t name
)
161 return iokit_lookup_object_in_space_with_port_name(name
, IKOT_IOKIT_CONNECT
, current_space());
165 iokit_lookup_uext_ref_current_task(mach_port_name_t name
)
167 return iokit_lookup_object_in_space_with_port_name(name
, IKOT_UEXT_OBJECT
, current_space());
171 iokit_retain_port( ipc_port_t port
)
173 ipc_port_reference( port
);
177 iokit_release_port( ipc_port_t port
)
179 ipc_port_release( port
);
183 iokit_release_port_send( ipc_port_t port
)
185 ipc_port_release_send( port
);
188 extern lck_mtx_t iokit_obj_to_port_binding_lock
;
191 iokit_lock_port( __unused ipc_port_t port
)
193 lck_mtx_lock(&iokit_obj_to_port_binding_lock
);
197 iokit_unlock_port( __unused ipc_port_t port
)
199 lck_mtx_unlock(&iokit_obj_to_port_binding_lock
);
203 * Get the port for a device.
204 * Consumes a device reference; produces a naked send right.
208 iokit_make_port_of_type(io_object_t obj
, ipc_kobject_type_t type
)
217 port
= iokit_port_for_object( obj
, type
);
219 sendPort
= ipc_port_make_send( port
);
220 iokit_release_port( port
);
225 iokit_remove_reference( obj
);
231 iokit_make_object_port(
234 return iokit_make_port_of_type(obj
, IKOT_IOKIT_OBJECT
);
238 iokit_make_connect_port(
241 return iokit_make_port_of_type(obj
, IKOT_IOKIT_CONNECT
);
247 iokit_alloc_object_port( io_object_t obj
, ipc_kobject_type_t type
)
249 /* Allocate port, keeping a reference for it. */
251 ipc_kobject_alloc_options_t options
= IPC_KOBJECT_ALLOC_NSREQUEST
;
252 if (type
== IKOT_IOKIT_CONNECT
) {
253 options
|= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND
;
255 return ipc_kobject_alloc_port((ipc_kobject_t
) obj
, type
, options
);
259 iokit_destroy_object_port( ipc_port_t port
)
261 iokit_lock_port(port
);
262 ipc_kobject_set( port
, IKO_NULL
, IKOT_NONE
);
264 // iokit_remove_reference( obj );
265 iokit_unlock_port(port
);
266 ipc_port_dealloc_kernel( port
);
273 iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
)
275 iokit_lock_port(port
);
276 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
277 iokit_unlock_port(port
);
282 EXTERN mach_port_name_t
283 iokit_make_send_right( task_t task
, io_object_t obj
, ipc_kobject_type_t type
)
287 mach_port_name_t name
= 0;
290 return MACH_PORT_NULL
;
293 port
= iokit_port_for_object( obj
, type
);
295 sendPort
= ipc_port_make_send( port
);
296 iokit_release_port( port
);
301 if (IP_VALID( sendPort
)) {
303 // Remove once <rdar://problem/45522961> is fixed.
304 // We need to make ith_knote NULL as ipc_object_copyout() uses
305 // thread-argument-passing and its value should not be garbage
306 current_thread()->ith_knote
= ITH_KNOTE_NULL
;
307 kr
= ipc_object_copyout( task
->itk_space
, ip_to_object(sendPort
),
308 MACH_MSG_TYPE_PORT_SEND
, NULL
, NULL
, &name
);
309 if (kr
!= KERN_SUCCESS
) {
310 ipc_port_release_send( sendPort
);
311 name
= MACH_PORT_NULL
;
313 } else if (sendPort
== IP_NULL
) {
314 name
= MACH_PORT_NULL
;
315 } else if (sendPort
== IP_DEAD
) {
316 name
= MACH_PORT_DEAD
;
323 iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
)
325 return mach_port_mod_refs( task
->itk_space
, name
, MACH_PORT_RIGHT_SEND
, delta
);
329 * Handle the No-More_Senders notification generated from a device port destroy.
330 * Since there are no longer any tasks which hold a send right to this device
331 * port a NMS notification has been generated.
335 iokit_no_senders( mach_no_senders_notification_t
* notification
)
338 io_object_t obj
= NULL
;
339 ipc_kobject_type_t type
= IKOT_NONE
;
342 port
= notification
->not_header
.msgh_remote_port
;
344 // convert a port to io_object_t.
345 if (IP_VALID(port
)) {
346 iokit_lock_port(port
);
347 if (ip_active(port
)) {
348 obj
= (io_object_t
) port
->ip_kobject
;
349 type
= ip_kotype( port
);
350 if ((IKOT_IOKIT_OBJECT
== type
)
351 || (IKOT_IOKIT_CONNECT
== type
)
352 || (IKOT_IOKIT_IDENT
== type
)
353 || (IKOT_UEXT_OBJECT
== type
)) {
354 iokit_add_reference( obj
, IKOT_IOKIT_OBJECT
);
359 iokit_unlock_port(port
);
362 mach_port_mscount_t mscount
= notification
->not_count
;
364 if (KERN_SUCCESS
!= iokit_client_died( obj
, port
, type
, &mscount
)) {
365 /* Re-request no-senders notifications on the port (if still active) */
367 if (ip_active(port
)) {
368 notify
= ipc_port_make_sonce_locked(port
);
369 ipc_port_nsrequest( port
, mscount
+ 1, notify
, ¬ify
);
371 if (notify
!= IP_NULL
) {
372 ipc_port_release_sonce(notify
);
378 iokit_remove_reference( obj
);
386 iokit_notify( mach_msg_header_t
* msg
)
388 switch (msg
->msgh_id
) {
389 case MACH_NOTIFY_NO_SENDERS
:
390 iokit_no_senders((mach_no_senders_notification_t
*) msg
);
393 case MACH_NOTIFY_PORT_DELETED
:
394 case MACH_NOTIFY_PORT_DESTROYED
:
395 case MACH_NOTIFY_SEND_ONCE
:
396 case MACH_NOTIFY_DEAD_NAME
:
398 printf("iokit_notify: strange notification %d\n", msg
->msgh_id
);
403 /* need to create a pmap function to generalize */
405 IODefaultCacheBits(addr64_t pa
)
407 return pmap_cache_attributes((ppnum_t
)(pa
>> PAGE_SHIFT
));
411 IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
412 mach_vm_size_t length
, unsigned int options
)
417 pmap_t pmap
= map
->pmap
;
419 prot
= (options
& kIOMapReadOnly
)
420 ? VM_PROT_READ
: (VM_PROT_READ
| VM_PROT_WRITE
);
422 pagenum
= (ppnum_t
)atop_64(pa
);
424 switch (options
& kIOMapCacheMask
) { /* What cache mode do we need? */
425 case kIOMapDefaultCache
:
427 flags
= IODefaultCacheBits(pa
);
430 case kIOMapInhibitCache
:
434 case kIOMapWriteThruCache
:
435 flags
= VM_WIMG_WTHRU
;
438 case kIOMapWriteCombineCache
:
439 flags
= VM_WIMG_WCOMB
;
442 case kIOMapCopybackCache
:
443 flags
= VM_WIMG_COPYBACK
;
446 case kIOMapCopybackInnerCache
:
447 flags
= VM_WIMG_INNERWBACK
;
450 case kIOMapPostedWrite
:
451 flags
= VM_WIMG_POSTED
;
454 case kIOMapRealTimeCache
:
459 pmap_set_cache_attributes(pagenum
, flags
);
461 vm_map_set_cache_attr(map
, (vm_map_offset_t
)va
);
464 // Set up a block mapped area
465 return pmap_map_block(pmap
, va
, pagenum
, (uint32_t) atop_64(round_page_64(length
)), prot
, 0, 0);
469 IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
)
471 pmap_t pmap
= map
->pmap
;
473 pmap_remove(pmap
, trunc_page_64(va
), round_page_64(va
+ length
));
479 IOProtectCacheMode(vm_map_t __unused map
, mach_vm_address_t __unused va
,
480 mach_vm_size_t __unused length
, unsigned int __unused options
)
485 pmap_t pmap
= map
->pmap
;
486 pmap_flush_context pmap_flush_context_storage
;
487 boolean_t delayed_pmap_flush
= FALSE
;
489 prot
= (options
& kIOMapReadOnly
)
490 ? VM_PROT_READ
: (VM_PROT_READ
| VM_PROT_WRITE
);
492 switch (options
& kIOMapCacheMask
) {
493 // what cache mode do we need?
494 case kIOMapDefaultCache
:
496 return KERN_INVALID_ARGUMENT
;
498 case kIOMapInhibitCache
:
502 case kIOMapWriteThruCache
:
503 flags
= VM_WIMG_WTHRU
;
506 case kIOMapWriteCombineCache
:
507 flags
= VM_WIMG_WCOMB
;
510 case kIOMapCopybackCache
:
511 flags
= VM_WIMG_COPYBACK
;
514 case kIOMapCopybackInnerCache
:
515 flags
= VM_WIMG_INNERWBACK
;
518 case kIOMapPostedWrite
:
519 flags
= VM_WIMG_POSTED
;
522 case kIOMapRealTimeCache
:
527 pmap_flush_context_init(&pmap_flush_context_storage
);
528 delayed_pmap_flush
= FALSE
;
530 // enter each page's physical address in the target map
531 for (off
= 0; off
< length
; off
+= page_size
) {
532 ppnum_t ppnum
= pmap_find_phys(pmap
, va
+ off
);
534 pmap_enter_options(pmap
, va
+ off
, ppnum
, prot
, VM_PROT_NONE
, flags
, TRUE
,
535 PMAP_OPTIONS_NOFLUSH
, (void *)&pmap_flush_context_storage
);
536 delayed_pmap_flush
= TRUE
;
539 if (delayed_pmap_flush
== TRUE
) {
540 pmap_flush(&pmap_flush_context_storage
);
547 IOGetLastPageNumber(void)
549 #if __i386__ || __x86_64__
550 ppnum_t lastPage
, highest
= 0;
553 for (idx
= 0; idx
< pmap_memory_region_count
; idx
++) {
554 lastPage
= pmap_memory_regions
[idx
].end
- 1;
555 if (lastPage
> highest
) {
560 #elif __arm__ || __arm64__
568 void IOGetTime( mach_timespec_t
* clock_time
);
570 IOGetTime( mach_timespec_t
* clock_time
)
574 clock_get_system_nanotime(&sec
, &nsec
);
575 clock_time
->tv_sec
= (typeof(clock_time
->tv_sec
))sec
;
576 clock_time
->tv_nsec
= nsec
;