/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>          /* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>
#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#if defined(__arm__) || defined(__arm64__)
#include <arm/pmap.h>
#endif
#include <IOKit/IOKitServer.h>
#define EXTERN
#define MIGEXTERN

/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
static io_object_t
iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type)
{
	io_object_t obj;

	if (!IP_VALID(port)) {
		return NULL;
	}

	iokit_lock_port(port);
	if (ip_active(port) && (ip_kotype(port) == type)) {
		obj = (io_object_t) port->ip_kobject;
		iokit_add_reference( obj, type );
	} else {
		obj = NULL;
	}
	iokit_unlock_port(port);

	return obj;
}
MIGEXTERN io_object_t
iokit_lookup_object_port(
	ipc_port_t port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT);
}

MIGEXTERN io_object_t
iokit_lookup_connect_port(
	ipc_port_t port)
{
	return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT);
}
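/*
 * Illustrative sketch (hypothetical usage, compiled out): a caller holding a
 * naked send right can translate it into an object reference with the lookup
 * routines above. The lookup does not consume the send right; the caller only
 * has to balance the reference taken by iokit_add_reference() with
 * iokit_remove_reference(). The helper name below is hypothetical.
 */
#if 0
static void
iokit_lookup_usage_sketch(ipc_port_t port)
{
	io_object_t obj;

	/* Produces an object reference; the send right on 'port' is untouched. */
	obj = iokit_lookup_connect_port(port);
	if (obj != NULL) {
		/* ... use the object ... */
		iokit_remove_reference(obj);    /* drop the lookup's reference */
	}
}
#endif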
static io_object_t
iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (name && MACH_PORT_VALID(name)) {
		ipc_port_t port;
		kern_return_t kr;

		kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));

			ip_reference(port);
			ip_unlock(port);

			iokit_lock_port(port);
			if (ip_active(port) && (ip_kotype(port) == type)) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_reference(obj, type);
			}
			iokit_unlock_port(port);

			ip_release(port);
		}
	}

	return obj;
}
io_object_t
iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task)
{
	return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space);
}

EXTERN io_object_t
iokit_lookup_connect_ref_current_task(mach_port_name_t name)
{
	return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space());
}
EXTERN void
iokit_retain_port( ipc_port_t port )
{
	ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
	ipc_port_release( port );
}

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
	ipc_port_release_send( port );
}
extern lck_mtx_t iokit_obj_to_port_binding_lock;

EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
	lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
	lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}
/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */
static ipc_port_t
iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type)
{
	ipc_port_t port;
	ipc_port_t sendPort;

	if (obj == NULL) {
		return IP_NULL;
	}

	port = iokit_port_for_object( obj, type );
	if (port) {
		sendPort = ipc_port_make_send( port );
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	iokit_remove_reference( obj );

	return sendPort;
}
MIGEXTERN ipc_port_t
iokit_make_object_port(
	io_object_t obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT);
}

MIGEXTERN ipc_port_t
iokit_make_connect_port(
	io_object_t obj )
{
	return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT);
}
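/*
 * Illustrative sketch (hypothetical usage, compiled out): the ownership
 * convention of the two routines above is the mirror image of the lookup
 * path. The caller passes in a device reference, which is always consumed,
 * and gets back a naked send right that it must eventually release with
 * iokit_release_port_send(). The helper name below is hypothetical.
 */
#if 0
static void
iokit_make_port_sketch(io_object_t obj)
{
	ipc_port_t sendPort;

	/* consumes the reference on 'obj'; produces a naked send right */
	sendPort = iokit_make_object_port(obj);
	if (IP_VALID(sendPort)) {
		/* ... move the right into a message or a task's IPC space ... */
		iokit_release_port_send(sendPort);
	}
}
#endif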
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t notify;
	ipc_port_t port;

	/* Allocate port, keeping a reference for it. */
	port = ipc_port_alloc_kernel();
	if (port == IP_NULL) {
		return IP_NULL;
	}

	/* set kobject & type */
	ipc_kobject_set( port, (ipc_kobject_t) obj, type );

	/* Request no-senders notifications on the port. */
	ip_lock( port );
	notify = ipc_port_make_sonce_locked( port );
	ipc_port_nsrequest( port, 1, notify, &notify );
	/* port unlocked */
	assert( notify == IP_NULL );

	return port;
}
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
	iokit_lock_port(port);
	ipc_kobject_set( port, IKO_NULL, IKOT_NONE );

//    iokit_remove_reference( obj );

	iokit_unlock_port(port);
	ipc_port_dealloc_kernel( port );

	return KERN_SUCCESS;
}
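/*
 * Illustrative sketch (hypothetical usage, compiled out): the expected
 * pairing of the two routines above. A kernel object gets a port bound to it
 * with iokit_alloc_object_port(); when the object goes away, the binding is
 * cleared and the port deallocated with iokit_destroy_object_port(). The
 * helper name below is hypothetical.
 */
#if 0
static void
iokit_object_port_lifecycle_sketch(io_object_t obj)
{
	ipc_port_t port;

	port = iokit_alloc_object_port(obj, IKOT_IOKIT_OBJECT);
	if (port != IP_NULL) {
		/* ... hand out send rights, service requests ... */
		iokit_destroy_object_port(port);
	}
}
#endif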
EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
	iokit_lock_port(port);
	ipc_kobject_set( port, (ipc_kobject_t) obj, type );
	iokit_unlock_port(port);

	return KERN_SUCCESS;
}
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
	ipc_port_t port;
	ipc_port_t sendPort;
	mach_port_name_t name = 0;

	if (obj == NULL) {
		return MACH_PORT_NULL;
	}

	port = iokit_port_for_object( obj, type );
	if (port) {
		sendPort = ipc_port_make_send( port );
		iokit_release_port( port );
	} else {
		sendPort = IP_NULL;
	}

	if (IP_VALID( sendPort )) {
		kern_return_t kr;
		kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
		    MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
		if (kr != KERN_SUCCESS) {
			ipc_port_release_send( sendPort );
			name = MACH_PORT_NULL;
		}
	} else if (sendPort == IP_NULL) {
		name = MACH_PORT_NULL;
	} else if (sendPort == IP_DEAD) {
		name = MACH_PORT_DEAD;
	}

	return name;
}
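/*
 * Illustrative sketch (hypothetical usage, compiled out): iokit_make_send_right()
 * above hands a send right directly to another task by copying it out into
 * that task's IPC space and returning the resulting port name, or
 * MACH_PORT_NULL / MACH_PORT_DEAD when no right could be produced. The helper
 * name below is hypothetical.
 */
#if 0
static mach_port_name_t
iokit_export_to_task_sketch(task_t task, io_object_t obj)
{
	mach_port_name_t name;

	name = iokit_make_send_right(task, obj, IKOT_IOKIT_OBJECT);
	if (!MACH_PORT_VALID(name)) {
		/* copyout failed or the port was dead; the right was already dropped */
	}
	return name;
}
#endif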
EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
	return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta );
}
/*
 * Handle the No-More-Senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
 * port a NMS notification has been generated.
 */
static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
	ipc_port_t port;
	io_object_t obj = NULL;
	ipc_kobject_type_t type = IKOT_NONE;
	ipc_port_t notify;

	port = (ipc_port_t) notification->not_header.msgh_remote_port;

	// convert a port to io_object_t.
	if (IP_VALID(port)) {
		iokit_lock_port(port);
		if (ip_active(port)) {
			obj = (io_object_t) port->ip_kobject;
			type = ip_kotype( port );
			if ((IKOT_IOKIT_OBJECT == type)
			    || (IKOT_IOKIT_CONNECT == type)
			    || (IKOT_IOKIT_IDENT == type)) {
				iokit_add_reference( obj, IKOT_IOKIT_OBJECT );
			} else {
				obj = NULL;
			}
		}
		iokit_unlock_port(port);

		if (obj) {
			mach_port_mscount_t mscount = notification->not_count;

			if (KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) {
				/* Re-request no-senders notifications on the port (if still active) */
				ip_lock(port);
				if (ip_active(port)) {
					notify = ipc_port_make_sonce_locked(port);
					ipc_port_nsrequest( port, mscount + 1, notify, &notify );
					/* port unlocked */
					if (notify != IP_NULL) {
						ipc_port_release_sonce(notify);
					}
				} else {
					ip_unlock(port);
				}
			}
			iokit_remove_reference( obj );
		}
	}
}
EXTERN boolean_t
iokit_notify( mach_msg_header_t * msg )
{
	switch (msg->msgh_id) {
	case MACH_NOTIFY_NO_SENDERS:
		iokit_no_senders((mach_no_senders_notification_t *) msg);
		return TRUE;

	case MACH_NOTIFY_PORT_DELETED:
	case MACH_NOTIFY_PORT_DESTROYED:
	case MACH_NOTIFY_SEND_ONCE:
	case MACH_NOTIFY_DEAD_NAME:
	default:
		printf("iokit_notify: strange notification %d\n", msg->msgh_id);
		return FALSE;
	}
}
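/*
 * Illustrative sketch (hypothetical usage, compiled out): iokit_notify() is
 * the demux point for Mach notification messages aimed at IOKit kernel ports.
 * A hypothetical dispatcher simply forwards the raw message header and falls
 * back to its own handling when iokit_notify() returns FALSE.
 */
#if 0
static void
iokit_notify_dispatch_sketch(mach_msg_header_t * msg)
{
	if (!iokit_notify(msg)) {
		/* not a notification this code understands; drop or log it */
	}
}
#endif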
/* need to create a pmap function to generalize */
unsigned int
IODefaultCacheBits(addr64_t pa)
{
	return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT));
}
kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
    mach_vm_size_t length, unsigned int options)
{
	vm_prot_t    prot;
	unsigned int flags;
	ppnum_t      pagenum;
	pmap_t       pmap = map->pmap;

	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	pagenum = (ppnum_t)atop_64(pa);

	switch (options & kIOMapCacheMask) {            /* What cache mode do we need? */
	case kIOMapDefaultCache:
	default:
		flags = IODefaultCacheBits(pa);
		break;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;
	}

	pmap_set_cache_attributes(pagenum, flags);

	vm_map_set_cache_attr(map, (vm_map_offset_t)va);

	// Set up a block mapped area
	return pmap_map_block(pmap, va, pagenum,
	    (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
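/*
 * Illustrative sketch (hypothetical usage, compiled out): mapping one page of
 * device registers uncached and read/write into a target map with
 * IOMapPages(). The address values are placeholders, not real constants.
 */
#if 0
static kern_return_t
iomap_example_sketch(vm_map_t map)
{
	mach_vm_address_t va = 0;       /* assumed: a reserved VA range in 'map' */
	mach_vm_address_t pa = 0;       /* assumed: physical base of the registers */

	return IOMapPages(map, va, pa, PAGE_SIZE, kIOMapInhibitCache);
}
#endif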
kern_return_t
IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
	pmap_t pmap = map->pmap;

	pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

	return KERN_SUCCESS;
}
kern_return_t
IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
    mach_vm_size_t __unused length, unsigned int __unused options)
{
	mach_vm_size_t off;
	vm_prot_t      prot;
	unsigned int   flags;
	pmap_t         pmap = map->pmap;
	pmap_flush_context pmap_flush_context_storage;
	boolean_t      delayed_pmap_flush = FALSE;

	prot = (options & kIOMapReadOnly)
	    ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);

	switch (options & kIOMapCacheMask) {
	// what cache mode do we need?
	case kIOMapDefaultCache:
	default:
		return KERN_INVALID_ARGUMENT;

	case kIOMapInhibitCache:
		flags = VM_WIMG_IO;
		break;

	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		break;

	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		break;

	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		break;

	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		break;

	case kIOMapPostedWrite:
		flags = VM_WIMG_POSTED;
		break;
	}

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	//  enter each page's physical address in the target map
	for (off = 0; off < length; off += page_size) {
		ppnum_t ppnum = pmap_find_phys(pmap, va + off);
		if (ppnum) {
			pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
			    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
			delayed_pmap_flush = TRUE;
		}
	}
	if (delayed_pmap_flush == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	return KERN_SUCCESS;
}
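/*
 * Illustrative sketch (hypothetical usage, compiled out): IOProtectCacheMode()
 * above rewrites the pmap entries of an already mapped range, so a caller
 * could, for example, downgrade a buffer to read-only with write-combined
 * caching after it has been filled. The range arguments are placeholders.
 */
#if 0
static kern_return_t
ioprotect_example_sketch(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
	return IOProtectCacheMode(map, va, length,
	    kIOMapReadOnly | kIOMapWriteCombineCache);
}
#endif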
ppnum_t
IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t lastPage, highest = 0;
	unsigned int idx;

	for (idx = 0; idx < pmap_memory_region_count; idx++) {
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest) {
			highest = lastPage;
		}
	}
	return highest;
#elif __arm__ || __arm64__
	return 0;
#else
#error unknown arch
#endif
}
void IOGetTime( mach_timespec_t * clock_time);
void
IOGetTime( mach_timespec_t * clock_time)
{
	clock_sec_t sec;
	clock_nsec_t nsec;

	clock_get_system_nanotime(&sec, &nsec);
	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
	clock_time->tv_nsec = nsec;
}