/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>		/* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#if defined(__arm__) || defined(__arm64__)
#include <arm/pmap.h>
#endif
#include <IOKit/IOKitServer.h>
75 * Lookup a device by its port.
76 * Doesn't consume the naked send right; produces a device reference.
79 iokit_lookup_io_object(ipc_port_t port
, ipc_kobject_type_t type
)
86 iokit_lock_port(port
);
87 if (ip_active(port
) && (ip_kotype(port
) == type
)) {
88 obj
= (io_object_t
) port
->ip_kobject
;
89 iokit_add_reference( obj
, type
);
94 iokit_unlock_port(port
);
100 iokit_lookup_object_port(
103 return (iokit_lookup_io_object(port
, IKOT_IOKIT_OBJECT
));
106 MIGEXTERN io_object_t
107 iokit_lookup_connect_port(
110 return (iokit_lookup_io_object(port
, IKOT_IOKIT_CONNECT
));
114 iokit_lookup_object_in_space_with_port_name(mach_port_name_t name
, ipc_kobject_type_t type
, ipc_space_t space
)
116 io_object_t obj
= NULL
;
118 if (name
&& MACH_PORT_VALID(name
)) {
122 kr
= ipc_object_translate(space
, name
, MACH_PORT_RIGHT_SEND
, (ipc_object_t
*)&port
);
124 if (kr
== KERN_SUCCESS
) {
125 assert(IP_VALID(port
));
130 iokit_lock_port(port
);
131 if (ip_active(port
) && (ip_kotype(port
) == type
)) {
132 obj
= (io_object_t
) port
->ip_kobject
;
133 iokit_add_reference(obj
, type
);
135 iokit_unlock_port(port
);
145 iokit_lookup_object_with_port_name(mach_port_name_t name
, ipc_kobject_type_t type
, task_t task
)
147 return (iokit_lookup_object_in_space_with_port_name(name
, type
, task
->itk_space
));
151 iokit_lookup_connect_ref_current_task(mach_port_name_t name
)
153 return (iokit_lookup_object_in_space_with_port_name(name
, IKOT_IOKIT_CONNECT
, current_space()));
157 iokit_retain_port( ipc_port_t port
)
159 ipc_port_reference( port
);
163 iokit_release_port( ipc_port_t port
)
165 ipc_port_release( port
);
169 iokit_release_port_send( ipc_port_t port
)
171 ipc_port_release_send( port
);
174 extern lck_mtx_t iokit_obj_to_port_binding_lock
;
177 iokit_lock_port( __unused ipc_port_t port
)
179 lck_mtx_lock(&iokit_obj_to_port_binding_lock
);
183 iokit_unlock_port( __unused ipc_port_t port
)
185 lck_mtx_unlock(&iokit_obj_to_port_binding_lock
);
189 * Get the port for a device.
190 * Consumes a device reference; produces a naked send right.
194 iokit_make_port_of_type(io_object_t obj
, ipc_kobject_type_t type
)
202 port
= iokit_port_for_object( obj
, type
);
204 sendPort
= ipc_port_make_send( port
);
205 iokit_release_port( port
);
209 iokit_remove_reference( obj
);
215 iokit_make_object_port(
218 return (iokit_make_port_of_type(obj
, IKOT_IOKIT_OBJECT
));
222 iokit_make_connect_port(
225 return (iokit_make_port_of_type(obj
, IKOT_IOKIT_CONNECT
));
231 iokit_alloc_object_port( io_object_t obj
, ipc_kobject_type_t type
)
238 /* Allocate port, keeping a reference for it. */
239 port
= ipc_port_alloc_kernel();
243 /* set kobject & type */
244 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
246 /* Request no-senders notifications on the port. */
248 notify
= ipc_port_make_sonce_locked( port
);
249 ipc_port_nsrequest( port
, 1, notify
, ¬ify
);
251 assert( notify
== IP_NULL
);
261 iokit_destroy_object_port( ipc_port_t port
)
264 iokit_lock_port(port
);
265 ipc_kobject_set( port
, IKO_NULL
, IKOT_NONE
);
267 // iokit_remove_reference( obj );
268 iokit_unlock_port(port
);
269 ipc_port_dealloc_kernel( port
);
272 return( KERN_SUCCESS
);
276 iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
)
278 iokit_lock_port(port
);
279 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
280 iokit_unlock_port(port
);
282 return( KERN_SUCCESS
);
285 EXTERN mach_port_name_t
286 iokit_make_send_right( task_t task
, io_object_t obj
, ipc_kobject_type_t type
)
290 mach_port_name_t name
= 0;
293 return MACH_PORT_NULL
;
295 port
= iokit_port_for_object( obj
, type
);
297 sendPort
= ipc_port_make_send( port
);
298 iokit_release_port( port
);
302 if (IP_VALID( sendPort
)) {
304 kr
= ipc_object_copyout( task
->itk_space
, (ipc_object_t
) sendPort
,
305 MACH_MSG_TYPE_PORT_SEND
, TRUE
, &name
);
306 if ( kr
!= KERN_SUCCESS
) {
307 ipc_port_release_send( sendPort
);
308 name
= MACH_PORT_NULL
;
310 } else if ( sendPort
== IP_NULL
)
311 name
= MACH_PORT_NULL
;
312 else if ( sendPort
== IP_DEAD
)
313 name
= MACH_PORT_DEAD
;
319 iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
)
321 return (mach_port_mod_refs( task
->itk_space
, name
, MACH_PORT_RIGHT_SEND
, delta
));
325 * Handle the No-More_Senders notification generated from a device port destroy.
326 * Since there are no longer any tasks which hold a send right to this device
327 * port a NMS notification has been generated.
331 iokit_no_senders( mach_no_senders_notification_t
* notification
)
334 io_object_t obj
= NULL
;
335 ipc_kobject_type_t type
= IKOT_NONE
;
338 port
= (ipc_port_t
) notification
->not_header
.msgh_remote_port
;
340 // convert a port to io_object_t.
341 if( IP_VALID(port
)) {
342 iokit_lock_port(port
);
343 if( ip_active(port
)) {
344 obj
= (io_object_t
) port
->ip_kobject
;
345 type
= ip_kotype( port
);
346 if( (IKOT_IOKIT_OBJECT
== type
)
347 || (IKOT_IOKIT_CONNECT
== type
)
348 || (IKOT_IOKIT_IDENT
== type
))
349 iokit_add_reference( obj
, IKOT_IOKIT_OBJECT
);
353 iokit_unlock_port(port
);
357 mach_port_mscount_t mscount
= notification
->not_count
;
359 if( KERN_SUCCESS
!= iokit_client_died( obj
, port
, type
, &mscount
))
361 /* Re-request no-senders notifications on the port (if still active) */
363 if (ip_active(port
)) {
364 notify
= ipc_port_make_sonce_locked(port
);
365 ipc_port_nsrequest( port
, mscount
+ 1, notify
, ¬ify
);
367 if ( notify
!= IP_NULL
)
368 ipc_port_release_sonce(notify
);
373 iokit_remove_reference( obj
);
381 iokit_notify( mach_msg_header_t
* msg
)
383 switch (msg
->msgh_id
) {
384 case MACH_NOTIFY_NO_SENDERS
:
385 iokit_no_senders((mach_no_senders_notification_t
*) msg
);
388 case MACH_NOTIFY_PORT_DELETED
:
389 case MACH_NOTIFY_PORT_DESTROYED
:
390 case MACH_NOTIFY_SEND_ONCE
:
391 case MACH_NOTIFY_DEAD_NAME
:
393 printf("iokit_notify: strange notification %d\n", msg
->msgh_id
);
398 /* need to create a pmap function to generalize */
399 unsigned int IODefaultCacheBits(addr64_t pa
)
401 return(pmap_cache_attributes((ppnum_t
)(pa
>> PAGE_SHIFT
)));
404 kern_return_t
IOMapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_address_t pa
,
405 mach_vm_size_t length
, unsigned int options
)
410 pmap_t pmap
= map
->pmap
;
412 prot
= (options
& kIOMapReadOnly
)
413 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
415 pagenum
= (ppnum_t
)atop_64(pa
);
417 switch(options
& kIOMapCacheMask
) { /* What cache mode do we need? */
419 case kIOMapDefaultCache
:
421 flags
= IODefaultCacheBits(pa
);
424 case kIOMapInhibitCache
:
428 case kIOMapWriteThruCache
:
429 flags
= VM_WIMG_WTHRU
;
432 case kIOMapWriteCombineCache
:
433 flags
= VM_WIMG_WCOMB
;
436 case kIOMapCopybackCache
:
437 flags
= VM_WIMG_COPYBACK
;
440 case kIOMapCopybackInnerCache
:
441 flags
= VM_WIMG_INNERWBACK
;
444 case kIOMapPostedWrite
:
445 flags
= VM_WIMG_POSTED
;
449 pmap_set_cache_attributes(pagenum
, flags
);
451 vm_map_set_cache_attr(map
, (vm_map_offset_t
)va
);
454 // Set up a block mapped area
455 return pmap_map_block(pmap
, va
, pagenum
, (uint32_t) atop_64(round_page_64(length
)), prot
, 0, 0);
458 kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
)
460 pmap_t pmap
= map
->pmap
;
462 pmap_remove(pmap
, trunc_page_64(va
), round_page_64(va
+ length
));
464 return( KERN_SUCCESS
);
467 kern_return_t
IOProtectCacheMode(vm_map_t __unused map
, mach_vm_address_t __unused va
,
468 mach_vm_size_t __unused length
, unsigned int __unused options
)
473 pmap_t pmap
= map
->pmap
;
474 pmap_flush_context pmap_flush_context_storage
;
475 boolean_t delayed_pmap_flush
= FALSE
;
477 prot
= (options
& kIOMapReadOnly
)
478 ? VM_PROT_READ
: (VM_PROT_READ
|VM_PROT_WRITE
);
480 switch (options
& kIOMapCacheMask
)
482 // what cache mode do we need?
483 case kIOMapDefaultCache
:
485 return (KERN_INVALID_ARGUMENT
);
487 case kIOMapInhibitCache
:
491 case kIOMapWriteThruCache
:
492 flags
= VM_WIMG_WTHRU
;
495 case kIOMapWriteCombineCache
:
496 flags
= VM_WIMG_WCOMB
;
499 case kIOMapCopybackCache
:
500 flags
= VM_WIMG_COPYBACK
;
503 case kIOMapCopybackInnerCache
:
504 flags
= VM_WIMG_INNERWBACK
;
507 case kIOMapPostedWrite
:
508 flags
= VM_WIMG_POSTED
;
512 pmap_flush_context_init(&pmap_flush_context_storage
);
513 delayed_pmap_flush
= FALSE
;
515 // enter each page's physical address in the target map
516 for (off
= 0; off
< length
; off
+= page_size
)
518 ppnum_t ppnum
= pmap_find_phys(pmap
, va
+ off
);
520 pmap_enter_options(pmap
, va
+ off
, ppnum
, prot
, VM_PROT_NONE
, flags
, TRUE
,
521 PMAP_OPTIONS_NOFLUSH
, (void *)&pmap_flush_context_storage
);
522 delayed_pmap_flush
= TRUE
;
525 if (delayed_pmap_flush
== TRUE
)
526 pmap_flush(&pmap_flush_context_storage
);
528 return (KERN_SUCCESS
);
531 ppnum_t
IOGetLastPageNumber(void)
533 #if __i386__ || __x86_64__
534 ppnum_t lastPage
, highest
= 0;
537 for (idx
= 0; idx
< pmap_memory_region_count
; idx
++)
539 lastPage
= pmap_memory_regions
[idx
].end
- 1;
540 if (lastPage
> highest
)
544 #elif __arm__ || __arm64__
552 void IOGetTime( mach_timespec_t
* clock_time
);
553 void IOGetTime( mach_timespec_t
* clock_time
)
557 clock_get_system_nanotime(&sec
, &nsec
);
558 clock_time
->tv_sec
= (typeof(clock_time
->tv_sec
))sec
;
559 clock_time
->tv_nsec
= nsec
;