pset = default_pager_external_set;
}
- ipc_port_make_sonce(mem_obj);
ip_lock(mem_obj); /* unlocked in nsrequest below */
+ ipc_port_make_sonce_locked(mem_obj);
ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
boolean_t reclaim_backing_store)
{
vstruct_t vs;
+ kern_return_t retval;
vs_lookup(mem_obj, vs);
for (;;) {
vs->vs_xfer_pending = TRUE;
vs_unlock(vs);
- ps_vstruct_reclaim(vs, TRUE, reclaim_backing_store);
+ retval = ps_vstruct_reclaim(vs, TRUE, reclaim_backing_store);
vs_lock(vs);
vs->vs_xfer_pending = FALSE;
vs_unlock(vs);
- return KERN_SUCCESS;
+ return retval;
}
kern_return_t
vs_lookup(mem_obj, vs);
default_pager_total++;
+
+ /* might be unreachable if VS_TRY_LOCK is, by definition, always true */
+ __unreachable_ok_push
if(!VS_TRY_LOCK(vs)) {
/* the call below will not be done by caller when we have */
/* a synchronous interface */
upl_deallocate(upl);
return KERN_SUCCESS;
}
+ __unreachable_ok_pop
if ((vs->vs_seqno != vs->vs_next_seqno++)
|| (vs->vs_readers)
/*
 * Our out-of-line port arrays are simply kalloc'ed.
*/
- psize = round_page(actual * sizeof (*pagers));
+ psize = vm_map_round_page(actual * sizeof (*pagers),
+ vm_map_page_mask(ipc_kernel_map));
ppotential = (unsigned int) (psize / sizeof (*pagers));
pagers = (memory_object_t *)kalloc(psize);
if (0 == pagers)
* then "copied in" as if it had been sent by a
* user process.
*/
- osize = round_page(actual * sizeof (*objects));
+ osize = vm_map_round_page(actual * sizeof (*objects),
+ vm_map_page_mask(ipc_kernel_map));
opotential = (unsigned int) (osize / sizeof (*objects));
kr = kmem_alloc(ipc_kernel_map, &oaddr, osize);
if (KERN_SUCCESS != kr) {
pagers[--ppotential] = MEMORY_OBJECT_NULL;
}
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(oaddr),
- vm_map_round_page(oaddr + osize), FALSE);
+ kr = vm_map_unwire(ipc_kernel_map,
+ vm_map_trunc_page(oaddr,
+ vm_map_page_mask(ipc_kernel_map)),
+ vm_map_round_page(oaddr + osize,
+ vm_map_page_mask(ipc_kernel_map)),
+ FALSE);
assert(KERN_SUCCESS == kr);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)oaddr,
(vm_map_size_t)osize, TRUE, &pcopy);
if (0 != addr)
kmem_free(ipc_kernel_map, addr, size);
- size = round_page(actual * sizeof (*pages));
+ size = vm_map_round_page(actual * sizeof (*pages),
+ vm_map_page_mask(ipc_kernel_map));
kr = kmem_alloc(ipc_kernel_map, &addr, size);
if (KERN_SUCCESS != kr)
return KERN_RESOURCE_SHORTAGE;
while (actual < potential)
pages[--potential].dpp_offset = 0;
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
- vm_map_round_page(addr + size), FALSE);
+ kr = vm_map_unwire(ipc_kernel_map,
+ vm_map_trunc_page(addr,
+ vm_map_page_mask(ipc_kernel_map)),
+ vm_map_round_page(addr + size,
+ vm_map_page_mask(ipc_kernel_map)),
+ FALSE);
assert(KERN_SUCCESS == kr);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
(vm_map_size_t)size, TRUE, ©);