#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
+#include <vm/pmap.h>
+#include <i386/param.h>
#include <i386/misc_protos.h>
+#define value_64bit(value) ((value) & 0xFFFFFFFF00000000LL)
+#define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFLL))
+
/*
* Should be rewritten in asm anyway.
*/
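+/*
+ * bzero_phys - zero 'len' bytes starting at physical address 'p'; only the
+ * low 32 bits of 'p' are used (see low32() above).
+ */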
+void
+bzero_phys(addr64_t p, uint32_t len)
+{
+ bzero((char *)phystokv(low32(p)), len);
+}
+
/*
* copy 'size' bytes from physical to physical address
* the caller must validate the physical ranges
* if flush_action == 3, flush both source and dest
*/
+extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);
+
kern_return_t copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action) {
switch(flush_action) {
break;
}
- bcopy_phys((char *)source, (char *)dest, size); /* Do a physical copy */
+ bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size); /* Do a physical copy */
switch(flush_action) {
case 1:
break;
}
+ return KERN_SUCCESS;
}
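+
+/*
+ * Usage sketch (illustrative only, not part of this file's interface): copy
+ * one page between two caller-validated physical addresses, flushing both
+ * source and destination (flush_action == 3).  The helper name
+ * copyp2p_example is an assumption made for the example.
+ */
+#if 0
+static void
+copyp2p_example(vm_offset_t src_phys, vm_offset_t dst_phys)
+{
+	(void) copyp2p(src_phys, dst_phys, NBPG, 3);
+}
+#endif
+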
/*
 * Copies data from a physical page to a virtual page.  This is used to
 * move data from the kernel to user state.
*
*/
-
+#if 0
kern_return_t
copyp2v(char *from, char *to, unsigned int size) {
return(copyout(phystokv(from), to, size));
}
+#endif
+
+/*
+ * Copies data from a virtual page to a physical page. This is used to
+ * move data from the user address space into the kernel.
+ *
+ */
+#if 0
+kern_return_t
+copyv2p(char *from, char *to, unsigned int size) {
+
+ return(copyin(from, phystokv(to), size));
+}
+#endif
/*
* bcopy_phys - like bcopy but copies from/to physical addresses.
*/
void
-bcopy_phys(const char *from, char *to, vm_size_t bytes)
+bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes)
{
- bcopy((char *)phystokv(from), (char *)phystokv(to), bytes);
+ /* this will die horribly if we ever run off the end of a page */
+ if (value_64bit(from) || value_64bit(to)) panic("bcopy_phys: 64 bit value");
+ bcopy((char *)phystokv(low32(from)),
+ (char *)phystokv(low32(to)), bytes);
}
}
#endif /* MACH_ASSERT */
+
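+/*
+ * fillPage - fill the physical page with page number 'pa' with the 32-bit
+ * pattern 'fill', NBPG bytes in all, through the kernel's physical mapping.
+ */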
+void fillPage(ppnum_t pa, unsigned int fill)
+{
+ unsigned int *addr = (unsigned int *)phystokv(i386_ptob(pa));
+ int i;
+ int cnt = NBPG/sizeof(unsigned int);
+
+ for (i = 0; i < cnt ; i++ )
+ *addr++ = fill;
+}
+
+#define cppvPHYS (cppvPsnk|cppvPsrc)
+
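+/*
+ * copypv - copy 'size' bytes between 'source' and 'sink'.  The cppvPsrc and
+ * cppvPsnk bits in 'which' mark the source and/or sink as physical; the
+ * non-physical side is a user virtual address unless cppvKmap is set, in
+ * which case it is kernel virtual.  cppvFsrc and cppvFsnk request data
+ * cache flushes around the copy.  Both addresses must fit in 32 bits.
+ */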
+kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
+{
+ char *src32, *dst32;
+
+ if (value_64bit(source) || value_64bit(sink)) panic("copypv: 64 bit value");
+
+ src32 = (char *)low32(source);
+ dst32 = (char *)low32(sink);
+
+ if (which & cppvFsrc) flush_dcache(low32(source), size, 1); /* If requested, flush source before move */
+ if (which & cppvFsnk) flush_dcache(low32(sink), size, 1); /* If requested, flush sink before move */
+
+ switch (which & cppvPHYS) {
+
+ case cppvPHYS:
+ /*
+ * both destination and source are physical
+ */
+ bcopy_phys(source, sink, (vm_size_t)size);
+ break;
+
+ case cppvPsnk:
+ /*
+ * destination is physical, source is virtual
+ */
+ if (which & cppvKmap)
+ /*
+ * source is kernel virtual
+ */
+ bcopy(src32, (char *)phystokv(dst32), size);
+ else
+ /*
+ * source is user virtual
+ */
+ copyin(src32, (char *)phystokv(dst32), size);
+ break;
+
+ case cppvPsrc:
+ /*
+ * source is physical, destination is virtual
+ */
+ if (which & cppvKmap)
+ /*
+ * destination is kernel virtual
+ */
+ bcopy((char *)phystokv(src32), dst32, size);
+ else
+ /*
+ * destination is user virtual
+ */
+ copyout((char *)phystokv(src32), dst32, size);
+ break;
+
+ default:
+ panic("copypv: both virtual");
+ }
+
+ if (which & cppvFsrc) flush_dcache(low32(source), size, 1); /* If requested, flush source after move */
+ if (which & cppvFsnk) flush_dcache(low32(sink), size, 1); /* If requested, flush sink after move */
+
+ return KERN_SUCCESS;
+}
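+
+/*
+ * Usage sketch (illustrative only): copy 'len' bytes from a kernel virtual
+ * buffer into a physical sink and flush the sink around the move.  The
+ * helper name copypv_example is an assumption made for the example; the
+ * flag meanings follow the code above (cppvPsnk: sink is physical and the
+ * source is virtual, cppvKmap: the virtual side is kernel, cppvFsnk: flush
+ * the sink).
+ */
+#if 0
+static void
+copypv_example(addr64_t sink_phys, char *kbuf, unsigned int len)
+{
+	(void) copypv((addr64_t)(vm_offset_t)kbuf, sink_phys, len,
+	    cppvPsnk | cppvKmap | cppvFsnk);
+}
+#endif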
+
+
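+/*
+ * 64-bit cache maintenance entry points: no-ops in this implementation.
+ */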
+void flush_dcache64(addr64_t addr, unsigned count, int phys)
+{
+}
+
+void invalidate_icache64(addr64_t addr, unsigned cnt, int phys)
+{
+}
+
+
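+/* switch_to_serial_console - stub, no-op in this implementation */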
+void switch_to_serial_console(void)
+{
+}
+
+addr64_t vm_last_addr;
+
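+/*
+ * mapping_set_mod - mark the physical page 'pn' as modified by way of the
+ * pmap layer.
+ */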
+void
+mapping_set_mod(ppnum_t pn)
+{
+ pmap_set_modify(pn);
+}
+
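+/*
+ * mutex_preblock - stub: unconditionally returns FALSE on this
+ * architecture.
+ */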
+boolean_t
+mutex_preblock(
+ mutex_t *mutex,
+ thread_t thread)
+{
+ return (FALSE);
+}