-#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
- MACRO_BEGIN \
- pmap_t __pmap = (pmap); \
- vm_page_t __page = (page); \
- vm_prot_t __prot__ = (protection); \
- \
- if (__pmap == kernel_pmap) { \
- __prot__ |= VM_PROT_WRITE; \
- } else { \
- assert(!__page->encrypted); \
- } \
- \
- pmap_enter( \
- __pmap, \
- (virtual_address), \
- __page->phys_page, \
- __prot__, \
- flags, \
- (wired) \
- ); \
+
+/*
+ * PMAP_SET_CACHE_ATTR: push the given cache attributes for a single page
+ * into the pmap layer and record on the owning VM object that a cache
+ * attribute has been set.
+ *
+ *   mem           - vm_page_t whose physical page gets the attributes
+ *   object        - vm_object_t owning the page; its set_cache_attr flag
+ *                   is raised so later code knows attributes were applied
+ *   cache_attr    - cache attribute bits passed through to
+ *                   pmap_set_cache_attributes()
+ *   batch_pmap_op - unused on this configuration; evaluated and cast to
+ *                   void only to silence unused-argument warnings
+ */
+#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \
+	MACRO_BEGIN \
+		pmap_set_cache_attributes((mem)->phys_page, (cache_attr)); \
+		(object)->set_cache_attr = TRUE; \
+		(void) (batch_pmap_op); /* parenthesized: macro-argument hygiene */ \
+	MACRO_END
+
+#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op)\
+ MACRO_BEGIN \
+ (void) user_page_list; \
+ (void) num_pages; \
+ (void) batch_pmap_op; \