4 #include <sys/sysctl.h>
6 #include <mach/vm_map.h>
7 #include <darwintest.h>
8 #include <TargetConditionals.h>
9 #include <perfcheck_keys.h>
12 T_META_NAMESPACE("xnu.vm.perf"),
13 T_META_CHECK_LEAKS(false),
18 #define MEMSIZE (1UL<<29) /* 512 MB */
20 #define MEMSIZE (1UL<<27) /* 128 MB */
30 static int num_threads
;
31 static int ready_thread_count
;
33 static size_t num_pages
;
34 static char *memblock
;
35 static char *memblock_share
;
36 static dt_stat_time_t t
;
37 static pthread_cond_t start_cvar
;
38 static pthread_cond_t threads_ready_cvar
;
39 static pthread_mutex_t ready_thread_count_lock
;
/* Forward declarations; definitions below. */
static void map_mem_regions(void);
static void unmap_mem_regions(void);
static void fault_pages(int thread_id);
static void execute_threads(void);
static void *thread_setup(void *arg);
static void run_test(int test, int threads, int cpus);
static int get_ncpu(void);
49 static void map_mem_regions(void)
53 vm_prot_t curprot
, maxprot
;
55 memblock
= (char *)mmap(NULL
, MEMSIZE
, PROT_READ
| PROT_WRITE
, MAP_ANON
| MAP_PRIVATE
, -1, 0);
56 T_QUIET
; T_ASSERT_NE((void *)memblock
, MAP_FAILED
, "mmap");
58 if (test_type
== SOFT_FAULT
) {
60 /* Fault in all the pages of the original region. */
61 for(ptr
= memblock
; ptr
< memblock
+ MEMSIZE
; ptr
+= pgsize
) {
64 /* Remap the region so that subsequent accesses result in read soft faults. */
65 T_QUIET
; T_ASSERT_MACH_SUCCESS(vm_remap(mach_task_self(), (vm_address_t
*)&memblock_share
,
66 MEMSIZE
, 0, VM_FLAGS_ANYWHERE
, mach_task_self(), (vm_address_t
)memblock
, FALSE
,
67 &curprot
, &maxprot
, VM_INHERIT_DEFAULT
), "vm_remap");
71 static void unmap_mem_regions(void)
73 if (test_type
== SOFT_FAULT
) {
74 T_QUIET
; T_ASSERT_MACH_SUCCESS(munmap(memblock_share
, MEMSIZE
), "munmap");
76 T_QUIET
; T_ASSERT_MACH_SUCCESS(munmap(memblock
, MEMSIZE
), "munmap");
79 static void fault_pages(int thread_id
)
81 size_t region_len
, region_start
, region_end
;
85 region_len
= num_pages
/ (size_t)num_threads
;
86 region_start
= region_len
* (size_t)thread_id
;
88 if((size_t)thread_id
< num_pages
% (size_t)num_threads
) {
89 region_start
+= (size_t)thread_id
;
93 region_start
+= num_pages
% (size_t)num_threads
;
96 region_start
*= pgsize
;
98 region_end
= region_start
+ region_len
;
100 block
= (test_type
== SOFT_FAULT
)? memblock_share
: memblock
;
101 for(ptr
= block
+ region_start
; ptr
< block
+ region_end
; ptr
+= pgsize
) {
106 static void execute_threads(void)
108 int thread_index
, thread_retval
;
110 void *thread_retval_ptr
= &thread_retval
;
113 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_cond_init(&threads_ready_cvar
, NULL
), "pthread_cond_init");
114 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_cond_init(&start_cvar
, NULL
), "pthread_cond_init");
115 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_mutex_init(&ready_thread_count_lock
, NULL
), "pthread_mutex_init");
116 ready_thread_count
= 0;
118 threads
= (pthread_t
*)malloc(sizeof(*threads
) * (size_t)num_threads
);
119 thread_indices
= (int *)malloc(sizeof(*thread_indices
) * (size_t)num_threads
);
120 for(thread_index
= 0; thread_index
< num_threads
; thread_index
++) {
121 thread_indices
[thread_index
] = thread_index
;
122 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_create(&threads
[thread_index
], NULL
,
123 thread_setup
, (void *)&thread_indices
[thread_index
]), "pthread_create");
126 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&ready_thread_count_lock
), "pthread_mutex_lock");
127 if(ready_thread_count
!= num_threads
) {
128 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_cond_wait(&threads_ready_cvar
, &ready_thread_count_lock
),
129 "pthread_cond_wait");
131 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&ready_thread_count_lock
), "pthread_mutex_unlock");
134 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_cond_broadcast(&start_cvar
), "pthread_cond_broadcast");
135 for(thread_index
= 0; thread_index
< num_threads
; thread_index
++) {
136 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_join(threads
[thread_index
], &thread_retval_ptr
),
142 free(thread_indices
);
145 static void *thread_setup(void *arg
)
147 int my_index
= *((int *)arg
);
149 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&ready_thread_count_lock
), "pthread_mutex_lock");
150 ready_thread_count
++;
151 if(ready_thread_count
== num_threads
) {
152 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_cond_signal(&threads_ready_cvar
), "pthread_cond_signal");
154 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_cond_wait(&start_cvar
, &ready_thread_count_lock
), "pthread_cond_wait");
155 T_QUIET
; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&ready_thread_count_lock
), "pthread_mutex_unlock");
157 fault_pages(my_index
);
/*
 * Shared driver for all fault tests.
 *
 * test:    SOFT_FAULT or ZERO_FILL.
 * threads: number of worker threads to fault with.
 * cpus:    logical CPU count (logged only).
 *
 * Repeats map -> timed concurrent fault -> unmap until the "Runtime"
 * statistic stabilizes.
 */
static void run_test(int test, int threads, int cpus)
{
	size_t sysctl_size = sizeof(pgsize);
	int ret = sysctlbyname("vm.pagesize", &pgsize, &sysctl_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl vm.pagesize failed");

	test_type = test;
	num_threads = threads;
	num_pages = MEMSIZE / pgsize;

	T_QUIET; T_ASSERT_LT(test_type, NUM_TESTS, "invalid test type");
	T_QUIET; T_ASSERT_GT(num_threads, 0, "num_threads <= 0");
	T_QUIET; T_ASSERT_GT((int)num_pages / num_threads, 0, "num_pages/num_threads <= 0");

	T_LOG("No. of cpus: %d", cpus);
	T_LOG("No. of threads: %d", num_threads);
	/*
	 * num_pages is size_t, so %zu is the matching specifier (%ld is a
	 * format/argument mismatch — UB on ILP32 targets).  pgsize is assumed
	 * size_t as well (its declaration is outside this chunk — confirm).
	 */
	T_LOG("No. of pages: %zu", num_pages);
	T_LOG("Pagesize: %zu", pgsize);

	t = dt_stat_time_create("Runtime");
	// This sets the A/B failure threshold at 50% of baseline for Runtime
	dt_stat_set_variable(t, kPCFailureThresholdPctVar, 50.0);
	while (!dt_stat_stable(t)) {
		/* Re-map each iteration so every timed pass starts from fresh pages. */
		map_mem_regions();
		T_STAT_MEASURE(t) {
			execute_threads();
		};
		unmap_mem_regions();
	}

	dt_stat_finalize(t);
}
/* Return the number of logical CPUs as reported by the hw.ncpu sysctl. */
static int get_ncpu(void)
{
	int ncpu;
	size_t length = sizeof(ncpu);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.ncpu", &ncpu, &length, NULL, 0),
	    "failed to query hw.ncpu");
	return ncpu;
}
203 T_DECL(read_soft_fault
,
204 "Read soft faults (single thread)")
206 run_test(SOFT_FAULT
, 1, get_ncpu());
209 T_DECL(read_soft_fault_multithreaded
,
210 "Read soft faults (multi-threaded)")
215 /* iOSMark passes in the no. of threads via an env. variable */
216 if ((e
= getenv("DT_STAT_NTHREADS"))) {
217 nthreads
= (int)strtol(e
, NULL
, 0);
219 nthreads
= get_ncpu();
221 run_test(SOFT_FAULT
, nthreads
, get_ncpu());
224 T_DECL(zero_fill_fault
,
225 "Zero fill faults (single thread)")
227 run_test(ZERO_FILL
, 1, get_ncpu());
230 T_DECL(zero_fill_fault_multithreaded
,
231 "Zero fill faults (multi-threaded)")
236 /* iOSMark passes in the no. of threads via an env. variable */
237 if ((e
= getenv("DT_STAT_NTHREADS"))) {
238 nthreads
= (int)strtol(e
, NULL
, 0);
240 nthreads
= get_ncpu();
242 run_test(ZERO_FILL
, nthreads
, get_ncpu());