#ifndef _WAITQ_H_
#define _WAITQ_H_
/*
 * Copyright (c) 2014-2015 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>		/* for kern_return_t */

#include <kern/kern_types.h>		/* for wait_queue_t */
#include <kern/queue.h>
#include <kern/assert.h>

#include <sys/cdefs.h>

/*
 * Constants and types used in the waitq APIs
 */
#define WAITQ_ALL_PRIORITIES   (-1)
#define WAITQ_PROMOTE_PRIORITY (-2)
#define WAITQ_SELECT_MAX_PRI   (-3)

typedef enum e_waitq_lock_state {
	WAITQ_KEEP_LOCKED    = 0x01,
	WAITQ_UNLOCK         = 0x02,
	WAITQ_SHOULD_LOCK    = 0x04,
	WAITQ_ALREADY_LOCKED = 0x08,
	WAITQ_DONT_LOCK      = 0x10,
} waitq_lock_state_t;
56 | ||
39037602 A |
57 | /* |
58 | * The Jenkins "one at a time" hash. | |
59 | * TBD: There may be some value to unrolling here, | |
60 | * depending on the architecture. | |
61 | */ | |
62 | static __inline__ uint32_t | |
63 | jenkins_hash(char *key, size_t length) | |
64 | { | |
65 | uint32_t hash = 0; | |
66 | size_t i; | |
67 | ||
68 | for (i = 0; i < length; i++) { | |
69 | hash += (uint32_t)key[i]; | |
70 | hash += (hash << 10); | |
71 | hash ^= (hash >> 6); | |
72 | } | |
73 | ||
74 | hash += (hash << 3); | |
75 | hash ^= (hash >> 11); | |
76 | hash += (hash << 15); | |
77 | ||
78 | return hash; | |
79 | } | |
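
/*
 * Illustrative sketch (not part of the API): hashing the value of an
 * event to select a queue bucket. 'n_buckets' and 'some_object' are
 * hypothetical; the actual bucketing scheme lives in the implementation.
 *
 *	uint64_t event = (uint64_t)(uintptr_t)&some_object;
 *	uint32_t bucket = jenkins_hash((char *)&event, sizeof(event)) % n_buckets;
 */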
80 | ||
81 | /* Opaque sizes and alignment used for struct verification */ | |
82 | #if __x86_64__ | |
83 | #define WQ_OPAQUE_ALIGN 8 | |
84 | #define WQS_OPAQUE_ALIGN 8 | |
85 | #define WQ_OPAQUE_SIZE 48 | |
86 | #define WQS_OPAQUE_SIZE 64 | |
87 | #else | |
88 | #error Unknown size requirement | |
89 | #endif | |
90 | ||
813fb2f6 | 91 | #ifdef MACH_KERNEL_PRIVATE |
3e170ce0 A |
92 | |
93 | #include <kern/spl.h> | |
94 | #include <kern/simple_lock.h> | |
95 | #include <mach/branch_predicates.h> | |
96 | ||
97 | #include <machine/cpu_number.h> | |
98 | #include <machine/machine_routines.h> /* machine_timeout_suspended() */ | |
99 | ||

/*
 * The event mask was 59 bits on 64-bit architectures and 27 bits on
 * 32-bit architectures, calculated from sizeof(long). If the bitfield
 * for wq_type and wq_fifo is changed, the value of EVENT_MASK_BITS
 * must change with it.
 *
 * New plan: this is an optimization anyway, so I'm stealing 32 bits
 * from the mask to shrink the waitq object even further. The mask now
 * gets whatever a uint32_t has left after the six flag bits declared
 * in struct waitq below (waitq_type:2 plus four single-bit flags).
 */
#define _EVENT_MASK_BITS   ((sizeof(uint32_t) * 8) - 6)

#define WAITQ_BOOST_PRIORITY 31

enum waitq_type {
	WQT_INVALID = 0,
	WQT_QUEUE   = 0x2,
	WQT_SET     = 0x3,
};
118 | ||
119 | #if CONFIG_WAITQ_STATS | |
120 | #define NWAITQ_BTFRAMES 5 | |
121 | struct wq_stats { | |
122 | uint64_t waits; | |
123 | uint64_t wakeups; | |
124 | uint64_t clears; | |
125 | uint64_t failed_wakeups; | |
126 | ||
127 | uintptr_t last_wait[NWAITQ_BTFRAMES]; | |
128 | uintptr_t last_wakeup[NWAITQ_BTFRAMES]; | |
129 | uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES]; | |
130 | }; | |
131 | #endif | |
132 | ||
133 | /* | |
134 | * struct waitq | |
135 | * | |
136 | * This is the definition of the common event wait queue | |
137 | * that the scheduler APIs understand. It is used | |
138 | * internally by the gerneralized event waiting mechanism | |
139 | * (assert_wait), and also for items that maintain their | |
140 | * own wait queues (such as ports and semaphores). | |
141 | * | |
142 | * It is not published to other kernel components. | |
143 | * | |
144 | * NOTE: Hardware locks are used to protect event wait | |
145 | * queues since interrupt code is free to post events to | |
146 | * them. | |
147 | */ | |
148 | struct waitq { | |
149 | uint32_t /* flags */ | |
150 | waitq_type:2, /* only public field */ | |
151 | waitq_fifo:1, /* fifo wakeup policy? */ | |
152 | waitq_prepost:1, /* waitq supports prepost? */ | |
153 | waitq_irq:1, /* waitq requires interrupts disabled */ | |
39037602 | 154 | waitq_isvalid:1, /* waitq structure is valid */ |
3e170ce0 A |
155 | waitq_eventmask:_EVENT_MASK_BITS; |
156 | /* the wait queue set (set-of-sets) to which this queue belongs */ | |
157 | hw_lock_data_t waitq_interlock; /* interlock */ | |
158 | ||
159 | uint64_t waitq_set_id; | |
160 | uint64_t waitq_prepost_id; | |
161 | queue_head_t waitq_queue; /* queue of elements */ | |
162 | }; | |
163 | ||
39037602 A |
164 | static_assert(sizeof(struct waitq) == WQ_OPAQUE_SIZE, "waitq structure size mismatch"); |
165 | static_assert(__alignof(struct waitq) == WQ_OPAQUE_ALIGN, "waitq structure alignment mismatch"); | |
166 | ||
3e170ce0 A |
167 | /* |
168 | * struct waitq_set | |
169 | * | |
170 | * This is the common definition for a set wait queue. | |
171 | */ | |
172 | struct waitq_set { | |
173 | struct waitq wqset_q; | |
174 | uint64_t wqset_id; | |
39037602 A |
175 | union { |
176 | uint64_t wqset_prepost_id; | |
177 | void *wqset_prepost_hook; | |
178 | }; | |
3e170ce0 A |
179 | }; |
180 | ||
39037602 A |
181 | static_assert(sizeof(struct waitq_set) == WQS_OPAQUE_SIZE, "waitq_set structure size mismatch"); |
182 | static_assert(__alignof(struct waitq_set) == WQS_OPAQUE_ALIGN, "waitq_set structure alignment mismatch"); | |
183 | ||

extern void waitq_bootstrap(void);

#define waitq_is_queue(wq) \
	((wq)->waitq_type == WQT_QUEUE)

#define waitq_is_set(wq) \
	((wq)->waitq_type == WQT_SET && ((struct waitq_set *)(wq))->wqset_id != 0)

#define waitqs_is_set(wqs) \
	(((wqs)->wqset_q.waitq_type == WQT_SET) && ((wqs)->wqset_id != 0))

#define waitq_valid(wq) \
	((wq) != NULL && (wq)->waitq_isvalid && ((wq)->waitq_type & ~1) == WQT_QUEUE)

/*
 * Invalidate a waitq. The only valid waitq functions to call after this are:
 *	waitq_deinit()
 *	waitq_set_deinit()
 */
extern void waitq_invalidate_locked(struct waitq *wq);

#define waitq_empty(wq) \
	(queue_empty(&(wq)->waitq_queue))


#define waitq_held(wq) \
	(hw_lock_held(&(wq)->waitq_interlock))

#define waitq_lock_try(wq) \
	(hw_lock_try(&(wq)->waitq_interlock))


#define waitq_wait_possible(thread) \
	((thread)->waitq == NULL)

extern void waitq_lock(struct waitq *wq);
extern void waitq_unlock(struct waitq *wq);

#define waitq_set_lock(wqs)		waitq_lock(&(wqs)->wqset_q)
#define waitq_set_unlock(wqs)		waitq_unlock(&(wqs)->wqset_q)
#define waitq_set_lock_try(wqs)		waitq_lock_try(&(wqs)->wqset_q)
#define waitq_set_can_prepost(wqs)	(waitqs_is_set(wqs) && \
					 (wqs)->wqset_q.waitq_prepost)
#define waitq_set_maybe_preposted(wqs)	((wqs)->wqset_q.waitq_prepost && \
					 (wqs)->wqset_prepost_id > 0)
#define waitq_set_has_prepost_hook(wqs)	(waitqs_is_set(wqs) && \
					 !((wqs)->wqset_q.waitq_prepost) && \
					 (wqs)->wqset_prepost_hook)

/* assert intent to wait on a locked wait queue */
extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq,
						event64_t wait_event,
						wait_interrupt_t interruptible,
						wait_timeout_urgency_t urgency,
						uint64_t deadline,
						uint64_t leeway,
						thread_t thread);

/* pull a thread from its wait queue */
extern int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread);

/* wakeup all threads waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* wakeup one thread waiting for a particular event on locked queue */
extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq,
					       event64_t wake_event,
					       wait_result_t result,
					       uint64_t *reserved_preposts,
					       int priority,
					       waitq_lock_state_t lock_state);

/* return identity of a thread awakened for a particular <wait_queue,event> */
extern thread_t
waitq_wakeup64_identify_locked(struct waitq *waitq,
			       event64_t wake_event,
			       wait_result_t result,
			       spl_t *spl,
			       uint64_t *reserved_preposts,
			       int priority,
			       waitq_lock_state_t lock_state);

/* wakeup a thread iff it's still waiting for a particular event on a locked queue */
extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq,
						  event64_t wake_event,
						  thread_t thread,
						  wait_result_t result,
						  waitq_lock_state_t lock_state);

/* clear all preposts generated by the given waitq */
extern int waitq_clear_prepost_locked(struct waitq *waitq);

/* clear all preposts from the given wait queue set */
extern void waitq_set_clear_preposts_locked(struct waitq_set *wqset);

/* unlink the given waitq from all sets - returns unlocked */
extern kern_return_t waitq_unlink_all_unlock(struct waitq *waitq);

/* unlink the given waitq set from all waitqs and waitq sets - returns unlocked */
extern kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset);


/*
 * clear a thread's boosted priority
 * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function)
 */
extern void waitq_clear_promotion_locked(struct waitq *waitq,
					 thread_t thread);

/*
 * waitq iteration
 */

enum waitq_iteration_constant {
	WQ_ITERATE_DROPPED		= -4,
	WQ_ITERATE_INVALID		= -3,
	WQ_ITERATE_ABORTED		= -2,
	WQ_ITERATE_FAILURE		= -1,
	WQ_ITERATE_SUCCESS		=  0,
	WQ_ITERATE_CONTINUE		=  1,
	WQ_ITERATE_BREAK		=  2,
	WQ_ITERATE_BREAK_KEEP_LOCKED	=  3,
	WQ_ITERATE_INVALIDATE_CONTINUE	=  4,
	WQ_ITERATE_RESTART		=  5,
	WQ_ITERATE_FOUND		=  6,
	WQ_ITERATE_UNLINKED		=  7,
};
317 | ||
318 | /* callback invoked with both 'waitq' and 'wqset' locked */ | |
319 | typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq, | |
320 | struct waitq_set *wqset); | |
321 | ||
322 | /* iterate over all sets to which waitq belongs */ | |
323 | extern int waitq_iterate_sets(struct waitq *waitq, void *ctx, | |
324 | waitq_iterator_t it); | |
325 | ||
326 | /* iterator over all waitqs that have preposted to wqset */ | |
327 | extern int waitq_set_iterate_preposts(struct waitq_set *wqset, | |
39037602 | 328 | void *ctx, waitq_iterator_t it); |
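
/*
 * Example iterator (illustrative sketch): count the sets to which a
 * waitq belongs. Both 'waitq' and 'wqset' arrive locked, per the
 * waitq_iterator_t contract above.
 *
 *	static int count_sets_cb(void *ctx, struct waitq *waitq,
 *				 struct waitq_set *wqset)
 *	{
 *		int *count = (int *)ctx;
 *		(*count)++;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 *	int count = 0;
 *	waitq_iterate_sets(wq, &count, count_sets_cb);
 */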

/*
 * prepost reservation
 */
extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra,
				      waitq_lock_state_t lock_state);

extern void waitq_prepost_release_reserve(uint64_t id);
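
/*
 * Usage sketch (illustrative, under the assumption that reserved IDs
 * feed the 'reserved_preposts' parameter of the locked wakeup calls):
 * reserve prepost objects before taking locks so the wakeup path never
 * allocates with locks held, then return whatever was not consumed.
 *
 *	uint64_t reserved = waitq_prepost_reserve(wq, 0, WAITQ_DONT_LOCK);
 *	... pass &reserved to waitq_wakeup64_one_locked() et al. ...
 *	waitq_prepost_release_reserve(reserved);
 */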
337 | ||
813fb2f6 A |
338 | #else /* !MACH_KERNEL_PRIVATE */ |
339 | ||
340 | /* | |
341 | * The opaque waitq structure is here mostly for AIO and selinfo, | |
342 | * but could potentially be used by other BSD subsystems. | |
343 | */ | |
344 | struct waitq { char opaque[WQ_OPAQUE_SIZE]; } __attribute__((aligned(WQ_OPAQUE_ALIGN))); | |
345 | struct waitq_set { char opaque[WQS_OPAQUE_SIZE]; } __attribute__((aligned(WQS_OPAQUE_ALIGN))); | |
346 | ||
3e170ce0 A |
347 | #endif /* MACH_KERNEL_PRIVATE */ |
348 | ||
349 | ||
350 | __BEGIN_DECLS | |
351 | ||
352 | /* | |
353 | * waitq init | |
354 | */ | |
355 | extern kern_return_t waitq_init(struct waitq *waitq, int policy); | |
356 | extern void waitq_deinit(struct waitq *waitq); | |
357 | ||

/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))

extern struct waitq *global_waitq(int index);
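
/*
 * Usage sketch (illustrative): global_eventq() hashes the bytes of its
 * argument, so the value of 'ev' (typically a kernel address that names
 * the event) selects one of the global queues. 'some_object' is
 * hypothetical.
 *
 *	void *ev = &some_object;	// hypothetical event value
 *	struct waitq *wq = global_eventq(ev);
 */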
365 | ||
366 | /* | |
367 | * set alloc/init/free | |
368 | */ | |
39037602 | 369 | extern struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook); |
3e170ce0 A |
370 | |
371 | extern kern_return_t waitq_set_init(struct waitq_set *wqset, | |
39037602 A |
372 | int policy, uint64_t *reserved_link, |
373 | void *prepost_hook); | |
3e170ce0 A |
374 | |
375 | extern void waitq_set_deinit(struct waitq_set *wqset); | |
376 | ||
377 | extern kern_return_t waitq_set_free(struct waitq_set *wqset); | |
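
/*
 * Usage sketch (illustrative): allocate a standalone set with no
 * prepost hook, then free it once all member queues are unlinked.
 *
 *	struct waitq_set *wqset = waitq_set_alloc(SYNC_POLICY_FIFO, NULL);
 *	... waitq_link()/waitq_unlink() members ...
 *	waitq_set_free(wqset);
 */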
378 | ||
379 | #if defined(DEVELOPMENT) || defined(DEBUG) | |
380 | #if CONFIG_WAITQ_DEBUG | |
381 | extern uint64_t wqset_id(struct waitq_set *wqset); | |
382 | ||
383 | struct waitq *wqset_waitq(struct waitq_set *wqset); | |
384 | #endif /* CONFIG_WAITQ_DEBUG */ | |
385 | #endif /* DEVELOPMENT || DEBUG */ | |
386 | ||
387 | ||
388 | /* | |
389 | * set membership | |
390 | */ | |
391 | extern uint64_t waitq_link_reserve(struct waitq *waitq); | |
392 | ||
393 | extern void waitq_link_release(uint64_t id); | |
394 | ||
395 | extern boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset); | |
396 | ||
397 | /* returns true if the waitq is in at least 1 set */ | |
398 | extern boolean_t waitq_in_set(struct waitq *waitq); | |
399 | ||
400 | ||
401 | /* on success, consumes an reserved_link reference */ | |
402 | extern kern_return_t waitq_link(struct waitq *waitq, | |
403 | struct waitq_set *wqset, | |
404 | waitq_lock_state_t lock_state, | |
405 | uint64_t *reserved_link); | |
406 | ||
407 | extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset); | |
408 | ||
409 | extern kern_return_t waitq_unlink_all(struct waitq *waitq); | |
410 | ||
411 | extern kern_return_t waitq_set_unlink_all(struct waitq_set *wqset); | |
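
/*
 * Usage sketch (illustrative): two-phase linking. A link object is
 * reserved before any locks are held; waitq_link() consumes the
 * reservation on success (assumed here to zero it), so a leftover
 * reservation must be released.
 *
 *	uint64_t reserved_link = waitq_link_reserve(wq);
 *	kern_return_t kr = waitq_link(wq, wqset, WAITQ_SHOULD_LOCK,
 *	                              &reserved_link);
 *	if (reserved_link != 0)
 *		waitq_link_release(reserved_link);
 */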
412 | ||
3e170ce0 A |
413 | /* |
414 | * preposts | |
415 | */ | |
416 | extern void waitq_clear_prepost(struct waitq *waitq); | |
417 | ||
418 | extern void waitq_set_clear_preposts(struct waitq_set *wqset); | |
419 | ||
420 | /* | |
421 | * interfaces used primarily by the select/kqueue subsystems | |
422 | */ | |
423 | extern uint64_t waitq_get_prepost_id(struct waitq *waitq); | |
424 | extern void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset); | |
425 | ||
426 | /* | |
427 | * waitq attributes | |
428 | */ | |
429 | extern int waitq_is_valid(struct waitq *waitq); | |
430 | ||
431 | extern int waitq_set_is_valid(struct waitq_set *wqset); | |
432 | ||
433 | extern int waitq_is_global(struct waitq *waitq); | |
434 | ||
435 | extern int waitq_irq_safe(struct waitq *waitq); | |
436 | ||
437 | #if CONFIG_WAITQ_STATS | |
438 | /* | |
439 | * waitq statistics | |
440 | */ | |
441 | #define WAITQ_STATS_VERSION 1 | |
442 | struct wq_table_stats { | |
443 | uint32_t version; | |
444 | uint32_t table_elements; | |
445 | uint32_t table_used_elems; | |
446 | uint32_t table_elem_sz; | |
447 | uint32_t table_slabs; | |
448 | uint32_t table_slab_sz; | |
449 | ||
450 | uint64_t table_num_allocs; | |
451 | uint64_t table_num_preposts; | |
452 | uint64_t table_num_reservations; | |
453 | ||
454 | uint64_t table_max_used; | |
455 | uint64_t table_avg_used; | |
456 | uint64_t table_max_reservations; | |
457 | uint64_t table_avg_reservations; | |
458 | }; | |
459 | ||
460 | extern void waitq_link_stats(struct wq_table_stats *stats); | |
461 | extern void waitq_prepost_stats(struct wq_table_stats *stats); | |
462 | #endif /* CONFIG_WAITQ_STATS */ | |
463 | ||
464 | /* | |
465 | * | |
466 | * higher-level waiting APIs | |
467 | * | |
468 | */ | |
469 | ||
470 | /* assert intent to wait on <waitq,event64> pair */ | |
471 | extern wait_result_t waitq_assert_wait64(struct waitq *waitq, | |
472 | event64_t wait_event, | |
473 | wait_interrupt_t interruptible, | |
474 | uint64_t deadline); | |
475 | ||
476 | extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq, | |
477 | event64_t wait_event, | |
478 | wait_interrupt_t interruptible, | |
479 | wait_timeout_urgency_t urgency, | |
480 | uint64_t deadline, | |
481 | uint64_t leeway); | |
482 | ||
483 | /* wakeup the most appropriate thread waiting on <waitq,event64> pair */ | |
484 | extern kern_return_t waitq_wakeup64_one(struct waitq *waitq, | |
485 | event64_t wake_event, | |
486 | wait_result_t result, | |
487 | int priority); | |
488 | ||
489 | /* wakeup all the threads waiting on <waitq,event64> pair */ | |
490 | extern kern_return_t waitq_wakeup64_all(struct waitq *waitq, | |
491 | event64_t wake_event, | |
492 | wait_result_t result, | |
493 | int priority); | |
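
/*
 * Usage sketch (illustrative): the classic assert-wait/block/wakeup
 * pattern. CAST_EVENT64_T(), thread_block(), and the THREAD_* constants
 * come from the surrounding Mach scheduler headers; 'obj' is a
 * hypothetical object whose address names the event, and a deadline of
 * 0 is assumed to mean no timeout.
 *
 * Waiter:
 *	wait_result_t wr = waitq_assert_wait64(wq, CAST_EVENT64_T(&obj),
 *	                                       THREAD_UNINT, 0);
 *	if (wr == THREAD_WAITING)
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *
 * Waker:
 *	waitq_wakeup64_one(wq, CAST_EVENT64_T(&obj), THREAD_AWAKENED,
 *	                   WAITQ_ALL_PRIORITIES);
 */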
494 | ||
39037602 A |
495 | #ifdef XNU_KERNEL_PRIVATE |
496 | ||
3e170ce0 A |
497 | /* wakeup a specified thread iff it's waiting on <waitq,event64> pair */ |
498 | extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq, | |
499 | event64_t wake_event, | |
500 | thread_t thread, | |
501 | wait_result_t result); | |
39037602 A |
502 | |
503 | /* return a reference to the thread that was woken up */ | |
504 | extern thread_t | |
505 | waitq_wakeup64_identify(struct waitq *waitq, | |
506 | event64_t wake_event, | |
507 | wait_result_t result, | |
508 | int priority); | |
509 | ||
510 | #endif /* XNU_KERNEL_PRIVATE */ | |
511 | ||
3e170ce0 A |
512 | __END_DECLS |
513 | ||
514 | #endif /* KERNEL_PRIVATE */ | |
515 | #endif /* _WAITQ_H_ */ |