/*
 * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Called from a trigger. Takes the data from the different sampler
 * modules and puts it in a buffer.
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
// #include <libkern/libkern.h>
#include <kern/kalloc.h>
#include <kern/debug.h> /* panic */
#include <kern/thread.h>
#include <sys/errno.h>

#include <chud/chud_xnu.h>
#include <kperf/kperf.h>

#include <kperf/buffer.h>
#include <kperf/timetrigger.h>
#include <kperf/threadinfo.h>
#include <kperf/callstack.h>
#include <kperf/sample.h>
#include <kperf/action.h>
#include <kperf/context.h>
#include <kperf/ast.h>
#include <kperf/kperf_kpc.h>

#define ACTION_MAX 32

/* description of a single action to take when triggered */
struct action
{
        uint32_t sample;   /* bitmask of samplers to run (SAMPLER_*) */
        uint32_t userdata; /* user-supplied tag emitted with each sample */
        int pid_filter;    /* only sample this pid; -1 means any pid */
};

/* the list of actions */
static unsigned actionc = 0;
static struct action *actionv = NULL;
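
/*
 * Note on indexing (added for clarity; it summarizes the checks used
 * throughout this file): action IDs handed in by callers are 1-based, so
 * actionid N refers to actionv[N - 1]. actionid 0 is the "NULL" action,
 * and an ID greater than actionc is either rejected (kperf_sample() returns
 * SAMPLE_SHUTDOWN) or, in kperf_sample_internal(), simply echoed back as
 * the user tag.
 */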

/* manage callbacks from system */

/* callback set for kdebug */
static int kperf_kdbg_callback_set = 0;
/* whether to record callstacks on kdebug events */
static int kdebug_callstacks = 0;
/* the action ID to trigger on signposts */
static int kperf_signpost_action = 0;

/* callback set for context-switch */
int kperf_cswitch_callback_set = 0;
/* should emit tracepoint on context switch */
static int kdebug_cswitch = 0;
/* the action ID to trigger on context switches */
static int kperf_cswitch_action = 0;

/* indirect hooks to play nice with CHUD for the transition to kperf */
kern_return_t chudxnu_kdebug_callback_enter(chudxnu_kdebug_callback_func_t fn);
kern_return_t chudxnu_kdebug_callback_cancel(void);

/* Do the real work! */
/* this can be called in any context ... right? */
static kern_return_t
kperf_sample_internal(struct kperf_sample *sbuf,
                      struct kperf_context *context,
                      unsigned sample_what, unsigned sample_flags,
                      unsigned actionid)
{
        boolean_t enabled;
        int did_ucallstack = 0, did_tinfo_extra = 0;
        uint32_t userdata;

        /* not much point continuing here, but what to do? return
         * shutdown? cut a tracepoint and continue?
         */
        if (sample_what == 0) {
                return SAMPLE_CONTINUE;
        }

        int is_kernel = (context->cur_pid == 0);

        sbuf->kcallstack.nframes = 0;
        sbuf->kcallstack.flags = CALLSTACK_VALID;
        sbuf->ucallstack.nframes = 0;
        sbuf->ucallstack.flags = CALLSTACK_VALID;

        /* an event occurred. Sample everything and dump it in a
         * buffer.
         */

        /* collect data from samplers */
        if (sample_what & SAMPLER_TINFO) {
                kperf_threadinfo_sample(&sbuf->threadinfo, context);

                /* See if we should drop idle thread samples.
                 * 0x40 is presumably the idle flag in the threadinfo run mode.
                 */
                if (!(sample_flags & SAMPLE_FLAG_IDLE_THREADS)) {
                        if (sbuf->threadinfo.runmode & 0x40) {
                                return SAMPLE_CONTINUE;
                        }
                }
        }

        if ((sample_what & SAMPLER_KSTACK) && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK)) {
                kperf_kcallstack_sample(&(sbuf->kcallstack), context);
        }

        /* sensitive ones */
        if (!is_kernel) {
                if (sample_what & SAMPLER_MEMINFO) {
                        kperf_meminfo_sample(&(sbuf->meminfo), context);
                }

                if (sample_flags & SAMPLE_FLAG_PEND_USER) {
                        if ((sample_what & SAMPLER_USTACK)
                            && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK))
                        {
                                did_ucallstack = kperf_ucallstack_pend(context);
                        }

                        if (sample_what & SAMPLER_TINFOEX) {
                                did_tinfo_extra = kperf_threadinfo_extra_pend(context);
                        }
                } else {
                        if ((sample_what & SAMPLER_USTACK)
                            && !(sample_flags & SAMPLE_FLAG_EMPTY_CALLSTACK))
                        {
                                kperf_ucallstack_sample(&(sbuf->ucallstack), context);
                        }

                        if (sample_what & SAMPLER_TINFOEX) {
                                kperf_threadinfo_extra_sample(&(sbuf->tinfo_ex),
                                                              context);
                        }
                }
        }

        if (sample_what & SAMPLER_PMC_THREAD) {
                kperf_kpc_thread_sample(&(sbuf->kpcdata), sample_what);
        } else if (sample_what & SAMPLER_PMC_CPU) {
                kperf_kpc_cpu_sample(&(sbuf->kpcdata), sample_what);
        }

        /* lookup the user tag, if any */
        if (actionid && (actionid <= actionc)) {
                userdata = actionv[actionid - 1].userdata;
        } else {
                userdata = actionid;
        }

        /* stash the data into the buffer
         * interrupts off to ensure we don't get split
         */
        enabled = ml_set_interrupts_enabled(FALSE);

        BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what,
                 actionid, userdata, sample_flags);

        /* dump threadinfo */
        if (sample_what & SAMPLER_TINFO) {
                kperf_threadinfo_log(&sbuf->threadinfo);
        }

        /* dump kcallstack */
        if (sample_what & SAMPLER_KSTACK) {
                kperf_kcallstack_log(&sbuf->kcallstack);
        }

        /* dump user stuff */
        if (!is_kernel) {
                /* dump meminfo */
                if (sample_what & SAMPLER_MEMINFO) {
                        kperf_meminfo_log(&(sbuf->meminfo));
                }

                if (sample_flags & SAMPLE_FLAG_PEND_USER) {
                        if (did_ucallstack) {
                                BUF_INFO1(PERF_CS_UPEND, 0);
                        }

                        if (did_tinfo_extra) {
                                BUF_INFO1(PERF_TI_XPEND, 0);
                        }
                } else {
                        if (sample_what & SAMPLER_USTACK) {
                                kperf_ucallstack_log(&(sbuf->ucallstack));
                        }

                        if (sample_what & SAMPLER_TINFOEX) {
                                kperf_threadinfo_extra_log(&(sbuf->tinfo_ex));
                        }
                }
        }

        if (sample_what & SAMPLER_PMC_THREAD) {
                kperf_kpc_thread_log(&(sbuf->kpcdata));
        } else if (sample_what & SAMPLER_PMC_CPU) {
                kperf_kpc_cpu_log(&(sbuf->kpcdata));
        }

        BUF_DATA1(PERF_GEN_EVENT | DBG_FUNC_END, sample_what);

        /* interrupts back on */
        ml_set_interrupts_enabled(enabled);

        return SAMPLE_CONTINUE;
}

/* Translate actionid into sample bits and take a sample */
kern_return_t
kperf_sample(struct kperf_sample *sbuf,
             struct kperf_context *context,
             unsigned actionid, unsigned sample_flags)
{
        unsigned sample_what = 0;
        int pid_filter;

        /* work out what to sample, if anything */
        if ((actionid > actionc) || (actionid == 0)) {
                return SAMPLE_SHUTDOWN;
        }

        /* check the pid filter against the context's current pid.
         * filter pid == -1 means any pid
         */
        pid_filter = actionv[actionid - 1].pid_filter;
        if ((pid_filter != -1) && (pid_filter != context->cur_pid)) {
                return SAMPLE_CONTINUE;
        }

        /* the samplers to run */
        sample_what = actionv[actionid - 1].sample;

        /* do the actual sample operation */
        return kperf_sample_internal(sbuf, context, sample_what,
                                     sample_flags, actionid);
}
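
/*
 * Illustrative sketch (not part of the original source): this is roughly
 * how a trigger is expected to drive kperf_sample(). The signpost path in
 * kperf_kdebug_callback() below does exactly this with the per-CPU
 * interrupt sample buffer; other triggers presumably follow the same shape.
 *
 *      struct kperf_context ctx;
 *      struct kperf_sample *sbuf = kperf_intr_sample_buffer();
 *
 *      ctx.cur_thread = chudxnu_current_thread();
 *      ctx.cur_pid = -1;                  // filled in from the thread's task if known
 *      ctx.trigger_type = TRIGGER_TYPE_TRACE;
 *      ctx.trigger_id = 0;
 *
 *      (void)kperf_sample(sbuf, &ctx, actionid, SAMPLE_FLAG_PEND_USER);
 */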

/* AST callback on a thread */
void
kperf_thread_ast_handler(thread_t thread)
{
        int r;
        uint32_t t_chud;
        unsigned sample_what = 0;
        /* we know we're on a thread, so let's do stuff */
        task_t task = NULL;

        BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_START, thread);

        /* use ~2kb of the stack for the sample, should be ok since we're in the ast */
        struct kperf_sample sbuf;
        memset(&sbuf, 0, sizeof(struct kperf_sample));

        /* make a context, take a sample */
        struct kperf_context ctx;
        ctx.cur_thread = thread;
        ctx.cur_pid = -1;

        task = chudxnu_task_for_thread(thread);
        if (task) {
                ctx.cur_pid = chudxnu_pid_for_task(task);
        }

        /* decode the chud bits so we know what to sample */
        t_chud = kperf_get_thread_bits(thread);

        if (t_chud & T_AST_NAME) {
                sample_what |= SAMPLER_TINFOEX;
        }

        if (t_chud & T_AST_CALLSTACK) {
                sample_what |= SAMPLER_USTACK;
                sample_what |= SAMPLER_TINFO;
        }

        /* do the sample, just of the user stuff */
        r = kperf_sample_internal(&sbuf, &ctx, sample_what, 0, 0);

        BUF_INFO1(PERF_AST_HNDLR | DBG_FUNC_END, r);
}

/* register AST bits */
int
kperf_ast_pend(thread_t cur_thread, uint32_t check_bits,
               uint32_t set_bits)
{
        /* pend on the thread */
        uint32_t t_chud, set_done = 0;

        /* can only pend on the current thread */
        if (cur_thread != chudxnu_current_thread()) {
                panic("pending to non-current thread");
        }

        /* get our current bits */
        t_chud = kperf_get_thread_bits(cur_thread);

        /* see if it's already been done or pended */
        if (!(t_chud & check_bits)) {
                /* set the bit on the thread */
                t_chud |= set_bits;
                kperf_set_thread_bits(cur_thread, t_chud);

                /* set the actual AST */
                kperf_set_thread_ast(cur_thread);

                set_done = 1;
        }

        return set_done;
}
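
/*
 * Illustrative sketch (not part of the original source): the pend/AST pair
 * above works together. A caller that cannot safely walk a user stack from
 * its current context (e.g. the kdebug callback below) requests it with
 *
 *      kperf_ast_pend(thread, T_AST_CALLSTACK, T_AST_CALLSTACK);
 *
 * and when the thread next drains its ASTs, kperf_thread_ast_handler() runs
 * on that thread, sees T_AST_CALLSTACK in the chud bits, and samples
 * SAMPLER_USTACK | SAMPLER_TINFO from a safe context.
 */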

/*
 * kdebug callback & stack management
 */

#define IS_END(debugid)           ((debugid & 3) == DBG_FUNC_END)
#define IS_MIG(debugid)           (IS_END(debugid) && ((debugid & 0xff000000U) == KDBG_CLASS_ENCODE((unsigned)DBG_MIG, 0U)))
#define IS_MACH_SYSCALL(debugid)  (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_EXCP_SC)))
#define IS_VM_FAULT(debugid)      (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_VM)))
#define IS_BSD_SYSCTLL(debugid)   (IS_END(debugid) && (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_BSD, DBG_BSD_EXCP_SC)))
#define IS_APPS_SIGNPOST(debugid) (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_APPS, DBG_MACH_CHUD))
#define IS_MACH_SIGNPOST(debugid) (KDBG_CLASS_DECODE(debugid) == KDBG_CLASS_ENCODE(DBG_MACH, DBG_MACH_CHUD))
#define IS_ENERGYTRACE(debugid)   ((debugid & 0xff000000U) == KDBG_CLASS_ENCODE((unsigned)DBG_ENERGYTRACE, 0U))
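
/*
 * Note (added for clarity, not from the original source): a kdebug debugid
 * packs the class in its top byte and the subclass in the next byte, with
 * the low two bits carrying the DBG_FUNC_START/END qualifier -- which is
 * why the macros above compare against KDBG_CLASS_ENCODE()/
 * KDBG_CLASS_DECODE() and mask with 0xff000000 or 3. Together they form
 * the allow-list of tracepoints (MIG, Mach/BSD syscalls, VM faults,
 * signposts, energytrace) that kperf_kdebug_callback() will react to.
 */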

void
kperf_kdebug_callback(uint32_t debugid)
{
        int cur_pid = 0;
        task_t task = NULL;

        if (!kdebug_callstacks && !kperf_signpost_action) {
                return;
        }

        /* if we're looking at a kperf tracepoint, don't recurse */
        if ((debugid & 0xff000000) == KDBG_CLASS_ENCODE(DBG_PERF, 0)) {
                return;
        }

        /* ensure interrupts are already off thanks to kdebug */
        if (ml_get_interrupts_enabled()) {
                return;
        }

        /* make sure we're not being called recursively. */
#if NOTYET
        if (kperf_kdbg_recurse(KPERF_RECURSE_IN)) {
                return;
        }
#endif

        /* check the happy list of trace codes */
        if (!(IS_MIG(debugid)
              || IS_MACH_SYSCALL(debugid)
              || IS_VM_FAULT(debugid)
              || IS_BSD_SYSCTLL(debugid)
              || IS_MACH_SIGNPOST(debugid)
              || IS_ENERGYTRACE(debugid)
              || IS_APPS_SIGNPOST(debugid)))
        {
                return;
        }

        /* check for kernel */
        thread_t thread = chudxnu_current_thread();
        task = chudxnu_task_for_thread(thread);
        if (task) {
                cur_pid = chudxnu_pid_for_task(task);
        }
        if (!cur_pid) {
                return;
        }

        if (kdebug_callstacks) {
                /* dicing with death */
                BUF_INFO2(PERF_KDBG_HNDLR, debugid, cur_pid);

                /* pend the AST */
                kperf_ast_pend(thread, T_AST_CALLSTACK, T_AST_CALLSTACK);
        }

        if (kperf_signpost_action && (IS_MACH_SIGNPOST(debugid)
            || IS_APPS_SIGNPOST(debugid)))
        {
#if NOTYET
                /* make sure we're not being called recursively. */
                if (kperf_kdbg_recurse(KPERF_RECURSE_IN)) {
                        return;
                }
#endif

                /* set up a context */
                struct kperf_context ctx;
                struct kperf_sample *intbuf = NULL;
                BUF_INFO2(PERF_SIGNPOST_HNDLR | DBG_FUNC_START, debugid, cur_pid);

                ctx.cur_thread = thread;
                ctx.cur_pid = cur_pid;
                ctx.trigger_type = TRIGGER_TYPE_TRACE;
                ctx.trigger_id = 0;

                /* CPU sample buffer -- only valid with interrupts off (above).
                 * Technically this isn't true -- tracepoints can be, and often
                 * are, cut from interrupt handlers, but none of those
                 * tracepoints should make it this far.
                 */
                intbuf = kperf_intr_sample_buffer();

                /* do the sample */
                kperf_sample(intbuf, &ctx, kperf_signpost_action,
                             SAMPLE_FLAG_PEND_USER);

                BUF_INFO2(PERF_SIGNPOST_HNDLR | DBG_FUNC_END, debugid, cur_pid);
#if NOTYET
                /* no longer recursive */
                kperf_kdbg_recurse(KPERF_RECURSE_OUT);
#endif
        }
}
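
/*
 * Illustrative sketch (an assumption, not from the original source): a user
 * process can reach the signpost path above by cutting a kdebug tracepoint
 * in the DBG_APPS / DBG_MACH_CHUD class-subclass pair, for example via the
 * kdebug_trace() syscall wrapper where available:
 *
 *      kdebug_trace(KDBG_CODE(DBG_APPS, DBG_MACH_CHUD, code) | DBG_FUNC_NONE,
 *                   arg1, arg2, arg3, arg4);
 *
 * Any debugid matching IS_APPS_SIGNPOST() or IS_MACH_SIGNPOST() then runs
 * the action configured through kperf_signpost_action_set().
 */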

static void
kperf_kdbg_callback_update(void)
{
        unsigned old_callback_set = kperf_kdbg_callback_set;

        /* compute new callback state */
        kperf_kdbg_callback_set = kdebug_callstacks || kperf_signpost_action;

        if (old_callback_set && !kperf_kdbg_callback_set) {
                /* callback should no longer be set */
                chudxnu_kdebug_callback_cancel();
        } else if (!old_callback_set && kperf_kdbg_callback_set) {
                /* callback must now be set */
                chudxnu_kdebug_callback_enter(NULL);
        }
}

int
kperf_kdbg_get_stacks(void)
{
        return kdebug_callstacks;
}

int
kperf_kdbg_set_stacks(int newval)
{
        kdebug_callstacks = newval;
        kperf_kdbg_callback_update();

        return 0;
}

int
kperf_signpost_action_get(void)
{
        return kperf_signpost_action;
}

int
kperf_signpost_action_set(int newval)
{
        kperf_signpost_action = newval;
        kperf_kdbg_callback_update();

        return 0;
}

/*
 * Thread switch
 */

/* called from context switch handler */
void
kperf_switch_context(__unused thread_t old, thread_t new)
{
        task_t task = get_threadtask(new);
        int pid = chudxnu_pid_for_task(task);

        /* cut a tracepoint to tell us what the new thread's PID is
         * for Instruments
         */
        BUF_DATA2(PERF_TI_CSWITCH, thread_tid(new), pid);

        /* trigger action after counters have been updated */
        if (kperf_cswitch_action) {
                struct kperf_sample sbuf;
                struct kperf_context ctx;
                int r;

                BUF_DATA1(PERF_CSWITCH_HNDLR | DBG_FUNC_START, 0);

                ctx.cur_pid = 0;
                ctx.cur_thread = old;

                /* get PID for context */
                task_t old_task = chudxnu_task_for_thread(ctx.cur_thread);
                if (old_task) {
                        ctx.cur_pid = chudxnu_pid_for_task(old_task);
                }

                ctx.trigger_type = TRIGGER_TYPE_CSWITCH;
                ctx.trigger_id = 0;

                r = kperf_sample(&sbuf, &ctx, kperf_cswitch_action,
                                 SAMPLE_FLAG_PEND_USER);

                BUF_INFO1(PERF_CSWITCH_HNDLR | DBG_FUNC_END, r);
        }
}

static void
kperf_cswitch_callback_update(void)
{
        unsigned old_callback_set = kperf_cswitch_callback_set;

        unsigned new_callback_set = kdebug_cswitch || kperf_cswitch_action;

        if (old_callback_set && !new_callback_set) {
                kperf_cswitch_callback_set = 0;
        } else if (!old_callback_set && new_callback_set) {
                kperf_cswitch_callback_set = 1;
        } else {
                return;
        }

        kperf_kpc_cswitch_callback_update();
}

int
kperf_kdbg_cswitch_get(void)
{
        return kdebug_cswitch;
}

int
kperf_kdbg_cswitch_set(int newval)
{
        kdebug_cswitch = newval;
        kperf_cswitch_callback_update();

        return 0;
}

int
kperf_cswitch_action_get(void)
{
        return kperf_cswitch_action;
}

int
kperf_cswitch_action_set(int newval)
{
        kperf_cswitch_action = newval;
        kperf_cswitch_callback_update();

        return 0;
}

/*
 * Action configuration
 */
unsigned
kperf_action_get_count(void)
{
        return actionc;
}

int
kperf_action_set_samplers(unsigned actionid, uint32_t samplers)
{
        if ((actionid > actionc) || (actionid == 0)) {
                return EINVAL;
        }

        /* disallow both CPU and thread counters to be sampled in the same
         * action */
        if ((samplers & SAMPLER_PMC_THREAD) && (samplers & SAMPLER_PMC_CPU)) {
                return EINVAL;
        }

        actionv[actionid - 1].sample = samplers;

        return 0;
}

int
kperf_action_get_samplers(unsigned actionid, uint32_t *samplers_out)
{
        if (actionid > actionc) {
                return EINVAL;
        }

        if (actionid == 0) {
                *samplers_out = 0; /* "NULL" action */
        } else {
                *samplers_out = actionv[actionid - 1].sample;
        }

        return 0;
}

int
kperf_action_set_userdata(unsigned actionid, uint32_t userdata)
{
        if ((actionid > actionc) || (actionid == 0)) {
                return EINVAL;
        }

        actionv[actionid - 1].userdata = userdata;

        return 0;
}

int
kperf_action_get_userdata(unsigned actionid, uint32_t *userdata_out)
{
        if (actionid > actionc) {
                return EINVAL;
        }

        if (actionid == 0) {
                *userdata_out = 0; /* "NULL" action */
        } else {
                *userdata_out = actionv[actionid - 1].userdata;
        }

        return 0;
}

int
kperf_action_set_filter(unsigned actionid, int pid)
{
        if ((actionid > actionc) || (actionid == 0)) {
                return EINVAL;
        }

        actionv[actionid - 1].pid_filter = pid;

        return 0;
}

int
kperf_action_get_filter(unsigned actionid, int *pid_out)
{
        if (actionid > actionc) {
                return EINVAL;
        }

        if (actionid == 0) {
                *pid_out = -1; /* "NULL" action */
        } else {
                *pid_out = actionv[actionid - 1].pid_filter;
        }

        return 0;
}

int
kperf_action_set_count(unsigned count)
{
        struct action *new_actionv = NULL, *old_actionv = NULL;
        unsigned old_count, i;

        /* easy no-op */
        if (count == actionc) {
                return 0;
        }

        /* TODO: allow shrinking? */
        if (count < actionc) {
                return EINVAL;
        }

        /* cap it for good measure */
        if (count > ACTION_MAX) {
                return EINVAL;
        }

        /* creating the action array for the first time. create a few
         * more things, too.
         */
        if (actionc == 0) {
                int r;
                r = kperf_init();

                if (r != 0) {
                        return r;
                }
        }

        /* create a new array */
        new_actionv = kalloc(count * sizeof(*new_actionv));
        if (new_actionv == NULL) {
                return ENOMEM;
        }

        old_actionv = actionv;
        old_count = actionc;

        if (old_actionv != NULL) {
                memcpy(new_actionv, actionv, actionc * sizeof(*actionv));
        }

        memset(&(new_actionv[actionc]), 0, (count - old_count) * sizeof(*actionv));

        /* new slots default to sampling nothing and matching any pid */
        for (i = old_count; i < count; i++) {
                new_actionv[i].pid_filter = -1;
        }

        actionv = new_actionv;
        actionc = count;

        if (old_actionv != NULL) {
                kfree(old_actionv, old_count * sizeof(*actionv));
        }

        return 0;
}
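
/*
 * Illustrative sketch (not part of the original source): how a control path,
 * e.g. the kperf sysctl interface, might use the setters above to configure
 * action 1 to take kernel callstacks and thread info for any pid and tag the
 * samples with a user-chosen value; 0xfeed is a made-up tag.
 *
 *      kperf_action_set_count(1);
 *      kperf_action_set_samplers(1, SAMPLER_KSTACK | SAMPLER_TINFO);
 *      kperf_action_set_userdata(1, 0xfeed);
 *      kperf_action_set_filter(1, -1);        // -1 == match any pid
 *
 * A trigger configured with actionid 1 would then run those samplers via
 * kperf_sample().
 */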