/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);
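
/*
 * Layout sketch (illustration only, not part of the build): VME_FOR_STORE
 * below assumes that a vm_map_entry begins with its list links, immediately
 * followed by the rb-tree node ("store") managed by this file, roughly:
 *
 *     struct vm_map_entry {
 *         struct vm_map_links links;  // vme_prev / vme_next / vme_start / vme_end
 *         struct vm_map_store store;  // RB_ENTRY node linked into rb_head_store
 *         ...
 *     };
 *
 * so backing a store pointer up by sizeof(struct vm_map_links) recovers the
 * owning entry. Field details beyond links/store are assumptions from context.
 */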
#define VME_FOR_STORE( store) \
    (vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))
void
vm_map_store_init_rb( struct vm_map_header *hdr )
{
    RB_INIT(&(hdr->rb_head_store));
}
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
    vm_map_entry_t vme_c;
    vm_map_entry_t vme_p;

    vme_c = VME_FOR_STORE(node);
    vme_p = VME_FOR_STORE(parent);
    if (vme_c->vme_start < vme_p->vme_start) {
        return -1;
    }
    if (vme_c->vme_start >= vme_p->vme_end) {
        return 1;
    }
    return 0;
}
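
/*
 * Note that the comparator returns 0 for any overlap, not just exact
 * equality: an entry whose start falls inside another entry's
 * [vme_start, vme_end) range compares "equal" to it. RB_FIND will match
 * such an entry, and RB_INSERT will refuse to insert one (see the panic
 * in vm_map_store_entry_link_rb below).
 */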
void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
    struct vm_map_header *hdr = &map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
    vm_map_entry_t cur = *vm_entry;

    rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store));
    if (rb_entry == NULL) {
        panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
    } else {
        panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry),
            VME_FOR_STORE(RB_LEFT(rb_entry, entry)),
            VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
    }
}
boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
    struct vm_map_header *hdr = &map->hdr;
    struct vm_map_store  *rb_entry = RB_ROOT(&hdr->rb_head_store);
    vm_map_entry_t       cur = vm_map_to_entry(map);
    vm_map_entry_t       prev = VM_MAP_ENTRY_NULL;

    while (rb_entry != (struct vm_map_store *)NULL) {
        cur = VME_FOR_STORE(rb_entry);
        if (cur == VM_MAP_ENTRY_NULL) {
            panic("no entry");
        }
        if (address >= cur->vme_start) {
            if (address < cur->vme_end) {
                *vm_entry = cur;
                return TRUE;
            }
            rb_entry = RB_RIGHT(rb_entry, entry);
            prev = cur;
        } else {
            rb_entry = RB_LEFT(rb_entry, entry);
        }
    }
    if (prev == VM_MAP_ENTRY_NULL) {
        prev = vm_map_to_entry(map);
    }
    *vm_entry = prev;
    return FALSE;
}
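
/*
 * Usage sketch (hypothetical caller, illustration only):
 *
 *     vm_map_entry_t entry;
 *     if (vm_map_store_lookup_entry_rb(map, addr, &entry)) {
 *         // addr lies within [entry->vme_start, entry->vme_end)
 *     } else {
 *         // entry is the closest entry preceding addr, or the map
 *         // header sentinel if addr is below every entry
 *     }
 */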
void
vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store = &(entry->store);
    struct vm_map_store *tmp_store;

    if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
        panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
            (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
    }
}
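
/*
 * Because rb_node_compare treats overlapping ranges as equal, a non-NULL
 * return from RB_INSERT means the new entry's range collides with an
 * entry already in the tree; the panic reports both ranges.
 */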
void
vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *rb_entry;
    struct vm_map_store *store = &(entry->store);

    rb_entry = RB_FIND( rb_head, rbh, store);
    if (rb_entry == NULL) {
        panic("NO ENTRY TO DELETE");
    }
    RB_REMOVE( rb_head, rbh, store );
}
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
    struct vm_map_header *mapHdr = &(copy->cpy_hdr);
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store;

    while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
        store = &(entry->store);
        RB_REMOVE( rb_head, rbh, store );
        entry = entry->vme_next;
        nentries--;
    }
}
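
/*
 * Note: this only strips the store nodes out of the copy's rb tree; the
 * entries themselves stay on the doubly-linked list, presumably for the
 * caller to walk and dispose of.
 */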
extern zone_t vm_map_holes_zone;        /* zone for vm map holes (vm_map_links) structures */
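
/*
 * Hole bookkeeping: the unallocated ranges ("holes") of a map are tracked
 * as a circular, address-ordered, doubly-linked list of struct
 * vm_map_links hanging off map->holes_list, with map->hole_hint caching a
 * recently used hole. The helpers below keep that list consistent as map
 * entries are created and deleted.
 */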
static void vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);

static void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
    vm_map_entry_t middle_hole_entry, last_hole_entry;

    hole_entry->vme_end = hole_entry->vme_next->vme_end;

    middle_hole_entry = hole_entry->vme_next;
    last_hole_entry = middle_hole_entry->vme_next;

    assert(last_hole_entry->vme_prev == middle_hole_entry);
    assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

    last_hole_entry->vme_prev = hole_entry;
    hole_entry->vme_next = last_hole_entry;

    middle_hole_entry->vme_prev = NULL;
    middle_hole_entry->vme_next = NULL;

    zfree(vm_map_holes_zone, middle_hole_entry);

    assert(hole_entry->vme_start < hole_entry->vme_end);
    assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}
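
/*
 * Sketch of the combine above, given three consecutive holes
 * H1 -> H2 -> H3 where the gap between H1 and H2 has just closed:
 * H1 absorbs H2's range (H1->vme_end = H2->vme_end), H2 is unlinked and
 * freed back to vm_map_holes_zone, and H1 <-> H3 are stitched together.
 */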
static void vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);

static void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
    if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
        if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            map->holes_list = NULL;
            SAVE_HINT_HOLE_WRITE(map, NULL);
        } else {
            vm_map_entry_t l_next, l_prev;

            l_next = (vm_map_entry_t) map->holes_list->next;
            l_prev = (vm_map_entry_t) map->holes_list->prev;
            map->holes_list = (struct vm_map_links *) l_next;

            l_next->vme_prev = l_prev;
            l_prev->vme_next = l_next;

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) l_next);
        }
    } else {
        SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry->vme_prev);

        hole_entry->vme_prev->vme_next = hole_entry->vme_next;
        hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
    }

    hole_entry->vme_next = NULL;
    hole_entry->vme_prev = NULL;
    zfree(vm_map_holes_zone, hole_entry);
}
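
/*
 * Two unlink shapes above: deleting the list head either empties the list
 * outright (it was the only hole) or advances map->holes_list to the next
 * hole; deleting an interior hole stitches its neighbors together and
 * moves the hint back to the previous hole.
 */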
extern int vm_check_map_sanity;
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
    vm_map_entry_t map_entry, next_map_entry;

    if (map->holes_list == NULL) {
        return;
    }

    hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
    next_hole_entry = hole_entry->vme_next;

    map_entry = vm_map_first_entry(map);
    next_map_entry = map_entry->vme_next;

    while (map_entry->vme_start > hole_entry->vme_start) {
        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
            break;
        }
    }

    while (map_entry != vm_map_to_entry(map)) {
        if (map_entry->vme_start >= map->max_offset) {
            break;
        }

        if (map_entry->vme_end != map_entry->vme_next->vme_start) {
            if (map_entry->vme_next == vm_map_to_entry(map)) {
                break;
            }

            if (hole_entry->vme_start != map_entry->vme_end) {
                panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
                assert(hole_entry->vme_start == map_entry->vme_end);
            }

            if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
                panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
                assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
            }

            hole_entry = next_hole_entry;
            next_hole_entry = hole_entry->vme_next;

            if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
                break;
            }
        }

        map_entry = map_entry->vme_next;
    }
}
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
    old_hole_entry->vme_prev = hole_entry->vme_prev;
    old_hole_entry->vme_next = hole_entry->vme_next;
    old_hole_entry->vme_start = hole_entry->vme_start;
    old_hole_entry->vme_end = hole_entry->vme_end;
}
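
/*
 * copy_hole_info snapshots a hole into a caller-provided stack entry so
 * that the sanity checks can still describe the pre-modification state
 * after the hole has been resized, relinked, or freed.
 */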
static void update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);

static void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
    /*
     * Dealing with the deletion of an older entry.
     */

    vm_map_entry_t hole_entry, next_hole_entry;
    struct vm_map_entry old_hole_entry;
    boolean_t create_new_hole = TRUE;

    hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

    if (hole_entry) {
        if (hole_entry->vme_end == old_entry->vme_start) {
            /*
             * Found a hole right above our entry.
             * Nothing to do.
             */
        } else if (hole_entry->vme_start == old_entry->vme_end) {
            if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                /*
                 * Found a hole right below our entry but
                 * make sure we don't erroneously extend backwards.
                 *
                 * Hole is right above the entry.
                 */
                hole_entry = hole_entry->vme_prev;
            }
        } else if (hole_entry->vme_start > old_entry->vme_end) {
            /*
             * Useless hint. Start from the top.
             */
            hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
        }

        if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            if (hole_entry->vme_start > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                    (unsigned long long)hole_entry->vme_start,
                    (unsigned long long)old_entry->vme_start,
                    (unsigned long long)map->holes_list->start,
                    (unsigned long long)map->hole_hint->start);
            }
            if (hole_entry->vme_end > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                    (unsigned long long)hole_entry->vme_end,
                    (unsigned long long)old_entry->vme_start,
                    (unsigned long long)map->holes_list->start,
                    (unsigned long long)map->hole_hint->start);
            }
        }

        while (1) {
            next_hole_entry = hole_entry->vme_next;

            /*
             * Hole is right above the entry.
             */
            if (hole_entry->vme_end == old_entry->vme_start) {
                copy_hole_info(hole_entry, &old_hole_entry);

                /*
                 * Is there another hole right below the entry?
                 * Can we combine holes?
                 */
                if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
                    vm_map_combine_hole(map, hole_entry);
                } else {
                    hole_entry->vme_end = old_entry->vme_end;
                }
                create_new_hole = FALSE;

                if (vm_check_map_sanity) {
                    check_map_sanity(map, &old_hole_entry);
                }
                break;
            }

            /*
             * Hole is right below the entry.
             */
            if (hole_entry->vme_start == old_entry->vme_end) {
                copy_hole_info(hole_entry, &old_hole_entry);

                hole_entry->vme_start = old_entry->vme_start;
                create_new_hole = FALSE;

                if (vm_check_map_sanity) {
                    check_map_sanity(map, &old_hole_entry);
                }
                break;
            }

            /*
             * Hole is beyond our entry. Let's go back to the last hole
             * before our entry so we have the right place to link up the
             * new hole that will be needed.
             */
            if (hole_entry->vme_start > old_entry->vme_end) {
                copy_hole_info(hole_entry, &old_hole_entry);

                if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                    assert(hole_entry->vme_start != old_entry->vme_start);
                    hole_entry = hole_entry->vme_prev;
                }
                break;
            }

            hole_entry = next_hole_entry;

            if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                hole_entry = hole_entry->vme_prev;
                break;
            }
        }
    }

    if (create_new_hole) {
        struct vm_map_links *new_hole_entry = NULL;
        vm_map_entry_t l_next, l_prev;

        new_hole_entry = zalloc(vm_map_holes_zone);

        /*
         * First hole in the map?
         * OR
         * A hole that is located above the current first hole in the map?
         */
        if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
            if (map->holes_list == NULL) {
                map->holes_list = new_hole_entry;
                new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
            } else {
                l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
                l_prev = map->holes_list->prev;
                map->holes_list = new_hole_entry;
                new_hole_entry->next = l_next;
                new_hole_entry->prev = l_prev;

                l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            }
        } else {
            l_next = hole_entry->vme_next;
            l_prev = hole_entry->vme_next->vme_prev;

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = l_next;

            hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
        }

        new_hole_entry->start = old_entry->vme_start;
        new_hole_entry->end = old_entry->vme_end;

        hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

        assert(new_hole_entry->start < new_hole_entry->end);

        if (vm_check_map_sanity) {
            check_map_sanity(map, &old_hole_entry);
        }
    }

    SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
}
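
/*
 * Worked example (assumed addresses, illustration only): with holes
 * [0x5000,0x6000) and [0x9000,0xA000), deleting an entry [0x6000,0x7000)
 * extends the first hole to [0x5000,0x7000); deleting [0x8000,0x9000)
 * instead pulls the second hole back to [0x8000,0xA000); deleting
 * [0x7000,0x8000) after both would trigger vm_map_combine_hole and leave
 * one hole [0x5000,0xA000); and deleting an entry not adjacent to any
 * hole allocates a fresh hole from vm_map_holes_zone.
 */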
static void update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);

static void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
    struct vm_map_entry old_hole_entry;
    vm_map_entry_t tmp_entry;
    boolean_t check_map_with_hole_sanity = TRUE;

    /*
     * Case A: The entry is aligned exactly with the start and end of the hole.
     *         This will delete the hole.
     *
     * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
     *         This will split a hole.
     *
     * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
     *         This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
     */

    hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
    assert(hole_entry);
    next_hole_entry = hole_entry->vme_next;

    while (1) {
        /*
         * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
         * the entries belonging to the copy map are linked into the list of entries silently and
         * then added to the RB-tree later on.
         * So sanity checks are useless in that case.
         */
        check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);

        if (hole_entry->vme_start == new_entry->vme_start &&
            hole_entry->vme_end == new_entry->vme_end) {
            /* Case A */
            copy_hole_info(hole_entry, &old_hole_entry);

            /*
             * This check makes sense only for regular maps, not copy maps.
             * With a regular map, the VM entry is first linked and then
             * the hole is deleted. So the check below, which makes sure that
             * the map's bounds are being respected, is valid.
             * But for copy maps, the hole is deleted before the VM entry is
             * linked (vm_map_store_copy_insert) and so this check is invalid.
             *
             * if (hole_entry == (vm_map_entry_t) map->holes_list) {
             *
             *	if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
             *
             *		next_hole_entry = vm_map_last_entry(map);
             *		assert(next_hole_entry->vme_end >= map->max_offset);
             *	}
             * }
             */

            vm_map_delete_hole(map, hole_entry);

            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
            return;
        } else if (hole_entry->vme_start < new_entry->vme_start &&
            hole_entry->vme_end > new_entry->vme_end) {
            /* Case B */
            struct vm_map_links *new_hole_entry = NULL;

            new_hole_entry = zalloc(vm_map_holes_zone);

            copy_hole_info(hole_entry, &old_hole_entry);

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = hole_entry->vme_next;
            hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

            new_hole_entry->start = new_entry->vme_end;
            new_hole_entry->end = hole_entry->vme_end;
            hole_entry->vme_end = new_entry->vme_start;

            assert(hole_entry->vme_start < hole_entry->vme_end);
            assert(new_hole_entry->start < new_hole_entry->end);

            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
            return;
        } else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
            /*
             * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
             */
            copy_hole_info(hole_entry, &old_hole_entry);

            if (hole_entry->vme_end <= new_entry->vme_end) {
                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_start = new_entry->vme_end;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
            }

            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
            return;
        } else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
            /*
             * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
             */
            copy_hole_info(hole_entry, &old_hole_entry);

            if (hole_entry->vme_start >= new_entry->vme_start) {
                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_end = new_entry->vme_start;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
            }

            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
            return;
        }

        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            break;
        }
    }

    panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
        hole_entry->vme_prev,
        (unsigned long long)hole_entry->vme_prev->vme_start,
        (unsigned long long)hole_entry->vme_prev->vme_end,
        hole_entry,
        (unsigned long long)hole_entry->vme_start,
        (unsigned long long)hole_entry->vme_end,
        hole_entry->vme_next,
        (unsigned long long)hole_entry->vme_next->vme_start,
        (unsigned long long)hole_entry->vme_next->vme_end);
}
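
/*
 * Worked example (assumed addresses, illustration only): with a single
 * hole [0x4000,0x8000), creating an entry [0x4000,0x8000) deletes the
 * hole (Case A); creating [0x5000,0x6000) splits it into [0x4000,0x5000)
 * and [0x6000,0x8000) (Case B); creating [0x3000,0x5000) trims it to
 * [0x5000,0x8000) (Case C1); and creating [0x7000,0x9000) trims it to
 * [0x4000,0x7000) (Case C2).
 */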
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
    if (map->holelistenabled) {
        /*
         * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
         */
        vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

        /*
         * Clipping an entry will not result in the creation/deletion/modification of
         * a hole. Those calls pass NULL for their target entry.
         */
        if (entry == NULL) {
            return;
        }

        /*
         * Commpage is pinned beyond the map's max offset. That shouldn't affect the
         * holes within the bounds of the map.
         */
        if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
            return;
        }

        /*
         * - A new entry has already been added to the map
         * OR
         * - An older entry has already been deleted from the map
         *
         * We are updating the hole list after the fact (except in one special case involving copy maps).
         */
        if (new_entry_creation) {
            update_holes_on_entry_creation(map, entry);
        } else {
            update_holes_on_entry_deletion(map, entry);
        }
    }
}