ia64/xen-unstable

annotate xen/arch/ia64/vmx/vmx_init.c @ 10695:6703fed8870f

[IA64] enable acceleration of external interrupt

This patch enables acceleration of external interrupts,
as described in the VT-i spec.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Jul 12 13:20:15 2006 -0600 (2006-07-12)
parents 63595abd80c5
children a59843bb699e
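
In substance the changeset is one new bit in the VPD's virtualization
acceleration control (vac) field, set in alloc_vpd() below (see the
awilliam@10695 annotations):

    mregs->vac.a_int = 1;

Per the VT-i spec, this lets external interrupt delivery to the guest
be accelerated by the processor/VSA rather than fully emulated by Xen.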
rev   line source
djm@6458 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
djm@6458 2 /*
djm@6458 3 * vmx_init.c: initialization work for vt specific domain
djm@6458 4 * Copyright (c) 2005, Intel Corporation.
djm@6458 5 * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
djm@6458 6 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
djm@6458 7 * Fred Yang <fred.yang@intel.com>
djm@6458 8 *
djm@6458 9 * This program is free software; you can redistribute it and/or modify it
djm@6458 10 * under the terms and conditions of the GNU General Public License,
djm@6458 11 * version 2, as published by the Free Software Foundation.
djm@6458 12 *
djm@6458 13 * This program is distributed in the hope it will be useful, but WITHOUT
djm@6458 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
djm@6458 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
djm@6458 16 * more details.
djm@6458 17 *
djm@6458 18 * You should have received a copy of the GNU General Public License along with
djm@6458 19 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
djm@6458 20 * Place - Suite 330, Boston, MA 02111-1307 USA.
djm@6458 21 *
djm@6458 22 */
djm@6458 23
djm@6458 24 /*
djm@6458 25 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
djm@6458 26 * Disable double mapping
djm@6458 27 *
djm@6458 28 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
djm@6458 29 * Simplified design in the first step:
djm@6458 30 * - One virtual environment
djm@6458 31 * - Domain is bound to one LP
djm@6458 32 * Later to support guest SMP:
djm@6458 33 * - Need interface to handle VP scheduled to different LP
djm@6458 34 */
djm@6458 35 #include <xen/config.h>
djm@6458 36 #include <xen/types.h>
djm@6458 37 #include <xen/sched.h>
djm@6458 38 #include <asm/pal.h>
djm@6458 39 #include <asm/page.h>
djm@6458 40 #include <asm/processor.h>
djm@6458 41 #include <asm/vmx_vcpu.h>
djm@6458 42 #include <xen/lib.h>
djm@6458 43 #include <asm/vmmu.h>
djm@6458 44 #include <public/arch-ia64.h>
kaf24@8492 45 #include <public/hvm/ioreq.h>
djm@6458 46 #include <asm/vmx_phy_mode.h>
djm@6458 48 #include <asm/vmx.h>
djm@6458 49 #include <xen/mm.h>
kaf24@8708 51 #include <asm/hvm/vioapic.h>
kaf24@8971 52 #include <public/event_channel.h>
awilliam@9169 53 #include <xen/event.h>
awilliam@9169 54 #include <asm/vlsapic.h>
djm@6458 55
djm@6458 56 /* Global flag to identify whether Intel vmx feature is on */
djm@6458 57 u32 vmx_enabled = 0;
kaf24@7720 58 unsigned int opt_vmx_debug_level = 0;
djm@6458 59 static u32 vm_order;
djm@6458 60 static u64 buffer_size;
djm@6458 61 static u64 vp_env_info;
djm@6458 62 static u64 vm_buffer = 0; /* Buffer required to bring up VMX feature */
djm@6458 63 u64 __vsa_base = 0; /* Run-time service base of VMX */
djm@6458 64
djm@6458 65 /* Check whether vt feature is enabled or not. */
djm@6458 66 void
djm@6458 67 identify_vmx_feature(void)
djm@6458 68 {
djm@6458 69 pal_status_t ret;
djm@6458 70 u64 avail = 1, status = 1, control = 1;
djm@6458 71
djm@6458 72 vmx_enabled = 0;
djm@6458 73 /* Check VT-i feature */
djm@6458 74 ret = ia64_pal_proc_get_features(&avail, &status, &control);
djm@6458 75 if (ret != PAL_STATUS_SUCCESS) {
djm@6458 76 printk("Get proc features failed.\n");
djm@6458 77 goto no_vti;
djm@6458 78 }
djm@6458 79
djm@6458 80 /* FIXME: do we need to check the status field, to see whether
djm@6458 81 * PSR.vm is actually enabled? If yes, another call to
djm@6458 82 * ia64_pal_proc_set_features may be required then.
djm@6458 83 */
djm@6458 84 printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n",
djm@6458 85 avail, status, control, avail & PAL_PROC_VM_BIT);
djm@6458 86 if (!(avail & PAL_PROC_VM_BIT)) {
djm@6458 87 printk("No VT feature supported.\n");
djm@6458 88 goto no_vti;
djm@6458 89 }
djm@6458 90
djm@6458 91 ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
djm@6458 92 if (ret != PAL_STATUS_SUCCESS) {
djm@6458 93 printk("Get vp environment info failed.\n");
djm@6458 94 goto no_vti;
djm@6458 95 }
djm@6458 96
djm@6458 97 /* Does the hardware provide the faulting opcode, or must Xen decode it itself? */
djm@6458 98 if (!(vp_env_info & VP_OPCODE))
djm@6458 99 printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
djm@6458 100 vm_order = get_order(buffer_size);
awilliam@9164 101 printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);
djm@6458 102
djm@6458 103 vmx_enabled = 1;
djm@6458 104 no_vti:
djm@6458 105 return;
djm@6458 106 }
djm@6458 107
djm@6458 108 /*
djm@6458 109 * Init virtual environment on current LP
djm@6458 110 * vsa_base indicates whether this is the first LP to be initialized
djm@6458 111 * for the current domain.
djm@6458 112 */
djm@6458 113 void
djm@6458 114 vmx_init_env(void)
djm@6458 115 {
djm@6458 116 u64 status, tmp_base;
djm@6458 117
djm@6458 118 if (!vm_buffer) {
awilliam@8916 119 vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
djm@6458 120 ASSERT(vm_buffer);
djm@6458 121 printk("vm_buffer: 0x%lx\n", vm_buffer);
djm@6458 122 }
djm@6458 123
djm@6458 124 status=ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
djm@6458 125 __pa(vm_buffer),
djm@6458 126 vm_buffer,
djm@6458 127 &tmp_base);
djm@6458 128
djm@6458 129 if (status != PAL_STATUS_SUCCESS) {
djm@6458 130 printk("ia64_pal_vp_init_env failed.\n");
awilliam@8916 131 return;
djm@6458 132 }
djm@6458 133
djm@6458 134 if (!__vsa_base)
djm@6458 135 __vsa_base = tmp_base;
djm@6458 136 else
djm@6458 137 ASSERT(tmp_base == __vsa_base);
djm@6458 138
djm@6458 139 }
djm@6458 140
djm@6458 141 typedef union {
djm@6458 142 u64 value;
djm@6458 143 struct {
djm@6458 144 u64 number : 8;
djm@6458 145 u64 revision : 8;
djm@6458 146 u64 model : 8;
djm@6458 147 u64 family : 8;
djm@6458 148 u64 archrev : 8;
djm@6458 149 u64 rv : 24;
djm@6458 150 };
djm@6458 151 } cpuid3_t;
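/* A note on the union above: cpuid3_t mirrors IA-64 CPUID register 3
 * (version information). Its 'number' field holds the index of the
 * largest implemented CPUID register, i.e. the register count minus
 * one, which is why alloc_vpd() below writes 4 to expose exactly
 * CPUID registers 0-4 to the guest. */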
djm@6458 152
djm@6458 153 /* Allocate vpd from xenheap */
djm@6458 154 static vpd_t *alloc_vpd(void)
djm@6458 155 {
djm@6458 156 int i;
djm@6458 157 cpuid3_t cpuid3;
djm@6458 158 vpd_t *vpd;
awilliam@10667 159 mapped_regs_t *mregs;
djm@6458 160
djm@6458 161 vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
djm@6458 162 if (!vpd) {
djm@6458 163 printk("VPD allocation failed.\n");
djm@6458 164 return NULL;
djm@6458 165 }
djm@6458 166
awilliam@9169 167 printk("vpd base: 0x%p, vpd size: %ld\n", vpd, sizeof(vpd_t));
djm@6458 168 memset(vpd, 0, VPD_SIZE);
awilliam@10667 169 mregs = &vpd->vpd_low;
awilliam@10667 170
djm@6458 171 /* CPUID init */
djm@6458 172 for (i = 0; i < 5; i++)
awilliam@10667 173 mregs->vcpuid[i] = ia64_get_cpuid(i);
djm@6458 174
djm@6458 175 /* Limit the CPUID number to 5 */
awilliam@10667 176 cpuid3.value = mregs->vcpuid[3];
djm@6458 177 cpuid3.number = 4; /* 5 - 1 */
awilliam@10667 178 mregs->vcpuid[3] = cpuid3.value;
djm@6458 179
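/* As the field names suggest, the vac bits below enable VT-i
 * acceleration of the corresponding guest operations: moves from/to
 * interruption control registers, reads of PSR and CPUID, cover, and
 * bank switch. a_int, added by this changeset, accelerates external
 * interrupt delivery; vdc.d_vmsw disables the guest's use of vmsw. */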
awilliam@10667 180 mregs->vac.a_from_int_cr = 1;
awilliam@10667 181 mregs->vac.a_to_int_cr = 1;
awilliam@10667 182 mregs->vac.a_from_psr = 1;
awilliam@10667 183 mregs->vac.a_from_cpuid = 1;
awilliam@10667 184 mregs->vac.a_cover = 1;
awilliam@10667 185 mregs->vac.a_bsw = 1;
awilliam@10695 186 mregs->vac.a_int = 1;
awilliam@10695 187
awilliam@10667 188 mregs->vdc.d_vmsw = 1;
awilliam@9011 189
djm@6458 190 return vpd;
djm@6458 191 }
djm@6458 192
awilliam@9376 193 /* Free vpd to xenheap */
awilliam@9376 194 static void
awilliam@9376 195 free_vpd(struct vcpu *v)
awilliam@9376 196 {
awilliam@9376 197 if ( v->arch.privregs )
awilliam@9376 198 free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
awilliam@9376 199 }
djm@6458 200
djm@6458 201 /*
djm@6458 202 * Create a VP on an initialized VMX environment.
djm@6458 203 */
djm@6458 204 static void
djm@6458 205 vmx_create_vp(struct vcpu *v)
djm@6458 206 {
djm@6458 207 u64 ret;
awilliam@10667 208 vpd_t *vpd = (vpd_t *)v->arch.privregs;
djm@6458 209 u64 ivt_base;
djm@6458 210 extern char vmx_ia64_ivt;
djm@6458 211 /* vmx_ia64_ivt is an entry point, so this translation is needed to take its address */
djm@6458 212 ivt_base = (u64) &vmx_ia64_ivt;
djm@6458 213 printk("ivt_base: 0x%lx\n", ivt_base);
awilliam@8916 214 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
awilliam@9982 215 if (ret != PAL_STATUS_SUCCESS){
awilliam@9982 216 panic_domain(vcpu_regs(v), "ia64_pal_vp_create failed.\n");
awilliam@9982 217 }
djm@6458 218 }
djm@6458 219
djm@6458 220 /* Other non-context-related tasks can be done at context switch time */
djm@6458 221 void
djm@6458 222 vmx_save_state(struct vcpu *v)
djm@6458 223 {
awilliam@8916 224 u64 status;
djm@6458 225
djm@6458 226 /* FIXME: about setting of pal_proc_vector... time consuming */
awilliam@8916 227 status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
awilliam@9982 228 if (status != PAL_STATUS_SUCCESS){
awilliam@9982 229 panic_domain(vcpu_regs(v),"Save vp status failed\n");
awilliam@9982 230 }
djm@6458 231
djm@6458 232
djm@6458 233 /* Need to save the KRs on domain switch, though the HV itself doesn't
djm@6458 234 * use them.
djm@6458 235 */
djm@6458 236 v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
djm@6458 237 v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
djm@6458 238 v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
djm@6458 239 v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
djm@6458 240 v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
djm@6458 241 v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
djm@6458 242 v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
djm@6458 243 v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
djm@6458 244 }
djm@6458 245
djm@6458 246 /* Even if the guest is in physical mode, we still need such double mapping */
djm@6458 247 void
djm@6458 248 vmx_load_state(struct vcpu *v)
djm@6458 249 {
awilliam@8916 250 u64 status;
djm@6458 251
awilliam@9169 252 status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
awilliam@9982 253 if (status != PAL_STATUS_SUCCESS){
awilliam@9982 254 panic_domain(vcpu_regs(v),"Restore vp status failed\n");
awilliam@9982 255 }
djm@6458 256
djm@6458 257 ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
djm@6458 258 ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
djm@6458 259 ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
djm@6458 260 ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
djm@6458 261 ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
djm@6458 262 ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
djm@6458 263 ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
djm@6458 264 ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
djm@6458 265 /* Guest vTLB is not required to be switched explicitly, since
djm@6458 266 * it is anchored in the vcpu */
djm@6458 267 }
djm@6458 268
djm@6458 269 /*
djm@6458 270 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
djm@6458 271 * is registered here.
djm@6458 272 */
djm@6458 273 void
kaf24@7720 274 vmx_final_setup_guest(struct vcpu *v)
djm@6458 275 {
djm@6458 276 vpd_t *vpd;
djm@6458 277
djm@6458 278 vpd = alloc_vpd();
djm@6458 279 ASSERT(vpd);
djm@6458 280
awilliam@10667 281 v->arch.privregs = (mapped_regs_t *)vpd;
awilliam@10667 282 vpd->vpd_low.virt_env_vaddr = vm_buffer;
djm@6458 283
djm@6878 284 /* Per-domain vTLB and VHPT implementation. For now a VMX domain sticks
djm@6878 285 * to this solution. Maybe it can be deferred until we know the created
djm@6878 286 * domain is a VMX domain. */
awilliam@9765 287 #ifndef HASH_VHPT
awilliam@9765 288 init_domain_tlb(v);
awilliam@9765 289 #endif
djm@6458 290 /* v->arch.schedule_tail = arch_vmx_do_launch; */
djm@6458 291 vmx_create_vp(v);
djm@6458 292
djm@6458 293 /* Physical mode emulation initialization, including
djm@6458 294 * emulation ID allocation and related memory requests
djm@6458 295 */
djm@6458 296 physical_mode_init(v);
djm@6458 297
djm@6458 298 vlsapic_reset(v);
djm@6458 299 vtm_init(v);
djm@6458 300
kaf24@7720 301 /* One more step to enable interrupt assist */
kaf24@7720 302 set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
djm@6458 303 }
djm@6799 304
awilliam@9376 305 void
awilliam@9376 306 vmx_relinquish_vcpu_resources(struct vcpu *v)
awilliam@9376 307 {
awilliam@9376 308 vtime_t *vtm = &(v->arch.arch_vmx.vtm);
awilliam@9376 309
awilliam@9376 310 kill_timer(&vtm->vtm_timer);
awilliam@9376 311
awilliam@9376 312 free_domain_tlb(v);
awilliam@9376 313 free_vpd(v);
awilliam@9376 314 }
awilliam@9376 315
djm@6799 316 typedef struct io_range {
djm@6799 317 unsigned long start;
djm@6799 318 unsigned long size;
djm@6799 319 unsigned long type;
djm@6799 320 } io_range_t;
djm@6799 321
awilliam@10570 322 static const io_range_t io_ranges[] = {
djm@6799 323 {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
djm@6799 324 {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
djm@6799 325 {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
djm@6799 326 {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
djm@6799 327 {PIB_START, PIB_SIZE, GPFN_PIB},
djm@6799 328 };
djm@6799 329
awilliam@10570 330 /* Reserve 1 page for shared I/O and 1 page for xenstore. */
djm@7333 331 #define VMX_SYS_PAGES (2 + (GFW_SIZE >> PAGE_SHIFT))
djm@6799 332 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
djm@6799 333
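/* A sketch of the guest physical layout that vmx_build_physmap_table()
 * constructs from the constants above and in the headers:
 *   [0, MMIO_START)                   normal memory (up to 3G)
 *   io_ranges[]                       VGA, MMIO, legacy I/O, IOSAPIC, PIB
 *   [4G, ...)                         memory beyond 3G, remapped high
 *   GFW_START .. GFW_START+GFW_SIZE   guest firmware
 *   IO_PAGE_START / STORE_PAGE_START  shared I/O page and xenstore page
 */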
awilliam@10570 334 static void vmx_build_physmap_table(struct domain *d)
djm@6799 335 {
awilliam@9489 336 unsigned long i, j, start, tmp, end, mfn;
djm@6799 337 struct vcpu *v = d->vcpu[0];
awilliam@9489 338 struct list_head *list_ent = d->page_list.next;
djm@6799 339
djm@6799 340 ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
awilliam@9489 341 ASSERT(d->max_pages == d->tot_pages);
djm@6799 342
djm@7333 343 /* Mark I/O ranges */
djm@7333 344 for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
djm@7333 345 for (j = io_ranges[i].start;
awilliam@10570 346 j < io_ranges[i].start + io_ranges[i].size;
awilliam@10570 347 j += PAGE_SIZE)
awilliam@10152 348 __assign_domain_page(d, j, io_ranges[i].type, ASSIGN_writable);
djm@7333 349 }
djm@7333 350
awilliam@9489 351 /* Map normal memory below 3G */
awilliam@9489 352 end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
awilliam@9489 353 tmp = end < MMIO_START ? end : MMIO_START;
awilliam@9489 354 for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
awilliam@10011 355 mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
awilliam@10011 356 list_ent = mfn_to_page(mfn)->list.next;
awilliam@10011 357 if (VGA_IO_START <= i && i < VGA_IO_START + VGA_IO_SIZE)
awilliam@10011 358 continue;
awilliam@9489 359 assign_domain_page(d, i, mfn << PAGE_SHIFT);
djm@6799 360 }
awilliam@9489 361 ASSERT(list_ent != &d->page_list);
awilliam@9011 362
djm@6799 363 /* Map normal memory beyond 4G */
djm@6799 364 if (unlikely(end > MMIO_START)) {
djm@6799 365 start = 4 * MEM_G;
djm@6799 366 end = start + (end - 3 * MEM_G);
awilliam@10570 367 for (i = start;
awilliam@10570 368 (i < end) && (list_ent != &d->page_list); i += PAGE_SIZE) {
awilliam@10570 369 mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
awilliam@9489 370 assign_domain_page(d, i, mfn << PAGE_SHIFT);
awilliam@9489 371 list_ent = mfn_to_page(mfn)->list.next;
awilliam@9489 372 }
awilliam@9489 373 ASSERT(list_ent != &d->page_list);
awilliam@10570 374 }
awilliam@9489 375
awilliam@9489 376 /* Map guest firmware */
awilliam@9489 377 for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
awilliam@9489 378 (list_ent != &d->page_list); i += PAGE_SIZE) {
awilliam@10570 379 mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
awilliam@9489 380 assign_domain_page(d, i, mfn << PAGE_SHIFT);
awilliam@9489 381 list_ent = mfn_to_page(mfn)->list.next;
djm@6799 382 }
awilliam@9489 383 ASSERT(list_ent != &d->page_list);
awilliam@9489 384
awilliam@9489 385 /* Map the shared I/O page and the xenstore page */
awilliam@9489 386 mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
awilliam@9489 387 assign_domain_page(d, IO_PAGE_START, mfn << PAGE_SHIFT);
awilliam@9489 388 list_ent = mfn_to_page(mfn)->list.next;
awilliam@9489 389 ASSERT(list_ent != &d->page_list);
awilliam@9489 390
awilliam@9489 391 mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
awilliam@9489 392 assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
awilliam@9489 393 list_ent = mfn_to_page(mfn)->list.next;
awilliam@9489 394 ASSERT(list_ent == &d->page_list);
djm@6799 395
djm@6799 396 set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
djm@6799 397 }
djm@7333 398
awilliam@10570 399 void vmx_setup_platform(struct domain *d)
djm@7333 400 {
djm@7333 401 ASSERT(d != dom0); /* only for non-privileged vti domain */
awilliam@9489 402
awilliam@10570 403 vmx_build_physmap_table(d);
awilliam@9489 404
djm@7333 405 d->arch.vmx_platform.shared_page_va =
awilliam@8916 406 (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
djm@7333 407 /* TEMP */
djm@7333 408 d->arch.vmx_platform.pib_base = 0xfee00000UL;
djm@7333 409
awilliam@10570 410 d->arch.sal_data = xmalloc(struct xen_sal_data);
awilliam@10570 411
djm@7333 412 /* Only open one port for I/O and interrupt emulation */
kaf24@7720 413 memset(&d->shared_info->evtchn_mask[0], 0xff,
kaf24@7720 414 sizeof(d->shared_info->evtchn_mask));
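/* Note: every event channel starts masked here; vmx_do_launch() below
 * clears the mask bit only for iopacket_port(v), leaving that single
 * port open for I/O and interrupt emulation. */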
djm@7333 415
kaf24@7720 416 /* Initialize the virtual interrupt lines */
kaf24@7720 417 vmx_virq_line_init(d);
djm@7333 418
kaf24@7720 419 /* Initialize iosapic model within hypervisor */
kaf24@8708 420 hvm_vioapic_init(d);
djm@7333 421 }
djm@7333 422
kaf24@8971 423 void vmx_do_launch(struct vcpu *v)
kaf24@8971 424 {
kaf24@8971 425 if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
kaf24@8971 426 printk("VMX domain bind port %d to vcpu %d failed!\n",
kaf24@8971 427 iopacket_port(v), v->vcpu_id);
kaf24@8971 428 domain_crash_synchronous();
kaf24@8971 429 }
djm@7333 430
awilliam@10570 431 clear_bit(iopacket_port(v), &v->domain->shared_info->evtchn_mask[0]);
kaf24@8971 432
kaf24@8971 433 vmx_load_all_rr(v);
kaf24@8971 434 }