ia64/linux-2.6.18-xen.hg

arch/sparc64/kernel/unaligned.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
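
Mechanically, the new behaviour amounts to keeping whatever Xen grants
and re-arming the retry timer when the grant falls short, roughly as in
the sketch below (helper and field names are illustrative, not the
literal patch):

/* Sketch only: increase_reservation() stands in for the driver's
 * XENMEM_populate_physmap path and is assumed to return the number of
 * pages Xen actually granted; bs, current_target() and balloon_timer
 * are assumed to be the driver's existing state, target and timer. */
static void balloon_process(void *unused)
{
	long credit = current_target() - bs.current_pages;

	if (credit > 0) {
		long got = increase_reservation(credit);

		bs.current_pages += got;	/* keep a partial grant */
		if (got < credit)
			/* Possibly transient host memory pressure:
			 * retry later instead of latching a limit. */
			mod_timer(&balloon_timer, jiffies + HZ);
	}
}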

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author	Keir Fraser <keir.fraser@citrix.com>
date	Fri Jun 05 14:01:20 2009 +0100
parents 831230e53067
/* $Id: unaligned.c,v 1.24 2002/02/09 19:49:31 davem Exp $
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/bitops.h>
#include <linux/kallsyms.h>
#include <asm/fpumacro.h>

/* #define DEBUG_MNA */

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
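
/* Bits 24:19 of a SPARC load/store instruction hold the op3 opcode
 * field, so (insn >> 21) & 1 is op3 bit 2: clear for every load
 * opcode, set for stores and read-modify-write ops.  A low nibble of
 * 0xf (swap/swapa) is the read-modify-write case reported as "both". */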
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */

		return 0;
	}
}
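
/* Opcodes with bit 23 (op3 bit 4) set are the alternate-space forms.
 * For those, the ASI comes from the %asi register (bits 31:24 of
 * TSTATE) when the i bit (0x2000) is set, or from the imm_asi field
 * in bits 12:5 of the instruction otherwise.  Everything else uses
 * the default primary address space. */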
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);		/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}
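
/* Shifting a 64-bit long left then arithmetically right by 51 bits
 * sign-extends the 13-bit immediate: e.g. 0x1fff becomes -1. */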
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}
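
/* %g0-%g7 and %o0-%o7 live in pt_regs; %l0-%l7 and %i0-%i7 live in
 * the register window saved on the stack, which must be read with the
 * 32-bit layout (and without STACK_BIAS) for compat tasks. */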
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
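
/* The effective address is either rs1 + sign-extended 13-bit
 * immediate (i bit set) or rs1 + rs2, matching the two SPARC
 * addressing modes for loads and stores. */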
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}
}

/* This is just to make gcc think die_if_kernel does return... */
static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);
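
/* A size of 16 here means ldd/std: the 64-bit value is assembled from
 * the two 32-bit halves in registers rd and rd+1 and written as one
 * 8-byte store.  If the caller normalized away a little-endian ASI
 * (asi != orig_asi), the value is byte-swapped first. */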
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		};
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}
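
/* If a kernel-mode unaligned access faults, look for an exception
 * table fixup for the trapping PC just as the page fault handler
 * would; only when there is none do we report the oops and die. */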
static void kernel_mna_trap_fault(void)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
		       (current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	regs->tstate &= ~TSTATE_ASI;
	regs->tstate |= (ASI_AIUS << 24UL);
}

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	static unsigned long count, last_time;
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	if (jiffies - last_time > 5 * HZ)
		count = 0;
	if (count < 5) {
		last_time = jiffies;
		count++;
		printk("Kernel unaligned access at TPC[%lx] ", regs->tpc);
		print_symbol("%s\n", regs->tpc);
	}

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault();
	} else {
		unsigned long addr, *reg_addr;
		int orig_asi, asi, err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
		       "retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size,
		       regs->u_regs[UREG_RETPC]);
#endif
		orig_asi = asi = decode_asi(insn, regs);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		};
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				};
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault();
		else
			advance(regs);
	}
}
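
/* popc_helper[n] is the number of set bits in the 4-bit value n, so a
 * 64-bit population count is the sum of sixteen nibble lookups. */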
static char popc_helper[] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};

int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr,
					   unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);
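
/* Emulate the quad-width FP stores (and the sub-word FP loads) that
 * the hardware cannot perform directly.  The 6-bit FP register number
 * is reassembled from its split encoding, FPU state is synced into
 * the thread's software save area, and the access is carried out as a
 * series of 32-bit user-space transfers. */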
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}
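
/* An LDDF to a misaligned (but word-aligned) address traps with
 * "lddfmna"; emulate it by reading the two 32-bit halves separately
 * and depositing the combined value in the FP register file. */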
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 __user *)sfar) ||
		    get_user(second, (u32 __user *)(sfar + 4))) {
			if (asi & 0x2) /* NF */ {
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}

void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}