ia64/linux-2.6.18-xen.hg

view arch/i386/mm/hypervisor.c @ 893:f994bfe9b93b

linux/blktap2: reduce TLB flush scope

c/s 885 added very coarse TLB flushing. Since these flushes always
follow single page updates, single page flushes (when available) are
sufficient.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 04 10:32:57 2009 +0100 (2009-06-04)
parents 485fe5efa4ff
children
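
For illustration only (this is not part of the changeset): the change described above amounts to replacing a global TLB flush with a flush of the single virtual address whose PTE was just rewritten. The sketch below is hypothetical and simply strings together helpers defined in this file; blktap2's actual call sites differ.

    /* Hypothetical sketch: after rewriting one PTE, flush only that page. */
    static void remap_one_page(pte_t *ptep, pte_t newval, unsigned long va)
    {
            xen_l1_entry_update(ptep, newval);
            /* coarse flush (the c/s 885 behaviour) would be: xen_tlb_flush_all(); */
            xen_invlpg_all(va);     /* flush just 'va', on all CPUs */
    }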
/******************************************************************************
 * mm/hypervisor.c
 *
 * Update page tables via the hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/hypervisor.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/interface/memory.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <linux/highmem.h>

void xen_l1_entry_update(pte_t *ptr, pte_t val)
{
        mmu_update_t u;
        u.ptr = ptep_to_machine(ptr);
        u.val = __pte_val(val);
        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL_GPL(xen_l1_entry_update);

void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
        mmu_update_t u;
        u.ptr = virt_to_machine(ptr);
        u.val = __pmd_val(val);
        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}

#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
void xen_l3_entry_update(pud_t *ptr, pud_t val)
{
        mmu_update_t u;
        u.ptr = virt_to_machine(ptr);
        u.val = __pud_val(val);
        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
#endif

#ifdef CONFIG_X86_64
void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
{
        mmu_update_t u;
        u.ptr = virt_to_machine(ptr);
        u.val = __pgd_val(val);
        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
#endif /* CONFIG_X86_64 */

void xen_pt_switch(unsigned long ptr)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_NEW_BASEPTR;
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_new_user_pt(unsigned long ptr)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_tlb_flush(void)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL(xen_tlb_flush);

void xen_invlpg(unsigned long ptr)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = ptr & PAGE_MASK;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL(xen_invlpg);

#ifdef CONFIG_SMP

void xen_tlb_flush_all(void)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_TLB_FLUSH_ALL;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL_GPL(xen_tlb_flush_all);

void xen_tlb_flush_mask(cpumask_t *mask)
{
        struct mmuext_op op;
        if ( cpus_empty(*mask) )
                return;
        op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL_GPL(xen_tlb_flush_mask);

void xen_invlpg_all(unsigned long ptr)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_INVLPG_ALL;
        op.arg1.linear_addr = ptr & PAGE_MASK;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL_GPL(xen_invlpg_all);

void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
{
        struct mmuext_op op;
        if ( cpus_empty(*mask) )
                return;
        op.cmd = MMUEXT_INVLPG_MULTI;
        op.arg1.linear_addr = ptr & PAGE_MASK;
        set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
EXPORT_SYMBOL_GPL(xen_invlpg_mask);

#endif /* CONFIG_SMP */
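
/*
 * Illustrative sketch, not from the original source: after a single-page
 * mapping update, flushing just that virtual address on the CPUs that may
 * cache it is sufficient; a full MMUEXT_TLB_FLUSH_ALL would be overkill.
 * The function name and the 'mm'/'va' arguments are hypothetical caller
 * context, used only to show how the helpers above fit together.
 */
static inline void example_flush_one_page(struct mm_struct *mm, unsigned long va)
{
#ifdef CONFIG_SMP
        xen_invlpg_mask(&mm->cpu_vm_mask, va);  /* flush 'va' on interested CPUs */
#else
        xen_invlpg(va);                         /* flush 'va' on this CPU only */
#endif
}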

void xen_pgd_pin(unsigned long ptr)
{
        struct mmuext_op op;
#ifdef CONFIG_X86_64
        op.cmd = MMUEXT_PIN_L4_TABLE;
#elif defined(CONFIG_X86_PAE)
        op.cmd = MMUEXT_PIN_L3_TABLE;
#else
        op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_pgd_unpin(unsigned long ptr)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_UNPIN_TABLE;
        op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_set_ldt(const void *ptr, unsigned int ents)
{
        struct mmuext_op op;
        op.cmd = MMUEXT_SET_LDT;
        op.arg1.linear_addr = (unsigned long)ptr;
        op.arg2.nr_ents = ents;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

/* Protected by balloon_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];

/* Ensure multi-page extents are contiguous in machine memory. */
int xen_create_contiguous_region(
        unsigned long vstart, unsigned int order, unsigned int address_bits)
{
        unsigned long *in_frames = discontig_frames, out_frame;
        unsigned long frame, flags;
        unsigned int i;
        int rc, success;
        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = 1UL << order,
                        .extent_order = 0,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = 1,
                        .extent_order = order,
                        .address_bits = address_bits,
                        .domid = DOMID_SELF
                }
        };

        /*
         * Currently an auto-translated guest will not perform I/O, nor will
         * it require PAE page directories below 4GB. Therefore any calls to
         * this function are redundant and can be ignored.
         */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;

        set_xen_guest_handle(exchange.in.extent_start, in_frames);
        set_xen_guest_handle(exchange.out.extent_start, &out_frame);

        scrub_pages((void *)vstart, 1 << order);

        balloon_lock(flags);

        /* 1. Zap current PTEs, remembering MFNs. */
        for (i = 0; i < (1U<<order); i++) {
                in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
                MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
                                        __pte_ma(0), 0);
                set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
                                    INVALID_P2M_ENTRY);
        }
        if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
                BUG();

        /* 2. Get a new contiguous memory extent. */
        out_frame = __pa(vstart) >> PAGE_SHIFT;
        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == (1UL << order));
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));
#if CONFIG_XEN_COMPAT <= 0x030002
        if (unlikely(rc == -ENOSYS)) {
                /* Compatibility when XENMEM_exchange is unsupported. */
                if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                         &exchange.in) != (1UL << order))
                        BUG();
                success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                                &exchange.out) == 1);
                if (!success) {
                        /* Couldn't get special memory: fall back to normal. */
                        for (i = 0; i < (1U<<order); i++)
                                in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
                        if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                                 &exchange.in) != (1UL<<order))
                                BUG();
                }
        }
#endif

        /* 3. Map the new extent in place of old pages. */
        for (i = 0; i < (1U<<order); i++) {
                frame = success ? (out_frame + i) : in_frames[i];
                MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
                                        pfn_pte_ma(frame, PAGE_KERNEL), 0);
                set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
        }

        cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
                ? UVMF_TLB_FLUSH|UVMF_ALL
                : UVMF_INVLPG|UVMF_ALL;
        if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
                BUG();

        balloon_unlock(flags);

        return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
        unsigned long *out_frames = discontig_frames, in_frame;
        unsigned long frame, flags;
        unsigned int i;
        int rc, success;
        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = 1,
                        .extent_order = order,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = 1UL << order,
                        .extent_order = 0,
                        .domid = DOMID_SELF
                }
        };

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return;

        set_xen_guest_handle(exchange.in.extent_start, &in_frame);
        set_xen_guest_handle(exchange.out.extent_start, out_frames);

        scrub_pages((void *)vstart, 1 << order);

        balloon_lock(flags);

        /* 1. Find start MFN of contiguous extent. */
        in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);

        /* 2. Zap current PTEs. */
        for (i = 0; i < (1U<<order); i++) {
                MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
                                        __pte_ma(0), 0);
                set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
                                    INVALID_P2M_ENTRY);
                out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
        }
        if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
                BUG();

        /* 3. Do the exchange for non-contiguous MFNs. */
        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == 1);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));
#if CONFIG_XEN_COMPAT <= 0x030002
        if (unlikely(rc == -ENOSYS)) {
                /* Compatibility when XENMEM_exchange is unsupported. */
                if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                         &exchange.in) != 1)
                        BUG();
                if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                         &exchange.out) != (1UL << order))
                        BUG();
                success = 1;
        }
#endif

        /* 4. Map new pages in place of old pages. */
        for (i = 0; i < (1U<<order); i++) {
                frame = success ? out_frames[i] : (in_frame + i);
                MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
                                        pfn_pte_ma(frame, PAGE_KERNEL), 0);
                set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
        }

        cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
                ? UVMF_TLB_FLUSH|UVMF_ALL
                : UVMF_INVLPG|UVMF_ALL;
        if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
                BUG();

        balloon_unlock(flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
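
/*
 * Illustrative sketch, not from the original source: a driver needing a
 * buffer that is contiguous in machine memory (e.g. for DMA) might pair the
 * two helpers above roughly as follows. The function names, the GFP flags
 * and the 32-bit address limit are assumptions made for the example.
 */
static void *example_alloc_dma_buffer(unsigned int order)
{
        unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

        if (!vstart)
                return NULL;
        /* Make the 2^order frames machine-contiguous and below 4GB. */
        if (xen_create_contiguous_region(vstart, order, 32)) {
                free_pages(vstart, order);
                return NULL;
        }
        return (void *)vstart;
}

static void example_free_dma_buffer(void *vaddr, unsigned int order)
{
        /* Give the contiguous extent back before freeing the pages. */
        xen_destroy_contiguous_region((unsigned long)vaddr, order);
        free_pages((unsigned long)vaddr, order);
}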

static void undo_limit_pages(struct page *pages, unsigned int order)
{
        BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
        BUG_ON(order > MAX_CONTIG_ORDER);
        xen_limit_pages_to_max_mfn(pages, order, 0);
        ClearPageForeign(pages);
        __free_pages(pages, order);
}

int xen_limit_pages_to_max_mfn(
        struct page *pages, unsigned int order, unsigned int address_bits)
{
        unsigned long flags, frame;
        unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
        struct page *page;
        unsigned int i, n, nr_mcl;
        int rc, success;
        DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);

        struct xen_memory_exchange exchange = {
                .in = {
                        .extent_order = 0,
                        .domid = DOMID_SELF
                },
                .out = {
                        .extent_order = 0,
                        .address_bits = address_bits,
                        .domid = DOMID_SELF
                }
        };

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;

        if (address_bits) {
                if (address_bits < PAGE_SHIFT)
                        return -EINVAL;
                bitmap_zero(limit_map, 1U << order);
        } else if (order) {
                BUILD_BUG_ON(sizeof(pages->index) != sizeof(*limit_map));
                for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
                        limit_map[i] = pages[i + 1].index;
        } else
                __set_bit(0, limit_map);

        set_xen_guest_handle(exchange.in.extent_start, in_frames);
        set_xen_guest_handle(exchange.out.extent_start, out_frames);

        /* 0. Scrub the pages. */
        for (i = 0, n = 0; i < 1U<<order; i++) {
                page = &pages[i];
                if (address_bits) {
                        if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
                                continue;
                        __set_bit(i, limit_map);
                }

                if (!PageHighMem(page))
                        scrub_pages(page_address(page), 1);
#ifdef CONFIG_XEN_SCRUB_PAGES
                else {
                        scrub_pages(kmap(page), 1);
                        kunmap(page);
                        ++n;
                }
#endif
        }
        if (bitmap_empty(limit_map, 1U << order))
                return 0;

        if (n)
                kmap_flush_unused();

        balloon_lock(flags);

        /* 1. Zap current PTEs (if any), remembering MFNs. */
        for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
                if (!test_bit(i, limit_map))
                        continue;
                page = &pages[i];

                out_frames[n] = page_to_pfn(page);
                in_frames[n] = pfn_to_mfn(out_frames[n]);

                if (!PageHighMem(page))
                        MULTI_update_va_mapping(cr_mcl + nr_mcl++,
                                                (unsigned long)page_address(page),
                                                __pte_ma(0), 0);

                set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
                ++n;
        }
        if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
                BUG();

        /* 2. Get new memory below the required limit. */
        exchange.in.nr_extents = n;
        exchange.out.nr_extents = n;
        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == n);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));
#if CONFIG_XEN_COMPAT <= 0x030002
        if (unlikely(rc == -ENOSYS)) {
                /* Compatibility when XENMEM_exchange is unsupported. */
                if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                         &exchange.in) != n)
                        BUG();
                if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                         &exchange.out) != n)
                        BUG();
                success = 1;
        }
#endif

        /* 3. Map the new pages in place of old pages. */
        for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
                if (!test_bit(i, limit_map))
                        continue;
                page = &pages[i];

                frame = success ? out_frames[n] : in_frames[n];

                if (!PageHighMem(page))
                        MULTI_update_va_mapping(cr_mcl + nr_mcl++,
                                                (unsigned long)page_address(page),
                                                pfn_pte_ma(frame, PAGE_KERNEL), 0);

                set_phys_to_machine(page_to_pfn(page), frame);
                ++n;
        }
        if (nr_mcl) {
                cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
                        ? UVMF_TLB_FLUSH|UVMF_ALL
                        : UVMF_INVLPG|UVMF_ALL;
                if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
                        BUG();
        }

        balloon_unlock(flags);

        if (!success)
                return -ENOMEM;

        if (address_bits) {
                if (order) {
                        BUILD_BUG_ON(sizeof(*limit_map) != sizeof(pages->index));
                        for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
                                pages[i + 1].index = limit_map[i];
                }
                SetPageForeign(pages, undo_limit_pages);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
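
/*
 * Illustrative sketch, not from the original source: a caller that must hand
 * frames to a device with a restricted address width can exchange freshly
 * allocated pages for frames below a given boundary. The function name and
 * the 32-bit limit are assumptions made for the example; on success the head
 * page is marked PageForeign, so the Xen-patched free path is expected to
 * run undo_limit_pages() when the pages are eventually freed.
 */
static struct page *example_alloc_low_pages(unsigned int order)
{
        struct page *pages = alloc_pages(GFP_KERNEL, order);

        if (!pages)
                return NULL;
        /* Ensure every frame is addressable with 32 bits (below 4GB). */
        if (xen_limit_pages_to_max_mfn(pages, order, 32)) {
                __free_pages(pages, order);
                return NULL;
        }
        return pages;
}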

#ifdef __i386__
int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
{
        __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
        maddr_t mach_lp = arbitrary_virt_to_machine(lp);
        return HYPERVISOR_update_descriptor(
                mach_lp, (u64)entry_a | ((u64)entry_b<<32));
}
#endif

#define MAX_BATCHED_FULL_PTES 32

int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                         unsigned long addr, unsigned long end, pgprot_t newprot)
{
        int rc = 0, i = 0;
        mmu_update_t u[MAX_BATCHED_FULL_PTES];
        pte_t *pte;
        spinlock_t *ptl;

        if (!xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
                return 0;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                if (pte_present(*pte)) {
                        u[i].ptr = (__pmd_val(*pmd) & PHYSICAL_PAGE_MASK)
                                   | ((unsigned long)pte & ~PAGE_MASK)
                                   | MMU_PT_UPDATE_PRESERVE_AD;
                        u[i].val = __pte_val(pte_modify(*pte, newprot));
                        if (++i == MAX_BATCHED_FULL_PTES) {
                                if ((rc = HYPERVISOR_mmu_update(
                                        &u[0], i, NULL, DOMID_SELF)) != 0)
                                        break;
                                i = 0;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        if (i)
                rc = HYPERVISOR_mmu_update(&u[0], i, NULL, DOMID_SELF);
        pte_unmap_unlock(pte - 1, ptl);
        BUG_ON(rc && rc != -ENOSYS);
        return !rc;
}