xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
block: don't allow enabling a cache on devices that don't support it
author: Christoph Hellwig <hch@lst.de>
Fri, 7 Jul 2023 09:42:39 +0000 (11:42 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 17 Jul 2023 14:18:18 +0000 (08:18 -0600)
Currently the write_cache attribute allows enabling the QUEUE_FLAG_WC
flag on devices that never claimed the capability.

Fix that by adding a QUEUE_FLAG_HW_WC flag that is set by
blk_queue_write_cache and guards re-enabling the cache through sysfs.

Note that any rescan that calls blk_queue_write_cache will still
re-enable the write cache as in the current code.

Fixes: 93e9d8e836cb ("block: add ability to flag write back caching on a device")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230707094239.107968-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-settings.c
block/blk-sysfs.c
include/linux/blkdev.h

index 4dd59059b788eb3b8b45cc555716eec11bfcd995..0046b447268f912a1b587c6367adb88548571feb 100644 (file)
@@ -830,10 +830,13 @@ EXPORT_SYMBOL(blk_set_queue_depth);
  */
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
-       if (wc)
+       if (wc) {
+               blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
-       else
+       } else {
+               blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
+       }
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
index 0cde6598fb2f4dcb8e16f2a75b3ffb4c84929a05..63e4812623361ddde759809c4e04b715aa871e43 100644 (file)
@@ -449,13 +449,16 @@ static ssize_t queue_wc_show(struct request_queue *q, char *page)
 static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
 {
-       if (!strncmp(page, "write back", 10))
+       if (!strncmp(page, "write back", 10)) {
+               if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
+                       return -EINVAL;
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
-       else if (!strncmp(page, "write through", 13) ||
-                !strncmp(page, "none", 4))
+       } else if (!strncmp(page, "write through", 13) ||
+                !strncmp(page, "none", 4)) {
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
-       else
+       } else {
                return -EINVAL;
+       }
 
        return count;
 }
index ed44a997f629f5dd1829d0529f6762e78bacc828..2f5371b8482c004ee992f33367bbf2fc5358f00d 100644 (file)
@@ -538,6 +538,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  10      /* Contributes to random pool */
 #define QUEUE_FLAG_SYNCHRONOUS 11      /* always completes in submit context */
 #define QUEUE_FLAG_SAME_FORCE  12      /* force complete on same CPU */
+#define QUEUE_FLAG_HW_WC       18      /* Write back caching supported */
 #define QUEUE_FLAG_INIT_DONE   14      /* queue is initialized */
 #define QUEUE_FLAG_STABLE_WRITES 15    /* don't modify blks until WB is done */
 #define QUEUE_FLAG_POLL                16      /* IO polling enabled if set */