| author | Brian Behlendorf <[email protected]> | 2018-04-12 19:46:14 -0700 |
|---|---|---|
| committer | Tony Hutter <[email protected]> | 2018-05-07 17:19:57 -0700 |
| commit | 4ed30958cee6a7e5e6da41f5767fc7acde1dd1cf (patch) | |
| tree | bcb6d02e4f83235bd9fc2a3cf717dcf4d79e61aa | |
| parent | 2f118072cbd2146fd8b5d79de5cc6ad537617ac6 (diff) | |
Linux compat 4.16: blk_queue_flag_{set,clear}
The HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY case was overlooked in
the original 10f88c5c commit because blk_queue_write_cache()
was available for the in-kernel builds.
Update the blk_queue_flag_{set,clear} wrappers to call the locked
versions to avoid confusion. This is safe for all existing callers.
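With the wrappers in place, callers can use the 4.16+ calling convention unconditionally. A minimal usage sketch follows; the helper name and the specific queue flags shown are illustrative and not part of this patch:

```c
/*
 * Illustrative caller only -- not part of this patch.  With the compat
 * wrappers, the 4.16+ style works on every supported kernel; on older
 * kernels the header maps it to queue_flag_set()/queue_flag_clear().
 */
#include <linux/blkdev_compat.h>

static void
zvol_queue_flags_sketch(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);	/* zvols are not rotational */
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);	/* no entropy contribution */
}
```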
The blk_queue_set_write_cache() function has been updated to use
these wrappers. This means setting/clearing both QUEUE_FLAG_WC
and QUEUE_FLAG_FUA is no longer atomic, but this is only done early
in zvol_alloc(), prior to any requests, so there is no issue.
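To make that atomicity argument concrete, here is a hedged sketch of the ordering it relies on; the function and its parameters are hypothetical stand-ins, not the actual zvol_alloc() body:

```c
/*
 * Hypothetical sketch of the ordering described above -- not the real
 * zvol_alloc().  The write-cache flags are configured while the queue is
 * still private to the allocating thread; the gendisk is only added
 * afterwards, so no request can observe the window between the
 * QUEUE_FLAG_WC and QUEUE_FLAG_FUA updates even though they are no longer
 * changed under a single queue_lock critical section.
 */
static void
zvol_setup_queue_sketch(struct request_queue *q, struct gendisk *disk)
{
	blk_queue_set_write_cache(q, true, true);	/* two independent flag updates */

	/* ... remaining queue limits and gendisk setup ... */

	add_disk(disk);		/* queue becomes visible; I/O may start arriving */
}
```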
Reviewed-by: Tony Hutter <[email protected]>
Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: Kash Pande <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #7428
Closes #7431
-rw-r--r-- | include/linux/blkdev_compat.h | 14 |
1 file changed, 6 insertions, 8 deletions
```diff
diff --git a/include/linux/blkdev_compat.h b/include/linux/blkdev_compat.h
index d7af1d89d..f99980ab3 100644
--- a/include/linux/blkdev_compat.h
+++ b/include/linux/blkdev_compat.h
@@ -41,7 +41,7 @@ typedef unsigned __bitwise__ fmode_t;
 static inline void
 blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	queue_flag_set_unlocked(flag, q);
+	queue_flag_set(flag, q);
 }
 #endif
 
@@ -49,7 +49,7 @@ blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 static inline void
 blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	queue_flag_clear_unlocked(flag, q);
+	queue_flag_clear(flag, q);
 }
 #endif
 
@@ -72,16 +72,14 @@ static inline void
 blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 #if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
-	spin_lock_irq(q->queue_lock);
 	if (wc)
-		queue_flag_set(QUEUE_FLAG_WC, q);
+		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WC, q);
+		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 	if (fua)
-		queue_flag_set(QUEUE_FLAG_FUA, q);
+		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_FUA, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
 #elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
 	blk_queue_write_cache(q, wc, fua);
 #elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
```