aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBrian Behlendorf <[email protected]>2013-07-02 11:59:51 -0700
committerBrian Behlendorf <[email protected]>2013-07-03 09:24:38 -0700
commit91604b298c24c84fe03bc6c028abb961ca3e6fcf (patch)
treebff83bc3f42c2fc185ff20a4d9002bdc76d71773
parent2a3871d4bcc65dff7be4c9b55cb863421ddc8c3a (diff)
Open pools asynchronously after module load
One of the side effects of calling zvol_create_minors() in zvol_init() is that all pools listed in the cache file will be opened. Depending on the state and contents of your pool this operation can take a considerable length of time. Doing this at load time is undesirable because the kernel is holding a global module lock. This prevents other modules from loading and can serialize an otherwise parallel boot process. Doing this after module initialization also reduces the chances of accidentally introducing a race during module init. To ensure that /dev/zvol/<pool>/<dataset> devices are still automatically created after the module load completes, a udev rule has been added. When udev notices that the /dev/zfs device has been created, the 'zpool list' command will be run. This will then cause all the pools listed in the zpool.cache file to be opened. Because this process is now driven asynchronously by udev, there is the risk of problems in downstream distributions. Signed-off-by: Brian Behlendorf <[email protected]> Issue #756 Issue #1020 Issue #1234
-rw-r--r--module/zfs/zvol.c2
-rwxr-xr-xscripts/zconfig.sh3
-rw-r--r--udev/rules.d/90-zfs.rules.in4
3 files changed, 5 insertions, 4 deletions
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 97b65c815..e35c91bc1 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1582,8 +1582,6 @@ zvol_init(void)
blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
THIS_MODULE, zvol_probe, NULL, NULL);
- (void) zvol_create_minors(NULL);
-
return (0);
out2:
diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh
index 141348c03..281166c59 100755
--- a/scripts/zconfig.sh
+++ b/scripts/zconfig.sh
@@ -264,8 +264,9 @@ test_4() {
zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
- # Load the modules, wait 1 second for udev
+ # Load the modules, list the pools to ensure they are opened
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
+ ${ZPOOL} list &>/dev/null
# Verify the devices were created
zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
diff --git a/udev/rules.d/90-zfs.rules.in b/udev/rules.d/90-zfs.rules.in
index 52e1d6393..a2715d2e7 100644
--- a/udev/rules.d/90-zfs.rules.in
+++ b/udev/rules.d/90-zfs.rules.in
@@ -1,4 +1,4 @@
-SUBSYSTEM!="block", GOTO="zfs_end"
+SUBSYSTEM!="block|misc", GOTO="zfs_end"
ACTION!="add|change", GOTO="zfs_end"
ENV{ID_FS_TYPE}=="zfs", RUN+="/sbin/modprobe zfs"
@@ -7,4 +7,6 @@ ENV{ID_FS_TYPE}=="zfs_member", RUN+="/sbin/modprobe zfs"
KERNEL=="null", SYMLINK+="root"
SYMLINK=="null", SYMLINK+="root"
+SUBSYSTEM=="misc", KERNEL=="zfs", RUN+="@sbindir@/zpool list"
+
LABEL="zfs_end"