From 91604b298c24c84fe03bc6c028abb961ca3e6fcf Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Tue, 2 Jul 2013 11:59:51 -0700 Subject: Open pools asynchronously after module load One of the side effects of calling zvol_create_minors() in zvol_init() is that all pools listed in the cache file will be opened. Depending on the state and contents of your pool this operation can take a considerable length of time. Doing this at load time is undesirable because the kernel is holding a global module lock. This prevents other modules from loading and can serialize an otherwise parallel boot process. Doing this after module initialization also reduces the chances of accidentally introducing a race during module init. To ensure that /dev/zvol/<pool>/<dataset> devices are still automatically created after the module load completes a udev rule has been added. When udev notices that the /dev/zfs device has been created the 'zpool list' command will be run. This then will cause all the pools listed in the zpool.cache file to be opened. Because this process is now driven asynchronously by udev there is the risk of problems in downstream distributions. 
Signed-off-by: Brian Behlendorf Issue #756 Issue #1020 Issue #1234 --- module/zfs/zvol.c | 2 -- scripts/zconfig.sh | 3 ++- udev/rules.d/90-zfs.rules.in | 4 +++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c index 97b65c815..e35c91bc1 100644 --- a/module/zfs/zvol.c +++ b/module/zfs/zvol.c @@ -1582,8 +1582,6 @@ zvol_init(void) blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS, THIS_MODULE, zvol_probe, NULL, NULL); - (void) zvol_create_minors(NULL); - return (0); out2: diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh index 141348c03..281166c59 100755 --- a/scripts/zconfig.sh +++ b/scripts/zconfig.sh @@ -264,8 +264,9 @@ test_4() { zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9 - # Load the modules, wait 1 second for udev + # Load the modules, list the pools to ensure they are opened ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10 + ${ZPOOL} list &>/dev/null # Verify the devices were created zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ diff --git a/udev/rules.d/90-zfs.rules.in b/udev/rules.d/90-zfs.rules.in index 52e1d6393..a2715d2e7 100644 --- a/udev/rules.d/90-zfs.rules.in +++ b/udev/rules.d/90-zfs.rules.in @@ -1,4 +1,4 @@ -SUBSYSTEM!="block", GOTO="zfs_end" +SUBSYSTEM!="block|misc", GOTO="zfs_end" ACTION!="add|change", GOTO="zfs_end" ENV{ID_FS_TYPE}=="zfs", RUN+="/sbin/modprobe zfs" @@ -7,4 +7,6 @@ ENV{ID_FS_TYPE}=="zfs_member", RUN+="/sbin/modprobe zfs" KERNEL=="null", SYMLINK+="root" SYMLINK=="null", SYMLINK+="root" +SUBSYSTEM=="misc", KERNEL=="zfs", RUN+="@sbindir@/zpool list" + LABEL="zfs_end" -- cgit v1.2.3