author     Brian Behlendorf <[email protected]>    2011-05-02 15:39:59 -0700
committer  Brian Behlendorf <[email protected]>    2011-05-02 15:59:13 -0700
commit     712f8bd87b2d3799107e102652875996fa59647b (patch)
tree       7accb1ffa17551954bc9b7dbb3451e4efdb94527 /etc/init.d
parent     5f35b190071048f25d66db81ce9763ecd0c1760e (diff)
Add Gentoo/Lunar/Redhat Init Scripts
Every distribution has slightly different requirements for their init
scripts.  Because of this the zfs package contains several init scripts
for various distributions.  These scripts have been contributed by, and
are supported by, the larger zfs community.  Init scripts for
Gentoo/Lunar/Redhat have been contributed by:

Gentoo - devsk <[email protected]>
Lunar  - Jean-Michel Bruenn <[email protected]>
Redhat - Fajar A. Nugraha <[email protected]>
Diffstat (limited to 'etc/init.d')
-rw-r--r--   etc/init.d/Makefile.am     2
-rw-r--r--   etc/init.d/Makefile.in     2
-rw-r--r--   etc/init.d/zfs.gentoo    151
-rw-r--r--   etc/init.d/zfs.lunar      80
-rw-r--r--   etc/init.d/zfs.redhat    166
5 files changed, 399 insertions, 2 deletions
diff --git a/etc/init.d/Makefile.am b/etc/init.d/Makefile.am
index 094681edd..dd11946e7 100644
--- a/etc/init.d/Makefile.am
+++ b/etc/init.d/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = zfs.fedora zfs.lsb
+EXTRA_DIST = zfs.fedora zfs.gentoo zfs.lsb zfs.lunar zfs.redhat
install-data-local:
@instdest=$(DESTDIR)/$(sysconfdir)/init.d/zfs; \
diff --git a/etc/init.d/Makefile.in b/etc/init.d/Makefile.in
index f221818f9..63012dc99 100644
--- a/etc/init.d/Makefile.in
+++ b/etc/init.d/Makefile.in
@@ -256,7 +256,7 @@ target_vendor = @target_vendor@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
-EXTRA_DIST = zfs.fedora zfs.lsb
+EXTRA_DIST = zfs.fedora zfs.gentoo zfs.lsb zfs.lunar zfs.redhat
all: all-am
.SUFFIXES:
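The Makefile changes above only add the new scripts to EXTRA_DIST so they ship in the source tarball; the pre-existing install-data-local rule in Makefile.am targets $(DESTDIR)/$(sysconfdir)/init.d/zfs, though its body is truncated in the hunk. As a rough sketch of installing one variant by hand, assuming a source checkout and a Gentoo target (only the destination path is taken from the Makefile; the rest is an assumption and not part of this commit):

# hand-install the Gentoo variant as the system init script (illustrative only)
cp etc/init.d/zfs.gentoo /etc/init.d/zfs
chmod 0755 /etc/init.d/zfs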
diff --git a/etc/init.d/zfs.gentoo b/etc/init.d/zfs.gentoo
new file mode 100644
index 000000000..d2ea90267
--- /dev/null
+++ b/etc/init.d/zfs.gentoo
@@ -0,0 +1,151 @@
+#!/sbin/runscript
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/sys-fs/zfs/files/zfs,v 0.9 2011/04/30 10:13:43 devsk Exp $
+
+depend()
+{
+ before net
+ after udev
+}
+
+CACHEFILE=/etc/zfs/zpool.cache
+ZPOOL=/usr/sbin/zpool
+ZFS=/usr/sbin/zfs
+ZFS_MODULE=zfs
+LOCKFILE=/var/lock/zfs/zfs_lockfile
+
+checksystem()
+{
+ /sbin/modinfo $ZFS_MODULE &>/dev/null
+ if [[ $? -ne 0 ]]
+ then
+ eerror "$ZFS_MODULE not found. Is the ZFS package installed?"
+ return 1
+ fi
+ if [[ ! -x $ZPOOL ]]
+ then
+ eerror "$ZPOOL binary not found."
+ return 1
+ fi
+ if [[ ! -x $ZFS ]]
+ then
+ eerror "$ZFS binary not found."
+ return 1
+ fi
+
+ # create the lockdir if not there
+ lockdir=$(dirname ${LOCKFILE})
+ if [[ ! -d ${lockdir} ]]
+ then
+ mkdir -p ${lockdir} &>/dev/null
+ fi
+ return 0
+}
+
+start()
+{
+ if [[ -f $LOCKFILE ]]
+ then
+ einfo "ZFS is already running; stop it first, or delete $LOCKFILE if it is stale."
+ eend 3
+ return 3
+ fi
+ ebegin "Starting ZFS"
+ checksystem || return 1
+ if ! grep -q $ZFS_MODULE /proc/modules
+ then
+ /sbin/modprobe $ZFS_MODULE &>/dev/null
+ rv=$?
+ if [[ $rv -ne 0 ]]
+ then
+ eerror "Failed to load the $ZFS_MODULE module, check 'dmesg|tail'."
+ eend $rv
+ return $rv
+ fi
+ fi
+
+ # Ensure / exists in /etc/mtab, if not update mtab accordingly.
+ # This should be handled by rc.sysinit but lets be paranoid.
+ awk '$2 == "/" { exit 1 }' /etc/mtab
+ RETVAL=$?
+ if [[ $RETVAL -eq 0 ]]
+ then
+ /bin/mount -f /
+ fi
+
+ # Import all pools described by the cache file, and then mount
+ # all filesystem based on their properties.
+ if [[ -f $CACHEFILE ]]
+ then
+ einfo "Importing ZFS pools"
+
+ # as per fedora script, import can fail if all pools are already imported
+ # The check for $rv makes no sense...but someday, it will work right.
+ $ZPOOL import -c $CACHEFILE -aN 2>/dev/null || true
+ rv=$?
+ if [[ $rv -ne 0 ]]
+ then
+ eerror "Failed to import not-yet imported pools."
+ eend $rv
+ return $rv
+ fi
+ fi
+
+ einfo "Mounting ZFS filesystems"
+ $ZFS mount -a
+ rv=$?
+ if [[ $rv -ne 0 ]]
+ then
+ eerror "Failed to mount ZFS filesystems."
+ eend $rv
+ return $rv
+ fi
+
+ # hack to read mounted file systems because otherwise
+ # zfs returns EPERM when a non-root user reads a mounted filesystem before root did
+ savepwd="$PWD"
+ mount | grep " type zfs " | sed 's/.*on //' | sed 's/ type zfs.*$//' | \
+ while read line
+ do
+ cd "$line" &> /dev/null
+ ls &> /dev/null
+ done
+ cd "$savepwd"
+
+ touch $LOCKFILE
+ eend 0
+ return 0
+}
+
+stop()
+{
+ if [[ ! -f $LOCKFILE ]]
+ then
+ einfo "ZFS does not appear to be running ($LOCKFILE not found)."
+ eend 3
+ return 3
+ fi
+ ebegin "Unmounting ZFS filesystems"
+ sync
+ $ZFS umount -a; rv=$?
+ if [[ $rv -ne 0 ]]
+ then
+ eerror "Failed to umount ZFS filesystems."
+ fi
+ rm -f $LOCKFILE
+ eend $rv
+}
+
+status()
+{
+ if [[ ! -f $LOCKFILE ]]
+ then
+ einfo "ZFS does not appear to be running ($LOCKFILE not found)."
+ eend 3
+ return 3
+ fi
+
+ # show pool status and list
+ $ZPOOL status && echo && $ZPOOL list
+}
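On Gentoo this runscript would be wired into OpenRC in the usual way. A hedged usage sketch; the commands below are standard Gentoo/OpenRC tooling, assume the script has been installed as /etc/init.d/zfs, and are not part of the script itself:

rc-update add zfs boot    # run ZFS in the boot runlevel, after udev and before net per depend()
/etc/init.d/zfs start     # load the module, import pools from the cache file, mount datasets
/etc/init.d/zfs status    # prints `zpool status` followed by `zpool list`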
diff --git a/etc/init.d/zfs.lunar b/etc/init.d/zfs.lunar
new file mode 100644
index 000000000..c7aa1edb4
--- /dev/null
+++ b/etc/init.d/zfs.lunar
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# zfs This shell script takes care of starting (mount) and
+# stopping (umount) zfs shares.
+#
+# chkconfig: 35 60 40
+# description: ZFS is a combined file system and logical
+# volume manager designed by Sun Microsystems. Made
+# available to Linux using SPL (Solaris Porting Layer)
+# by zfsonlinux.org.
+# probe: true
+
+case $1 in
+ start) echo "$1ing ZFS filesystems"
+
+ if ! grep "zfs" /proc/modules > /dev/null; then
+ echo "ZFS kernel module not loaded yet; loading...";
+ if ! modprobe zfs; then
+ echo "Failed to load ZFS kernel module...";
+ exit 0;
+ fi
+ fi
+
+ if ! [ `uname -m` == "x86_64" ]; then
+ echo "Warning: You're not running 64bit. Currently native zfs in";
+ echo " linux is only supported and tested on 64bit.";
+ # should we break here? People doing this should know what they
+ # do, thus i'm not breaking here.
+ fi
+
+ # mount the filesystems
+ while IFS= read -r -d $'\n' dev; do
+ mdev=$(echo "$dev" | awk '{ print $1; }')
+ echo -n "mounting $mdev..."
+ if zfs mount $mdev; then
+ echo -e "done";
+ else
+ echo -e "failed";
+ fi
+ done < <(zfs list -H);
+
+
+ ;;
+
+ stop) echo "$1ping ZFS filesystems"
+
+ if grep "zfs" /proc/modules > /dev/null; then
+ # module is loaded, so we can try to umount filesystems
+ while IFS= read -r -d $'\n' dev; do
+ mdev=$(echo "$dev" | awk '{ print $1 }');
+ echo -n "umounting $mdev...";
+ if zfs umount $mdev; then
+ echo -e "done";
+ else
+ echo -e "failed";
+ fi
+ # the next line is, because i have to reverse the
+ # output, otherwise it wouldn't work as it should
+ done < <(zfs list -H | tac);
+
+ # and finally let's rmmod the module
+ rmmod zfs
+
+
+ else
+ # module not loaded, no need to umount anything
+ exit 0
+ fi
+
+ ;;
+
+ restart) echo "$1ing ZFS filesystems"
+ $0 stop
+ $0 start
+ ;;
+
+ *) echo "Usage: $0 {start|stop|restart}"
+ ;;
+
+esac
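The Lunar script mounts and unmounts each dataset reported by `zfs list -H` individually, reversing the list with `tac` on the way down so children are handled before their parents. For comparison, the Gentoo and Redhat scripts use the bulk forms, which handle that ordering internally; a minimal equivalent sketch, not part of the Lunar script:

zfs mount -a     # mount every dataset whose mountpoint/canmount properties allow it
zfs umount -a    # unmount them again, children before parents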
diff --git a/etc/init.d/zfs.redhat b/etc/init.d/zfs.redhat
new file mode 100644
index 000000000..99ff80e92
--- /dev/null
+++ b/etc/init.d/zfs.redhat
@@ -0,0 +1,166 @@
+#!/bin/bash
+#
+# zfs This script will mount/umount the zfs filesystems.
+#
+# chkconfig: 2345 01 99
+# description: This script will mount/umount the zfs filesystems during
+# system boot/shutdown. Configuration of which filesystems
+# should be mounted is handled by the zfs 'mountpoint' and
+# 'canmount' properties. See the zfs(8) man page for details.
+# It is also responsible for all userspace zfs services.
+#
+### BEGIN INIT INFO
+# Provides: zfs
+# Required-Start:
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 1
+# Short-Description: Mount/umount the zfs filesystems
+# Description: ZFS is an advanced filesystem designed to simplify managing
+# and protecting your data. This service mounts the ZFS
+# filesystems and starts all related zfs services.
+### END INIT INFO
+
+export PATH=/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin
+
+# Source function library & LSB routines
+. /etc/rc.d/init.d/functions
+
+# script variables
+RETVAL=0
+ZPOOL=zpool
+ZFS=zfs
+servicename=zfs
+LOCKFILE=/var/lock/subsys/$servicename
+
+# functions
+zfs_installed() {
+ modinfo zfs > /dev/null 2>&1 || return 5
+ $ZPOOL > /dev/null 2>&1
+ [ $? == 127 ] && return 5
+ $ZFS > /dev/null 2>&1
+ [ $? == 127 ] && return 5
+ return 0
+}
+
+# i need a bash guru to simplify this, since this is copy and paste, but donno how
+# to correctly dereference variable names in bash, or how to do this right
+
+# first parameter is a regular expression that filters fstab
+read_fstab() {
+ unset FSTAB
+ n=0
+ while read -r fs mntpnt fstype opts blah ; do
+ fs=`printf '%b\n' "$fs"`
+ FSTAB[$n]=$fs
+ let n++
+ done < <(egrep "$1" /etc/fstab)
+}
+
+start()
+{
+ # Disable lockfile check
+ # if [ -f "$LOCKFILE" ] ; then return 0 ; fi
+
+ # check if ZFS is installed. If not, comply to FC standards and bail
+ zfs_installed || {
+ action $"Checking if ZFS is installed: not installed" /bin/false
+ return 5
+ }
+
+ # Requires selinux policy which has not been written.
+ if [ -r "/selinux/enforce" ] &&
+ [ "$(cat /selinux/enforce)" = "1" ]; then
+ action $"SELinux ZFS policy required: " /bin/false || return 6
+ fi
+
+ # load kernel module infrastructure
+ if ! grep -q zfs /proc/modules ; then
+ action $"Loading kernel ZFS infrastructure: " modprobe zfs || return 5
+ fi
+ sleep 1
+
+ action $"Mounting automounted ZFS filesystems: " $ZFS mount -a || return 152
+
+ # Read fstab, try to mount zvols ignoring error
+ read_fstab "^/dev/(zd|zvol)"
+ template=$"Mounting volume %s registered in fstab: "
+ for volume in "${FSTAB[@]}" ; do
+ string=`printf "$template" "$volume"`
+ action "$string" mount "$volume" 2>/dev/null || /bin/true
+ done
+
+ # touch "$LOCKFILE"
+}
+
+stop()
+{
+ # Disable lockfile check
+ # if [ ! -f "$LOCKFILE" ] ; then return 0 ; fi
+
+ # check if ZFS is installed. If not, comply to FC standards and bail
+ zfs_installed || {
+ action $"Checking if ZFS is installed: not installed" /bin/false
+ return 5
+ }
+
+ # the poweroff of the system takes care of this,
+ # but unfortunately it never unmounts the root
+ # filesystem itself
+
+ action $"Syncing ZFS filesystems: " sync
+ # about the only thing we can do, and then we
+ # hope that the umount process will succeed
+ # unfortunately the umount process does not dismount
+ # the root file system, there ought to be some way
+ # we can tell zfs to just flush anything in memory
+ # when a request to remount,ro comes in
+
+ #echo -n $"Unmounting ZFS filesystems: "
+ #$ZFS umount -a
+ #RETVAL=$?
+ #if [ $RETVAL -ne 0 ]; then
+ # failure
+
+ # return 8
+ #fi
+ #success
+
+ rm -f "$LOCKFILE"
+}
+
+# See how we are called
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ status)
+ lsmod | grep -q zfs || RETVAL=3
+ $ZPOOL status && echo && $ZFS list || {
+ [ -f "$LOCKFILE" ] && RETVAL=2 || RETVAL=4
+ }
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ condrestart)
+ if [ -f "$LOCKFILE" ] ; then
+ stop
+ start
+ fi
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart}"
+ RETVAL=3
+ ;;
+esac
+
+exit $RETVAL
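On a Red Hat style system the script would be registered through chkconfig, matching its '# chkconfig: 2345 01 99' header. A hedged usage sketch; the commands below are standard SysV/chkconfig tooling, assume the script is installed as /etc/init.d/zfs, and are not part of this commit:

chkconfig --add zfs     # register the service using the priorities from its chkconfig header
chkconfig zfs on        # enable it for runlevels 2-5
service zfs start       # modprobe zfs, `zfs mount -a`, then mount any zvols listed in fstab
service zfs status      # module check plus `zpool status` and `zfs list`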