#!/bin/ksh
# IBM_PROLOG_BEGIN_TAG 
# This is an automatically generated prolog. 
#  
#  
#  
# Licensed Materials - Property of IBM 
#  
# (C) COPYRIGHT International Business Machines Corp. 2005,2007 
# All Rights Reserved 
#  
# US Government Users Restricted Rights - Use, duplication or 
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
#  
# IBM_PROLOG_END_TAG 
# @(#)41 1.4.1.2 src/avs/fs/mmfs/ts/admin/mmlsmount.sh, mmfs, avs_rgpfs24, rgpfs24s011a 3/28/07 19:15:31
##############################################################################
#
#  Display how many nodes have a given GPFS file system mounted,
#  and optionally list the nodes that have the file system mounted.
#
#  Usage:
#
#    mmlsmount {Device | all | all_local | all_remote} [-L]
#              [-C {all | all_remote | ClusterName[,ClusterName...]}]
#
#  where:
#
#    Device | all | all_local | all_remote
#       specifies the device(s) for which mount information is sought.
#       all denotes "all file systems known to this cluster".
#       all_local denotes "all file systems owned by this cluster".
#       all_remote denotes "all file systems not owned by this cluster".
#
#    -C {all | all_remote | ClusterName[,ClusterName...]}
#       specifies the clusters for which mount information is requested.
#       If one or more ClusterName is specified, only the names of nodes
#       that belong to these clusters and have the file system mounted
#       will be displayed.  The dot character (".") can be used in place
#       of the cluster name to denote the local cluster.
#
#       -C all_remote denotes all clusters other than the one from which
#       the command is issued (this cluster).
#
#       -C all refers to all clusters, local and remote, that can have
#       the file system mounted.  -C all is the default.
#
#    -L specifies "list the nodes that have the device(s) mounted".
#
#  Note:
#    If invoked by a non-root user, the command is executed on the local
#    node "as-is".  In particular, the local configuration information may
#    not be up-to-date.  There is no attempt to find an active GPFS daemon
#    on another node.  Non-root users are not allowed to invoke commands on
#    other nodes.
#
##############################################################################

# Include global declarations and service routines.
# mmglobfuncs presumably establishes the common command infrastructure
# ($arg1/$argc, $ourUid, $tmpDir, $mmcmd, error routines, etc.) -- it is
# not visible here; verify against that file.  The sdrfs definitions and
# daemon-interface functions are loaded only for root, since only root
# may refresh configuration data or run commands on other nodes.
. /usr/lpp/mmfs/bin/mmglobfuncs
if [[ $ourUid -eq 0 ]]
then
  . /usr/lpp/mmfs/bin/mmsdrfsdef
  . /usr/lpp/mmfs/bin/mmfsfuncs
fi

sourceFile="mmlsmount.sh"
# Enable shell tracing when either the global or the per-command debug
# environment variable is set.
[[ -n $DEBUG || -n $DEBUGmmlsmount ]] && set -x
$mmTRACE_ENTER "$*"

# Local work files.  Names should be of the form:
#   fn=${tmpDir}fn.${mmcmd}.$$

LOCAL_FILES=" "


# Local variables

usageMsg=540   # message number of this command's usage text
rc=0           # overall return code
Lflag=""       # becomes yes/no/unformatted below; controls node-list output



#######################
# Mainline processing
#######################


##################################
# Process each of the arguments.
##################################
# $arg1 and $argc are presumably pre-parsed by mmglobfuncs -- verify.
[[ $arg1 = '-?' || $arg1 = '-h' || $arg1 = '--help' || $arg1 = '--' ]] &&  \
  syntaxError "help" $usageMsg

# The device name (or all/all_local/all_remote keyword) is required.
[[ $argc -lt 1  ]] &&  \
  syntaxError "missingArgs" $usageMsg

device=$arg1                       # Save the file system device name.
deviceName=${device##+(/)dev+(/)}  # Name stripped of /dev/ prefix (ksh extglob).
shift 1                            # Drop the name from the parameter list.

# The leading ':' in the optstring makes getopts report a missing option
# argument through the ':' case below instead of printing its own message.
while getopts :C:LY OPT
do
  case $OPT in

    C) [[ -n $Cflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Cflag="-$OPT"
       clusterList="$OPTARG"
       ;;

    L) [[ -n $Lflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Lflag=yes
       ;;

    # -Y is an undocumented option that requests unformatted output
    # (see Lflag=unformatted below).
    Y) [[ -n $Yflag ]] && syntaxError "multiple" $noUsageMsg "-$OPT"
       Yflag=yes
       ;;

    :) syntaxError "missingValue" $usageMsg $OPTARG
       ;;

    # ksh getopts also recognizes options given with a leading '+';
    # reject those explicitly.
    +[CLY])
       syntaxError "invalidOption" $usageMsg "$OPT"
       ;;

    *) syntaxError "invalidOption" $usageMsg $OPTARG
       ;;

  esac
done

# ksh shift accepts an arithmetic expression as its operand.
shift OPTIND-1
[[ $# != 0 ]] && syntaxError "extraArg" $usageMsg $1

# Apply defaults:  all clusters, and counts only (no node list).
[[ -z $clusterList ]] && clusterList=NULL

[[ -z $Lflag ]] && Lflag=no
[[ -n $Yflag ]] && Lflag=unformatted


###################################
# Set up trap exception handling.
###################################
trap pretrap2 HUP INT QUIT KILL


####################################################################
# For root invocations, run the gpfsInit function so that the local
# copy of the mmsdrfs file and the remaining GPFS system files are
# current; a read-only query needs no sdr lock.  Non-root users run
# against the local data as-is and never touch other nodes.
####################################################################
[[ $ourUid -ne 0 ]] ||
{
  gpfsInitOutput=$(gpfsInit nolock)
  setGlobalVar $? $gpfsInitOutput
}


###########################################################################
# If device is one of the keywords (all, all_local, or all_remote), do not
# do anything right now.  The list of file systems will be created by the
# lsmount function.  Otherwise, ensure the specified file system exists
# and create the fully-qualified name for the lsmount call.
# The fully-qualified name has the form homeCluster:remoteDev:localDev,
# with empty homeCluster and remoteDev parts for a locally-owned fs.
###########################################################################
if [[ $device != all && $device != all_local && $device != all_remote ]]
then
  if [[ $device = *:* ]]
  then
    # If this is an explicit invocation for a remote file system
    # (cluster:device), find out the home cluster and the local and
    # remote device names for the file system.
    # Split the argument on ':' via the positional parameters; globbing
    # is disabled (set -f) while the unquoted value is expanded, and IFS
    # is restored from the saved copy ($IFS_sv, presumably saved by
    # mmglobfuncs -- verify) afterwards.
    IFS=":"
    set -f ; set -- $device ; set +f
    IFS="$IFS_sv"
    fsHomeCluster=$1
    remoteDevice=$2
    # Look up the local device name in the mmsdrfs file:  match the SG
    # header line for this home cluster and remote device name.  The awk
    # program is built by interpolating the field-number variables and
    # the search values directly into the script text.
    localDevice=$($awk -F: '                                 \
      $'$NODESETID_Field'       == "'$fsHomeCluster'"  &&    \
      $'$LINE_TYPE_Field'       == "'$SG_HEADR'"       &&    \
      $'$REMOTE_DEV_NAME_Field' == "'$remoteDevice'"   {     \
        { print $'$DEV_NAME_Field' }                         \
        { exit }                                             \
      }                                                      \
    ' $mmsdrfsFile)
    checkForErrors awk $?

    # Create the fully-qualified name for the lsmount call.
    fsToShow="${fsHomeCluster}:${remoteDevice}:${localDevice}"

  else
    # If this is a simple device name for a local or remote file system,
    # find the needed information about the file system.
    # Empty output means the device was not found; presumably findFS has
    # already issued a message -- verify.  Exit quietly in that case.
    findFSoutput=$(findFS "$deviceName" $mmsdrfsFile)
    [[ -z $findFSoutput ]] && cleanupAndExit

    set -f ; set -- $findFSoutput ; set +f
    localDevice=$2
    fsHomeCluster=$3
    remoteDevice=$4

    # Create the fully-qualified name for the lsmount call.
    if [[ $fsHomeCluster = $HOME_CLUSTER ]]
    then
      fsToShow=":${localDevice}:"
    else
      fsToShow="${fsHomeCluster}:${remoteDevice}:${localDevice}"
    fi
  fi  # end of if [[ $device = *:* ]]

else
  # Just pass the keyword to the lsmount function.
  fsToShow=$device
fi  # end of if [[ $device != all && $device != all_local ...


################################################################
# First, try to run the lsmount function on the local node.
#
# Note:  The lsmount invocation must be done in a subshell,
#        by enclosing it in parens.  If this is not done,
#        there will be left over work files in /var/mmfs/tmp.
#
# "norefresh" presumably tells lsmount not to refresh the
# configuration data again (gpfsInit already ran for root) --
# verify against the lsmount function.  stderr is captured in
# $errMsg2 so it is shown only when relevant.
################################################################
(lsmount "$fsToShow" "$clusterList" $Lflag norefresh 2>$errMsg2)
rc=$?
# The local result is final unless ALL of the following hold:  the
# failure was "daemon down" or "quorum wait", we are root, and this is
# a multi-node cluster.  Only in that case is it worth retrying the
# query on another node (below).
if [[ ($rc -ne $MM_DaemonDown && $rc -ne $MM_QuorumWait) ||
      $ourUid -ne 0                                      ||
      $MMMODE = single ]]
then
  if [[ $rc -eq $MM_DaemonDown ]]
  then
    # GPFS is down on this node.
    printErrorMsg 109 $mmcmd
  elif [[ $rc -eq $MM_QuorumWait ]]
  then
    # GPFS is not ready for commands.
    printErrorMsg 110 $mmcmd
  elif [[ $rc -eq $MM_ConnectionReset ]]
  then
    # An internode connection was reset.
    printErrorMsg 257 $mmcmd
  elif [[ $rc -ne 0 ]]
  then
    # Something else went wrong.
    # Show error messages, if any.
    [[ -s $errMsg2 ]] && $cat $errMsg2 1>&2
  else
    :  # The command must have worked.
  fi  # end of if [[ $rc -eq $MM_DaemonDown ]]
  cleanupAndExit $rc
fi  # end of if [[ ($rc -ne $MM_DaemonDown && ... ]]
# Falling through to the remote retry:  discard the captured stderr.
$rm -f $errMsg2


########################################################################
# If GPFS is down on the local node, find a node with an active daemon.
# (Reached only for root users in a multi-node cluster; see the checks
# above.)
########################################################################
# Create a file containing all of the nodes in the cluster.
getNodeList $REL_HOSTNAME_Field $GLOBAL_ID $mmsdrfsFile > $nodefile

# Ensure we have the proper credentials.
[[ $getCredCalled = no ]] && getCred

# Find out who has the file systems mounted:  "mmcommon onactive"
# presumably runs "mmremote lsmount ..." on a node from $nodefile that
# has an active GPFS daemon -- verify against mmcommon.
$mmcommon onactive $ourNodeName $nodefile        \
    $NO_FILE_COPY $NO_MOUNT_CHECK NULL $NO_LINK  \
    mmremote lsmount "$fsToShow" "$clusterList" $Lflag
rc=$?

cleanupAndExit $rc

