#!/bin/sh
#============================================================================
#
#     This file is part of the Code_Saturne Kernel, element of the
#     Code_Saturne CFD tool.
#
#     Copyright (C) 1998-2008 EDF S.A., France
#
#     contact: saturne-support@edf.fr
#
#     The Code_Saturne Kernel is free software; you can redistribute it
#     and/or modify it under the terms of the GNU General Public License
#     as published by the Free Software Foundation; either version 2 of
#     the License, or (at your option) any later version.
#
#     The Code_Saturne Kernel is distributed in the hope that it will be
#     useful, but WITHOUT ANY WARRANTY; without even the implied warranty
#     of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with the Code_Saturne Kernel; if not, write to the
#     Free Software Foundation, Inc.,
#     51 Franklin St, Fifth Floor,
#     Boston, MA  02110-1301  USA
#
#============================================================================
#
# Setup MPI environment, using the optional NUMBER_OF_PROCESSORS and
# PROCESSOR_LIST environment variables, as well as other system-specific
# environment variables.
#
# Determine the MPI hosts (machines) file from the batch-system environment.
# On exit, MPIHOSTS is either empty or the path of a one-host-per-line file.
if [ -f "$TMPDIR/machines" ] ; then     # Univ Manchester queueing system
   MPIHOSTS="$TMPDIR/machines"
elif [ -n "$PBS_NODEFILE" ] ; then      # PBS queueing system
   MPIHOSTS="$PBS_NODEFILE"
elif [ -n "$LSB_HOSTS" ] ; then         # LSF queueing system
   # LSB_HOSTS is a space-separated host list: write one host per line.
   # Use a truncating redirection so stale content cannot survive.
   rm -f "$RUN/hostsfile"
   echo "$LSB_HOSTS" | awk '{ for (ii = 1; ii < NF+1; ++ii) print $ii }' > "$RUN/hostsfile"
   MPIHOSTS="$RUN/hostsfile"
elif [ -n "$PROCESSOR_LIST" ] ; then
   # PROCESSOR_LIST is a '&'-separated host list; make sure the local
   # host is part of it before expanding it to one host per line.
   rm -f "$RUN/hostsfile"
   localproc=$(hostname)
   echo "$PROCESSOR_LIST" | grep -q "$localproc" || PROCESSOR_LIST="$localproc & $PROCESSOR_LIST"
   echo "$PROCESSOR_LIST" | awk -F\& '{ for (ii = 1; ii < NF+1; ++ii) print $ii }' > "$RUN/hostsfile"
   MPIHOSTS="$RUN/hostsfile"
fi
# Determine the number of processors unless already forced by the caller
# through the NUMBER_OF_PROCESSORS environment variable.
if [ -z "${NUMBER_OF_PROCESSORS}" ] ; then
  if [ -n "${NSLOTS}" ] ; then
    # Univ Manchester queueing system
    NUMBER_OF_PROCESSORS=$NSLOTS
  elif [ -n "${SLURM_NPROCS}" ] ; then
    # LSF queueing system with Slurm
    NUMBER_OF_PROCESSORS=$SLURM_NPROCS
  elif [ -n "${LSB_HOSTS}" ] ; then
    # LSF queueing system without Slurm: one word per allocated slot
    NUMBER_OF_PROCESSORS=$(echo "$LSB_HOSTS" | wc -w)
  elif [ -n "${MPIHOSTS}" ] ; then
    # PBS queueing system: one hosts-file line per allocated slot
    NUMBER_OF_PROCESSORS=$(wc -l < "$MPIHOSTS")
  else
    # No batch-system information available: run serially.
    NUMBER_OF_PROCESSORS=1
  fi
fi
#
# Count the number of distinct nodes when a hosts file is available.
# A hosts-file line may read "host", "host:ncpus" or "host extra-args";
# keep only the host-name field, then count unique entries.  Stripping
# the suffix *before* sorting guarantees equal host names are adjacent
# (sorting full lines first could leave "node1 a" and "node1:b" split
# around "node10", making a plain uniq overcount).
NUMBER_OF_NODES=1
if [ -n "${MPIHOSTS}" ] ; then
  NUMBER_OF_NODES=$(sed -e 's/:/ /' "$MPIHOSTS" | cut -f 1 -d" " | sort -u | wc -l)
fi
#
if [ -n "$MPIHOSTS" ] ; then
  echo "Total number of processors: $NUMBER_OF_PROCESSORS"
fi
#
# System-related parameters
#
# Define the MPI start-up (MPIBOOT), shutdown (MPIHALT) and run (MPIRUN)
# commands according to the detected architecture and the MPI library
# installed under CS_MPI_PATH.  In serial mode all three are empty.
#
# Default NUMBER_OF_PROCESSORS to 1 so an unset value selects serial mode
# instead of producing a test(1) syntax error.
if [ "${NUMBER_OF_PROCESSORS:-1}" -gt 1 ] ; then
#
# Temporarily set the architecture name (for EDF purposes)
  NOM_ARCH=$(uname -s)
  if [ "$NOM_ARCH" = "Linux" ] ; then
    if [ "$(domainname 2>/dev/null)" = "cluster-chatou" ] ; then
      NOM_ARCH=Linux_Ch
    elif [ -d /bgl/BlueLight/ppcfloor ] ; then
      NOM_ARCH=Blue_Gene_L
    elif [ -d /bgsys/drivers/ppcfloor ] ; then
      NOM_ARCH=Blue_Gene_P
    else
      MACHINE=$(uname -m)
      case "$MACHINE" in
        *86)    NOM_ARCH=Linux ;;
        x86_64) NOM_ARCH=Linux_x86_64 ;;
        ia64)   NOM_ARCH=Linux_IA64 ;;
        *)      NOM_ARCH=Linux_$MACHINE ;;
      esac
    fi
  fi
# Default values (plain mpirun found through PATH)
  MPIBOOT=""
  MPIHALT=""
  MPIRUN="mpirun -np ${NUMBER_OF_PROCESSORS}"
  if [ -d "$CS_MPI_PATH" ] ; then
    export PATH="${CS_MPI_PATH}:$PATH"
  fi
#
  if [ "$NOM_ARCH" = "OSF1" ] ; then
    MPIRUN="prun -n ${NUMBER_OF_PROCESSORS} -stv"
#
  elif [ "$NOM_ARCH" = "Linux_CCRT" ] ; then
    MPIRUN="mpirun -srun"
#
  elif [ "$NOM_ARCH" = "Linux_IA64" ] ; then
    MPIRUN="srun -n ${NUMBER_OF_PROCESSORS}"
#
  elif [ "$NOM_ARCH" = "Linux_Ch" ] ; then
    MPIRUN="mpirun -np ${NUMBER_OF_PROCESSORS} -machinefile $PBS_NODEFILE"
#
  elif [ "$NOM_ARCH" = "AIX" ] ; then
    MPIRUN="poe"
#
  else
    # Generic case: identify the MPI flavor from its characteristic files.
    if [ -d "$CS_MPI_PATH" ] ; then
      if [ -f "${CS_MPI_PATH}/mpdboot" ] ; then        # For MPICH2
        MPIBOOT="${CS_MPI_PATH}/mpdboot"
        MPIHALT="${CS_MPI_PATH}/mpdallexit"
        MPIRUN="${CS_MPI_PATH}/mpiexec -n ${NUMBER_OF_PROCESSORS}"
        if [ -n "$MPIHOSTS" ] ; then
          MPIBOOT="$MPIBOOT -n ${NUMBER_OF_NODES} -v -f $MPIHOSTS"
        fi
      elif [ -f "${CS_MPI_PATH}/ompi_info" ] ; then    # For Open MPI
        MPIRUN="${CS_MPI_PATH}/mpiexec -n ${NUMBER_OF_PROCESSORS}"
        if [ -n "$MPIHOSTS" ] ; then
          MPIRUN="$MPIRUN -machinefile $MPIHOSTS"
        fi
      elif [ -f "${CS_MPI_PATH}/lamboot" ] ; then      # For LAM MPI
        MPIBOOT="${CS_MPI_PATH}/lamboot"
        MPIHALT="${CS_MPI_PATH}/lamhalt"
        MPIRUN="${CS_MPI_PATH}/mpirun -np ${NUMBER_OF_PROCESSORS}"
        if [ -n "$MPIHOSTS" ] ; then
          MPIBOOT="$MPIBOOT -v $MPIHOSTS"
          MPIHALT="$MPIHALT -v $MPIHOSTS"
        fi
      elif [ -f "${CS_MPI_PATH}/mpichversion" ] ; then # For MPICH
        MPIRUN="${CS_MPI_PATH}/mpirun -np ${NUMBER_OF_PROCESSORS}"
        # Test for the machines FILE itself; the previous string test
        # ("$TMPDIR/machines" non-empty) was always true, which made the
        # MPIHOSTS fallback below unreachable.
        if [ -f "$TMPDIR/machines" ] ; then            # For Univ Manchester queueing system
          MPIRUN="$MPIRUN -machinefile $TMPDIR/machines"
        elif [ -n "$MPIHOSTS" ] ; then
          MPIRUN="$MPIRUN -machinefile $MPIHOSTS"
        fi
      fi
    fi
  fi
#
#   In serial mode
#
else
#
  MPIRUN=""
  MPIBOOT=""
  MPIHALT=""
fi
#

