diff -Nru slurm-llnl-2.2.7/aclocal.m4 slurm-llnl-2.3.2/aclocal.m4
--- slurm-llnl-2.2.7/aclocal.m4 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/aclocal.m4 2011-12-05 17:20:08.000000000 +0000
@@ -353,7 +353,7 @@
Consider adjusting the PKG_CONFIG_PATH environment variable if you
installed software in a non-standard prefix.
-_PKG_TEXT])dnl
+_PKG_TEXT])[]dnl
])
elif test $pkg_failed = untried; then
AC_MSG_RESULT([no])
@@ -364,7 +364,7 @@
_PKG_TEXT
-To get pkg-config, see .])dnl
+To get pkg-config, see .])[]dnl
])
else
$1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
@@ -1367,6 +1367,7 @@
m4_include([auxdir/x_ac_hwloc.m4])
m4_include([auxdir/x_ac_iso.m4])
m4_include([auxdir/x_ac_lua.m4])
+m4_include([auxdir/x_ac_man2html.m4])
m4_include([auxdir/x_ac_munge.m4])
m4_include([auxdir/x_ac_ncurses.m4])
m4_include([auxdir/x_ac_pam.m4])
@@ -1377,5 +1378,6 @@
m4_include([auxdir/x_ac_setproctitle.m4])
m4_include([auxdir/x_ac_sgi_job.m4])
m4_include([auxdir/x_ac_slurm_ssl.m4])
+m4_include([auxdir/x_ac_srun.m4])
m4_include([auxdir/x_ac_sun_const.m4])
m4_include([auxdir/x_ac_xcpu.m4])
diff -Nru slurm-llnl-2.2.7/auxdir/ltmain.sh slurm-llnl-2.3.2/auxdir/ltmain.sh
--- slurm-llnl-2.2.7/auxdir/ltmain.sh 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/ltmain.sh 2011-12-05 17:20:08.000000000 +0000
@@ -65,7 +65,7 @@
# compiler: $LTCC
# compiler flags: $LTCFLAGS
# linker: $LD (gnu? $with_gnu_ld)
-# $progname: (GNU libtool) 2.2.6b Debian-2.2.6b-2ubuntu1
+# $progname: (GNU libtool) 2.2.6b Debian-2.2.6b-2ubuntu3
# automake: $automake_version
# autoconf: $autoconf_version
#
@@ -73,7 +73,7 @@
PROGRAM=ltmain.sh
PACKAGE=libtool
-VERSION="2.2.6b Debian-2.2.6b-2ubuntu1"
+VERSION="2.2.6b Debian-2.2.6b-2ubuntu3"
TIMESTAMP=""
package_revision=1.3017
diff -Nru slurm-llnl-2.2.7/auxdir/Makefile.am slurm-llnl-2.3.2/auxdir/Makefile.am
--- slurm-llnl-2.2.7/auxdir/Makefile.am 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/Makefile.am 2011-12-05 17:20:08.000000000 +0000
@@ -15,6 +15,7 @@
x_ac_bluegene.m4 \
x_ac_cflags.m4 \
x_ac_cray.m4 \
+ x_ac_databases.m4 \
x_ac_debug.m4 \
x_ac_elan.m4 \
x_ac_env.m4 \
@@ -22,12 +23,17 @@
x_ac_gpl_licensed.m4 \
x_ac_hwloc.m4 \
x_ac_iso.m4 \
- x_ac_pam.m4 \
+ x_ac_lua.m4 \
+ x_ac_man2html.m4 \
x_ac_munge.m4 \
x_ac_ncurses.m4 \
x_ac_pam.m4 \
+ x_ac_printf_null.m4 \
x_ac_ptrace.m4 \
x_ac_readline.m4 \
x_ac_setproctitle.m4 \
+ x_ac_sgi_job.m4 \
x_ac_slurm_ssl.m4 \
- x_ac_sun_const.m4
+ x_ac_srun.m4 \
+ x_ac_sun_const.m4 \
+ x_ac_xcpu.m4
diff -Nru slurm-llnl-2.2.7/auxdir/Makefile.in slurm-llnl-2.3.2/auxdir/Makefile.in
--- slurm-llnl-2.2.7/auxdir/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -61,6 +61,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -71,6 +72,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -92,7 +94,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -129,6 +134,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -186,6 +192,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -221,6 +228,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
@@ -284,6 +292,7 @@
x_ac_bluegene.m4 \
x_ac_cflags.m4 \
x_ac_cray.m4 \
+ x_ac_databases.m4 \
x_ac_debug.m4 \
x_ac_elan.m4 \
x_ac_env.m4 \
@@ -291,15 +300,20 @@
x_ac_gpl_licensed.m4 \
x_ac_hwloc.m4 \
x_ac_iso.m4 \
- x_ac_pam.m4 \
+ x_ac_lua.m4 \
+ x_ac_man2html.m4 \
x_ac_munge.m4 \
x_ac_ncurses.m4 \
x_ac_pam.m4 \
+ x_ac_printf_null.m4 \
x_ac_ptrace.m4 \
x_ac_readline.m4 \
x_ac_setproctitle.m4 \
+ x_ac_sgi_job.m4 \
x_ac_slurm_ssl.m4 \
- x_ac_sun_const.m4
+ x_ac_srun.m4 \
+ x_ac_sun_const.m4 \
+ x_ac_xcpu.m4
all: all-am
diff -Nru slurm-llnl-2.2.7/auxdir/slurm.m4 slurm-llnl-2.3.2/auxdir/slurm.m4
--- slurm-llnl-2.2.7/auxdir/slurm.m4 2011-06-10 16:55:35.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/slurm.m4 2011-12-05 17:20:08.000000000 +0000
@@ -204,7 +204,7 @@
SLURM_RELEASE="unstable svn build $DATE"
SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR ($SLURM_RELEASE)"
else
- SLURM_RELEASE="`echo $RELEASE | sed 's/^.*\.//'`"
+ SLURM_RELEASE="`echo $RELEASE | sed 's/^0\.//'`"
SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR.$SLURM_MICRO"
test $RELEASE = "1" || SLURM_VERSION_STRING="$SLURM_VERSION_STRING-$SLURM_RELEASE"
fi
@@ -225,4 +225,25 @@
]) dnl AC_SLURM_VERSION
+dnl
+dnl Test if we want to include rpath in the executables (default=yes)
+dnl Doing so is generally discouraged due to problems this causes in upgrading
+dnl software and general incompatibility issues
+dnl
+AC_DEFUN([X_AC_RPATH], [
+ ac_with_rpath=yes
+ AC_MSG_CHECKING([whether to include rpath in build])
+ AC_ARG_WITH(
+ [rpath],
+ AS_HELP_STRING(--without-rpath, Do not include rpath in build),
+ [ case "$withval" in
+ yes) ac_with_rpath=yes ;;
+ no) ac_with_rpath=no ;;
+ *) AC_MSG_RESULT([doh!])
+ AC_MSG_ERROR([bad value "$withval" for --without-rpath]) ;;
+ esac
+ ]
+ )
+ AC_MSG_RESULT([$ac_with_rpath])
+])
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_bluegene.m4 slurm-llnl-2.3.2/auxdir/x_ac_bluegene.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_bluegene.m4 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_bluegene.m4 2011-12-05 17:20:08.000000000 +0000
@@ -15,6 +15,7 @@
AC_DEFUN([X_AC_BGL],
[
+ ac_real_bluegene_loaded=no
ac_bluegene_loaded=no
AC_ARG_WITH(db2-dir, AS_HELP_STRING(--with-db2-dir=PATH,Specify path to parent directory of DB2 library), [ trydb2dir=$withval ])
@@ -109,6 +110,7 @@
AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
+ ac_real_bluegene_loaded=yes
fi
AC_SUBST(BG_INCLUDES)
@@ -193,7 +195,8 @@
AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
- fi
+ ac_real_bluegene_loaded=yes
+ fi
AC_SUBST(BG_INCLUDES)
])
@@ -212,6 +215,7 @@
if test "x$ac_bluegene_loaded" = "xyes" ; then
bg_default_dirs=""
elif test "x$bgq_emulation" = "xyes"; then
+ AC_DEFINE(HAVE_4D, 1, [Define to 1 if 4-dimensional architecture])
AC_DEFINE(SYSTEM_DIMENSIONS, 4, [4-dimensional schedulable architecture])
AC_DEFINE(HAVE_BG, 1, [Define to 1 if emulating or running on Blue Gene system])
AC_DEFINE(HAVE_BGQ, 1, [Define to 1 if emulating or running on Blue Gene/Q system])
@@ -220,11 +224,13 @@
bg_default_dirs=""
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
+ ac_bgq_loaded=yes
else
bg_default_dirs="/bgsys/drivers/ppcfloor"
fi
libname=bgsched
+ loglibname=log4cxx
for bg_dir in $trydb2dir "" $bg_default_dirs; do
# Skip directories that don't exist
@@ -232,49 +238,80 @@
continue;
fi
- soloc=$bg_dir/lib64/lib$libname.so
+ soloc=$bg_dir/hlcs/lib/lib$libname.so
# Search for required BG API libraries in the directory
if test -z "$have_bg_ar" -a -f "$soloc" ; then
have_bgq_ar=yes
- bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -l$libname"
+ if test "$ac_with_rpath" = "yes"; then
+ bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/hlcs/lib -L$bg_dir/hlcs/lib -l$libname"
+ else
+ bg_ldflags="$bg_ldflags -L$bg_dir/hlcs/lib -l$libname"
+ fi
+ fi
+
+ soloc=$bg_dir/extlib/lib/lib$loglibname.so
+ if test -z "$have_bg_ar" -a -f "$soloc" ; then
+ have_bgq_ar=yes
+ if test "$ac_with_rpath" = "yes"; then
+ bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/extlib/lib -L$bg_dir/extlib/lib -l$loglibname"
+ else
+ bg_ldflags="$bg_ldflags -L$bg_dir/extlib/lib -l$loglibname"
+ fi
fi
# Search for headers in the directory
- if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
+ if test -z "$have_bg_hdr" -a -f "$bg_dir/hlcs/include/bgsched/bgsched.h" ; then
have_bgq_hdr=yes
- bg_includes="-I$bg_dir/include"
+ bg_includes="-I$bg_dir/hlcs/include"
fi
+ if test -z "$have_bg_hdr" -a -f "$bg_dir/extlib/include/log4cxx/logger.h" ; then
+ have_bgq_hdr=yes
+ bg_includes="$bg_includes -I$bg_dir/extlib/include"
+ fi
done
if test ! -z "$have_bgq_ar" -a ! -z "$have_bgq_hdr" ; then
# ac_with_readline="no"
# Test to make sure the api is good
saved_LDFLAGS="$LDFLAGS"
- LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
- AC_LINK_IFELSE([AC_LANG_PROGRAM([[ int rm_set_serial(char *); ]], [[ rm_set_serial(""); ]])],[have_bgq_files=yes],[AC_MSG_ERROR(There is a problem linking to the BG/P api.)])
+ LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64 $bg_includes"
+ AC_LANG_PUSH(C++)
+ AC_LINK_IFELSE([AC_LANG_PROGRAM(
+ [[#include
+#include ]],
+ [[ bgsched::init("");
+ log4cxx::LoggerPtr logger_ptr(log4cxx::Logger::getLogger( "ibm" ));]])],
+ [have_bgq_files=yes],
+ [AC_MSG_ERROR(There is a problem linking to the BG/Q api.)])
+ AC_LANG_POP(C++)
LDFLAGS="$saved_LDFLAGS"
fi
if test ! -z "$have_bgq_files" ; then
+ BG_LDFLAGS="$bg_ldflags"
BG_INCLUDES="$bg_includes"
CFLAGS="$CFLAGS -m64"
CXXFLAGS="$CXXFLAGS $CFLAGS"
- AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
- AC_DEFINE(SYSTEM_DIMENSIONS, 3, [3-dimensional architecture])
+ AC_DEFINE(HAVE_4D, 1, [Define to 1 if 4-dimensional architecture])
+ AC_DEFINE(SYSTEM_DIMENSIONS, 4, [4-dimensional architecture])
AC_DEFINE(HAVE_BG, 1, [Define to 1 if emulating or running on Blue Gene system])
AC_DEFINE(HAVE_BGQ, 1, [Define to 1 if emulating or running on Blue Gene/Q system])
AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
AC_DEFINE(HAVE_BG_FILES, 1, [Define to 1 if have Blue Gene files])
- AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$soloc", [Define the BG_BRIDGE_SO value])
+ #AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$soloc", [Define the BG_BRIDGE_SO value])
- AC_MSG_CHECKING(for BG serial value)
- bg_serial="BGQ"
- AC_ARG_WITH(bg-serial,, [bg_serial="$withval"])
- AC_MSG_RESULT($bg_serial)
- AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
+ AC_MSG_NOTICE([Running on a legitimate BG/Q system])
+ # AC_MSG_CHECKING(for BG serial value)
+ # bg_serial="BGQ"
+ # AC_ARG_WITH(bg-serial,, [bg_serial="$withval"])
+ # AC_MSG_RESULT($bg_serial)
+ # AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
- fi
+ ac_real_bluegene_loaded=yes
+ ac_bgq_loaded=yes
+ fi
AC_SUBST(BG_INCLUDES)
+ AC_SUBST(BG_LDFLAGS)
])
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_cray.m4 slurm-llnl-2.3.2/auxdir/x_ac_cray.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_cray.m4 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_cray.m4 2011-12-05 17:20:08.000000000 +0000
@@ -6,45 +6,84 @@
# X_AC_CRAY
#
# DESCRIPTION:
-# Test for Cray systems including XT with 3-D interconect
-# Also test for the apbasil client (Cray's Batch Application Scheduler
-# Interface Layer interface)
-##*****************************************************************************
+# Test for Cray XT and XE systems with 2-D/3-D interconnects.
+# Tests for required libraries (native Cray systems only):
+# * mySQL (relies on testing for mySQL presence earlier);
+# * libexpat, needed for XML-RPC calls to Cray's BASIL
+# (Batch Application Scheduler Interface Layer) interface.
+#*****************************************************************************
+
+AC_DEFUN([X_AC_CRAY],
+[
+ ac_have_cray="no"
+ ac_have_real_cray="no"
+ ac_have_alps_emulation="no"
+ ac_have_cray_emulation="no"
+
+ AC_ARG_WITH(
+ [alps-emulation],
+ AS_HELP_STRING(--with-alps-emulation,Run SLURM against an emulated Alps system - requires option cray.conf @<:@default=no@:>@),
+ [test "$withval" = no || ac_have_alps_emulation=yes],
+ [ac_have_alps_emulation=no])
-AC_DEFUN([X_AC_CRAY], [
- AC_MSG_CHECKING([for Cray XT])
AC_ARG_ENABLE(
- [cray-xt],
- AS_HELP_STRING(--enable-cray-xt,enable Cray XT system support),
- [ case "$enableval" in
- yes) x_ac_cray_xt=yes ;;
- no) x_ac_cray_xt=no ;;
- *) AC_MSG_RESULT([doh!])
- AC_MSG_ERROR([bad value "$enableval" for --enable-cray-xt]) ;;
- esac
- ],
- [x_ac_cray_xt=no]
+ [cray-emulation],
+ AS_HELP_STRING(--enable-cray-emulation,Run SLURM in an emulated Cray mode),
+ [ case "$enableval" in
+ yes) ac_have_cray_emulation="yes" ;;
+ no) ac_have_cray_emulation="no" ;;
+ *) AC_MSG_ERROR([bad value "$enableval" for --enable-cray-emulation]) ;;
+ esac ]
)
- if test "$x_ac_cray_xt" = yes; then
- AC_MSG_RESULT([yes])
- AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
- AC_DEFINE(SYSTEM_DIMENSIONS, 3, [3-dimensional architecture])
- AC_DEFINE(HAVE_CRAY,1,[Define if Cray system])
- AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
+ if test "$ac_have_alps_emulation" = "yes"; then
+ ac_have_cray="yes"
+ AC_MSG_NOTICE([Running A Cray system against an Alps emulation])
+ AC_DEFINE(HAVE_ALPS_EMULATION, 1, [Define to 1 if running against an Alps emulation])
+ elif test "$ac_have_cray_emulation" = "yes"; then
+ ac_have_cray="yes"
+ AC_MSG_NOTICE([Running in Cray emulation mode])
+ AC_DEFINE(HAVE_CRAY_EMULATION, 1, [Define to 1 for emulating a Cray XT/XE system])
else
- AC_MSG_RESULT([no])
+ # Check for a Cray-specific file:
+ # * older XT systems use an /etc/xtrelease file
+ # * newer XT/XE systems use an /etc/opt/cray/release/xtrelease file
+ # * both have an /etc/xthostname
+ AC_MSG_CHECKING([whether this is a native Cray XT or XE system or have ALPS simulator])
+
+ if test -f /etc/xtrelease || test -d /etc/opt/cray/release; then
+ ac_have_cray="yes"
+ ac_have_real_cray="yes"
+ AC_DEFINE(HAVE_REAL_CRAY, 1, [Define to 1 for running on a real Cray XT/XE system])
+ fi
+ AC_MSG_RESULT([$ac_have_cray])
fi
- AC_ARG_WITH(apbasil, AS_HELP_STRING(--with-apbasil=PATH,Specify path to apbasil command), [ try_apbasil=$withval ])
- apbasil_default_locs="/usr/bin/apbasil"
- for apbasil_loc in $try_apbasil "" $apbasil_default_locs; do
- if test -z "$have_apbasil" -a -x "$apbasil_loc" ; then
- have_apbasil=$apbasil_loc
+ if test "$ac_have_cray" = "yes"; then
+ # libexpat is always required for the XML-RPC interface
+ AC_CHECK_HEADER(expat.h, [],
+ AC_MSG_ERROR([Cray BASIL requires expat headers/rpm]))
+ AC_CHECK_LIB(expat, XML_ParserCreate, [],
+ AC_MSG_ERROR([Cray BASIL requires libexpat.so (i.e. libexpat1-dev)]))
+
+ if test "$ac_have_real_cray" = "yes"; then
+ AC_CHECK_LIB([job], [job_getjid], [],
+ AC_MSG_ERROR([Need cray-job (usually in /opt/cray/job/default)]))
fi
- done
- if test ! -z "$have_apbasil" ; then
- AC_DEFINE_UNQUOTED(APBASIL_LOC, "$have_apbasil", [Define the apbasil command location])
+
+ if test -z "$MYSQL_CFLAGS" || test -z "$MYSQL_LIBS"; then
+ AC_MSG_ERROR([Cray BASIL requires the cray-MySQL-devel-enterprise rpm])
+ fi
+
+ AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
+ AC_DEFINE(SYSTEM_DIMENSIONS, 3, [3-dimensional architecture])
+ AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
+ AC_DEFINE(HAVE_CRAY, 1, [Define to 1 for Cray XT/XE systems])
+ AC_DEFINE(SALLOC_KILL_CMD, 1, [Define to 1 for salloc to kill child processes at job termination])
+ AC_DEFINE(SALLOC_RUN_FOREGROUND, 1, [Define to 1 to require salloc execution in the foreground.])
fi
+ AM_CONDITIONAL(HAVE_CRAY, test "$ac_have_cray" = "yes")
+ AM_CONDITIONAL(HAVE_REAL_CRAY, test "$ac_have_real_cray" = "yes")
+ AM_CONDITIONAL(HAVE_ALPS_EMULATION, test "$ac_have_alps_emulation" = "yes")
+ AM_CONDITIONAL(HAVE_CRAY_EMULATION, test "$ac_have_cray_emulation" = "yes")
])
-
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_debug.m4 slurm-llnl-2.3.2/auxdir/x_ac_debug.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_debug.m4 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_debug.m4 2011-12-05 17:20:08.000000000 +0000
@@ -33,6 +33,7 @@
)
if test "$x_ac_debug" = yes; then
test "$GCC" = yes && CFLAGS="$CFLAGS -Wall -fno-strict-aliasing"
+ test "$GXX" = yes && CXXFLAGS="$CXXFLAGS -Wall -fno-strict-aliasing"
else
AC_DEFINE([NDEBUG], [1],
[Define to 1 if you are building a production release.]
@@ -91,6 +92,24 @@
fi
AC_MSG_RESULT([${x_ac_partial_attach=no}])
+ AC_MSG_CHECKING([whether salloc should kill child processes at job termination])
+ AC_ARG_ENABLE(
+ [salloc-kill-cmd],
+ AS_HELP_STRING(--enable-salloc-kill-cmd,salloc should kill child processes at job termination),
+ [ case "$enableval" in
+ yes) x_ac_salloc_kill_cmd=yes ;;
+ no) x_ac_salloc_kill_cmd=no ;;
+ *) AC_MSG_RESULT([doh!])
+ AC_MSG_ERROR([bad value "$enableval" for --enable-salloc-kill-cmd]) ;;
+ esac
+ ]
+ )
+ if test "$x_ac_salloc_kill_cmd" = yes; then
+ AC_DEFINE(SALLOC_KILL_CMD, 1, [Define to 1 for salloc to kill child processes at job termination])
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ fi
AC_MSG_CHECKING([whether to disable salloc execution in the background])
AC_ARG_ENABLE(
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_lua.m4 slurm-llnl-2.3.2/auxdir/x_ac_lua.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_lua.m4 2011-06-10 16:55:35.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_lua.m4 2011-12-05 17:20:08.000000000 +0000
@@ -22,9 +22,9 @@
if test "x$x_ac_have_lua" = "xyes"; then
saved_CFLAGS="$CFLAGS"
- saved_LDFLAGS="$LDFLAGS"
+ saved_LIBS="$LIBS"
CFLAGS="$CFLAGS $lua_CFLAGS"
- LDFLAGS="$LDFLAGS $lua_LIBS"
+ LIBS="$LIBS $lua_LIBS"
AC_MSG_CHECKING([for whether we can link to liblua])
AC_TRY_LINK(
[#include
@@ -37,7 +37,7 @@
AC_MSG_RESULT([$x_ac_have_lua])
CFLAGS="$saved_CFLAGS"
- LDFLAGS="$saved_LDFLAGS"
+ LIBS="$saved_LIBS"
fi
AM_CONDITIONAL(HAVE_LUA, test "x$x_ac_have_lua" = "xyes")
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_man2html.m4 slurm-llnl-2.3.2/auxdir/x_ac_man2html.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_man2html.m4 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_man2html.m4 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,23 @@
+##*****************************************************************************
+# AUTHOR:
+# Don Lipari
+#
+# SYNOPSIS:
+# X_AC_MAN2HTML
+#
+# DESCRIPTION:
+# Test for the presence of the man2html command.
+#
+##*****************************************************************************
+
+AC_DEFUN([X_AC_MAN2HTML],
+[
+ AC_MSG_CHECKING([whether man2html is available])
+ AC_CHECK_PROG(ac_have_man2html, man2html, [yes], [no], [$bindir:/usr/bin:/usr/local/bin])
+
+ AM_CONDITIONAL(HAVE_MAN2HTML, test "x$ac_have_man2html" == "xyes")
+
+ if test "x$ac_have_man2html" != "xyes" ; then
+ AC_MSG_NOTICE([Unable to build man page html files without man2html])
+ fi
+])
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_munge.m4 slurm-llnl-2.3.2/auxdir/x_ac_munge.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_munge.m4 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_munge.m4 2011-12-05 17:20:08.000000000 +0000
@@ -36,7 +36,7 @@
test -f "$d/include/munge.h" || continue
for bit in $_x_ac_munge_libs; do
test -d "$d/$bit" || continue
-
+
_x_ac_munge_libs_save="$LIBS"
LIBS="-L$d/$bit -lmunge $LIBS"
AC_LINK_IFELSE(
@@ -54,7 +54,11 @@
else
MUNGE_LIBS="-lmunge"
MUNGE_CPPFLAGS="-I$x_ac_cv_munge_dir/include"
- MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+ if test "$ac_with_rpath" = "yes"; then
+ MUNGE_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_munge_dir/$bit -L$x_ac_cv_munge_dir/$bit"
+ else
+ MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+ fi
fi
AC_SUBST(MUNGE_LIBS)
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_srun.m4 slurm-llnl-2.3.2/auxdir/x_ac_srun.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_srun.m4 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_srun.m4 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,32 @@
+##*****************************************************************************
+## $Id: x_ac_srun.m4 17616 2009-05-27 21:24:58Z jette $
+##*****************************************************************************
+# AUTHOR:
+# Morris Jette
+#
+# SYNOPSIS:
+# AC_SRUN
+#
+# DESCRIPTION:
+# Adds support for --with-srun2aprun. If set then build srun-aprun wrapper
+# rather than native SLURM srun.
+##*****************************************************************************
+
+AC_DEFUN([X_AC_SRUN2APRUN],
+[
+ ac_with_srun2aprun="no"
+
+ AC_MSG_CHECKING([for whether to include srun-aprun wrapper rather than native SLURM srun])
+ AC_ARG_WITH([srun2aprun],
+ AS_HELP_STRING(--with-srun2aprun,use aprun wrapper instead of native SLURM srun command),
+ [ case "$withval" in
+ yes) ac_with_srun2aprun=yes ;;
+ no) ac_with_srun2aprun=no ;;
+ *) AC_MSG_RESULT([doh!])
+ AC_MSG_ERROR([bad value "$withval" for --with-srun2aprun]) ;;
+ esac
+ ]
+ )
+ AC_MSG_RESULT($ac_with_srun2aprun)
+ AM_CONDITIONAL(BUILD_SRUN2APRUN, test "x$ac_with_srun2aprun" = "xyes")
+])
diff -Nru slurm-llnl-2.2.7/auxdir/x_ac_sun_const.m4 slurm-llnl-2.3.2/auxdir/x_ac_sun_const.m4
--- slurm-llnl-2.2.7/auxdir/x_ac_sun_const.m4 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/auxdir/x_ac_sun_const.m4 2011-12-05 17:20:08.000000000 +0000
@@ -6,7 +6,7 @@
# X_AC_SUN_CONST
#
# DESCRIPTION:
-# Test for Sun Constellation system with 3-D interconect
+# Test for Sun Constellation system with 3-D interconnect
##*****************************************************************************
AC_DEFUN([X_AC_SUN_CONST], [
diff -Nru slurm-llnl-2.2.7/BUILD.NOTES slurm-llnl-2.3.2/BUILD.NOTES
--- slurm-llnl-2.2.7/BUILD.NOTES 2011-06-10 16:57:26.000000000 +0000
+++ slurm-llnl-2.3.2/BUILD.NOTES 2011-12-05 17:20:08.000000000 +0000
@@ -23,9 +23,11 @@
Here is a step-by-step HOWTO for creating a new release of SLURM on a
Linux cluster (See BlueGene and AIX specific notes below for some differences).
-0. svn co https://eris.llnl.gov/svn/slurm/trunk slurm
- svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
- put the buildfarm directory in your search path
+0. Get current copies of SLURM and buildfarm
+ > git clone https://@github.com/chaos/slurm.git
+ > svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
+ place the buildfarm directory in your search path
+ > export PATH=~/buildfarm:$PATH
1. Update NEWS and META files for the new release. In the META file,
the API, Major, Minor, Micro, Version, and Release fields must all
by up-to-date. **** DON'T UPDATE META UNTIL RIGHT BEFORE THE TAG ****
@@ -35,39 +37,37 @@
files, but not to code.
- this is a prerelease (Release = 0.preX)
2. Tag the repository with the appropriate name for the new version.
- svn copy https://eris.llnl.gov/svn/slurm/trunk \
- https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3 \
- -m "description"
+ > git tag -a slurm-2-3-0-0-pre5 -m "create tag v2.3.0-pre5"
+ > git push --tags
3. Use the rpm make target to create the new RPMs. This requires a .rpmmacros
(.rpmrc for newer versions of rpmbuild) file containing:
%_slurm_sysconfdir /etc/slurm
%_with_debug 1
%_with_sgijob 1
%_with_elan 1 (ONLY ON SYSTEMS WITH ELAN SWITCH)
- I usually build with using the following syntax:
- build -s https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3
-4. Remove the RPMs that we don't want:
- rm -f slurm-perlapi*rpm slurm-torque*rpm
-5. Move the RPMs to
- /usr/local/admin/rpms/llnl/RPMS-RHEL4/x86_64 (odevi, or gauss)
- /usr/local/admin/rpms/llnl/RPMS-RHEL4/i386/ (adevi)
- /usr/local/admin/rpms/llnl/RPMS-RHEL4/ia64/ (tdevi)
- send an announcement email (with the latest entry from the NEWS
- file) out to linux-admin@lists.llnl.gov.
-6. Copy tagged bzip file (e.g. slurm-0.6.0-0.pre3.bz2) to FTP server
- for external SLURM users.
-7. Copy bzip file and rpms (including src.rpm) to sourceforge.net:
- ncftp upload.sf.net
- cd upload
- put filename
- Use SourceForge admin tool to add new release, including changelog.
+ NOTE: build will make a tar-ball based upon ALL of the files in your current
+ local directory. If that includes scratch files, everyone will get those
+ files in the tar-ball. For that reason, it is a good idea to clone a clean
+ copy of the repository and build from that
+ > git clone https://@github.com/chaos/slurm.git
+ Build using the following syntax:
+ > build --snapshot -s OR
+ > build --nosnapshot -s
+ --nosnapshot will name the tar-ball and RPMs based upon the META file
+ --snapshot will name the tar-ball and RPMs based upon the META file plus a
+ timestamp. Do this to make a tar-ball for a non-tagged release.
+ NOTE: should be a fully-qualified pathname
+4. scp the files to schedmd.com into ~/www/download/development or
+ ~/www/download/development. Move the older files to ~/www/download/archive,
+ login to schedmd.com, cd to ~/download, and execute "php process.php" to
+ update the web pages.
BlueGene build notes:
0. If on a bgp system and you want sview export these variables
- export CFLAGS="-I/opt/gnome/lib/gtk-2.0/include -I/opt/gnome/lib/glib-2.0/include $CFLAGS"
- export LIBS="-L/usr/X11R6/lib64 $LIBS"
- export CMD_LDFLAGS='-L/usr/X11R6/lib64'
- export PKG_CONFIG_PATH="/opt/gnome/lib64/pkgconfig/:$PKG_CONFIG_PATH"
+ > export CFLAGS="-I/opt/gnome/lib/gtk-2.0/include -I/opt/gnome/lib/glib-2.0/include $CFLAGS"
+ > export LIBS="-L/usr/X11R6/lib64 $LIBS"
+ > export CMD_LDFLAGS='-L/usr/X11R6/lib64'
+ > export PKG_CONFIG_PATH="/opt/gnome/lib64/pkgconfig/:$PKG_CONFIG_PATH"
1. Use the rpm make target to create the new RPMs. This requires a .rpmmacros
(.rpmrc for newer versions of rpmbuild) file containing:
%_prefix /usr
@@ -76,13 +76,17 @@
%_without_pam 1
%_with_debug 1
Build on Service Node with using the following syntax
- rpmbuild -ta slurm-...bz2
+ > rpmbuild -ta slurm-...bz2
The RPM files get written to the directory
/usr/src/packages/RPMS/ppc64
To build and run on AIX:
-0. svn co https://eris.llnl.gov/svn/slurm/trunk slurm
- svn co https://eris.llnl.gov/svn/buildfarm/trunk buildfarm
+0. Get current copies of SLURM and buildfarm
+ > git clone https://@github.com/chaos/slurm.git
+ > svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
+ put the buildfarm directory in your search path
+ > export PATH=~/buildfarm:$PATH
+
Put the buildfarm directory in your search path
Also, you will need several commands to appear FIRST in your PATH:
@@ -93,10 +97,11 @@
I do this by making symlinks to those commands in the buildfarm directory,
then making the buildfarm directory the first one in my PATH.
Also, make certain that the "proctrack" rpm is installed.
-1. export OBJECT_MODE=32
- export PKG_CONFIG="/usr/bin/pkg-config"
+1. Export some environment variables
+ > export OBJECT_MODE=32
+ > export PKG_CONFIG="/usr/bin/pkg-config"
2. Build with:
- ./configure --enable-debug --prefix=/opt/freeware \
+ > ./configure --enable-debug --prefix=/opt/freeware \
--sysconfdir=/opt/freeware/etc/slurm \
--with-ssl=/opt/freeware --with-munge=/opt/freeware \
--with-proctrack=/opt/freeware
@@ -119,11 +124,23 @@
%with_munge "--with-munge=/opt/freeware"
%with_proctrack "--with-proctrack=/opt/freeware"
Log in to the machine "uP". uP is currently the lowest-common-denominator
- AIX machine.
- CC=/usr/bin/gcc build -s https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3
-4. export MP_RMLIB=./slurm_ll_api.so
- export CHECKPOINT=yes
-5. poe hostname -rmpool debug
+ AIX machine.
+ NOTE: build will make a tar-ball based upon ALL of the files in your current
+ local directory. If that includes scratch files, everyone will get those
+ files in the tar-ball. For that reason, it is a good idea to clone a clean
+ copy of the repository and build from that
+ > git clone https://@github.com/chaos/slurm.git
+ Build using the following syntax:
+ > export CC=/usr/bin/gcc
+ > build --snapshot -s OR
+ > build --nosnapshot -s
+ --nosnapshot will name the tar-ball and RPMs based upon the META file
+ --snapshot will name the tar-ball and RPMs based upon the META file plus a
+ timestamp. Do this to make a tar-ball for a non-tagged release.
+4. Test POE after telling POE where to find SLURM's LoadLeveler wrapper.
+ > export MP_RMLIB=./slurm_ll_api.so
+ > export CHECKPOINT=yes
+5. > poe hostname -rmpool debug
6. To debug, set SLURM_LL_API_DEBUG=3 before running poe - will create a file
/tmp/slurm.*
It can also be helpful to use poe options "-ilevel 6 -pmdlog yes"
@@ -205,9 +222,17 @@
with the bug.
For memory leaks (for AIX use zerofault, zf; for linux use valgrind)
- - run configure with the option --enable-memory-leak-debug
- - valgrind --tool=memcheck --leak-check=yes --num-callers=6 --leak-resolution=med \
- ./slurmctld -Dc >ctld.out 2>&1 (or similar like for slurmd)
+ - Run configure with the option "--enable-memory-leak-debug" to completely
+ release allocated memory when the daemons exit
+ - valgrind --tool=memcheck --leak-check=yes --num-callers=8 --leak-resolution=med \
+ ./slurmctld -Dc >valg.ctld.out 2>&1
+ - valgrind --tool=memcheck --leak-check=yes --num-callers=8 --leak-resolution=med \
+ ./slurmd -Dc >valg.slurmd.out 2>&1 (Probably only one one node of cluster)
+ - Run the regression test. In the globals.local file include:
+ "set enable_memory_leak_debug 1"
+ - Shutdown the daemons using "scontrol shutdown"
+ - Examine the end of the log files for leaks. pthread_create() and dlopen()
+ have small memory leaks on some systems, which do not grow over time
Before new major release:
- Test on ia64, i386, x86_64, BGL, AIX, OSX, XCPU
diff -Nru slurm-llnl-2.2.7/config.h.in slurm-llnl-2.3.2/config.h.in
--- slurm-llnl-2.2.7/config.h.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/config.h.in 2011-12-05 17:20:08.000000000 +0000
@@ -3,9 +3,6 @@
/* Define if building universal (internal helper macro) */
#undef AC_APPLE_UNIVERSAL_BUILD
-/* Define the apbasil command location */
-#undef APBASIL_LOC
-
/* Define the BG_BRIDGE_SO value */
#undef BG_BRIDGE_SO
@@ -39,9 +36,15 @@
/* Define to 1 if 3-dimensional architecture */
#undef HAVE_3D
+/* Define to 1 if 4-dimensional architecture */
+#undef HAVE_4D
+
/* Define to 1 for AIX operating system */
#undef HAVE_AIX
+/* Define to 1 if running against an Alps emulation */
+#undef HAVE_ALPS_EMULATION
+
/* Define to 1 if emulating or running on Blue Gene system */
#undef HAVE_BG
@@ -63,9 +66,12 @@
/* Define to 1 if you have the `cfmakeraw' function. */
#undef HAVE_CFMAKERAW
-/* Define if Cray system */
+/* Define to 1 for Cray XT/XE systems */
#undef HAVE_CRAY
+/* Define to 1 for emulating a Cray XT/XE system */
+#undef HAVE_CRAY_EMULATION
+
/* Define to 1 if you have the header file. */
#undef HAVE_CURSES_H
@@ -136,6 +142,12 @@
/* define if you have libelanhosts. */
#undef HAVE_LIBELANHOSTS
+/* Define to 1 if you have the `expat' library (-lexpat). */
+#undef HAVE_LIBEXPAT
+
+/* Define to 1 if you have the `job' library (-ljob). */
+#undef HAVE_LIBJOB
+
/* define if you have libntbl. */
#undef HAVE_LIBNTBL
@@ -209,6 +221,9 @@
/* Define if you are compiling with readline. */
#undef HAVE_READLINE
+/* Define to 1 for running on a real Cray XT/XE system */
+#undef HAVE_REAL_CRAY
+
/* Define to 1 if you have the `sched_setaffinity' function. */
#undef HAVE_SCHED_SETAFFINITY
@@ -389,6 +404,9 @@
/* Define the project's release. */
#undef RELEASE
+/* Define to 1 for salloc to kill child processes at job termination */
+#undef SALLOC_KILL_CMD
+
/* Define to 1 to require salloc execution in the foreground. */
#undef SALLOC_RUN_FOREGROUND
@@ -461,7 +479,7 @@
/* Define to 1 if strerror_r returns char *. */
#undef STRERROR_R_CHAR_P
-/* Define system dimension count */
+/* 3-dimensional architecture */
#undef SYSTEM_DIMENSIONS
/* Define to 1 if you can safely include both and . */
diff -Nru slurm-llnl-2.2.7/configure slurm-llnl-2.3.2/configure
--- slurm-llnl-2.2.7/configure 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/configure 2011-12-05 17:20:08.000000000 +0000
@@ -743,6 +743,8 @@
ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
+BUILD_SRUN2APRUN_FALSE
+BUILD_SRUN2APRUN_TRUE
WITH_BLCR_FALSE
WITH_BLCR_TRUE
BLCR_LDFLAGS
@@ -766,6 +768,10 @@
SSL_LIBS
SSL_LDFLAGS
READLINE_LIBS
+HAVE_MAN2HTML
+HAVE_MAN2HTML_FALSE
+HAVE_MAN2HTML_TRUE
+ac_have_man2html
HAVE_LUA_FALSE
HAVE_LUA_TRUE
lua_LIBS
@@ -787,6 +793,14 @@
SLURMCTLD_PORT
DEBUG_MODULES_FALSE
DEBUG_MODULES_TRUE
+HAVE_CRAY_EMULATION_FALSE
+HAVE_CRAY_EMULATION_TRUE
+HAVE_ALPS_EMULATION_FALSE
+HAVE_ALPS_EMULATION_TRUE
+HAVE_REAL_CRAY_FALSE
+HAVE_REAL_CRAY_TRUE
+HAVE_CRAY_FALSE
+HAVE_CRAY_TRUE
WITH_PGSQL_FALSE
WITH_PGSQL_TRUE
PGSQL_CFLAGS
@@ -827,6 +841,8 @@
NUMA_LIBS
WITH_GNU_LD_FALSE
WITH_GNU_LD_TRUE
+WITH_CXX_FALSE
+WITH_CXX_TRUE
PKG_CONFIG_LIBDIR
PKG_CONFIG_PATH
PKG_CONFIG
@@ -848,12 +864,8 @@
FGREP
SED
LIBTOOL
-am__fastdepCXX_FALSE
-am__fastdepCXX_TRUE
-CXXDEPMODE
-ac_ct_CXX
-CXXFLAGS
-CXX
+WITH_CYGWIN_FALSE
+WITH_CYGWIN_TRUE
HAVE_AIX_PROCTRACK_FALSE
HAVE_AIX_PROCTRACK_TRUE
EGREP
@@ -869,6 +881,22 @@
BLUEGENE_LOADED
BLUEGENE_LOADED_FALSE
BLUEGENE_LOADED_TRUE
+BGQ_LOADED
+BGQ_LOADED_FALSE
+BGQ_LOADED_TRUE
+BG_LDFLAGS
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+ac_ct_CXX
+CXXFLAGS
+CXX
+REAL_BG_L_P_LOADED
+REAL_BG_L_P_LOADED_FALSE
+REAL_BG_L_P_LOADED_TRUE
+BG_L_P_LOADED
+BG_L_P_LOADED_FALSE
+BG_L_P_LOADED_TRUE
BGL_LOADED
BGL_LOADED_FALSE
BGL_LOADED_TRUE
@@ -981,6 +1009,7 @@
ac_user_opts='
enable_option_checking
enable_maintainer_mode
+with_rpath
with_db2_dir
enable_bluegene_emulation
enable_bgl_emulation
@@ -1001,8 +1030,6 @@
with_pam_dir
enable_iso8601
enable_load_env_no_login
-enable_cray_xt
-with_apbasil
enable_sun_const
with_dimensions
with_hwloc
@@ -1010,10 +1037,13 @@
enable_gtktest
with_mysql_config
with_pg_config
+with_alps_emulation
+enable_cray_emulation
enable_debug
enable_memory_leak_debug
enable_front_end
enable_partial_attach
+enable_salloc_kill_cmd
enable_salloc_background
with_slurmctld_port
with_slurmd_port
@@ -1024,6 +1054,7 @@
with_munge
enable_multiple_slurmd
with_blcr
+with_srun2aprun
'
ac_precious_vars='build_alias
host_alias
@@ -1033,10 +1064,10 @@
LDFLAGS
LIBS
CPPFLAGS
-CPP
CXX
CXXFLAGS
CCC
+CPP
CXXCPP
PKG_CONFIG
PKG_CONFIG_PATH
@@ -1683,15 +1714,18 @@
--enable-load-env-no-login
enable --get-user-env option to load user
environment without .login
- --enable-cray-xt enable Cray XT system support
--enable-sun-const enable Sun Constellation system support
--disable-gtktest do not try to compile and run a test GTK+ program
+ --enable-cray-emulation Run SLURM in an emulated Cray mode
--enable-debug enable debugging code for development
--enable-memory-leak-debug
enable memory leak debugging code for development
--enable-front-end enable slurmd operation on a front-end
--disable-partial-attach
disable debugger partial task attach support
+ --enable-salloc-kill-cmd
+ salloc should kill child processes at job
+ termination
--disable-salloc-background
disable salloc execution in the background
--enable-multiple-slurmd
@@ -1700,6 +1734,7 @@
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
--without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
+ --without-rpath Do not include rpath in build
--with-db2-dir=PATH Specify path to parent directory of DB2 library
--with-bg-serial=NAME set BG_SERIAL value
@@ -1710,7 +1745,6 @@
--with-cpusetdir=PATH specify path to cpuset directory default is
/dev/cpuset
--with-pam_dir=PATH Specify path to PAM module installation
- --with-apbasil=PATH Specify path to apbasil command
--with-dimensions=N set system dimension count for generic computer
system
--with-hwloc=PATH Specify path to hwloc installation
@@ -1718,6 +1752,8 @@
--with-mysql_config=PATH
Specify path to mysql_config binary
--with-pg_config=PATH Specify path to pg_config binary
+ --with-alps-emulation Run SLURM against an emulated Alps system - requires
+ option cray.conf [default=no]
--with-slurmctld-port=N set slurmctld default port [6817]
--with-slurmd-port=N set slurmd default port [6818]
--with-slurmdbd-port=N set slurmdbd default port [6819]
@@ -1727,6 +1763,8 @@
--with-ssl=PATH Specify path to OpenSSL installation
--with-munge=PATH Specify path to munge installation
--with-blcr=PATH Specify path to BLCR installation
+ --with-srun2aprun use aprun wrapper instead of native SLURM srun
+ command
Some influential environment variables:
CC C compiler command
@@ -1736,9 +1774,9 @@
LIBS libraries to pass to the linker, e.g. -l
CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if
you have headers in a nonstandard directory
- CPP C preprocessor
CXX C++ compiler command
CXXFLAGS C++ compiler flags
+ CPP C preprocessor
CXXCPP C++ preprocessor
PKG_CONFIG path to pkg-config utility
PKG_CONFIG_PATH
@@ -1912,6 +1950,90 @@
} # ac_fn_c_try_link
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext
+ if { { ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compile") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest.$ac_objext; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ rm -f conftest.$ac_objext conftest$ac_exeext
+ if { { ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_link") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ grep -v '^ *+' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ mv -f conftest.er1 conftest.err
+ fi
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && {
+ test -z "$ac_cxx_werror_flag" ||
+ test ! -s conftest.err
+ } && test -s conftest$ac_exeext && {
+ test "$cross_compiling" = yes ||
+ $as_test_x conftest$ac_exeext
+ }; then :
+ ac_retval=0
+else
+ $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_retval=1
+fi
+ # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+ # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+ # interfere with the next link command; also delete a directory that is
+ # left behind by Apple's compiler. We do this before executing the actions.
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+ eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+
# ac_fn_c_try_cpp LINENO
# ----------------------
# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
@@ -2109,44 +2231,6 @@
} # ac_fn_c_check_header_compile
-# ac_fn_cxx_try_compile LINENO
-# ----------------------------
-# Try to compile conftest.$ac_ext, and return whether this succeeded.
-ac_fn_cxx_try_compile ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- rm -f conftest.$ac_objext
- if { { ac_try="$ac_compile"
-case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_compile") 2>conftest.err
- ac_status=$?
- if test -s conftest.err; then
- grep -v '^ *+' conftest.err >conftest.er1
- cat conftest.er1 >&5
- mv -f conftest.er1 conftest.err
- fi
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } && {
- test -z "$ac_cxx_werror_flag" ||
- test ! -s conftest.err
- } && test -s conftest.$ac_objext; then :
- ac_retval=0
-else
- $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
- ac_retval=1
-fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
- as_fn_set_status $ac_retval
-
-} # ac_fn_cxx_try_compile
-
# ac_fn_c_check_func LINENO FUNC VAR
# ----------------------------------
# Tests whether FUNC exists, setting the cache variable VAR accordingly
@@ -2251,52 +2335,6 @@
} # ac_fn_cxx_try_cpp
-# ac_fn_cxx_try_link LINENO
-# -------------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_cxx_try_link ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- rm -f conftest.$ac_objext conftest$ac_exeext
- if { { ac_try="$ac_link"
-case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_link") 2>conftest.err
- ac_status=$?
- if test -s conftest.err; then
- grep -v '^ *+' conftest.err >conftest.er1
- cat conftest.er1 >&5
- mv -f conftest.er1 conftest.err
- fi
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } && {
- test -z "$ac_cxx_werror_flag" ||
- test ! -s conftest.err
- } && test -s conftest$ac_exeext && {
- test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
- }; then :
- ac_retval=0
-else
- $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
- ac_retval=1
-fi
- # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
- # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
- # interfere with the next link command; also delete a directory that is
- # left behind by Apple's compiler. We do this before executing the actions.
- rm -rf conftest.dSYM conftest_ipa8_conftest.oo
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
- as_fn_set_status $ac_retval
-
-} # ac_fn_cxx_try_link
-
# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES
# ---------------------------------------------
# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR
@@ -2938,7 +2976,7 @@
SLURM_RELEASE="unstable svn build $DATE"
SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR ($SLURM_RELEASE)"
else
- SLURM_RELEASE="`echo $RELEASE | sed 's/^.*\.//'`"
+ SLURM_RELEASE="`echo $RELEASE | sed 's/^0\.//'`"
SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR.$SLURM_MICRO"
test $RELEASE = "1" || SLURM_VERSION_STRING="$SLURM_VERSION_STRING-$SLURM_RELEASE"
fi
@@ -3482,6 +3520,28 @@
ac_config_headers="$ac_config_headers slurm/slurm.h"
+
+ ac_with_rpath=yes
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include rpath in build" >&5
+$as_echo_n "checking whether to include rpath in build... " >&6; }
+
+# Check whether --with-rpath was given.
+if test "${with_rpath+set}" = set; then :
+ withval=$with_rpath; case "$withval" in
+ yes) ac_with_rpath=yes ;;
+ no) ac_with_rpath=no ;;
+ *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
+$as_echo "doh!" >&6; }
+ as_fn_error $? "bad value \"$withval\" for --without-rpath" "$LINENO" 5 ;;
+ esac
+
+
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_with_rpath" >&5
+$as_echo "$ac_with_rpath" >&6; }
+
DEPDIR="${am__leading_dot}deps"
ac_config_commands="$ac_config_commands depfiles"
@@ -4463,6 +4523,7 @@
+ ac_real_bluegene_loaded=no
ac_bluegene_loaded=no
@@ -4630,6 +4691,7 @@
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
+ ac_real_bluegene_loaded=yes
fi
@@ -4785,48 +4847,460 @@
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
- fi
+ ac_real_bluegene_loaded=yes
+ fi
- # test for bluegene emulation mode
- # Check whether --enable-bgq-emulation was given.
-if test "${enable_bgq_emulation+set}" = set; then :
- enableval=$enable_bgq_emulation; case "$enableval" in
- yes) bgq_emulation=yes ;;
- no) bgq_emulation=no ;;
- *) as_fn_error $? "bad value \"$enableval\" for --enable-bgq-emulation" "$LINENO" 5 ;;
- esac
+ if test "x$ac_bluegene_loaded" = "xyes"; then
+ BG_L_P_LOADED_TRUE=
+ BG_L_P_LOADED_FALSE='#'
+else
+ BG_L_P_LOADED_TRUE='#'
+ BG_L_P_LOADED_FALSE=
fi
- # Skip if already set
- if test "x$ac_bluegene_loaded" = "xyes" ; then
- bg_default_dirs=""
- elif test "x$bgq_emulation" = "xyes"; then
-
-$as_echo "#define SYSTEM_DIMENSIONS 4" >>confdefs.h
-
-
-$as_echo "#define HAVE_BG 1" >>confdefs.h
+ if test "x$ac_real_bluegene_loaded" = "xyes"; then
+ REAL_BG_L_P_LOADED_TRUE=
+ REAL_BG_L_P_LOADED_FALSE='#'
+else
+ REAL_BG_L_P_LOADED_TRUE='#'
+ REAL_BG_L_P_LOADED_FALSE=
+fi
-$as_echo "#define HAVE_BGQ 1" >>confdefs.h
-$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+ if test -n "$CCC"; then
+ CXX=$CCC
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CXX+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$CXX"; then
+ ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
+
+ fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+ { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+ (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+ ac_status=$?
+ if test -s conftest.err; then
+ sed '10a\
+... rest of stderr output deleted ...
+ 10q' conftest.err >conftest.er1
+ cat conftest.er1 >&5
+ fi
+ rm -f conftest.er1 conftest.err
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if test "${ac_cv_cxx_compiler_gnu+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_compiler_gnu=yes
+else
+ ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+ GXX=yes
+else
+ GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if test "${ac_cv_prog_cxx_g+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+ ac_cxx_werror_flag=yes
+ ac_cv_prog_cxx_g=no
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+else
+ CXXFLAGS=""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ CXXFLAGS="-g"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+ CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+ if test "$GXX" = yes; then
+ CXXFLAGS="-g -O2"
+ else
+ CXXFLAGS="-g"
+ fi
+else
+ if test "$GXX" = yes; then
+ CXXFLAGS="-O2"
+ else
+ CXXFLAGS=
+ fi
+fi
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+depcc="$CXX" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named `D' -- because `-MD' means `put the output
+ # in D'.
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CXX_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+ case " $depcc " in #(
+ *\ -arch\ *\ -arch\ *) am__universal=true ;;
+ esac
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+ # Solaris 8's {/usr,}/bin/sh.
+ touch sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle `-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # after this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvisualcpp | msvcmsys)
+ # This compiler won't grok `-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CXX_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+ am__fastdepCXX_TRUE=
+ am__fastdepCXX_FALSE='#'
+else
+ am__fastdepCXX_TRUE='#'
+ am__fastdepCXX_FALSE=
+fi
+
+
+
+
+ # test for bluegene emulation mode
+ # Check whether --enable-bgq-emulation was given.
+if test "${enable_bgq_emulation+set}" = set; then :
+ enableval=$enable_bgq_emulation; case "$enableval" in
+ yes) bgq_emulation=yes ;;
+ no) bgq_emulation=no ;;
+ *) as_fn_error $? "bad value \"$enableval\" for --enable-bgq-emulation" "$LINENO" 5 ;;
+ esac
+fi
+
+
+ # Skip if already set
+ if test "x$ac_bluegene_loaded" = "xyes" ; then
+ bg_default_dirs=""
+ elif test "x$bgq_emulation" = "xyes"; then
+
+$as_echo "#define HAVE_4D 1" >>confdefs.h
+
+
+$as_echo "#define SYSTEM_DIMENSIONS 4" >>confdefs.h
+
+
+$as_echo "#define HAVE_BG 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_BGQ 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: Running in BG/Q emulation mode" >&5
$as_echo "$as_me: Running in BG/Q emulation mode" >&6;}
bg_default_dirs=""
#define ac_bluegene_loaded so we don't load another bluegene conf
ac_bluegene_loaded=yes
+ ac_bgq_loaded=yes
else
bg_default_dirs="/bgsys/drivers/ppcfloor"
fi
libname=bgsched
+ loglibname=log4cxx
for bg_dir in $trydb2dir "" $bg_default_dirs; do
# Skip directories that don't exist
@@ -4834,55 +5308,88 @@
continue;
fi
- soloc=$bg_dir/lib64/lib$libname.so
+ soloc=$bg_dir/hlcs/lib/lib$libname.so
# Search for required BG API libraries in the directory
if test -z "$have_bg_ar" -a -f "$soloc" ; then
have_bgq_ar=yes
- bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -l$libname"
+ if test "$ac_with_rpath" = "yes"; then
+ bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/hlcs/lib -L$bg_dir/hlcs/lib -l$libname"
+ else
+ bg_ldflags="$bg_ldflags -L$bg_dir/hlcs/lib -l$libname"
+ fi
+ fi
+
+ soloc=$bg_dir/extlib/lib/lib$loglibname.so
+ if test -z "$have_bg_ar" -a -f "$soloc" ; then
+ have_bgq_ar=yes
+ if test "$ac_with_rpath" = "yes"; then
+ bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/extlib/lib -L$bg_dir/extlib/lib -l$loglibname"
+ else
+ bg_ldflags="$bg_ldflags -L$bg_dir/extlib/lib -l$loglibname"
+ fi
fi
# Search for headers in the directory
- if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
+ if test -z "$have_bg_hdr" -a -f "$bg_dir/hlcs/include/bgsched/bgsched.h" ; then
have_bgq_hdr=yes
- bg_includes="-I$bg_dir/include"
+ bg_includes="-I$bg_dir/hlcs/include"
fi
+ if test -z "$have_bg_hdr" -a -f "$bg_dir/extlib/include/log4cxx/logger.h" ; then
+ have_bgq_hdr=yes
+ bg_includes="$bg_includes -I$bg_dir/extlib/include"
+ fi
done
if test ! -z "$have_bgq_ar" -a ! -z "$have_bgq_hdr" ; then
# ac_with_readline="no"
# Test to make sure the api is good
saved_LDFLAGS="$LDFLAGS"
- LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
+ LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64 $bg_includes"
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
- int rm_set_serial(char *);
+#include
+#include
int
main ()
{
- rm_set_serial("");
+ bgsched::init("");
+ log4cxx::LoggerPtr logger_ptr(log4cxx::Logger::getLogger( "ibm" ));
;
return 0;
}
_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
+if ac_fn_cxx_try_link "$LINENO"; then :
have_bgq_files=yes
else
- as_fn_error $? "There is a problem linking to the BG/P api." "$LINENO" 5
+ as_fn_error $? "There is a problem linking to the BG/Q api." "$LINENO" 5
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
LDFLAGS="$saved_LDFLAGS"
fi
if test ! -z "$have_bgq_files" ; then
+ BG_LDFLAGS="$bg_ldflags"
BG_INCLUDES="$bg_includes"
CFLAGS="$CFLAGS -m64"
CXXFLAGS="$CXXFLAGS $CFLAGS"
-$as_echo "#define HAVE_3D 1" >>confdefs.h
+$as_echo "#define HAVE_4D 1" >>confdefs.h
-$as_echo "#define SYSTEM_DIMENSIONS 3" >>confdefs.h
+$as_echo "#define SYSTEM_DIMENSIONS 4" >>confdefs.h
$as_echo "#define HAVE_BG 1" >>confdefs.h
@@ -4896,32 +5403,32 @@
$as_echo "#define HAVE_BG_FILES 1" >>confdefs.h
+ #AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$soloc", [Define the BG_BRIDGE_SO value])
-cat >>confdefs.h <<_ACEOF
-#define BG_BRIDGE_SO "$soloc"
-_ACEOF
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Running on a legitimate BG/Q system" >&5
+$as_echo "$as_me: Running on a legitimate BG/Q system" >&6;}
+ # AC_MSG_CHECKING(for BG serial value)
+ # bg_serial="BGQ"
+ # AC_ARG_WITH(bg-serial,, [bg_serial="$withval"])
+ # AC_MSG_RESULT($bg_serial)
+ # AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
+ #define ac_bluegene_loaded so we don't load another bluegene conf
+ ac_bluegene_loaded=yes
+ ac_real_bluegene_loaded=yes
+ ac_bgq_loaded=yes
+ fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BG serial value" >&5
-$as_echo_n "checking for BG serial value... " >&6; }
- bg_serial="BGQ"
-# Check whether --with-bg-serial was given.
-if test "${with_bg_serial+set}" = set; then :
- withval=$with_bg_serial; bg_serial="$withval"
-fi
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $bg_serial" >&5
-$as_echo "$bg_serial" >&6; }
-cat >>confdefs.h <<_ACEOF
-#define BG_SERIAL "$bg_serial"
-_ACEOF
-
- #define ac_bluegene_loaded so we don't load another bluegene conf
- ac_bluegene_loaded=yes
- fi
+ if test "x$ac_bgq_loaded" = "xyes"; then
+ BGQ_LOADED_TRUE=
+ BGQ_LOADED_FALSE='#'
+else
+ BGQ_LOADED_TRUE='#'
+ BGQ_LOADED_FALSE=
+fi
@@ -4935,6 +5442,7 @@
+
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -5638,11 +6146,26 @@
;;
esac
+ac_have_cygwin=no
case "$host" in
+ *cygwin) LDFLAGS="$LDFLAGS -no-undefined"
+ SO_LDFLAGS="$SO_LDFLAGS \$(top_builddir)/src/api/libslurmhelper.la"
+
+ ac_have_cygwin=yes
+ ;;
*solaris*) CC="/usr/sfw/bin/gcc"
CFLAGS="$CFLAGS -D_POSIX_PTHREAD_SEMANTICS -I/usr/sfw/include"
LDFLAGS="$LDFLAGS -L/usr/sfw/lib"
+ ;;
esac
+ if test x"$ac_have_cygwin" == x"yes"; then
+ WITH_CYGWIN_TRUE=
+ WITH_CYGWIN_FALSE='#'
+else
+ WITH_CYGWIN_TRUE='#'
+ WITH_CYGWIN_FALSE=
+fi
+
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
@@ -7205,13 +7728,13 @@
else
lt_cv_nm_interface="BSD nm"
echo "int some_variable = 0;" > conftest.$ac_ext
- (eval echo "\"\$as_me:7208: $ac_compile\"" >&5)
+ (eval echo "\"\$as_me:7731: $ac_compile\"" >&5)
(eval "$ac_compile" 2>conftest.err)
cat conftest.err >&5
- (eval echo "\"\$as_me:7211: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+ (eval echo "\"\$as_me:7734: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
(eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
cat conftest.err >&5
- (eval echo "\"\$as_me:7214: output\"" >&5)
+ (eval echo "\"\$as_me:7737: output\"" >&5)
cat conftest.out >&5
if $GREP 'External.*some_variable' conftest.out > /dev/null; then
lt_cv_nm_interface="MS dumpbin"
@@ -8416,7 +8939,7 @@
;;
*-*-irix6*)
# Find out which ABI we are using.
- echo '#line 8419 "configure"' > conftest.$ac_ext
+ echo '#line 8942 "configure"' > conftest.$ac_ext
if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
(eval $ac_compile) 2>&5
ac_status=$?
@@ -9704,7 +10227,6 @@
-
# Set options
@@ -10205,11 +10727,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:10208: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:10730: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:10212: \$? = $ac_status" >&5
+ echo "$as_me:10734: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -10544,11 +11066,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:10547: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:11069: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:10551: \$? = $ac_status" >&5
+ echo "$as_me:11073: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -10649,11 +11171,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:10652: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:11174: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:10656: \$? = $ac_status" >&5
+ echo "$as_me:11178: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -10704,11 +11226,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:10707: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:11229: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:10711: \$? = $ac_status" >&5
+ echo "$as_me:11233: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -13088,7 +13610,7 @@
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 13091 "configure"
+#line 13613 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -13184,7 +13706,7 @@
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 13187 "configure"
+#line 13709 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -15140,11 +15662,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:15143: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:15665: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:15147: \$? = $ac_status" >&5
+ echo "$as_me:15669: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -15239,11 +15761,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:15242: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:15764: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:15246: \$? = $ac_status" >&5
+ echo "$as_me:15768: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -15291,11 +15813,11 @@
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:15294: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:15816: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:15298: \$? = $ac_status" >&5
+ echo "$as_me:15820: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -16378,6 +16900,14 @@
fi
fi
+ if test -n "$ac_ct_CXX"; then
+ WITH_CXX_TRUE=
+ WITH_CXX_FALSE='#'
+else
+ WITH_CXX_TRUE='#'
+ WITH_CXX_FALSE=
+fi
+
if test "$with_gnu_ld" = "yes"; then
WITH_GNU_LD_TRUE=
WITH_GNU_LD_FALSE='#'
@@ -18110,90 +18640,31 @@
-# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
-if test x"$acx_pthread_ok" = xyes; then
-
-$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h
-
- :
-else
- acx_pthread_ok=no
- as_fn_error $? "Error: Cannot figure out how to use pthreads!" "$LINENO" 5
-fi
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-# Always define WITH_PTHREADS if we make it this far
-
-$as_echo "#define WITH_PTHREADS 1" >>confdefs.h
-
-LDFLAGS="$LDFLAGS "
-CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
-LIBS="$PTHREAD_LIBS $LIBS"
-
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Cray XT" >&5
-$as_echo_n "checking for Cray XT... " >&6; }
- # Check whether --enable-cray-xt was given.
-if test "${enable_cray_xt+set}" = set; then :
- enableval=$enable_cray_xt; case "$enableval" in
- yes) x_ac_cray_xt=yes ;;
- no) x_ac_cray_xt=no ;;
- *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
-$as_echo "doh!" >&6; }
- as_fn_error $? "bad value \"$enableval\" for --enable-cray-xt" "$LINENO" 5 ;;
- esac
-
-else
- x_ac_cray_xt=no
-
-fi
-
-
- if test "$x_ac_cray_xt" = yes; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-
-$as_echo "#define HAVE_3D 1" >>confdefs.h
-
-
-$as_echo "#define SYSTEM_DIMENSIONS 3" >>confdefs.h
-
-
-$as_echo "#define HAVE_CRAY 1" >>confdefs.h
-
-
-$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
-
- else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- fi
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$acx_pthread_ok" = xyes; then
+$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h
-# Check whether --with-apbasil was given.
-if test "${with_apbasil+set}" = set; then :
- withval=$with_apbasil; try_apbasil=$withval
+ :
+else
+ acx_pthread_ok=no
+ as_fn_error $? "Error: Cannot figure out how to use pthreads!" "$LINENO" 5
fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
- apbasil_default_locs="/usr/bin/apbasil"
- for apbasil_loc in $try_apbasil "" $apbasil_default_locs; do
- if test -z "$have_apbasil" -a -x "$apbasil_loc" ; then
- have_apbasil=$apbasil_loc
- fi
- done
- if test ! -z "$have_apbasil" ; then
-cat >>confdefs.h <<_ACEOF
-#define APBASIL_LOC "$have_apbasil"
-_ACEOF
- fi
+# Always define WITH_PTHREADS if we make it this far
+
+$as_echo "#define WITH_PTHREADS 1" >>confdefs.h
+
+LDFLAGS="$LDFLAGS "
+CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+LIBS="$PTHREAD_LIBS $LIBS"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Sun Constellation system" >&5
@@ -19275,6 +19746,230 @@
+ ac_have_cray="no"
+ ac_have_real_cray="no"
+ ac_have_alps_emulation="no"
+ ac_have_cray_emulation="no"
+
+
+# Check whether --with-alps-emulation was given.
+if test "${with_alps_emulation+set}" = set; then :
+ withval=$with_alps_emulation; test "$withval" = no || ac_have_alps_emulation=yes
+else
+ ac_have_alps_emulation=no
+fi
+
+
+ # Check whether --enable-cray-emulation was given.
+if test "${enable_cray_emulation+set}" = set; then :
+ enableval=$enable_cray_emulation; case "$enableval" in
+ yes) ac_have_cray_emulation="yes" ;;
+ no) ac_have_cray_emulation="no" ;;
+ *) as_fn_error $? "bad value \"$enableval\" for --enable-cray-emulation" "$LINENO" 5 ;;
+ esac
+
+fi
+
+
+ if test "$ac_have_alps_emulation" = "yes"; then
+ ac_have_cray="yes"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Running A Cray system against an Alps emulation" >&5
+$as_echo "$as_me: Running A Cray system against an Alps emulation" >&6;}
+
+$as_echo "#define HAVE_ALPS_EMULATION 1" >>confdefs.h
+
+ elif test "$ac_have_cray_emulation" = "yes"; then
+ ac_have_cray="yes"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Running in Cray emulation mode" >&5
+$as_echo "$as_me: Running in Cray emulation mode" >&6;}
+
+$as_echo "#define HAVE_CRAY_EMULATION 1" >>confdefs.h
+
+ else
+ # Check for a Cray-specific file:
+ # * older XT systems use an /etc/xtrelease file
+ # * newer XT/XE systems use an /etc/opt/cray/release/xtrelease file
+ # * both have an /etc/xthostname
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether this is a native Cray XT or XE system or have ALPS simulator" >&5
+$as_echo_n "checking whether this is a native Cray XT or XE system or have ALPS simulator... " >&6; }
+
+ if test -f /etc/xtrelease || test -d /etc/opt/cray/release; then
+ ac_have_cray="yes"
+ ac_have_real_cray="yes"
+
+$as_echo "#define HAVE_REAL_CRAY 1" >>confdefs.h
+
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_have_cray" >&5
+$as_echo "$ac_have_cray" >&6; }
+ fi
+
+ if test "$ac_have_cray" = "yes"; then
+ # libexpat is always required for the XML-RPC interface
+ ac_fn_c_check_header_mongrel "$LINENO" "expat.h" "ac_cv_header_expat_h" "$ac_includes_default"
+if test "x$ac_cv_header_expat_h" = x""yes; then :
+
+else
+ as_fn_error $? "Cray BASIL requires expat headers/rpm" "$LINENO" 5
+fi
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XML_ParserCreate in -lexpat" >&5
+$as_echo_n "checking for XML_ParserCreate in -lexpat... " >&6; }
+if test "${ac_cv_lib_expat_XML_ParserCreate+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lexpat $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char XML_ParserCreate ();
+int
+main ()
+{
+return XML_ParserCreate ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_expat_XML_ParserCreate=yes
+else
+ ac_cv_lib_expat_XML_ParserCreate=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_expat_XML_ParserCreate" >&5
+$as_echo "$ac_cv_lib_expat_XML_ParserCreate" >&6; }
+if test "x$ac_cv_lib_expat_XML_ParserCreate" = x""yes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBEXPAT 1
+_ACEOF
+
+ LIBS="-lexpat $LIBS"
+
+else
+ as_fn_error $? "Cray BASIL requires libexpat.so (i.e. libexpat1-dev)" "$LINENO" 5
+fi
+
+
+ if test "$ac_have_real_cray" = "yes"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for job_getjid in -ljob" >&5
+$as_echo_n "checking for job_getjid in -ljob... " >&6; }
+if test "${ac_cv_lib_job_job_getjid+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-ljob $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char job_getjid ();
+int
+main ()
+{
+return job_getjid ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_job_job_getjid=yes
+else
+ ac_cv_lib_job_job_getjid=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_job_job_getjid" >&5
+$as_echo "$ac_cv_lib_job_job_getjid" >&6; }
+if test "x$ac_cv_lib_job_job_getjid" = x""yes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBJOB 1
+_ACEOF
+
+ LIBS="-ljob $LIBS"
+
+else
+ as_fn_error $? "Need cray-job (usually in /opt/cray/job/default)" "$LINENO" 5
+fi
+
+ fi
+
+ if test -z "$MYSQL_CFLAGS" || test -z "$MYSQL_LIBS"; then
+ as_fn_error $? "Cray BASIL requires the cray-MySQL-devel-enterprise rpm" "$LINENO" 5
+ fi
+
+
+$as_echo "#define HAVE_3D 1" >>confdefs.h
+
+
+$as_echo "#define SYSTEM_DIMENSIONS 3" >>confdefs.h
+
+
+$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_CRAY 1" >>confdefs.h
+
+
+$as_echo "#define SALLOC_KILL_CMD 1" >>confdefs.h
+
+
+$as_echo "#define SALLOC_RUN_FOREGROUND 1" >>confdefs.h
+
+ fi
+ if test "$ac_have_cray" = "yes"; then
+ HAVE_CRAY_TRUE=
+ HAVE_CRAY_FALSE='#'
+else
+ HAVE_CRAY_TRUE='#'
+ HAVE_CRAY_FALSE=
+fi
+
+ if test "$ac_have_real_cray" = "yes"; then
+ HAVE_REAL_CRAY_TRUE=
+ HAVE_REAL_CRAY_FALSE='#'
+else
+ HAVE_REAL_CRAY_TRUE='#'
+ HAVE_REAL_CRAY_FALSE=
+fi
+
+ if test "$ac_have_alps_emulation" = "yes"; then
+ HAVE_ALPS_EMULATION_TRUE=
+ HAVE_ALPS_EMULATION_FALSE='#'
+else
+ HAVE_ALPS_EMULATION_TRUE='#'
+ HAVE_ALPS_EMULATION_FALSE=
+fi
+
+ if test "$ac_have_cray_emulation" = "yes"; then
+ HAVE_CRAY_EMULATION_TRUE=
+ HAVE_CRAY_EMULATION_FALSE='#'
+else
+ HAVE_CRAY_EMULATION_TRUE='#'
+ HAVE_CRAY_EMULATION_FALSE=
+fi
+
+
+
+
@@ -19353,6 +20048,7 @@
if test "$x_ac_debug" = yes; then
test "$GCC" = yes && CFLAGS="$CFLAGS -Wall -fno-strict-aliasing"
+ test "$GXX" = yes && CXXFLAGS="$CXXFLAGS -Wall -fno-strict-aliasing"
else
$as_echo "#define NDEBUG 1" >>confdefs.h
@@ -19430,6 +20126,31 @@
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${x_ac_partial_attach=no}" >&5
$as_echo "${x_ac_partial_attach=no}" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether salloc should kill child processes at job termination" >&5
+$as_echo_n "checking whether salloc should kill child processes at job termination... " >&6; }
+ # Check whether --enable-salloc-kill-cmd was given.
+if test "${enable_salloc_kill_cmd+set}" = set; then :
+ enableval=$enable_salloc_kill_cmd; case "$enableval" in
+ yes) x_ac_salloc_kill_cmd=yes ;;
+ no) x_ac_salloc_kill_cmd=no ;;
+ *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
+$as_echo "doh!" >&6; }
+ as_fn_error $? "bad value \"$enableval\" for --enable-salloc-kill-cmd" "$LINENO" 5 ;;
+ esac
+
+
+fi
+
+ if test "$x_ac_salloc_kill_cmd" = yes; then
+
+$as_echo "#define SALLOC_KILL_CMD 1" >>confdefs.h
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to disable salloc execution in the background" >&5
$as_echo_n "checking whether to disable salloc execution in the background... " >&6; }
@@ -19976,9 +20697,9 @@
if test "x$x_ac_have_lua" = "xyes"; then
saved_CFLAGS="$CFLAGS"
- saved_LDFLAGS="$LDFLAGS"
+ saved_LIBS="$LIBS"
CFLAGS="$CFLAGS $lua_CFLAGS"
- LDFLAGS="$LDFLAGS $lua_LIBS"
+ LIBS="$LIBS $lua_LIBS"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for whether we can link to liblua" >&5
$as_echo_n "checking for whether we can link to liblua... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -20007,7 +20728,7 @@
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $x_ac_have_lua" >&5
$as_echo "$x_ac_have_lua" >&6; }
CFLAGS="$saved_CFLAGS"
- LDFLAGS="$saved_LDFLAGS"
+ LIBS="$saved_LIBS"
fi
if test "x$x_ac_have_lua" = "xyes"; then
@@ -20021,6 +20742,73 @@
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether man2html is available" >&5
+$as_echo_n "checking whether man2html is available... " >&6; }
+ # Extract the first word of "man2html", so it can be a program name with args.
+set dummy man2html; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_have_man2html+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_have_man2html"; then
+ ac_cv_prog_ac_have_man2html="$ac_have_man2html" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_dummy="$bindir:/usr/bin:/usr/local/bin"
+for as_dir in $as_dummy
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_have_man2html="yes"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_prog_ac_have_man2html" && ac_cv_prog_ac_have_man2html="no"
+fi
+fi
+ac_have_man2html=$ac_cv_prog_ac_have_man2html
+if test -n "$ac_have_man2html"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_have_man2html" >&5
+$as_echo "$ac_have_man2html" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+ if test "x$ac_have_man2html" == "xyes"; then
+ HAVE_MAN2HTML_TRUE=
+ HAVE_MAN2HTML_FALSE='#'
+else
+ HAVE_MAN2HTML_TRUE='#'
+ HAVE_MAN2HTML_FALSE=
+fi
+
+
+ if test "x$ac_have_man2html" != "xyes" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: Unable to build man page html files without man2html" >&5
+$as_echo "$as_me: Unable to build man page html files without man2html" >&6;}
+ fi
+
+ if test "x$ac_have_man2html" = "xyes"; then
+ HAVE_MAN2HTML_TRUE=
+ HAVE_MAN2HTML_FALSE='#'
+else
+ HAVE_MAN2HTML_TRUE='#'
+ HAVE_MAN2HTML_FALSE=
+fi
+
+
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for support of printf(\"%s\", NULL)" >&5
$as_echo_n "checking for support of printf(\"%s\", NULL)... " >&6; }
if test "$cross_compiling" = yes; then :
@@ -20374,7 +21162,11 @@
else
MUNGE_LIBS="-lmunge"
MUNGE_CPPFLAGS="-I$x_ac_cv_munge_dir/include"
- MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+ if test "$ac_with_rpath" = "yes"; then
+ MUNGE_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_munge_dir/$bit -L$x_ac_cv_munge_dir/$bit"
+ else
+ MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+ fi
fi
@@ -20619,8 +21411,38 @@
+ ac_with_srun2aprun="no"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for whether to include srun-aprun wrapper rather than native SLURM srun" >&5
+$as_echo_n "checking for whether to include srun-aprun wrapper rather than native SLURM srun... " >&6; }
+
+# Check whether --with-srun2aprun was given.
+if test "${with_srun2aprun+set}" = set; then :
+ withval=$with_srun2aprun; case "$withval" in
+ yes) ac_with_srun2aprun=yes ;;
+ no) ac_with_srun2aprun=no ;;
+ *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
+$as_echo "doh!" >&6; }
+ as_fn_error $? "bad value \"$withval\" for --with-srun2aprun" "$LINENO" 5 ;;
+ esac
+
+
+fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_with_srun2aprun" >&5
+$as_echo "$ac_with_srun2aprun" >&6; }
+ if test "x$ac_with_srun2aprun" = "xyes"; then
+ BUILD_SRUN2APRUN_TRUE=
+ BUILD_SRUN2APRUN_FALSE='#'
+else
+ BUILD_SRUN2APRUN_TRUE='#'
+ BUILD_SRUN2APRUN_FALSE=
+fi
+
+
+
-ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/pam/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm/Makefile contribs/perlapi/libslurm/perl/Makefile.PL contribs/perlapi/libslurmdb/Makefile contribs/perlapi/libslurmdb/perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/sjobexit/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/db_api/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/sshare/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/sprio/Makefile src/srun/Makefile src/srun_cr/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/common/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/gres/Makefile src/plugins/gres/gpu/Makefile src/plugins/gres/nic/Makefile 
src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/job_submit/Makefile src/plugins/job_submit/cnode/Makefile src/plugins/job_submit/defaults/Makefile src/plugins/job_submit/logging/Makefile src/plugins/job_submit/lua/Makefile src/plugins/job_submit/partition/Makefile src/plugins/preempt/Makefile src/plugins/preempt/none/Makefile src/plugins/preempt/partition_prio/Makefile src/plugins/preempt/qos/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/proctrack/cgroup/Makefile src/plugins/proctrack/lua/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bgq/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/cray/Makefile src/plugins/select/linear/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile 
src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/node_rank/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile"
+ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/arrayrun/Makefile contribs/cray/Makefile contribs/lua/Makefile contribs/pam/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm/Makefile contribs/perlapi/libslurm/perl/Makefile.PL contribs/perlapi/libslurmdb/Makefile contribs/perlapi/libslurmdb/perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/sjobexit/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/db_api/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/sshare/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/sprio/Makefile src/srun/Makefile src/srun_cr/Makefile src/slurmd/Makefile src/slurmd/common/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/common/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/gres/Makefile 
src/plugins/gres/gpu/Makefile src/plugins/gres/nic/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/job_submit/Makefile src/plugins/job_submit/cnode/Makefile src/plugins/job_submit/defaults/Makefile src/plugins/job_submit/logging/Makefile src/plugins/job_submit/lua/Makefile src/plugins/job_submit/partition/Makefile src/plugins/preempt/Makefile src/plugins/preempt/none/Makefile src/plugins/preempt/partition_prio/Makefile src/plugins/preempt/qos/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/cgroup/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/proctrack/lua/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/ba/Makefile src/plugins/select/bluegene/ba_bgq/Makefile src/plugins/select/bluegene/bl/Makefile src/plugins/select/bluegene/bl_bgq/Makefile src/plugins/select/bluegene/sfree/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/cray/Makefile src/plugins/select/cray/libalps/Makefile src/plugins/select/cray/libemulate/Makefile src/plugins/select/linear/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile 
src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/cgroup/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/node_rank/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile"
cat >confcache <<\_ACEOF
@@ -20745,6 +21567,22 @@
as_fn_error $? "conditional \"BGL_LOADED\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${BG_L_P_LOADED_TRUE}" && test -z "${BG_L_P_LOADED_FALSE}"; then
+ as_fn_error $? "conditional \"BG_L_P_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${REAL_BG_L_P_LOADED_TRUE}" && test -z "${REAL_BG_L_P_LOADED_FALSE}"; then
+ as_fn_error $? "conditional \"REAL_BG_L_P_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${BGQ_LOADED_TRUE}" && test -z "${BGQ_LOADED_FALSE}"; then
+ as_fn_error $? "conditional \"BGQ_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${BLUEGENE_LOADED_TRUE}" && test -z "${BLUEGENE_LOADED_FALSE}"; then
as_fn_error $? "conditional \"BLUEGENE_LOADED\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20757,6 +21595,10 @@
as_fn_error $? "conditional \"HAVE_AIX_PROCTRACK\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${WITH_CYGWIN_TRUE}" && test -z "${WITH_CYGWIN_FALSE}"; then
+ as_fn_error $? "conditional \"WITH_CYGWIN\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20769,6 +21611,10 @@
as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${WITH_CXX_TRUE}" && test -z "${WITH_CXX_FALSE}"; then
+ as_fn_error $? "conditional \"WITH_CXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${WITH_GNU_LD_TRUE}" && test -z "${WITH_GNU_LD_FALSE}"; then
as_fn_error $? "conditional \"WITH_GNU_LD\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20806,6 +21652,22 @@
as_fn_error $? "conditional \"WITH_PGSQL\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${HAVE_CRAY_TRUE}" && test -z "${HAVE_CRAY_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_CRAY\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_REAL_CRAY_TRUE}" && test -z "${HAVE_REAL_CRAY_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_REAL_CRAY\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_ALPS_EMULATION_TRUE}" && test -z "${HAVE_ALPS_EMULATION_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_ALPS_EMULATION\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_CRAY_EMULATION_TRUE}" && test -z "${HAVE_CRAY_EMULATION_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_CRAY_EMULATION\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${DEBUG_MODULES_TRUE}" && test -z "${DEBUG_MODULES_FALSE}"; then
as_fn_error $? "conditional \"DEBUG_MODULES\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20826,6 +21688,14 @@
as_fn_error $? "conditional \"HAVE_LUA\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${HAVE_MAN2HTML_TRUE}" && test -z "${HAVE_MAN2HTML_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_MAN2HTML\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_MAN2HTML_TRUE}" && test -z "${HAVE_MAN2HTML_FALSE}"; then
+ as_fn_error $? "conditional \"HAVE_MAN2HTML\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${HAVE_OPENSSL_TRUE}" && test -z "${HAVE_OPENSSL_FALSE}"; then
as_fn_error $? "conditional \"HAVE_OPENSSL\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20842,6 +21712,10 @@
as_fn_error $? "conditional \"WITH_BLCR\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${BUILD_SRUN2APRUN_TRUE}" && test -z "${BUILD_SRUN2APRUN_FALSE}"; then
+ as_fn_error $? "conditional \"BUILD_SRUN2APRUN\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
: ${CONFIG_STATUS=./config.status}
ac_write_fail=0
@@ -21804,6 +22678,9 @@
"config.xml") CONFIG_FILES="$CONFIG_FILES config.xml" ;;
"auxdir/Makefile") CONFIG_FILES="$CONFIG_FILES auxdir/Makefile" ;;
"contribs/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/Makefile" ;;
+ "contribs/arrayrun/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/arrayrun/Makefile" ;;
+ "contribs/cray/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/cray/Makefile" ;;
+ "contribs/lua/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/lua/Makefile" ;;
"contribs/pam/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/pam/Makefile" ;;
"contribs/perlapi/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/Makefile" ;;
"contribs/perlapi/libslurm/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/libslurm/Makefile" ;;
@@ -21832,6 +22709,7 @@
"src/srun/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun/Makefile" ;;
"src/srun_cr/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun_cr/Makefile" ;;
"src/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/Makefile" ;;
+ "src/slurmd/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/common/Makefile" ;;
"src/slurmd/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmd/Makefile" ;;
"src/slurmd/slurmstepd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmstepd/Makefile" ;;
"src/slurmdbd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmdbd/Makefile" ;;
@@ -21860,7 +22738,6 @@
"src/plugins/checkpoint/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/aix/Makefile" ;;
"src/plugins/checkpoint/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/none/Makefile" ;;
"src/plugins/checkpoint/ompi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/ompi/Makefile" ;;
- "src/plugins/checkpoint/xlch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/xlch/Makefile" ;;
"src/plugins/checkpoint/blcr/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/Makefile" ;;
"src/plugins/checkpoint/blcr/cr_checkpoint.sh") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/cr_checkpoint.sh" ;;
"src/plugins/checkpoint/blcr/cr_restart.sh") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/cr_restart.sh" ;;
@@ -21895,11 +22772,11 @@
"src/plugins/priority/multifactor/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/priority/multifactor/Makefile" ;;
"src/plugins/proctrack/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/Makefile" ;;
"src/plugins/proctrack/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/aix/Makefile" ;;
+ "src/plugins/proctrack/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/cgroup/Makefile" ;;
"src/plugins/proctrack/pgid/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/pgid/Makefile" ;;
"src/plugins/proctrack/linuxproc/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/linuxproc/Makefile" ;;
"src/plugins/proctrack/rms/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/rms/Makefile" ;;
"src/plugins/proctrack/sgi_job/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/sgi_job/Makefile" ;;
- "src/plugins/proctrack/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/cgroup/Makefile" ;;
"src/plugins/proctrack/lua/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/lua/Makefile" ;;
"src/plugins/sched/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/Makefile" ;;
"src/plugins/sched/backfill/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/backfill/Makefile" ;;
@@ -21908,12 +22785,16 @@
"src/plugins/sched/wiki/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/wiki/Makefile" ;;
"src/plugins/sched/wiki2/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/wiki2/Makefile" ;;
"src/plugins/select/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/Makefile" ;;
- "src/plugins/select/bgq/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bgq/Makefile" ;;
"src/plugins/select/bluegene/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/Makefile" ;;
- "src/plugins/select/bluegene/block_allocator/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/block_allocator/Makefile" ;;
- "src/plugins/select/bluegene/plugin/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/plugin/Makefile" ;;
+ "src/plugins/select/bluegene/ba/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/ba/Makefile" ;;
+ "src/plugins/select/bluegene/ba_bgq/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/ba_bgq/Makefile" ;;
+ "src/plugins/select/bluegene/bl/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/bl/Makefile" ;;
+ "src/plugins/select/bluegene/bl_bgq/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/bl_bgq/Makefile" ;;
+ "src/plugins/select/bluegene/sfree/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/sfree/Makefile" ;;
"src/plugins/select/cons_res/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cons_res/Makefile" ;;
"src/plugins/select/cray/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cray/Makefile" ;;
+ "src/plugins/select/cray/libalps/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cray/libalps/Makefile" ;;
+ "src/plugins/select/cray/libemulate/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cray/libemulate/Makefile" ;;
"src/plugins/select/linear/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/linear/Makefile" ;;
"src/plugins/switch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/Makefile" ;;
"src/plugins/switch/elan/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/elan/Makefile" ;;
@@ -21930,6 +22811,7 @@
"src/plugins/mpi/openmpi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/mpi/openmpi/Makefile" ;;
"src/plugins/task/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/Makefile" ;;
"src/plugins/task/affinity/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/affinity/Makefile" ;;
+ "src/plugins/task/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/cgroup/Makefile" ;;
"src/plugins/task/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/none/Makefile" ;;
"src/plugins/topology/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/Makefile" ;;
"src/plugins/topology/3d_torus/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/3d_torus/Makefile" ;;
diff -Nru slurm-llnl-2.2.7/configure.ac slurm-llnl-2.3.2/configure.ac
--- slurm-llnl-2.2.7/configure.ac 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/configure.ac 2011-12-05 17:20:08.000000000 +0000
@@ -25,6 +25,7 @@
AC_CONFIG_HEADERS([slurm/slurm.h])
dnl This needs to be close to the front to set CFLAGS=-m64
+X_AC_RPATH
X_AC_BGL
dnl we need to know if this is a bgl in the Makefile.am to do
@@ -33,12 +34,28 @@
AC_SUBST(BGL_LOADED)
X_AC_BGP
+
+dnl ok now check if We have an L or P system, Q is handled differently
+dnl so handle it later.
+AM_CONDITIONAL(BG_L_P_LOADED, test "x$ac_bluegene_loaded" = "xyes")
+AC_SUBST(BG_L_P_LOADED)
+
+dnl ok now check if We are on a real L or P system, (test if to build srun
+dnl or not. If we are emulating things we should build it.
+AM_CONDITIONAL(REAL_BG_L_P_LOADED, test "x$ac_real_bluegene_loaded" = "xyes")
+AC_SUBST(REAL_BG_L_P_LOADED)
+
X_AC_BGQ
-dnl ok now check if bluegene was loaded at all
+dnl We need to know if this is a Q system
+AM_CONDITIONAL(BGQ_LOADED, test "x$ac_bgq_loaded" = "xyes")
+AC_SUBST(BGQ_LOADED)
+
+dnl ok now check if any bluegene was loaded.
AM_CONDITIONAL(BLUEGENE_LOADED, test "x$ac_bluegene_loaded" = "xyes")
AC_SUBST(BLUEGENE_LOADED)
+
X_AC_AIX
dnl
@@ -54,14 +71,22 @@
[Define slurm_ prefix function aliases for plugins]) ;;
esac
+ac_have_cygwin=no
dnl
-dnl add some flags for Solaris
+dnl add some flags for Solaris and cygwin
dnl
case "$host" in
+ *cygwin) LDFLAGS="$LDFLAGS -no-undefined"
+ SO_LDFLAGS="$SO_LDFLAGS \$(top_builddir)/src/api/libslurmhelper.la"
+ AC_SUBST(SO_LDFLAGS)
+ ac_have_cygwin=yes
+ ;;
*solaris*) CC="/usr/sfw/bin/gcc"
CFLAGS="$CFLAGS -D_POSIX_PTHREAD_SEMANTICS -I/usr/sfw/include"
LDFLAGS="$LDFLAGS -L/usr/sfw/lib"
+ ;;
esac
+AM_CONDITIONAL(WITH_CYGWIN, test x"$ac_have_cygwin" == x"yes")
dnl Checks for programs.
dnl
@@ -71,6 +96,7 @@
AC_PROG_LIBTOOL
PKG_PROG_PKG_CONFIG([0.9.0])
+AM_CONDITIONAL(WITH_CXX, test -n "$ac_ct_CXX")
AM_CONDITIONAL(WITH_GNU_LD, test "$with_gnu_ld" = "yes")
@@ -172,7 +198,6 @@
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
LIBS="$PTHREAD_LIBS $LIBS"
-X_AC_CRAY
X_AC_SUN_CONST
X_AC_DIMENSIONS
@@ -216,6 +241,9 @@
X_AC_DATABASES
+dnl Cray ALPS/Basil support depends on mySQL
+X_AC_CRAY
+
dnl checks for system services.
dnl
@@ -268,6 +296,12 @@
dnl
X_AC_LUA
+dnl check for presence of the man2html command
+dnl
+X_AC_MAN2HTML
+AM_CONDITIONAL(HAVE_MAN2HTML, test "x$ac_have_man2html" = "xyes")
+AC_SUBST(HAVE_MAN2HTML)
+
dnl check if we can use standard printf functions
dnl
X_AC_PRINTF_NULL
@@ -335,6 +369,10 @@
dnl
X_AC_BLCR
+dnl
+dnl Check to build native SLURM srun command or an aprun (Cray ALPS) wrapper.
+dnl
+X_AC_SRUN2APRUN
dnl All slurm Makefiles:
@@ -342,6 +380,9 @@
config.xml
auxdir/Makefile
contribs/Makefile
+ contribs/arrayrun/Makefile
+ contribs/cray/Makefile
+ contribs/lua/Makefile
contribs/pam/Makefile
contribs/perlapi/Makefile
contribs/perlapi/libslurm/Makefile
@@ -370,6 +411,7 @@
src/srun/Makefile
src/srun_cr/Makefile
src/slurmd/Makefile
+ src/slurmd/common/Makefile
src/slurmd/slurmd/Makefile
src/slurmd/slurmstepd/Makefile
src/slurmdbd/Makefile
@@ -398,7 +440,6 @@
src/plugins/checkpoint/aix/Makefile
src/plugins/checkpoint/none/Makefile
src/plugins/checkpoint/ompi/Makefile
- src/plugins/checkpoint/xlch/Makefile
src/plugins/checkpoint/blcr/Makefile
src/plugins/checkpoint/blcr/cr_checkpoint.sh
src/plugins/checkpoint/blcr/cr_restart.sh
@@ -433,11 +474,11 @@
src/plugins/priority/multifactor/Makefile
src/plugins/proctrack/Makefile
src/plugins/proctrack/aix/Makefile
+ src/plugins/proctrack/cgroup/Makefile
src/plugins/proctrack/pgid/Makefile
src/plugins/proctrack/linuxproc/Makefile
src/plugins/proctrack/rms/Makefile
src/plugins/proctrack/sgi_job/Makefile
- src/plugins/proctrack/cgroup/Makefile
src/plugins/proctrack/lua/Makefile
src/plugins/sched/Makefile
src/plugins/sched/backfill/Makefile
@@ -446,12 +487,16 @@
src/plugins/sched/wiki/Makefile
src/plugins/sched/wiki2/Makefile
src/plugins/select/Makefile
- src/plugins/select/bgq/Makefile
src/plugins/select/bluegene/Makefile
- src/plugins/select/bluegene/block_allocator/Makefile
- src/plugins/select/bluegene/plugin/Makefile
+ src/plugins/select/bluegene/ba/Makefile
+ src/plugins/select/bluegene/ba_bgq/Makefile
+ src/plugins/select/bluegene/bl/Makefile
+ src/plugins/select/bluegene/bl_bgq/Makefile
+ src/plugins/select/bluegene/sfree/Makefile
src/plugins/select/cons_res/Makefile
src/plugins/select/cray/Makefile
+ src/plugins/select/cray/libalps/Makefile
+ src/plugins/select/cray/libemulate/Makefile
src/plugins/select/linear/Makefile
src/plugins/switch/Makefile
src/plugins/switch/elan/Makefile
@@ -468,6 +513,7 @@
src/plugins/mpi/openmpi/Makefile
src/plugins/task/Makefile
src/plugins/task/affinity/Makefile
+ src/plugins/task/cgroup/Makefile
src/plugins/task/none/Makefile
src/plugins/topology/Makefile
src/plugins/topology/3d_torus/Makefile
diff -Nru slurm-llnl-2.2.7/contribs/arrayrun/arrayrun slurm-llnl-2.3.2/contribs/arrayrun/arrayrun
--- slurm-llnl-2.2.7/contribs/arrayrun/arrayrun 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/arrayrun/arrayrun 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,69 @@
+#!/bin/bash
+### Simulate an array job
+### $Id: arrayrun,v 1.6 2011/02/10 11:57:53 root Exp $
+
+### Copyright 2009,2010 Bjørn-Helge Mevik
+###
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License version 2 as
+### published by the Free Software Foundation.
+###
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+### GNU General Public License version 2 for more details.
+###
+### A copy of the GPL v. 2 text is available here:
+### http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+
+
+## Debugging
+#set -x
+
+### Configuration:
+## The work horse:
+WORKER=/site/lib/arrayrun_worker
+
+## Documentation:
+function usage () {
+ echo "Run many instances of the same job or command in the queue system.
+The instances are submitted via sbatch, and each get their own value
+of the environment variable TASK_ID. This can be used to select which
+intput or output file to use, etc.
+
+Usage:
+ arrayrun [-r] taskids [sbatch arguments] command [arguments]
+ arrayrun [-h | --help]
+
+Arguments:
+ '-r': Restart a job if it fails. For security reasons, each job is
+ restarted only once, and no more than 5 jobs will be restarted.
+ 'taskids': Run 'command' with TASK_ID set to the values specified in
+ 'taskids'. 'taskids' is a comma separated list of integers,
+ ranges of integers (first-last) or ranges with step size
+ (first-last:step). For instance
+ 1-5 means 1, 2, 3, 4, 5
+ 1,4,6 means 1, 4, 6
+ 10-20:5 means 10, 15, 20
+ 1-5,15,100-150:25 means 1, 2, 3, 4, 5, 15, 100, 125, 150
+ Note: spaces, negative number or decimal numbers are not allowed.
+ 'sbatch arguments': Any command line arguments for the implied sbatch. This
+ is most useful when 'command' is not a job script.
+ 'command': The command or job script to run. If it is a job script, it can
+ contain #SBATCH lines in addition to or instead of the 'sbatch
+ arguments'.
+ 'arguments': Any arguments for 'command'.
+ '-h', '--help' (or no arguments): Display this help."
+}
+
+if [ $# == 0 -o "$1" == '--help' -o "$1" == '-h' ]; then
+ usage
+ exit 0
+fi
+
+if [ -n "$SLURM_JOB_ID" ]; then
+ ## Started in a job script. Run with srun to make "scancel" work
+ exec srun --ntasks=1 $WORKER "$@"
+else
+ exec $WORKER "$@"
+fi
diff -Nru slurm-llnl-2.2.7/contribs/arrayrun/arrayrun_worker slurm-llnl-2.3.2/contribs/arrayrun/arrayrun_worker
--- slurm-llnl-2.2.7/contribs/arrayrun/arrayrun_worker 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/arrayrun/arrayrun_worker 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,255 @@
+#!/usr/bin/perl
+### Simulate an array job -- work horse script
+### $Id: arrayrun_worker,v 1.30 2011/04/27 08:58:25 root Exp $
+
+### Copyright 2009,2010,2011 Bjørn-Helge Mevik
+###
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License version 2 as
+### published by the Free Software Foundation.
+###
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+### GNU General Public License version 2 for more details.
+###
+### A copy of the GPL v. 2 text is available here:
+### http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+
+
+### Note: This script is meant to be run by 'arrayrun'; do not
+### run this script directly.
+
+use strict;
+use List::Util qw/min/;
+use Time::HiRes qw/sleep/;
+
+## Debug:
+use warnings;
+use constant DEBUG => 1;
+$| = 1 if DEBUG;
+
+## Configuration:
+my $maxJobs = 100; # Max total number of jobs in queue
+my $maxIdleJobs = 10; # Max number of pending jobs in queue
+my $maxBurst = 10; # Max number of jobs to submit at a time
+my $pollSeconds = 180; # How many seconds to sleep between each poll
+my $maxFails = 300; # Max errors to accept when submitting a job
+my $retrySleep = 300; # Seconds to sleep between each retry
+my $doubleCheckSleep = 30; # Seconds to sleep before double checking
+my $maxRestarts = 10; # Max number of restarts all in all
+my $sbatch = "/site/bin/sbatch";# Which sbatch command to use
+
+## Parse command line
+my $restart = 0;
+if (@ARGV && $ARGV[0] eq '-r') {
+ $restart = 1;
+ shift @ARGV;
+}
+my $jobSpec = shift @ARGV or die "Too few arguments\n";
+my @commandLine = @ARGV or die "Too few arguments\n";
+my @jobArray;
+foreach (split /,/, $jobSpec) {
+ if (/^(\d+)$/) {
+ push @jobArray, $1;
+ } elsif (/^(\d+)[-:](\d+)$/) {
+ push @jobArray, $1 .. $2;
+ } elsif (/^(\d+)[-:](\d+):(\d+)$/) {
+ for (my $i = $1; $i <= $2; $i += $3) {
+ push @jobArray, $i;
+ }
+ } else {
+ die "Unknown TASK_ID specification: '$_'\n";
+ }
+}
+die "No TASK_IDs specified\n" unless (@jobArray);
+
+print "TASK_IDs to submit: ", join(",", @jobArray), "
+Command line: @commandLine\n" if DEBUG;
+print "Will restart failed jobs\n" if DEBUG && $restart;
+
+## Setup
+my $mainid = $ENV{'SLURM_JOB_ID'} || $ENV{'SLURM_JOBID'} || 'null';
+my $runids = []; # List of IDs of running jobs
+my $pendids = []; # List of IDs of pending jobs
+my $testids = []; # List of IDs to test
+my %taskid; # TASK_ID for all submitted jobs
+my @restartedTasks; # TASK_ID of all restarted jobs
+my @tmp = (localtime())[5,4,3];
+my $starttime = sprintf "%d-%02d-%02d", $tmp[0] + 1900, $tmp[1] + 1, $tmp[2];
+
+print "Main job id: $mainid\nStart time: $starttime\n" if DEBUG;
+
+## Trap signals such that any running sub jobs are cancelled if the
+## main job is cancelled or times out.
+sub clean_up {
+ print "Caught signal. Cleaning up...\n" if DEBUG;
+ ## Cancel any subjobs:
+ if (@{$runids} || @{$pendids} || @{$testids}) {
+ print "Cancelling @{$runids} @{$pendids} @{$testids}\n" if DEBUG;
+ system("echo scancel @{$runids} @{$pendids} @{$testids}");
+ system("scancel @{$runids} @{$pendids} @{$testids}");
+ print "Cancelled @{$runids} @{$pendids} @{$testids}\n" if DEBUG;
+ }
+ exit 0;
+}
+$SIG{'TERM'} = 'clean_up'; # scancel/timeout
+$SIG{'INT'} = 'clean_up'; # ^C in interactive use
+
+
+## Submit a job with fail resilience:
+sub submit_job {
+ my $jobName = shift;
+ (my $commandLine = shift) || die "Job script not specified\n";
+ my $id;
+ my $nFails = 0;
+ my $success = 0;
+ until ($success) {
+ my $fail = 0;
+ $id = `$sbatch --job-name=$jobName $commandLine 2>&1`;
+ if ($? == 0) {
+ chomp($id);
+ print " Result from submit: $id" if DEBUG;
+ if ($id =~ s/.*Submitted batch job //) {
+ $success = 1;
+ }
+ } else {
+ warn " sbatch failed with error code '$?' (output: '",
+ $id || '', "'): $!\n";
+ $nFails++;
+ }
+ until ($success || $fail || $nFails > $maxFails) {
+ ## Double check that the job did not start
+ warn " Problem with submitting/checking job. Checking with squeue in a while.\n";
+ sleep $doubleCheckSleep - 5 + int(rand(11));
+ $id = `squeue -h -o '%i %j' -u $ENV{USER}`;
+ if ($? == 0) {
+ chomp($id);
+ print " Result from squeue: $id" if DEBUG;
+ if ($id =~ s/ $jobName//) {
+ warn "Job '$jobName' seems to have been started as jobid '$id'. Using that id.\n";
+ $success = 1;
+ } else {
+ warn "Job '$jobName' did not start.\n";
+ $fail = 1;
+ }
+ } else {
+ $nFails++;
+ }
+ }
+ unless ($success) {
+ if ($nFails <= $maxFails) {
+ warn " Could not submit job. Trying again in a while.\n";
+ sleep $retrySleep - 5 + int(rand(11));
+ } else {
+ die " Cannot submit job. Giving up after $nFails errors.\n";
+ }
+ }
+ }
+ print " => job ID $id\n" if DEBUG;
+ $id;
+}
+
+
+## Check the given jobs, and return lists of the ones still running/waiting:
+sub check_queue {
+ print scalar localtime, ": Checking queue...\n" if DEBUG;
+ my $queueids = `squeue -h -o '%i %t' 2>&1`;
+ if ($? != 0) {
+ print "squeue failed with error code '$?',\nmessage: $queueids\nI will assume all jobs are still running/waiting\n";
+ return;
+ }
+ my $testids = [ @{$runids}, @{$pendids} ];
+ print "Number of jobs to check: ", scalar @{$testids}, "\n" if DEBUG;
+ sleep 10 + rand; # Sleep to allow requeued jobs to get back
+ # in queue.
+ $runids = [];
+ $pendids = [];
+ foreach my $id (@{$testids}) {
+ if ($queueids =~ /$id (\w+)/) {
+ if ($1 eq "PD") {
+ print " Job $id is still waiting\n" if DEBUG;
+ push @{$pendids}, $id;
+ } else {
+ print " Job $id is still running\n" if DEBUG;
+ push @{$runids}, $id;
+ }
+ } else {
+ print " Job $id has finished:\n" if DEBUG;
+ my @sacctres = `sacct -o jobid,start,end,maxvmsize,maxrss,state,exitcode -S $starttime -j $id 2>&1`;
+ if ($? != 0) {
+ print " sacct failed with error code '$?',\n message: ",
+ @sacctres, " I will assume job $id finished successfully\n";
+ } else {
+ print join(" ", @sacctres);
+ if (grep /^[ ]*$id[ ]+.*RUNNING/, @sacctres) {
+ print " Job seems to be still running, after all.\n" if DEBUG;
+ push @{$runids}, $id;
+ } elsif ($restart && !grep /^[ ]*$id[ ]+.*COMPLETED[ ]+0:0/, @sacctres) {
+ print " Job failed. ";
+ if (@restartedTasks >= $maxRestarts) {
+ print "Too many jobs have been restarted. Will not restart TASK_ID $taskid{$id}\n";
+ } elsif (grep /^$taskid{$id}$/, @restartedTasks) {
+ print "TASK_ID $taskid{$id} has already been restarted once. Will not restart it again\n";
+ } else {
+ print "Restarting TASK_ID $taskid{$id}\n";
+ $ENV{'TASK_ID'} = $taskid{$id};
+ my $newid = submit_job "$mainid.$taskid{$id}", "@commandLine";
+ push @{$runids}, $newid;
+ $taskid{$newid} = $taskid{$id};
+ push @restartedTasks, $taskid{$newid};
+ sleep 1.5 + rand; # Sleep between 1.5 and 2.5 secs
+ }
+ }
+ }
+ }
+ }
+}
+
+
+## Make sure sub jobs do not inherit the main job TMPDIR or jobname:
+delete $ENV{'TMPDIR'};
+delete $ENV{'SLURM_JOB_NAME'};
+
+while (@jobArray) {
+ ## There is more to submit
+ print scalar localtime, ": Submitting jobs...\n" if DEBUG;
+ print scalar @jobArray, " more job(s) to submit\n" if DEBUG;
+ ## Submit as many as possible:
+ my $nToSubmit = min(scalar @jobArray,
+ $maxJobs - @{$runids} - @{$pendids},
+ $maxIdleJobs - @{$pendids},
+ $maxBurst);
+ print scalar(@{$runids}), " job(s) are running, and ",
+ scalar(@{$pendids}), " are waiting\n" if DEBUG;
+ print "Submitting $nToSubmit job(s):\n" if DEBUG;
+ for (my $i = 1; $i <= $nToSubmit; $i++) {
+ my $currJob = shift @jobArray;
+ print " TASK_ID $currJob:\n" if DEBUG;
+ ## Set $TASK_ID for the job:
+ $ENV{'TASK_ID'} = $currJob;
+ my $id = submit_job "$mainid.$currJob", "@commandLine";
+ push @{$pendids}, $id;
+ $taskid{$id} = $currJob;
+ sleep 1.5 + rand; # Sleep between 1.5 and 2.5 secs
+ }
+ ## Wait a while:
+ print "Sleeping...\n" if DEBUG;
+ sleep $pollSeconds - 5 + int(rand(11));
+ ## Find which are still running or waiting:
+ check_queue();
+}
+print "All jobs have been submitted\n" if DEBUG;
+
+while (@{$runids} || @{$pendids}) {
+ ## Some jobs are still running or pending
+ print scalar(@{$runids}), " job(s) are still running, and ",
+ scalar(@{$pendids}), " are waiting\n" if DEBUG;
+ ## Wait a while
+ print "Sleeping...\n" if DEBUG;
+ sleep $pollSeconds - 5 + int(rand(11));
+ ## Find which are still running or waiting:
+ check_queue();
+}
+
+print "Done.\n" if DEBUG;
diff -Nru slurm-llnl-2.2.7/contribs/arrayrun/Makefile.am slurm-llnl-2.3.2/contribs/arrayrun/Makefile.am
--- slurm-llnl-2.2.7/contribs/arrayrun/Makefile.am 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/arrayrun/Makefile.am 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,4 @@
+EXTRA_DIST = \
+ arrayrun \
+ arrayrun_worker \
+ README
diff -Nru slurm-llnl-2.2.7/contribs/arrayrun/Makefile.in slurm-llnl-2.3.2/contribs/arrayrun/Makefile.in
--- slurm-llnl-2.2.7/contribs/arrayrun/Makefile.in 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/arrayrun/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,475 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/arrayrun
+DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+ $(top_srcdir)/auxdir/libtool.m4 \
+ $(top_srcdir)/auxdir/ltoptions.m4 \
+ $(top_srcdir)/auxdir/ltsugar.m4 \
+ $(top_srcdir)/auxdir/ltversion.m4 \
+ $(top_srcdir)/auxdir/lt~obsolete.m4 \
+ $(top_srcdir)/auxdir/slurm.m4 \
+ $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+ $(top_srcdir)/auxdir/x_ac_affinity.m4 \
+ $(top_srcdir)/auxdir/x_ac_aix.m4 \
+ $(top_srcdir)/auxdir/x_ac_blcr.m4 \
+ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+ $(top_srcdir)/auxdir/x_ac_cflags.m4 \
+ $(top_srcdir)/auxdir/x_ac_cray.m4 \
+ $(top_srcdir)/auxdir/x_ac_databases.m4 \
+ $(top_srcdir)/auxdir/x_ac_debug.m4 \
+ $(top_srcdir)/auxdir/x_ac_elan.m4 \
+ $(top_srcdir)/auxdir/x_ac_env.m4 \
+ $(top_srcdir)/auxdir/x_ac_federation.m4 \
+ $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+ $(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+ $(top_srcdir)/auxdir/x_ac_iso.m4 \
+ $(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
+ $(top_srcdir)/auxdir/x_ac_munge.m4 \
+ $(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+ $(top_srcdir)/auxdir/x_ac_pam.m4 \
+ $(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+ $(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+ $(top_srcdir)/auxdir/x_ac_readline.m4 \
+ $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+ $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+ $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+ $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
+ $(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+ $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = \
+ arrayrun \
+ arrayrun_worker \
+ README
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu contribs/arrayrun/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu contribs/arrayrun/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+ distclean distclean-generic distclean-libtool distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff -Nru slurm-llnl-2.2.7/contribs/arrayrun/README slurm-llnl-2.3.2/contribs/arrayrun/README
--- slurm-llnl-2.2.7/contribs/arrayrun/README 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/arrayrun/README 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,132 @@
+-*- text -*- $Id: README.arrayrun,v 1.2 2011/06/28 11:21:27 bhm Exp $
+
+Overview
+========
+
+Arrayrun is an attempt to simulate arrayjobs as found in SGE and PBS. It
+works very similarly to mpirun:
+
+ arrayrun [-r] taskids [sbatch arguments] YourCommand [arguments]
+
+In principle, arrayrun does
+
+ TASK_ID=id sbatch [sbatch arguments] YourCommand [arguments]
+
+for each id in the 'taskids' specification. 'taskids' is a comma separated
+list of integers, ranges of integers (first-last) or ranges with step size
+(first-last:step). If -r is specified, arrayrun will restart a job that has
+failed. To avoid endless loops, a job is only restarted once, and a maximum
+of 10 (configurable) jobs will be restarted.
+
+The idea is to submit a master job that calls arrayrun to start the jobs,
+for instance
+
+ $ cat workerScript
+ #!/bin/sh
+ #SBATCH --account=YourProject
+ #SBATCH --time=1:0:0
+ #SBATCH --mem-per-cpu=1G
+
+ DATASET=dataset.$TASK_ID
+ OUTFILE=result.$TASK_ID
+ cd $SCRATCH
+ YourProgram $DATASET > $OUTFILE
+ # end of workerScript
+
+ $ cat submitScript
+ #!/bin/sh
+ #SBATCH --account=YourProject
+ #SBATCH --time=50:0:0
+ #SBATCH --mem-per-cpu=100M
+
+ arrayrun 1-200 workerScript
+ # end of submitScript
+
+ $ sbatch submitScript
+
+The --time specification in the master script must be long enough for all
+jobs to finish.
+
+Alternatively, arrayrun can be run on the command line of a login or master
+node.
+
+If the master job is cancelled, or the arrayrun process is killed, it tries
+to scancel all running or pending jobs before it exits.
+
+Arrayrun tries not to flood the queue with jobs. It works by submitting a
+limited number of jobs, sleeping a while, checking the status of its jobs,
+and iterating, until all jobs have finished. All limits and times are
+configurable (see below). It also tries to handle all errors in a graceful
+manner.
+
+
+Installation and configuration
+==============================
+
+There are two files, arrayrun (to be called by users) and arrayrun_worker
+(exec'ed or srun'ed by arrayrun, to make scancel work).
+
+arrayrun should be placed somewhere on the $PATH. arrayrun_worker can be
+placed anywhere. Both files should be accessible from all nodes.
+
+There are quite a few configuration variables, so arrayrun can be tuned to
+work under different policies and work loads.
+
+Configuration variables in arrayrun:
+
+- WORKER: the location of arrayrun_worker
+
+Configuration variables in arrayrun_worker:
+
+- $maxJobs: The maximal number of jobs arrayrun will allow in the
+ queue at any time
+- $maxIdleJobs: The maximal number of _pending_ jobs arrayrun will allow
+ in the queue at any time
+- $maxBurst: The maximal number of jobs submitted at a time
+- $pollSeconds: How many seconds to sleep between each iteration
+- $maxFails: The maximal number of errors to accept when submitting a
+ job
+- $retrySleep: The number of seconds to sleep between each retry when
+ submitting a job
+- $doubleCheckSleep: The number of seconds to sleep after a failed sbatch
+ before running squeue to double check whether the job
+ was submitted or not.
+- $maxRestarts: The maximal number of restarts all in all
+- $sbatch: The full path of the sbatch command to use
+
+
+Notes and caveats
+=================
+
+Arrayrun is an attempt to simulate array jobs. As such, it is not
+perfect or foolproof. Here are a couple of caveats.
+
+- Sometimes, arrayrun fails to scancel all jobs when it is itself cancelled
+
+- When arrayrun is run as a master job, it consumes one CPU for the whole
+ duration of the job. Also, the --time limit must be long enough. This can
+ be avoided by running arrayrun interactively on a master/login node (in
+ which case running it under screen is probably a good idea).
+
+- Arrayrun does (currently) not checkpoint, so if an arrayrun is restarted,
+ it starts from scratch with the first taskid.
+
+We welcome any suggestions for improvements or additional functionality!
+
+
+Copyright
+=========
+
+Copyright 2009,2010,2011 Bjørn-Helge Mevik
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License version 2 for more details.
+
+A copy of the GPL v. 2 text is available here:
+http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
diff -Nru slurm-llnl-2.2.7/contribs/cray/etc_init_d_munge slurm-llnl-2.3.2/contribs/cray/etc_init_d_munge
--- slurm-llnl-2.2.7/contribs/cray/etc_init_d_munge 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/etc_init_d_munge 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,559 @@
+#!/bin/sh
+#
+# /etc/init.d/munge - Start/stop script configured for Cray XT/XE
+#
+###############################################################################
+# Written by Chris Dunlap .
+# Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# UCRL-CODE-155910.
+###############################################################################
+# chkconfig: - 40 60
+# Description: Start/Stop the MUNGE authentication service.
+###############################################################################
+### BEGIN INIT INFO
+# Provides: munge
+# Required-Start: $remote_fs
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 5
+# Default-Stop:
+# Short-Description: Start/Stop the MUNGE authentication service.
+# Description: MUNGE (MUNGE Uid 'N' Gid Emporium) is a highly scalable
+# authentication service for creating and validating
+# credentials.
+### END INIT INFO
+
+unset DESC DAEMON CONFIG DAEMON_ARGS PIDFILE NICE USER SIGHUP_RELOAD
+
+prefix="/opt/slurm/munge"
+exec_prefix="${prefix}"
+sbindir="${exec_prefix}/sbin"
+sysconfdir="${prefix}/etc"
+localstatedir="/var"
+
+DESC="MUNGE"
+DAEMON="$sbindir/munged"
+#CONFIG=#_NOT_SUPPORTED_#
+DAEMON_ARGS="--key-file ${prefix}/etc/munge.key"
+PIDFILE="$localstatedir/run/munge/munged.pid"
+#NICE=
+#USER="daemon"
+#SIGHUP_RELOAD=#_NOT_SUPPORTED_#
+
+###############################################################################
+
+service_init ()
+{
+# Determine the system type and initialize the environment.
+#
+# Note that the shell positional parameters must be preserved when calling
+# this function in order for SuSE to initialize its environment properly.
+##
+ PATH=/sbin:/usr/sbin:/bin:/usr/bin
+ DAEMON_NAME="`basename \"$DAEMON\"`"
+ SCRIPT_NAME="`basename \"$0\" .init | sed 's/^[SK][0-9][0-9]*//'`"
+ SIGTERM_TIMEOUT="3"
+ STATUS=0
+
+ # Read configuration defaults to override variables:
+ # $CONFIG, $DAEMON_ARGS, $PIDFILE, $USER, $NICE, $SIGHUP_RELOAD
+ ##
+ for dir in "$sysconfdir/default" "$sysconfdir/sysconfig"; do
+ [ -r "$dir/$SCRIPT_NAME" ] && . "$dir/$SCRIPT_NAME"
+ done
+ [ -z "$DAEMON_ARGS" -a -n "$OPTIONS" ] && DAEMON_ARGS="$OPTIONS"
+ [ "`id | sed 's/^uid=\([0-9]*\).*/\1/'`" -ne 0 ] && unset USER
+ expr -- "$NICE" : '[0-9]*$' >/dev/null 2>&1 && NICE="+$NICE"
+ [ -n "$SIGHUP_RELOAD" -a "$SIGHUP_RELOAD" != 0 ] \
+ && RELOAD=1 || unset RELOAD
+
+ if [ -f /etc/debian_version -a -x /sbin/start-stop-daemon ]; then
+ SYSTEM="DEBIAN"
+ [ -x "$DAEMON" ] || exit 0 # pkg removed but not purged
+ [ -r /etc/default/rcS ] && . /etc/default/rcS
+ [ -r /lib/init/vars.sh ] && . /lib/init/vars.sh
+ [ -r /lib/lsb/init-functions ] && . /lib/lsb/init-functions
+ elif [ -f /etc/redhat-release -a -r /etc/init.d/functions ]; then
+ SYSTEM="REDHAT"
+ . /etc/init.d/functions
+ RH_SUBSYS="/var/lock/subsys/$DAEMON_NAME"
+ elif [ -f /etc/SuSE-release -a -r /etc/rc.status ]; then
+ SYSTEM="SUSE"
+ . /etc/rc.status
+ rc_reset
+ elif [ -r /lib/lsb/init-functions ]; then
+ SYSTEM="LSB"
+ . /lib/lsb/init-functions
+ else
+ SYSTEM="OTHER"
+ fi
+
+ # Exit if the package has been removed.
+ ##
+ [ -x "$DAEMON" ] || exit 5 # LSB: program not installed
+
+ # Exit if the configuration has been removed.
+ ##
+ [ -z "$CONFIG" -o -r "$CONFIG" ] || exit 6 # LSB: program not configured
+}
+
+service_fini ()
+{
+# Return the exit status.
+##
+ case $SYSTEM in
+ SUSE)
+ rc_exit
+ ;;
+ DEBIAN|REDHAT|LSB|*)
+ exit $STATUS
+ ;;
+ esac
+}
+
+service_start ()
+{
+# Start the service.
+#
+# Required by LSB, where running "start" on a service already running should be
+# considered successful.
+##
+ log_init "Starting $DESC" "$DAEMON_NAME"
+
+ VARRUNDIR="$localstatedir/run/munge"
+ if [ ! -d "$VARRUNDIR" ]; then
+ mkdir -m 755 -p "$VARRUNDIR"
+ [ -n "$USER" ] && chown "$USER" "$VARRUNDIR"
+ fi
+
+ case $SYSTEM in
+ DEBIAN)
+ if $0 status >/dev/null 2>&1; then
+ STATUS=0
+ else
+ ERRMSG=`start-stop-daemon --start --quiet \
+ ${NICE:+"--nicelevel"} ${NICE:+"$NICE"} \
+ ${USER:+"--chuid"} ${USER:+"$USER"} \
+ ${PIDFILE:+"--pidfile"} ${PIDFILE:+"$PIDFILE"} \
+ --exec "$DAEMON" -- $DAEMON_ARGS 2>&1`
+ STATUS=$?
+ fi
+ ;;
+ REDHAT)
+ if $0 status >/dev/null 2>&1; then
+ STATUS=0
+ else
+ daemon ${NICE:+"$NICE"} ${USER:+"--user"} ${USER:+"$USER"} \
+ "$DAEMON" $DAEMON_ARGS
+ STATUS=$?
+ fi
+ [ $STATUS -eq 0 ] && touch "$RH_SUBSYS" >/dev/null 2>&1
+ ;;
+ SUSE)
+ ERRMSG=`startproc ${NICE:+"-n"} ${NICE:+"$NICE"} \
+ ${USER:+"-u"} ${USER:+"$USER"} \
+ ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} \
+ "$DAEMON" $DAEMON_ARGS 2>&1`
+ rc_status -v
+ STATUS=$?
+ ;;
+ LSB)
+ if [ -n "$USER" ]; then
+ ERRMSG=`su "$USER" -c "/sbin/start_daemon \
+ ${NICE:+\"-n\"} ${NICE:+\"$NICE\"} \
+ ${PIDFILE:+\"-p\"} ${PIDFILE:+\"$PIDFILE\"} \
+ \"$DAEMON\" $DAEMON_ARGS" 2>&1`
+ else
+ ERRMSG=`start_daemon ${NICE:+"-n"} ${NICE:+"$NICE"} \
+ ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON" $DAEMON_ARGS 2>&1`
+ fi
+ STATUS=$?
+ ;;
+ *)
+ if $0 status >/dev/null 2>&1; then
+ STATUS=0
+ else
+ [ -n "$NICE" ] && nice="nice -n $NICE"
+ if [ -n "$USER" ]; then
+ ERRMSG=`su "$USER" -c "$nice \"$DAEMON\" $DAEMON_ARGS" 2>&1`
+ else
+ ERRMSG=`$nice "$DAEMON" $DAEMON_ARGS 2>&1`
+ fi
+ STATUS=$?
+ fi
+ ;;
+ esac
+ log_fini "$STATUS" "$ERRMSG"
+}
+
+service_stop ()
+{
+# Stop the service.
+#
+# Required by LSB, where running "stop" on a service already stopped or not
+# running should be considered successful.
+##
+ log_init "Stopping $DESC" "$DAEMON_NAME"
+ case $SYSTEM in
+ DEBIAN)
+ if ! $0 status >/dev/null 2>&1; then
+ STATUS=0
+ else
+ start-stop-daemon --stop --quiet \
+ ${PIDFILE:+"--pidfile"} ${PIDFILE:+"$PIDFILE"} \
+ --name "$DAEMON_NAME" ${SIGTERM_TIMEOUT:+"--retry"} \
+ ${SIGTERM_TIMEOUT:+"$SIGTERM_TIMEOUT"} >/dev/null 2>&1
+ STATUS=$?
+ fi
+ ;;
+ REDHAT)
+ if ! $0 status >/dev/null 2>&1; then
+ STATUS=0
+ else
+ killproc "$DAEMON"
+ STATUS=$?
+ fi
+ [ $STATUS -eq 0 ] && rm -f "$RH_SUBSYS" >/dev/null 2>&1
+ ;;
+ SUSE)
+ killproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} \
+ ${SIGTERM_TIMEOUT:+"-t"} ${SIGTERM_TIMEOUT:+"$SIGTERM_TIMEOUT"} \
+ "$DAEMON"
+ rc_status -v
+ ;;
+ LSB)
+ killproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON"
+ STATUS=$?
+ ;;
+ *)
+ signal_process "$DAEMON"
+ rc=$?
+ [ $rc -eq 0 -o $rc -eq 2 ] && STATUS=0 || STATUS=1
+ ;;
+ esac
+ log_fini "$STATUS"
+ [ -f "$PIDFILE" ] && rm -f "$PIDFILE"
+}
+
+service_restart ()
+{
+# Stop and restart the service if it is already running;
+# otherwise, start the service.
+#
+# Required by LSB, where running "restart" on a service already stopped or not
+# running should be considered successful.
+##
+ if $0 status >/dev/null 2>&1; then
+ $0 stop && $0 start
+ else
+ $0 start
+ fi
+
+ case $SYSTEM in
+ SUSE)
+ rc_status
+ ;;
+ DEBIAN|REDHAT|LSB|*)
+ STATUS=$?
+ ;;
+ esac
+}
+
+service_try_restart ()
+{
+# Restart the service if it is already running.
+#
+# Optional for LSB, where running "try-restart" on a service already stopped or
+# not running should be considered successful.
+# Also known as "condrestart" by RedHat.
+##
+ case $SYSTEM in
+ REDHAT)
+ [ -f "$RH_SUBSYS" ] && $0 restart || :
+ STATUS=$?
+ ;;
+ SUSE)
+ $0 status >/dev/null 2>&1 && $0 restart || rc_reset
+ rc_status
+ ;;
+ DEBIAN|LSB|*)
+ $0 status >/dev/null 2>&1 && $0 restart || :
+ STATUS=$?
+ ;;
+ esac
+}
+
+service_reload ()
+{
+# Reload the configuration without stopping and restarting the service.
+#
+# Optional for LSB.
+##
+ [ -z "$RELOAD" ] && STATUS=3 # LSB: unimplemented feature
+
+ log_init "Reloading $DESC" "$DAEMON_NAME"
+ case $SYSTEM in
+ DEBIAN)
+ if [ -n "$RELOAD" ]; then
+ start-stop-daemon --stop --quiet --signal HUP \
+ ${PIDFILE:+"--pidfile"} ${PIDFILE:+"$PIDFILE"} \
+ --name "$DAEMON_NAME" >/dev/null 2>&1
+ STATUS=$?
+ fi
+ ;;
+ REDHAT)
+ if [ -n "$RELOAD" ]; then
+ killproc "$DAEMON" -HUP
+ STATUS=$?
+ else
+ echo_failure
+ fi
+ ;;
+ SUSE)
+ if [ -n "$RELOAD" ]; then
+ killproc -HUP ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON"
+ else
+ rc_failed $STATUS
+ fi
+ rc_status -v
+ ;;
+ LSB)
+ if [ -n "$RELOAD" ]; then
+ killproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON" -HUP
+ STATUS=$?
+ fi
+ ;;
+ *)
+ if [ -n "$RELOAD" ]; then
+ signal_process "$DAEMON" "HUP"
+ STATUS=$?
+ fi
+ ;;
+ esac
+ log_fini "$STATUS"
+}
+
+service_force_reload ()
+{
+# Reload the configuration if the service supports this;
+# otherwise, restart the service if it is already running.
+#
+# Required by LSB, where running "force-reload" on a service already stopped or
+# not running should be considered successful.
+##
+ if [ -n "$RELOAD" ]; then
+ $0 reload
+ else
+ $0 try-restart
+ fi
+
+ case $SYSTEM in
+ SUSE)
+ rc_status
+ ;;
+ DEBIAN|REDHAT|LSB|*)
+ STATUS=$?
+ ;;
+ esac
+}
+
+service_status ()
+{
+# Print the current status of the service.
+#
+# Required by LSB.
+##
+ case $SYSTEM in
+ REDHAT)
+ status "$DAEMON"
+ STATUS=$?
+ ;;
+ SUSE)
+ printf "Checking for service $DESC: "
+ checkproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON"
+ rc_status -v
+ ;;
+ LSB)
+ printf "Checking status of $DESC: "
+ pids=`pidofproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} \
+ "$DAEMON" 2>/dev/null`
+ STATUS=$?
+ if [ $STATUS -eq 0 -a -n "$pids" ]; then
+ echo "running."
+ elif [ $STATUS -ne 0 -a -s "$PIDFILE" ]; then
+ echo "dead."
+ else
+ echo "stopped."
+ fi
+ ;;
+ DEBIAN|*)
+ printf "Checking status of $DESC: "
+ pids=`query_pids "$DAEMON" "$PIDFILE"`
+ rc=$?
+ if [ $rc -eq 0 -a -n "$pids" ]; then
+ echo "running."
+ STATUS=0 # LSB: program is running
+ elif [ $rc -ne 0 -a -s "$PIDFILE" ]; then
+ echo "dead."
+ STATUS=1 # LSB: program is dead & pidfile exists
+ elif [ $rc -ne 0 ]; then
+ echo "stopped."
+ STATUS=3 # LSB: program is not running
+ else
+ echo "unknown."
+ STATUS=4 # LSB: program status unknown
+ fi
+ ;;
+ esac
+}
+
+query_pids ()
+{
+# Writes the matching PIDs to stdout.
+# Returns 0 on success (ie, pids found).
+##
+ PROCNAME="$1"
+ PIDFILE="$2"
+
+ if type pgrep >/dev/null 2>&1; then
+ pids=`pgrep -d ' ' -x "\`basename \"$PROCNAME\"\`" 2>/dev/null`
+ rc=$?
+ elif type pidof >/dev/null 2>&1; then
+ pids=`pidof -o $$ -x "$PROCNAME" 2>/dev/null`
+ rc=$?
+ else
+ pids=`(ps awx -o pid -o command || ps -e -f -o pid -o args) 2>/dev/null \
+ | tail +2 | egrep "( |/)$PROCNAME( |$)" | grep -v egrep \
+ | sed 's/ *\([0-9]*\).*/\1/' | sort -n | tr '\012' ' '`
+ [ -n "$pids" ] && rc=0 || rc=1
+ fi
+
+ unset pids_running
+ if [ -n "$pids" -a -r "$PIDFILE" ]; then
+ read pid_line < "$PIDFILE"
+ for pid in $pid_line; do
+ expr -- "$pid" : '[0-9]*$' >/dev/null 2>&1 \
+ && expr -- " $pids " : ".* $pid .*" >/dev/null 2>&1 \
+ && pids_running="$pids_running $pid"
+ done
+ [ -n "$pids_running" ] && pids=$pids_running
+ fi
+
+ echo $pids
+ return $rc
+}
+
+signal_process ()
+{
+# Returns 0 on success, 1 if kill failed, 2 if PROCNAME is not running.
+##
+ PROCNAME="$1"
+ SIGNUM="$2"
+
+ pids=`query_pids "$DAEMON" "$PIDFILE"`
+ [ $? -ne 0 -o -z "$pids" ] && return 2
+
+ kill ${SIGNUM:+"-$SIGNUM"} $pids >/dev/null 2>&1
+ [ $? -ne 0 ] && return 1
+ [ -n "$SIGNUM" ] && return 0
+
+ pids=`query_pids "$DAEMON" "$PIDFILE"`
+ [ $? -ne 0 -o -z "$pids" ] && return 0
+ [ -z "$SIGTERM_TIMEOUT" ] && return 1
+
+ sleep "$SIGTERM_TIMEOUT"
+ kill -KILL $pids >/dev/null 2>&1
+ pids=`query_pids "$DAEMON" "$PIDFILE"`
+ [ $? -ne 0 -o -z "$pids" ] && return 0
+ return 1
+}
+
+log_init ()
+{
+# Output informational message at beginning of action.
+##
+ MESSAGE="$1"
+ PROCNAME="$2"
+
+ case $SYSTEM in
+ DEBIAN)
+ if [ "$VERBOSE" != no ]; then
+ if type log_daemon_msg >/dev/null 2>&1; then
+ log_daemon_msg "$MESSAGE" "$PROCNAME"
+ else
+ printf "$MESSAGE: $PROCNAME"
+ fi
+ fi
+ ;;
+ REDHAT|SUSE|LSB|*)
+ printf "$MESSAGE: $PROCNAME"
+ ;;
+ esac
+}
+
+log_fini ()
+{
+# Output informational/error message at end of action.
+##
+ STATUS="$1"
+ ERRMSG="$2"
+
+ case $SYSTEM in
+ DEBIAN)
+ if [ "$VERBOSE" != no ]; then
+ if ( type log_end_msg && type log_failure_msg ) >/dev/null 2>&1; then
+ log_end_msg "$STATUS"
+ [ $STATUS -eq 0 -o -z "$ERRMSG" ] || log_failure_msg "$ERRMSG"
+ else
+ [ $STATUS -eq 0 ] && echo "." || echo " (failed)."
+ [ $STATUS -eq 0 -o -z "$ERRMSG" ] || echo "$ERRMSG" >&2
+ fi
+ fi
+ ;;
+ REDHAT)
+ echo
+ ;;
+ SUSE)
+ [ $STATUS -eq 0 -o -z "$ERRMSG" ] || echo "$ERRMSG" >&2
+ ;;
+ LSB|*)
+ [ $STATUS -eq 0 ] && echo "." || echo " (failed)."
+ [ $STATUS -eq 0 -o -z "$ERRMSG" ] || echo "$ERRMSG" >&2
+ ;;
+ esac
+}
+
+###############################################################################
+
+service_init "$@"
+
+case "$1" in
+ start)
+ service_start
+ ;;
+ stop)
+ service_stop
+ ;;
+ restart)
+ service_restart
+ ;;
+ try-restart|condrestart)
+ service_try_restart
+ ;;
+ reload)
+ service_reload
+ ;;
+ force-reload)
+ service_force_reload
+ ;;
+ status)
+ service_status
+ ;;
+ *)
+ echo "Usage: `basename \"$0\"`" \
+ "(start|stop|restart|try-restart|reload|force-reload|status)" >&2
+ exit 2 # LSB: invalid or excess argument(s)
+ ;;
+esac
+
+service_fini
diff -Nru slurm-llnl-2.2.7/contribs/cray/etc_sysconfig_slurm slurm-llnl-2.3.2/contribs/cray/etc_sysconfig_slurm
--- slurm-llnl-2.2.7/contribs/cray/etc_sysconfig_slurm 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/etc_sysconfig_slurm 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,24 @@
+#
+# /etc/sysconfig/slurm for Cray XT/XE systems
+#
+# Cray is SuSe-based, which means that ulimits from /etc/security/limits.conf
+# will get picked up any time slurm is restarted e.g. via pdsh/ssh. Since slurm
+# respects configured limits, this can mean that for instance batch jobs get
+# killed as a result of configuring CPU time limits. Set sane start limits here.
+#
+# Values were taken from pam-1.1.2 Debian package
+ulimit -t unlimited # max amount of CPU time in seconds
+ulimit -d unlimited # max size of a process's data segment in KB
+ulimit -l 64 # max memory size (KB) that may be locked into memory
+ulimit -m unlimited # max RSS size in KB
+ulimit -u unlimited # max number of processes
+ulimit -f unlimited # max size of files written by process and children
+ulimit -x unlimited # max number of file locks
+ulimit -i 16382 # max number of pending signals
+ulimit -q 819200 # max number of bytes in POSIX message queues
+ulimit -Sc 0 # max size of core files (soft limit)
+ulimit -Hc unlimited # max size of core files (hard limit)
+ulimit -Ss 8192 # max stack size (soft limit)
+ulimit -Hs unlimited # max stack size (hard limit)
+ulimit -n 1024 # max number of open file descriptors
+ulimit -v unlimited # max size of virtual memory (address space) in KB
Binary files /tmp/jKTrgHjCbo/slurm-llnl-2.2.7/contribs/cray/libalps_test_programs.tar.gz and /tmp/eIE_aeUSMX/slurm-llnl-2.3.2/contribs/cray/libalps_test_programs.tar.gz differ
diff -Nru slurm-llnl-2.2.7/contribs/cray/Makefile.am slurm-llnl-2.3.2/contribs/cray/Makefile.am
--- slurm-llnl-2.2.7/contribs/cray/Makefile.am 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/Makefile.am 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,40 @@
+#
+# Makefile for cray scripts
+#
+
+AUTOMAKE_OPTIONS = foreign
+
+EXTRA_DIST = \
+ etc_init_d_munge \
+ etc_sysconfig_slurm \
+ libalps_test_programs.tar.gz \
+ munge_build_script.sh \
+ opt_modulefiles_slurm \
+ pam_job.c \
+ slurm-build-script.sh
+
+if BUILD_SRUN2APRUN
+ bin_SCRIPTS = srun
+endif
+
+srun:
+_perldir=$(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
+
+install-binSCRIPTS: $(bin_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ test -z "$(DESTDIR)$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)"
+ @list='$(bin_SCRIPTS)'; for p in $$list; do \
+ echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/cray/$$p.pl | sed 's%BINDIR%@bindir@%' > $(DESTDIR)$(bindir)/$$p"; \
+ sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/cray/$$p.pl | sed "s%BINDIR%@bindir@%" > $(DESTDIR)$(bindir)/$$p; \
+ chmod 755 $(DESTDIR)$(bindir)/$$p;\
+ done
+
+uninstall-binSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_SCRIPTS)'; for p in $$list; do \
+ echo " rm -f '$(DESTDIR)$(bindir)/$$p'"; \
+ rm -f "$(DESTDIR)$(bindir)/$$p"; \
+ done
+
+clean:
+
diff -Nru slurm-llnl-2.2.7/contribs/cray/Makefile.in slurm-llnl-2.3.2/contribs/cray/Makefile.in
--- slurm-llnl-2.2.7/contribs/cray/Makefile.in 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,532 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for cray scripts
+#
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/cray
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+ $(top_srcdir)/auxdir/libtool.m4 \
+ $(top_srcdir)/auxdir/ltoptions.m4 \
+ $(top_srcdir)/auxdir/ltsugar.m4 \
+ $(top_srcdir)/auxdir/ltversion.m4 \
+ $(top_srcdir)/auxdir/lt~obsolete.m4 \
+ $(top_srcdir)/auxdir/slurm.m4 \
+ $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+ $(top_srcdir)/auxdir/x_ac_affinity.m4 \
+ $(top_srcdir)/auxdir/x_ac_aix.m4 \
+ $(top_srcdir)/auxdir/x_ac_blcr.m4 \
+ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+ $(top_srcdir)/auxdir/x_ac_cflags.m4 \
+ $(top_srcdir)/auxdir/x_ac_cray.m4 \
+ $(top_srcdir)/auxdir/x_ac_databases.m4 \
+ $(top_srcdir)/auxdir/x_ac_debug.m4 \
+ $(top_srcdir)/auxdir/x_ac_elan.m4 \
+ $(top_srcdir)/auxdir/x_ac_env.m4 \
+ $(top_srcdir)/auxdir/x_ac_federation.m4 \
+ $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+ $(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+ $(top_srcdir)/auxdir/x_ac_iso.m4 \
+ $(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
+ $(top_srcdir)/auxdir/x_ac_munge.m4 \
+ $(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+ $(top_srcdir)/auxdir/x_ac_pam.m4 \
+ $(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+ $(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+ $(top_srcdir)/auxdir/x_ac_readline.m4 \
+ $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+ $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+ $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+ $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
+ $(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+ $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(bindir)"
+SCRIPTS = $(bin_SCRIPTS)
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+EXTRA_DIST = \
+ etc_init_d_munge \
+ etc_sysconfig_slurm \
+ libalps_test_programs.tar.gz \
+ munge_build_script.sh \
+ opt_modulefiles_slurm \
+ pam_job.c \
+ slurm-build-script.sh
+
+@BUILD_SRUN2APRUN_TRUE@bin_SCRIPTS = srun
+_perldir = $(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign contribs/cray/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign contribs/cray/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS)
+installdirs:
+ for dir in "$(DESTDIR)$(bindir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binSCRIPTS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+ distclean distclean-generic distclean-libtool distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-binSCRIPTS install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip installcheck installcheck-am installdirs \
+ maintainer-clean maintainer-clean-generic mostlyclean \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ uninstall uninstall-am uninstall-binSCRIPTS
+
+
+srun:
+
+install-binSCRIPTS: $(bin_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ test -z "$(DESTDIR)$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)"
+ @list='$(bin_SCRIPTS)'; for p in $$list; do \
+ echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/cray/$$p.pl | sed 's%BINDIR%@bindir@%' > $(DESTDIR)$(bindir)/$$p"; \
+ sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/cray/$$p.pl | sed "s%BINDIR%@bindir@%" > $(DESTDIR)$(bindir)/$$p; \
+ chmod 755 $(DESTDIR)$(bindir)/$$p;\
+ done
+
+uninstall-binSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(bin_SCRIPTS)'; for p in $$list; do \
+ echo " rm -f '$(DESTDIR)$(bindir)/$$p'"; \
+ rm -f "$(DESTDIR)$(bindir)/$$p"; \
+ done
+
+clean:
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff -Nru slurm-llnl-2.2.7/contribs/cray/munge_build_script.sh slurm-llnl-2.3.2/contribs/cray/munge_build_script.sh
--- slurm-llnl-2.2.7/contribs/cray/munge_build_script.sh 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/munge_build_script.sh 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,81 @@
+#!/bin/bash
+#
+# Build munge from sources on Cray
+#
+
+#----------------------------------------------------------------------------
+# CONFIGURATION
+#----------------------------------------------------------------------------
+# source and build directories
+LIBROOT="${LIBROOT:-/ufs/slurm/build}"
+MUNGE_BUILD="${LIBROOT}/munge"
+
+# packaging installation directory
+DESTDIR="/tmp/munge-build"
+
+# installation and runtime directories
+MUNGE_DIR="/opt/slurm/munge"
+MUNGE_LOG="/var"
+
+# input and output tarballs
+ZIP="${MUNGE_BUILD}/zip"
+MUNGE_TAR=${ZIP}/munge*bz2
+TARBALL="${LIBROOT}/munge_build-$(date +%F).tar.gz"
+#----------------------------------------------------------------------------
+# SUBROUTINES
+#----------------------------------------------------------------------------
+function die() { echo -e "$@" >&2; exit 1; }
+
+function extract_top_level_from_tarball() {
+ local tarball="${1:?}" dir
+ test -r "${tarball}" || die "can not read ${tarball}"
+
+ case $(file "${tarball}") in
+ *gzip*) compression="-z";;
+ *bzip2*) compression="--bzip2";;
+ *compress*data) compression="--uncompress";;
+ *tar*) compression="";;
+ *) compression="--auto-compress";;
+ esac
+ dir="$(tar ${compression} -tf ${tarball} | \
+ sed -n '/\// { s@^\([^/]\+\).*$@\1@p;q }')"
+ test -n "${dir}" || die "can not determine directory from $tarball"
+ echo $dir
+}
+#----------------------------------------------------------------------------
+# SCRIPT PROPER
+#----------------------------------------------------------------------------
+test ${UID} -eq 0 || die "This script wants to be run by root"
+test -d $ZIP || die "No tarball directory '$ZIP'"
+test -f ${MUNGE_TAR} || die "No munge tarball in $ZIP?"
+test -d ${LIBROOT} || die "Can not cd to LIBROOT=$LIBROOT "
+test -d ${MUNGE_BUILD} || mkdir -vp ${MUNGE_BUILD}
+test -n "${DESTDIR}" || die "DESTDIR not set"
+
+# generate a clean build directory
+rm -rf ${DESTDIR} ${TARBALL}
+
+# DEPENDENT CONFIGURATION
+shopt -s nullglob
+MUNGE_SRC="${MUNGE_BUILD}/$(extract_top_level_from_tarball ${MUNGE_TAR})" || exit 1
+MUNGE_LIB="${DESTDIR}${MUNGE_DIR}/lib"
+
+# extract source
+test -d "${LIBROOT}" || mkdir -vp "${LIBROOT}"
+test -d "${MUNGE_SRC}" || tar jxvf ${MUNGE_TAR} -C ${MUNGE_BUILD}
+test -d "${MUNGE_SRC}" || die "need to extract munge tarball"
+cd ${MUNGE_SRC}
+
+# Build
+set -e
+./configure --prefix=${MUNGE_DIR} --localstatedir=${MUNGE_LOG}
+
+make -j
+
+mkdir -p ${DESTDIR}
+make DESTDIR=${DESTDIR%/}/ install
+
+# final tarball
+tar -C ${DESTDIR} -zcpPvf ${TARBALL} .${MUNGE_DIR%/}
+# scp ${TARBALL} boot:
+echo generated output tarball ${TARBALL}
diff -Nru slurm-llnl-2.2.7/contribs/cray/opt_modulefiles_slurm slurm-llnl-2.3.2/contribs/cray/opt_modulefiles_slurm
--- slurm-llnl-2.2.7/contribs/cray/opt_modulefiles_slurm 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/opt_modulefiles_slurm 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,48 @@
+#%Module1.0#####################################################################
+# slurm/munge support module
+# Put into /opt/modulefiles/slurm or some other part of $MODULEPATH
+################################################################################
+
+# SUBROUTINES
+proc ModulesHelp { } {
+ puts stderr "\tThis is slurm $::version.\n"
+ puts stderr "\tPlease consult http://www.schedmd.com/slurmdocs/cray.html"
+}
+
+# CONFIGURATION
+conflict xt-pbs pbs torque
+set slurmdir "/opt/slurm/default"
+set mungedir "/opt/slurm/munge"
+
+set version "UNKNOWN"
+if {![catch {exec $slurmdir/bin/sbatch --version} out]} {
+ set version [lindex $out 1]
+}
+set helptext "Support for the SLURM $version resource allocation system"
+
+# SCRIPT PROPER
+module-whatis $helptext
+
+prepend-path PATH "$slurmdir/bin"
+prepend-path PATH "$mungedir/bin"
+
+prepend-path MANPATH "$slurmdir/share/man"
+prepend-path MANPATH "$mungedir/share/man"
+
+prepend-path PERL5LIB "$slurmdir/lib/perl5/site_perl"
+
+# other useful environment variables
+setenv SINFO_FORMAT {%9P %5a %8s %.10l %.6c %.6z %.7D %10T %N}
+setenv SQUEUE_FORMAT {%.6i %.8u %.7a %.14j %.3t %9r %19S %.10M %.10L %.5D %.4C}
+setenv SQUEUE_ALL {yes} ;# show hidden partitions, too
+setenv SQUEUE_SORT {-t,e,S}
+
+# logfile aliases
+set-alias sd_log {tail -f "/ufs/slurm/var/log/slurmd.log"}
+set-alias sc_log {tail -f "/ufs/slurm/var/log/slurmctld.log"}
+
+if {[exec id -u] == 0} {
+ prepend-path PATH "$slurmdir/sbin"
+ prepend-path PATH "$mungedir/sbin"
+ set-alias sdown {scontrol shutdown}
+}
diff -Nru slurm-llnl-2.2.7/contribs/cray/pam_job.c slurm-llnl-2.3.2/contribs/cray/pam_job.c
--- slurm-llnl-2.2.7/contribs/cray/pam_job.c 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/pam_job.c 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,117 @@
+/*
+ * pam_job.so module to create SGI PAGG container on user login.
+ * Needed on Cray systems to enable PAGG support in interactive salloc sessions.
+ *
+ * 1. install the pam-devel-xxx.rpm corresponding to your pam-xxx.rpm
+ * 2. compile with gcc -fPIC -DPIC -shared pam_job.c -o pam_job.so
+ * 3. install on boot:/rr/current/lib64/security/pam_job.so
+ * 4. in xtopview -c login, add the following line to /etc/pam.d/common-session:
+ * session optional pam_job.so
+ */
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ * Copyright (c) 2011 Centro Svizzero di Calcolo Scientifico
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#define error(fmt, args...) syslog(LOG_CRIT, "pam_job: " fmt, ##args);
+
+#define PAM_SM_ACCOUNT
+#define PAM_SM_SESSION
+#include
+#include
+
+/*
+ * Unroll job.h/jobctl.h header declarations. The rationale is that not all
+ * systems will have the required kernel header (job.h, jobctl.h, paggctl.h).
+ * On early 2.4/2.5 kernels there was a paggctl() system call which was then
+ * replaced by the /proc/job ioctl, which this implementation tests for. All
+ * patches from ftp://oss.sgi.com/projects/pagg/download that use /proc/job
+ * for ioctl have the same ioctl declarations and identical ioctl parameters.
+ * Comparing these patches shows that, when using a 2.6 kernel, there are no
+ * differences at all in the 23 ioctl calls (last patch was for 2.6.16.21).
+ */
+#define JOB_CREATE _IOWR('A', 1, void *)
+struct job_create {
+ uint64_t r_jid; /* Return value of JID */
+ uint64_t jid; /* Jid value requested */
+ int user; /* UID of user associated with job */
+ int options; /* creation options - unused */
+};
+
+PAM_EXTERN int pam_sm_open_session(pam_handle_t * pamh, int flags,
+ int argc, const char **argv)
+{
+ struct job_create jcreate = {0};
+ struct passwd *passwd;
+ char *username;
+ int job_ioctl_fd;
+
+ if (pam_get_item(pamh, PAM_USER, (void *)&username) != PAM_SUCCESS
+ || username == NULL) {
+ error("error recovering username");
+ return PAM_SESSION_ERR;
+ }
+
+ passwd = getpwnam(username);
+ if (!passwd) {
+ error("error getting passwd entry for %s", username);
+ return PAM_SESSION_ERR;
+ }
+ jcreate.user = passwd->pw_uid; /* uid associated with job */
+
+ if ((job_ioctl_fd = open("/proc/job", 0)) < 0) {
+ error("can not open /proc/job: %s", strerror(errno));
+ return PAM_SESSION_ERR;
+ } else if (ioctl(job_ioctl_fd, JOB_CREATE, (void *)&jcreate) != 0) {
+ error("job_create failed (no container): %s", strerror(errno));
+ close(job_ioctl_fd);
+ return PAM_SESSION_ERR;
+ }
+ close(job_ioctl_fd);
+
+ if (jcreate.r_jid == 0)
+ error("WARNING - job containers disabled, no PAGG IDs created");
+ return PAM_SUCCESS;
+}
+
+/*
+ * Not all PAMified apps invoke session management modules. So, we supply
+ * this account management function for such cases. Whenever possible, it
+ * is still better to use the session management version.
+ */
+PAM_EXTERN int pam_sm_acct_mgmt(pam_handle_t *pamh, int flags,
+ int argc, const char **argv)
+{
+ if (pam_sm_open_session(pamh, flags, argc, argv) != PAM_SUCCESS)
+ return PAM_AUTH_ERR;
+ return PAM_SUCCESS;
+}
+
+PAM_EXTERN int pam_sm_close_session(pam_handle_t *pamh, int flags,
+ int argc, const char **argv)
+{
+ return PAM_SUCCESS;
+}
diff -Nru slurm-llnl-2.2.7/contribs/cray/slurm-build-script.sh slurm-llnl-2.3.2/contribs/cray/slurm-build-script.sh
--- slurm-llnl-2.2.7/contribs/cray/slurm-build-script.sh 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/slurm-build-script.sh 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,144 @@
+#!/bin/bash
+#
+# Build script for slurm on Cray XT/XE
+#
+#-------------------------------------------------------------------------------
+# CONFIGURATION
+#-------------------------------------------------------------------------------
+#REBUILD="true" # remuild (no distclean/configure)
+
+# source and build directories
+LIBROOT="${LIBROOT:-/ufs/slurm/build}"
+SLURM_SRC="${SLURM_SRC:-${LIBROOT}/slurm-2.3.0-0.pre4}"
+
+BUILD_ERR="make.err" # make: stderr only
+BUILD_LOG="make.log" # make: stdout + stderr
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# installation
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# packaging installation directory
+DESTDIR="/tmp/slurm-build"
+
+# installation directory
+SLURM_ROOT="/opt/slurm"
+
+# symlink to current version
+SLURM_DEFAULT="${SLURM_ROOT}/default"
+
+# separate system configuration directory
+SLURM_CONF="${SLURM_DEFAULT}/etc"
+
+# space-separated list of things to be built in the contribs/ folder
+SLURM_CONTRIBS="contribs/perlapi contribs/torque"
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# dependencies
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# path to 'mysql_config' (will be overridden if mysql_config is in $PATH)
+MYSQLCONF="${MYSQLCONF:-${LIBROOT}/mysql}"
+
+# munge installation directory containing lib/ and include/ subdirectories
+MUNGE_DIR="${SLURM_ROOT}/munge"
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+#-------------------------------------------------------------------------------
+# SUBROUTINES
+#-------------------------------------------------------------------------------
+function die() { echo -e "$@">&2; exit -1; }
+
+function get_slurm_version() {
+ local vers_file="META"
+ if ! test -f $vers_file; then
+ die "ERROR: no version file '$vers_file'"\
+ "\nRun this script from within the slurm source directory"
+ fi
+ sed -n 's/^.*Version:[^0-9]*\([0-9\.]\+\).*$/\1/p' ${vers_file}
+}
+
+#-------------------------------------------------------------------------------
+# SCRIPT PROPER
+#-------------------------------------------------------------------------------
+shopt -u nullglob
+test ${UID} -eq 0 || die "This script wants to be run by root"
+test -d ${SLURM_SRC} || die "can not cd to SLURM_SRC=$SLURM_SRC"
+test -d $MUNGE_DIR/lib || die "munge is not yet installed"
+test -d ${LIBROOT} || die "can not cd to LIBROOT=$LIBROOT"
+test -n "${DESTDIR}" || die "DESTDIR not set"
+
+#-------------------------------------------------------------------
+# Dependent Configuration
+#-------------------------------------------------------------------
+cd ${SLURM_SRC}
+
+# get current slurm version
+SLURM_VER=$(get_slurm_version) || die "check your PWD (current: $(pwd))"
+SLURM_DIR="${SLURM_ROOT}/${SLURM_VER}"
+
+# name of the tarball to generate at the end of the build process
+TARBALL="${LIBROOT}/slurm_build-${SLURM_VER}.tar.gz"
+#-------------------------------------------------------------------
+# Dependent Tests
+#-------------------------------------------------------------------
+MYSQL_CONFIG="$(which mysql_config 2>/dev/null)"
+if test -z "$MYSQL_CONFIG" -a -z "$MYSQLCONF"; then
+ die 'no mysql_config in $PATH - set $MYSQLCONF manually'
+elif test -n "$MYSQL_CONFIG"; then
+ MYSQLCONF="$(dirname ${MYSQL_CONFIG})"
+fi
+
+# generate a clean build directory
+rm -rf ${DESTDIR} ${TARBALL}
+rm -f ${BUILD_ERR} ${BUILD_LOG}
+
+# (re)configure
+if test -z "${REBUILD}"; then
+ set -x
+ # clean everything else
+ make -j distclean &>/dev/null
+
+ ./configure \
+ --prefix="${SLURM_DIR}" \
+ --sysconfdir="${SLURM_CONF}" \
+ --enable-debug \
+ --enable-front-end\
+ --enable-memory-leak-debug \
+ --with-mysql_config=${MYSQLCONF}\
+ --with-munge="${MUNGE_DIR}" \
+ --with-hwloc="${HWLOC_DIR}" \
+ || die "configure failed"
+else
+ # avoid the slow reconfiguration process, don't build extras
+ unset SLURM_CONTRIBS
+ touch -r config.status configure config.* configure.ac Mak*
+fi
+
+# Build
+tail -F ${BUILD_LOG} & TAIL_PID=$!
+set -ex
+
+# swap stderr, stdout, redirect errors in separate, and both into log file
+(make -j 3>&1 1>&2 2>&3 | tee ${BUILD_ERR}) &>${BUILD_LOG}
+kill ${TAIL_PID} 2>/dev/null
+test -s ${BUILD_ERR} && cat ${BUILD_ERR} >&2
+
+# Installation
+mkdir -p ${DESTDIR}
+make -j DESTDIR=${DESTDIR%/}/ install
+
+if false;then
+# Perl-API and wrappers for qsub/qstat etc.
+for CONTRIB in ${SLURM_CONTRIBS}
+do
+ test -n "${REBUILD}" || make -C ${CONTRIB} clean
+ make -C ${CONTRIB}
+ make -C ${CONTRIB} DESTDIR=${DESTDIR%/} install
+done
+fi
+
+# create the default symlink
+rm -vf ${DESTDIR}${SLURM_DEFAULT}
+ln -s ${SLURM_VER} ${DESTDIR}${SLURM_DEFAULT}
+
+# Synchronize sources or generate tarball.
+tar -C ${DESTDIR} -zcf ${TARBALL} .${SLURM_ROOT} && scp ${TARBALL} boot:
diff -Nru slurm-llnl-2.2.7/contribs/cray/srun.pl slurm-llnl-2.3.2/contribs/cray/srun.pl
--- slurm-llnl-2.2.7/contribs/cray/srun.pl 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/cray/srun.pl 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,1103 @@
+#! /usr/bin/perl -w
+###############################################################################
+#
+# srun - Wrapper for Cray's "aprun" command. If not executed within a job
+# allocation, then also use "salloc" to create the allocation before
+# executing "aprun".
+#
+###############################################################################
+#
+# Copyright (C) 2011 SchedMD LLC .
+# Supported by the Oak Ridge National Laboratory Extreme Scale Systems Center
+# Written by Morris Jette .
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see .
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# In addition, as a special exception, the copyright holders give permission
+# to link the code of portions of this program with the OpenSSL library under
+# certain conditions as described in each individual source file, and
+# distribute linked combinations including the two. You must obey the GNU
+# General Public License in all respects for all of the code used other than
+# OpenSSL. If you modify file(s) with this exception, you may extend this
+# exception to your version of the file(s), but you are not obligated to do
+# so. If you do not wish to do so, delete this exception statement from your
+# version. If you delete this exception statement from all source files in
+# the program, then also delete it here.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+###############################################################################
+
+use strict;
+use FindBin;
+use Getopt::Long 2.24 qw(:config no_ignore_case require_order autoabbrev bundling);
+use lib "${FindBin::Bin}/../lib/perl";
+use autouse 'Pod::Usage' => qw(pod2usage);
+use Slurm ':all';
+use Switch;
+
+my ( $account,
+ $acctg_freq,
+ $alps,
+ $aprun_line_buf,
+ $aprun_quiet,
+ $begin_time,
+ $chdir,
+ $check_time,
+ $check_dir,
+ $comment,
+ $constraint,
+ $contiguous,
+ $cores_per_socket,
+ $cpu_bind,
+ $cpus_per_task,
+ $debugger_test,
+ $dependency,
+ $disable_status,
+ $distribution,
+ $error_file,
+ $epilog,
+ $exclude_nodes,
+ $exclusive,
+ $extra_node_info,
+ $group_id,
+ $gres,
+ $help,
+ $hint,
+ $hold,
+ $immediate,
+ $input_file,
+ $job_id,
+ $job_name,
+ $kill_on_bad_exit,
+ $label,
+ $licenses,
+ $mail_type,
+ $mail_user,
+ $man,
+ $memory,
+ $memory_per_cpu,
+ $memory_bind, $mem_local,
+ $min_cpus,
+ $msg_timeout,
+ $mpi_type,
+ $multi_prog, $multi_executables,
+ $network,
+ $nice,
+ $no_allocate,
+ $nodelist, $nid_list,
+ $ntasks_per_core,
+ $ntasks_per_node,
+ $ntasks_per_socket,
+ $num_nodes,
+ $num_tasks,
+ $overcommit,
+ $output_file,
+ $open_mode,
+ $partition,
+ $preserve_env,
+ $prolog,
+ $propagate,
+ $pty,
+ $quiet,
+ $quit_on_interrupt,
+ $qos,
+ $relative,
+ $resv_ports,
+ $reservation,
+ $restart_dir,
+ $share,
+ $signal,
+ $slurmd_debug,
+ $sockets_per_node,
+ $task_epilog,
+ $task_prolog,
+ $test_only,
+ $threads_per_core,
+ $threads,
+ $time_limit, $time_secs,
+ $time_min,
+ $tmp_disk,
+ $unbuffered,
+ $user_id,
+ $version,
+ $verbose,
+ $wait,
+ $wc_key
+);
+
+my $aprun = "aprun";
+my $salloc = "BINDIR/salloc";
+my $srun = "BINDIR/srun";
+
+my $have_job;
+$aprun_line_buf = 1;
+$aprun_quiet = 1;
+$have_job = 0;
+
+foreach (keys %ENV) {
+# print "$_=$ENV{$_}\n";
+ $have_job = 1 if $_ eq "SLURM_JOBID";
+ $account = $ENV{$_} if $_ eq "SLURM_ACCOUNT";
+ $acctg_freq = $ENV{$_} if $_ eq "SLURM_ACCTG_FREQ";
+ $chdir = $ENV{$_} if $_ eq "SLURM_WORKING_DIR";
+ $check_time = $ENV{$_} if $_ eq "SLURM_CHECKPOINT";
+ $check_dir = $ENV{$_} if $_ eq "SLURM_CHECKPOINT_DIR";
+ $cpu_bind = $ENV{$_} if $_ eq "SLURM_CPU_BIND";
+ $cpus_per_task = $ENV{$_} if $_ eq "SLURM_CPUS_PER_TASK";
+ $dependency = $ENV{$_} if $_ eq "SLURM_DEPENDENCY";
+ $distribution = $ENV{$_} if $_ eq "SLURM_DISTRIBUTION";
+ $epilog = $ENV{$_} if $_ eq "SLURM_EPILOG";
+ $error_file = $ENV{$_} if $_ eq "SLURM_STDERRMODE";
+ $exclusive = 1 if $_ eq "SLURM_EXCLUSIVE";
+ $input_file = $ENV{$_} if $_ eq "SLURM_STDINMODE";
+ $job_name = $ENV{$_} if $_ eq "SLURM_JOB_NAME";
+ $label = 1 if $_ eq "SLURM_LABELIO";
+ $memory_bind = $ENV{$_} if $_ eq "SLURM_MEM_BIND";
+ $memory_per_cpu = $ENV{$_} if $_ eq "SLURM_MEM_PER_CPU";
+ $memory = $ENV{$_} if $_ eq "SLURM_MEM_PER_NODE";
+ $mpi_type = $ENV{$_} if $_ eq "SLURM_MPI_TYPE";
+ $network = $ENV{$_} if $_ eq "SLURM_NETWORK";
+ $ntasks_per_core = $ENV{$_} if $_ eq "SLURM_NTASKS_PER_CORE";
+ $ntasks_per_node = $ENV{$_} if $_ eq "SLURM_NTASKS_PER_NODE";
+ $ntasks_per_socket = $ENV{$_} if $_ eq "SLURM_NTASKS_PER_SOCKET";
+ $num_tasks = $ENV{$_} if $_ eq "SLURM_NTASKS";
+ $num_nodes = $ENV{$_} if $_ eq "SLURM_NNODES";
+ $overcommit = $ENV{$_} if $_ eq "SLURM_OVERCOMMIT";
+ $open_mode = $ENV{$_} if $_ eq "SLURM_OPEN_MODE";
+ $output_file = $ENV{$_} if $_ eq "SLURM_STDOUTMODE";
+ $partition = $ENV{$_} if $_ eq "SLURM_PARTITION";
+ $prolog = $ENV{$_} if $_ eq "SLURM_PROLOG";
+ $qos = $ENV{$_} if $_ eq "SLURM_QOS";
+ $restart_dir = $ENV{$_} if $_ eq "SLURM_RESTART_DIR";
+ $resv_ports = 1 if $_ eq "SLURM_RESV_PORTS";
+ $signal = $ENV{$_} if $_ eq "SLURM_SIGNAL";
+ $task_epilog = $ENV{$_} if $_ eq "SLURM_TASK_EPILOG";
+ $task_prolog = $ENV{$_} if $_ eq "SLURM_TASK_PROLOG";
+ $threads = $ENV{$_} if $_ eq "SLURM_THREADS";
+ $time_limit = $ENV{$_} if $_ eq "SLURM_TIMELIMIT";
+ $unbuffered = 1 if $_ eq "SLURM_UNBUFFEREDIO";
+ $wait = $ENV{$_} if $_ eq "SLURM_WAIT";
+ $wc_key = $ENV{$_} if $_ eq "SLURM_WCKEY";
+}
+
+# Make a full copy of the execute line. This is needed only so that srun can
+# run again and get the job's memory allocation for aprun (which is not
+# available until after the allocation has been made). Add quotes if an
+# argument contains spaces (e.g. --alps="-r 1" must be treated as one argument).
+my ($i, $len, $orig_exec_line);
+if ($ARGV[0]) {
+ foreach (@ARGV) {
+ if (index($_, " ") == -1) {
+ $orig_exec_line .= "$_ ";
+ } else {
+ $orig_exec_line .= "\"$_\" ";
+ }
+ }
+}
+
+GetOptions(
+ 'A|account=s' => \$account,
+ 'acctg-freq=i' => \$acctg_freq,
+ 'alps=s' => \$alps,
+ 'B|extra-node-info=s' => \$extra_node_info,
+ 'begin=s' => \$begin_time,
+ 'D|chdir=s' => \$chdir,
+ 'checkpoint=s' => \$check_time,
+ 'checkpoint-dir=s' => \$check_dir,
+ 'comment=s' => \$comment,
+ 'C|constraint=s' => \$constraint,
+ 'contiguous' => \$contiguous,
+ 'cores-per-socket=i' => \$cores_per_socket,
+ 'cpu_bind=s' => \$cpu_bind,
+ 'c|cpus-per-task=i' => \$cpus_per_task,
+ 'd|dependency=s' => \$dependency,
+ 'debugger-test' => \$debugger_test,
+ 'X|disable-status' => \$disable_status,
+ 'e|error=s' => \$error_file,
+ 'epilog=s' => \$epilog,
+ 'x|exclude=s' => \$exclude_nodes,
+ 'exclusive' => \$exclusive,
+ 'gid=s' => \$group_id,
+ 'gres=s' => \$gres,
+ 'help|usage|?' => \$help,
+ 'hint=s' => \$hint,
+ 'H|hold' => \$hold,
+ 'I|immediate' => \$immediate,
+ 'i|input=s' => \$input_file,
+ 'jobid=i' => \$job_id,
+ 'J|job-name=s' => \$job_name,
+ 'K|kill-on-bad-exit' => \$kill_on_bad_exit,
+ 'l|label' => \$label,
+ 'L|licenses=s' => \$licenses,
+ 'm|distribution=s' => \$distribution,
+ 'mail-type=s' => \$mail_type,
+ 'mail-user=s' => \$mail_user,
+ 'man' => \$man,
+ 'mem=s' => \$memory,
+ 'mem-per-cpu=s' => \$memory_per_cpu,
+ 'mem_bind=s' => \$memory_bind,
+ 'mincpus=i' => \$min_cpus,
+ 'msg-timeout=i' => \$msg_timeout,
+ 'mpi=s' => \$mpi_type,
+ 'multi-prog' => \$multi_prog,
+ 'network=s' => \$network,
+ 'nice=i' => \$nice,
+ 'Z|no-allocate' => \$no_allocate,
+ 'w|nodelist=s' => \$nodelist,
+ 'ntasks-per-core=i' => \$ntasks_per_core,
+ 'ntasks-per-node=i' => \$ntasks_per_node,
+ 'ntasks-per-socket=i' => \$ntasks_per_socket,
+ 'n|ntasks=s' => \$num_tasks,
+ 'N|nodes=s' => \$num_nodes,
+ 'O|overcommit' => \$overcommit,
+ 'o|output=s' => \$output_file,
+ 'open-mode=s' => \$open_mode,
+ 'p|partition=s' => \$partition,
+ 'E|preserve-env' => \$preserve_env,
+ 'prolog=s' => \$prolog,
+ 'propagate=s' => \$propagate,
+ 'pty' => \$pty,
+ 'Q|quiet' => \$quiet,
+ 'q|quit-on-interrupt' => \$quit_on_interrupt,
+ 'qos=s' => \$qos,
+ 'r|relative=i' => \$relative,
+ 'resv-ports' => \$resv_ports,
+ 'reservation=s' => \$reservation,
+ 'restart-dir=s' => \$restart_dir,
+ 's|share' => \$share,
+ 'signal=s' => \$signal,
+ 'slurmd-debug=i' => \$slurmd_debug,
+ 'sockets-per-node=i' => \$sockets_per_node,
+ 'task-epilog=s' => \$task_epilog,
+ 'task-prolog=s' => \$task_prolog,
+ 'test-only' => \$test_only,
+ 'threads-per-core=i' => \$threads_per_core,
+ 'T|threads=i' => \$threads,
+ 't|time=s' => \$time_limit,
+ 'time-min=s' => \$time_min,
+ 'tmp=s' => \$tmp_disk,
+ 'u|unbuffered' => \$unbuffered,
+ 'uid=s' => \$user_id,
+ 'V|version' => \$version,
+ 'v|verbose' => \$verbose,
+ 'W|wait=i' => \$wait,
+ 'wckey=s' => \$wc_key
+) or pod2usage(2);
+
+if ($version) {
+ system("$salloc --version");
+ exit(0);
+}
+
+# Display man page or usage if necessary
+pod2usage(0) if $man;
+if ($help) {
+ if ($< == 0) { # Cannot invoke perldoc as root
+ my $id = eval { getpwnam("nobody") };
+ $id = eval { getpwnam("nouser") } unless defined $id;
+ $id = -2 unless defined $id;
+ $< = $id;
+ }
+ $> = $<; # Disengage setuid
+ $ENV{PATH} = "/bin:/usr/bin"; # Untaint PATH
+ delete @ENV{'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};
+ if ($0 =~ /^([-\/\w\.]+)$/) { $0 = $1; } # Untaint $0
+ else { die "Illegal characters were found in \$0 ($0)\n"; }
+
+}
+
+my $script;
+if ($ARGV[0]) {
+ foreach (@ARGV) {
+ $script .= "$_ ";
+ }
+} else {
+ pod2usage(2);
+}
+my %res_opts;
+my %node_opts;
+
+my $command;
+
+if ($have_job == 0) {
+ if ($memory_per_cpu) {
+ $i = index($memory_per_cpu, "hs");
+ if ($i >= 0) {
+ $memory_per_cpu = substr($memory_per_cpu, 0, $i);
+ }
+ $i = index($memory_per_cpu, "h");
+ if ($i >= 0) {
+ $memory_per_cpu = substr($memory_per_cpu, 0, $i);
+ }
+ }
+
+ $command = "$salloc";
+ $command .= " --account=$account" if $account;
+ $command .= " --acctg-freq=$acctg_freq" if $acctg_freq;
+ $command .= " --begin=$begin_time" if $begin_time;
+ $command .= " --chdir=$chdir" if $chdir;
+ $command .= " --comment=\"$comment\"" if $comment;
+ $command .= " --constraint=\"$constraint\"" if $constraint;
+ $command .= " --contiguous" if $contiguous;
+ $command .= " --cores-per-socket=$cores_per_socket" if $cores_per_socket;
+ $command .= " --cpu_bind=$cpu_bind" if $cpu_bind;
+ $command .= " --cpus-per-task=$cpus_per_task" if $cpus_per_task;
+ $command .= " --dependency=$dependency" if $dependency;
+ $command .= " --distribution=$distribution" if $distribution;
+ $command .= " --exclude=$exclude_nodes" if $exclude_nodes;
+ $command .= " --exclusive" if $exclusive;
+ $command .= " --extra-node-info=$extra_node_info" if $extra_node_info;
+ $command .= " --gid=$group_id" if $group_id;
+ $command .= " --gres=$gres" if $gres;
+ $command .= " --hint=$hint" if $hint;
+ $command .= " --hold" if $hold;
+ $command .= " --immediate" if $immediate;
+ $command .= " --jobid=$job_id" if $job_id;
+ $command .= " --job-name=$job_name" if $job_name;
+ $command .= " --licenses=$licenses" if $licenses;
+ $command .= " --mail-type=$mail_type" if $mail_type;
+ $command .= " --mail-user=$mail_user" if $mail_user;
+ $command .= " --mem=$memory" if $memory;
+ $command .= " --mem-per-cpu=$memory_per_cpu" if $memory_per_cpu;
+ $command .= " --mem_bind=$memory_bind" if $memory_bind;
+ $command .= " --mincpus=$min_cpus" if $min_cpus;
+ $command .= " --network=$network" if $network;
+ $command .= " --nice=$nice" if $nice;
+ $command .= " --nodelist=$nodelist" if $nodelist;
+ $command .= " --ntasks-per-core=$ntasks_per_core" if $ntasks_per_core;
+ $command .= " --ntasks-per-node=$ntasks_per_node" if $ntasks_per_node;
+ $command .= " --ntasks-per-socket=$ntasks_per_socket" if $ntasks_per_socket;
+ $command .= " --ntasks=$num_tasks" if $num_tasks;
+ $command .= " --nodes=$num_nodes" if $num_nodes;
+ $command .= " --overcommit" if $overcommit;
+ $command .= " --partition=$partition" if $partition;
+ $command .= " --qos=$qos" if $qos;
+ $command .= " --quiet" if !$verbose;
+ $command .= " --reservation=$reservation" if $reservation;
+ $command .= " --share" if $share;
+ $command .= " --signal=$signal" if $signal;
+ $command .= " --sockets-per-node=$sockets_per_node" if $sockets_per_node;
+ $command .= " --threads-per-core=$threads_per_core" if $threads_per_core;
+ $command .= " --minthreads=$threads" if $threads;
+ $command .= " --time=$time_limit" if $time_limit;
+ $command .= " --time-min=$time_min" if $time_min;
+ $command .= " --tmp=$tmp_disk" if $tmp_disk;
+ $command .= " --uid=$user_id" if $user_id;
+ $command .= " --verbose" if $verbose;
+ $command .= " --wait=$wait" if $wait;
+ $command .= " --wckey=$wc_key" if $wc_key;
+ $command .= " $srun";
+ $command .= " $orig_exec_line";
+} else {
+ $command = "$aprun";
+
+	# Options that get set if aprun is launched either under salloc or directly
+ if ($alps) {
+ # aprun fails when arguments are duplicated, prevent duplicates here
+ $command .= " $alps";
+ if (index($alps, "-d") >= 0) { $cpus_per_task = 0 };
+ if (index($alps, "-L") >= 0) { $nodelist = 0 };
+ if (index($alps, "-m") >= 0) { $memory_per_cpu = 0 };
+ if (index($alps, "-n") >= 0) { $num_tasks = 0; $num_nodes = 0; }
+ if (index($alps, "-N") >= 0) { $ntasks_per_node = 0; $num_nodes = 0; }
+ if (index($alps, "-q") >= 0) { $aprun_quiet = 0 };
+ if (index($alps, "-S") >= 0) { $ntasks_per_socket = 0 };
+ if (index($alps, "-sn") >= 0) { $sockets_per_node = 0 };
+ if (index($alps, "-ss") >= 0) { $memory_bind = 0 };
+ if (index($alps, "-T") >= 0) { $aprun_line_buf = 0 };
+ if (index($alps, "-t") >= 0) { $time_limit = 0 };
+ }
+ # $command .= " -a" no srun equivalent, architecture
+ # $command .= " -b" no srun equivalent, bypass transfer of executable
+ # $command .= " -B" no srun equivalent, reservation options
+ # $command .= " -cc" NO GOOD MAPPING, cpu binding
+ $command .= " -d $cpus_per_task" if $cpus_per_task;
+ # Resource sharing largely controlled by SLURM configuration,
+ # so this is an imperfect mapping of options
+ if ($share) {
+ $command .= " -F share";
+ } elsif ($exclusive) {
+ $command .= " -F exclusive";
+ }
+ $nid_list = get_nids($nodelist) if $nodelist;
+ $command .= " -L $nid_list" if $nodelist;
+ $command .= " -m $memory_per_cpu" if $memory_per_cpu;
+ if ($ntasks_per_node) {
+ $command .= " -N $ntasks_per_node";
+ if (!$num_tasks && $num_nodes) {
+ $num_tasks = $ntasks_per_node * $num_nodes;
+ }
+ } elsif ($num_nodes) {
+ $num_tasks = $num_nodes if !$num_tasks;
+ $ntasks_per_node = int (($num_tasks + $num_nodes - 1) / $num_nodes);
+ $command .= " -N $ntasks_per_node";
+ }
+
+ if ($num_tasks) {
+ $command .= " -n $num_tasks";
+ } elsif ($num_nodes) {
+ $command .= " -n $num_nodes";
+ }
+
+ $command .= " -q" if $aprun_quiet;
+ # $command .= " -r" no srun equivalent, core specialization
+ $command .= " -S $ntasks_per_socket" if $ntasks_per_socket;
+ # $command .= " -sl" no srun equivalent, task placement on nodes
+ $command .= " -sn $sockets_per_node" if $sockets_per_node;
+ if ($memory_bind && ($memory_bind =~ /local/i)) {
+ $command .= " -ss"
+ }
+ $command .= " -T" if $aprun_line_buf;
+ $time_secs = get_seconds($time_limit) if $time_limit;
+ $command .= " -t $time_secs" if $time_secs;
+ $script = get_multi_prog($script) if $multi_prog;
+
+ # Input and output file options are not supported by aprun, but can be handled by perl
+ $command .= " <$input_file" if $input_file;
+ if ($error_file && ($error_file eq "none")) {
+ $error_file = "/dev/null"
+ }
+ if ($output_file && ($output_file eq "none")) {
+ $output_file = "/dev/null"
+ }
+ if ($open_mode && ($open_mode eq "a")) {
+ $command .= " >>$output_file" if $output_file;
+ if ($error_file) {
+ $command .= " 2>>$error_file";
+ } elsif ($output_file) {
+ $command .= " 2>&1";
+ }
+ } else {
+ $command .= " >$output_file" if $output_file;
+ if ($error_file) {
+ $command .= " 2>$error_file";
+ } elsif ($output_file) {
+ $command .= " 2>&1";
+ }
+ }
+
+ # Srun option which are not supported by aprun
+ # $command .= " --disable-status" if $disable_status;
+ # $command .= " --epilog=$epilog" if $epilog;
+ # $command .= " --kill-on-bad-exit" if $kill_on_bad_exit;
+ # $command .= " --label" if $label;
+ # $command .= " --mpi=$mpi_type" if $mpi_type;
+ # $command .= " --msg-timeout=$msg_timeout" if $msg_timeout;
+ # $command .= " --no-allocate" if $no_allocate;
+ # $command .= " --open-mode=$open_mode" if $open_mode;
+ # $command .= " --preserve_env" if $preserve_env;
+ # $command .= " --prolog=$prolog" if $prolog;
+ # $command .= " --propagate=$propagate" if $propagate;
+ # $command .= " --pty" if $pty;
+ # $command .= " --quit-on-interrupt" if $quit_on_interrupt;
+ # $command .= " --relative=$relative" if $relative;
+ # $command .= " --restart-dir=$restart_dir" if $restart_dir;
+ # $command .= " --resv-ports" if $resv_ports;
+ # $command .= " --slurmd-debug=$slurmd_debug" if $slurmd_debug;
+ # $command .= " --task-epilog=$task_epilog" if $task_epilog;
+ # $command .= " --task-prolog=$task_prolog" if $task_prolog;
+ # $command .= " --test-only" if $test_only;
+ # $command .= " --unbuffered" if $unbuffered;
+
+ $command .= " $script";
+}
+
+# Print here for debugging
+#print "command=$command\n";
+exec $command;
+
+# Convert a SLURM time format ([[HH:]MM:]SS or minutes) to a number of seconds
+sub get_seconds {
+	my ($duration) = @_;
+	$duration = 0 unless $duration;
+	my $seconds = 0;
+
+	# Convert [[HH:]MM:]SS to duration in seconds
+	if ($duration =~ /^(?:(\d+):)?(\d*):(\d+)$/) {
+		my ($hh, $mm, $ss) = ($1 || 0, $2 || 0, $3);
+		$seconds += $ss;
+		$seconds += $mm * 60;
+		$seconds += $hh * 3600;	# hours are 3600 seconds, not 60
+	} elsif ($duration =~ /^(\d+)$/) {	# Convert number in minutes to seconds
+		$seconds = $duration * 60;
+	} else {	# Unsupported format
+		die("Invalid time limit specified ($duration)\n");
+	}
+	return $seconds;
+}
+
+# Convert a SLURM hostlist expression into the equivalent node index value
+# expression
+sub get_nids {
+ my ($host_list) = @_;
+ my ($nid_list) = $host_list;
+
+ $nid_list =~ s/nid//g;
+ $nid_list =~ s/\[//g;
+ $nid_list =~ s/\]//g;
+ $nid_list =~ s/\d+/sprintf("%d", $&)/ge;
+
+ return $nid_list;
+}
+
+# Convert SLURM multi_prog file into a aprun options
+# srun file format is "task_IDs command args..."
+sub get_multi_prog {
+ my ($fname) = @_;
+ my ($out_line);
+ my ($line_num) = 0;
+ my (@words, $word, $word_num, $num_pes);
+
+ open(MP, $fname) || die("Can not read $fname");
+ while () {
+ chop;
+ if ($line_num != 0) {
+ $out_line .= " : ";
+ }
+ $line_num++;
+ @words = split(' ', $_);
+ $word_num = 0;
+ foreach $word (@words) {
+ if ($word_num == 0) {
+ $num_pes = get_num_pes($word);
+ $out_line .= " -n $num_pes";
+ } else {
+ $out_line .= " $word";
+ }
+ $word_num++;
+ }
+ }
+ return $out_line;
+}
+
+# Convert number ranges and sets into a total count
+sub get_num_pes {
+ my ($pes_range) = @_;
+ my (@ranges, $range);
+ my (@pairs, $value);
+ my ($min_value, $max_value);
+ my ($value_num);
+ my ($num_pes) = 0;
+
+ @ranges = split(',', $pes_range);
+ foreach $range (@ranges) {
+ @pairs = split('-', $range);
+ $value_num = 0;
+ foreach $value (@pairs) {
+ if ($value_num == 0) {
+ $min_value = $value;
+ }
+ $max_value = $value;
+ $value_num++;
+ }
+ $num_pes += ($max_value - $min_value + 1);
+ }
+ return $num_pes;
+}
+
+# Convert a size format containing optional K, M, G or T suffix to the
+# equivalent number of megabytes
+sub convert_mb_format {
+ my ($value) = @_;
+ my ($amount, $suffix) = $value =~ /(\d+)($|[KMGT])/i;
+ return if !$amount;
+ $suffix = lc($suffix);
+
+ if (!$suffix) {
+ $amount /= 1048576;
+ } elsif ($suffix eq "k") {
+ $amount /= 1024;
+ } elsif ($suffix eq "m") {
+ #do nothing this is what we want.
+ } elsif ($suffix eq "g") {
+ $amount *= 1024;
+ } elsif ($suffix eq "t") {
+ $amount *= 1048576;
+ } else {
+ print "don't know what to do with suffix $suffix\n";
+ return;
+
+ }
+
+ return $amount;
+}
+##############################################################################
+
+__END__
+
+=head1 NAME
+
+B - Run a parallel job
+
+=head1 SYNOPSIS
+
+srun [OPTIONS...] executable [arguments...]
+
+=head1 DESCRIPTION
+
+Run a parallel job on cluster managed by SLURM. If necessary, srun will
+first create a resource allocation in which to run the parallel job.
+
+=head1 OPTIONS
+
+NOTE: Many options apply only when creating a job allocation as noted
+below. When srun is executed within an existing job allocation, these options
+are silently ignored.
+The following aprun options have no equivalent in srun and must be specified
+by using the B<--alps> option: B<-a>, B<-b>, B<-B>, B<-cc>, B<-f>, B<-r>, and
+B<-sl>. Many other options do not have exact functionality matches, but
+duplicate srun behavior to the extent possible.
+
+=over 4
+
+=item B<-A> | B<--account=account>
+
+Charge resources used by this job to specified account.
+Applies only when creating a job allocation.
+
+=item B<--acctg-freq=seconds>
+
+Specify the accounting sampling interval.
+Applies only when creating a job allocation.
+
+=item B<--alps=options>
+
+Specify the options to be passed to the aprun command.
+If conflicting native srun options and --alps options are specified, the srun
+option will take precedence for creating the job allocation (if necessary) and
+the --alps options will take precedence for launching tasks with the aprun
+command.
+
+=item B<-B> | B<--extra-node-info=sockets[:cores[:threads]]>
+
+Request a specific allocation of resources with details as to the
+number and type of computational resources within a cluster:
+number of sockets (or physical processors) per node,
+cores per socket, and threads per core.
+The individual levels can also be specified in separate options if desired:
+B<--sockets-per-node=sockets>, B<--cores-per-socket=cores>, and
+B<--threads-per-core=threads>.
+Applies only when creating a job allocation.
+
+=item B<--begin=time>
+
+Defer job initiation until the specified time.
+Applies only when creating a job allocation.
+
+=item B<--checkpoint=interval>
+
+Specify the time interval between checkpoint creations.
+Not supported on Cray computers.
+
+=item B<--checkpoint-dir=directory>
+
+Directory where the checkpoint image should be written.
+Not supported on Cray computers.
+
+=item B<--comment=string>
+
+An arbitrary comment.
+Applies only when creating a job allocation.
+
+=item B<-C> | B<--constraint=string>
+
+Constrain job allocation to nodes with the specified features.
+Applies only when creating a job allocation.
+
+=item B<--contiguous>
+
+Constrain job allocation to contiguous nodes.
+Applies only when creating a job allocation.
+
+=item B<--cores-per-socket=number>
+
+Count of cores to be allocated per socket.
+Applies only when creating a job allocation.
+
+=item B<--cpu_bind=options>
+
+Strategy to be used for binding tasks to the CPUs.
+Not supported on Cray computers due to many incompatible options.
+Use --alps="-cc=..." instead.
+
+=item B<-c> | B<--cpus-per-task=number>
+
+Count of CPUs required per task.
+
+=item B<-d> | B<--dependency=[condition:]jobid>
+
+Wait for job(s) to enter specified condition before starting the job.
+Valid conditions include after, afterany, afternotok, and singleton.
+Applies only when creating a job allocation.
+
+=item B<-D> | B<--chdir=directory>
+
+Execute the program from the specified directory.
+Applies only when creating a job allocation.
+
+=item B<--epilog=filename>
+
+Execute the specified program after the job step completes.
+Not supported on Cray computers.
+
+=item B<-e> | B<--error=filename>
+
+Write stderr to the specified file.
+
+=item B<--exclusive>
+
+The job or job step will not share resources with other jobs or job steps.
+Applies only when creating a job allocation.
+
+=item B<-E> | B<--preserve-env>
+
+Pass the current values of environment variables SLURM_NNODES and
+SLURM_NTASKS through to the executable, rather than computing them
+from command line parameters.
+Not supported on Cray computers.
+
+=item B<--gid=group>
+
+If user root, then execute the job using the specified group access permissions.
+Specify either a group name or ID.
+Applies only when creating a job allocation.
+
+=item B<--gres=gres_name[*count]>
+
+Allocate the specified generic resources on each allocated node.
+Applies only when creating a job allocation.
+
+=item B<-?> | B<--help>
+
+Print brief help message.
+
+=item B<--hint=type>
+
+Bind tasks according to application hints.
+Not supported on Cray computers.
+
+=item B<-H> | B<--hold>
+
+Submit the job in a held state.
+Applies only when creating a job allocation.
+
+=item B<-I> | B<--immediate>
+
+Exit if resources are not available immediately.
+Applies only when creating a job allocation.
+
+=item B<-i> | B<--input=filename>
+
+Read stdin from the specified file.
+
+=item B<--jobid=number>
+
+Specify the job ID number. Usable only by SlurmUser or user root.
+Applies only when creating a job allocation.
+
+=item B<-J> | B<--job-name=name>
+
+Specify a name for the job.
+Applies only when creating a job allocation.
+
+=item B<-K> | B<--kill-on-bad-exit>
+
+Immediately terminate a job if any task exits with a non-zero exit code.
+Not supported on Cray computers.
+
+=item B<-l> | B<--label>
+
+Prepend task number to lines of stdout/err.
+Not supported on Cray computers.
+
+=item B<-l> | B<--licenses=names>
+
+Specification of licenses (or other resources available on all
+nodes of the cluster) which must be allocated to this job.
+Applies only when creating a job allocation.
+
+=item B<-m> | B<--distribution=layout>
+
+Specification of distribution of tasks across nodes.
+Not supported on Cray computers.
+
+=item B<--man>
+
+Print full documentation.
+
+=item B<--mail-type=event>
+
+Send email when certain event types occur.
+Valid events values are BEGIN, END, FAIL, REQUEUE, and ALL (any state change).
+Applies only when creating a job allocation.
+
+=item B<--mail-user=user>
+
+Send email to the specified user(s). The default is the submitting user.
+Applies only when creating a job allocation.
+
+=item B<--mem=MB>
+
+Specify the real memory required per node in MegaBytes.
+Applies only when creating a job allocation.
+
+=item B<--mem-per-cpu=MB>[h|hs]
+
+Specify the real memory required per CPU in MegaBytes.
+Applies only when creating a job allocation.
+Append "h" or "hs" for huge page support.
+
+=item B<--mem_bind=type>
+
+Bind tasks to memory. The only option supported on Cray systems is local which
+confines memory use to the local NUMA node.
+
+=item B<--mincpus>
+
+Specify a minimum number of logical CPUs per node.
+Applies only when creating a job allocation.
+
+=item B<--msg-timeout=second>
+
+Modify the job launch message timeout.
+Not supported on Cray computers.
+
+=item B<--mpi=implementation>
+
+Identify the type of MPI to be used. May result in unique initiation
+procedures.
+Not supported on Cray computers.
+
+=item B<--multi-prog>
+
+Run a job with different programs and different arguments for
+each task. In this case, the executable program specified is
+actually a configuration file specifying the executable and
+arguments for each task.
+
+=item B<--network=type>
+
+Specify the communication protocol to be used.
+Not supported on Cray computers.
+
+=item B<--nice=adjustment>
+
+Run the job with an adjusted scheduling priority within SLURM.
+Applies only when creating a job allocation.
+
+=item B<--ntasks-per-core=ntasks>
+
+Request the maximum ntasks be invoked on each core.
+Applies only when creating a job allocation.
+
+=item B<--ntasks-per-node=ntasks>
+
+Request the maximum ntasks be invoked on each node.
+Applies only when creating a job allocation.
+
+=item B<--ntasks-per-socket=ntasks>
+
+Request the maximum ntasks be invoked on each socket.
+Applies only when creating a job allocation.
+
+=item B<-N> | B<--nodes=num_nodes>
+
+Number of nodes to use.
+
+=item B<-n> | B<--ntasks=num_tasks>
+
+Number of tasks to launch.
+
+=item B<--overcommit>
+
+Overcommit resources. Launch more than one task per CPU.
+Applies only when creating a job allocation.
+
+=item B<-o> | B<--output=filename>
+
+Write stdout to the specified file.
+
+=item B<--open-mode=append|truncate>
+
+Open the output and error files using append or truncate mode as specified.
+
+=item B<--partition=name>
+
+Request a specific partition for the resource allocation.
+Applies only when creating a job allocation.
+
+=item B<--prolog=filename>
+
+Execute the specified file before launching the job step.
+Not supported on Cray computers.
+
+=item B<--propagate=rlimits>
+
+Allows users to specify which of the modifiable (soft) resource limits
+to propagate to the compute nodes and apply to their jobs.
+Not supported on Cray computers.
+
+=item B<--pty>
+
+Execute task zero in pseudo terminal mode.
+Not supported on Cray computers.
+
+=item B<--quiet>
+
+Suppress informational messages. Errors will still be displayed.
+
+=item B<-q> | B<--quit-on-interrupt>
+
+Quit immediately on single SIGINT (Ctrl-C).
+This is the default behavior on Cray computers.
+
+=item B<--qos=quality_of_service>
+
+Request a specific quality of service for the job.
+Applies only when creating a job allocation.
+
+=item B<-r> | B<--relative=offset>
+
+Run a job step at the specified node offset in the current allocation.
+Not supported on Cray computers.
+
+=item B<--resv-ports=filename>
+
+Reserve communication ports for this job. Used for OpenMPI.
+Not supported on Cray computers.
+
+=item B<--reservation=name>
+
+Allocate resources for the job from the named reservation.
+Applies only when creating a job allocation.
+
+=item B<--restart-dir=directory>
+
+Specifies the directory from which the job or job step's checkpoint should
+be read.
+Not supported on Cray computers.
+
+=item B<-s> | B<--share>
+
+The job can share nodes with other running jobs.
+Applies only when creating a job allocation.
+
+=item B<--signal=signal_number[@seconds]>
+
+When a job is within the specified number seconds of its end time,
+send it the specified signal number.
+
+=item B<--slurmd-debug=level>
+
+Specify a debug level for slurmd daemon.
+Not supported on Cray computers.
+
+=item B<--sockets-per-node=number>
+
+Allocate the specified number of sockets per node.
+Applies only when creating a job allocation.
+
+=item B<--task-epilog=filename>
+
+Execute the specified program after each task terminates.
+Not supported on Cray computers.
+
+=item B<--task-prolog=filename>
+
+Execute the specified program before launching each task.
+Not supported on Cray computers.
+
+=item B<--test-only>
+
+Returns an estimate of when a job would be scheduled.
+Not supported on Cray computers.
+
+=item B<-t> | B<--time=limit>
+
+Time limit in minutes or hours:minutes:seconds.
+
+=item B<--time-min=limit>
+
+The minimum acceptable time limit in minutes or hours:minutes:seconds.
+The default value is the same as the maximum time limit.
+Applies only when creating a job allocation.
+
+=item B<--tmp=mb>
+
+Specify a minimum amount of temporary disk space.
+Applies only when creating a job allocation.
+
+=item B<-u> | B<--unbuffered>
+
+Do not line buffer stdout from remote tasks.
+Not supported on Cray computers.
+
+=item B<--uid=user>
+
+If user root, then execute the job as the specified user.
+Specify either a user name or ID.
+Applies only when creating a job allocation.
+
+=item B<--usage>
+
+Print brief help message.
+
+=item B<-V> | B<--version>
+
+Display version information and exit.
+
+=item B<-v> | B<--verbose>
+
+Increase the verbosity of srun's informational messages.
+
+=item B<-W> | B<--wait=seconds>
+
+Specify how long to wait after the first task terminates before terminating
+all remaining tasks.
+Not supported on Cray computers.
+
+=item B<-w> | B<--nodelist=hostlist|filename>
+
+Request a specific list of hosts to use.
+
+=item B<--wckey=key>
+
+Specify wckey to be used with job.
+Applies only when creating a job allocation.
+
+=item B<-X> | B<--disable-status>
+
+Disable the display of task status when srun receives a single SIGINT (Ctrl-C).
+Not supported on Cray computers.
+
+=item B<-x> | B<--exclude=hostlist>
+
+Request a specific list of hosts not to use.
+Applies only when creating a job allocation.
+
+=item B<-Z> | B<--no-allocate>
+
+Run the specified tasks on a set of nodes without creating a SLURM
+"job" in the SLURM queue structure, bypassing the normal resource
+allocation step.
+Not supported on Cray computers.
+
+=back
+
+=cut
diff -Nru slurm-llnl-2.2.7/contribs/env_cache_builder.c slurm-llnl-2.3.2/contribs/env_cache_builder.c
--- slurm-llnl-2.2.7/contribs/env_cache_builder.c 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/env_cache_builder.c 2011-12-05 17:20:08.000000000 +0000
@@ -30,7 +30,7 @@
* CODE-OCEC-09-009. All rights reserved.
*
* This file is part of SLURM, a resource management program.
- * For details, see <https://computing.llnl.gov/linux/slurm/>.
+ * For details, see <http://www.schedmd.com/slurmdocs/>.
* Please also read the included file: DISCLAIMER.
*
* SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/lua/job_submit.license.lua slurm-llnl-2.3.2/contribs/lua/job_submit.license.lua
--- slurm-llnl-2.2.7/contribs/lua/job_submit.license.lua 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/lua/job_submit.license.lua 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,105 @@
+--[[
+
+ Example lua script demonstrating the SLURM job_submit/lua interface.
+ This is only an example, not meant for use in its current form.
+
+ For use, this script should be copied into a file named "job_submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
+
+--]]
+
+function _limit_license_cnt(orig_string, license_name, max_count)
+ local i = 0
+ local j = 0
+ local val = 0
+
+ if orig_string == nil then
+ return 0
+ end
+
+ i, j, val = string.find(orig_string, license_name .. "%*(%d)")
+-- if val ~= nil then log_info("name:%s count:%s", license_name, val) end
+ if val ~= nil and val + 0 > max_count then
+ return 1
+ end
+ return 0
+end
+
+--########################################################################--
+--
+-- SLURM job_submit/lua interface:
+--
+--########################################################################--
+
+function slurm_job_submit ( job_desc, part_list )
+ setmetatable (job_desc, job_req_meta)
+ local bad_license_count = 0
+
+ bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratcha", 1)
+ bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchb", 1) + bad_license_count
+ bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchc", 1) + bad_license_count
+ if bad_license_count > 0 then
+ log_info("slurm_job_submit: for user %d, invalid licenses value: %s",
+ job_desc.user_id, job_desc.licenses)
+-- ESLURM_INVALID_LICENSES is 2048
+ return 2048
+ end
+
+ return 0
+end
+
+function slurm_job_modify ( job_desc, job_rec, part_list )
+ setmetatable (job_desc, job_req_meta)
+ setmetatable (job_rec, job_rec_meta)
+ local bad_license_count = 0
+
+-- *** YOUR LOGIC GOES BELOW ***
+ bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratcha", 1)
+ bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchb", 1) + bad_license_count
+ bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchc", 1) + bad_license_count
+ if bad_license_count > 0 then
+ log_info("slurm_job_modify: for job %u, invalid licenses value: %s",
+ job_rec.job_id, job_desc.licenses)
+-- ESLURM_INVALID_LICENSES is 2048
+ return 2048
+ end
+
+ return 0
+end
+
+--########################################################################--
+--
+-- Initialization code:
+--
+-- Define functions for logging and accessing slurmctld structures
+--
+--########################################################################--
+
+
+log_info = slurm.log_info
+log_verbose = slurm.log_verbose
+log_debug = slurm.log_debug
+log_err = slurm.error
+
+job_rec_meta = {
+ __index = function (table, key)
+ return _get_job_rec_field(table.job_rec_ptr, key)
+ end
+}
+job_req_meta = {
+ __index = function (table, key)
+ return _get_job_req_field(table.job_desc_ptr, key)
+ end,
+ __newindex = function (table, key, value)
+ return _set_job_req_field(table.job_desc_ptr, key, value)
+ end
+}
+part_rec_meta = {
+ __index = function (table, key)
+ return _get_part_rec_field(table.part_rec_ptr, key)
+ end
+}
+
+log_info("initialized")
+
+return slurm.SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/lua/job_submit.lua slurm-llnl-2.3.2/contribs/lua/job_submit.lua
--- slurm-llnl-2.2.7/contribs/lua/job_submit.lua 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/lua/job_submit.lua 2011-12-05 17:20:08.000000000 +0000
@@ -2,19 +2,21 @@
Example lua script demonstrating the SLURM job_submit/lua interface.
This is only an example, not meant for use in its current form.
+
Leave the function names, arguments, local varialbes and setmetatable
set up logic in each function unchanged. Change only the logic after
the line containing "*** YOUR LOGIC GOES BELOW ***".
+ For use, this script should be copied into a file named "job_submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
+
--]]
function _build_part_table ( part_list )
local part_rec = {}
- local i = 1
- while part_list[i] do
+ for i in ipairs(part_list) do
part_rec[i] = { part_rec_ptr=part_list[i] }
setmetatable (part_rec[i], part_rec_meta)
- i = i + 1
end
return part_rec
end
@@ -42,8 +44,7 @@
local new_partition = nil
local top_priority = -1
local last_priority = -1
- local i = 1
- while part_rec[i] do
+ for i in ipairs(part_rec) do
-- log_info("part name[%d]:%s", i, part_rec[i].name)
if part_rec[i].flag_default ~= 0 then
top_priority = -1
@@ -54,7 +55,6 @@
top_priority = last_priority
new_partition = part_rec[i].name
end
- i = i + 1
end
if top_priority >= 0 then
log_info("slurm_job_submit: job from uid %d, setting default partition value: %s",
@@ -106,7 +106,7 @@
return _get_job_req_field(table.job_desc_ptr, key)
end,
__newindex = function (table, key, value)
- return _set_job_req_field(table.job_desc_ptr, key, value)
+ return _set_job_req_field(table.job_desc_ptr, key, value or "")
end
}
part_rec_meta = {
diff -Nru slurm-llnl-2.2.7/contribs/lua/Makefile.am slurm-llnl-2.3.2/contribs/lua/Makefile.am
--- slurm-llnl-2.2.7/contribs/lua/Makefile.am 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/lua/Makefile.am 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,4 @@
+EXTRA_DIST = \
+ job_submit.license.lua \
+ job_submit.lua \
+ proctrack.lua
diff -Nru slurm-llnl-2.2.7/contribs/lua/Makefile.in slurm-llnl-2.3.2/contribs/lua/Makefile.in
--- slurm-llnl-2.2.7/contribs/lua/Makefile.in 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/lua/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,475 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/lua
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+ $(top_srcdir)/auxdir/libtool.m4 \
+ $(top_srcdir)/auxdir/ltoptions.m4 \
+ $(top_srcdir)/auxdir/ltsugar.m4 \
+ $(top_srcdir)/auxdir/ltversion.m4 \
+ $(top_srcdir)/auxdir/lt~obsolete.m4 \
+ $(top_srcdir)/auxdir/slurm.m4 \
+ $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+ $(top_srcdir)/auxdir/x_ac_affinity.m4 \
+ $(top_srcdir)/auxdir/x_ac_aix.m4 \
+ $(top_srcdir)/auxdir/x_ac_blcr.m4 \
+ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+ $(top_srcdir)/auxdir/x_ac_cflags.m4 \
+ $(top_srcdir)/auxdir/x_ac_cray.m4 \
+ $(top_srcdir)/auxdir/x_ac_databases.m4 \
+ $(top_srcdir)/auxdir/x_ac_debug.m4 \
+ $(top_srcdir)/auxdir/x_ac_elan.m4 \
+ $(top_srcdir)/auxdir/x_ac_env.m4 \
+ $(top_srcdir)/auxdir/x_ac_federation.m4 \
+ $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+ $(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+ $(top_srcdir)/auxdir/x_ac_iso.m4 \
+ $(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
+ $(top_srcdir)/auxdir/x_ac_munge.m4 \
+ $(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+ $(top_srcdir)/auxdir/x_ac_pam.m4 \
+ $(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+ $(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+ $(top_srcdir)/auxdir/x_ac_readline.m4 \
+ $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+ $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+ $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+ $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
+ $(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+ $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = \
+ job_submit.license.lua \
+ job_submit.lua \
+ proctrack.lua
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu contribs/lua/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --gnu contribs/lua/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+ @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+ list='$(DISTFILES)'; \
+ dist_files=`for file in $$list; do echo $$file; done | \
+ sed -e "s|^$$srcdirstrip/||;t" \
+ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+ case $$dist_files in \
+ */*) $(MKDIR_P) `echo "$$dist_files" | \
+ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+ sort -u` ;; \
+ esac; \
+ for file in $$dist_files; do \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ if test -d $$d/$$file; then \
+ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test -d "$(distdir)/$$file"; then \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+ fi; \
+ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+ else \
+ test -f "$(distdir)/$$file" \
+ || cp -p $$d/$$file "$(distdir)/$$file" \
+ || exit 1; \
+ fi; \
+ done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+ distclean distclean-generic distclean-libtool distdir dvi \
+ dvi-am html html-am info info-am install install-am \
+ install-data install-data-am install-dvi install-dvi-am \
+ install-exec install-exec-am install-html install-html-am \
+ install-info install-info-am install-man install-pdf \
+ install-pdf-am install-ps install-ps-am install-strip \
+ installcheck installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff -Nru slurm-llnl-2.2.7/contribs/Makefile.am slurm-llnl-2.3.2/contribs/Makefile.am
--- slurm-llnl-2.2.7/contribs/Makefile.am 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/Makefile.am 2011-12-05 17:20:08.000000000 +0000
@@ -1,4 +1,4 @@
-SUBDIRS = pam perlapi torque sjobexit slurmdb-direct
+SUBDIRS = arrayrun cray lua pam perlapi torque sjobexit slurmdb-direct
EXTRA_DIST = \
env_cache_builder.c \
diff -Nru slurm-llnl-2.2.7/contribs/Makefile.in slurm-llnl-2.3.2/contribs/Makefile.in
--- slurm-llnl-2.2.7/contribs/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -60,6 +60,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
@@ -312,7 +320,7 @@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
-SUBDIRS = pam perlapi torque sjobexit slurmdb-direct
+SUBDIRS = arrayrun cray lua pam perlapi torque sjobexit slurmdb-direct
EXTRA_DIST = \
env_cache_builder.c \
make.slurm.patch \
diff -Nru slurm-llnl-2.2.7/contribs/pam/Makefile.in slurm-llnl-2.3.2/contribs/pam/Makefile.in
--- slurm-llnl-2.2.7/contribs/pam/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/pam/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -64,6 +64,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -145,7 +147,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -182,6 +187,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -239,6 +245,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -274,6 +281,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/pam/pam_slurm.c slurm-llnl-2.3.2/contribs/pam/pam_slurm.c
--- slurm-llnl-2.2.7/contribs/pam/pam_slurm.c 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/pam/pam_slurm.c 2011-12-05 17:20:08.000000000 +0000
@@ -136,13 +136,16 @@
if ((auth != PAM_SUCCESS) && (!opts.enable_silence))
_send_denial_msg(pamh, &opts, user, uid);
+
+ /*
+ * Generate an entry to the system log if access was
+ * denied (!PAM_SUCCESS) or disable_sys_info is not set
+ */
if ((auth != PAM_SUCCESS) || (!opts.disable_sys_info)) {
_log_msg(LOG_INFO, "access %s for user %s (uid=%d)",
(auth == PAM_SUCCESS) ? "granted" : "denied",
user, uid);
}
- _log_msg(LOG_INFO, "access %s for user %s (uid=%d)",
- (auth == PAM_SUCCESS) ? "granted" : "denied", user, uid);
return(auth);
}
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurm/Makefile.in slurm-llnl-2.3.2/contribs/perlapi/libslurm/Makefile.in
--- slurm-llnl-2.2.7/contribs/perlapi/libslurm/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurm/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -60,6 +60,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/alloc.c slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/alloc.c
--- slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/alloc.c 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/alloc.c 2011-12-05 17:20:08.000000000 +0000
@@ -182,7 +182,23 @@
job_desc->geometry[i] = SvUV(*svp);
}
}
- FETCH_FIELD(hv, job_desc, conn_type, uint16_t, FALSE);
+ if((svp = hv_fetch(hv, "conn_type", 9, FALSE))) {
+ AV *av;
+ if (!SvROK(*svp) || SvTYPE(SvRV(*svp)) != SVt_PVAV) {
+ Perl_warn(aTHX_ "`conn_type' is not an array reference in job descriptor");
+ free_job_desc_msg_memory(job_desc);
+ return -1;
+ }
+ av = (AV*)SvRV(*svp);
+ for(i = 0; i < HIGHEST_DIMENSIONS; i ++) {
+ if(! (svp = av_fetch(av, i, FALSE))) {
+ Perl_warn(aTHX_ "conn_type of dimension %d missing in job descriptor", i);
+ free_job_desc_msg_memory(job_desc);
+ return -1;
+ }
+ job_desc->conn_type[i] = SvUV(*svp);
+ }
+ }
FETCH_FIELD(hv, job_desc, reboot, uint16_t, FALSE);
FETCH_FIELD(hv, job_desc, rotate, uint16_t, FALSE);
FETCH_FIELD(hv, job_desc, blrtsimage, charp, FALSE);
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/block.c slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/block.c
--- slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/block.c 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/block.c 2011-12-05 17:20:08.000000000 +0000
@@ -17,27 +17,36 @@
int
block_info_to_hv(block_info_t *block_info, HV *hv)
{
+ int dim;
+ AV* av = NULL;
+
if(block_info->bg_block_id)
STORE_FIELD(hv, block_info, bg_block_id, charp);
if(block_info->blrtsimage)
STORE_FIELD(hv, block_info, blrtsimage, charp);
- if (block_info->bp_inx) {
+ if (block_info->mp_inx) {
int j;
- AV* av = newAV();
+ av = newAV();
for(j = 0; ; j += 2) {
- if(block_info->bp_inx[j] == -1)
+ if(block_info->mp_inx[j] == -1)
break;
- av_store(av, j, newSVuv(block_info->bp_inx[j]));
- av_store(av, j+1, newSVuv(block_info->bp_inx[j+1]));
+ av_store(av, j, newSVuv(block_info->mp_inx[j]));
+ av_store(av, j+1, newSVuv(block_info->mp_inx[j+1]));
}
- hv_store_sv(hv, "bp_inx", newRV_noinc((SV*)av));
+ hv_store_sv(hv, "mp_inx", newRV_noinc((SV*)av));
}
- STORE_FIELD(hv, block_info, conn_type, uint16_t);
- if(block_info->ionodes)
- STORE_FIELD(hv, block_info, ionodes, charp);
+
+ av = newAV();
+	for (dim=0; dim<HIGHEST_DIMENSIONS; dim++)
+		av_store(av, dim, newSVuv(block_info->conn_type[dim]));
+
+ hv_store_sv(hv, "conn_type", newRV_noinc((SV*)av));
+
+ if(block_info->ionode_str)
+ STORE_FIELD(hv, block_info, ionode_str, charp);
if (block_info->ionode_inx) {
int j;
- AV* av = newAV();
+ av = newAV();
for(j = 0; ; j += 2) {
if(block_info->ionode_inx[j] == -1)
break;
@@ -51,9 +60,9 @@
STORE_FIELD(hv, block_info, linuximage, charp);
if(block_info->mloaderimage)
STORE_FIELD(hv, block_info, mloaderimage, charp);
- if(block_info->nodes)
- STORE_FIELD(hv, block_info, nodes, charp);
- STORE_FIELD(hv, block_info, node_cnt, uint32_t);
+ if(block_info->mp_str)
+ STORE_FIELD(hv, block_info, mp_str, charp);
+ STORE_FIELD(hv, block_info, cnode_cnt, uint32_t);
STORE_FIELD(hv, block_info, node_use, uint16_t);
if (block_info->owner_name)
STORE_FIELD(hv, block_info, owner_name, charp);
@@ -79,21 +88,29 @@
FETCH_FIELD(hv, block_info, bg_block_id, charp, FALSE);
FETCH_FIELD(hv, block_info, blrtsimage, charp, FALSE);
- svp = hv_fetch(hv, "bp_inx", 6, FALSE);
+ svp = hv_fetch(hv, "mp_inx", 6, FALSE);
if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
av = (AV*)SvRV(*svp);
n = av_len(av) + 2; /* for trailing -1 */
- block_info->bp_inx = xmalloc(n * sizeof(int));
+ block_info->mp_inx = xmalloc(n * sizeof(int));
for (i = 0 ; i < n-1; i += 2) {
- block_info->bp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
- block_info->bp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
+ block_info->mp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
+ block_info->mp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
}
- block_info->bp_inx[n-1] = -1;
+ block_info->mp_inx[n-1] = -1;
} else {
/* nothing to do */
}
- FETCH_FIELD(hv, block_info, conn_type, uint16_t, TRUE);
- FETCH_FIELD(hv, block_info, ionodes, charp, FALSE);
+ svp = hv_fetch(hv, "conn_type", 9, FALSE);
+ if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
+ av = (AV*)SvRV(*svp);
+ n = av_len(av); /* for trailing -1 */
+ for (i = 0 ; i < HIGHEST_DIMENSIONS; i++)
+ block_info->conn_type[i] = SvUV(*(av_fetch(av, i, FALSE)));
+ } else {
+ /* nothing to do */
+ }
+ FETCH_FIELD(hv, block_info, ionode_str, charp, FALSE);
svp = hv_fetch(hv, "ionode_inx", 10, FALSE);
if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
av = (AV*)SvRV(*svp);
@@ -110,8 +127,8 @@
FETCH_FIELD(hv, block_info, job_running, uint32_t, TRUE);
FETCH_FIELD(hv, block_info, linuximage, charp, FALSE);
FETCH_FIELD(hv, block_info, mloaderimage, charp, FALSE);
- FETCH_FIELD(hv, block_info, nodes, charp, FALSE);
- FETCH_FIELD(hv, block_info, node_cnt, uint32_t, TRUE);
+ FETCH_FIELD(hv, block_info, mp_str, charp, FALSE);
+ FETCH_FIELD(hv, block_info, cnode_cnt, uint32_t, TRUE);
FETCH_FIELD(hv, block_info, node_use, uint16_t, TRUE);
FETCH_FIELD(hv, block_info, owner_name, charp, FALSE);
FETCH_FIELD(hv, block_info, ramdiskimage, charp, FALSE);
@@ -200,21 +217,28 @@
FETCH_FIELD(hv, update_msg, bg_block_id, charp, FALSE);
FETCH_FIELD(hv, update_msg, blrtsimage, charp, FALSE);
- svp = hv_fetch(hv, "bp_inx", 6, FALSE);
+ svp = hv_fetch(hv, "mp_inx", 6, FALSE);
if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
av = (AV*)SvRV(*svp);
n = av_len(av) + 2; /* for trailing -1 */
- update_msg->bp_inx = xmalloc(n * sizeof(int));
+ update_msg->mp_inx = xmalloc(n * sizeof(int));
for (i = 0 ; i < n-1; i += 2) {
- update_msg->bp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
- update_msg->bp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
+ update_msg->mp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
+ update_msg->mp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
}
- update_msg->bp_inx[n-1] = -1;
+ update_msg->mp_inx[n-1] = -1;
+ } else {
+ /* nothing to do */
+ }
+ svp = hv_fetch(hv, "conn_type", 9, FALSE);
+ if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
+ av = (AV*)SvRV(*svp);
+ for (i = 0 ; i < HIGHEST_DIMENSIONS; i++)
+ update_msg->conn_type[i] = SvUV(*(av_fetch(av, i, FALSE)));
} else {
/* nothing to do */
}
- FETCH_FIELD(hv, update_msg, conn_type, uint16_t, FALSE);
- FETCH_FIELD(hv, update_msg, ionodes, charp, FALSE);
+ FETCH_FIELD(hv, update_msg, ionode_str, charp, FALSE);
svp = hv_fetch(hv, "ionode_inx", 10, FALSE);
if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
av = (AV*)SvRV(*svp);
@@ -231,8 +255,8 @@
FETCH_FIELD(hv, update_msg, job_running, uint32_t, FALSE);
FETCH_FIELD(hv, update_msg, linuximage, charp, FALSE);
FETCH_FIELD(hv, update_msg, mloaderimage, charp, FALSE);
- FETCH_FIELD(hv, update_msg, nodes, charp, FALSE);
- FETCH_FIELD(hv, update_msg, node_cnt, uint32_t, FALSE);
+ FETCH_FIELD(hv, update_msg, mp_str, charp, FALSE);
+ FETCH_FIELD(hv, update_msg, cnode_cnt, uint32_t, FALSE);
FETCH_FIELD(hv, update_msg, node_use, uint16_t, FALSE);
FETCH_FIELD(hv, update_msg, owner_name, charp, FALSE);
FETCH_FIELD(hv, update_msg, ramdiskimage, charp, FALSE);
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/job.c slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/job.c
--- slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/job.c 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/job.c 2011-12-05 17:20:08.000000000 +0000
@@ -109,6 +109,7 @@
av_store(av, j+1, newSVuv(job_info->req_node_inx[j+1]));
}
hv_store_sv(hv, "req_node_inx", newRV_noinc((SV*)av));
+ STORE_FIELD(hv, job_info, req_switch, uint32_t);
STORE_FIELD(hv, job_info, requeue, uint16_t);
STORE_FIELD(hv, job_info, resize_time, time_t);
STORE_FIELD(hv, job_info, restart_cnt, uint16_t);
@@ -127,6 +128,7 @@
STORE_FIELD(hv, job_info, time_limit, uint32_t);
STORE_FIELD(hv, job_info, time_min, uint32_t);
STORE_FIELD(hv, job_info, user_id, uint32_t);
+ STORE_FIELD(hv, job_info, wait4switch, uint32_t);
if(job_info->wckey)
STORE_FIELD(hv, job_info, wckey, charp);
if(job_info->work_dir)
@@ -226,6 +228,7 @@
} else {
/* nothing to do */
}
+ FETCH_FIELD(hv, job_info, req_switch, uint32_t, FALSE);
FETCH_FIELD(hv, job_info, requeue, uint16_t, TRUE);
FETCH_FIELD(hv, job_info, resize_time, time_t, TRUE);
FETCH_FIELD(hv, job_info, restart_cnt, uint16_t, TRUE);
@@ -241,6 +244,7 @@
FETCH_FIELD(hv, job_info, suspend_time, time_t, TRUE);
FETCH_FIELD(hv, job_info, time_limit, uint32_t, TRUE);
FETCH_FIELD(hv, job_info, time_min, uint32_t, TRUE);
+ FETCH_FIELD(hv, job_info, wait4switch, uint32_t, FALSE);
FETCH_FIELD(hv, job_info, wckey, charp, FALSE);
FETCH_FIELD(hv, job_info, work_dir, charp, FALSE);
return 0;
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/step.c slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/step.c
--- slurm-llnl-2.2.7/contribs/perlapi/libslurm/perl/step.c 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurm/perl/step.c 2011-12-05 17:20:08.000000000 +0000
@@ -178,6 +178,8 @@
AV* av, *av2;
int i, j;
+ if (step_layout->front_end)
+ STORE_FIELD(hv, step_layout, front_end, charp);
STORE_FIELD(hv, step_layout, node_cnt, uint16_t);
if (step_layout->node_list)
STORE_FIELD(hv, step_layout, node_list, charp);
@@ -187,15 +189,15 @@
}
STORE_FIELD(hv, step_layout, plane_size, uint16_t);
av = newAV();
- for(i = 0; i < step_layout->node_cnt; i ++)
+ for (i = 0; i < step_layout->node_cnt; i ++)
av_store_uint16_t(av, i, step_layout->tasks[i]);
hv_store_sv(hv, "tasks", newRV_noinc((SV*)av));
STORE_FIELD(hv, step_layout, task_cnt, uint32_t);
STORE_FIELD(hv, step_layout, task_dist, uint16_t);
av = newAV();
- for(i = 0; i < step_layout->node_cnt; i ++) {
+ for (i = 0; i < step_layout->node_cnt; i ++) {
av2 = newAV();
- for(j = 0; j < step_layout->tasks[i]; j ++)
+ for (j = 0; j < step_layout->tasks[i]; j ++)
av_store_uint32_t(av2, i, step_layout->tids[i][j]);
av_store(av, i, newRV_noinc((SV*)av2));
}
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurmdb/Makefile.in slurm-llnl-2.3.2/contribs/perlapi/libslurmdb/Makefile.in
--- slurm-llnl-2.2.7/contribs/perlapi/libslurmdb/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurmdb/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -60,6 +60,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/libslurmdb/perl/Slurmdb.pm slurm-llnl-2.3.2/contribs/perlapi/libslurmdb/perl/Slurmdb.pm
--- slurm-llnl-2.2.7/contribs/perlapi/libslurmdb/perl/Slurmdb.pm 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/libslurmdb/perl/Slurmdb.pm 2011-12-05 17:20:08.000000000 +0000
@@ -135,7 +135,7 @@
=head1 SEE ALSO
-https://computing.llnl.gov/linux/slurm/accounting.html
+http://www.schedmd.com/slurmdocs/accounting.html
=head1 AUTHOR
@@ -148,7 +148,7 @@
CODE-OCEC-09-009. All rights reserved.
This file is part of SLURM, a resource management program. For
- details, see <https://computing.llnl.gov/linux/slurm/>. Please also
+ details, see <http://www.schedmd.com/slurmdocs/>. Please also
read the included file: DISCLAIMER.
SLURM is free software; you can redistribute it and/or modify it
diff -Nru slurm-llnl-2.2.7/contribs/perlapi/Makefile.in slurm-llnl-2.3.2/contribs/perlapi/Makefile.in
--- slurm-llnl-2.2.7/contribs/perlapi/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/perlapi/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -60,6 +60,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/phpext/Makefile.am slurm-llnl-2.3.2/contribs/phpext/Makefile.am
--- slurm-llnl-2.2.7/contribs/phpext/Makefile.am 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/Makefile.am 2011-12-05 17:20:08.000000000 +0000
@@ -1,11 +1,11 @@
AUTOMAKE_OPTIONS = foreign
-php_dir = slurm_php
-phpize = /usr/bin/phpize
+php_dir=slurm_php
+phpize=/usr/bin/phpize
if HAVE_AIX
- add_flags = "CC=\"$(CC)\" CCFLAGS=\"-g -static $(CFLAGS)\""
+config_line=CC="$(CC)" CCFLAGS="-g -static $(CFLAGS)" ./configure
else
- add_flags = "CC=\"$(CC)\" LD=\"$(CC) $(CFLAGS)\" CCFLAGS=\"-g -static $(CFLAGS)\""
+config_line=CC="$(CC)" LD="$(CC) $(CFLAGS)" CCFLAGS="-g -static $(CFLAGS)" CFLAGS="$(CFLAGS)" ./configure
endif
all-local:
@@ -14,12 +14,12 @@
if [ ! -f configure ]; then \
$(phpize); \
fi && \
- ./configure ; \
+ $(config_line); \
if [ ! -f Makefile ]; then \
exit 0;\
fi \
fi && \
- $(MAKE) $(add_flags); \
+ $(MAKE); \
cd ..;
install-exec-local:
@@ -27,7 +27,7 @@
if [ ! -f Makefile ]; then \
exit 0;\
fi && \
- $(MAKE) INSTALL_ROOT=$(DESTDIR) $(add_flags) install && \
+ $(MAKE) INSTALL_ROOT=$(DESTDIR) install && \
cd ..;
clean-generic:
diff -Nru slurm-llnl-2.2.7/contribs/phpext/Makefile.in slurm-llnl-2.3.2/contribs/phpext/Makefile.in
--- slurm-llnl-2.2.7/contribs/phpext/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -60,6 +60,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
@@ -275,6 +283,8 @@
AUTOMAKE_OPTIONS = foreign
php_dir = slurm_php
phpize = /usr/bin/phpize
+@HAVE_AIX_FALSE@config_line = CC="$(CC)" LD="$(CC) $(CFLAGS)" CCFLAGS="-g -static $(CFLAGS)" CFLAGS="$(CFLAGS)" ./configure
+@HAVE_AIX_TRUE@config_line = CC="$(CC)" CCFLAGS="-g -static $(CFLAGS)" ./configure
all: all-am
.SUFFIXES:
@@ -455,21 +465,18 @@
ps ps-am uninstall uninstall-am
-@HAVE_AIX_TRUE@ add_flags = "CC=\"$(CC)\" CCFLAGS=\"-g -static $(CFLAGS)\""
-@HAVE_AIX_FALSE@ add_flags = "CC=\"$(CC)\" LD=\"$(CC) $(CFLAGS)\" CCFLAGS=\"-g -static $(CFLAGS)\""
-
all-local:
@cd $(php_dir) && \
if [ ! -f Makefile ]; then \
if [ ! -f configure ]; then \
$(phpize); \
fi && \
- ./configure ; \
+ $(config_line); \
if [ ! -f Makefile ]; then \
exit 0;\
fi \
fi && \
- $(MAKE) $(add_flags); \
+ $(MAKE); \
cd ..;
install-exec-local:
@@ -477,7 +484,7 @@
if [ ! -f Makefile ]; then \
exit 0;\
fi && \
- $(MAKE) INSTALL_ROOT=$(DESTDIR) $(add_flags) install && \
+ $(MAKE) INSTALL_ROOT=$(DESTDIR) install && \
cd ..;
clean-generic:
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/AUTHORS slurm-llnl-2.3.2/contribs/phpext/slurm_php/AUTHORS
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/AUTHORS 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/AUTHORS 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,2 @@
+Vermeulen Peter, nMCT Howest
+Jimmy Tang, Trinity Centre for High Performance Computing, Trinity College Dublin
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/config.m4.in slurm-llnl-2.3.2/contribs/phpext/slurm_php/config.m4.in
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/config.m4.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/config.m4.in 2011-12-05 17:20:08.000000000 +0000
@@ -9,50 +9,60 @@
##*****************************************************************************
PHP_ARG_WITH(slurm, whether to use slurm,
[ --with-slurm SLURM install dir])
-
+
+AC_MSG_CHECKING([for phpize in default path])
+if test ! -f "/usr/bin/phpize"; then
+ PHP_SLURM="no"
+ AC_MSG_RESULT([NO, CANNOT MAKE SLURM_PHP])
+else
+ AC_MSG_RESULT([yes])
+fi
+
if test "$PHP_SLURM" != "no"; then
- SLURMLIB_PATH="@prefix@/lib @top_builddir@/src/api/.libs"
+ SLURMLIB_PATH="@prefix@/lib @top_builddir@/src/db_api/.libs"
SLURMINCLUDE_PATH="@prefix@/include"
- SEARCH_FOR="libslurm.so"
-
- # --with-libslurm -> check with-path
-
+ SEARCH_FOR="libslurmdb.so"
+
+ # --with-libslurm -> check with-path
+
if test -r $PHP_SLURM/; then # path given as parameter
SLURM_DIR=$PHP_SLURM
SLURMLIB_PATH="$SLURM_DIR/lib"
else # search default path list
- AC_MSG_CHECKING([for libslurm.so in default paths])
+ AC_MSG_CHECKING([for libslurmdb.so in default paths])
for i in $SLURMLIB_PATH ; do
if test -r $i/$SEARCH_FOR; then
SLURM_DIR=$i
PHP_ADD_LIBPATH($i, SLURM_PHP_SHARED_LIBADD)
-
+
AC_MSG_RESULT([found in $i])
-
+
fi
done
fi
-
+
if test -z "$SLURM_DIR"; then
AC_MSG_RESULT([not found])
AC_MSG_ERROR([Please reinstall the slurm distribution])
fi
-
+
PHP_ADD_INCLUDE($SLURMINCLUDE_PATH)
PHP_ADD_INCLUDE(@top_srcdir@)
-
- LIBNAME=slurm
+
+ LIBNAME=slurmdb
LIBSYMBOL=slurm_acct_storage_init
-
+
PHP_CHECK_LIBRARY($LIBNAME, $LIBSYMBOL,
[PHP_ADD_LIBRARY($LIBNAME, , SLURM_PHP_SHARED_LIBADD)
AC_DEFINE(HAVE_SLURMLIB,1,[ ])],
- [AC_MSG_ERROR([wrong libslurm version or lib not found])],
- [-L$SLURM_DIR -lslurm])
-
-
+ [AC_MSG_ERROR([wrong libslurmdb version or lib not found])],
+ [-L$SLURM_DIR -l$LIBNAME])
+
+
PHP_SUBST(SLURM_PHP_SHARED_LIBADD)
-
+
+ AC_CHECK_HEADERS(stdbool.h)
+
AC_DEFINE(HAVE_SLURM_PHP, 1, [Whether you have SLURM])
#PHP_EXTENSION(slurm_php, $ext_shared)
PHP_NEW_EXTENSION(slurm_php, @top_srcdir@/contribs/phpext/slurm_php/slurm_php.c, $ext_shared)
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/DISCLAIMER slurm-llnl-2.3.2/contribs/phpext/slurm_php/DISCLAIMER
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/DISCLAIMER 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/DISCLAIMER 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,20 @@
+Disclaimer
+
+The php-slurm program, its documentation, and any other auxiliary
+resources involved in building, installing and running the program,
+such as graphics, Makefiles, and user interface definition files, are
+licensed under the GNU General Public License. This includes, but is
+not limited to, all the files in the official source distribution, as
+well as the source distribution itself.
+
+A copy of the GNU General Public License can be found in the file
+LICENSE in the top directory of the official source distribution. The
+license is also available in several formats through the World Wide
+Web, via http://www.gnu.org/licenses/licenses.html#GPL, or you can
+write the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
+02139, USA.
+
+php-slurm is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/LICENSE slurm-llnl-2.3.2/contribs/phpext/slurm_php/LICENSE
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/LICENSE 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/LICENSE 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/README slurm-llnl-2.3.2/contribs/phpext/slurm_php/README
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/README 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/README 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,48 @@
+Slurm PHP extension
+===================
+
+Requirements (tested with)
+
+* SLURM 2.2.0
+* PHP 5.1.6
+* APACHE (optional, but recommended)
+
+This was made primarily for SLURMWEB to connect to slurm. Any extra
+interactions are welcome.
+
+to compile...
+
+phpize
+./configure
+make
+
+this should make modules/slurm_php.so
+
+make install as root
+should install this where your extensions are in your php install
+
+in your php.ini file add the line
+
+extension=slurm_php.so
+
+and you should be able to use the functions here.
+
+
+TEST CASES
+==========
+
+It is assumed that the user has both slurmctld and slurmd configured
+with at least 1 partition and 1 node for these tests to
+pass.
+
+Developer Notes
+===============
+
+To clean up the directory to a clean state do the following
+
+~~~~
+phpize --clean
+~~~~
+
+The coding style that should be adopted is
+http://www.kernel.org/doc/Documentation/CodingStyle
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/RELEASE_NOTES slurm-llnl-2.3.2/contribs/phpext/slurm_php/RELEASE_NOTES
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/RELEASE_NOTES 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/RELEASE_NOTES 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,38 @@
+NOTES FOR PHP-SLURM VERSION 1.0
+===============================
+
+This PHP extension's goal is to provide just enough functionality to let
+a web developer read data from the slurm controller daemon to create a
+*status* or *monitoring* application which can be viewed by the end
+user. All the code has been written by 'Vermeulen Peter' with
+contributions from TCHPC staff.
+
+
+Installation Requirements
+=========================
+
+* SLURM 2.2.0 or newer
+* PHP 5.1.6 or newer
+* APACHE (optional, but recommended)
+
+
+Added the following APIs
+=========================
+
+slurm_hostlist_to_array()
+slurm_array_to_hostlist()
+slurm_ping()
+slurm_slurmd_status()
+slurm_version()
+slurm_print_partition_names()
+slurm_get_specific_partition_info()
+slurm_get_partition_node_names()
+slurm_get_node_names()
+slurm_get_node_elements()
+slurm_get_node_element_by_name()
+slurm_get_node_state_by_name()
+slurm_get_node_states()
+slurm_get_control_configuration_keys()
+slurm_get_control_configuration_values()
+slurm_load_partition_jobs()
+slurm_load_job_information()
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/slurm_php.c slurm-llnl-2.3.2/contribs/phpext/slurm_php/slurm_php.c
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/slurm_php.c 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/slurm_php.c 2011-12-05 17:20:08.000000000 +0000
@@ -1,101 +1,902 @@
/*****************************************************************************\
* slurm_php.c - php interface to slurm.
*
- * $Id: account_gold.c 13061 2008-01-22 21:23:56Z da $
*****************************************************************************
- * Copyright (C) 2004-2007 The Regents of the University of California.
- * Copyright (C) 2008 Lawrence Livermore National Security.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Danny Auble
- *
- * This file is part of SLURM, a resource management program.
- * For details, see .
+ * Copyright (C) 2011 - Trinity Centre for High Performance Computing
+ * Copyright (C) 2011 - Trinity College Dublin
+ * Written By : Vermeulen Peter
+ *
+ * This file is part of php-slurm, a resource management program.
* Please also read the included file: DISCLAIMER.
- *
- * SLURM is free software; you can redistribute it and/or modify it under
+ *
+ * php-slurm is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
- * In addition, as a special exception, the copyright holders give permission
+ * In addition, as a special exception, the copyright holders give permission
* to link the code of portions of this program with the OpenSSL library under
- * certain conditions as described in each individual source file, and
- * distribute linked combinations including the two. You must obey the GNU
- * General Public License in all respects for all of the code used other than
- * OpenSSL. If you modify file(s) with this exception, you may extend this
- * exception to your version of the file(s), but you are not obligated to do
+ * certain conditions as described in each individual source file, and
+ * distribute linked combinations including the two. You must obey the GNU
+ * General Public License in all respects for all of the code used other than
+ * OpenSSL. If you modify file(s) with this exception, you may extend this
+ * exception to your version of the file(s), but you are not obligated to do
* so. If you do not wish to do so, delete this exception statement from your
- * version. If you delete this exception statement from all source files in
+ * version. If you delete this exception statement from all source files in
* the program, then also delete it here.
- *
- * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *
+ * php-slurm is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
- *
+ *
* You should have received a copy of the GNU General Public License along
- * with SLURM; if not, write to the Free Software Foundation, Inc.,
+ * with php-slurm; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
\*****************************************************************************/
+/*****************************************************************************\
+ *
+ * Documentation for each function can be found in the slurm_php.h file
+ *
+\*****************************************************************************/
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
-#include "php.h"
#include "slurm_php.h"
-#include "slurm/slurm.h"
-#include "src/common/list.h"
static function_entry slurm_functions[] = {
- PHP_FE(hello_world, NULL)
- PHP_FE(print_partitions, NULL)
- {NULL, NULL, NULL}
+ PHP_FE(slurm_ping, NULL)
+ PHP_FE(slurm_slurmd_status, NULL)
+ PHP_FE(slurm_print_partition_names, NULL)
+ PHP_FE(slurm_get_specific_partition_info, NULL)
+ PHP_FE(slurm_get_partition_node_names, NULL)
+ PHP_FE(slurm_version, NULL)
+ PHP_FE(slurm_get_node_names, NULL)
+ PHP_FE(slurm_get_node_elements, NULL)
+ PHP_FE(slurm_get_node_element_by_name, NULL)
+ PHP_FE(slurm_get_node_state_by_name, NULL)
+ PHP_FE(slurm_get_control_configuration_keys, NULL)
+ PHP_FE(slurm_get_control_configuration_values, NULL)
+ PHP_FE(slurm_load_job_information, NULL)
+ PHP_FE(slurm_load_partition_jobs, NULL)
+ PHP_FE(slurm_get_node_states, NULL)
+ PHP_FE(slurm_hostlist_to_array, NULL)
+ PHP_FE(slurm_array_to_hostlist, NULL) {
+ NULL, NULL, NULL
+ }
};
zend_module_entry slurm_php_module_entry = {
#if ZEND_MODULE_API_NO >= 20010901
- STANDARD_MODULE_HEADER,
+ STANDARD_MODULE_HEADER,
#endif
- SLURM_PHP_EXTNAME,
- slurm_functions,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ SLURM_PHP_EXTNAME,
+ slurm_functions,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
#if ZEND_MODULE_API_NO >= 20010901
- SLURM_PHP_VERSION,
+ SLURM_PHP_VERSION,
#endif
- STANDARD_MODULE_PROPERTIES
+ STANDARD_MODULE_PROPERTIES
};
#ifdef COMPILE_DL_SLURM_PHP
ZEND_GET_MODULE(slurm_php)
#endif
-PHP_FUNCTION(hello_world)
+/*****************************************************************************\
+ * HELPER FUNCTION PROTOTYPES
+\*****************************************************************************/
+
+/*
+ * _parse_node_pointer - Parse a node pointer's contents into an
+ * associative zval array where the key is descriptive to the
+ * value
+ *
+ * IN sub_arr - array to store the contents of the node pointer
+ * IN node_arr - node pointer that needs parsing
+ */
+static void _parse_node_pointer(zval *sub_arr, node_info_t *node_arr);
+
+/*
+ * _parse_assoc_array - Parse a character array where the elements are
+ * key-value pairs separated by delimiters into an associative
+ * array
+ *
+ * IN char_arr - character array that needs parsing
+ * IN delims - character array that contains the delimiters used in parsing
+ * IN result_arr - associative array used to store the key_value pairs in
+ */
+static void _parse_assoc_array(char *char_arr, char *delims, zval *result_arr);
+
+/*
+ * _parse_array - Parse a character array where the elements are values
+ * separated by delimiters into a numerically indexed array
+ *
+ * IN char_arr - character array that needs parsing
+ * IN delims - character array that contains the delimiters used in parsing
+ * IN result_arr - numerically indexed array used to store the values in
+ */
+static void _parse_array(char *char_arr, char *delims, zval *rslt_arr);
+
+/*
+ * _zend_add_valid_assoc_string - checks a character array to see if
+ * it's NULL or not, if so an associative null is added, if not
+ * an associative string is added.
+ *
+ * IN rstl_arr - array to store the associative key_value pairs in
+ * IN key - character array used as the associative key
+ * IN val - character array to be validated and added as value if valid
+ */
+static void _zend_add_valid_assoc_string(zval *rstl_arr, char *key, char *val);
+
+/*
+ * _zend_add_valid_assoc_time_string - checks a unix timestamp to see if it's
+ * 0 or not, if so an associative null is added, if not a formatted string
+ * is added.
+ *
+ * IN rstl_arr - array to store the associative key_value pairs in
+ * IN key - character array used as the associative key
+ * IN val - time_t unix timestamp to be validated and added if valid
+ * NOTE : If you'd like to change the format in which the valid strings are
+ * returned, you can change the TIME_FORMAT_STRING macro to the needed format
+ */
+static void _zend_add_valid_assoc_time_string(
+ zval *rstl_arr, char *key, time_t *val);
+
+/*****************************************************************************\
+ * TODO
+ *****************************************************************************
+ * [ADJUSTING EXISTING FUNCTIONS]
+ * - _parse_node_pointer
+ * dynamic_plugin_data_t is currently not returned
+ * [EXTRA FUNCTIONS]
+ * - Functions that filter jobs on the nodes they are running on
+ * - Scheduling
+ * - ...
+\*****************************************************************************/
+
+/*****************************************************************************\
+ * HELPER FUNCTIONS
+\*****************************************************************************/
+
+static void _parse_node_pointer(zval *sub_arr, node_info_t *node_arr)
+{
+ zval *sub_arr_2 = NULL;
+
+ _zend_add_valid_assoc_string(sub_arr, "Name", node_arr->name);
+ _zend_add_valid_assoc_string(sub_arr, "Arch.", node_arr->arch);
+ _zend_add_valid_assoc_time_string(sub_arr, "Boot Time",
+ &node_arr->boot_time);
+ add_assoc_long(sub_arr, "#CPU'S", node_arr->cpus);
+ add_assoc_long(sub_arr, "#Cores/CPU", node_arr->cores);
+
+ if (node_arr->features == NULL) {
+ add_assoc_null(sub_arr, "Features");
+ } else {
+ ALLOC_INIT_ZVAL(sub_arr_2);
+ array_init(sub_arr_2);
+ _parse_array(node_arr->features, ",", sub_arr_2);
+ add_assoc_zval(sub_arr, "Features", sub_arr_2);
+ }
+
+ _zend_add_valid_assoc_string(sub_arr, "GRES", node_arr->gres);
+ add_assoc_long(sub_arr, "State", node_arr->node_state);
+ _zend_add_valid_assoc_string(sub_arr, "OS", node_arr->os);
+ add_assoc_long(sub_arr, "Real Mem", node_arr->real_memory);
+
+ if (node_arr->reason!=NULL) {
+ _zend_add_valid_assoc_string(sub_arr, "Reason",
+ node_arr->reason);
+ _zend_add_valid_assoc_time_string(sub_arr,"Reason Timestamp",
+ &node_arr->reason_time);
+ add_assoc_long(sub_arr, "Reason User Id",
+ node_arr->reason_uid);
+ } else {
+ add_assoc_null(sub_arr, "Reason");
+ add_assoc_null(sub_arr, "Reason Timestamp");
+ add_assoc_null(sub_arr, "Reason User Id");
+ }
+
+ _zend_add_valid_assoc_time_string(sub_arr, "Slurmd Startup Time",
+ &node_arr->slurmd_start_time);
+ add_assoc_long(sub_arr, "#Sockets/Node", node_arr->sockets);
+ add_assoc_long(sub_arr, "#Threads/Core", node_arr->threads);
+ add_assoc_long(sub_arr, "TmpDisk", node_arr->tmp_disk);
+ add_assoc_long(sub_arr, "Weight", node_arr->weight);
+}
+
+
+static void _parse_assoc_array(char *char_arr, char *delims, zval *result_arr)
+{
+ char *rslt = NULL;
+ char *tmp;
+ int i = 0;
+
+ rslt = strtok(char_arr, delims);
+ while (rslt != NULL) {
+ if (i == 0) {
+ tmp = rslt;
+ } else if (i == 1) {
+ if (strcmp(rslt,"(null)")==0) {
+ add_assoc_null(result_arr, tmp);
+ } else {
+ _zend_add_valid_assoc_string(result_arr,
+ tmp, rslt);
+ }
+ }
+ i++;
+ if (i == 2) {
+ i = 0;
+ }
+ rslt = strtok(NULL, delims);
+ }
+}
+
+
+static void _parse_array(char *char_arr, char *delims, zval *rslt_arr)
+{
+ char *rslt = NULL;
+ char *tmp = NULL;
+
+ rslt = strtok(char_arr, delims);
+ while (rslt != NULL) {
+ if (strcmp(rslt, "(null)")==0) {
+ add_next_index_null(rslt_arr);
+ } else {
+ tmp = slurm_xstrdup(rslt);
+ add_next_index_string(rslt_arr, tmp, 1);
+ xfree(tmp);
+ }
+ rslt = strtok(NULL, delims);
+ }
+}
+
+static void _zend_add_valid_assoc_string(zval *rstl_arr, char *key, char *val)
+{
+ if (!val)
+ add_assoc_null(rstl_arr, key);
+ else
+ add_assoc_string(rstl_arr, key, val, 1);
+}
+
+
+static void _zend_add_valid_assoc_time_string(
+ zval *rstl_arr, char *key, time_t *val)
+{
+ char buf[80];
+ struct tm *timeinfo;
+
+ if (val==0) {
+ add_assoc_null(rstl_arr, key);
+ } else {
+ timeinfo = localtime(val);
+ strftime(buf, 80, TIME_FORMAT_STRING, timeinfo);
+ add_assoc_string(rstl_arr, key, buf, 1);
+ }
+}
+
+
+/*****************************************************************************\
+ * SLURM STATUS FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_ping)
+{
+ int err = SLURM_SUCCESS;
+
+ array_init(return_value);
+ err = slurm_ping(1);
+ add_assoc_long(return_value,"Prim. Controller",err);
+ err = slurm_ping(2);
+ add_assoc_long(return_value,"Sec. Controller",err);
+}
+
+
+PHP_FUNCTION(slurm_slurmd_status)
+{
+ int err = SLURM_SUCCESS;
+ slurmd_status_t *status_ptr = NULL;
+
+ err = slurm_load_slurmd_status(&status_ptr);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ array_init(return_value);
+ _zend_add_valid_assoc_time_string(return_value,"Booted_at",
+ &status_ptr->booted);
+ _zend_add_valid_assoc_time_string(return_value,"Last_Msg",
+ &status_ptr->last_slurmctld_msg);
+ add_assoc_long(return_value,"Logging_Level", status_ptr->slurmd_debug);
+ add_assoc_long(return_value,"Actual_CPU's", status_ptr->actual_cpus);
+ add_assoc_long(return_value,"Actual_Sockets",
+ status_ptr->actual_sockets);
+ add_assoc_long(return_value,"Actual_Cores",status_ptr->actual_cores);
+ add_assoc_long(return_value,"Actual_Threads",
+ status_ptr->actual_threads);
+ add_assoc_long(return_value,"Actual_Real_Mem",
+ status_ptr->actual_real_mem);
+ add_assoc_long(return_value,"Actual_Tmp_Disk",
+ status_ptr->actual_tmp_disk);
+ add_assoc_long(return_value,"PID",status_ptr->pid);
+ _zend_add_valid_assoc_string(return_value, "Hostname",
+ status_ptr->hostname);
+ _zend_add_valid_assoc_string(return_value, "Slurm Logfile",
+ status_ptr->slurmd_logfile);
+ _zend_add_valid_assoc_string(return_value, "Step List",
+ status_ptr->step_list);
+ _zend_add_valid_assoc_string(return_value, "Version",
+ status_ptr->version);
+
+ if (status_ptr != NULL) {
+ slurm_free_slurmd_status(status_ptr);
+ }
+}
+
+
+PHP_FUNCTION(slurm_version)
+{
+ long option = -1;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC,
+ "l", &option) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ switch (option) {
+ case 0:
+ RETURN_LONG(SLURM_VERSION_MAJOR(SLURM_VERSION_NUMBER));
+ break;
+ case 1:
+ RETURN_LONG(SLURM_VERSION_MINOR(SLURM_VERSION_NUMBER));
+ break;
+ case 2:
+ RETURN_LONG(SLURM_VERSION_MICRO(SLURM_VERSION_NUMBER));
+ break;
+ default:
+ array_init(return_value);
+ add_next_index_long(return_value,
+ SLURM_VERSION_MAJOR(SLURM_VERSION_NUMBER));
+ add_next_index_long(return_value,
+ SLURM_VERSION_MINOR(SLURM_VERSION_NUMBER));
+ add_next_index_long(return_value,
+ SLURM_VERSION_MICRO(SLURM_VERSION_NUMBER));
+ break;
+ }
+}
+
+
+/*****************************************************************************\
+ * SLURM PHP HOSTLIST FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_hostlist_to_array)
+{
+ long lngth = 0;
+ char *host_list = NULL;
+ hostlist_t hl = NULL;
+ int hl_length = 0;
+ int i=0;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d",
+ &host_list, &lngth) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ if ((host_list == NULL) || !strcmp(host_list, "")) {
+ RETURN_LONG(-3);
+ }
+
+ hl = slurm_hostlist_create(host_list);
+ hl_length = slurm_hostlist_count(hl);
+
+ if (hl_length==0) {
+ RETURN_LONG(-2);
+ }
+
+ array_init(return_value);
+ for (i=0; irecord_count; i++) {
+ add_next_index_string(return_value,
+ prt_ptr->partition_array[i].name, 1);
+ }
+
+ slurm_free_partition_info_msg(prt_ptr);
+
+ if (i == 0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_get_specific_partition_info)
+{
+ long lngth = 0;
+ int err = SLURM_SUCCESS;
+ partition_info_msg_t *prt_ptr = NULL;
+ partition_info_t *prt_data = NULL;
+ char *name = NULL;
+ char *tmp = NULL;
+ int i = 0;
+ int y = 0;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &name,
+ &lngth) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ if ((name == NULL) || !strcmp(name, "")) {
+ RETURN_LONG(-3);
+ }
+
+ err = slurm_load_partitions((time_t) NULL, &prt_ptr, 0);
+
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ if (prt_ptr->record_count != 0) {
+ for (i = 0; i < prt_ptr->record_count; i++) {
+ if (strcmp(prt_ptr->partition_array->name, name) == 0) {
+ prt_data = &prt_ptr->partition_array[i];
+ tmp = slurm_sprint_partition_info(prt_data, 1);
+ array_init(return_value);
+ _parse_assoc_array(tmp, "= ", return_value);
+ y++;
+ break;
+ }
+ }
+ }
+
+ slurm_free_partition_info_msg(prt_ptr);
+
+ if (y == 0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_get_partition_node_names)
+{
+ char *prt_name = NULL;
+ long lngth = 0;
+ int err = SLURM_SUCCESS;
+ partition_info_msg_t *prt_ptr = NULL;
+ partition_info_t *prt_data = NULL;
+ int i = 0;
+ int y = 0;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &prt_name,
+ &lngth) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ if ((prt_name == NULL) || (strcmp(prt_name,"")==0)) {
+ RETURN_LONG(-3);
+ }
+
+ err = slurm_load_partitions((time_t) NULL, &prt_ptr, 0);
+
+ if (err)
+ RETURN_LONG(-2);
+
+ if (prt_ptr->record_count != 0) {
+ for (i = 0; i < prt_ptr->record_count; i++) {
+ if (!strcmp(prt_ptr->partition_array->name, prt_name)) {
+ prt_data = &prt_ptr->partition_array[i];
+ array_init(return_value);
+ add_next_index_string(
+ return_value, prt_data->nodes, 1);
+ y++;
+ break;
+ }
+ }
+ }
+
+ slurm_free_partition_info_msg(prt_ptr);
+
+ if (y == 0)
+ RETURN_LONG(-1);
+}
+
+
+/*****************************************************************************\
+ * SLURM NODE CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_get_node_names)
+{
+ int err = SLURM_SUCCESS;
+ int i = 0;
+ node_info_msg_t *node_ptr = NULL;
+
+ err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ if (node_ptr->record_count > 0) {
+ array_init(return_value);
+ for (i = 0; i < node_ptr->record_count; i++) {
+ add_next_index_string(
+ return_value, node_ptr->node_array[i].name, 1);
+ }
+ }
+
+ slurm_free_node_info_msg(node_ptr);
+
+ if(i==0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_get_node_elements)
+{
+ int err = SLURM_SUCCESS;
+ int i = 0;
+ node_info_msg_t *node_ptr;
+ zval *sub_arr = NULL;
+
+ err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ if (node_ptr->record_count > 0) {
+ array_init(return_value);
+ for (i = 0; i < node_ptr->record_count; i++) {
+ ALLOC_INIT_ZVAL(sub_arr);
+ array_init(sub_arr);
+ _parse_node_pointer(sub_arr, &node_ptr->node_array[i]);
+ add_assoc_zval(return_value,
+ node_ptr->node_array[i].name,
+ sub_arr);
+ }
+ }
+
+ slurm_free_node_info_msg(node_ptr);
+
+ if(i==0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_get_node_element_by_name)
+{
+ int err = SLURM_SUCCESS;
+ int i = 0,y = 0;
+ node_info_msg_t *node_ptr;
+ char *node_name = NULL;
+ long lngth;
+ zval *sub_arr = NULL;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &node_name,
+ &lngth) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ if ((node_name == NULL) || (strcmp(node_name,"")==0)) {
+ RETURN_LONG(-3);
+ }
+
+ err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ array_init(return_value);
+
+ for (i = 0; i < node_ptr->record_count; i++) {
+ if (strcmp(node_ptr->node_array->name, node_name) == 0) {
+ y++;
+ ALLOC_INIT_ZVAL(sub_arr);
+ array_init(sub_arr);
+ _parse_node_pointer(sub_arr, &node_ptr->node_array[i]);
+ add_assoc_zval(return_value, node_name,
+ sub_arr);
+ break;
+ }
+ }
+
+ slurm_free_node_info_msg(node_ptr);
+
+ if (y == 0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_get_node_state_by_name)
+{
+ int err = SLURM_SUCCESS;
+ int i = 0,y = 0;
+ node_info_msg_t *node_ptr;
+ char *node_name = NULL;
+ long lngth;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &node_name,
+ &lngth) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ if ((node_name == NULL) || (strcmp(node_name,"")==0)) {
+ RETURN_LONG(-3);
+ }
+
+ err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ for (i = 0; i < node_ptr->record_count; i++) {
+ if (strcmp(node_ptr->node_array->name, node_name) == 0) {
+ y++;
+ RETURN_LONG(node_ptr->node_array[i].node_state);
+ break;
+ }
+ }
+
+ slurm_free_node_info_msg(node_ptr);
+
+ if (i == 0) {
+ RETURN_LONG(-1);
+ }
+
+ if (y==0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_get_node_states)
+{
+ int err = SLURM_SUCCESS;
+ int i = 0;
+ node_info_msg_t *node_ptr;
+
+ err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ array_init(return_value);
+ for (i = 0; i < node_ptr->record_count; i++) {
+ add_next_index_long(return_value,
+ node_ptr->node_array[i].node_state);
+ }
+
+ slurm_free_node_info_msg(node_ptr);
+
+ if (i == 0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+/*****************************************************************************\
+ * SLURM CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_get_control_configuration_keys)
+{
+ int err = SLURM_SUCCESS;
+ slurm_ctl_conf_t *ctrl_conf_ptr;
+ List lst;
+ ListIterator iter = NULL;
+ key_pair_t *k_p;
+
+ err = slurm_load_ctl_conf((time_t) NULL, &ctrl_conf_ptr);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ lst = slurm_ctl_conf_2_key_pairs(ctrl_conf_ptr);
+ if (!lst) {
+ RETURN_LONG(-1);
+ }
+
+ iter = slurm_list_iterator_create(lst);
+ array_init(return_value);
+ while ((k_p = slurm_list_next(iter))) {
+ add_next_index_string(return_value, k_p->name, 1);
+ }
+
+ slurm_free_ctl_conf(ctrl_conf_ptr);
+}
+
+
+PHP_FUNCTION(slurm_get_control_configuration_values)
{
- RETURN_STRING("Hello World\n", 1);
+ int err = SLURM_SUCCESS;
+ slurm_ctl_conf_t *ctrl_conf_ptr;
+ List lst;
+ ListIterator iter = NULL;
+ key_pair_t *k_p;
+
+ err = slurm_load_ctl_conf((time_t) NULL, &ctrl_conf_ptr);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ lst = slurm_ctl_conf_2_key_pairs(ctrl_conf_ptr);
+ if (!lst) {
+ RETURN_LONG(-1);
+ }
+
+ iter = slurm_list_iterator_create(lst);
+ array_init(return_value);
+ while ((k_p = slurm_list_next(iter))) {
+ if (k_p->value==NULL) {
+ add_next_index_null(return_value);
+ } else {
+ add_next_index_string(return_value, k_p->value, 1);
+ }
+ }
+
+ slurm_free_ctl_conf(ctrl_conf_ptr);
}
-PHP_FUNCTION(print_partitions)
+
+/*****************************************************************************\
+ * SLURM JOB READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_load_job_information)
{
- List sinfo_list = NULL;
- int error_code = SLURM_SUCCESS;
- uint16_t show_flags = 0;
- static partition_info_msg_t *new_part_ptr;
- printf("hey\n");
- slurm_info("got here!");
- printf("hey\n");
- error_code = slurm_load_partitions((time_t) NULL, &new_part_ptr,
- show_flags);
- if (error_code) {
- error("slurm_load_part");
- RETURN_INT(error_code);
- }
-
-// sinfo_list = list_create(_sinfo_list_delete);
-
- RETURN_INT(error_code);
+ int err = SLURM_SUCCESS;
+ int i = 0;
+ job_info_msg_t *job_ptr;
+ zval *sub_arr = NULL;
+ char *tmp;
+
+ err = slurm_load_jobs((time_t) NULL, &job_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ array_init(return_value);
+ for (i = 0; i < job_ptr->record_count; i++) {
+ ALLOC_INIT_ZVAL(sub_arr);
+ array_init(sub_arr);
+ _parse_assoc_array(slurm_sprint_job_info(
+ &job_ptr->job_array[i], 1),
+ "= ", sub_arr);
+ tmp = slurm_xstrdup_printf("%u", job_ptr->job_array[i].job_id);
+ add_assoc_zval(return_value, tmp, sub_arr);
+ xfree(tmp);
+ }
+
+ slurm_free_job_info_msg(job_ptr);
+
+ if (i == 0) {
+ RETURN_LONG(-1);
+ }
+}
+
+
+PHP_FUNCTION(slurm_load_partition_jobs)
+{
+ int err = SLURM_SUCCESS;
+ int i = 0;
+ job_info_msg_t *job_ptr;
+ zval *sub_arr = NULL;
+ char *tmp;
+ char *pname = NULL;
+ long lngth;
+ long checker = 0;
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &pname,
+ &lngth) == FAILURE) {
+ RETURN_LONG(-3);
+ }
+
+ if ((pname == NULL) || !strcmp(pname,"")) {
+ RETURN_LONG(-3);
+ }
+
+ err = slurm_load_jobs((time_t) NULL, &job_ptr, 0);
+ if (err) {
+ RETURN_LONG(-2);
+ }
+
+ array_init(return_value);
+ for (i = 0; i < job_ptr->record_count; i++) {
+ if (!strcmp(job_ptr->job_array->partition, pname)) {
+ checker++;
+ ALLOC_INIT_ZVAL(sub_arr);
+ array_init(sub_arr);
+ _parse_assoc_array(slurm_sprint_job_info(
+ &job_ptr->job_array[i], 1),
+ "= ", sub_arr);
+ tmp = slurm_xstrdup_printf(
+ "%u", job_ptr->job_array[i].job_id);
+ add_assoc_zval(return_value, tmp, sub_arr);
+ xfree(tmp);
+ }
+ }
+
+ slurm_free_job_info_msg(job_ptr);
+
+ if (i == 0) {
+ RETURN_LONG(-1);
+ }
+
+ if (checker==0) {
+ RETURN_LONG(-1);
+ }
}
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/slurm_php.h slurm-llnl-2.3.2/contribs/phpext/slurm_php/slurm_php.h
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/slurm_php.h 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/slurm_php.h 2011-12-05 17:20:08.000000000 +0000
@@ -1,52 +1,383 @@
/*****************************************************************************\
* slurm_php.h - php interface to slurm.
*
- * $Id: slurm_php.h 13061 2008-01-22 21:23:56Z da $
*****************************************************************************
- * Copyright (C) 2004-2007 The Regents of the University of California.
- * Copyright (C) 2008 Lawrence Livermore National Security.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Danny Auble
- *
- * This file is part of SLURM, a resource management program.
- * For details, see .
+ * Copyright (C) 2011 - Trinity Centre for High Performance Computing
+ * Copyright (C) 2011 - Trinity College Dublin
+ * Written By : Vermeulen Peter
+ *
+ * This file is part of php-slurm, a resource management program.
* Please also read the included file: DISCLAIMER.
- *
- * SLURM is free software; you can redistribute it and/or modify it under
+ *
+ * php-slurm is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
- * In addition, as a special exception, the copyright holders give permission
+ * In addition, as a special exception, the copyright holders give permission
* to link the code of portions of this program with the OpenSSL library under
- * certain conditions as described in each individual source file, and
- * distribute linked combinations including the two. You must obey the GNU
- * General Public License in all respects for all of the code used other than
- * OpenSSL. If you modify file(s) with this exception, you may extend this
- * exception to your version of the file(s), but you are not obligated to do
+ * certain conditions as described in each individual source file, and
+ * distribute linked combinations including the two. You must obey the GNU
+ * General Public License in all respects for all of the code used other than
+ * OpenSSL. If you modify file(s) with this exception, you may extend this
+ * exception to your version of the file(s), but you are not obligated to do
* so. If you do not wish to do so, delete this exception statement from your
- * version. If you delete this exception statement from all source files in
+ * version. If you delete this exception statement from all source files in
* the program, then also delete it here.
- *
- * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *
+ * php-slurm is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
- *
+ *
* You should have received a copy of the GNU General Public License along
- * with SLURM; if not, write to the Free Software Foundation, Inc.,
+ * with php-slurm; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
\*****************************************************************************/
+
#ifndef SLURM_PHP_H
#define SLURM_PHP_H 1
-#define SLURM_PHP_VERSION "1.0"
+#define SLURM_PHP_VERSION "1.0.1"
#define SLURM_PHP_EXTNAME "slurm"
+/*
+ * Adjust this value to change the format of the returned string
+ * values.
+ *
+ * For more information on formatting options :
+ * http://www.java2s.com/Tutorial/C/0460__time.h/strftime.htm
+ */
+#define TIME_FORMAT_STRING "%c"
-PHP_FUNCTION(hello_world);
-PHP_FUNCTION(print_partitions);
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "src/common/xmalloc.h"
extern zend_module_entry slurm_php_module_entry;
+
+/*****************************************************************************\
+ * TYPEDEFS
+\*****************************************************************************/
+
+typedef struct key_value {
+ char *name; /* key */
+ char *value; /* value */
+} key_pair_t;
+
+/* define functions needed to avoid warnings (they are defined in
+ * src/common/xstring.h) If you can figure out a way to make it so we
+ * don't have to make these declarations that would be awesome. I
+ * didn't have time to spend on it when I was working on it. -da
+ */
+
+/*
+** strdup which uses xmalloc routines
+*/
+char *slurm_xstrdup(const char *str);
+
+/*
+** strdup formatted which uses xmalloc routines
+*/
+char *slurm_xstrdup_printf(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+
+/*****************************************************************************\
+ * SLURM PHP HOSTLIST FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_hostlist_to_array - converts a hostlist string to
+ * a numerically indexed array.
+ *
+ * IN host_list - string value containing the hostlist
+ * RET numerically indexed array containing the names of the nodes
+ */
+PHP_FUNCTION(slurm_hostlist_to_array);
+
+/*
+ * slurm_array_to_hostlist - convert an array of nodenames into a hostlist
+ * string
+ *
+ * IN node_arr - Numerically indexed array containing a nodename on each index
+ * RET String variable containing the hostlist string
+ */
+PHP_FUNCTION(slurm_array_to_hostlist);
+
+
+/*****************************************************************************\
+ * SLURM STATUS FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_ping - Issues the slurm interface to return the status of the slurm
+ * primary and secondary controller
+ *
+ * RET associative array containing the status ( status = 0 if online, = -1 if
+ * offline ) of both controllers
+ * NOTE : the error codes and their meaning are described in the section
+ * labelled EXTRA
+ */
+PHP_FUNCTION(slurm_ping);
+
+/*
+ * slurm_slurmd_status - Issues the slurm interface to return the
+ * status of the slave daemon ( running on this machine )
+ *
+ * RET associative array containing the status or a negative long variable
+ * containing an error code
+ * NOTE : the error codes and their meaning are described in the section
+ * labelled EXTRA
+ */
+PHP_FUNCTION(slurm_slurmd_status);
+
+/*
+ * slurm_version - Returns the slurm version number in the requested format
+ *
+ * IN option - long/integer value linking to the formatting of the version
+ * number
+ * RET long value containing the specific formatted version number a numeric
+ * array containing the version number or a negative long variable
+ * containing an error code.
+ * NOTE : the possible cases and their meaning are described in the section
+ * labelled EXTRA
+ */
+PHP_FUNCTION(slurm_version);
+
+
+/*****************************************************************************\
+ * SLURM PARTITION READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_print_partition_names - Creates and returns a numerically
+ * indexed array containing the names of the partitions
+ *
+ * RET numerically indexed array containing the partitionnames or a
+ * negative long variable containing an error code NOTE : the
+ * error codes and their meaning are described in the section
+ * labelled EXTRA
+ */
+PHP_FUNCTION(slurm_print_partition_names);
+/*
+ * slurm_get_specific_partition_info - Searches for the requested
+ * partition and if found it returns an associative array
+ * containing the information about this specific partition
+ *
+ * IN name - a string variable containing the partitionname
+ * OPTIONAL IN lngth - a long variable containing the length of the
+ * partitionname
+ * RET an associative array containing the information about a
+ * specific partition, or a negative long value containing an
+ * error code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_specific_partition_info);
+
+/*
+ * slurm_get_partition_node_names - Searches for the requested partition and
+ * if found it parses the nodes into a numerically indexed array, which is
+ * then returned to the calling function.
+ *
+ * IN name - a string variable containing the partitionname
+ *
+ * OPTIONAL IN lngth - a long variable containing the length of the
+ * partitionname
+ *
+ * RET a numerically indexed array containing the names of all the
+ * nodes connected to this partition, or a negative long value
+ * containing an error code
+ *
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_partition_node_names);
+
+
+/*****************************************************************************\
+ * SLURM NODE CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_get_node_names - Creates and returns a numerically index array
+ * containing the nodenames.
+ *
+ * RET a numerically indexed array containing the requested nodenames,
+ * or a negative long value containing an error code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_names);
+
+/*
+ * slurm_get_node_elements - Creates and returns an associative array
+ * containing all the nodes indexed by nodename and as value an
+ * associative array containing their information.
+ *
+ * RET an associative array containing the nodes as keys and their
+ * information as value, or a long value containing an error code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_elements);
+
+/*
+ * slurm_get_node_element_by_name - Searches for the requested node
+ * and if found it parses its information into an associative
+ * array, which is then returned to the calling function.
+ *
+ * IN name - a string variable containing the nodename
+ * OPTIONAL IN lngth - a long variable containing the length of the nodename
+ * RET an assocative array containing the requested information or a
+ * long value containing an error code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_element_by_name);
+
+/*
+ * slurm_get_node_state_by_name - Searches for the requested node and
+ * if found it returns the state of that node
+ *
+ * IN name - a string variable containing the nodename
+ * OPTIONAL IN lngth - a long variable containing the length of the nodename
+ * RET a long value containing the state of the node [0-7] or a
+ * negative long value containing the error code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_state_by_name);
+
+/*
+ * slurm_get_node_states - Creates a numerically indexed array
+ * containing the state of each node ( only the state ! ) as a
+ * long value. This function could be used to create a summary of
+ * the node states without having to do a lot of processing ( or
+ * having to deal with overlapping nodes between partitions ).
+ *
+ * RET a numerically indexed array containing node states
+ */
+PHP_FUNCTION(slurm_get_node_states);
+
+
+/*****************************************************************************\
+ * SLURM CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * Due to the configuration being quite large, i decided to create 2 functions
+ * to return the keys and values separately. ( to prevent a buffer overflow )
+ */
+
+/*
+ * slurm_get_control_configuration_keys - Retreives the configuration
+ * from the slurm daemon and parses it into a numerically indexed
+ * array containg the keys that link to the values ( the values
+ * are retreived by the slurm_get_control_configuration_values
+ * function )
+ *
+ * RET a numerically indexed array containing keys that describe the
+ * values of the configuration of the slurm daemon, or a long
+ * value containing an error code
+ *
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_control_configuration_keys);
+
+/*
+ * slurm_get_control_configuration_values - Retreives the
+ * configuration from the slurm daemon and parses it into a
+ * numerically indexed array containg the values that link to the
+ * keys ( the keys are retreived by the
+ * slurm_get_control_configuration_keys function )
+ *
+ * RET a numerically indexed array containing the values of the
+ * configuration of the slurm daemon, or a long value containing
+ * an error code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_control_configuration_values);
+
+
+/*****************************************************************************\
+ * SLURM JOB READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_load_job_information - Loads the information of all the jobs,
+ * parses it and returns the values as an associative array where
+ * each key is the job id linking to an associative array with
+ * the information of the job
+ *
+ * RET an associative array containing the information of all jobs, or
+ * a long value containing an error code.
+ *
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_load_job_information);
+
+/*
+ * slurm_load_partition_jobs - Retreive the information of all the
+ * jobs running on a single partition.
+ *
+ * IN pname - The partition name as a string value
+ * OPTIONAL IN lngth - a long variable containing the length of the
+ * partitionname
+ * RET an associative array containing the information of all the jobs
+ * running on this partition. Or a long value containing an error
+ * code
+ * NOTE : the error codes and their meaning are described in the
+ * section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_load_partition_jobs);
+
+
+/*****************************************************************************\
+ * EXTRA
+ *****************************************************************************
+ *
+ * [ERROR CODES]
+ *
+ * -3 : no/incorrect variables where passed on
+ * -2 : An error occurred whilst trying to communicate
+ * with the daemon
+ * -1 : Your query produced no results
+ *
+ * [VERSION FORMATTING OPTIONS]
+ *
+ * 0 : major of the version number
+ * 1 : minor of the version number
+ * 2 : micro of the version number
+ * default : full version number
+ *
+ * [EXPLANATION]
+ *
+ * Consider the version number 2.2.3,
+ * if we were to split this into an array
+ * where the "." sign is the delimiter
+ * we would receive the following
+ *
+ * [2] => MAJOR
+ * [2] => MINOR
+ * [3] => MICRO
+ *
+ * When requesting the major you would
+ * only receive the major, when requesting
+ * the full version you would receive the array
+ * as depicted above.
+ *
+\*****************************************************************************/
+
#define phpext_slurm_php_ptr &slurm_php_module_entry
#endif
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,29 @@
+--TEST--
+Test function slurm_array_to_hostlist() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+array(1) {
+ ["HOSTLIST"]=>
+ string(26) "host[01-02],another-host02"
+}
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_array_to_hostlist() by calling it more than or less than its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECTF--
+*** Test by calling method or function with incorrect numbers of arguments ***
+! ret -2 < 0
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,35 @@
+--TEST--
+Test function slurm_get_control_configuration_keys() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_control_configuration_keys : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,39 @@
+--TEST--
+Test function slurm_get_control_configuration_values() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_control_configuration_values : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,34 @@
+--TEST--
+Test function slurm_get_node_element_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function without any arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,24 @@
+--TEST--
+Test function slurm_get_node_element_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,33 @@
+--TEST--
+Test function slurm_get_node_elements() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_node_elements() : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,29 @@
+--TEST--
+Test function slurm_get_node_names() by calling it with its expected arguments
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_node_names : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,41 @@
+--TEST--
+Test function slurm_get_node_state_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with faulty arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,24 @@
+--TEST--
+Test function slurm_get_node_state_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with faulty arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,28 @@
+--TEST--
+Test function slurm_get_node_states() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with correct arguments ***
+[SLURM:SUCCESS] : slurm_get_node_states() succesfully returned it's data
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,17 @@
+--TEST--
+Test function slurm_get_partition_node_names() by calling it with its expected arguments
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_partition_node_names ok
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_get_partition_node_names() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:ERROR] -1 : No partition by that name was found on your system
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,37 @@
+--TEST--
+Test function slurm_get_specific_partition_info() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECTF--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:SUCCESS]
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_get_specific_partition_info() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:ERROR] -1 : No partition by that name was found on your system
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,27 @@
+--TEST--
+Test function slurm_hostlist_to_array() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+array(3) {
+ [0]=>
+ string(6) "host01"
+ [1]=>
+ string(6) "host02"
+ [2]=>
+ string(14) "another-host02"
+}
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,24 @@
+--TEST--
+Test function slurm_hostlist_to_array() by calling it more than or less than its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECTF--
+*** Test by calling method or function with incorrect numbers of arguments ***
+int(-3)
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,32 @@
+--TEST--
+Test function slurm_load_job_information() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_load_job_information : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,36 @@
+--TEST--
+Test function slurm_load_partition_jobs() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECTF--
+*** Test by calling method or function with correct arguments ***
+[SLURM:SUCCESS]
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,34 @@
+--TEST--
+Test function slurm_load_partition_jobs() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with faulty arguments ***
+[SLURM:ERROR] -1 : No jobs where found for a partition by that name
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,14 @@
+--TEST--
+Test function slurm_ping() by calling it with its expected arguments
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+int(0)
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,35 @@
+--TEST--
+Test function slurm_ping() by calling it more than or less than its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECTF--
+*** Test by calling method or function with incorrect numbers of arguments ***
+! slurm_ping Array == 0 ok
+! slurm_ping Array == -1 ok
+! slurm_ping Array == 0 ok
+! slurm_ping Array == -1 ok
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,33 @@
+--TEST--
+Test function slurm_print_partition_names() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_print_partition_names : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,26 @@
+--TEST--
+Test function slurm_slurmd_status() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_slurmd_status() : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt
--- slurm-llnl-2.2.7/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_version() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang
+Peter Vermeulen
+--SKIPIF--
+
+--FILE--
+0)) {
+ echo "! slurm_version : SUCCESS";
+} else if($ver == -3) {
+ echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($ver == -2) {
+ echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($ver == -1) {
+ echo "[SLURM:ERROR] -1 : No version was found on the system";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_version : SUCCESS
diff -Nru slurm-llnl-2.2.7/contribs/README slurm-llnl-2.3.2/contribs/README
--- slurm-llnl-2.2.7/contribs/README 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/README 2011-12-05 17:20:08.000000000 +0000
@@ -7,6 +7,47 @@
SLURM as their documentation. A quick description of the subdirectories
of the SLURM contribs distribution follows:
+ arrayrun [Adds support for array jobs]
+ README - Description of the arrayrun tool and its use
+ arrayrun - Command used to submit job arrays
+ arrayrun_worker - Back-end to the arrayrun command responsible for
+ spawning the jobs in the array
+
+ cray [Tools for use on Cray systems]
+ etc_init_d_munge - /etc/init.d/munge script for use with Munge
+ etc_sysconfig_slurm - /etc/sysconfig/slurm for Cray XT/XE systems
+ libalps_test_programs.tar.gz - set of tools to verify ALPS/BASIL support
+ logic. Note that this currently requires:
+ * hardcoding in libsdb/basil_mysql_routines.c:
+ mysql_real_connect(handle, "localhost", NULL, NULL, "XT5istanbul"
+ * suitable /etc/my.cnf, containing at least the lines
+ [client]
+ user=basic
+ password=basic
+ * setting the APBASIL in the libalps/Makefile, e.g.
+ APBASIL := slurm/alps_simulator/apbasil.sh
+ To use, extract the files then:
+ > cd libasil/
+ > make -C alps_tests all # runs basil parser tests
+ > make -C sdb_tests all # checks if database routines work
+ A tool named tuxadmin is also also included. When
+ executed with the -s or --slurm.conf option, this
+ contact the SDB to generate system-specific information
+ needed in slurm.conf (e.g. "NodeName=nid..." and
+ "PartitionName= Nodes=nid... MaxNodes=...".
+ munge_build_script.sh - script to build Munge from sources for Cray system
+ opt_modulefiles_slurm - enables use of Munge as soon as built
+ slurm-build-script.sh - script to build SLURM from sources for Cray system.
+ set LIBROOT and SLURM_SRC environment variables
+ before use, for example:
+ LIBROOT=/ufs/slurm/build
+ SLURM_SRC=${SLURM_SRC:-${LIBROOT}/slurm-2.3.0-0.pre4}
+ srun.pl - A perl wrapper for the aprun command. Use of this
+ wrapper requires that SLURM's perlapi be installed.
+ Execute configure with the --with-srun2aprun option
+ to build and install this instead of SLURM's normal
+ srun command.
+
env_cache_builder.c [C program]
This program will build an environment variable cache file for specific
users or all users on the system. This can be used to prevent the aborting
@@ -18,6 +59,8 @@
Example LUA scripts that can serve as SLURM plugins.
job_submit.lua - job_submit plugin that can set a job's default partition
using a very simple algorithm
+ job_submit_license.lua - job_submit plugin that can set a job's use of
+ system licenses
proctrack.lua - proctrack (process tracking) plugin that implements a
very simple job step container using CPUSETs
diff -Nru slurm-llnl-2.2.7/contribs/sjobexit/Makefile.in slurm-llnl-2.3.2/contribs/sjobexit/Makefile.in
--- slurm-llnl-2.2.7/contribs/sjobexit/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/sjobexit/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -64,6 +64,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -118,7 +120,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -155,6 +160,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -212,6 +218,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -247,6 +254,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/sjobexit/sjobexitmod.pl slurm-llnl-2.3.2/contribs/sjobexit/sjobexitmod.pl
--- slurm-llnl-2.2.7/contribs/sjobexit/sjobexitmod.pl 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/sjobexit/sjobexitmod.pl 2011-12-05 17:20:08.000000000 +0000
@@ -36,7 +36,7 @@
#
# Format for listing job.
#
-my $list_format = "JobID,Account,NNodes,NodeList,State,ExitCode,DerivedExitCode,DerivedExitStr";
+my $list_format = "JobID,Account,NNodes,NodeList,State,ExitCode,DerivedExitCode,Comment";
#
@@ -128,7 +128,7 @@
#
$execute_line = "sacctmgr -i modify job jobid=$jobid set";
- $execute_line .= " DerivedExitStr=\"$reason\"" if ($reason);
+ $execute_line .= " Comment=\"$reason\"" if ($reason);
$execute_line .= " DerivedExitCode=$code" if ($code);
$execute_line .= " Cluster=$cluster" if ($cluster);
@@ -163,7 +163,7 @@
$base [-man]
-e Modify the derived exit code to new value.
- -r Modify the derived exit string to new value.
+ -r Modify the job's comment field to new value.
-c Name of cluster (optional).
-l List information for a completed job.
-h Show usage.
@@ -215,11 +215,11 @@
sjobexitmod is a wrapper which effectively does the same operation as using the
sacct utility to modify certain aspects of a completed job.
- sacctmgr -i modify job jobid=1286 set DerivedExitCode=1 DerivedExitStr="code error"
+ sacctmgr -i modify job jobid=1286 set DerivedExitCode=1 Comment="code error"
or to list certain aspects of a completed job.
- sacct -o jobid,derivedexitcode,derivedexitstr,cluster
+ sacct -o jobid,derivedexitcode,comment,cluster
=head1 OPTIONS
@@ -247,7 +247,7 @@
=item B<-r> I
-The reason (DerivedEixtStr) for job termination.
+The reason (Comment) for job termination.
=item B
diff -Nru slurm-llnl-2.2.7/contribs/sjstat slurm-llnl-2.3.2/contribs/sjstat
--- slurm-llnl-2.2.7/contribs/sjstat 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/sjstat 2011-12-05 17:20:08.000000000 +0000
@@ -9,36 +9,36 @@
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Phil Eckert .
# CODE-OCEC-09-009. All rights reserved.
-#
+#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
-#
+#
# SLURM is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
-# In addition, as a special exception, the copyright holders give permission
+# In addition, as a special exception, the copyright holders give permission
# to link the code of portions of this program with the OpenSSL library under
-# certain conditions as described in each individual source file, and
-# distribute linked combinations including the two. You must obey the GNU
-# General Public License in all respects for all of the code used other than
-# OpenSSL. If you modify file(s) with this exception, you may extend this
-# exception to your version of the file(s), but you are not obligated to do
+# certain conditions as described in each individual source file, and
+# distribute linked combinations including the two. You must obey the GNU
+# General Public License in all respects for all of the code used other than
+# OpenSSL. If you modify file(s) with this exception, you may extend this
+# exception to your version of the file(s), but you are not obligated to do
# so. If you do not wish to do so, delete this exception statement from your
-# version. If you delete this exception statement from all source files in
+# version. If you delete this exception statement from all source files in
# the program, then also delete it here.
-#
+#
# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
-#
+#
# You should have received a copy of the GNU General Public License along
# with SLURM; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
+#
# Based off code with permission copyright 2006, 2007 Cluster Resources, Inc.
###############################################################################
@@ -70,6 +70,16 @@
my (%MaxNodes, %MaxTime);
#
+# Check SLURM status.
+#
+ isslurmup();
+
+#
+# See if bluegene system.
+#
+ my $bglflag = 1 if (`scontrol show config | grep -i bluegene`);
+
+#
# Get user options.
#
get_options();
@@ -99,6 +109,25 @@
#
+# Do usable for bluegene
+#
+sub Usable
+{
+ my ($tot, $out) = @_;
+
+ $tot *= 1024.0 if ($tot =~ /K/);
+ $out *= 1024.0 if ($out =~ /K/);
+
+ my $usable = $tot - $out;
+ if ($usable > 1024.0) {
+ $usable /= 1024.0;
+ $usable .= 'K';
+ }
+
+ return($usable);
+}
+
+#
# Get the SLURM partitions information.
#
sub do_sinfo
@@ -109,7 +138,7 @@
#
# Get the partition and node info.
#
- my $options = "\"%9P %6m %.4c %.16F %f\"";
+ my $options = "\"%9P %6m %.4c %.22F %f\"";
my $ct = 0;
my @sin = `sinfo -e -o $options`;
@@ -128,11 +157,13 @@
$s_idle[$ct] = $fields[1];
$s_out[$ct] = $fields[2];
$s_total[$ct] = $fields[3];
+
+ if ($bglflag) {
+ $s_usable[$ct] = Usable($s_total[$ct], $s_out[$ct]);
+ } else {
$s_usable[$ct] = $s_total[$ct] - $s_out[$ct];
-#
-# Handle "k" factor for Blue Gene.
-#
- $s_usable[$ct] .= 'K' if ($s_total[$ct] =~ /K/);
+ }
+
$s_feat[$ct] = ($line[4] .= " ");
$s_feat[$ct] =~ s/\(null\)//g;
$ct++;
@@ -140,10 +171,10 @@
printf("\nScheduling pool data:\n");
if ($verbose) {
- printf("------------------------------------------------------------------------------\n");
- printf(" Total Usable Free Node Time Other \n");
- printf("Pool Memory Cpus Nodes Nodes Nodes Limit Limit traits \n");
- printf("------------------------------------------------------------------------------\n");
+ printf("----------------------------------------------------------------------------------\n");
+ printf(" Total Usable Free Node Time Other \n");
+ printf("Pool Memory Cpus Nodes Nodes Nodes Limit Limit traits \n");
+ printf("----------------------------------------------------------------------------------\n");
} else {
printf("-------------------------------------------------------------\n");
printf("Pool Memory Cpus Total Usable Free Other Traits \n");
@@ -154,17 +185,15 @@
if ($verbose) {
my $p = $s_part[$i];
$p =~ s/\*//;
- printf("%-9s %6dMb %5d %6s %7s %6s %6s %6s %-s\n",
+ printf("%-9s %6dMb %5s %6s %7s %6s %6s %10s %-s\n",
$s_part[$i], $s_mem[$i], $s_cpu[$i],
- $s_total[$i],
- $s_total[$i] - $s_out[$i],
+ $s_total[$i], $s_usable[$i],
$s_idle[$i], $MaxNodes{$p},
$MaxTime{$p}, $s_feat[$i]);
} else {
- printf("%-9s %6dMb %5d %6s %6s %6s %-s\n",
+ printf("%-9s %6dMb %5s %6s %6s %6s %-s\n",
$s_part[$i], $s_mem[$i], $s_cpu[$i],
- $s_total[$i],
- $s_total[$i] - $s_out[$i],
+ $s_total[$i], $s_usable[$i],
$s_idle[$i], $s_feat[$i]);
}
}
@@ -189,10 +218,10 @@
my $rval = system("scontrol show config | grep cons_res >> /dev/null");
if ($rval) {
$type = "Nodes";
- $options = "\"%8i %8u %.6D %2t %.11S %.12l %.9P %.11M %1000R\"";
+ $options = "\"%8i %8u %.6D %2t %S %.12l %.9P %.11M %1000R\"";
} else {
$type = "Procs";
- $options = "\"%8i %8u %.6C %2t %.11S %.12l %.9P %.11M %1000R\"";
+ $options = "\"%8i %8u %.6C %2t %S %.12l %.9P %.11M %1000R\"";
}
#
@@ -211,6 +240,8 @@
$s_user[$ct] = $line[1];
$s_nodes[$ct] = $line[2];
$s_status[$ct] = $line[3];
+ $line[4] =~ s/^.....//;
+ $line[4] = "N/A" if ($line[3] =~ /PD/);
$s_begin[$ct] = $line[4];
$s_limit[$ct] = $line[5];
if ($line[5] eq "UNLIMITED") {
@@ -233,10 +264,10 @@
printf("Running job data:\n");
if ($verbose) {
- printf("------------------------------------------------------------------------------------------------\n");
- printf(" Time Time Time \n");
- printf("JobID User $type Pool Status Used Limit Started Master/Other \n");
- printf("------------------------------------------------------------------------------------------------\n");
+ printf("---------------------------------------------------------------------------------------------------\n");
+ printf(" Time Time Time \n");
+ printf("JobID User $type Pool Status Used Limit Started Master/Other \n");
+ printf("---------------------------------------------------------------------------------------------------\n");
} else {
printf("----------------------------------------------------------------------\n");
printf("JobID User $type Pool Status Used Master/Other \n");
@@ -245,7 +276,7 @@
for (my $i = 0; $i < $ct; $i++) {
if ($verbose) {
- printf("%-8s %-8s %6s %-9s %-7s %10s %11s %11s %.12s\n",
+ printf("%-8s %-8s %6s %-9s %-7s %10s %11s %14s %.12s\n",
$s_job[$i], $s_user[$i], $s_nodes[$i],
$s_pool[$i], $s_status[$i],
$s_used[$i], $s_limit[$i], $s_begin[$i],
@@ -277,13 +308,12 @@
foreach my $tmp (@scon) {
chomp $tmp;
my @line = split(' ',$tmp);
- ($part) = ($tmp =~ m/PartitionName=(\S+)\s+/) if ($tmp =~ /PartitionName=/);
+ ($part) = ($tmp =~ m/PartitionName=(\S+)/) if ($tmp =~ /PartitionName=/);
($MaxTime{$part}) = ($tmp =~ m/MaxTime=(\S+)\s+/) if ($tmp =~ /MaxTime=/);
($MaxNodes{$part}) = ($tmp =~ m/MaxNodes=(\S+)\s+/) if ($tmp =~ /MaxNodes=/);
$MaxTime{$part} =~ s/UNLIMITED/UNLIM/ if ($MaxTime{$part});
$MaxNodes{$part} =~ s/UNLIMITED/UNLIM/ if ($MaxNodes{$part});
-
}
return;
@@ -380,12 +410,26 @@
}
+#
+# Determine if SLURM is available.
+#
+sub isslurmup
+{
+ my $out = `scontrol show part 2>&1`;
+ if ($?) {
+ printf("\n SLURM is not communicating.\n\n");
+ exit(1);
+ }
+
+ return;
+}
+
__END__
=head1 NAME
-B - List attributes of jobs under SLURM control
+B - List attributes of jobs under the SLURM control
=head1 SYNOPSIS
@@ -393,7 +437,7 @@
=head1 DESCRIPTION
-The B command is used to display statistics of jobs under control of SLURM.
+The B command is used to display statistics of jobs under control of SLURM.
The output is designed to give information on the resource usage and availablilty,
as well as information about jobs that are currently active on the machine. This output
is built using the SLURM utilities, sinfo, squeue and scontrol, the man pages for these
@@ -462,7 +506,7 @@
The Running job data contains information pertaining to the:
- JobID either the SLURM job id
+ JobID the SLURM job id
User owner of the job
Nodes nodes required, or in use by the job
(Note: On cpu scheduled machines, this field
@@ -495,10 +539,11 @@
pbatch* 15000Mb 8 1072 1070 174 UNLIM UNLIM (null)
Running job data:
- ----------------------------------------------------------------------------------------------
- JobID User Nodes Pool Status Used Limit Start Master/Other
- ----------------------------------------------------------------------------------------------
- 395 sam 200 pbatch PD 0:00 30:00 N/A (JobHeld)
+ ---------------------------------------------------------------------------------------------------
+ Time Time Time
+ JobID User Nodes Pool Status Used Limit Started Master/Other
+ ---------------------------------------------------------------------------------------------------
+ 38562 tom 4 pbatch PD 0:00 1:00:00 01-14T18:11:22 (JobHeld)
The added fields to the "Scheduling pool data" are:
@@ -510,11 +555,8 @@
Limit Time limit of job.
Start Start time of job.
-=head1 AUTHOR
-
-Written by Philip D. Eckert
-
=head1 REPORTING BUGS
Report bugs to
+=cut
diff -Nru slurm-llnl-2.2.7/contribs/slurmdb-direct/Makefile.in slurm-llnl-2.3.2/contribs/slurmdb-direct/Makefile.in
--- slurm-llnl-2.2.7/contribs/slurmdb-direct/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/slurmdb-direct/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -64,6 +64,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -118,7 +120,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -155,6 +160,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -212,6 +218,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -247,6 +254,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/time_login.c slurm-llnl-2.3.2/contribs/time_login.c
--- slurm-llnl-2.2.7/contribs/time_login.c 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/time_login.c 2011-12-05 17:20:08.000000000 +0000
@@ -17,7 +17,7 @@
* CODE-OCEC-09-009. All rights reserved.
*
* This file is part of SLURM, a resource management program.
- * For details, see .
+ * For details, see .
* Please also read the included file: DISCLAIMER.
*
* SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/Makefile.in slurm-llnl-2.3.2/contribs/torque/Makefile.in
--- slurm-llnl-2.2.7/contribs/torque/Makefile.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/Makefile.in 2011-12-05 17:20:08.000000000 +0000
@@ -64,6 +64,7 @@
$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
$(top_srcdir)/auxdir/x_ac_iso.m4 \
$(top_srcdir)/auxdir/x_ac_lua.m4 \
+ $(top_srcdir)/auxdir/x_ac_man2html.m4 \
$(top_srcdir)/auxdir/x_ac_munge.m4 \
$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@
$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+ $(top_srcdir)/auxdir/x_ac_srun.m4 \
$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -118,7 +120,10 @@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
BLCR_HOME = @BLCR_HOME@
BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -155,6 +160,7 @@
HAVE_AIX = @HAVE_AIX@
HAVE_ELAN = @HAVE_ELAN@
HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
HAVE_OPENSSL = @HAVE_OPENSSL@
HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -212,6 +218,7 @@
PTHREAD_LIBS = @PTHREAD_LIBS@
RANLIB = @RANLIB@
READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
RELEASE = @RELEASE@
SED = @SED@
SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -247,6 +254,7 @@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
diff -Nru slurm-llnl-2.2.7/contribs/torque/mpiexec.pl slurm-llnl-2.3.2/contribs/torque/mpiexec.pl
--- slurm-llnl-2.2.7/contribs/torque/mpiexec.pl 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/mpiexec.pl 2011-12-05 17:20:08.000000000 +0000
@@ -11,7 +11,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/pbsnodes.pl slurm-llnl-2.3.2/contribs/torque/pbsnodes.pl
--- slurm-llnl-2.2.7/contribs/torque/pbsnodes.pl 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/pbsnodes.pl 2011-12-05 17:20:08.000000000 +0000
@@ -11,7 +11,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/qdel.pl slurm-llnl-2.3.2/contribs/torque/qdel.pl
--- slurm-llnl-2.2.7/contribs/torque/qdel.pl 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/qdel.pl 2011-12-05 17:20:08.000000000 +0000
@@ -11,7 +11,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/qhold.pl slurm-llnl-2.3.2/contribs/torque/qhold.pl
--- slurm-llnl-2.2.7/contribs/torque/qhold.pl 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/qhold.pl 2011-12-05 17:20:08.000000000 +0000
@@ -12,7 +12,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/qrls.pl slurm-llnl-2.3.2/contribs/torque/qrls.pl
--- slurm-llnl-2.2.7/contribs/torque/qrls.pl 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/qrls.pl 2011-12-05 17:20:08.000000000 +0000
@@ -11,7 +11,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/qstat.pl slurm-llnl-2.3.2/contribs/torque/qstat.pl
--- slurm-llnl-2.2.7/contribs/torque/qstat.pl 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/qstat.pl 2011-12-05 17:20:08.000000000 +0000
@@ -11,7 +11,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/contribs/torque/qsub.pl slurm-llnl-2.3.2/contribs/torque/qsub.pl
--- slurm-llnl-2.2.7/contribs/torque/qsub.pl 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/torque/qsub.pl 2011-12-05 17:20:08.000000000 +0000
@@ -11,7 +11,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
@@ -186,7 +186,11 @@
$command .= " --tmp=$res_opts{file}" if $res_opts{file};
$command .= " --mem=$res_opts{mem}" if $res_opts{mem};
$command .= " --nice=$res_opts{nice}" if $res_opts{nice};
-
+# Cray-specific options
+$command .= " -n$res_opts{mppwidth}" if $res_opts{mppwidth};
+$command .= " -w$res_opts{mppnodes}" if $res_opts{mppnodes};
+$command .= " --cpus-per-task=$res_opts{mppdepth}" if $res_opts{mppdepth};
+$command .= " --ntasks-per-node=$res_opts{mppnppn}" if $res_opts{mppnppn};
$command .= " --begin=$start_time" if $start_time;
$command .= " --account=$account" if $account;
@@ -225,6 +229,12 @@
'pvmem' => "",
'software' => "",
'vmem' => "",
+ # Cray-specific resources
+ 'mppwidth' => "",
+ 'mppdepth' => "",
+ 'mppnppn' => "",
+ 'mppmem' => "",
+ 'mppnodes' => "",
'walltime' => ""
);
my @keys = keys(%opt);
@@ -238,7 +248,9 @@
$opt{cput} = get_minutes($opt{cput});
}
- if($opt{mem}) {
+ if($opt{mppmem}) {
+ $opt{mem} = convert_mb_format($opt{mppmem});
+ } elsif($opt{mem}) {
$opt{mem} = convert_mb_format($opt{mem});
}
diff -Nru slurm-llnl-2.2.7/contribs/web_apps/chart_stats.cgi slurm-llnl-2.3.2/contribs/web_apps/chart_stats.cgi
--- slurm-llnl-2.2.7/contribs/web_apps/chart_stats.cgi 2011-06-10 16:55:36.000000000 +0000
+++ slurm-llnl-2.3.2/contribs/web_apps/chart_stats.cgi 2011-12-05 17:20:08.000000000 +0000
@@ -15,7 +15,7 @@
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of SLURM, a resource management program.
-# For details, see .
+# For details, see .
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
diff -Nru slurm-llnl-2.2.7/COPYING slurm-llnl-2.3.2/COPYING
--- slurm-llnl-2.2.7/COPYING 2011-06-10 16:55:35.000000000 +0000
+++ slurm-llnl-2.3.2/COPYING 2011-12-05 17:20:08.000000000 +0000
@@ -45,7 +45,7 @@
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@@ -211,7 +211,7 @@
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
-
+
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
@@ -268,7 +268,7 @@
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
-
+
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
@@ -321,7 +321,7 @@
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
-
+
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
diff -Nru slurm-llnl-2.2.7/debian/changelog slurm-llnl-2.3.2/debian/changelog
--- slurm-llnl-2.2.7/debian/changelog 2011-12-03 04:22:18.000000000 +0000
+++ slurm-llnl-2.3.2/debian/changelog 2011-12-27 18:28:42.000000000 +0000
@@ -1,26 +1,11 @@
-slurm-llnl (2.2.7-2build3) precise; urgency=low
+slurm-llnl (2.3.2-1) unstable; urgency=low
- * No-change rebuild to drop spurious libsfgcc1 dependency on armhf.
-
- -- Adam Conrad Fri, 02 Dec 2011 21:22:18 -0700
-
-slurm-llnl (2.2.7-2build2) precise; urgency=low
-
- * Rebuild for libmysqlclient transition
-
- -- Clint Byrum Thu, 24 Nov 2011 00:24:56 -0800
-
-slurm-llnl (2.2.7-2build1) precise; urgency=low
-
- * Rebuild for Perl 5.14.
-
- -- Colin Watson Wed, 16 Nov 2011 14:03:45 +0000
-
-slurm-llnl (2.2.7-2) unstable; urgency=low
-
- * Rebuild package against postgresql-server-dev-9.1 (Closes: #639480)
+ * New upstream release
+ * slurm-llnl-configurator.html updated
+ * libpam-slurm packages merged to slurm-llnl source
+ * libpam-slurm package new versioning following slurm-llnl
- -- Gennaro Oliva Sat, 27 Aug 2011 19:19:03 +0200
+ -- Gennaro Oliva Tue, 27 Dec 2011 19:27:46 +0100
slurm-llnl (2.2.7-1) unstable; urgency=low
diff -Nru slurm-llnl-2.2.7/debian/clean slurm-llnl-2.3.2/debian/clean
--- slurm-llnl-2.2.7/debian/clean 2011-05-06 09:47:17.000000000 +0000
+++ slurm-llnl-2.3.2/debian/clean 2011-10-27 08:25:32.000000000 +0000
@@ -13,3 +13,6 @@
src/plugins/accounting_storage/common/*.l[oa]
src/plugins/accounting_storage/common/Makefile
src/common/global_defaults.c
+contribs/arrayrun/Makefile
+contribs/cray/Makefile
+contribs/lua/Makefile
diff -Nru slurm-llnl-2.2.7/debian/control slurm-llnl-2.3.2/debian/control
--- slurm-llnl-2.2.7/debian/control 2011-08-27 17:22:40.000000000 +0000
+++ slurm-llnl-2.3.2/debian/control 2011-11-25 13:24:47.000000000 +0000
@@ -2,8 +2,7 @@
Section: admin
Priority: extra
Maintainer: Gennaro Oliva
-Uploaders: Dirk Eddelbuettel
-Build-Depends: debhelper (>= 7.0.0), autotools-dev, libmunge-dev, libncurses5-dev, libssl-dev, po-debconf, python, libglade2-dev, libgtk2.0-dev, libmysqlclient-dev, postgresql-server-dev-9.1, libpam0g-dev, libperl-dev, chrpath
+Build-Depends: debhelper (>= 7.0.0), autotools-dev, libmunge-dev, libncurses5-dev, libssl-dev, po-debconf, python, libglade2-dev, libgtk2.0-dev, libmysqlclient-dev, postgresql-server-dev-8.4, libpam0g-dev, libperl-dev, chrpath, libpam0g-dev
Standards-Version: 3.9.2
Homepage: https://computing.llnl.gov/linux/slurm/
@@ -16,7 +15,7 @@
that strives to be simple, scalable, portable, fault-tolerant, and
interconnect agnostic.
-Package: libslurmdb22
+Package: libslurmdb23
Section: libs
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
@@ -25,7 +24,7 @@
is an open-source cluster resource management and job scheduling.
This package contains the SLURM DataBase Daemon runtime library.
-Package: libslurm22
+Package: libslurm23
Section: libs
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
@@ -46,7 +45,7 @@
Package: libslurm-dev
Section: libdevel
Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, libslurm22 (= ${binary:Version})
+Depends: ${shlibs:Depends}, ${misc:Depends}, libslurm23 (= ${binary:Version})
Conflicts: libslurm10-dev, libslurm11-dev, libslurm12-dev, libslurm13-dev, libslurm20-dev, libslurm21-dev
Replaces: libslurm10-dev, libslurm11-dev, libslurm12-dev, libslurm13-dev, libslurm20-dev, libslurm21-dev
Description: SLURM development files
@@ -57,7 +56,7 @@
Package: libslurmdb-dev
Section: libdevel
Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, libslurmdb22 (= ${binary:Version})
+Depends: ${shlibs:Depends}, ${misc:Depends}, libslurmdb23 (= ${binary:Version})
Description: SLURM DataBase Daemon development files
SLURM, the Simple Linux Utility for Resource Management,
is an open-source cluster resource management and job scheduling.
@@ -66,7 +65,7 @@
Package: libpmi0-dev
Section: libdevel
Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, libpmi0 (= ${binary:Version}), libslurm22 (= ${binary:Version})
+Depends: ${shlibs:Depends}, ${misc:Depends}, libpmi0 (= ${binary:Version}), libslurm23 (= ${binary:Version})
Description: SLURM PMI library implementation development files
SLURM, the Simple Linux Utility for Resource Management,
is an open-source cluster resource management and job scheduling.
@@ -150,3 +149,12 @@
.
This package contains the Torque compatibility wrappers.
+Package: libpam-slurm
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: PAM module to authenticate using the SLURM resource manager
+ Pluggable Authentication Module (PAM) for restricting access to compute
+ nodes where SLURM performs resource management. Access to the node is
+ restricted to user root and users who have been allocated resources on
+ that node.
+
diff -Nru slurm-llnl-2.2.7/debian/libpam-slurm.docs slurm-llnl-2.3.2/debian/libpam-slurm.docs
--- slurm-llnl-2.2.7/debian/libpam-slurm.docs 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/debian/libpam-slurm.docs 2011-11-28 13:03:46.000000000 +0000
@@ -0,0 +1 @@
+contribs/pam/README
diff -Nru slurm-llnl-2.2.7/debian/libslurm22.symbols slurm-llnl-2.3.2/debian/libslurm22.symbols
--- slurm-llnl-2.2.7/debian/libslurm22.symbols 2011-04-04 13:39:29.000000000 +0000
+++ slurm-llnl-2.3.2/debian/libslurm22.symbols 1970-01-01 00:00:00.000000000 +0000
@@ -1,698 +0,0 @@
-libslurm.so.22 libslurm22 #MINVER#
- islurm_get_rem_time2__@Base 1.3.8
- islurm_get_rem_time__@Base 1.3.8
- slurm_accept_msg_conn@Base 1.3.8
- slurm_accept_stream@Base 1.3.8
- slurm_accounting_enforce_string@Base 2.2.0
- slurm_acct_storage_fini@Base 1.3.8
- slurm_acct_storage_init@Base 1.3.8
- slurm_add_slash_to_quotes@Base 2.2.0
- slurm_addto_char_list@Base 1.3.8
- slurm_allocate_resources@Base 1.3.8
- slurm_allocate_resources_blocking@Base 1.3.8
- slurm_allocation_lookup@Base 1.3.8
- slurm_allocation_lookup_lite@Base 1.3.8
- slurm_allocation_msg_thr_create@Base 1.3.8
- slurm_allocation_msg_thr_destroy@Base 1.3.8
- slurm_api_clear_config@Base 1.3.8
- slurm_api_set_conf_file@Base 1.3.8
- slurm_api_set_default_config@Base 1.3.8
- slurm_api_version@Base 1.3.8
- slurm_arg_count@Base 1.3.8
- slurm_arg_idx_by_name@Base 1.3.8
- slurm_arg_name_by_idx@Base 1.3.8
- slurm_auth_context_create@Base 1.3.8
- slurm_auth_fini@Base 1.3.8
- slurm_auth_get_arg_desc@Base 1.3.8
- slurm_auth_init@Base 1.3.8
- slurm_bg_block_state_string@Base 2.2.0
- slurm_bit_alloc@Base 1.3.8
- slurm_bit_and@Base 1.3.8
- slurm_bit_clear@Base 1.3.8
- slurm_bit_clear_count@Base 1.3.8
- slurm_bit_copy@Base 1.3.8
- slurm_bit_copybits@Base 1.3.8
- slurm_bit_equal@Base 1.3.8
- slurm_bit_ffc@Base 1.3.8
- slurm_bit_ffs@Base 1.3.8
- slurm_bit_fill_gaps@Base 1.3.8
- slurm_bit_fls@Base 1.3.8
- slurm_bit_fmt@Base 1.3.8
- slurm_bit_fmt_binmask@Base 1.3.8
- slurm_bit_fmt_hexmask@Base 1.3.8
- slurm_bit_free@Base 1.3.8
- slurm_bit_get_bit_num@Base 1.3.8
- slurm_bit_get_pos_num@Base 1.3.8
- slurm_bit_nclear@Base 1.3.8
- slurm_bit_nffc@Base 1.3.8
- slurm_bit_nffs@Base 1.3.8
- slurm_bit_noc@Base 1.3.8
- slurm_bit_not@Base 1.3.8
- slurm_bit_nset@Base 1.3.8
- slurm_bit_nset_max_count@Base 1.3.8
- slurm_bit_or@Base 1.3.8
- slurm_bit_overlap@Base 1.3.8
- slurm_bit_pick_cnt@Base 1.3.8
- slurm_bit_realloc@Base 1.3.8
- slurm_bit_rotate@Base 1.3.8
- slurm_bit_rotate_copy@Base 1.3.8
- slurm_bit_set@Base 1.3.8
- slurm_bit_set_count@Base 1.3.8
- slurm_bit_size@Base 1.3.8
- slurm_bit_super_set@Base 1.3.8
- slurm_bit_test@Base 1.3.8
- slurm_bit_unfmt@Base 1.3.8
- slurm_bit_unfmt_binmask@Base 1.3.8
- slurm_bit_unfmt_hexmask@Base 1.3.8
- slurm_bitfmt2int@Base 1.3.8
- slurm_checkpoint_able@Base 1.3.8
- slurm_checkpoint_complete@Base 1.3.8
- slurm_checkpoint_create@Base 1.3.8
- slurm_checkpoint_disable@Base 1.3.8
- slurm_checkpoint_enable@Base 1.3.8
- slurm_checkpoint_error@Base 1.3.8
- slurm_checkpoint_requeue@Base 2.2.0
- slurm_checkpoint_restart@Base 1.3.8
- slurm_checkpoint_task_complete@Base 1.3.8
- slurm_checkpoint_tasks@Base 2.0.0
- slurm_checkpoint_vacate@Base 1.3.8
- slurm_clear_trigger@Base 1.3.8
- slurm_close_accepted_conn@Base 1.3.8
- slurm_close_slurmdbd_conn@Base 1.3.8
- slurm_close_stream@Base 1.3.8
- slurm_complete_job@Base 1.3.8
- slurm_conf_destroy@Base 1.3.8
- slurm_conf_downnodes_array@Base 1.3.8
- slurm_conf_expand_slurmd_path@Base 1.3.8
- slurm_conf_get_addr@Base 1.3.8
- slurm_conf_get_aliased_nodename@Base 1.3.8
- slurm_conf_get_aliases@Base 2.2.3
- slurm_conf_get_cpus_sct@Base 1.3.8
- slurm_conf_get_hostname@Base 1.3.8
- slurm_conf_get_nodeaddr@Base 2.0.0
- slurm_conf_get_nodename@Base 1.3.8
- slurm_conf_get_port@Base 1.3.8
- slurm_conf_init@Base 1.3.8
- slurm_conf_install_fork_handlers@Base 1.3.8
- slurm_conf_lock@Base 1.3.8
- slurm_conf_mutex_init@Base 1.3.8
- slurm_conf_nodename_array@Base 1.3.8
- slurm_conf_options@Base 1.3.8
- slurm_conf_partition_array@Base 1.3.8
- slurm_conf_reinit@Base 1.3.8
- slurm_conf_unlock@Base 1.3.8
- slurm_conn_type_string@Base 2.2.0
- slurm_create_buf@Base 1.3.8
- slurm_create_partition@Base 2.0.0
- slurm_create_reservation@Base 2.0.0
- slurm_cred_begin_expiration@Base 1.3.8
- slurm_cred_copy@Base 1.3.8
- slurm_cred_create@Base 1.3.8
- slurm_cred_creator_ctx_create@Base 1.3.8
- slurm_cred_ctx_destroy@Base 1.3.8
- slurm_cred_ctx_get@Base 1.3.8
- slurm_cred_ctx_key_update@Base 1.3.8
- slurm_cred_ctx_pack@Base 1.3.8
- slurm_cred_ctx_set@Base 1.3.8
- slurm_cred_ctx_unpack@Base 1.3.8
- slurm_cred_destroy@Base 1.3.8
- slurm_cred_faker@Base 1.3.8
- slurm_cred_free_args@Base 1.3.8
- slurm_cred_get_args@Base 1.3.8
- slurm_cred_get_signature@Base 1.3.8
- slurm_cred_handle_reissue@Base 1.3.8
- slurm_cred_insert_jobid@Base 1.3.8
- slurm_cred_jobid_cached@Base 1.3.8
- slurm_cred_pack@Base 1.3.8
- slurm_cred_print@Base 1.3.8
- slurm_cred_revoke@Base 1.3.8
- slurm_cred_revoked@Base 1.3.8
- slurm_cred_rewind@Base 1.3.8
- slurm_cred_unpack@Base 1.3.8
- slurm_cred_verifier_ctx_create@Base 1.3.8
- slurm_cred_verify@Base 1.3.8
- slurm_crypto_fini@Base 1.3.8
- slurm_ctl_conf_2_key_pairs@Base 2.1.0
- slurm_debug2@Base 1.3.8
- slurm_debug3@Base 1.3.8
- slurm_debug4@Base 2.2.0
- slurm_debug5@Base 2.2.0
- slurm_debug@Base 1.3.8
- slurm_delete_partition@Base 1.3.8
- slurm_delete_reservation@Base 2.0.0
- slurm_destroy_association_shares_object@Base 2.0.0
- slurm_destroy_char@Base 1.3.8
- slurm_destroy_config_key_pair@Base 2.1.0
- slurm_destroy_priority_factors_object@Base 2.0.0
- slurm_destroy_uint32_ptr@Base 2.0.0
- slurm_dump_cleanup_list@Base 1.3.8
- slurm_env_array_append@Base 1.3.8
- slurm_env_array_append_fmt@Base 1.3.8
- slurm_env_array_copy@Base 1.3.8
- slurm_env_array_create@Base 1.3.8
- slurm_env_array_free@Base 1.3.8
- slurm_env_array_merge@Base 1.3.8
- slurm_env_array_overwrite@Base 1.3.8
- slurm_env_array_overwrite_fmt@Base 1.3.8
- slurm_error@Base 1.3.8
- slurm_fatal@Base 1.3.8
- slurm_fatal_add_cleanup@Base 1.3.8
- slurm_fatal_add_cleanup_job@Base 1.3.8
- slurm_fatal_cleanup@Base 1.3.8
- slurm_fatal_remove_cleanup@Base 1.3.8
- slurm_fatal_remove_cleanup_job@Base 1.3.8
- slurm_fd_read_n@Base 1.3.8
- slurm_fd_set_blocking@Base 1.3.8
- slurm_fd_set_nonblocking@Base 1.3.8
- slurm_fd_write_n@Base 1.3.8
- slurm_free_accounting_update_msg@Base 1.3.8
- slurm_free_block_info@Base 2.2.0
- slurm_free_block_info_members@Base 2.2.0
- slurm_free_block_info_msg@Base 2.1.0
- slurm_free_block_info_request_msg@Base 2.1.0
- slurm_free_buf@Base 1.3.8
- slurm_free_checkpoint_comp_msg@Base 1.3.8
- slurm_free_checkpoint_msg@Base 1.3.8
- slurm_free_checkpoint_resp_msg@Base 1.3.8
- slurm_free_checkpoint_task_comp_msg@Base 1.3.8
- slurm_free_checkpoint_tasks_msg@Base 1.3.8
- slurm_free_complete_batch_script_msg@Base 1.3.8
- slurm_free_complete_job_allocation_msg@Base 1.3.8
- slurm_free_ctl_conf@Base 1.3.8
- slurm_free_delete_part_msg@Base 1.3.8
- slurm_free_epilog_complete_msg@Base 1.3.8
- slurm_free_file_bcast_msg@Base 1.3.8
- slurm_free_get_kvs_msg@Base 1.3.8
- slurm_free_job_alloc_info_msg@Base 1.3.8
- slurm_free_job_alloc_info_response_msg@Base 1.3.8
- slurm_free_job_desc_msg@Base 1.3.8
- slurm_free_job_id_msg@Base 1.3.8
- slurm_free_job_id_request_msg@Base 1.3.8
- slurm_free_job_id_response_msg@Base 1.3.8
- slurm_free_job_info@Base 1.3.8
- slurm_free_job_info_members@Base 1.3.8
- slurm_free_job_info_msg@Base 1.3.8
- slurm_free_job_info_request_msg@Base 1.3.8
- slurm_free_job_launch_msg@Base 1.3.8
- slurm_free_job_notify_msg@Base 1.3.8
- slurm_free_job_step_create_request_msg@Base 1.3.8
- slurm_free_job_step_create_response_msg@Base 1.3.8
- slurm_free_job_step_id_msg@Base 1.3.8
- slurm_free_job_step_info_members@Base 2.2.0
- slurm_free_job_step_info_request_msg@Base 1.3.8
- slurm_free_job_step_info_response_msg@Base 1.3.8
- slurm_free_job_step_kill_msg@Base 1.3.8
- slurm_free_job_step_pids@Base 2.2.0
- slurm_free_job_step_stat@Base 2.2.0
- slurm_free_kill_job_msg@Base 1.3.8
- slurm_free_kill_tasks_msg@Base 1.3.8
- slurm_free_kvs_comm_set@Base 1.3.8
- slurm_free_last_update_msg@Base 1.3.8
- slurm_free_launch_tasks_request_msg@Base 1.3.8
- slurm_free_launch_tasks_response_msg@Base 1.3.8
- slurm_free_msg@Base 1.3.8
- slurm_free_msg_data@Base 1.3.8
- slurm_free_node_info_members@Base 2.2.0
- slurm_free_node_info_msg@Base 1.3.8
- slurm_free_node_info_request_msg@Base 1.3.8
- slurm_free_node_registration_status_msg@Base 1.3.8
- slurm_free_part_info_request_msg@Base 1.3.8
- slurm_free_partition_info_members@Base 2.2.0
- slurm_free_partition_info_msg@Base 1.3.8
- slurm_free_priority_factors_request_msg@Base 2.0.0
- slurm_free_priority_factors_response_msg@Base 2.0.0
- slurm_free_reattach_tasks_request_msg@Base 1.3.8
- slurm_free_reattach_tasks_response_msg@Base 1.3.8
- slurm_free_reservation_info_msg@Base 2.0.0
- slurm_free_reserve_info_members@Base 2.2.0
- slurm_free_resource_allocation_response_msg@Base 1.3.8
- slurm_free_resv_desc_msg@Base 2.0.0
- slurm_free_resv_info_request_msg@Base 2.0.0
- slurm_free_resv_name_msg@Base 2.0.0
- slurm_free_return_code_msg@Base 1.3.8
- slurm_free_sbcast_cred_msg@Base 2.1.0
- slurm_free_set_debug_level_msg@Base 1.3.8
- slurm_free_shares_request_msg@Base 2.0.0
- slurm_free_shares_response_msg@Base 2.0.0
- slurm_free_shutdown_msg@Base 1.3.8
- slurm_free_signal_job_msg@Base 1.3.8
- slurm_free_slurmd_status@Base 1.3.8
- slurm_free_srun_exec_msg@Base 1.3.8
- slurm_free_srun_job_complete_msg@Base 1.3.8
- slurm_free_srun_node_fail_msg@Base 1.3.8
- slurm_free_srun_ping_msg@Base 1.3.8
- slurm_free_srun_step_missing_msg@Base 2.0.0
- slurm_free_srun_timeout_msg@Base 1.3.8
- slurm_free_srun_user_msg@Base 1.3.8
- slurm_free_step_complete_msg@Base 1.3.8
- slurm_free_submit_response_response_msg@Base 1.3.8
- slurm_free_suspend_msg@Base 1.3.8
- slurm_free_task_exit_msg@Base 1.3.8
- slurm_free_task_user_managed_io_stream_msg@Base 1.3.8
- slurm_free_topo_info_msg@Base 2.0.0
- slurm_free_trigger_msg@Base 1.3.8
- slurm_free_update_job_time_msg@Base 1.3.8
- slurm_free_update_node_msg@Base 1.3.8
- slurm_free_update_part_msg@Base 1.3.8
- slurm_free_update_step_msg@Base 2.2.0
- slurm_free_will_run_response_msg@Base 1.3.8
- slurm_get_accounting_storage_backup_host@Base 2.0.0
- slurm_get_accounting_storage_enforce@Base 1.3.8
- slurm_get_accounting_storage_host@Base 1.3.8
- slurm_get_accounting_storage_loc@Base 1.3.8
- slurm_get_accounting_storage_pass@Base 1.3.8
- slurm_get_accounting_storage_port@Base 1.3.8
- slurm_get_accounting_storage_type@Base 1.3.8
- slurm_get_accounting_storage_user@Base 1.3.8
- slurm_get_addr@Base 1.3.8
- slurm_get_api_config@Base 1.3.8
- slurm_get_auth_type@Base 1.3.8
- slurm_get_avail_procs@Base 1.3.8
- slurm_get_batch_start_timeout@Base 1.3.12
- slurm_get_checkpoint_type@Base 1.3.8
- slurm_get_cluster_name@Base 1.3.8
- slurm_get_complete_wait@Base 2.0.0
- slurm_get_controller_addr_spec@Base 1.3.8
- slurm_get_crypto_type@Base 1.3.8
- slurm_get_debug_flags@Base 2.0.0
- slurm_get_def_mem_per_cpu@Base 2.1.0
- slurm_get_end_time@Base 1.3.8
- slurm_get_env_timeout@Base 1.3.8
- slurm_get_epilog_msg_time@Base 1.3.8
- slurm_get_errno@Base 1.3.8
- slurm_get_fast_schedule@Base 1.3.8
- slurm_get_gres_plugins@Base 2.2.0
- slurm_get_hash_val@Base 2.2.0
- slurm_get_health_check_program@Base 1.3.8
- slurm_get_ip_str@Base 1.3.8
- slurm_get_is_association_based_accounting@Base 1.3.9
- slurm_get_job_steps@Base 1.3.8
- slurm_get_job_submit_plugins@Base 2.2.0
- slurm_get_jobacct_gather_freq@Base 1.3.8
- slurm_get_jobacct_gather_type@Base 1.3.8
- slurm_get_jobcomp_host@Base 1.3.8
- slurm_get_jobcomp_loc@Base 1.3.8
- slurm_get_jobcomp_pass@Base 1.3.8
- slurm_get_jobcomp_port@Base 1.3.8
- slurm_get_jobcomp_type@Base 1.3.8
- slurm_get_jobcomp_user@Base 1.3.8
- slurm_get_kill_on_bad_exit@Base 2.0.0
- slurm_get_kvs_comm_set@Base 1.3.8
- slurm_get_max_mem_per_cpu@Base 2.1.0
- slurm_get_mpi_default@Base 1.3.8
- slurm_get_mpi_params@Base 2.0.0
- slurm_get_msg_timeout@Base 1.3.8
- slurm_get_peer_addr@Base 1.3.8
- slurm_get_plugin_dir@Base 1.3.8
- slurm_get_preempt_mode@Base 2.1.0
- slurm_get_preempt_type@Base 2.1.0
- slurm_get_priority_calc_period@Base 2.1.0
- slurm_get_priority_decay_hl@Base 2.0.0
- slurm_get_priority_favor_small@Base 2.0.0
- slurm_get_priority_max_age@Base 2.0.0
- slurm_get_priority_reset_period@Base 2.0.0
- slurm_get_priority_type@Base 2.0.0
- slurm_get_priority_weight_age@Base 2.0.0
- slurm_get_priority_weight_fairshare@Base 2.0.0
- slurm_get_priority_weight_job_size@Base 2.0.0
- slurm_get_priority_weight_partition@Base 2.0.0
- slurm_get_priority_weight_qos@Base 2.0.0
- slurm_get_private_data@Base 1.3.8
- slurm_get_proctrack_type@Base 1.3.8
- slurm_get_propagate_prio_process@Base 1.3.8
- slurm_get_rem_time@Base 1.3.8
- slurm_get_resume_timeout@Base 2.0.0
- slurm_get_return_code@Base 1.3.8
- slurm_get_root_filter@Base 1.3.8
- slurm_get_sched_params@Base 2.0.0
- slurm_get_sched_port@Base 1.3.8
- slurm_get_sched_type@Base 1.3.8
- slurm_get_select_jobinfo@Base 1.3.8
- slurm_get_select_nodeinfo@Base 2.1.0
- slurm_get_select_type@Base 1.3.8
- slurm_get_slurm_user_id@Base 1.3.8
- slurm_get_slurmd_port@Base 1.3.8
- slurm_get_slurmd_user_id@Base 2.0.0
- slurm_get_srun_epilog@Base 1.3.8
- slurm_get_srun_prolog@Base 1.3.8
- slurm_get_state_save_location@Base 1.3.8
- slurm_get_stream_addr@Base 1.3.8
- slurm_get_suspend_time@Base 2.1.0
- slurm_get_suspend_timeout@Base 2.2.0
- slurm_get_switch_type@Base 1.3.8
- slurm_get_task_epilog@Base 1.3.8
- slurm_get_task_plugin@Base 1.3.8
- slurm_get_task_plugin_param@Base 1.3.8
- slurm_get_task_prolog@Base 1.3.8
- slurm_get_topology_plugin@Base 2.0.0
- slurm_get_track_wckey@Base 1.3.12
- slurm_get_tree_width@Base 1.3.8
- slurm_get_triggers@Base 1.3.8
- slurm_get_vsize_factor@Base 2.2.0
- slurm_get_wait_time@Base 1.3.8
- slurm_getenvp@Base 1.3.8
- slurm_grow_buf@Base 1.3.8
- slurm_hostlist_copy@Base 1.3.8
- slurm_hostlist_count@Base 1.3.8
- slurm_hostlist_create@Base 1.3.8
- slurm_hostlist_delete@Base 1.3.8
- slurm_hostlist_delete_host@Base 1.3.8
- slurm_hostlist_delete_nth@Base 1.3.8
- slurm_hostlist_deranged_string@Base 1.3.8
- slurm_hostlist_deranged_string_malloc@Base 2.2.0
- slurm_hostlist_deranged_string_xmalloc@Base 2.2.0
- slurm_hostlist_destroy@Base 1.3.8
- slurm_hostlist_find@Base 1.3.8
- slurm_hostlist_iterator_create@Base 1.3.8
- slurm_hostlist_iterator_destroy@Base 1.3.8
- slurm_hostlist_iterator_reset@Base 1.3.8
- slurm_hostlist_next@Base 1.3.8
- slurm_hostlist_next_range@Base 1.3.8
- slurm_hostlist_nth@Base 1.3.8
- slurm_hostlist_pop@Base 1.3.8
- slurm_hostlist_pop_range@Base 1.3.8
- slurm_hostlist_push@Base 1.3.8
- slurm_hostlist_push_host@Base 1.3.8
- slurm_hostlist_push_list@Base 1.3.8
- slurm_hostlist_ranged_string@Base 1.3.8
- slurm_hostlist_ranged_string_malloc@Base 2.2.0
- slurm_hostlist_ranged_string_xmalloc@Base 2.2.0
- slurm_hostlist_remove@Base 1.3.8
- slurm_hostlist_shift@Base 1.3.8
- slurm_hostlist_shift_range@Base 1.3.8
- slurm_hostlist_soft@Base 1.3.8
- slurm_hostlist_uniq@Base 1.3.8
- slurm_hostset_copy@Base 1.3.8
- slurm_hostset_count@Base 1.3.8
- slurm_hostset_create@Base 1.3.8
- slurm_hostset_delete@Base 1.3.8
- slurm_hostset_destroy@Base 1.3.8
- slurm_hostset_find@Base 1.3.8
- slurm_hostset_insert@Base 1.3.8
- slurm_hostset_nth@Base 1.3.8
- slurm_hostset_shift@Base 1.3.8
- slurm_hostset_shift_range@Base 1.3.8
- slurm_hostset_within@Base 1.3.8
- slurm_info@Base 1.3.8
- slurm_init_buf@Base 1.3.8
- slurm_init_job_desc_msg@Base 1.3.8
- slurm_init_msg_engine@Base 1.3.8
- slurm_init_msg_engine_addrname_port@Base 2.0.0
- slurm_init_msg_engine_port@Base 1.3.8
- slurm_init_part_desc_msg@Base 1.3.8
- slurm_init_resv_desc_msg@Base 2.0.0
- slurm_init_update_block_msg@Base 2.1.0
- slurm_init_update_node_msg@Base 2.0.0
- slurm_init_update_step_msg@Base 2.2.0
- slurm_int_and_set_count@Base 1.3.8
- slurm_job_cpus_allocated_on_node@Base 2.1.0
- slurm_job_cpus_allocated_on_node_id@Base 2.1.0
- slurm_job_node_ready@Base 1.3.8
- slurm_job_reason_string@Base 2.2.0
- slurm_job_state_num@Base 2.2.0
- slurm_job_state_string@Base 2.2.0
- slurm_job_state_string_compact@Base 2.2.0
- slurm_job_step_create@Base 1.3.8
- slurm_job_step_get_pids@Base 2.2.0
- slurm_job_step_layout_free@Base 1.3.8
- slurm_job_step_layout_get@Base 1.3.8
- slurm_job_step_pids_free@Base 2.2.0
- slurm_job_step_pids_response_msg_free@Base 2.2.0
- slurm_job_step_stat@Base 2.2.0
- slurm_job_step_stat_free@Base 2.2.0
- slurm_job_step_stat_response_msg_free@Base 2.2.0
- slurm_job_will_run@Base 1.3.8
- slurm_jobacct_common_alloc_jobacct@Base 2.2.0
- slurm_jobacct_common_free_jobacct@Base 2.2.0
- slurm_jobacct_common_pack@Base 2.2.0
- slurm_jobacct_common_unpack@Base 2.2.0
- slurm_jobacct_gather_fini@Base 1.3.8
- slurm_jobacct_gather_init@Base 1.3.8
- slurm_jobinfo_ctx_get@Base 1.3.8
- slurm_kill_job@Base 1.3.8
- slurm_kill_job_step@Base 1.3.8
- slurm_list_append@Base 1.3.8
- slurm_list_append_list@Base 1.3.8
- slurm_list_count@Base 1.3.8
- slurm_list_create@Base 1.3.8
- slurm_list_delete_all@Base 1.3.8
- slurm_list_delete_item@Base 1.3.8
- slurm_list_dequeue@Base 1.3.8
- slurm_list_destroy@Base 1.3.8
- slurm_list_enqueue@Base 1.3.8
- slurm_list_find@Base 1.3.8
- slurm_list_find_first@Base 1.3.8
- slurm_list_flush@Base 2.2.0
- slurm_list_for_each@Base 1.3.8
- slurm_list_insert@Base 1.3.8
- slurm_list_install_fork_handlers@Base 1.3.8
- slurm_list_is_empty@Base 1.3.8
- slurm_list_iterator_create@Base 1.3.8
- slurm_list_iterator_destroy@Base 1.3.8
- slurm_list_iterator_reset@Base 1.3.8
- slurm_list_next@Base 1.3.8
- slurm_list_peek@Base 1.3.8
- slurm_list_pop@Base 1.3.8
- slurm_list_prepend@Base 1.3.8
- slurm_list_push@Base 1.3.8
- slurm_list_remove@Base 1.3.8
- slurm_list_sort@Base 1.3.8
- slurm_list_transfer@Base 1.3.8
- slurm_listen_stream@Base 1.3.8
- slurm_load_block_info@Base 2.1.0
- slurm_load_ctl_conf@Base 1.3.8
- slurm_load_job@Base 1.3.8
- slurm_load_jobs@Base 1.3.8
- slurm_load_node@Base 1.3.8
- slurm_load_partitions@Base 1.3.8
- slurm_load_reservations@Base 2.0.0
- slurm_load_slurmd_status@Base 1.3.8
- slurm_load_topo@Base 2.0.0
- slurm_log_alter@Base 1.3.8
- slurm_log_fini@Base 1.3.8
- slurm_log_flush@Base 1.3.8
- slurm_log_fp@Base 1.3.8
- slurm_log_has_data@Base 1.3.8
- slurm_log_init@Base 1.3.8
- slurm_log_reinit@Base 1.3.8
- slurm_log_set_fpfx@Base 1.3.8
- slurm_make_time_str@Base 1.3.8
- slurm_msg_t_copy@Base 1.3.8
- slurm_msg_t_init@Base 1.3.8
- slurm_net_accept_stream@Base 1.3.8
- slurm_net_set_low_water@Base 1.3.8
- slurm_net_stream_listen@Base 1.3.8
- slurm_node_state_string@Base 2.2.0
- slurm_node_state_string_compact@Base 2.2.0
- slurm_node_use_string@Base 2.2.0
- slurm_notify_job@Base 1.3.8
- slurm_open_controller_conn@Base 1.3.8
- slurm_open_controller_conn_spec@Base 1.3.8
- slurm_open_msg_conn@Base 1.3.8
- slurm_open_slurmdbd_conn@Base 1.3.8
- slurm_open_stream@Base 1.3.8
- slurm_pack16@Base 1.3.8
- slurm_pack16_array@Base 1.3.8
- slurm_pack32@Base 1.3.8
- slurm_pack32_array@Base 1.3.8
- slurm_pack64@Base 1.3.8
- slurm_pack8@Base 1.3.8
- slurm_pack_msg_no_header@Base 1.3.8
- slurm_pack_slurm_addr@Base 1.3.8
- slurm_pack_slurm_addr_array@Base 1.3.8
- slurm_pack_time@Base 1.3.8
- slurm_packmem@Base 1.3.8
- slurm_packmem_array@Base 1.3.8
- slurm_packstr_array@Base 1.3.8
- slurm_parser@Base 1.3.8
- slurm_perror@Base 1.3.8
- slurm_pid2jobid@Base 1.3.8
- slurm_ping@Base 1.3.8
- slurm_plugin_get_syms@Base 2.2.0
- slurm_plugin_load_and_link@Base 2.2.0
- slurm_plugin_strerror@Base 2.2.0
- slurm_plugin_unload@Base 2.2.0
- slurm_plugrack_create@Base 2.2.0
- slurm_plugrack_destroy@Base 2.2.0
- slurm_plugrack_read_dir@Base 2.2.0
- slurm_plugrack_set_major_type@Base 2.2.0
- slurm_plugrack_set_paranoia@Base 2.2.0
- slurm_plugrack_use_by_type@Base 2.2.0
- slurm_pmi_finalize@Base 1.3.8
- slurm_preempt_mode_num@Base 2.2.0
- slurm_preempt_mode_string@Base 2.2.0
- slurm_print_block_info@Base 2.1.0
- slurm_print_block_info_msg@Base 2.1.0
- slurm_print_cpu_bind_help@Base 2.0.0
- slurm_print_ctl_conf@Base 1.3.8
- slurm_print_job_info@Base 1.3.8
- slurm_print_job_info_msg@Base 1.3.8
- slurm_print_job_step_info@Base 1.3.8
- slurm_print_job_step_info_msg@Base 1.3.8
- slurm_print_key_pairs@Base 2.0.0
- slurm_print_launch_task_msg@Base 1.3.8
- slurm_print_mem_bind_help@Base 2.0.0
- slurm_print_node_info_msg@Base 1.3.8
- slurm_print_node_table@Base 1.3.8
- slurm_print_partition_info@Base 1.3.8
- slurm_print_partition_info_msg@Base 1.3.8
- slurm_print_reservation_info@Base 2.0.0
- slurm_print_reservation_info_msg@Base 2.0.0
- slurm_print_slurm_addr@Base 1.3.8
- slurm_print_slurmd_status@Base 1.3.8
- slurm_print_topo_info_msg@Base 2.0.0
- slurm_print_topo_record@Base 2.0.0
- slurm_priority_fini@Base 2.0.0
- slurm_priority_init@Base 2.0.0
- slurm_private_data_string@Base 2.2.0
- slurm_pull_trigger@Base 2.2.0
- slurm_read_hostfile@Base 1.3.8
- slurm_read_stream@Base 1.3.8
- slurm_read_stream_timeout@Base 1.3.8
- slurm_receive_msg@Base 1.3.8
- slurm_receive_msg_and_forward@Base 1.3.8
- slurm_receive_msgs@Base 1.3.8
- slurm_reconfigure@Base 1.3.8
- slurm_requeue@Base 1.3.8
- slurm_reservation_flags_string@Base 2.2.0
- slurm_resume@Base 1.3.8
- slurm_sbcast_lookup@Base 2.1.0
- slurm_select_fini@Base 1.3.8
- slurm_select_init@Base 1.3.8
- slurm_send_addr_recv_msgs@Base 1.3.8
- slurm_send_kvs_comm_set@Base 1.3.8
- slurm_send_node_msg@Base 1.3.8
- slurm_send_only_controller_msg@Base 1.3.8
- slurm_send_only_node_msg@Base 1.3.8
- slurm_send_rc_msg@Base 1.3.8
- slurm_send_recv_controller_msg@Base 1.3.8
- slurm_send_recv_controller_rc_msg@Base 1.3.8
- slurm_send_recv_msgs@Base 1.3.8
- slurm_send_recv_node_msg@Base 1.3.8
- slurm_send_recv_rc_msg_only_one@Base 1.3.8
- slurm_send_recv_slurmdbd_msg@Base 1.3.8
- slurm_send_slurmdbd_msg@Base 1.3.8
- slurm_send_slurmdbd_recv_rc_msg@Base 1.3.8
- slurm_set_accounting_storage_host@Base 2.0.2
- slurm_set_accounting_storage_loc@Base 1.3.8
- slurm_set_accounting_storage_port@Base 1.3.11
- slurm_set_accounting_storage_user@Base 2.0.2
- slurm_set_addr@Base 1.3.8
- slurm_set_addr_any@Base 1.3.8
- slurm_set_addr_char@Base 1.3.8
- slurm_set_addr_uint@Base 1.3.8
- slurm_set_api_config@Base 1.3.8
- slurm_set_auth_type@Base 1.3.8
- slurm_set_debug_level@Base 1.3.8
- slurm_set_jobcomp_port@Base 1.3.11
- slurm_set_schedlog_level@Base 2.2.0
- slurm_set_stream_blocking@Base 1.3.8
- slurm_set_stream_non_blocking@Base 1.3.8
- slurm_set_tree_width@Base 1.3.8
- slurm_set_trigger@Base 1.3.8
- slurm_setenvpf@Base 1.3.8
- slurm_seterrno@Base 1.3.8
- slurm_shutdown@Base 1.3.8
- slurm_shutdown_msg_conn@Base 1.3.8
- slurm_shutdown_msg_engine@Base 1.3.8
- slurm_signal_job@Base 1.3.8
- slurm_signal_job_step@Base 1.3.8
- slurm_sort_char_list_asc@Base 1.3.9
- slurm_sort_char_list_desc@Base 1.3.9
- slurm_sort_key_pairs@Base 2.1.0
- slurm_sprint_block_info@Base 2.1.0
- slurm_sprint_cpu_bind_type@Base 1.3.8
- slurm_sprint_job_info@Base 1.3.8
- slurm_sprint_job_step_info@Base 1.3.8
- slurm_sprint_mem_bind_type@Base 1.3.8
- slurm_sprint_node_table@Base 1.3.8
- slurm_sprint_partition_info@Base 1.3.8
- slurm_sprint_reservation_info@Base 2.0.0
- slurm_step_ctx_create@Base 1.3.8
- slurm_step_ctx_create_no_alloc@Base 1.3.8
- slurm_step_ctx_daemon_per_node_hack@Base 1.3.8
- slurm_step_ctx_destroy@Base 1.3.8
- slurm_step_ctx_get@Base 1.3.8
- slurm_step_ctx_params_t_init@Base 1.3.8
- slurm_step_launch@Base 1.3.8
- slurm_step_launch_abort@Base 1.3.8
- slurm_step_launch_fwd_signal@Base 1.3.8
- slurm_step_launch_params_t_init@Base 1.3.8
- slurm_step_launch_wait_finish@Base 1.3.8
- slurm_step_launch_wait_start@Base 1.3.8
- slurm_step_layout_copy@Base 1.3.8
- slurm_step_layout_create@Base 1.3.8
- slurm_step_layout_destroy@Base 1.3.8
- slurm_step_layout_host_id@Base 1.3.8
- slurm_step_layout_host_name@Base 1.3.8
- slurm_step_layout_type_name@Base 2.0.0
- slurm_strcasestr@Base 2.0.0
- slurm_strerror@Base 1.3.8
- slurm_strlcpy@Base 1.3.8
- slurm_submit_batch_job@Base 1.3.8
- slurm_suspend@Base 1.3.8
- slurm_takeover@Base 2.0.0
- slurm_terminate_job@Base 1.3.8
- slurm_terminate_job_step@Base 1.3.8
- slurm_topo_build_config@Base 2.1.0
- slurm_topo_fini@Base 2.1.0
- slurm_topo_get_node_addr@Base 2.1.0
- slurm_topo_init@Base 2.1.0
- slurm_try_xmalloc@Base 1.3.8
- slurm_try_xrealloc@Base 1.3.8
- slurm_unpack16@Base 1.3.8
- slurm_unpack16_array@Base 1.3.8
- slurm_unpack32@Base 1.3.8
- slurm_unpack32_array@Base 1.3.8
- slurm_unpack64@Base 1.3.8
- slurm_unpack8@Base 1.3.8
- slurm_unpack_block_info_msg@Base 2.2.0
- slurm_unpack_slurm_addr_array@Base 1.3.8
- slurm_unpack_slurm_addr_no_alloc@Base 1.3.8
- slurm_unpack_time@Base 1.3.8
- slurm_unpackmem@Base 1.3.8
- slurm_unpackmem_array@Base 1.3.8
- slurm_unpackmem_malloc@Base 1.3.8
- slurm_unpackmem_ptr@Base 1.3.8
- slurm_unpackmem_xmalloc@Base 1.3.8
- slurm_unpackstr_array@Base 1.3.8
- slurm_unsetenvp@Base 1.3.8
- slurm_update_block@Base 2.1.0
- slurm_update_job@Base 1.3.8
- slurm_update_node@Base 1.3.8
- slurm_update_partition@Base 1.3.8
- slurm_update_reservation@Base 2.0.0
- slurm_update_step@Base 2.2.0
- slurm_verbose@Base 1.3.8
- slurm_verify_cpu_bind@Base 2.0.0
- slurm_verify_mem_bind@Base 2.0.0
- slurm_write_stream@Base 1.3.8
- slurm_write_stream_timeout@Base 1.3.8
- slurm_xassert_failed@Base 1.3.8
- slurm_xbasename@Base 1.3.8
- slurm_xfer_buf_data@Base 1.3.8
- slurm_xfree@Base 1.3.8
- slurm_xmalloc@Base 1.3.8
- slurm_xmemcat@Base 1.3.8
- slurm_xrealloc@Base 1.3.8
- slurm_xshort_hostname@Base 1.3.8
- slurm_xsignal@Base 1.3.8
- slurm_xsignal_block@Base 1.3.8
- slurm_xsignal_save_mask@Base 1.3.8
- slurm_xsignal_set_mask@Base 1.3.8
- slurm_xsignal_sigset_create@Base 1.3.8
- slurm_xsignal_unblock@Base 1.3.8
- slurm_xsize@Base 1.3.8
- slurm_xslurm_strerrorcat@Base 1.3.8
- slurm_xstrcat@Base 1.3.8
- slurm_xstrcatchar@Base 1.3.8
- slurm_xstrdup@Base 1.3.8
- slurm_xstrdup_printf@Base 1.3.8
- slurm_xstrfmtcat@Base 1.3.8
- slurm_xstrftimecat@Base 1.3.8
- slurm_xstring_is_whitespace@Base 1.3.8
- slurm_xstrncat@Base 2.2.0
- slurm_xstrndup@Base 1.3.8
- slurm_xstrstrip@Base 1.3.9
- slurm_xstrsubstitute@Base 1.3.8
- slurm_xstrtolower@Base 2.1.10
diff -Nru slurm-llnl-2.2.7/debian/libslurm23.symbols slurm-llnl-2.3.2/debian/libslurm23.symbols
--- slurm-llnl-2.2.7/debian/libslurm23.symbols 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/debian/libslurm23.symbols 2011-10-27 08:23:36.000000000 +0000
@@ -0,0 +1,866 @@
+libslurm.so.23 libslurm23 #MINVER#
+ islurm_get_rem_time2__@Base 1.3.8
+ islurm_get_rem_time__@Base 1.3.8
+ slurm_accept_msg_conn@Base 1.3.8
+ slurm_accept_stream@Base 1.3.8
+ slurm_accounting_enforce_string@Base 2.2.0
+ slurm_acct_storage_fini@Base 1.3.8
+ slurm_acct_storage_init@Base 1.3.8
+ slurm_add_slash_to_quotes@Base 2.2.0
+ slurm_addto_char_list@Base 1.3.8
+ slurm_allocate_resources@Base 1.3.8
+ slurm_allocate_resources_blocking@Base 1.3.8
+ slurm_allocation_lookup@Base 1.3.8
+ slurm_allocation_lookup_lite@Base 1.3.8
+ slurm_allocation_msg_thr_create@Base 1.3.8
+ slurm_allocation_msg_thr_destroy@Base 1.3.8
+ slurm_api_clear_config@Base 1.3.8
+ slurm_api_set_conf_file@Base 1.3.8
+ slurm_api_set_default_config@Base 1.3.8
+ slurm_api_version@Base 1.3.8
+ slurm_arg_count@Base 1.3.8
+ slurm_arg_idx_by_name@Base 1.3.8
+ slurm_arg_name_by_idx@Base 1.3.8
+ slurm_auth_context_create@Base 1.3.8
+ slurm_auth_fini@Base 1.3.8
+ slurm_auth_get_arg_desc@Base 1.3.8
+ slurm_auth_init@Base 1.3.8
+ slurm_bg_block_state_string@Base 2.2.0
+ slurm_bit_alloc@Base 1.3.8
+ slurm_bit_and@Base 1.3.8
+ slurm_bit_clear@Base 1.3.8
+ slurm_bit_clear_count@Base 1.3.8
+ slurm_bit_copy@Base 1.3.8
+ slurm_bit_copybits@Base 1.3.8
+ slurm_bit_equal@Base 1.3.8
+ slurm_bit_ffc@Base 1.3.8
+ slurm_bit_ffs@Base 1.3.8
+ slurm_bit_fill_gaps@Base 1.3.8
+ slurm_bit_fls@Base 1.3.8
+ slurm_bit_fmt@Base 1.3.8
+ slurm_bit_fmt_binmask@Base 1.3.8
+ slurm_bit_fmt_hexmask@Base 1.3.8
+ slurm_bit_free@Base 1.3.8
+ slurm_bit_get_bit_num@Base 1.3.8
+ slurm_bit_get_pos_num@Base 1.3.8
+ slurm_bit_nclear@Base 1.3.8
+ slurm_bit_nffc@Base 1.3.8
+ slurm_bit_nffs@Base 1.3.8
+ slurm_bit_noc@Base 1.3.8
+ slurm_bit_not@Base 1.3.8
+ slurm_bit_nset@Base 1.3.8
+ slurm_bit_nset_max_count@Base 1.3.8
+ slurm_bit_or@Base 1.3.8
+ slurm_bit_overlap@Base 1.3.8
+ slurm_bit_pick_cnt@Base 1.3.8
+ slurm_bit_realloc@Base 1.3.8
+ slurm_bit_rotate@Base 1.3.8
+ slurm_bit_rotate_copy@Base 1.3.8
+ slurm_bit_set@Base 1.3.8
+ slurm_bit_set_count@Base 1.3.8
+ slurm_bit_size@Base 1.3.8
+ slurm_bit_super_set@Base 1.3.8
+ slurm_bit_test@Base 1.3.8
+ slurm_bit_unfmt@Base 1.3.8
+ slurm_bit_unfmt_binmask@Base 1.3.8
+ slurm_bit_unfmt_hexmask@Base 1.3.8
+ slurm_bitfmt2int@Base 1.3.8
+ slurm_cgroup_conf@Base 2.3.1
+ slurm_checkpoint_able@Base 1.3.8
+ slurm_checkpoint_complete@Base 1.3.8
+ slurm_checkpoint_create@Base 1.3.8
+ slurm_checkpoint_disable@Base 1.3.8
+ slurm_checkpoint_enable@Base 1.3.8
+ slurm_checkpoint_error@Base 1.3.8
+ slurm_checkpoint_requeue@Base 2.2.0
+ slurm_checkpoint_restart@Base 1.3.8
+ slurm_checkpoint_task_complete@Base 1.3.8
+ slurm_checkpoint_tasks@Base 2.0.0
+ slurm_checkpoint_vacate@Base 1.3.8
+ slurm_clear_trigger@Base 1.3.8
+ slurm_close_accepted_conn@Base 1.3.8
+ slurm_close_slurmdbd_conn@Base 1.3.8
+ slurm_close_stream@Base 1.3.8
+ slurm_complete_job@Base 1.3.8
+ slurm_conf_destroy@Base 1.3.8
+ slurm_conf_downnodes_array@Base 1.3.8
+ slurm_conf_expand_slurmd_path@Base 1.3.8
+ slurm_conf_frontend_array@Base 2.3.1
+ slurm_conf_get_addr@Base 1.3.8
+ slurm_conf_get_aliased_nodename@Base 1.3.8
+ slurm_conf_get_aliases@Base 2.2.3
+ slurm_conf_get_cpus_sct@Base 1.3.8
+ slurm_conf_get_hostname@Base 1.3.8
+ slurm_conf_get_nodeaddr@Base 2.0.0
+ slurm_conf_get_nodename@Base 1.3.8
+ slurm_conf_get_port@Base 1.3.8
+ slurm_conf_init@Base 1.3.8
+ slurm_conf_install_fork_handlers@Base 1.3.8
+ slurm_conf_lock@Base 1.3.8
+ slurm_conf_mutex_init@Base 1.3.8
+ slurm_conf_nodename_array@Base 1.3.8
+ slurm_conf_options@Base 1.3.8
+ slurm_conf_partition_array@Base 1.3.8
+ slurm_conf_reinit@Base 1.3.8
+ slurm_conf_unlock@Base 1.3.8
+ slurm_conn_type_string@Base 2.2.0
+ slurm_conn_type_string_full@Base 2.3.1
+ slurm_create_buf@Base 1.3.8
+ slurm_create_partition@Base 2.0.0
+ slurm_create_reservation@Base 2.0.0
+ slurm_cred_begin_expiration@Base 1.3.8
+ slurm_cred_copy@Base 1.3.8
+ slurm_cred_create@Base 1.3.8
+ slurm_cred_creator_ctx_create@Base 1.3.8
+ slurm_cred_ctx_destroy@Base 1.3.8
+ slurm_cred_ctx_get@Base 1.3.8
+ slurm_cred_ctx_key_update@Base 1.3.8
+ slurm_cred_ctx_pack@Base 1.3.8
+ slurm_cred_ctx_set@Base 1.3.8
+ slurm_cred_ctx_unpack@Base 1.3.8
+ slurm_cred_destroy@Base 1.3.8
+ slurm_cred_faker@Base 1.3.8
+ slurm_cred_free_args@Base 1.3.8
+ slurm_cred_get_args@Base 1.3.8
+ slurm_cred_get_signature@Base 1.3.8
+ slurm_cred_handle_reissue@Base 1.3.8
+ slurm_cred_insert_jobid@Base 1.3.8
+ slurm_cred_jobid_cached@Base 1.3.8
+ slurm_cred_pack@Base 1.3.8
+ slurm_cred_print@Base 1.3.8
+ slurm_cred_revoke@Base 1.3.8
+ slurm_cred_revoked@Base 1.3.8
+ slurm_cred_rewind@Base 1.3.8
+ slurm_cred_unpack@Base 1.3.8
+ slurm_cred_verifier_ctx_create@Base 1.3.8
+ slurm_cred_verify@Base 1.3.8
+ slurm_crypto_fini@Base 1.3.8
+ slurm_ctl_conf_2_key_pairs@Base 2.1.0
+ slurm_debug2@Base 1.3.8
+ slurm_debug3@Base 1.3.8
+ slurm_debug4@Base 2.2.0
+ slurm_debug5@Base 2.2.0
+ slurm_debug@Base 1.3.8
+ slurm_delete_partition@Base 1.3.8
+ slurm_delete_reservation@Base 2.0.0
+ slurm_destroy_association_shares_object@Base 2.0.0
+ slurm_destroy_char@Base 1.3.8
+ slurm_destroy_config_key_pair@Base 2.1.0
+ slurm_destroy_priority_factors_object@Base 2.0.0
+ slurm_destroy_select_ba_request@Base 2.3.1
+ slurm_destroy_uint32_ptr@Base 2.0.0
+ slurm_diff_tv@Base 2.3.1
+ slurm_diff_tv_str@Base 2.3.1
+ slurm_dump_cleanup_list@Base 1.3.8
+ slurm_env_array_append@Base 1.3.8
+ slurm_env_array_append_fmt@Base 1.3.8
+ slurm_env_array_copy@Base 1.3.8
+ slurm_env_array_create@Base 1.3.8
+ slurm_env_array_free@Base 1.3.8
+ slurm_env_array_merge@Base 1.3.8
+ slurm_env_array_overwrite@Base 1.3.8
+ slurm_env_array_overwrite_fmt@Base 1.3.8
+ slurm_error@Base 1.3.8
+ slurm_fatal@Base 1.3.8
+ slurm_fatal_add_cleanup@Base 1.3.8
+ slurm_fatal_add_cleanup_job@Base 1.3.8
+ slurm_fatal_cleanup@Base 1.3.8
+ slurm_fatal_remove_cleanup@Base 1.3.8
+ slurm_fatal_remove_cleanup_job@Base 1.3.8
+ slurm_fd_read_n@Base 1.3.8
+ slurm_fd_set_blocking@Base 1.3.8
+ slurm_fd_set_nonblocking@Base 1.3.8
+ slurm_fd_write_n@Base 1.3.8
+ slurm_free_accounting_update_msg@Base 1.3.8
+ slurm_free_block_info@Base 2.2.0
+ slurm_free_block_info_members@Base 2.2.0
+ slurm_free_block_info_msg@Base 2.1.0
+ slurm_free_block_info_request_msg@Base 2.1.0
+ slurm_free_block_job_info@Base 2.3.1
+ slurm_free_buf@Base 1.3.8
+ slurm_free_checkpoint_comp_msg@Base 1.3.8
+ slurm_free_checkpoint_msg@Base 1.3.8
+ slurm_free_checkpoint_resp_msg@Base 1.3.8
+ slurm_free_checkpoint_task_comp_msg@Base 1.3.8
+ slurm_free_checkpoint_tasks_msg@Base 1.3.8
+ slurm_free_complete_batch_script_msg@Base 1.3.8
+ slurm_free_complete_job_allocation_msg@Base 1.3.8
+ slurm_free_ctl_conf@Base 1.3.8
+ slurm_free_delete_part_msg@Base 1.3.8
+ slurm_free_epilog_complete_msg@Base 1.3.8
+ slurm_free_file_bcast_msg@Base 1.3.8
+ slurm_free_front_end_info_members@Base 2.3.1
+ slurm_free_front_end_info_msg@Base 2.3.1
+ slurm_free_front_end_info_request_msg@Base 2.3.1
+ slurm_free_get_kvs_msg@Base 1.3.8
+ slurm_free_job_alloc_info_msg@Base 1.3.8
+ slurm_free_job_alloc_info_response_msg@Base 1.3.8
+ slurm_free_job_desc_msg@Base 1.3.8
+ slurm_free_job_id_msg@Base 1.3.8
+ slurm_free_job_id_request_msg@Base 1.3.8
+ slurm_free_job_id_response_msg@Base 1.3.8
+ slurm_free_job_info@Base 1.3.8
+ slurm_free_job_info_members@Base 1.3.8
+ slurm_free_job_info_msg@Base 1.3.8
+ slurm_free_job_info_request_msg@Base 1.3.8
+ slurm_free_job_launch_msg@Base 1.3.8
+ slurm_free_job_notify_msg@Base 1.3.8
+ slurm_free_job_step_create_request_msg@Base 1.3.8
+ slurm_free_job_step_create_response_msg@Base 1.3.8
+ slurm_free_job_step_id_msg@Base 1.3.8
+ slurm_free_job_step_info_members@Base 2.2.0
+ slurm_free_job_step_info_request_msg@Base 1.3.8
+ slurm_free_job_step_info_response_msg@Base 1.3.8
+ slurm_free_job_step_kill_msg@Base 1.3.8
+ slurm_free_job_step_pids@Base 2.2.0
+ slurm_free_job_step_stat@Base 2.2.0
+ slurm_free_kill_job_msg@Base 1.3.8
+ slurm_free_kill_tasks_msg@Base 1.3.8
+ slurm_free_kvs_comm_set@Base 1.3.8
+ slurm_free_last_update_msg@Base 1.3.8
+ slurm_free_launch_tasks_request_msg@Base 1.3.8
+ slurm_free_launch_tasks_response_msg@Base 1.3.8
+ slurm_free_msg@Base 1.3.8
+ slurm_free_msg_data@Base 1.3.8
+ slurm_free_node_info_members@Base 2.2.0
+ slurm_free_node_info_msg@Base 1.3.8
+ slurm_free_node_info_request_msg@Base 1.3.8
+ slurm_free_node_registration_status_msg@Base 1.3.8
+ slurm_free_part_info_request_msg@Base 1.3.8
+ slurm_free_partition_info_members@Base 2.2.0
+ slurm_free_partition_info_msg@Base 1.3.8
+ slurm_free_priority_factors_request_msg@Base 2.0.0
+ slurm_free_priority_factors_response_msg@Base 2.0.0
+ slurm_free_reattach_tasks_request_msg@Base 1.3.8
+ slurm_free_reattach_tasks_response_msg@Base 1.3.8
+ slurm_free_reservation_info_msg@Base 2.0.0
+ slurm_free_reserve_info_members@Base 2.2.0
+ slurm_free_resource_allocation_response_msg@Base 1.3.8
+ slurm_free_resv_desc_msg@Base 2.0.0
+ slurm_free_resv_info_request_msg@Base 2.0.0
+ slurm_free_resv_name_msg@Base 2.0.0
+ slurm_free_return_code_msg@Base 1.3.8
+ slurm_free_sbcast_cred_msg@Base 2.1.0
+ slurm_free_set_debug_flags_msg@Base 2.3.1
+ slurm_free_set_debug_level_msg@Base 1.3.8
+ slurm_free_shares_request_msg@Base 2.0.0
+ slurm_free_shares_response_msg@Base 2.0.0
+ slurm_free_shutdown_msg@Base 1.3.8
+ slurm_free_signal_job_msg@Base 1.3.8
+ slurm_free_slurmd_status@Base 1.3.8
+ slurm_free_spank_env_request_msg@Base 2.3.1
+ slurm_free_spank_env_responce_msg@Base 2.3.1
+ slurm_free_srun_exec_msg@Base 1.3.8
+ slurm_free_srun_job_complete_msg@Base 1.3.8
+ slurm_free_srun_node_fail_msg@Base 1.3.8
+ slurm_free_srun_ping_msg@Base 1.3.8
+ slurm_free_srun_step_missing_msg@Base 2.0.0
+ slurm_free_srun_timeout_msg@Base 1.3.8
+ slurm_free_srun_user_msg@Base 1.3.8
+ slurm_free_step_complete_msg@Base 1.3.8
+ slurm_free_submit_response_response_msg@Base 1.3.8
+ slurm_free_suspend_msg@Base 1.3.8
+ slurm_free_task_exit_msg@Base 1.3.8
+ slurm_free_task_user_managed_io_stream_msg@Base 1.3.8
+ slurm_free_topo_info_msg@Base 2.0.0
+ slurm_free_trigger_msg@Base 1.3.8
+ slurm_free_update_front_end_msg@Base 2.3.1
+ slurm_free_update_job_time_msg@Base 1.3.8
+ slurm_free_update_node_msg@Base 1.3.8
+ slurm_free_update_part_msg@Base 1.3.8
+ slurm_free_update_step_msg@Base 2.2.0
+ slurm_free_will_run_response_msg@Base 1.3.8
+ slurm_get_accounting_storage_backup_host@Base 2.0.0
+ slurm_get_accounting_storage_enforce@Base 1.3.8
+ slurm_get_accounting_storage_host@Base 1.3.8
+ slurm_get_accounting_storage_loc@Base 1.3.8
+ slurm_get_accounting_storage_pass@Base 1.3.8
+ slurm_get_accounting_storage_port@Base 1.3.8
+ slurm_get_accounting_storage_type@Base 1.3.8
+ slurm_get_accounting_storage_user@Base 1.3.8
+ slurm_get_addr@Base 1.3.8
+ slurm_get_api_config@Base 1.3.8
+ slurm_get_auth_type@Base 1.3.8
+ slurm_get_avail_procs@Base 1.3.8
+ slurm_get_batch_start_timeout@Base 1.3.12
+ slurm_get_checkpoint_type@Base 1.3.8
+ slurm_get_cluster_name@Base 1.3.8
+ slurm_get_complete_wait@Base 2.0.0
+ slurm_get_controller_addr_spec@Base 1.3.8
+ slurm_get_crypto_type@Base 1.3.8
+ slurm_get_debug_flags@Base 2.0.0
+ slurm_get_def_mem_per_cpu@Base 2.1.0
+ slurm_get_end_time@Base 1.3.8
+ slurm_get_env_timeout@Base 1.3.8
+ slurm_get_epilog_msg_time@Base 1.3.8
+ slurm_get_errno@Base 1.3.8
+ slurm_get_fast_schedule@Base 1.3.8
+ slurm_get_gres_plugins@Base 2.2.0
+ slurm_get_hash_val@Base 2.2.0
+ slurm_get_health_check_program@Base 1.3.8
+ slurm_get_ip_str@Base 1.3.8
+ slurm_get_is_association_based_accounting@Base 1.3.9
+ slurm_get_job_steps@Base 1.3.8
+ slurm_get_job_submit_plugins@Base 2.2.0
+ slurm_get_jobacct_gather_freq@Base 1.3.8
+ slurm_get_jobacct_gather_type@Base 1.3.8
+ slurm_get_jobcomp_host@Base 1.3.8
+ slurm_get_jobcomp_loc@Base 1.3.8
+ slurm_get_jobcomp_pass@Base 1.3.8
+ slurm_get_jobcomp_port@Base 1.3.8
+ slurm_get_jobcomp_type@Base 1.3.8
+ slurm_get_jobcomp_user@Base 1.3.8
+ slurm_get_kill_on_bad_exit@Base 2.0.0
+ slurm_get_kvs_comm_set@Base 1.3.8
+ slurm_get_max_mem_per_cpu@Base 2.1.0
+ slurm_get_mpi_default@Base 1.3.8
+ slurm_get_mpi_params@Base 2.0.0
+ slurm_get_msg_timeout@Base 1.3.8
+ slurm_get_peer_addr@Base 1.3.8
+ slurm_get_plugin_dir@Base 1.3.8
+ slurm_get_preempt_mode@Base 2.1.0
+ slurm_get_preempt_type@Base 2.1.0
+ slurm_get_priority_calc_period@Base 2.1.0
+ slurm_get_priority_decay_hl@Base 2.0.0
+ slurm_get_priority_favor_small@Base 2.0.0
+ slurm_get_priority_max_age@Base 2.0.0
+ slurm_get_priority_reset_period@Base 2.0.0
+ slurm_get_priority_type@Base 2.0.0
+ slurm_get_priority_weight_age@Base 2.0.0
+ slurm_get_priority_weight_fairshare@Base 2.0.0
+ slurm_get_priority_weight_job_size@Base 2.0.0
+ slurm_get_priority_weight_partition@Base 2.0.0
+ slurm_get_priority_weight_qos@Base 2.0.0
+ slurm_get_private_data@Base 1.3.8
+ slurm_get_proctrack_type@Base 1.3.8
+ slurm_get_propagate_prio_process@Base 1.3.8
+ slurm_get_rem_time@Base 1.3.8
+ slurm_get_resume_timeout@Base 2.0.0
+ slurm_get_return_code@Base 1.3.8
+ slurm_get_root_filter@Base 1.3.8
+ slurm_get_sched_params@Base 2.0.0
+ slurm_get_sched_port@Base 1.3.8
+ slurm_get_sched_type@Base 1.3.8
+ slurm_get_select_jobinfo@Base 1.3.8
+ slurm_get_select_nodeinfo@Base 2.1.0
+ slurm_get_select_type@Base 1.3.8
+ slurm_get_select_type_param@Base 2.3.1
+ slurm_get_slurm_user_id@Base 1.3.8
+ slurm_get_slurmd_port@Base 1.3.8
+ slurm_get_slurmd_user_id@Base 2.0.0
+ slurm_get_srun_epilog@Base 1.3.8
+ slurm_get_srun_prolog@Base 1.3.8
+ slurm_get_state_save_location@Base 1.3.8
+ slurm_get_stream_addr@Base 1.3.8
+ slurm_get_suspend_time@Base 2.1.0
+ slurm_get_suspend_timeout@Base 2.2.0
+ slurm_get_switch_type@Base 1.3.8
+ slurm_get_task_epilog@Base 1.3.8
+ slurm_get_task_plugin@Base 1.3.8
+ slurm_get_task_plugin_param@Base 1.3.8
+ slurm_get_task_prolog@Base 1.3.8
+ slurm_get_topology_plugin@Base 2.0.0
+ slurm_get_track_wckey@Base 1.3.12
+ slurm_get_tree_width@Base 1.3.8
+ slurm_get_triggers@Base 1.3.8
+ slurm_get_vsize_factor@Base 2.2.0
+ slurm_get_wait_time@Base 1.3.8
+ slurm_getenvp@Base 1.3.8
+ slurm_grow_buf@Base 1.3.8
+ slurm_hostlist_copy@Base 1.3.8
+ slurm_hostlist_count@Base 1.3.8
+ slurm_hostlist_create@Base 1.3.8
+ slurm_hostlist_create_dims@Base 2.3.1
+ slurm_hostlist_delete@Base 1.3.8
+ slurm_hostlist_delete_host@Base 1.3.8
+ slurm_hostlist_delete_nth@Base 1.3.8
+ slurm_hostlist_deranged_string@Base 1.3.8
+ slurm_hostlist_deranged_string_dims@Base 2.3.1
+ slurm_hostlist_deranged_string_malloc@Base 2.2.0
+ slurm_hostlist_deranged_string_xmalloc@Base 2.2.0
+ slurm_hostlist_deranged_string_xmalloc_dims@Base 2.3.1
+ slurm_hostlist_destroy@Base 1.3.8
+ slurm_hostlist_find@Base 1.3.8
+ slurm_hostlist_iterator_create@Base 1.3.8
+ slurm_hostlist_iterator_destroy@Base 1.3.8
+ slurm_hostlist_iterator_reset@Base 1.3.8
+ slurm_hostlist_next@Base 1.3.8
+ slurm_hostlist_next_range@Base 1.3.8
+ slurm_hostlist_nth@Base 1.3.8
+ slurm_hostlist_pop@Base 1.3.8
+ slurm_hostlist_pop_range@Base 1.3.8
+ slurm_hostlist_push@Base 1.3.8
+ slurm_hostlist_push_host@Base 1.3.8
+ slurm_hostlist_push_host_dims@Base 2.3.1
+ slurm_hostlist_push_list@Base 1.3.8
+ slurm_hostlist_ranged_string@Base 1.3.8
+ slurm_hostlist_ranged_string_dims@Base 2.3.1
+ slurm_hostlist_ranged_string_malloc@Base 2.2.0
+ slurm_hostlist_ranged_string_xmalloc@Base 2.2.0
+ slurm_hostlist_ranged_string_xmalloc_dims@Base 2.3.1
+ slurm_hostlist_remove@Base 1.3.8
+ slurm_hostlist_shift@Base 1.3.8
+ slurm_hostlist_shift_range@Base 1.3.8
+ slurm_hostlist_soft@Base 1.3.8
+ slurm_hostlist_uniq@Base 1.3.8
+ slurm_hostset_copy@Base 1.3.8
+ slurm_hostset_count@Base 1.3.8
+ slurm_hostset_create@Base 1.3.8
+ slurm_hostset_delete@Base 1.3.8
+ slurm_hostset_destroy@Base 1.3.8
+ slurm_hostset_find@Base 1.3.8
+ slurm_hostset_insert@Base 1.3.8
+ slurm_hostset_nth@Base 1.3.8
+ slurm_hostset_shift@Base 1.3.8
+ slurm_hostset_shift_range@Base 1.3.8
+ slurm_hostset_within@Base 1.3.8
+ slurm_info@Base 1.3.8
+ slurm_init_buf@Base 1.3.8
+ slurm_init_job_desc_msg@Base 1.3.8
+ slurm_init_msg_engine@Base 1.3.8
+ slurm_init_msg_engine_addrname_port@Base 2.0.0
+ slurm_init_msg_engine_port@Base 1.3.8
+ slurm_init_part_desc_msg@Base 1.3.8
+ slurm_init_resv_desc_msg@Base 2.0.0
+ slurm_init_update_block_msg@Base 2.1.0
+ slurm_init_update_front_end_msg@Base 2.3.1
+ slurm_init_update_node_msg@Base 2.0.0
+ slurm_init_update_step_msg@Base 2.2.0
+ slurm_int_and_set_count@Base 1.3.8
+ slurm_job_cpus_allocated_on_node@Base 2.1.0
+ slurm_job_cpus_allocated_on_node_id@Base 2.1.0
+ slurm_job_node_ready@Base 1.3.8
+ slurm_job_reason_string@Base 2.2.0
+ slurm_job_state_num@Base 2.2.0
+ slurm_job_state_string@Base 2.2.0
+ slurm_job_state_string_compact@Base 2.2.0
+ slurm_job_step_create@Base 1.3.8
+ slurm_job_step_get_pids@Base 2.2.0
+ slurm_job_step_layout_free@Base 1.3.8
+ slurm_job_step_layout_get@Base 1.3.8
+ slurm_job_step_pids_free@Base 2.2.0
+ slurm_job_step_pids_response_msg_free@Base 2.2.0
+ slurm_job_step_stat@Base 2.2.0
+ slurm_job_step_stat_free@Base 2.2.0
+ slurm_job_step_stat_response_msg_free@Base 2.2.0
+ slurm_job_will_run@Base 1.3.8
+ slurm_jobacct_common_alloc_jobacct@Base 2.2.0
+ slurm_jobacct_common_free_jobacct@Base 2.2.0
+ slurm_jobacct_common_pack@Base 2.2.0
+ slurm_jobacct_common_unpack@Base 2.2.0
+ slurm_jobacct_gather_fini@Base 1.3.8
+ slurm_jobacct_gather_init@Base 1.3.8
+ slurm_jobinfo_ctx_get@Base 1.3.8
+ slurm_kill_job@Base 1.3.8
+ slurm_kill_job_step@Base 1.3.8
+ slurm_list_append@Base 1.3.8
+ slurm_list_append_list@Base 1.3.8
+ slurm_list_count@Base 1.3.8
+ slurm_list_create@Base 1.3.8
+ slurm_list_delete_all@Base 1.3.8
+ slurm_list_delete_item@Base 1.3.8
+ slurm_list_dequeue@Base 1.3.8
+ slurm_list_destroy@Base 1.3.8
+ slurm_list_enqueue@Base 1.3.8
+ slurm_list_find@Base 1.3.8
+ slurm_list_find_first@Base 1.3.8
+ slurm_list_flush@Base 2.2.0
+ slurm_list_for_each@Base 1.3.8
+ slurm_list_insert@Base 1.3.8
+ slurm_list_install_fork_handlers@Base 1.3.8
+ slurm_list_is_empty@Base 1.3.8
+ slurm_list_iterator_create@Base 1.3.8
+ slurm_list_iterator_destroy@Base 1.3.8
+ slurm_list_iterator_reset@Base 1.3.8
+ slurm_list_next@Base 1.3.8
+ slurm_list_peek@Base 1.3.8
+ slurm_list_pop@Base 1.3.8
+ slurm_list_prepend@Base 1.3.8
+ slurm_list_push@Base 1.3.8
+ slurm_list_remove@Base 1.3.8
+ slurm_list_sort@Base 1.3.8
+ slurm_list_transfer@Base 1.3.8
+ slurm_listen_stream@Base 1.3.8
+ slurm_load_block_info@Base 2.1.0
+ slurm_load_ctl_conf@Base 1.3.8
+ slurm_load_front_end@Base 2.3.1
+ slurm_load_job@Base 1.3.8
+ slurm_load_jobs@Base 1.3.8
+ slurm_load_node@Base 1.3.8
+ slurm_load_partitions@Base 1.3.8
+ slurm_load_reservations@Base 2.0.0
+ slurm_load_slurmd_status@Base 1.3.8
+ slurm_load_topo@Base 2.0.0
+ slurm_log_alter@Base 1.3.8
+ slurm_log_fini@Base 1.3.8
+ slurm_log_flush@Base 1.3.8
+ slurm_log_fp@Base 1.3.8
+ slurm_log_has_data@Base 1.3.8
+ slurm_log_init@Base 1.3.8
+ slurm_log_reinit@Base 1.3.8
+ slurm_log_set_fpfx@Base 1.3.8
+ slurm_make_time_str@Base 1.3.8
+ slurm_msg_t_copy@Base 1.3.8
+ slurm_msg_t_init@Base 1.3.8
+ slurm_net_accept_stream@Base 1.3.8
+ slurm_net_set_low_water@Base 1.3.8
+ slurm_net_stream_listen@Base 1.3.8
+ slurm_node_state_string@Base 2.2.0
+ slurm_node_state_string_compact@Base 2.2.0
+ slurm_node_use_string@Base 2.2.0
+ slurm_notify_job@Base 1.3.8
+ slurm_open_controller_conn@Base 1.3.8
+ slurm_open_controller_conn_spec@Base 1.3.8
+ slurm_open_msg_conn@Base 1.3.8
+ slurm_open_slurmdbd_conn@Base 1.3.8
+ slurm_open_stream@Base 1.3.8
+ slurm_pack16@Base 1.3.8
+ slurm_pack16_array@Base 1.3.8
+ slurm_pack32@Base 1.3.8
+ slurm_pack32_array@Base 1.3.8
+ slurm_pack64@Base 1.3.8
+ slurm_pack8@Base 1.3.8
+ slurm_pack_block_job_info@Base 2.3.1
+ slurm_pack_slurm_addr@Base 1.3.8
+ slurm_pack_slurm_addr_array@Base 1.3.8
+ slurm_pack_time@Base 1.3.8
+ slurm_packdouble@Base 2.3.1
+ slurm_packmem@Base 1.3.8
+ slurm_packmem_array@Base 1.3.8
+ slurm_packstr_array@Base 1.3.8
+ slurm_parser@Base 1.3.8
+ slurm_perror@Base 1.3.8
+ slurm_pid2jobid@Base 1.3.8
+ slurm_ping@Base 1.3.8
+ slurm_plugin_get_syms@Base 2.2.0
+ slurm_plugin_load_and_link@Base 2.2.0
+ slurm_plugin_strerror@Base 2.2.0
+ slurm_plugin_unload@Base 2.2.0
+ slurm_plugrack_create@Base 2.2.0
+ slurm_plugrack_destroy@Base 2.2.0
+ slurm_plugrack_read_dir@Base 2.2.0
+ slurm_plugrack_set_major_type@Base 2.2.0
+ slurm_plugrack_set_paranoia@Base 2.2.0
+ slurm_plugrack_use_by_type@Base 2.2.0
+ slurm_pmi_finalize@Base 1.3.8
+ slurm_preempt_mode_num@Base 2.2.0
+ slurm_preempt_mode_string@Base 2.2.0
+ slurm_print_block_info@Base 2.1.0
+ slurm_print_block_info_msg@Base 2.1.0
+ slurm_print_cpu_bind_help@Base 2.0.0
+ slurm_print_ctl_conf@Base 1.3.8
+ slurm_print_front_end_info_msg@Base 2.3.1
+ slurm_print_front_end_table@Base 2.3.1
+ slurm_print_job_info@Base 1.3.8
+ slurm_print_job_info_msg@Base 1.3.8
+ slurm_print_job_step_info@Base 1.3.8
+ slurm_print_job_step_info_msg@Base 1.3.8
+ slurm_print_key_pairs@Base 2.0.0
+ slurm_print_launch_task_msg@Base 1.3.8
+ slurm_print_mem_bind_help@Base 2.0.0
+ slurm_print_node_info_msg@Base 1.3.8
+ slurm_print_node_table@Base 1.3.8
+ slurm_print_partition_info@Base 1.3.8
+ slurm_print_partition_info_msg@Base 1.3.8
+ slurm_print_reservation_info@Base 2.0.0
+ slurm_print_reservation_info_msg@Base 2.0.0
+ slurm_print_slurm_addr@Base 1.3.8
+ slurm_print_slurmd_status@Base 1.3.8
+ slurm_print_topo_info_msg@Base 2.0.0
+ slurm_print_topo_record@Base 2.0.0
+ slurm_priority_fini@Base 2.0.0
+ slurm_priority_init@Base 2.0.0
+ slurm_private_data_string@Base 2.2.0
+ slurm_pull_trigger@Base 2.2.0
+ slurm_read_hostfile@Base 1.3.8
+ slurm_read_stream@Base 1.3.8
+ slurm_read_stream_timeout@Base 1.3.8
+ slurm_receive_msg@Base 1.3.8
+ slurm_receive_msg_and_forward@Base 1.3.8
+ slurm_receive_msgs@Base 1.3.8
+ slurm_reconfigure@Base 1.3.8
+ slurm_requeue@Base 1.3.8
+ slurm_reservation_flags_string@Base 2.2.0
+ slurm_resume@Base 1.3.8
+ slurm_s_p_get_string@Base 2.3.1
+ slurm_s_p_get_uint32@Base 2.3.1
+ slurm_s_p_hashtbl_create@Base 2.3.1
+ slurm_s_p_hashtbl_destroy@Base 2.3.1
+ slurm_s_p_parse_file@Base 2.3.1
+ slurm_sbcast_lookup@Base 2.1.0
+ slurm_select_fini@Base 1.3.8
+ slurm_select_init@Base 1.3.8
+ slurm_send_addr_recv_msgs@Base 1.3.8
+ slurm_send_kvs_comm_set@Base 1.3.8
+ slurm_send_node_msg@Base 1.3.8
+ slurm_send_only_controller_msg@Base 1.3.8
+ slurm_send_only_node_msg@Base 1.3.8
+ slurm_send_rc_msg@Base 1.3.8
+ slurm_send_recv_controller_msg@Base 1.3.8
+ slurm_send_recv_controller_rc_msg@Base 1.3.8
+ slurm_send_recv_msgs@Base 1.3.8
+ slurm_send_recv_node_msg@Base 1.3.8
+ slurm_send_recv_rc_msg_only_one@Base 1.3.8
+ slurm_send_recv_slurmdbd_msg@Base 1.3.8
+ slurm_send_slurmdbd_msg@Base 1.3.8
+ slurm_send_slurmdbd_recv_rc_msg@Base 1.3.8
+ slurm_set_accounting_storage_host@Base 2.0.2
+ slurm_set_accounting_storage_loc@Base 1.3.8
+ slurm_set_accounting_storage_port@Base 1.3.11
+ slurm_set_accounting_storage_user@Base 2.0.2
+ slurm_set_addr@Base 1.3.8
+ slurm_set_addr_any@Base 1.3.8
+ slurm_set_addr_char@Base 1.3.8
+ slurm_set_addr_uint@Base 1.3.8
+ slurm_set_api_config@Base 1.3.8
+ slurm_set_auth_type@Base 1.3.8
+ slurm_set_debug_flags@Base 2.3.1
+ slurm_set_debug_level@Base 1.3.8
+ slurm_set_debugflags@Base 2.3.1
+ slurm_set_jobcomp_port@Base 1.3.11
+ slurm_set_schedlog_level@Base 2.2.0
+ slurm_set_stream_blocking@Base 1.3.8
+ slurm_set_stream_non_blocking@Base 1.3.8
+ slurm_set_tree_width@Base 1.3.8
+ slurm_set_trigger@Base 1.3.8
+ slurm_setenvpf@Base 1.3.8
+ slurm_seterrno@Base 1.3.8
+ slurm_shutdown@Base 1.3.8
+ slurm_shutdown_msg_conn@Base 1.3.8
+ slurm_shutdown_msg_engine@Base 1.3.8
+ slurm_signal_job@Base 1.3.8
+ slurm_signal_job_step@Base 1.3.8
+ slurm_sort_char_list_asc@Base 1.3.9
+ slurm_sort_char_list_desc@Base 1.3.9
+ slurm_sort_key_pairs@Base 2.1.0
+ slurm_sprint_block_info@Base 2.1.0
+ slurm_sprint_cpu_bind_type@Base 1.3.8
+ slurm_sprint_front_end_table@Base 2.3.1
+ slurm_sprint_job_info@Base 1.3.8
+ slurm_sprint_job_step_info@Base 1.3.8
+ slurm_sprint_mem_bind_type@Base 1.3.8
+ slurm_sprint_node_table@Base 1.3.8
+ slurm_sprint_partition_info@Base 1.3.8
+ slurm_sprint_reservation_info@Base 2.0.0
+ slurm_step_ctx_create@Base 1.3.8
+ slurm_step_ctx_create_no_alloc@Base 1.3.8
+ slurm_step_ctx_daemon_per_node_hack@Base 1.3.8
+ slurm_step_ctx_destroy@Base 1.3.8
+ slurm_step_ctx_get@Base 1.3.8
+ slurm_step_ctx_params_t_init@Base 1.3.8
+ slurm_step_launch@Base 1.3.8
+ slurm_step_launch_abort@Base 1.3.8
+ slurm_step_launch_fwd_signal@Base 1.3.8
+ slurm_step_launch_params_t_init@Base 1.3.8
+ slurm_step_launch_wait_finish@Base 1.3.8
+ slurm_step_launch_wait_start@Base 1.3.8
+ slurm_step_layout_copy@Base 1.3.8
+ slurm_step_layout_create@Base 1.3.8
+ slurm_step_layout_destroy@Base 1.3.8
+ slurm_step_layout_host_id@Base 1.3.8
+ slurm_step_layout_host_name@Base 1.3.8
+ slurm_step_layout_type_name@Base 2.0.0
+ slurm_strcasestr@Base 2.0.0
+ slurm_strerror@Base 1.3.8
+ slurm_strlcpy@Base 1.3.8
+ slurm_submit_batch_job@Base 1.3.8
+ slurm_suspend@Base 1.3.8
+ slurm_takeover@Base 2.0.0
+ slurm_terminate_job@Base 1.3.8
+ slurm_terminate_job_step@Base 1.3.8
+ slurm_topo_build_config@Base 2.1.0
+ slurm_topo_fini@Base 2.1.0
+ slurm_topo_generate_node_ranking@Base 2.3.1
+ slurm_topo_get_node_addr@Base 2.1.0
+ slurm_topo_init@Base 2.1.0
+ slurm_try_xmalloc@Base 1.3.8
+ slurm_try_xrealloc@Base 1.3.8
+ slurm_unpack16@Base 1.3.8
+ slurm_unpack16_array@Base 1.3.8
+ slurm_unpack32@Base 1.3.8
+ slurm_unpack32_array@Base 1.3.8
+ slurm_unpack64@Base 1.3.8
+ slurm_unpack8@Base 1.3.8
+ slurm_unpack_block_info_members@Base 2.3.1
+ slurm_unpack_block_info_msg@Base 2.2.0
+ slurm_unpack_slurm_addr_array@Base 1.3.8
+ slurm_unpack_slurm_addr_no_alloc@Base 1.3.8
+ slurm_unpack_time@Base 1.3.8
+ slurm_unpackdouble@Base 2.3.1
+ slurm_unpackmem@Base 1.3.8
+ slurm_unpackmem_array@Base 1.3.8
+ slurm_unpackmem_malloc@Base 1.3.8
+ slurm_unpackmem_ptr@Base 1.3.8
+ slurm_unpackmem_xmalloc@Base 1.3.8
+ slurm_unpackstr_array@Base 1.3.8
+ slurm_unsetenvp@Base 1.3.8
+ slurm_update_block@Base 2.1.0
+ slurm_update_front_end@Base 2.3.1
+ slurm_update_job@Base 1.3.8
+ slurm_update_node@Base 1.3.8
+ slurm_update_partition@Base 1.3.8
+ slurm_update_reservation@Base 2.0.0
+ slurm_update_step@Base 2.2.0
+ slurm_verbose@Base 1.3.8
+ slurm_verify_cpu_bind@Base 2.0.0
+ slurm_verify_mem_bind@Base 2.0.0
+ slurm_write_stream@Base 1.3.8
+ slurm_write_stream_timeout@Base 1.3.8
+ slurm_xassert_failed@Base 1.3.8
+ slurm_xbasename@Base 1.3.8
+ slurm_xfer_buf_data@Base 1.3.8
+ slurm_xfree@Base 1.3.8
+ slurm_xmalloc@Base 1.3.8
+ slurm_xmemcat@Base 1.3.8
+ slurm_xrealloc@Base 1.3.8
+ slurm_xshort_hostname@Base 1.3.8
+ slurm_xsignal@Base 1.3.8
+ slurm_xsignal_block@Base 1.3.8
+ slurm_xsignal_save_mask@Base 1.3.8
+ slurm_xsignal_set_mask@Base 1.3.8
+ slurm_xsignal_sigset_create@Base 1.3.8
+ slurm_xsignal_unblock@Base 1.3.8
+ slurm_xsize@Base 1.3.8
+ slurm_xslurm_strerrorcat@Base 1.3.8
+ slurm_xstrcat@Base 1.3.8
+ slurm_xstrcatchar@Base 1.3.8
+ slurm_xstrdup@Base 1.3.8
+ slurm_xstrdup_printf@Base 1.3.8
+ slurm_xstrfmtcat@Base 1.3.8
+ slurm_xstrftimecat@Base 1.3.8
+ slurm_xstring_is_whitespace@Base 1.3.8
+ slurm_xstrncat@Base 2.2.0
+ slurm_xstrndup@Base 1.3.8
+ slurm_xstrstrip@Base 1.3.9
+ slurm_xstrsubstitute@Base 1.3.8
+ slurm_xstrtolower@Base 2.1.10
+ slurmdb_addto_qos_char_list@Base 2.3.1
+ slurmdb_admin_level_str@Base 2.3.1
+ slurmdb_cluster_flags_2_str@Base 2.3.1
+ slurmdb_create_job_rec@Base 2.3.1
+ slurmdb_create_step_rec@Base 2.3.1
+ slurmdb_destroy_account_cond@Base 2.3.1
+ slurmdb_destroy_account_rec@Base 2.3.1
+ slurmdb_destroy_accounting_rec@Base 2.3.1
+ slurmdb_destroy_archive_cond@Base 2.3.1
+ slurmdb_destroy_archive_rec@Base 2.3.1
+ slurmdb_destroy_association_cond@Base 2.3.1
+ slurmdb_destroy_association_rec@Base 2.3.1
+ slurmdb_destroy_cluster_accounting_rec@Base 2.3.1
+ slurmdb_destroy_cluster_cond@Base 2.3.1
+ slurmdb_destroy_cluster_rec@Base 2.3.1
+ slurmdb_destroy_coord_rec@Base 2.3.1
+ slurmdb_destroy_event_cond@Base 2.3.1
+ slurmdb_destroy_event_rec@Base 2.3.1
+ slurmdb_destroy_hierarchical_rec@Base 2.3.1
+ slurmdb_destroy_job_cond@Base 2.3.1
+ slurmdb_destroy_job_modify_cond@Base 2.3.1
+ slurmdb_destroy_job_rec@Base 2.3.1
+ slurmdb_destroy_print_tree@Base 2.3.1
+ slurmdb_destroy_qos_cond@Base 2.3.1
+ slurmdb_destroy_qos_rec@Base 2.3.1
+ slurmdb_destroy_report_acct_grouping@Base 2.3.1
+ slurmdb_destroy_report_assoc_rec@Base 2.3.1
+ slurmdb_destroy_report_cluster_grouping@Base 2.3.1
+ slurmdb_destroy_report_cluster_rec@Base 2.3.1
+ slurmdb_destroy_report_job_grouping@Base 2.3.1
+ slurmdb_destroy_report_user_rec@Base 2.3.1
+ slurmdb_destroy_reservation_cond@Base 2.3.1
+ slurmdb_destroy_reservation_rec@Base 2.3.1
+ slurmdb_destroy_selected_step@Base 2.3.1
+ slurmdb_destroy_step_rec@Base 2.3.1
+ slurmdb_destroy_txn_cond@Base 2.3.1
+ slurmdb_destroy_txn_rec@Base 2.3.1
+ slurmdb_destroy_update_object@Base 2.3.1
+ slurmdb_destroy_update_shares_rec@Base 2.3.1
+ slurmdb_destroy_used_limits@Base 2.3.1
+ slurmdb_destroy_user_cond@Base 2.3.1
+ slurmdb_destroy_user_rec@Base 2.3.1
+ slurmdb_destroy_wckey_cond@Base 2.3.1
+ slurmdb_destroy_wckey_rec@Base 2.3.1
+ slurmdb_get_acct_hierarchical_rec_list@Base 2.3.1
+ slurmdb_get_hierarchical_sorted_assoc_list@Base 2.3.1
+ slurmdb_get_info_cluster@Base 2.3.1
+ slurmdb_init_association_rec@Base 2.3.1
+ slurmdb_init_cluster_cond@Base 2.3.1
+ slurmdb_init_cluster_rec@Base 2.3.1
+ slurmdb_init_qos_rec@Base 2.3.1
+ slurmdb_init_wckey_rec@Base 2.3.1
+ slurmdb_pack_account_cond@Base 2.3.1
+ slurmdb_pack_account_rec@Base 2.3.1
+ slurmdb_pack_accounting_rec@Base 2.3.1
+ slurmdb_pack_archive_cond@Base 2.3.1
+ slurmdb_pack_archive_rec@Base 2.3.1
+ slurmdb_pack_association_cond@Base 2.3.1
+ slurmdb_pack_association_rec@Base 2.3.1
+ slurmdb_pack_cluster_accounting_rec@Base 2.3.1
+ slurmdb_pack_cluster_cond@Base 2.3.1
+ slurmdb_pack_cluster_rec@Base 2.3.1
+ slurmdb_pack_coord_rec@Base 2.3.1
+ slurmdb_pack_event_cond@Base 2.3.1
+ slurmdb_pack_event_rec@Base 2.3.1
+ slurmdb_pack_job_cond@Base 2.3.1
+ slurmdb_pack_job_modify_cond@Base 2.3.1
+ slurmdb_pack_job_rec@Base 2.3.1
+ slurmdb_pack_qos_cond@Base 2.3.1
+ slurmdb_pack_qos_rec@Base 2.3.1
+ slurmdb_pack_reservation_cond@Base 2.3.1
+ slurmdb_pack_reservation_rec@Base 2.3.1
+ slurmdb_pack_selected_step@Base 2.3.1
+ slurmdb_pack_step_rec@Base 2.3.1
+ slurmdb_pack_txn_cond@Base 2.3.1
+ slurmdb_pack_txn_rec@Base 2.3.1
+ slurmdb_pack_update_object@Base 2.3.1
+ slurmdb_pack_used_limits@Base 2.3.1
+ slurmdb_pack_user_cond@Base 2.3.1
+ slurmdb_pack_user_rec@Base 2.3.1
+ slurmdb_pack_wckey_cond@Base 2.3.1
+ slurmdb_pack_wckey_rec@Base 2.3.1
+ slurmdb_parse_purge@Base 2.3.1
+ slurmdb_problem_str_get@Base 2.3.1
+ slurmdb_purge_string@Base 2.3.1
+ slurmdb_qos_flags_str@Base 2.3.1
+ slurmdb_qos_str@Base 2.3.1
+ slurmdb_report_set_start_end_time@Base 2.3.1
+ slurmdb_send_accounting_update@Base 2.3.1
+ slurmdb_setup_cluster_dim_size@Base 2.3.1
+ slurmdb_setup_cluster_dims@Base 2.3.1
+ slurmdb_setup_cluster_flags@Base 2.3.1
+ slurmdb_setup_cluster_name_dims@Base 2.3.1
+ slurmdb_slurmdbd_free_id_rc_msg@Base 2.3.1
+ slurmdb_slurmdbd_free_list_msg@Base 2.3.1
+ slurmdb_slurmdbd_free_rc_msg@Base 2.3.1
+ slurmdb_slurmdbd_free_usage_msg@Base 2.3.1
+ slurmdb_sort_hierarchical_assoc_list@Base 2.3.1
+ slurmdb_str_2_cluster_flags@Base 2.3.1
+ slurmdb_tree_name_get@Base 2.3.1
+ slurmdb_unpack_account_cond@Base 2.3.1
+ slurmdb_unpack_account_rec@Base 2.3.1
+ slurmdb_unpack_accounting_rec@Base 2.3.1
+ slurmdb_unpack_archive_cond@Base 2.3.1
+ slurmdb_unpack_archive_rec@Base 2.3.1
+ slurmdb_unpack_association_cond@Base 2.3.1
+ slurmdb_unpack_association_rec@Base 2.3.1
+ slurmdb_unpack_cluster_accounting_rec@Base 2.3.1
+ slurmdb_unpack_cluster_cond@Base 2.3.1
+ slurmdb_unpack_cluster_rec@Base 2.3.1
+ slurmdb_unpack_coord_rec@Base 2.3.1
+ slurmdb_unpack_event_cond@Base 2.3.1
+ slurmdb_unpack_event_rec@Base 2.3.1
+ slurmdb_unpack_job_cond@Base 2.3.1
+ slurmdb_unpack_job_modify_cond@Base 2.3.1
+ slurmdb_unpack_job_rec@Base 2.3.1
+ slurmdb_unpack_qos_cond@Base 2.3.1
+ slurmdb_unpack_qos_rec@Base 2.3.1
+ slurmdb_unpack_reservation_cond@Base 2.3.1
+ slurmdb_unpack_reservation_rec@Base 2.3.1
+ slurmdb_unpack_selected_step@Base 2.3.1
+ slurmdb_unpack_step_rec@Base 2.3.1
+ slurmdb_unpack_txn_cond@Base 2.3.1
+ slurmdb_unpack_txn_rec@Base 2.3.1
+ slurmdb_unpack_update_object@Base 2.3.1
+ slurmdb_unpack_used_limits@Base 2.3.1
+ slurmdb_unpack_user_cond@Base 2.3.1
+ slurmdb_unpack_user_rec@Base 2.3.1
+ slurmdb_unpack_wckey_cond@Base 2.3.1
+ slurmdb_unpack_wckey_rec@Base 2.3.1
diff -Nru slurm-llnl-2.2.7/debian/libslurmdb22.symbols slurm-llnl-2.3.2/debian/libslurmdb22.symbols
--- slurm-llnl-2.2.7/debian/libslurmdb22.symbols 2011-04-04 13:39:29.000000000 +0000
+++ slurm-llnl-2.3.2/debian/libslurmdb22.symbols 1970-01-01 00:00:00.000000000 +0000
@@ -1,870 +0,0 @@
-libslurmdb.so.22 libslurmdb22 #MINVER#
- slurm_accept_msg_conn@Base 2.2.0
- slurm_accept_stream@Base 2.2.0
- slurm_accounting_enforce_string@Base 2.2.0
- slurm_acct_storage_fini@Base 2.2.0
- slurm_acct_storage_init@Base 2.2.0
- slurm_add_slash_to_quotes@Base 2.2.0
- slurm_addto_char_list@Base 2.2.0
- slurm_allocate_resources@Base 2.2.0
- slurm_allocate_resources_blocking@Base 2.2.0
- slurm_allocation_lookup@Base 2.2.0
- slurm_allocation_lookup_lite@Base 2.2.0
- slurm_allocation_msg_thr_create@Base 2.2.0
- slurm_allocation_msg_thr_destroy@Base 2.2.0
- slurm_api_clear_config@Base 2.2.0
- slurm_api_set_conf_file@Base 2.2.0
- slurm_api_set_default_config@Base 2.2.0
- slurm_api_version@Base 2.2.0
- slurm_arg_count@Base 2.2.0
- slurm_arg_idx_by_name@Base 2.2.0
- slurm_arg_name_by_idx@Base 2.2.0
- slurm_auth_context_create@Base 2.2.0
- slurm_auth_fini@Base 2.2.0
- slurm_auth_get_arg_desc@Base 2.2.0
- slurm_auth_init@Base 2.2.0
- slurm_bg_block_state_string@Base 2.2.0
- slurm_bit_alloc@Base 2.2.0
- slurm_bit_and@Base 2.2.0
- slurm_bit_clear@Base 2.2.0
- slurm_bit_clear_count@Base 2.2.0
- slurm_bit_copy@Base 2.2.0
- slurm_bit_copybits@Base 2.2.0
- slurm_bit_equal@Base 2.2.0
- slurm_bit_ffc@Base 2.2.0
- slurm_bit_ffs@Base 2.2.0
- slurm_bit_fill_gaps@Base 2.2.0
- slurm_bit_fls@Base 2.2.0
- slurm_bit_fmt@Base 2.2.0
- slurm_bit_fmt_binmask@Base 2.2.0
- slurm_bit_fmt_hexmask@Base 2.2.0
- slurm_bit_free@Base 2.2.0
- slurm_bit_get_bit_num@Base 2.2.0
- slurm_bit_get_pos_num@Base 2.2.0
- slurm_bit_nclear@Base 2.2.0
- slurm_bit_nffc@Base 2.2.0
- slurm_bit_nffs@Base 2.2.0
- slurm_bit_noc@Base 2.2.0
- slurm_bit_not@Base 2.2.0
- slurm_bit_nset@Base 2.2.0
- slurm_bit_nset_max_count@Base 2.2.0
- slurm_bit_or@Base 2.2.0
- slurm_bit_overlap@Base 2.2.0
- slurm_bit_pick_cnt@Base 2.2.0
- slurm_bit_realloc@Base 2.2.0
- slurm_bit_rotate@Base 2.2.0
- slurm_bit_rotate_copy@Base 2.2.0
- slurm_bit_set@Base 2.2.0
- slurm_bit_set_count@Base 2.2.0
- slurm_bit_size@Base 2.2.0
- slurm_bit_super_set@Base 2.2.0
- slurm_bit_test@Base 2.2.0
- slurm_bit_unfmt@Base 2.2.0
- slurm_bit_unfmt_binmask@Base 2.2.0
- slurm_bit_unfmt_hexmask@Base 2.2.0
- slurm_bitfmt2int@Base 2.2.0
- slurm_checkpoint_able@Base 2.2.0
- slurm_checkpoint_complete@Base 2.2.0
- slurm_checkpoint_create@Base 2.2.0
- slurm_checkpoint_disable@Base 2.2.0
- slurm_checkpoint_enable@Base 2.2.0
- slurm_checkpoint_error@Base 2.2.0
- slurm_checkpoint_requeue@Base 2.2.0
- slurm_checkpoint_restart@Base 2.2.0
- slurm_checkpoint_task_complete@Base 2.2.0
- slurm_checkpoint_tasks@Base 2.2.0
- slurm_checkpoint_vacate@Base 2.2.0
- slurm_clear_trigger@Base 2.2.0
- slurm_close_accepted_conn@Base 2.2.0
- slurm_close_slurmdbd_conn@Base 2.2.0
- slurm_close_stream@Base 2.2.0
- slurm_complete_job@Base 2.2.0
- slurm_conf_destroy@Base 2.2.0
- slurm_conf_downnodes_array@Base 2.2.0
- slurm_conf_expand_slurmd_path@Base 2.2.0
- slurm_conf_get_addr@Base 2.2.0
- slurm_conf_get_aliased_nodename@Base 2.2.0
- slurm_conf_get_aliases@Base 2.2.4
- slurm_conf_get_cpus_sct@Base 2.2.0
- slurm_conf_get_hostname@Base 2.2.0
- slurm_conf_get_nodeaddr@Base 2.2.0
- slurm_conf_get_nodename@Base 2.2.0
- slurm_conf_get_port@Base 2.2.0
- slurm_conf_init@Base 2.2.0
- slurm_conf_install_fork_handlers@Base 2.2.0
- slurm_conf_lock@Base 2.2.0
- slurm_conf_mutex_init@Base 2.2.0
- slurm_conf_nodename_array@Base 2.2.0
- slurm_conf_options@Base 2.2.0
- slurm_conf_partition_array@Base 2.2.0
- slurm_conf_reinit@Base 2.2.0
- slurm_conf_unlock@Base 2.2.0
- slurm_conn_type_string@Base 2.2.0
- slurm_create_buf@Base 2.2.0
- slurm_create_partition@Base 2.2.0
- slurm_create_reservation@Base 2.2.0
- slurm_cred_begin_expiration@Base 2.2.0
- slurm_cred_copy@Base 2.2.0
- slurm_cred_create@Base 2.2.0
- slurm_cred_creator_ctx_create@Base 2.2.0
- slurm_cred_ctx_destroy@Base 2.2.0
- slurm_cred_ctx_get@Base 2.2.0
- slurm_cred_ctx_key_update@Base 2.2.0
- slurm_cred_ctx_pack@Base 2.2.0
- slurm_cred_ctx_set@Base 2.2.0
- slurm_cred_ctx_unpack@Base 2.2.0
- slurm_cred_destroy@Base 2.2.0
- slurm_cred_faker@Base 2.2.0
- slurm_cred_free_args@Base 2.2.0
- slurm_cred_get_args@Base 2.2.0
- slurm_cred_get_signature@Base 2.2.0
- slurm_cred_handle_reissue@Base 2.2.0
- slurm_cred_insert_jobid@Base 2.2.0
- slurm_cred_jobid_cached@Base 2.2.0
- slurm_cred_pack@Base 2.2.0
- slurm_cred_print@Base 2.2.0
- slurm_cred_revoke@Base 2.2.0
- slurm_cred_revoked@Base 2.2.0
- slurm_cred_rewind@Base 2.2.0
- slurm_cred_unpack@Base 2.2.0
- slurm_cred_verifier_ctx_create@Base 2.2.0
- slurm_cred_verify@Base 2.2.0
- slurm_crypto_fini@Base 2.2.0
- slurm_ctl_conf_2_key_pairs@Base 2.2.0
- slurm_debug2@Base 2.2.0
- slurm_debug3@Base 2.2.0
- slurm_debug4@Base 2.2.0
- slurm_debug5@Base 2.2.0
- slurm_debug@Base 2.2.0
- slurm_delete_partition@Base 2.2.0
- slurm_delete_reservation@Base 2.2.0
- slurm_destroy_association_shares_object@Base 2.2.0
- slurm_destroy_char@Base 2.2.0
- slurm_destroy_config_key_pair@Base 2.2.0
- slurm_destroy_priority_factors_object@Base 2.2.0
- slurm_destroy_uint32_ptr@Base 2.2.0
- slurm_dump_cleanup_list@Base 2.2.0
- slurm_env_array_append@Base 2.2.0
- slurm_env_array_append_fmt@Base 2.2.0
- slurm_env_array_copy@Base 2.2.0
- slurm_env_array_create@Base 2.2.0
- slurm_env_array_free@Base 2.2.0
- slurm_env_array_merge@Base 2.2.0
- slurm_env_array_overwrite@Base 2.2.0
- slurm_env_array_overwrite_fmt@Base 2.2.0
- slurm_error@Base 2.2.0
- slurm_fatal@Base 2.2.0
- slurm_fatal_add_cleanup@Base 2.2.0
- slurm_fatal_add_cleanup_job@Base 2.2.0
- slurm_fatal_cleanup@Base 2.2.0
- slurm_fatal_remove_cleanup@Base 2.2.0
- slurm_fatal_remove_cleanup_job@Base 2.2.0
- slurm_fd_read_n@Base 2.2.0
- slurm_fd_set_blocking@Base 2.2.0
- slurm_fd_set_nonblocking@Base 2.2.0
- slurm_fd_write_n@Base 2.2.0
- slurm_free_accounting_update_msg@Base 2.2.0
- slurm_free_block_info@Base 2.2.0
- slurm_free_block_info_members@Base 2.2.0
- slurm_free_block_info_msg@Base 2.2.0
- slurm_free_block_info_request_msg@Base 2.2.0
- slurm_free_buf@Base 2.2.0
- slurm_free_checkpoint_comp_msg@Base 2.2.0
- slurm_free_checkpoint_msg@Base 2.2.0
- slurm_free_checkpoint_resp_msg@Base 2.2.0
- slurm_free_checkpoint_task_comp_msg@Base 2.2.0
- slurm_free_checkpoint_tasks_msg@Base 2.2.0
- slurm_free_complete_batch_script_msg@Base 2.2.0
- slurm_free_complete_job_allocation_msg@Base 2.2.0
- slurm_free_ctl_conf@Base 2.2.0
- slurm_free_delete_part_msg@Base 2.2.0
- slurm_free_epilog_complete_msg@Base 2.2.0
- slurm_free_file_bcast_msg@Base 2.2.0
- slurm_free_get_kvs_msg@Base 2.2.0
- slurm_free_job_alloc_info_msg@Base 2.2.0
- slurm_free_job_alloc_info_response_msg@Base 2.2.0
- slurm_free_job_desc_msg@Base 2.2.0
- slurm_free_job_id_msg@Base 2.2.0
- slurm_free_job_id_request_msg@Base 2.2.0
- slurm_free_job_id_response_msg@Base 2.2.0
- slurm_free_job_info@Base 2.2.0
- slurm_free_job_info_members@Base 2.2.0
- slurm_free_job_info_msg@Base 2.2.0
- slurm_free_job_info_request_msg@Base 2.2.0
- slurm_free_job_launch_msg@Base 2.2.0
- slurm_free_job_notify_msg@Base 2.2.0
- slurm_free_job_step_create_request_msg@Base 2.2.0
- slurm_free_job_step_create_response_msg@Base 2.2.0
- slurm_free_job_step_id_msg@Base 2.2.0
- slurm_free_job_step_info_members@Base 2.2.0
- slurm_free_job_step_info_request_msg@Base 2.2.0
- slurm_free_job_step_info_response_msg@Base 2.2.0
- slurm_free_job_step_kill_msg@Base 2.2.0
- slurm_free_job_step_pids@Base 2.2.0
- slurm_free_job_step_stat@Base 2.2.0
- slurm_free_kill_job_msg@Base 2.2.0
- slurm_free_kill_tasks_msg@Base 2.2.0
- slurm_free_kvs_comm_set@Base 2.2.0
- slurm_free_last_update_msg@Base 2.2.0
- slurm_free_launch_tasks_request_msg@Base 2.2.0
- slurm_free_launch_tasks_response_msg@Base 2.2.0
- slurm_free_msg@Base 2.2.0
- slurm_free_msg_data@Base 2.2.0
- slurm_free_node_info_members@Base 2.2.0
- slurm_free_node_info_msg@Base 2.2.0
- slurm_free_node_info_request_msg@Base 2.2.0
- slurm_free_node_registration_status_msg@Base 2.2.0
- slurm_free_part_info_request_msg@Base 2.2.0
- slurm_free_partition_info_members@Base 2.2.0
- slurm_free_partition_info_msg@Base 2.2.0
- slurm_free_priority_factors_request_msg@Base 2.2.0
- slurm_free_priority_factors_response_msg@Base 2.2.0
- slurm_free_reattach_tasks_request_msg@Base 2.2.0
- slurm_free_reattach_tasks_response_msg@Base 2.2.0
- slurm_free_reservation_info_msg@Base 2.2.0
- slurm_free_reserve_info_members@Base 2.2.0
- slurm_free_resource_allocation_response_msg@Base 2.2.0
- slurm_free_resv_desc_msg@Base 2.2.0
- slurm_free_resv_info_request_msg@Base 2.2.0
- slurm_free_resv_name_msg@Base 2.2.0
- slurm_free_return_code_msg@Base 2.2.0
- slurm_free_sbcast_cred_msg@Base 2.2.0
- slurm_free_set_debug_level_msg@Base 2.2.0
- slurm_free_shares_request_msg@Base 2.2.0
- slurm_free_shares_response_msg@Base 2.2.0
- slurm_free_shutdown_msg@Base 2.2.0
- slurm_free_signal_job_msg@Base 2.2.0
- slurm_free_slurmd_status@Base 2.2.0
- slurm_free_srun_exec_msg@Base 2.2.0
- slurm_free_srun_job_complete_msg@Base 2.2.0
- slurm_free_srun_node_fail_msg@Base 2.2.0
- slurm_free_srun_ping_msg@Base 2.2.0
- slurm_free_srun_step_missing_msg@Base 2.2.0
- slurm_free_srun_timeout_msg@Base 2.2.0
- slurm_free_srun_user_msg@Base 2.2.0
- slurm_free_step_complete_msg@Base 2.2.0
- slurm_free_submit_response_response_msg@Base 2.2.0
- slurm_free_suspend_msg@Base 2.2.0
- slurm_free_task_exit_msg@Base 2.2.0
- slurm_free_task_user_managed_io_stream_msg@Base 2.2.0
- slurm_free_topo_info_msg@Base 2.2.0
- slurm_free_trigger_msg@Base 2.2.0
- slurm_free_update_job_time_msg@Base 2.2.0
- slurm_free_update_node_msg@Base 2.2.0
- slurm_free_update_part_msg@Base 2.2.0
- slurm_free_update_step_msg@Base 2.2.0
- slurm_free_will_run_response_msg@Base 2.2.0
- slurm_get_accounting_storage_backup_host@Base 2.2.0
- slurm_get_accounting_storage_enforce@Base 2.2.0
- slurm_get_accounting_storage_host@Base 2.2.0
- slurm_get_accounting_storage_loc@Base 2.2.0
- slurm_get_accounting_storage_pass@Base 2.2.0
- slurm_get_accounting_storage_port@Base 2.2.0
- slurm_get_accounting_storage_type@Base 2.2.0
- slurm_get_accounting_storage_user@Base 2.2.0
- slurm_get_addr@Base 2.2.0
- slurm_get_api_config@Base 2.2.0
- slurm_get_auth_type@Base 2.2.0
- slurm_get_avail_procs@Base 2.2.0
- slurm_get_batch_start_timeout@Base 2.2.0
- slurm_get_checkpoint_type@Base 2.2.0
- slurm_get_cluster_name@Base 2.2.0
- slurm_get_complete_wait@Base 2.2.0
- slurm_get_controller_addr_spec@Base 2.2.0
- slurm_get_crypto_type@Base 2.2.0
- slurm_get_debug_flags@Base 2.2.0
- slurm_get_def_mem_per_cpu@Base 2.2.0
- slurm_get_end_time@Base 2.2.0
- slurm_get_env_timeout@Base 2.2.0
- slurm_get_epilog_msg_time@Base 2.2.0
- slurm_get_errno@Base 2.2.0
- slurm_get_fast_schedule@Base 2.2.0
- slurm_get_gres_plugins@Base 2.2.0
- slurm_get_hash_val@Base 2.2.0
- slurm_get_health_check_program@Base 2.2.0
- slurm_get_ip_str@Base 2.2.0
- slurm_get_is_association_based_accounting@Base 2.2.0
- slurm_get_job_steps@Base 2.2.0
- slurm_get_job_submit_plugins@Base 2.2.0
- slurm_get_jobacct_gather_freq@Base 2.2.0
- slurm_get_jobacct_gather_type@Base 2.2.0
- slurm_get_jobcomp_host@Base 2.2.0
- slurm_get_jobcomp_loc@Base 2.2.0
- slurm_get_jobcomp_pass@Base 2.2.0
- slurm_get_jobcomp_port@Base 2.2.0
- slurm_get_jobcomp_type@Base 2.2.0
- slurm_get_jobcomp_user@Base 2.2.0
- slurm_get_kill_on_bad_exit@Base 2.2.0
- slurm_get_kvs_comm_set@Base 2.2.0
- slurm_get_max_mem_per_cpu@Base 2.2.0
- slurm_get_mpi_default@Base 2.2.0
- slurm_get_mpi_params@Base 2.2.0
- slurm_get_msg_timeout@Base 2.2.0
- slurm_get_peer_addr@Base 2.2.0
- slurm_get_plugin_dir@Base 2.2.0
- slurm_get_preempt_mode@Base 2.2.0
- slurm_get_preempt_type@Base 2.2.0
- slurm_get_priority_calc_period@Base 2.2.0
- slurm_get_priority_decay_hl@Base 2.2.0
- slurm_get_priority_favor_small@Base 2.2.0
- slurm_get_priority_max_age@Base 2.2.0
- slurm_get_priority_reset_period@Base 2.2.0
- slurm_get_priority_type@Base 2.2.0
- slurm_get_priority_weight_age@Base 2.2.0
- slurm_get_priority_weight_fairshare@Base 2.2.0
- slurm_get_priority_weight_job_size@Base 2.2.0
- slurm_get_priority_weight_partition@Base 2.2.0
- slurm_get_priority_weight_qos@Base 2.2.0
- slurm_get_private_data@Base 2.2.0
- slurm_get_proctrack_type@Base 2.2.0
- slurm_get_propagate_prio_process@Base 2.2.0
- slurm_get_rem_time@Base 2.2.0
- slurm_get_resume_timeout@Base 2.2.0
- slurm_get_return_code@Base 2.2.0
- slurm_get_root_filter@Base 2.2.0
- slurm_get_sched_params@Base 2.2.0
- slurm_get_sched_port@Base 2.2.0
- slurm_get_sched_type@Base 2.2.0
- slurm_get_select_jobinfo@Base 2.2.0
- slurm_get_select_nodeinfo@Base 2.2.0
- slurm_get_select_type@Base 2.2.0
- slurm_get_slurm_user_id@Base 2.2.0
- slurm_get_slurmd_port@Base 2.2.0
- slurm_get_slurmd_user_id@Base 2.2.0
- slurm_get_srun_epilog@Base 2.2.0
- slurm_get_srun_prolog@Base 2.2.0
- slurm_get_state_save_location@Base 2.2.0
- slurm_get_stream_addr@Base 2.2.0
- slurm_get_suspend_time@Base 2.2.0
- slurm_get_suspend_timeout@Base 2.2.0
- slurm_get_switch_type@Base 2.2.0
- slurm_get_task_epilog@Base 2.2.0
- slurm_get_task_plugin@Base 2.2.0
- slurm_get_task_plugin_param@Base 2.2.0
- slurm_get_task_prolog@Base 2.2.0
- slurm_get_topology_plugin@Base 2.2.0
- slurm_get_track_wckey@Base 2.2.0
- slurm_get_tree_width@Base 2.2.0
- slurm_get_triggers@Base 2.2.0
- slurm_get_vsize_factor@Base 2.2.0
- slurm_get_wait_time@Base 2.2.0
- slurm_getenvp@Base 2.2.0
- slurm_grow_buf@Base 2.2.0
- slurm_hostlist_copy@Base 2.2.0
- slurm_hostlist_count@Base 2.2.0
- slurm_hostlist_create@Base 2.2.0
- slurm_hostlist_delete@Base 2.2.0
- slurm_hostlist_delete_host@Base 2.2.0
- slurm_hostlist_delete_nth@Base 2.2.0
- slurm_hostlist_deranged_string@Base 2.2.0
- slurm_hostlist_deranged_string_malloc@Base 2.2.0
- slurm_hostlist_deranged_string_xmalloc@Base 2.2.0
- slurm_hostlist_destroy@Base 2.2.0
- slurm_hostlist_find@Base 2.2.0
- slurm_hostlist_iterator_create@Base 2.2.0
- slurm_hostlist_iterator_destroy@Base 2.2.0
- slurm_hostlist_iterator_reset@Base 2.2.0
- slurm_hostlist_next@Base 2.2.0
- slurm_hostlist_next_range@Base 2.2.0
- slurm_hostlist_nth@Base 2.2.0
- slurm_hostlist_pop@Base 2.2.0
- slurm_hostlist_pop_range@Base 2.2.0
- slurm_hostlist_push@Base 2.2.0
- slurm_hostlist_push_host@Base 2.2.0
- slurm_hostlist_push_list@Base 2.2.0
- slurm_hostlist_ranged_string@Base 2.2.0
- slurm_hostlist_ranged_string_malloc@Base 2.2.0
- slurm_hostlist_ranged_string_xmalloc@Base 2.2.0
- slurm_hostlist_remove@Base 2.2.0
- slurm_hostlist_shift@Base 2.2.0
- slurm_hostlist_shift_range@Base 2.2.0
- slurm_hostlist_soft@Base 2.2.0
- slurm_hostlist_uniq@Base 2.2.0
- slurm_hostset_copy@Base 2.2.0
- slurm_hostset_count@Base 2.2.0
- slurm_hostset_create@Base 2.2.0
- slurm_hostset_delete@Base 2.2.0
- slurm_hostset_destroy@Base 2.2.0
- slurm_hostset_find@Base 2.2.0
- slurm_hostset_insert@Base 2.2.0
- slurm_hostset_nth@Base 2.2.0
- slurm_hostset_shift@Base 2.2.0
- slurm_hostset_shift_range@Base 2.2.0
- slurm_hostset_within@Base 2.2.0
- slurm_info@Base 2.2.0
- slurm_init_buf@Base 2.2.0
- slurm_init_job_desc_msg@Base 2.2.0
- slurm_init_msg_engine@Base 2.2.0
- slurm_init_msg_engine_addrname_port@Base 2.2.0
- slurm_init_msg_engine_port@Base 2.2.0
- slurm_init_part_desc_msg@Base 2.2.0
- slurm_init_resv_desc_msg@Base 2.2.0
- slurm_init_update_block_msg@Base 2.2.0
- slurm_init_update_node_msg@Base 2.2.0
- slurm_init_update_step_msg@Base 2.2.0
- slurm_int_and_set_count@Base 2.2.0
- slurm_job_cpus_allocated_on_node@Base 2.2.0
- slurm_job_cpus_allocated_on_node_id@Base 2.2.0
- slurm_job_node_ready@Base 2.2.0
- slurm_job_reason_string@Base 2.2.0
- slurm_job_state_num@Base 2.2.0
- slurm_job_state_string@Base 2.2.0
- slurm_job_state_string_compact@Base 2.2.0
- slurm_job_step_create@Base 2.2.0
- slurm_job_step_get_pids@Base 2.2.0
- slurm_job_step_layout_free@Base 2.2.0
- slurm_job_step_layout_get@Base 2.2.0
- slurm_job_step_pids_free@Base 2.2.0
- slurm_job_step_pids_response_msg_free@Base 2.2.0
- slurm_job_step_stat@Base 2.2.0
- slurm_job_step_stat_free@Base 2.2.0
- slurm_job_step_stat_response_msg_free@Base 2.2.0
- slurm_job_will_run@Base 2.2.0
- slurm_jobacct_common_alloc_jobacct@Base 2.2.0
- slurm_jobacct_common_free_jobacct@Base 2.2.0
- slurm_jobacct_common_pack@Base 2.2.0
- slurm_jobacct_common_unpack@Base 2.2.0
- slurm_jobacct_gather_fini@Base 2.2.0
- slurm_jobacct_gather_init@Base 2.2.0
- slurm_jobinfo_ctx_get@Base 2.2.0
- slurm_kill_job@Base 2.2.0
- slurm_kill_job_step@Base 2.2.0
- slurm_list_append@Base 2.2.0
- slurm_list_append_list@Base 2.2.0
- slurm_list_count@Base 2.2.0
- slurm_list_create@Base 2.2.0
- slurm_list_delete_all@Base 2.2.0
- slurm_list_delete_item@Base 2.2.0
- slurm_list_dequeue@Base 2.2.0
- slurm_list_destroy@Base 2.2.0
- slurm_list_enqueue@Base 2.2.0
- slurm_list_find@Base 2.2.0
- slurm_list_find_first@Base 2.2.0
- slurm_list_flush@Base 2.2.0
- slurm_list_for_each@Base 2.2.0
- slurm_list_insert@Base 2.2.0
- slurm_list_install_fork_handlers@Base 2.2.0
- slurm_list_is_empty@Base 2.2.0
- slurm_list_iterator_create@Base 2.2.0
- slurm_list_iterator_destroy@Base 2.2.0
- slurm_list_iterator_reset@Base 2.2.0
- slurm_list_next@Base 2.2.0
- slurm_list_peek@Base 2.2.0
- slurm_list_pop@Base 2.2.0
- slurm_list_prepend@Base 2.2.0
- slurm_list_push@Base 2.2.0
- slurm_list_remove@Base 2.2.0
- slurm_list_sort@Base 2.2.0
- slurm_list_transfer@Base 2.2.0
- slurm_listen_stream@Base 2.2.0
- slurm_load_block_info@Base 2.2.0
- slurm_load_ctl_conf@Base 2.2.0
- slurm_load_job@Base 2.2.0
- slurm_load_jobs@Base 2.2.0
- slurm_load_node@Base 2.2.0
- slurm_load_partitions@Base 2.2.0
- slurm_load_reservations@Base 2.2.0
- slurm_load_slurmd_status@Base 2.2.0
- slurm_load_topo@Base 2.2.0
- slurm_log_alter@Base 2.2.0
- slurm_log_fini@Base 2.2.0
- slurm_log_flush@Base 2.2.0
- slurm_log_fp@Base 2.2.0
- slurm_log_has_data@Base 2.2.0
- slurm_log_init@Base 2.2.0
- slurm_log_reinit@Base 2.2.0
- slurm_log_set_fpfx@Base 2.2.0
- slurm_make_time_str@Base 2.2.0
- slurm_msg_t_copy@Base 2.2.0
- slurm_msg_t_init@Base 2.2.0
- slurm_net_accept_stream@Base 2.2.0
- slurm_net_set_low_water@Base 2.2.0
- slurm_net_stream_listen@Base 2.2.0
- slurm_node_state_string@Base 2.2.0
- slurm_node_state_string_compact@Base 2.2.0
- slurm_node_use_string@Base 2.2.0
- slurm_notify_job@Base 2.2.0
- slurm_open_controller_conn@Base 2.2.0
- slurm_open_controller_conn_spec@Base 2.2.0
- slurm_open_msg_conn@Base 2.2.0
- slurm_open_slurmdbd_conn@Base 2.2.0
- slurm_open_stream@Base 2.2.0
- slurm_pack16@Base 2.2.0
- slurm_pack16_array@Base 2.2.0
- slurm_pack32@Base 2.2.0
- slurm_pack32_array@Base 2.2.0
- slurm_pack64@Base 2.2.0
- slurm_pack8@Base 2.2.0
- slurm_pack_msg_no_header@Base 2.2.0
- slurm_pack_slurm_addr@Base 2.2.0
- slurm_pack_slurm_addr_array@Base 2.2.0
- slurm_pack_time@Base 2.2.0
- slurm_packmem@Base 2.2.0
- slurm_packmem_array@Base 2.2.0
- slurm_packstr_array@Base 2.2.0
- slurm_parser@Base 2.2.0
- slurm_perror@Base 2.2.0
- slurm_pid2jobid@Base 2.2.0
- slurm_ping@Base 2.2.0
- slurm_plugin_get_syms@Base 2.2.0
- slurm_plugin_load_and_link@Base 2.2.0
- slurm_plugin_strerror@Base 2.2.0
- slurm_plugin_unload@Base 2.2.0
- slurm_plugrack_create@Base 2.2.0
- slurm_plugrack_destroy@Base 2.2.0
- slurm_plugrack_read_dir@Base 2.2.0
- slurm_plugrack_set_major_type@Base 2.2.0
- slurm_plugrack_set_paranoia@Base 2.2.0
- slurm_plugrack_use_by_type@Base 2.2.0
- slurm_pmi_finalize@Base 2.2.0
- slurm_preempt_mode_num@Base 2.2.0
- slurm_preempt_mode_string@Base 2.2.0
- slurm_print_block_info@Base 2.2.0
- slurm_print_block_info_msg@Base 2.2.0
- slurm_print_cpu_bind_help@Base 2.2.0
- slurm_print_ctl_conf@Base 2.2.0
- slurm_print_job_info@Base 2.2.0
- slurm_print_job_info_msg@Base 2.2.0
- slurm_print_job_step_info@Base 2.2.0
- slurm_print_job_step_info_msg@Base 2.2.0
- slurm_print_key_pairs@Base 2.2.0
- slurm_print_launch_task_msg@Base 2.2.0
- slurm_print_mem_bind_help@Base 2.2.0
- slurm_print_node_info_msg@Base 2.2.0
- slurm_print_node_table@Base 2.2.0
- slurm_print_partition_info@Base 2.2.0
- slurm_print_partition_info_msg@Base 2.2.0
- slurm_print_reservation_info@Base 2.2.0
- slurm_print_reservation_info_msg@Base 2.2.0
- slurm_print_slurm_addr@Base 2.2.0
- slurm_print_slurmd_status@Base 2.2.0
- slurm_print_topo_info_msg@Base 2.2.0
- slurm_print_topo_record@Base 2.2.0
- slurm_priority_fini@Base 2.2.0
- slurm_priority_init@Base 2.2.0
- slurm_private_data_string@Base 2.2.0
- slurm_pull_trigger@Base 2.2.0
- slurm_read_hostfile@Base 2.2.0
- slurm_read_stream@Base 2.2.0
- slurm_read_stream_timeout@Base 2.2.0
- slurm_receive_msg@Base 2.2.0
- slurm_receive_msg_and_forward@Base 2.2.0
- slurm_receive_msgs@Base 2.2.0
- slurm_reconfigure@Base 2.2.0
- slurm_requeue@Base 2.2.0
- slurm_reservation_flags_string@Base 2.2.0
- slurm_resume@Base 2.2.0
- slurm_sbcast_lookup@Base 2.2.0
- slurm_select_fini@Base 2.2.0
- slurm_select_init@Base 2.2.0
- slurm_send_addr_recv_msgs@Base 2.2.0
- slurm_send_kvs_comm_set@Base 2.2.0
- slurm_send_node_msg@Base 2.2.0
- slurm_send_only_controller_msg@Base 2.2.0
- slurm_send_only_node_msg@Base 2.2.0
- slurm_send_rc_msg@Base 2.2.0
- slurm_send_recv_controller_msg@Base 2.2.0
- slurm_send_recv_controller_rc_msg@Base 2.2.0
- slurm_send_recv_msgs@Base 2.2.0
- slurm_send_recv_node_msg@Base 2.2.0
- slurm_send_recv_rc_msg_only_one@Base 2.2.0
- slurm_send_recv_slurmdbd_msg@Base 2.2.0
- slurm_send_slurmdbd_msg@Base 2.2.0
- slurm_send_slurmdbd_recv_rc_msg@Base 2.2.0
- slurm_set_accounting_storage_host@Base 2.2.0
- slurm_set_accounting_storage_loc@Base 2.2.0
- slurm_set_accounting_storage_port@Base 2.2.0
- slurm_set_accounting_storage_user@Base 2.2.0
- slurm_set_addr@Base 2.2.0
- slurm_set_addr_any@Base 2.2.0
- slurm_set_addr_char@Base 2.2.0
- slurm_set_addr_uint@Base 2.2.0
- slurm_set_api_config@Base 2.2.0
- slurm_set_auth_type@Base 2.2.0
- slurm_set_debug_level@Base 2.2.0
- slurm_set_jobcomp_port@Base 2.2.0
- slurm_set_schedlog_level@Base 2.2.0
- slurm_set_stream_blocking@Base 2.2.0
- slurm_set_stream_non_blocking@Base 2.2.0
- slurm_set_tree_width@Base 2.2.0
- slurm_set_trigger@Base 2.2.0
- slurm_setenvpf@Base 2.2.0
- slurm_seterrno@Base 2.2.0
- slurm_shutdown@Base 2.2.0
- slurm_shutdown_msg_conn@Base 2.2.0
- slurm_shutdown_msg_engine@Base 2.2.0
- slurm_signal_job@Base 2.2.0
- slurm_signal_job_step@Base 2.2.0
- slurm_sort_char_list_asc@Base 2.2.0
- slurm_sort_char_list_desc@Base 2.2.0
- slurm_sort_key_pairs@Base 2.2.0
- slurm_sprint_block_info@Base 2.2.0
- slurm_sprint_cpu_bind_type@Base 2.2.0
- slurm_sprint_job_info@Base 2.2.0
- slurm_sprint_job_step_info@Base 2.2.0
- slurm_sprint_mem_bind_type@Base 2.2.0
- slurm_sprint_node_table@Base 2.2.0
- slurm_sprint_partition_info@Base 2.2.0
- slurm_sprint_reservation_info@Base 2.2.0
- slurm_step_ctx_create@Base 2.2.0
- slurm_step_ctx_create_no_alloc@Base 2.2.0
- slurm_step_ctx_daemon_per_node_hack@Base 2.2.0
- slurm_step_ctx_destroy@Base 2.2.0
- slurm_step_ctx_get@Base 2.2.0
- slurm_step_ctx_params_t_init@Base 2.2.0
- slurm_step_launch@Base 2.2.0
- slurm_step_launch_abort@Base 2.2.0
- slurm_step_launch_fwd_signal@Base 2.2.0
- slurm_step_launch_params_t_init@Base 2.2.0
- slurm_step_launch_wait_finish@Base 2.2.0
- slurm_step_launch_wait_start@Base 2.2.0
- slurm_step_layout_copy@Base 2.2.0
- slurm_step_layout_create@Base 2.2.0
- slurm_step_layout_destroy@Base 2.2.0
- slurm_step_layout_host_id@Base 2.2.0
- slurm_step_layout_host_name@Base 2.2.0
- slurm_step_layout_type_name@Base 2.2.0
- slurm_strcasestr@Base 2.2.0
- slurm_strerror@Base 2.2.0
- slurm_strlcpy@Base 2.2.0
- slurm_submit_batch_job@Base 2.2.0
- slurm_suspend@Base 2.2.0
- slurm_takeover@Base 2.2.0
- slurm_terminate_job@Base 2.2.0
- slurm_terminate_job_step@Base 2.2.0
- slurm_topo_build_config@Base 2.2.0
- slurm_topo_fini@Base 2.2.0
- slurm_topo_get_node_addr@Base 2.2.0
- slurm_topo_init@Base 2.2.0
- slurm_try_xmalloc@Base 2.2.0
- slurm_try_xrealloc@Base 2.2.0
- slurm_unpack16@Base 2.2.0
- slurm_unpack16_array@Base 2.2.0
- slurm_unpack32@Base 2.2.0
- slurm_unpack32_array@Base 2.2.0
- slurm_unpack64@Base 2.2.0
- slurm_unpack8@Base 2.2.0
- slurm_unpack_block_info_msg@Base 2.2.0
- slurm_unpack_slurm_addr_array@Base 2.2.0
- slurm_unpack_slurm_addr_no_alloc@Base 2.2.0
- slurm_unpack_time@Base 2.2.0
- slurm_unpackmem@Base 2.2.0
- slurm_unpackmem_array@Base 2.2.0
- slurm_unpackmem_malloc@Base 2.2.0
- slurm_unpackmem_ptr@Base 2.2.0
- slurm_unpackmem_xmalloc@Base 2.2.0
- slurm_unpackstr_array@Base 2.2.0
- slurm_unsetenvp@Base 2.2.0
- slurm_update_block@Base 2.2.0
- slurm_update_job@Base 2.2.0
- slurm_update_node@Base 2.2.0
- slurm_update_partition@Base 2.2.0
- slurm_update_reservation@Base 2.2.0
- slurm_update_step@Base 2.2.0
- slurm_verbose@Base 2.2.0
- slurm_verify_cpu_bind@Base 2.2.0
- slurm_verify_mem_bind@Base 2.2.0
- slurm_write_stream@Base 2.2.0
- slurm_write_stream_timeout@Base 2.2.0
- slurm_xassert_failed@Base 2.2.0
- slurm_xbasename@Base 2.2.0
- slurm_xfer_buf_data@Base 2.2.0
- slurm_xfree@Base 2.2.0
- slurm_xmalloc@Base 2.2.0
- slurm_xmemcat@Base 2.2.0
- slurm_xrealloc@Base 2.2.0
- slurm_xshort_hostname@Base 2.2.0
- slurm_xsignal@Base 2.2.0
- slurm_xsignal_block@Base 2.2.0
- slurm_xsignal_save_mask@Base 2.2.0
- slurm_xsignal_set_mask@Base 2.2.0
- slurm_xsignal_sigset_create@Base 2.2.0
- slurm_xsignal_unblock@Base 2.2.0
- slurm_xsize@Base 2.2.0
- slurm_xslurm_strerrorcat@Base 2.2.0
- slurm_xstrcat@Base 2.2.0
- slurm_xstrcatchar@Base 2.2.0
- slurm_xstrdup@Base 2.2.0
- slurm_xstrdup_printf@Base 2.2.0
- slurm_xstrfmtcat@Base 2.2.0
- slurm_xstrftimecat@Base 2.2.0
- slurm_xstring_is_whitespace@Base 2.2.0
- slurm_xstrncat@Base 2.2.0
- slurm_xstrndup@Base 2.2.0
- slurm_xstrstrip@Base 2.2.0
- slurm_xstrsubstitute@Base 2.2.0
- slurm_xstrtolower@Base 2.2.0
- slurmdb_accounts_add@Base 2.2.0
- slurmdb_accounts_get@Base 2.2.0
- slurmdb_accounts_modify@Base 2.2.0
- slurmdb_accounts_remove@Base 2.2.0
- slurmdb_addto_qos_char_list@Base 2.2.0
- slurmdb_admin_level_str@Base 2.2.0
- slurmdb_archive@Base 2.2.0
- slurmdb_archive_load@Base 2.2.0
- slurmdb_associations_add@Base 2.2.0
- slurmdb_associations_get@Base 2.2.0
- slurmdb_associations_modify@Base 2.2.0
- slurmdb_associations_remove@Base 2.2.0
- slurmdb_cluster_flags_2_str@Base 2.2.0
- slurmdb_clusters_add@Base 2.2.0
- slurmdb_clusters_get@Base 2.2.0
- slurmdb_clusters_modify@Base 2.2.0
- slurmdb_clusters_remove@Base 2.2.0
- slurmdb_config_get@Base 2.2.0
- slurmdb_connection_close@Base 2.2.0
- slurmdb_connection_get@Base 2.2.0
- slurmdb_coord_add@Base 2.2.0
- slurmdb_coord_remove@Base 2.2.0
- slurmdb_create_job_rec@Base 2.2.0
- slurmdb_create_step_rec@Base 2.2.0
- slurmdb_destroy_account_cond@Base 2.2.0
- slurmdb_destroy_account_rec@Base 2.2.0
- slurmdb_destroy_accounting_rec@Base 2.2.0
- slurmdb_destroy_archive_cond@Base 2.2.0
- slurmdb_destroy_archive_rec@Base 2.2.0
- slurmdb_destroy_association_cond@Base 2.2.0
- slurmdb_destroy_association_rec@Base 2.2.0
- slurmdb_destroy_cluster_accounting_rec@Base 2.2.0
- slurmdb_destroy_cluster_cond@Base 2.2.0
- slurmdb_destroy_cluster_rec@Base 2.2.0
- slurmdb_destroy_coord_rec@Base 2.2.0
- slurmdb_destroy_event_cond@Base 2.2.0
- slurmdb_destroy_event_rec@Base 2.2.0
- slurmdb_destroy_hierarchical_rec@Base 2.2.0
- slurmdb_destroy_job_cond@Base 2.2.0
- slurmdb_destroy_job_modify_cond@Base 2.2.0
- slurmdb_destroy_job_rec@Base 2.2.0
- slurmdb_destroy_print_tree@Base 2.2.0
- slurmdb_destroy_qos_cond@Base 2.2.0
- slurmdb_destroy_qos_rec@Base 2.2.0
- slurmdb_destroy_report_acct_grouping@Base 2.2.0
- slurmdb_destroy_report_assoc_rec@Base 2.2.0
- slurmdb_destroy_report_cluster_grouping@Base 2.2.0
- slurmdb_destroy_report_cluster_rec@Base 2.2.0
- slurmdb_destroy_report_job_grouping@Base 2.2.0
- slurmdb_destroy_report_user_rec@Base 2.2.0
- slurmdb_destroy_reservation_cond@Base 2.2.0
- slurmdb_destroy_reservation_rec@Base 2.2.0
- slurmdb_destroy_selected_step@Base 2.2.0
- slurmdb_destroy_step_rec@Base 2.2.0
- slurmdb_destroy_txn_cond@Base 2.2.0
- slurmdb_destroy_txn_rec@Base 2.2.0
- slurmdb_destroy_update_object@Base 2.2.0
- slurmdb_destroy_update_shares_rec@Base 2.2.0
- slurmdb_destroy_used_limits@Base 2.2.0
- slurmdb_destroy_user_cond@Base 2.2.0
- slurmdb_destroy_user_rec@Base 2.2.0
- slurmdb_destroy_wckey_cond@Base 2.2.0
- slurmdb_destroy_wckey_rec@Base 2.2.0
- slurmdb_events_get@Base 2.2.0
- slurmdb_get_acct_hierarchical_rec_list@Base 2.2.0
- slurmdb_get_hierarchical_sorted_assoc_list@Base 2.2.0
- slurmdb_get_info_cluster@Base 2.2.0
- slurmdb_init_association_rec@Base 2.2.0
- slurmdb_init_cluster_cond@Base 2.2.0
- slurmdb_init_cluster_rec@Base 2.2.0
- slurmdb_init_qos_rec@Base 2.2.0
- slurmdb_init_wckey_rec@Base 2.2.0
- slurmdb_jobs_get@Base 2.2.0
- slurmdb_pack_account_cond@Base 2.2.0
- slurmdb_pack_account_rec@Base 2.2.0
- slurmdb_pack_accounting_rec@Base 2.2.0
- slurmdb_pack_archive_cond@Base 2.2.0
- slurmdb_pack_archive_rec@Base 2.2.0
- slurmdb_pack_association_cond@Base 2.2.0
- slurmdb_pack_association_rec@Base 2.2.0
- slurmdb_pack_cluster_accounting_rec@Base 2.2.0
- slurmdb_pack_cluster_cond@Base 2.2.0
- slurmdb_pack_cluster_rec@Base 2.2.0
- slurmdb_pack_coord_rec@Base 2.2.0
- slurmdb_pack_event_cond@Base 2.2.0
- slurmdb_pack_event_rec@Base 2.2.0
- slurmdb_pack_job_cond@Base 2.2.0
- slurmdb_pack_job_modify_cond@Base 2.2.0
- slurmdb_pack_job_rec@Base 2.2.0
- slurmdb_pack_qos_cond@Base 2.2.0
- slurmdb_pack_qos_rec@Base 2.2.0
- slurmdb_pack_reservation_cond@Base 2.2.0
- slurmdb_pack_reservation_rec@Base 2.2.0
- slurmdb_pack_selected_step@Base 2.2.0
- slurmdb_pack_step_rec@Base 2.2.0
- slurmdb_pack_txn_cond@Base 2.2.0
- slurmdb_pack_txn_rec@Base 2.2.0
- slurmdb_pack_update_object@Base 2.2.0
- slurmdb_pack_used_limits@Base 2.2.0
- slurmdb_pack_user_cond@Base 2.2.0
- slurmdb_pack_user_rec@Base 2.2.0
- slurmdb_pack_wckey_cond@Base 2.2.0
- slurmdb_pack_wckey_rec@Base 2.2.0
- slurmdb_parse_purge@Base 2.2.0
- slurmdb_problem_str_get@Base 2.2.0
- slurmdb_problems_get@Base 2.2.0
- slurmdb_purge_string@Base 2.2.0
- slurmdb_qos_add@Base 2.2.0
- slurmdb_qos_flags_str@Base 2.2.0
- slurmdb_qos_get@Base 2.2.0
- slurmdb_qos_modify@Base 2.2.0
- slurmdb_qos_remove@Base 2.2.0
- slurmdb_qos_str@Base 2.2.0
- slurmdb_report_cluster_account_by_user@Base 2.2.0
- slurmdb_report_cluster_user_by_account@Base 2.2.0
- slurmdb_report_cluster_user_by_wckey@Base 2.2.0
- slurmdb_report_cluster_wckey_by_user@Base 2.2.0
- slurmdb_report_job_sizes_grouped_by_top_account@Base 2.2.0
- slurmdb_report_job_sizes_grouped_by_top_account_then_wckey@Base 2.2.0
- slurmdb_report_job_sizes_grouped_by_wckey@Base 2.2.0
- slurmdb_report_set_start_end_time@Base 2.2.0
- slurmdb_report_user_top_usage@Base 2.2.0
- slurmdb_reservations_get@Base 2.2.0
- slurmdb_send_accounting_update@Base 2.2.1
- slurmdb_setup_cluster_dims@Base 2.2.0
- slurmdb_setup_cluster_flags@Base 2.2.0
- slurmdb_slurmdbd_free_id_rc_msg@Base 2.2.0
- slurmdb_slurmdbd_free_list_msg@Base 2.2.0
- slurmdb_slurmdbd_free_rc_msg@Base 2.2.0
- slurmdb_slurmdbd_free_usage_msg@Base 2.2.0
- slurmdb_sort_hierarchical_assoc_list@Base 2.2.0
- slurmdb_str_2_cluster_flags@Base 2.2.0
- slurmdb_tree_name_get@Base 2.2.0
- slurmdb_txn_get@Base 2.2.0
- slurmdb_unpack_account_cond@Base 2.2.0
- slurmdb_unpack_account_rec@Base 2.2.0
- slurmdb_unpack_accounting_rec@Base 2.2.0
- slurmdb_unpack_archive_cond@Base 2.2.0
- slurmdb_unpack_archive_rec@Base 2.2.0
- slurmdb_unpack_association_cond@Base 2.2.0
- slurmdb_unpack_association_rec@Base 2.2.0
- slurmdb_unpack_cluster_accounting_rec@Base 2.2.0
- slurmdb_unpack_cluster_cond@Base 2.2.0
- slurmdb_unpack_cluster_rec@Base 2.2.0
- slurmdb_unpack_coord_rec@Base 2.2.0
- slurmdb_unpack_event_cond@Base 2.2.0
- slurmdb_unpack_event_rec@Base 2.2.0
- slurmdb_unpack_job_cond@Base 2.2.0
- slurmdb_unpack_job_modify_cond@Base 2.2.0
- slurmdb_unpack_job_rec@Base 2.2.0
- slurmdb_unpack_qos_cond@Base 2.2.0
- slurmdb_unpack_qos_rec@Base 2.2.0
- slurmdb_unpack_reservation_cond@Base 2.2.0
- slurmdb_unpack_reservation_rec@Base 2.2.0
- slurmdb_unpack_selected_step@Base 2.2.0
- slurmdb_unpack_step_rec@Base 2.2.0
- slurmdb_unpack_txn_cond@Base 2.2.0
- slurmdb_unpack_txn_rec@Base 2.2.0
- slurmdb_unpack_update_object@Base 2.2.0
- slurmdb_unpack_used_limits@Base 2.2.0
- slurmdb_unpack_user_cond@Base 2.2.0
- slurmdb_unpack_user_rec@Base 2.2.0
- slurmdb_unpack_wckey_cond@Base 2.2.0
- slurmdb_unpack_wckey_rec@Base 2.2.0
- slurmdb_usage_get@Base 2.2.0
- slurmdb_usage_roll@Base 2.2.0
- slurmdb_users_add@Base 2.2.0
- slurmdb_users_get@Base 2.2.0
- slurmdb_users_modify@Base 2.2.0
- slurmdb_users_remove@Base 2.2.0
- slurmdb_wckeys_add@Base 2.2.0
- slurmdb_wckeys_get@Base 2.2.0
- slurmdb_wckeys_modify@Base 2.2.0
- slurmdb_wckeys_remove@Base 2.2.0
diff -Nru slurm-llnl-2.2.7/debian/libslurmdb23.symbols slurm-llnl-2.3.2/debian/libslurmdb23.symbols
--- slurm-llnl-2.2.7/debian/libslurmdb23.symbols 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/debian/libslurmdb23.symbols 2011-10-27 08:24:31.000000000 +0000
@@ -0,0 +1,911 @@
+libslurmdb.so.23 libslurmdb23 #MINVER#
+ slurm_accept_msg_conn@Base 2.2.0
+ slurm_accept_stream@Base 2.2.0
+ slurm_accounting_enforce_string@Base 2.2.0
+ slurm_acct_storage_fini@Base 2.2.0
+ slurm_acct_storage_init@Base 2.2.0
+ slurm_add_slash_to_quotes@Base 2.2.0
+ slurm_addto_char_list@Base 2.2.0
+ slurm_allocate_resources@Base 2.2.0
+ slurm_allocate_resources_blocking@Base 2.2.0
+ slurm_allocation_lookup@Base 2.2.0
+ slurm_allocation_lookup_lite@Base 2.2.0
+ slurm_allocation_msg_thr_create@Base 2.2.0
+ slurm_allocation_msg_thr_destroy@Base 2.2.0
+ slurm_api_clear_config@Base 2.2.0
+ slurm_api_set_conf_file@Base 2.2.0
+ slurm_api_set_default_config@Base 2.2.0
+ slurm_api_version@Base 2.2.0
+ slurm_arg_count@Base 2.2.0
+ slurm_arg_idx_by_name@Base 2.2.0
+ slurm_arg_name_by_idx@Base 2.2.0
+ slurm_auth_context_create@Base 2.2.0
+ slurm_auth_fini@Base 2.2.0
+ slurm_auth_get_arg_desc@Base 2.2.0
+ slurm_auth_init@Base 2.2.0
+ slurm_bg_block_state_string@Base 2.2.0
+ slurm_bit_alloc@Base 2.2.0
+ slurm_bit_and@Base 2.2.0
+ slurm_bit_clear@Base 2.2.0
+ slurm_bit_clear_count@Base 2.2.0
+ slurm_bit_copy@Base 2.2.0
+ slurm_bit_copybits@Base 2.2.0
+ slurm_bit_equal@Base 2.2.0
+ slurm_bit_ffc@Base 2.2.0
+ slurm_bit_ffs@Base 2.2.0
+ slurm_bit_fill_gaps@Base 2.2.0
+ slurm_bit_fls@Base 2.2.0
+ slurm_bit_fmt@Base 2.2.0
+ slurm_bit_fmt_binmask@Base 2.2.0
+ slurm_bit_fmt_hexmask@Base 2.2.0
+ slurm_bit_free@Base 2.2.0
+ slurm_bit_get_bit_num@Base 2.2.0
+ slurm_bit_get_pos_num@Base 2.2.0
+ slurm_bit_nclear@Base 2.2.0
+ slurm_bit_nffc@Base 2.2.0
+ slurm_bit_nffs@Base 2.2.0
+ slurm_bit_noc@Base 2.2.0
+ slurm_bit_not@Base 2.2.0
+ slurm_bit_nset@Base 2.2.0
+ slurm_bit_nset_max_count@Base 2.2.0
+ slurm_bit_or@Base 2.2.0
+ slurm_bit_overlap@Base 2.2.0
+ slurm_bit_pick_cnt@Base 2.2.0
+ slurm_bit_realloc@Base 2.2.0
+ slurm_bit_rotate@Base 2.2.0
+ slurm_bit_rotate_copy@Base 2.2.0
+ slurm_bit_set@Base 2.2.0
+ slurm_bit_set_count@Base 2.2.0
+ slurm_bit_size@Base 2.2.0
+ slurm_bit_super_set@Base 2.2.0
+ slurm_bit_test@Base 2.2.0
+ slurm_bit_unfmt@Base 2.2.0
+ slurm_bit_unfmt_binmask@Base 2.2.0
+ slurm_bit_unfmt_hexmask@Base 2.2.0
+ slurm_bitfmt2int@Base 2.2.0
+ slurm_cgroup_conf@Base 2.3.1
+ slurm_checkpoint_able@Base 2.2.0
+ slurm_checkpoint_complete@Base 2.2.0
+ slurm_checkpoint_create@Base 2.2.0
+ slurm_checkpoint_disable@Base 2.2.0
+ slurm_checkpoint_enable@Base 2.2.0
+ slurm_checkpoint_error@Base 2.2.0
+ slurm_checkpoint_requeue@Base 2.2.0
+ slurm_checkpoint_restart@Base 2.2.0
+ slurm_checkpoint_task_complete@Base 2.2.0
+ slurm_checkpoint_tasks@Base 2.2.0
+ slurm_checkpoint_vacate@Base 2.2.0
+ slurm_clear_trigger@Base 2.2.0
+ slurm_close_accepted_conn@Base 2.2.0
+ slurm_close_slurmdbd_conn@Base 2.2.0
+ slurm_close_stream@Base 2.2.0
+ slurm_complete_job@Base 2.2.0
+ slurm_conf_destroy@Base 2.2.0
+ slurm_conf_downnodes_array@Base 2.2.0
+ slurm_conf_expand_slurmd_path@Base 2.2.0
+ slurm_conf_frontend_array@Base 2.3.1
+ slurm_conf_get_addr@Base 2.2.0
+ slurm_conf_get_aliased_nodename@Base 2.2.0
+ slurm_conf_get_aliases@Base 2.2.4
+ slurm_conf_get_cpus_sct@Base 2.2.0
+ slurm_conf_get_hostname@Base 2.2.0
+ slurm_conf_get_nodeaddr@Base 2.2.0
+ slurm_conf_get_nodename@Base 2.2.0
+ slurm_conf_get_port@Base 2.2.0
+ slurm_conf_init@Base 2.2.0
+ slurm_conf_install_fork_handlers@Base 2.2.0
+ slurm_conf_lock@Base 2.2.0
+ slurm_conf_mutex_init@Base 2.2.0
+ slurm_conf_nodename_array@Base 2.2.0
+ slurm_conf_options@Base 2.2.0
+ slurm_conf_partition_array@Base 2.2.0
+ slurm_conf_reinit@Base 2.2.0
+ slurm_conf_unlock@Base 2.2.0
+ slurm_conn_type_string@Base 2.2.0
+ slurm_conn_type_string_full@Base 2.3.1
+ slurm_create_buf@Base 2.2.0
+ slurm_create_partition@Base 2.2.0
+ slurm_create_reservation@Base 2.2.0
+ slurm_cred_begin_expiration@Base 2.2.0
+ slurm_cred_copy@Base 2.2.0
+ slurm_cred_create@Base 2.2.0
+ slurm_cred_creator_ctx_create@Base 2.2.0
+ slurm_cred_ctx_destroy@Base 2.2.0
+ slurm_cred_ctx_get@Base 2.2.0
+ slurm_cred_ctx_key_update@Base 2.2.0
+ slurm_cred_ctx_pack@Base 2.2.0
+ slurm_cred_ctx_set@Base 2.2.0
+ slurm_cred_ctx_unpack@Base 2.2.0
+ slurm_cred_destroy@Base 2.2.0
+ slurm_cred_faker@Base 2.2.0
+ slurm_cred_free_args@Base 2.2.0
+ slurm_cred_get_args@Base 2.2.0
+ slurm_cred_get_signature@Base 2.2.0
+ slurm_cred_handle_reissue@Base 2.2.0
+ slurm_cred_insert_jobid@Base 2.2.0
+ slurm_cred_jobid_cached@Base 2.2.0
+ slurm_cred_pack@Base 2.2.0
+ slurm_cred_print@Base 2.2.0
+ slurm_cred_revoke@Base 2.2.0
+ slurm_cred_revoked@Base 2.2.0
+ slurm_cred_rewind@Base 2.2.0
+ slurm_cred_unpack@Base 2.2.0
+ slurm_cred_verifier_ctx_create@Base 2.2.0
+ slurm_cred_verify@Base 2.2.0
+ slurm_crypto_fini@Base 2.2.0
+ slurm_ctl_conf_2_key_pairs@Base 2.2.0
+ slurm_debug2@Base 2.2.0
+ slurm_debug3@Base 2.2.0
+ slurm_debug4@Base 2.2.0
+ slurm_debug5@Base 2.2.0
+ slurm_debug@Base 2.2.0
+ slurm_delete_partition@Base 2.2.0
+ slurm_delete_reservation@Base 2.2.0
+ slurm_destroy_association_shares_object@Base 2.2.0
+ slurm_destroy_char@Base 2.2.0
+ slurm_destroy_config_key_pair@Base 2.2.0
+ slurm_destroy_priority_factors_object@Base 2.2.0
+ slurm_destroy_select_ba_request@Base 2.3.1
+ slurm_destroy_uint32_ptr@Base 2.2.0
+ slurm_diff_tv@Base 2.3.1
+ slurm_diff_tv_str@Base 2.3.1
+ slurm_dump_cleanup_list@Base 2.2.0
+ slurm_env_array_append@Base 2.2.0
+ slurm_env_array_append_fmt@Base 2.2.0
+ slurm_env_array_copy@Base 2.2.0
+ slurm_env_array_create@Base 2.2.0
+ slurm_env_array_free@Base 2.2.0
+ slurm_env_array_merge@Base 2.2.0
+ slurm_env_array_overwrite@Base 2.2.0
+ slurm_env_array_overwrite_fmt@Base 2.2.0
+ slurm_error@Base 2.2.0
+ slurm_fatal@Base 2.2.0
+ slurm_fatal_add_cleanup@Base 2.2.0
+ slurm_fatal_add_cleanup_job@Base 2.2.0
+ slurm_fatal_cleanup@Base 2.2.0
+ slurm_fatal_remove_cleanup@Base 2.2.0
+ slurm_fatal_remove_cleanup_job@Base 2.2.0
+ slurm_fd_read_n@Base 2.2.0
+ slurm_fd_set_blocking@Base 2.2.0
+ slurm_fd_set_nonblocking@Base 2.2.0
+ slurm_fd_write_n@Base 2.2.0
+ slurm_free_accounting_update_msg@Base 2.2.0
+ slurm_free_block_info@Base 2.2.0
+ slurm_free_block_info_members@Base 2.2.0
+ slurm_free_block_info_msg@Base 2.2.0
+ slurm_free_block_info_request_msg@Base 2.2.0
+ slurm_free_block_job_info@Base 2.3.1
+ slurm_free_buf@Base 2.2.0
+ slurm_free_checkpoint_comp_msg@Base 2.2.0
+ slurm_free_checkpoint_msg@Base 2.2.0
+ slurm_free_checkpoint_resp_msg@Base 2.2.0
+ slurm_free_checkpoint_task_comp_msg@Base 2.2.0
+ slurm_free_checkpoint_tasks_msg@Base 2.2.0
+ slurm_free_complete_batch_script_msg@Base 2.2.0
+ slurm_free_complete_job_allocation_msg@Base 2.2.0
+ slurm_free_ctl_conf@Base 2.2.0
+ slurm_free_delete_part_msg@Base 2.2.0
+ slurm_free_epilog_complete_msg@Base 2.2.0
+ slurm_free_file_bcast_msg@Base 2.2.0
+ slurm_free_front_end_info_members@Base 2.3.1
+ slurm_free_front_end_info_msg@Base 2.3.1
+ slurm_free_front_end_info_request_msg@Base 2.3.1
+ slurm_free_get_kvs_msg@Base 2.2.0
+ slurm_free_job_alloc_info_msg@Base 2.2.0
+ slurm_free_job_alloc_info_response_msg@Base 2.2.0
+ slurm_free_job_desc_msg@Base 2.2.0
+ slurm_free_job_id_msg@Base 2.2.0
+ slurm_free_job_id_request_msg@Base 2.2.0
+ slurm_free_job_id_response_msg@Base 2.2.0
+ slurm_free_job_info@Base 2.2.0
+ slurm_free_job_info_members@Base 2.2.0
+ slurm_free_job_info_msg@Base 2.2.0
+ slurm_free_job_info_request_msg@Base 2.2.0
+ slurm_free_job_launch_msg@Base 2.2.0
+ slurm_free_job_notify_msg@Base 2.2.0
+ slurm_free_job_step_create_request_msg@Base 2.2.0
+ slurm_free_job_step_create_response_msg@Base 2.2.0
+ slurm_free_job_step_id_msg@Base 2.2.0
+ slurm_free_job_step_info_members@Base 2.2.0
+ slurm_free_job_step_info_request_msg@Base 2.2.0
+ slurm_free_job_step_info_response_msg@Base 2.2.0
+ slurm_free_job_step_kill_msg@Base 2.2.0
+ slurm_free_job_step_pids@Base 2.2.0
+ slurm_free_job_step_stat@Base 2.2.0
+ slurm_free_kill_job_msg@Base 2.2.0
+ slurm_free_kill_tasks_msg@Base 2.2.0
+ slurm_free_kvs_comm_set@Base 2.2.0
+ slurm_free_last_update_msg@Base 2.2.0
+ slurm_free_launch_tasks_request_msg@Base 2.2.0
+ slurm_free_launch_tasks_response_msg@Base 2.2.0
+ slurm_free_msg@Base 2.2.0
+ slurm_free_msg_data@Base 2.2.0
+ slurm_free_node_info_members@Base 2.2.0
+ slurm_free_node_info_msg@Base 2.2.0
+ slurm_free_node_info_request_msg@Base 2.2.0
+ slurm_free_node_registration_status_msg@Base 2.2.0
+ slurm_free_part_info_request_msg@Base 2.2.0
+ slurm_free_partition_info_members@Base 2.2.0
+ slurm_free_partition_info_msg@Base 2.2.0
+ slurm_free_priority_factors_request_msg@Base 2.2.0
+ slurm_free_priority_factors_response_msg@Base 2.2.0
+ slurm_free_reattach_tasks_request_msg@Base 2.2.0
+ slurm_free_reattach_tasks_response_msg@Base 2.2.0
+ slurm_free_reservation_info_msg@Base 2.2.0
+ slurm_free_reserve_info_members@Base 2.2.0
+ slurm_free_resource_allocation_response_msg@Base 2.2.0
+ slurm_free_resv_desc_msg@Base 2.2.0
+ slurm_free_resv_info_request_msg@Base 2.2.0
+ slurm_free_resv_name_msg@Base 2.2.0
+ slurm_free_return_code_msg@Base 2.2.0
+ slurm_free_sbcast_cred_msg@Base 2.2.0
+ slurm_free_set_debug_flags_msg@Base 2.3.1
+ slurm_free_set_debug_level_msg@Base 2.2.0
+ slurm_free_shares_request_msg@Base 2.2.0
+ slurm_free_shares_response_msg@Base 2.2.0
+ slurm_free_shutdown_msg@Base 2.2.0
+ slurm_free_signal_job_msg@Base 2.2.0
+ slurm_free_slurmd_status@Base 2.2.0
+ slurm_free_spank_env_request_msg@Base 2.3.1
+ slurm_free_spank_env_responce_msg@Base 2.3.1
+ slurm_free_srun_exec_msg@Base 2.2.0
+ slurm_free_srun_job_complete_msg@Base 2.2.0
+ slurm_free_srun_node_fail_msg@Base 2.2.0
+ slurm_free_srun_ping_msg@Base 2.2.0
+ slurm_free_srun_step_missing_msg@Base 2.2.0
+ slurm_free_srun_timeout_msg@Base 2.2.0
+ slurm_free_srun_user_msg@Base 2.2.0
+ slurm_free_step_complete_msg@Base 2.2.0
+ slurm_free_submit_response_response_msg@Base 2.2.0
+ slurm_free_suspend_msg@Base 2.2.0
+ slurm_free_task_exit_msg@Base 2.2.0
+ slurm_free_task_user_managed_io_stream_msg@Base 2.2.0
+ slurm_free_topo_info_msg@Base 2.2.0
+ slurm_free_trigger_msg@Base 2.2.0
+ slurm_free_update_front_end_msg@Base 2.3.1
+ slurm_free_update_job_time_msg@Base 2.2.0
+ slurm_free_update_node_msg@Base 2.2.0
+ slurm_free_update_part_msg@Base 2.2.0
+ slurm_free_update_step_msg@Base 2.2.0
+ slurm_free_will_run_response_msg@Base 2.2.0
+ slurm_get_accounting_storage_backup_host@Base 2.2.0
+ slurm_get_accounting_storage_enforce@Base 2.2.0
+ slurm_get_accounting_storage_host@Base 2.2.0
+ slurm_get_accounting_storage_loc@Base 2.2.0
+ slurm_get_accounting_storage_pass@Base 2.2.0
+ slurm_get_accounting_storage_port@Base 2.2.0
+ slurm_get_accounting_storage_type@Base 2.2.0
+ slurm_get_accounting_storage_user@Base 2.2.0
+ slurm_get_addr@Base 2.2.0
+ slurm_get_api_config@Base 2.2.0
+ slurm_get_auth_type@Base 2.2.0
+ slurm_get_avail_procs@Base 2.2.0
+ slurm_get_batch_start_timeout@Base 2.2.0
+ slurm_get_checkpoint_type@Base 2.2.0
+ slurm_get_cluster_name@Base 2.2.0
+ slurm_get_complete_wait@Base 2.2.0
+ slurm_get_controller_addr_spec@Base 2.2.0
+ slurm_get_crypto_type@Base 2.2.0
+ slurm_get_debug_flags@Base 2.2.0
+ slurm_get_def_mem_per_cpu@Base 2.2.0
+ slurm_get_end_time@Base 2.2.0
+ slurm_get_env_timeout@Base 2.2.0
+ slurm_get_epilog_msg_time@Base 2.2.0
+ slurm_get_errno@Base 2.2.0
+ slurm_get_fast_schedule@Base 2.2.0
+ slurm_get_gres_plugins@Base 2.2.0
+ slurm_get_hash_val@Base 2.2.0
+ slurm_get_health_check_program@Base 2.2.0
+ slurm_get_ip_str@Base 2.2.0
+ slurm_get_is_association_based_accounting@Base 2.2.0
+ slurm_get_job_steps@Base 2.2.0
+ slurm_get_job_submit_plugins@Base 2.2.0
+ slurm_get_jobacct_gather_freq@Base 2.2.0
+ slurm_get_jobacct_gather_type@Base 2.2.0
+ slurm_get_jobcomp_host@Base 2.2.0
+ slurm_get_jobcomp_loc@Base 2.2.0
+ slurm_get_jobcomp_pass@Base 2.2.0
+ slurm_get_jobcomp_port@Base 2.2.0
+ slurm_get_jobcomp_type@Base 2.2.0
+ slurm_get_jobcomp_user@Base 2.2.0
+ slurm_get_kill_on_bad_exit@Base 2.2.0
+ slurm_get_kvs_comm_set@Base 2.2.0
+ slurm_get_max_mem_per_cpu@Base 2.2.0
+ slurm_get_mpi_default@Base 2.2.0
+ slurm_get_mpi_params@Base 2.2.0
+ slurm_get_msg_timeout@Base 2.2.0
+ slurm_get_peer_addr@Base 2.2.0
+ slurm_get_plugin_dir@Base 2.2.0
+ slurm_get_preempt_mode@Base 2.2.0
+ slurm_get_preempt_type@Base 2.2.0
+ slurm_get_priority_calc_period@Base 2.2.0
+ slurm_get_priority_decay_hl@Base 2.2.0
+ slurm_get_priority_favor_small@Base 2.2.0
+ slurm_get_priority_max_age@Base 2.2.0
+ slurm_get_priority_reset_period@Base 2.2.0
+ slurm_get_priority_type@Base 2.2.0
+ slurm_get_priority_weight_age@Base 2.2.0
+ slurm_get_priority_weight_fairshare@Base 2.2.0
+ slurm_get_priority_weight_job_size@Base 2.2.0
+ slurm_get_priority_weight_partition@Base 2.2.0
+ slurm_get_priority_weight_qos@Base 2.2.0
+ slurm_get_private_data@Base 2.2.0
+ slurm_get_proctrack_type@Base 2.2.0
+ slurm_get_propagate_prio_process@Base 2.2.0
+ slurm_get_rem_time@Base 2.2.0
+ slurm_get_resume_timeout@Base 2.2.0
+ slurm_get_return_code@Base 2.2.0
+ slurm_get_root_filter@Base 2.2.0
+ slurm_get_sched_params@Base 2.2.0
+ slurm_get_sched_port@Base 2.2.0
+ slurm_get_sched_type@Base 2.2.0
+ slurm_get_select_jobinfo@Base 2.2.0
+ slurm_get_select_nodeinfo@Base 2.2.0
+ slurm_get_select_type@Base 2.2.0
+ slurm_get_select_type_param@Base 2.3.1
+ slurm_get_slurm_user_id@Base 2.2.0
+ slurm_get_slurmd_port@Base 2.2.0
+ slurm_get_slurmd_user_id@Base 2.2.0
+ slurm_get_srun_epilog@Base 2.2.0
+ slurm_get_srun_prolog@Base 2.2.0
+ slurm_get_state_save_location@Base 2.2.0
+ slurm_get_stream_addr@Base 2.2.0
+ slurm_get_suspend_time@Base 2.2.0
+ slurm_get_suspend_timeout@Base 2.2.0
+ slurm_get_switch_type@Base 2.2.0
+ slurm_get_task_epilog@Base 2.2.0
+ slurm_get_task_plugin@Base 2.2.0
+ slurm_get_task_plugin_param@Base 2.2.0
+ slurm_get_task_prolog@Base 2.2.0
+ slurm_get_topology_plugin@Base 2.2.0
+ slurm_get_track_wckey@Base 2.2.0
+ slurm_get_tree_width@Base 2.2.0
+ slurm_get_triggers@Base 2.2.0
+ slurm_get_vsize_factor@Base 2.2.0
+ slurm_get_wait_time@Base 2.2.0
+ slurm_getenvp@Base 2.2.0
+ slurm_grow_buf@Base 2.2.0
+ slurm_hostlist_copy@Base 2.2.0
+ slurm_hostlist_count@Base 2.2.0
+ slurm_hostlist_create@Base 2.2.0
+ slurm_hostlist_create_dims@Base 2.3.1
+ slurm_hostlist_delete@Base 2.2.0
+ slurm_hostlist_delete_host@Base 2.2.0
+ slurm_hostlist_delete_nth@Base 2.2.0
+ slurm_hostlist_deranged_string@Base 2.2.0
+ slurm_hostlist_deranged_string_dims@Base 2.3.1
+ slurm_hostlist_deranged_string_malloc@Base 2.2.0
+ slurm_hostlist_deranged_string_xmalloc@Base 2.2.0
+ slurm_hostlist_deranged_string_xmalloc_dims@Base 2.3.1
+ slurm_hostlist_destroy@Base 2.2.0
+ slurm_hostlist_find@Base 2.2.0
+ slurm_hostlist_iterator_create@Base 2.2.0
+ slurm_hostlist_iterator_destroy@Base 2.2.0
+ slurm_hostlist_iterator_reset@Base 2.2.0
+ slurm_hostlist_next@Base 2.2.0
+ slurm_hostlist_next_range@Base 2.2.0
+ slurm_hostlist_nth@Base 2.2.0
+ slurm_hostlist_pop@Base 2.2.0
+ slurm_hostlist_pop_range@Base 2.2.0
+ slurm_hostlist_push@Base 2.2.0
+ slurm_hostlist_push_host@Base 2.2.0
+ slurm_hostlist_push_host_dims@Base 2.3.1
+ slurm_hostlist_push_list@Base 2.2.0
+ slurm_hostlist_ranged_string@Base 2.2.0
+ slurm_hostlist_ranged_string_dims@Base 2.3.1
+ slurm_hostlist_ranged_string_malloc@Base 2.2.0
+ slurm_hostlist_ranged_string_xmalloc@Base 2.2.0
+ slurm_hostlist_ranged_string_xmalloc_dims@Base 2.3.1
+ slurm_hostlist_remove@Base 2.2.0
+ slurm_hostlist_shift@Base 2.2.0
+ slurm_hostlist_shift_range@Base 2.2.0
+ slurm_hostlist_soft@Base 2.2.0
+ slurm_hostlist_uniq@Base 2.2.0
+ slurm_hostset_copy@Base 2.2.0
+ slurm_hostset_count@Base 2.2.0
+ slurm_hostset_create@Base 2.2.0
+ slurm_hostset_delete@Base 2.2.0
+ slurm_hostset_destroy@Base 2.2.0
+ slurm_hostset_find@Base 2.2.0
+ slurm_hostset_insert@Base 2.2.0
+ slurm_hostset_nth@Base 2.2.0
+ slurm_hostset_shift@Base 2.2.0
+ slurm_hostset_shift_range@Base 2.2.0
+ slurm_hostset_within@Base 2.2.0
+ slurm_info@Base 2.2.0
+ slurm_init_buf@Base 2.2.0
+ slurm_init_job_desc_msg@Base 2.2.0
+ slurm_init_msg_engine@Base 2.2.0
+ slurm_init_msg_engine_addrname_port@Base 2.2.0
+ slurm_init_msg_engine_port@Base 2.2.0
+ slurm_init_part_desc_msg@Base 2.2.0
+ slurm_init_resv_desc_msg@Base 2.2.0
+ slurm_init_update_block_msg@Base 2.2.0
+ slurm_init_update_front_end_msg@Base 2.3.1
+ slurm_init_update_node_msg@Base 2.2.0
+ slurm_init_update_step_msg@Base 2.2.0
+ slurm_int_and_set_count@Base 2.2.0
+ slurm_job_cpus_allocated_on_node@Base 2.2.0
+ slurm_job_cpus_allocated_on_node_id@Base 2.2.0
+ slurm_job_node_ready@Base 2.2.0
+ slurm_job_reason_string@Base 2.2.0
+ slurm_job_state_num@Base 2.2.0
+ slurm_job_state_string@Base 2.2.0
+ slurm_job_state_string_compact@Base 2.2.0
+ slurm_job_step_create@Base 2.2.0
+ slurm_job_step_get_pids@Base 2.2.0
+ slurm_job_step_layout_free@Base 2.2.0
+ slurm_job_step_layout_get@Base 2.2.0
+ slurm_job_step_pids_free@Base 2.2.0
+ slurm_job_step_pids_response_msg_free@Base 2.2.0
+ slurm_job_step_stat@Base 2.2.0
+ slurm_job_step_stat_free@Base 2.2.0
+ slurm_job_step_stat_response_msg_free@Base 2.2.0
+ slurm_job_will_run@Base 2.2.0
+ slurm_jobacct_common_alloc_jobacct@Base 2.2.0
+ slurm_jobacct_common_free_jobacct@Base 2.2.0
+ slurm_jobacct_common_pack@Base 2.2.0
+ slurm_jobacct_common_unpack@Base 2.2.0
+ slurm_jobacct_gather_fini@Base 2.2.0
+ slurm_jobacct_gather_init@Base 2.2.0
+ slurm_jobinfo_ctx_get@Base 2.2.0
+ slurm_kill_job@Base 2.2.0
+ slurm_kill_job_step@Base 2.2.0
+ slurm_list_append@Base 2.2.0
+ slurm_list_append_list@Base 2.2.0
+ slurm_list_count@Base 2.2.0
+ slurm_list_create@Base 2.2.0
+ slurm_list_delete_all@Base 2.2.0
+ slurm_list_delete_item@Base 2.2.0
+ slurm_list_dequeue@Base 2.2.0
+ slurm_list_destroy@Base 2.2.0
+ slurm_list_enqueue@Base 2.2.0
+ slurm_list_find@Base 2.2.0
+ slurm_list_find_first@Base 2.2.0
+ slurm_list_flush@Base 2.2.0
+ slurm_list_for_each@Base 2.2.0
+ slurm_list_insert@Base 2.2.0
+ slurm_list_install_fork_handlers@Base 2.2.0
+ slurm_list_is_empty@Base 2.2.0
+ slurm_list_iterator_create@Base 2.2.0
+ slurm_list_iterator_destroy@Base 2.2.0
+ slurm_list_iterator_reset@Base 2.2.0
+ slurm_list_next@Base 2.2.0
+ slurm_list_peek@Base 2.2.0
+ slurm_list_pop@Base 2.2.0
+ slurm_list_prepend@Base 2.2.0
+ slurm_list_push@Base 2.2.0
+ slurm_list_remove@Base 2.2.0
+ slurm_list_sort@Base 2.2.0
+ slurm_list_transfer@Base 2.2.0
+ slurm_listen_stream@Base 2.2.0
+ slurm_load_block_info@Base 2.2.0
+ slurm_load_ctl_conf@Base 2.2.0
+ slurm_load_front_end@Base 2.3.1
+ slurm_load_job@Base 2.2.0
+ slurm_load_jobs@Base 2.2.0
+ slurm_load_node@Base 2.2.0
+ slurm_load_partitions@Base 2.2.0
+ slurm_load_reservations@Base 2.2.0
+ slurm_load_slurmd_status@Base 2.2.0
+ slurm_load_topo@Base 2.2.0
+ slurm_log_alter@Base 2.2.0
+ slurm_log_fini@Base 2.2.0
+ slurm_log_flush@Base 2.2.0
+ slurm_log_fp@Base 2.2.0
+ slurm_log_has_data@Base 2.2.0
+ slurm_log_init@Base 2.2.0
+ slurm_log_reinit@Base 2.2.0
+ slurm_log_set_fpfx@Base 2.2.0
+ slurm_make_time_str@Base 2.2.0
+ slurm_msg_t_copy@Base 2.2.0
+ slurm_msg_t_init@Base 2.2.0
+ slurm_net_accept_stream@Base 2.2.0
+ slurm_net_set_low_water@Base 2.2.0
+ slurm_net_stream_listen@Base 2.2.0
+ slurm_node_state_string@Base 2.2.0
+ slurm_node_state_string_compact@Base 2.2.0
+ slurm_node_use_string@Base 2.2.0
+ slurm_notify_job@Base 2.2.0
+ slurm_open_controller_conn@Base 2.2.0
+ slurm_open_controller_conn_spec@Base 2.2.0
+ slurm_open_msg_conn@Base 2.2.0
+ slurm_open_slurmdbd_conn@Base 2.2.0
+ slurm_open_stream@Base 2.2.0
+ slurm_pack16@Base 2.2.0
+ slurm_pack16_array@Base 2.2.0
+ slurm_pack32@Base 2.2.0
+ slurm_pack32_array@Base 2.2.0
+ slurm_pack64@Base 2.2.0
+ slurm_pack8@Base 2.2.0
+ slurm_pack_block_job_info@Base 2.3.1
+#MISSING: 2.3.1-1# slurm_pack_msg_no_header@Base 2.2.0
+ slurm_pack_slurm_addr@Base 2.2.0
+ slurm_pack_slurm_addr_array@Base 2.2.0
+ slurm_pack_time@Base 2.2.0
+ slurm_packdouble@Base 2.3.1
+ slurm_packmem@Base 2.2.0
+ slurm_packmem_array@Base 2.2.0
+ slurm_packstr_array@Base 2.2.0
+ slurm_parser@Base 2.2.0
+ slurm_perror@Base 2.2.0
+ slurm_pid2jobid@Base 2.2.0
+ slurm_ping@Base 2.2.0
+ slurm_plugin_get_syms@Base 2.2.0
+ slurm_plugin_load_and_link@Base 2.2.0
+ slurm_plugin_strerror@Base 2.2.0
+ slurm_plugin_unload@Base 2.2.0
+ slurm_plugrack_create@Base 2.2.0
+ slurm_plugrack_destroy@Base 2.2.0
+ slurm_plugrack_read_dir@Base 2.2.0
+ slurm_plugrack_set_major_type@Base 2.2.0
+ slurm_plugrack_set_paranoia@Base 2.2.0
+ slurm_plugrack_use_by_type@Base 2.2.0
+ slurm_pmi_finalize@Base 2.2.0
+ slurm_preempt_mode_num@Base 2.2.0
+ slurm_preempt_mode_string@Base 2.2.0
+ slurm_print_block_info@Base 2.2.0
+ slurm_print_block_info_msg@Base 2.2.0
+ slurm_print_cpu_bind_help@Base 2.2.0
+ slurm_print_ctl_conf@Base 2.2.0
+ slurm_print_front_end_info_msg@Base 2.3.1
+ slurm_print_front_end_table@Base 2.3.1
+ slurm_print_job_info@Base 2.2.0
+ slurm_print_job_info_msg@Base 2.2.0
+ slurm_print_job_step_info@Base 2.2.0
+ slurm_print_job_step_info_msg@Base 2.2.0
+ slurm_print_key_pairs@Base 2.2.0
+ slurm_print_launch_task_msg@Base 2.2.0
+ slurm_print_mem_bind_help@Base 2.2.0
+ slurm_print_node_info_msg@Base 2.2.0
+ slurm_print_node_table@Base 2.2.0
+ slurm_print_partition_info@Base 2.2.0
+ slurm_print_partition_info_msg@Base 2.2.0
+ slurm_print_reservation_info@Base 2.2.0
+ slurm_print_reservation_info_msg@Base 2.2.0
+ slurm_print_slurm_addr@Base 2.2.0
+ slurm_print_slurmd_status@Base 2.2.0
+ slurm_print_topo_info_msg@Base 2.2.0
+ slurm_print_topo_record@Base 2.2.0
+ slurm_priority_fini@Base 2.2.0
+ slurm_priority_init@Base 2.2.0
+ slurm_private_data_string@Base 2.2.0
+ slurm_pull_trigger@Base 2.2.0
+ slurm_read_hostfile@Base 2.2.0
+ slurm_read_stream@Base 2.2.0
+ slurm_read_stream_timeout@Base 2.2.0
+ slurm_receive_msg@Base 2.2.0
+ slurm_receive_msg_and_forward@Base 2.2.0
+ slurm_receive_msgs@Base 2.2.0
+ slurm_reconfigure@Base 2.2.0
+ slurm_requeue@Base 2.2.0
+ slurm_reservation_flags_string@Base 2.2.0
+ slurm_resume@Base 2.2.0
+ slurm_s_p_get_string@Base 2.3.1
+ slurm_s_p_get_uint32@Base 2.3.1
+ slurm_s_p_hashtbl_create@Base 2.3.1
+ slurm_s_p_hashtbl_destroy@Base 2.3.1
+ slurm_s_p_parse_file@Base 2.3.1
+ slurm_sbcast_lookup@Base 2.2.0
+ slurm_select_fini@Base 2.2.0
+ slurm_select_init@Base 2.2.0
+ slurm_send_addr_recv_msgs@Base 2.2.0
+ slurm_send_kvs_comm_set@Base 2.2.0
+ slurm_send_node_msg@Base 2.2.0
+ slurm_send_only_controller_msg@Base 2.2.0
+ slurm_send_only_node_msg@Base 2.2.0
+ slurm_send_rc_msg@Base 2.2.0
+ slurm_send_recv_controller_msg@Base 2.2.0
+ slurm_send_recv_controller_rc_msg@Base 2.2.0
+ slurm_send_recv_msgs@Base 2.2.0
+ slurm_send_recv_node_msg@Base 2.2.0
+ slurm_send_recv_rc_msg_only_one@Base 2.2.0
+ slurm_send_recv_slurmdbd_msg@Base 2.2.0
+ slurm_send_slurmdbd_msg@Base 2.2.0
+ slurm_send_slurmdbd_recv_rc_msg@Base 2.2.0
+ slurm_set_accounting_storage_host@Base 2.2.0
+ slurm_set_accounting_storage_loc@Base 2.2.0
+ slurm_set_accounting_storage_port@Base 2.2.0
+ slurm_set_accounting_storage_user@Base 2.2.0
+ slurm_set_addr@Base 2.2.0
+ slurm_set_addr_any@Base 2.2.0
+ slurm_set_addr_char@Base 2.2.0
+ slurm_set_addr_uint@Base 2.2.0
+ slurm_set_api_config@Base 2.2.0
+ slurm_set_auth_type@Base 2.2.0
+ slurm_set_debug_flags@Base 2.3.1
+ slurm_set_debug_level@Base 2.2.0
+ slurm_set_debugflags@Base 2.3.1
+ slurm_set_jobcomp_port@Base 2.2.0
+ slurm_set_schedlog_level@Base 2.2.0
+ slurm_set_stream_blocking@Base 2.2.0
+ slurm_set_stream_non_blocking@Base 2.2.0
+ slurm_set_tree_width@Base 2.2.0
+ slurm_set_trigger@Base 2.2.0
+ slurm_setenvpf@Base 2.2.0
+ slurm_seterrno@Base 2.2.0
+ slurm_shutdown@Base 2.2.0
+ slurm_shutdown_msg_conn@Base 2.2.0
+ slurm_shutdown_msg_engine@Base 2.2.0
+ slurm_signal_job@Base 2.2.0
+ slurm_signal_job_step@Base 2.2.0
+ slurm_sort_char_list_asc@Base 2.2.0
+ slurm_sort_char_list_desc@Base 2.2.0
+ slurm_sort_key_pairs@Base 2.2.0
+ slurm_sprint_block_info@Base 2.2.0
+ slurm_sprint_cpu_bind_type@Base 2.2.0
+ slurm_sprint_front_end_table@Base 2.3.1
+ slurm_sprint_job_info@Base 2.2.0
+ slurm_sprint_job_step_info@Base 2.2.0
+ slurm_sprint_mem_bind_type@Base 2.2.0
+ slurm_sprint_node_table@Base 2.2.0
+ slurm_sprint_partition_info@Base 2.2.0
+ slurm_sprint_reservation_info@Base 2.2.0
+ slurm_step_ctx_create@Base 2.2.0
+ slurm_step_ctx_create_no_alloc@Base 2.2.0
+ slurm_step_ctx_daemon_per_node_hack@Base 2.2.0
+ slurm_step_ctx_destroy@Base 2.2.0
+ slurm_step_ctx_get@Base 2.2.0
+ slurm_step_ctx_params_t_init@Base 2.2.0
+ slurm_step_launch@Base 2.2.0
+ slurm_step_launch_abort@Base 2.2.0
+ slurm_step_launch_fwd_signal@Base 2.2.0
+ slurm_step_launch_params_t_init@Base 2.2.0
+ slurm_step_launch_wait_finish@Base 2.2.0
+ slurm_step_launch_wait_start@Base 2.2.0
+ slurm_step_layout_copy@Base 2.2.0
+ slurm_step_layout_create@Base 2.2.0
+ slurm_step_layout_destroy@Base 2.2.0
+ slurm_step_layout_host_id@Base 2.2.0
+ slurm_step_layout_host_name@Base 2.2.0
+ slurm_step_layout_type_name@Base 2.2.0
+ slurm_strcasestr@Base 2.2.0
+ slurm_strerror@Base 2.2.0
+ slurm_strlcpy@Base 2.2.0
+ slurm_submit_batch_job@Base 2.2.0
+ slurm_suspend@Base 2.2.0
+ slurm_takeover@Base 2.2.0
+ slurm_terminate_job@Base 2.2.0
+ slurm_terminate_job_step@Base 2.2.0
+ slurm_topo_build_config@Base 2.2.0
+ slurm_topo_fini@Base 2.2.0
+ slurm_topo_generate_node_ranking@Base 2.3.1
+ slurm_topo_get_node_addr@Base 2.2.0
+ slurm_topo_init@Base 2.2.0
+ slurm_try_xmalloc@Base 2.2.0
+ slurm_try_xrealloc@Base 2.2.0
+ slurm_unpack16@Base 2.2.0
+ slurm_unpack16_array@Base 2.2.0
+ slurm_unpack32@Base 2.2.0
+ slurm_unpack32_array@Base 2.2.0
+ slurm_unpack64@Base 2.2.0
+ slurm_unpack8@Base 2.2.0
+ slurm_unpack_block_info_members@Base 2.3.1
+ slurm_unpack_block_info_msg@Base 2.2.0
+ slurm_unpack_slurm_addr_array@Base 2.2.0
+ slurm_unpack_slurm_addr_no_alloc@Base 2.2.0
+ slurm_unpack_time@Base 2.2.0
+ slurm_unpackdouble@Base 2.3.1
+ slurm_unpackmem@Base 2.2.0
+ slurm_unpackmem_array@Base 2.2.0
+ slurm_unpackmem_malloc@Base 2.2.0
+ slurm_unpackmem_ptr@Base 2.2.0
+ slurm_unpackmem_xmalloc@Base 2.2.0
+ slurm_unpackstr_array@Base 2.2.0
+ slurm_unsetenvp@Base 2.2.0
+ slurm_update_block@Base 2.2.0
+ slurm_update_front_end@Base 2.3.1
+ slurm_update_job@Base 2.2.0
+ slurm_update_node@Base 2.2.0
+ slurm_update_partition@Base 2.2.0
+ slurm_update_reservation@Base 2.2.0
+ slurm_update_step@Base 2.2.0
+ slurm_verbose@Base 2.2.0
+ slurm_verify_cpu_bind@Base 2.2.0
+ slurm_verify_mem_bind@Base 2.2.0
+ slurm_write_stream@Base 2.2.0
+ slurm_write_stream_timeout@Base 2.2.0
+ slurm_xassert_failed@Base 2.2.0
+ slurm_xbasename@Base 2.2.0
+ slurm_xfer_buf_data@Base 2.2.0
+ slurm_xfree@Base 2.2.0
+ slurm_xmalloc@Base 2.2.0
+ slurm_xmemcat@Base 2.2.0
+ slurm_xrealloc@Base 2.2.0
+ slurm_xshort_hostname@Base 2.2.0
+ slurm_xsignal@Base 2.2.0
+ slurm_xsignal_block@Base 2.2.0
+ slurm_xsignal_save_mask@Base 2.2.0
+ slurm_xsignal_set_mask@Base 2.2.0
+ slurm_xsignal_sigset_create@Base 2.2.0
+ slurm_xsignal_unblock@Base 2.2.0
+ slurm_xsize@Base 2.2.0
+ slurm_xslurm_strerrorcat@Base 2.2.0
+ slurm_xstrcat@Base 2.2.0
+ slurm_xstrcatchar@Base 2.2.0
+ slurm_xstrdup@Base 2.2.0
+ slurm_xstrdup_printf@Base 2.2.0
+ slurm_xstrfmtcat@Base 2.2.0
+ slurm_xstrftimecat@Base 2.2.0
+ slurm_xstring_is_whitespace@Base 2.2.0
+ slurm_xstrncat@Base 2.2.0
+ slurm_xstrndup@Base 2.2.0
+ slurm_xstrstrip@Base 2.2.0
+ slurm_xstrsubstitute@Base 2.2.0
+ slurm_xstrtolower@Base 2.2.0
+ slurmdb_accounts_add@Base 2.2.0
+ slurmdb_accounts_get@Base 2.2.0
+ slurmdb_accounts_modify@Base 2.2.0
+ slurmdb_accounts_remove@Base 2.2.0
+ slurmdb_addto_qos_char_list@Base 2.2.0
+ slurmdb_admin_level_str@Base 2.2.0
+ slurmdb_archive@Base 2.2.0
+ slurmdb_archive_load@Base 2.2.0
+ slurmdb_associations_add@Base 2.2.0
+ slurmdb_associations_get@Base 2.2.0
+ slurmdb_associations_modify@Base 2.2.0
+ slurmdb_associations_remove@Base 2.2.0
+ slurmdb_cluster_flags_2_str@Base 2.2.0
+ slurmdb_clusters_add@Base 2.2.0
+ slurmdb_clusters_get@Base 2.2.0
+ slurmdb_clusters_modify@Base 2.2.0
+ slurmdb_clusters_remove@Base 2.2.0
+ slurmdb_config_get@Base 2.2.0
+ slurmdb_connection_close@Base 2.2.0
+ slurmdb_connection_get@Base 2.2.0
+ slurmdb_coord_add@Base 2.2.0
+ slurmdb_coord_remove@Base 2.2.0
+ slurmdb_create_job_rec@Base 2.2.0
+ slurmdb_create_step_rec@Base 2.2.0
+ slurmdb_destroy_account_cond@Base 2.2.0
+ slurmdb_destroy_account_rec@Base 2.2.0
+ slurmdb_destroy_accounting_rec@Base 2.2.0
+ slurmdb_destroy_archive_cond@Base 2.2.0
+ slurmdb_destroy_archive_rec@Base 2.2.0
+ slurmdb_destroy_association_cond@Base 2.2.0
+ slurmdb_destroy_association_rec@Base 2.2.0
+ slurmdb_destroy_cluster_accounting_rec@Base 2.2.0
+ slurmdb_destroy_cluster_cond@Base 2.2.0
+ slurmdb_destroy_cluster_rec@Base 2.2.0
+ slurmdb_destroy_coord_rec@Base 2.2.0
+ slurmdb_destroy_event_cond@Base 2.2.0
+ slurmdb_destroy_event_rec@Base 2.2.0
+ slurmdb_destroy_hierarchical_rec@Base 2.2.0
+ slurmdb_destroy_job_cond@Base 2.2.0
+ slurmdb_destroy_job_modify_cond@Base 2.2.0
+ slurmdb_destroy_job_rec@Base 2.2.0
+ slurmdb_destroy_print_tree@Base 2.2.0
+ slurmdb_destroy_qos_cond@Base 2.2.0
+ slurmdb_destroy_qos_rec@Base 2.2.0
+ slurmdb_destroy_report_acct_grouping@Base 2.2.0
+ slurmdb_destroy_report_assoc_rec@Base 2.2.0
+ slurmdb_destroy_report_cluster_grouping@Base 2.2.0
+ slurmdb_destroy_report_cluster_rec@Base 2.2.0
+ slurmdb_destroy_report_job_grouping@Base 2.2.0
+ slurmdb_destroy_report_user_rec@Base 2.2.0
+ slurmdb_destroy_reservation_cond@Base 2.2.0
+ slurmdb_destroy_reservation_rec@Base 2.2.0
+ slurmdb_destroy_selected_step@Base 2.2.0
+ slurmdb_destroy_step_rec@Base 2.2.0
+ slurmdb_destroy_txn_cond@Base 2.2.0
+ slurmdb_destroy_txn_rec@Base 2.2.0
+ slurmdb_destroy_update_object@Base 2.2.0
+ slurmdb_destroy_update_shares_rec@Base 2.2.0
+ slurmdb_destroy_used_limits@Base 2.2.0
+ slurmdb_destroy_user_cond@Base 2.2.0
+ slurmdb_destroy_user_rec@Base 2.2.0
+ slurmdb_destroy_wckey_cond@Base 2.2.0
+ slurmdb_destroy_wckey_rec@Base 2.2.0
+ slurmdb_events_get@Base 2.2.0
+ slurmdb_get_acct_hierarchical_rec_list@Base 2.2.0
+ slurmdb_get_hierarchical_sorted_assoc_list@Base 2.2.0
+ slurmdb_get_info_cluster@Base 2.2.0
+ slurmdb_init_association_rec@Base 2.2.0
+ slurmdb_init_cluster_cond@Base 2.2.0
+ slurmdb_init_cluster_rec@Base 2.2.0
+ slurmdb_init_qos_rec@Base 2.2.0
+ slurmdb_init_wckey_rec@Base 2.2.0
+ slurmdb_jobs_get@Base 2.2.0
+ slurmdb_pack_account_cond@Base 2.2.0
+ slurmdb_pack_account_rec@Base 2.2.0
+ slurmdb_pack_accounting_rec@Base 2.2.0
+ slurmdb_pack_archive_cond@Base 2.2.0
+ slurmdb_pack_archive_rec@Base 2.2.0
+ slurmdb_pack_association_cond@Base 2.2.0
+ slurmdb_pack_association_rec@Base 2.2.0
+ slurmdb_pack_cluster_accounting_rec@Base 2.2.0
+ slurmdb_pack_cluster_cond@Base 2.2.0
+ slurmdb_pack_cluster_rec@Base 2.2.0
+ slurmdb_pack_coord_rec@Base 2.2.0
+ slurmdb_pack_event_cond@Base 2.2.0
+ slurmdb_pack_event_rec@Base 2.2.0
+ slurmdb_pack_job_cond@Base 2.2.0
+ slurmdb_pack_job_modify_cond@Base 2.2.0
+ slurmdb_pack_job_rec@Base 2.2.0
+ slurmdb_pack_qos_cond@Base 2.2.0
+ slurmdb_pack_qos_rec@Base 2.2.0
+ slurmdb_pack_reservation_cond@Base 2.2.0
+ slurmdb_pack_reservation_rec@Base 2.2.0
+ slurmdb_pack_selected_step@Base 2.2.0
+ slurmdb_pack_step_rec@Base 2.2.0
+ slurmdb_pack_txn_cond@Base 2.2.0
+ slurmdb_pack_txn_rec@Base 2.2.0
+ slurmdb_pack_update_object@Base 2.2.0
+ slurmdb_pack_used_limits@Base 2.2.0
+ slurmdb_pack_user_cond@Base 2.2.0
+ slurmdb_pack_user_rec@Base 2.2.0
+ slurmdb_pack_wckey_cond@Base 2.2.0
+ slurmdb_pack_wckey_rec@Base 2.2.0
+ slurmdb_parse_purge@Base 2.2.0
+ slurmdb_problem_str_get@Base 2.2.0
+ slurmdb_problems_get@Base 2.2.0
+ slurmdb_purge_string@Base 2.2.0
+ slurmdb_qos_add@Base 2.2.0
+ slurmdb_qos_flags_str@Base 2.2.0
+ slurmdb_qos_get@Base 2.2.0
+ slurmdb_qos_modify@Base 2.2.0
+ slurmdb_qos_remove@Base 2.2.0
+ slurmdb_qos_str@Base 2.2.0
+ slurmdb_report_cluster_account_by_user@Base 2.2.0
+ slurmdb_report_cluster_user_by_account@Base 2.2.0
+ slurmdb_report_cluster_user_by_wckey@Base 2.2.0
+ slurmdb_report_cluster_wckey_by_user@Base 2.2.0
+ slurmdb_report_job_sizes_grouped_by_top_account@Base 2.2.0
+ slurmdb_report_job_sizes_grouped_by_top_account_then_wckey@Base 2.2.0
+ slurmdb_report_job_sizes_grouped_by_wckey@Base 2.2.0
+ slurmdb_report_set_start_end_time@Base 2.2.0
+ slurmdb_report_user_top_usage@Base 2.2.0
+ slurmdb_reservations_get@Base 2.2.0
+ slurmdb_send_accounting_update@Base 2.2.1
+ slurmdb_setup_cluster_dim_size@Base 2.3.1
+ slurmdb_setup_cluster_dims@Base 2.2.0
+ slurmdb_setup_cluster_flags@Base 2.2.0
+ slurmdb_setup_cluster_name_dims@Base 2.3.1
+ slurmdb_slurmdbd_free_id_rc_msg@Base 2.2.0
+ slurmdb_slurmdbd_free_list_msg@Base 2.2.0
+ slurmdb_slurmdbd_free_rc_msg@Base 2.2.0
+ slurmdb_slurmdbd_free_usage_msg@Base 2.2.0
+ slurmdb_sort_hierarchical_assoc_list@Base 2.2.0
+ slurmdb_str_2_cluster_flags@Base 2.2.0
+ slurmdb_tree_name_get@Base 2.2.0
+ slurmdb_txn_get@Base 2.2.0
+ slurmdb_unpack_account_cond@Base 2.2.0
+ slurmdb_unpack_account_rec@Base 2.2.0
+ slurmdb_unpack_accounting_rec@Base 2.2.0
+ slurmdb_unpack_archive_cond@Base 2.2.0
+ slurmdb_unpack_archive_rec@Base 2.2.0
+ slurmdb_unpack_association_cond@Base 2.2.0
+ slurmdb_unpack_association_rec@Base 2.2.0
+ slurmdb_unpack_cluster_accounting_rec@Base 2.2.0
+ slurmdb_unpack_cluster_cond@Base 2.2.0
+ slurmdb_unpack_cluster_rec@Base 2.2.0
+ slurmdb_unpack_coord_rec@Base 2.2.0
+ slurmdb_unpack_event_cond@Base 2.2.0
+ slurmdb_unpack_event_rec@Base 2.2.0
+ slurmdb_unpack_job_cond@Base 2.2.0
+ slurmdb_unpack_job_modify_cond@Base 2.2.0
+ slurmdb_unpack_job_rec@Base 2.2.0
+ slurmdb_unpack_qos_cond@Base 2.2.0
+ slurmdb_unpack_qos_rec@Base 2.2.0
+ slurmdb_unpack_reservation_cond@Base 2.2.0
+ slurmdb_unpack_reservation_rec@Base 2.2.0
+ slurmdb_unpack_selected_step@Base 2.2.0
+ slurmdb_unpack_step_rec@Base 2.2.0
+ slurmdb_unpack_txn_cond@Base 2.2.0
+ slurmdb_unpack_txn_rec@Base 2.2.0
+ slurmdb_unpack_update_object@Base 2.2.0
+ slurmdb_unpack_used_limits@Base 2.2.0
+ slurmdb_unpack_user_cond@Base 2.2.0
+ slurmdb_unpack_user_rec@Base 2.2.0
+ slurmdb_unpack_wckey_cond@Base 2.2.0
+ slurmdb_unpack_wckey_rec@Base 2.2.0
+ slurmdb_usage_get@Base 2.2.0
+ slurmdb_usage_roll@Base 2.2.0
+ slurmdb_users_add@Base 2.2.0
+ slurmdb_users_get@Base 2.2.0
+ slurmdb_users_modify@Base 2.2.0
+ slurmdb_users_remove@Base 2.2.0
+ slurmdb_wckeys_add@Base 2.2.0
+ slurmdb_wckeys_get@Base 2.2.0
+ slurmdb_wckeys_modify@Base 2.2.0
+ slurmdb_wckeys_remove@Base 2.2.0
diff -Nru slurm-llnl-2.2.7/debian/patches/mail-path slurm-llnl-2.3.2/debian/patches/mail-path
--- slurm-llnl-2.2.7/debian/patches/mail-path 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/debian/patches/mail-path 2011-10-23 20:17:43.000000000 +0000
@@ -0,0 +1,60 @@
+Description: mail command absolute pathname
+ This patch changes the default command to /usr/bin/mail in various
+ configuration files and manual pages.
+Author: Gennaro Oliva
+Bug-Debian: http://bugs.debian.org/588862
+Forwarded: no
+
+--- slurm-llnl-2.3.0.orig/src/common/read_config.h
++++ slurm-llnl-2.3.0/src/common/read_config.h
+@@ -85,7 +85,7 @@ extern char *default_plugstack;
+ #define DEFAULT_KILL_ON_BAD_EXIT 0
+ #define DEFAULT_KILL_TREE 0
+ #define DEFAULT_KILL_WAIT 30
+-#define DEFAULT_MAIL_PROG "/bin/mail"
++#define DEFAULT_MAIL_PROG "/usr/bin/mail"
+ #define DEFAULT_MAX_JOB_COUNT 10000
+ #define DEFAULT_MAX_JOB_ID 0xffff0000
+ #define DEFAULT_MAX_STEP_COUNT 40000
+--- slurm-llnl-2.3.0.orig/doc/man/man1/strigger.1
++++ slurm-llnl-2.3.0/doc/man/man1/strigger.1
+@@ -263,7 +263,7 @@ primary slurmctld fails.
+ strigger \-\-set \-\-primary_slurmctld_failure \\
+ \-\-program=/usr/sbin/primary_slurmctld_failure
+ # Notify the administrator of the failure using by e\-mail
+- /bin/mail slurm_admin@site.com \-s Primary_SLURMCTLD_FAILURE
++ /usr/bin/mail slurm_admin@site.com \-s Primary_SLURMCTLD_FAILURE
+
+ > strigger \-\-set \-\-primary_slurmctld_failure \\
+ \-\-program=/usr/sbin/primary_slurmctld_failure
+@@ -282,7 +282,7 @@ argument to the script by SLURM).
+ strigger \-\-set \-\-node \-\-down \\
+ \-\-program=/usr/sbin/slurm_admin_notify
+ # Notify administrator using by e\-mail
+- /bin/mail slurm_admin@site.com \-s NodesDown:$*
++ /usr/bin/mail slurm_admin@site.com \-s NodesDown:$*
+
+ > strigger \-\-set \-\-node \-\-down \\
+ \-\-program=/usr/sbin/slurm_admin_notify
+--- slurm-llnl-2.3.0.orig/doc/man/man5/slurm.conf.5
++++ slurm-llnl-2.3.0/doc/man/man5/slurm.conf.5
+@@ -735,7 +735,7 @@ not explicitly listed in the job submiss
+ .TP
+ \fBMailProg\fR
+ Fully qualified pathname to the program used to send email per user request.
+-The default value is "/bin/mail".
++The default value is "/usr/bin/mail".
+
+ .TP
+ \fBMaxJobCount\fR
+--- slurm-llnl-2.3.0.orig/doc/html/configurator.html.in
++++ slurm-llnl-2.3.0/doc/html/configurator.html.in
+@@ -143,7 +143,7 @@ function displayfile()
+ "#JobSubmitPlugins=1
" +
+ "#KillOnBadExit=0
" +
+ "#Licenses=foo*4,bar
" +
+- "#MailProg=/bin/mail
" +
++ "#MailProg=/usr/bin/mail
" +
+ "#MaxJobCount=5000
" +
+ "#MaxStepCount=40000
" +
+ "#MaxTasksPerNode=128
" +
diff -Nru slurm-llnl-2.2.7/debian/patches/mail-path-patch slurm-llnl-2.3.2/debian/patches/mail-path-patch
--- slurm-llnl-2.2.7/debian/patches/mail-path-patch 2011-05-06 17:35:29.000000000 +0000
+++ slurm-llnl-2.3.2/debian/patches/mail-path-patch 1970-01-01 00:00:00.000000000 +0000
@@ -1,60 +0,0 @@
-Description: mail command absolute pathname
- This patch changes the default command to /usr/bin/mail in various
- configuration files and manual pages.
-Author: Gennaro Oliva
-Bug-Debian: http://bugs.debian.org/588862
-Forwarded: no
-
---- slurm-llnl-2.2.1.orig/src/common/read_config.h
-+++ slurm-llnl-2.2.1/src/common/read_config.h
-@@ -84,7 +84,7 @@ extern char *default_plugstack;
- #define DEFAULT_KILL_ON_BAD_EXIT 0
- #define DEFAULT_KILL_TREE 0
- #define DEFAULT_KILL_WAIT 30
--#define DEFAULT_MAIL_PROG "/bin/mail"
-+#define DEFAULT_MAIL_PROG "/usr/bin/mail"
- #define DEFAULT_MAX_JOB_COUNT 10000
- #define DEFAULT_MEM_PER_CPU 0
- #define DEFAULT_MAX_MEM_PER_CPU 0
---- slurm-llnl-2.2.1.orig/doc/man/man1/strigger.1
-+++ slurm-llnl-2.2.1/doc/man/man1/strigger.1
-@@ -257,7 +257,7 @@ primary slurmctld fails.
- strigger \-\-set \-\-primary_slurmctld_failure \\
- \-\-program=/usr/sbin/primary_slurmctld_failure
- # Notify the administrator of the failure using by e\-mail
-- /bin/mail slurm_admin@site.com \-s Primary_SLURMCTLD_FAILURE
-+ /usr/bin/mail slurm_admin@site.com \-s Primary_SLURMCTLD_FAILURE
-
- > strigger \-\-set \-\-primary_slurmctld_failure \\
- \-\-program=/usr/sbin/primary_slurmctld_failure
-@@ -276,7 +276,7 @@ argument to the script by SLURM).
- strigger \-\-set \-\-node \-\-down \\
- \-\-program=/usr/sbin/slurm_admin_notify
- # Notify administrator using by e\-mail
-- /bin/mail slurm_admin@site.com \-s NodesDown:$*
-+ /usr/bin/mail slurm_admin@site.com \-s NodesDown:$*
-
- > strigger \-\-set \-\-node \-\-down \\
- \-\-program=/usr/sbin/slurm_admin_notify
---- slurm-llnl-2.2.1.orig/doc/man/man5/slurm.conf.5
-+++ slurm-llnl-2.2.1/doc/man/man5/slurm.conf.5
-@@ -722,7 +722,7 @@ not explicitly listed in the job submiss
- .TP
- \fBMailProg\fR
- Fully qualified pathname to the program used to send email per user request.
--The default value is "/bin/mail".
-+The default value is "/usr/bin/mail".
-
- .TP
- \fBMaxJobCount\fR
---- slurm-llnl-2.2.1.orig/doc/html/configurator.html.in
-+++ slurm-llnl-2.2.1/doc/html/configurator.html.in
-@@ -163,7 +163,7 @@ function displayfile()
- "#JobSubmitPlugins=1
" +
- "#KillOnBadExit=0
" +
- "#Licenses=foo*4,bar
" +
-- "#MailProg=/bin/mail
" +
-+ "#MailProg=/usr/bin/mail
" +
- "#MaxJobCount=5000
" +
- "#MaxTasksPerNode=128
" +
- "MpiDefault=" + get_radio_value(document.config.mpi_default) + "
" +
diff -Nru slurm-llnl-2.2.7/debian/patches/pamlibslurm slurm-llnl-2.3.2/debian/patches/pamlibslurm
--- slurm-llnl-2.2.7/debian/patches/pamlibslurm 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/debian/patches/pamlibslurm 2011-12-28 12:02:37.000000000 +0000
@@ -0,0 +1,68 @@
+Description: Making pamslurm use libslurm shipped with the libslurm package
+ The pam_slurm module dlopens libslurm.so, which is a link usually shipped
+ with the dev package (libslurm-dev); this patch modifies the source to
+ open the libslurm.so.version shipped with the libslurm package.
+
+---
+Origin: maintainer
+Forwarded: by mail
+Reviewed-By: Moe Jette
+Last-Update: 2011-12-21
+
+--- slurm-llnl-2.3.2.orig/contribs/pam/pam_slurm.c
++++ slurm-llnl-2.3.2/contribs/pam/pam_slurm.c
+@@ -30,6 +30,9 @@
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ \*****************************************************************************/
+
++#if HAVE_CONFIG_H
++# include "config.h"
++#endif
+
+ #include
+ #include
+@@ -357,12 +360,42 @@ _send_denial_msg(pam_handle_t *pamh, str
+ */
+ extern void libpam_slurm_init (void)
+ {
++ char libslurmname[64];
++
+ if (slurm_h)
+ return;
+
+- if (!(slurm_h = dlopen("libslurm.so", RTLD_NOW|RTLD_GLOBAL)))
+- _log_msg (LOG_ERR, "Unable to dlopen libslurm: %s\n",
++ /* First try to use the same libslurm version ("libslurm.so.24.0.0"),
++ * Second try to match the major version number ("libslurm.so.24"),
++ * Otherwise use "libslurm.so" */
++
++ if (snprintf(libslurmname, sizeof(libslurmname),
++ "libslurm.so.%d.%d.%d", SLURM_API_CURRENT, SLURM_API_REVISION,
++ SLURM_API_AGE) >= sizeof(libslurmname) ) {
++ _log_msg (LOG_ERR, "Unable to write libslurmname\n");
++ }
++ else {
++ if (!(slurm_h = dlopen(libslurmname, RTLD_NOW|RTLD_GLOBAL))) {
++ _log_msg (LOG_INFO, "Unable to dlopen %s: %s\n",
++ libslurmname, dlerror ());
++ }
++ }
++
++ if (snprintf(libslurmname, sizeof(libslurmname), "libslurm.so.%d",
++ SLURM_API_CURRENT) >= sizeof(libslurmname) ) {
++ _log_msg (LOG_ERR, "Unable to write libslurmname\n");
++ }
++ else {
++ if (!(slurm_h = dlopen(libslurmname, RTLD_NOW|RTLD_GLOBAL))) {
++ _log_msg (LOG_INFO, "Unable to dlopen %s: %s\n",
++ libslurmname, dlerror ());
++ }
++ }
++
++ if (!(slurm_h = dlopen("libslurm.so", RTLD_NOW|RTLD_GLOBAL))) {
++ _log_msg (LOG_ERR, "Unable to dlopen libslurm.so: %s\n",
+ dlerror ());
++ }
+
+ return;
+ }
diff -Nru slurm-llnl-2.2.7/debian/patches/series slurm-llnl-2.3.2/debian/patches/series
--- slurm-llnl-2.2.7/debian/patches/series 2011-06-10 11:21:35.000000000 +0000
+++ slurm-llnl-2.3.2/debian/patches/series 2011-12-21 12:17:15.000000000 +0000
@@ -1 +1,2 @@
-mail-path-patch
+mail-path
+pamlibslurm
diff -Nru slurm-llnl-2.2.7/debian/rules slurm-llnl-2.3.2/debian/rules
--- slurm-llnl-2.2.7/debian/rules 2011-07-11 14:01:11.000000000 +0000
+++ slurm-llnl-2.3.2/debian/rules 2011-12-21 08:32:17.000000000 +0000
@@ -39,7 +39,7 @@
cp -f /usr/share/misc/config.guess config.guess
endif
# Add here commands to configure the package.
- CFLAGS="$(CFLAGS)" ./configure --host=$(DEB_HOST_GNU_TYPE) --build=$(DEB_BUILD_GNU_TYPE) --prefix=/usr --mandir=\$${prefix}/share/man --infodir=\$${prefix}/share/info --sysconfdir=/etc/slurm-llnl --with-munge --localstatedir=/var/run/slurm-llnl --without-blcr --libexecdir=/usr/share --enable-pam
+ CFLAGS="$(CFLAGS)" ./configure --host=$(DEB_HOST_GNU_TYPE) --build=$(DEB_BUILD_GNU_TYPE) --prefix=/usr --mandir=\$${prefix}/share/man --infodir=\$${prefix}/share/info --sysconfdir=/etc/slurm-llnl --with-munge --localstatedir=/var/run/slurm-llnl --without-blcr --libexecdir=/usr/share --enable-pam --without-rpath
@@ -68,6 +68,7 @@
# Add here commands to clean up after the build process.
[ ! -f contribs/perlapi/Makefile ] || $(MAKE) -C contribs/perlapi distclean
[ ! -f contribs/torque/Makefile ] || $(MAKE) -C contribs/torque distclean
+ [ ! -f contribs/pam/Makefile ] || $(MAKE) -C contribs/pam distclean
[ ! -f Makefile ] || $(MAKE) distclean
rm -rf .pc contribs/pam/.deps \
@@ -106,12 +107,14 @@
# Add here commands to install the arch part of the package into
# debian/tmp.
$(MAKE) install DESTDIR=$(CURDIR)/debian/slurm-llnl
+ sed -i "s/\-shared/-shared -lc/" contribs/pam/Makefile
+ $(MAKE) -C contribs/pam install DESTDIR=$(CURDIR)/debian/slurm-llnl
sed -i "/dependency_libs/ s/'.*'/''/" `find . -name '*.la'`
rm -fr debian/slurm-llnl/usr/lib/slurm/src
rm -f debian/slurm-llnl/usr/sbin/slurm_epilog
rm -f debian/slurm-llnl/usr/sbin/slurm_prolog
rmdir debian/slurm-llnl/usr/share/slurm
- dh_install debian/slurm-llnl-configurator.html usr/share/doc/slurm-llnl
+ dh_install -pslurm-llnl debian/slurm-llnl-configurator.html usr/share/doc/slurm-llnl
rm -rf debian/slurm-llnl/usr/share/doc/slurm-[0-9]*
# pmi packages
@@ -128,7 +131,7 @@
dh_install -plibslurmdb-dev debian/slurm-llnl/usr/lib/libslurmdb.a usr/lib
dh_install -plibslurmdb-dev debian/slurm-llnl/usr/lib/libslurmdb.la usr/lib
dh_install -plibslurmdb-dev debian/slurm-llnl/usr/lib/libslurmdb.so usr/lib
- dh_install -plibslurmdb22 debian/slurm-llnl/usr/lib/libslurmdb.so.* usr/lib
+ dh_install -plibslurmdb23 debian/slurm-llnl/usr/lib/libslurmdb.so.* usr/lib
rm -f debian/slurm-llnl/usr/include/slurm/slurmdb.h
rm -f debian/slurm-llnl/usr/lib/libslurmdb.a
rm -f debian/slurm-llnl/usr/lib/libslurmdb.la
@@ -141,7 +144,10 @@
dh_install -plibslurm-dev debian/slurm-llnl/usr/lib/lib*.la usr/lib
dh_install -plibslurm-dev debian/slurm-llnl/usr/lib/lib*.so usr/lib
dh_installman -plibslurm-dev debian/slurm-llnl/usr/share/man/man3/*
- dh_install -plibslurm22 debian/slurm-llnl/usr/lib/libslurm.so.* usr/lib
+ dh_install -plibslurm23 debian/slurm-llnl/usr/lib/libslurm.so.* usr/lib
+ #LIBSLURM = `ls -v debian/slurm-llnl/usr/lib/libslurm.so* | tail -1`
+ #ls -v debian/slurm-llnl/usr/lib/libslurm.so* | tail -1
+ #echo LIBSLURM is $(LIBSLURM)
rm -rf debian/slurm-llnl/usr/include
rm -f debian/slurm-llnl/usr/lib/*.a
rm -f debian/slurm-llnl/usr/lib/lib*.la
@@ -172,7 +178,6 @@
dh_install -pslurm-llnl-slurmdbd debian/slurm-llnl/usr/share/man/man8/slurmdbd.8 usr/share/man/man8
dh_install -pslurm-llnl-slurmdbd debian/slurm-llnl/usr/share/man/man5/slurmdbd.conf.5 usr/share/man/man5
dh_install -pslurm-llnl-slurmdbd debian/slurm-llnl/usr/share/man/man1/sacctmgr.1 usr/share/man/man1
-
rm -f debian/slurm-llnl/usr/sbin/slurmdbd
rm -f debian/slurm-llnl/usr/bin/sacctmgr
rm -f debian/slurm-llnl/usr/share/man/man8/slurmdbd.8
@@ -181,6 +186,11 @@
rmdir debian/slurm-llnl/usr/lib
+ #libpam-slurm package
+ dh_install -plibpam-slurm debian/slurm-llnl/lib/security/pam_slurm.so lib/security
+ rm -rf debian/slurm-llnl/lib
+
+
# Perl
$(MAKE) -C contribs/perlapi install DESTDIR=$(CURDIR)/debian/libslurm-perl PERL_MM_OPT="INSTALLDIRS=vendor"
diff -Nru slurm-llnl-2.2.7/debian/slurm-llnl-configurator.html slurm-llnl-2.3.2/debian/slurm-llnl-configurator.html
--- slurm-llnl-2.2.7/debian/slurm-llnl-configurator.html 2011-04-04 13:39:29.000000000 +0000
+++ slurm-llnl-2.3.2/debian/slurm-llnl-configurator.html 2011-12-05 12:48:18.000000000 +0000
@@ -1,11 +1,11 @@
-Accounting
-
-NOTE: This documents accounting features available in SLURM version
-1.3, which are far more extensive than those available in previous
-releases.
+Accounting and Resource Limits
SLURM can be configured to collect accounting information for every
job and job step executed.
@@ -386,7 +382,7 @@
As the mysql user grant privileges to that user using a
command such as:
-GRANT ALL ON StorageLoc.* TO 'StorageUser'@'StorageHost';
+
GRANT ALL ON StorageLoc.* TO 'StorageUser'@'StorageHost';
(The ticks are needed)
(You need to be root to do this. Also in the info for password
@@ -394,7 +390,7 @@
prompt since the previous mysql statement did not end with a ';'. It
assumes that you wish to input more info.)
-live example:
+Live example:
mysql@snowflake:~$ mysql
@@ -664,8 +660,10 @@
if not we will refer to the account associated with the job.
If the account doesn't have the limit set we will refer to
the cluster's limits.
-If the cluster doesn't have the limit set no limit will be enforced.
-All of the above entities can include limits as described below...
+If the cluster doesn't have the limit set no limit will be enforced.
+
+All of the above entities can include limits as described below and
+in the Resource Limits document.
@@ -673,7 +671,6 @@
Essentially this is the amount of claim this association and it's
children have to the above system. Can also be the string "parent",
this means that the parent association is used for fairshare.
-
- GrpCPUMins= A hard limit of cpu minutes to be used by jobs
@@ -780,7 +777,7 @@
If an entity has existed for less than 1 day, the entity will be removed
completely. This is meant to clean up after typographic errors.
-Last modified 27 January 2010
+Last modified 10 June 2010
diff -Nru slurm-llnl-2.2.7/doc/html/accounting_storageplugins.shtml slurm-llnl-2.3.2/doc/html/accounting_storageplugins.shtml
--- slurm-llnl-2.2.7/doc/html/accounting_storageplugins.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/accounting_storageplugins.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -303,7 +303,7 @@
List acct_storage_p_modify_job(void *db_conn, uint32_t uid,
acct_job_modify_cond_t *job_cond, acct_job_rec_t *job)
Description:
-Used to modify two fields (the derived exit code and derived exit string) of an
+Used to modify two fields (the derived exit code and the comment string) of an
existing job in the storage type. Can only modify one job at a time.
Arguments:
db_conn (input) connection to
@@ -785,6 +785,24 @@
SLURM_ERROR on failure.
+int clusteracct_storage_p_fini_ctld(void *db_conn, char *ip,
+uint16_t port, char *cluster_nodes)
+
Description:
+Used when a controller is turned off to tell the storage type the
+ slurmctld has gone away.
+
Arguments:
+db_conn (input) connection to
+the storage type.
+ip (input) ip of connected slurmctld.
+port (input) port on host cluster is
+running on the host is grabbed from the connection.
+cluster_nodes (input) name of all
+nodes currently on the cluster.
+
Returns:
+SLURM_SUCCESS on success, or
+SLURM_ERROR on failure.
+
+
int jobacct_storage_p_job_start(void *db_conn, struct job_record *job_ptr)
Description:
Note that a job is about to begin execution or has just changed size.
diff -Nru slurm-llnl-2.2.7/doc/html/big_sys.shtml slurm-llnl-2.3.2/doc/html/big_sys.shtml
--- slurm-llnl-2.2.7/doc/html/big_sys.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/big_sys.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -72,7 +72,7 @@
on each compute node and use it for scheduling purposes, this entails
extra overhead.
Optimize performance by specifying the expected configuration using
-the available parameters (RealMemory, Procs, and
+the available parameters (RealMemory, CPUs, and
TmpDisk).
If the node is found to contain less resources than configured,
it will be marked DOWN and not used.
diff -Nru slurm-llnl-2.2.7/doc/html/bluegene.shtml slurm-llnl-2.3.2/doc/html/bluegene.shtml
--- slurm-llnl-2.2.7/doc/html/bluegene.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/bluegene.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -6,40 +6,45 @@
This document describes the unique features of SLURM on the
IBM BlueGene systems.
-You should be familiar with the SLURM's mode of operation on Linux clusters
+You should be familiar with SLURM's mode of operation on Linux clusters
before studying the relatively few differences in BlueGene operation
described in this document.
BlueGene systems have several unique features making for a few
differences in how SLURM operates there.
-The BlueGene system consists of one or more base partitions or
-midplanes connected in a three-dimensional torus.
-Each base partition consists of 512 c-nodes each containing two processors;
-one designed primarily for computations and the other primarily for managing communications.
-The c-nodes can execute only one process and thus are unable to execute both
-the user's jobs and SLURM's slurmd daemon.
-Thus the slurmd daemon executes on one of the BlueGene Front End Nodes.
-This single slurmd daemon provides (almost) all of the normal SLURM services
+BlueGene systems consists of one or more base partitions or
+midplanes connected in a three-dimensional (BlueGene/L and BlueGene/P
+systems) or five-dimensional (BlueGene/Q) torus.
+Each base partition typically includes 512 c-nodes or compute
+nodes each containing two or more cores;
+one core is typically designed primarily for managing communications while the
+other cores are used primarily for computations.
+Each c-node can execute only one process and thus are unable to execute
+both the user's application plus SLURM's slurmd daemon.
+Thus the slurmd daemon(s) executes on one or more of the BlueGene
+Front End Nodes.
+The slurmd daemons provide (almost) all of the normal SLURM services
for every base partition on the system.
Internally SLURM treats each base partition as one node with
-1024 processors, which keeps the number of entities being managed reasonable.
+a processor count equal to the number of cores on the base partition, which
+keeps the number of entities being managed by SLURM more reasonable.
Since the current BlueGene software can sub-allocate a base partition
-into blocks of 32 and/or 128 c-nodes, more than one user job can execute
-on each base partition (subject to system administrator configuration).
+into smaller blocks, more than one user job can execute on each base
+partition (subject to system administrator configuration). In the case of
+BlueGene/Q systems, more than one user job can also execute in each block.
To effectively utilize this environment, SLURM tools present the user with
-the view that each c-nodes is a separate node, so allocation requests
-and status information use c-node counts (this is a new feature in
-SLURM version 1.1).
+the view that each c-node is a separate node, so allocation requests
+and status information use c-node counts.
Since the c-node count can be very large, the suffix "k" can be used
-to represent multiples of 1024 (e.g. "2k" is equivalent to "2048").
+to represent multiples of 1024 or "m" for multiples of 1,048,576 (1024 x 1024).
+For example, "2k" is equivalent to "2048".
User Tools
-The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and scontrol
-provide all of the expected services except support for job steps.
-SLURM performs resource allocation for the job, but initiation of tasks is performed
-using the mpirun command. SLURM has no concept of a job step on BlueGene.
+
The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and
+scontrol provide all of the expected services except support for job steps,
+which is detailed later.
Seven new sbatch options are available:
--geometry (specify job size in each dimension),
--no-rotate (disable rotation of geometry),
@@ -52,25 +57,25 @@
--ramdisk-image (specify alternative ramdisk image for bluegene block. Default if not set, BGL only.)
The --nodes option with a minimum and (optionally) maximum node count continues
to be available.
-
Note that this is a c-node count.
-To reiterate: sbatch is used to submit a job script,
-but mpirun is used to launch the parallel tasks.
-Note that a SLURM batch job's default stdout and stderr file names are generated
-using the SLURM job ID.
-When the SLURM control daemon is restarted, SLURM job ID values can be repeated,
-therefore it is recommended that batch jobs explicitly specify unique names for
-stdout and stderr files using the srun options --output and --error
-respectively.
-While the salloc command may be used to create an interactive SLURM job,
-it will be the responsibility of the user to insure that the bgblock
-is ready for use before initiating any mpirun commands.
-SLURM will assume this responsibility for batch jobs.
-The script that you submit to SLURM can contain multiple invocations of mpirun as
-well as any desired commands for pre- and post-processing.
+
Task Launch on BlueGene/Q only
+
+Use SLURM's srun command to launch tasks (srun is a wrapper for IBM's
+runjob command).
+SLURM job step information including accounting functions as expected.
+
+Task Launch on BlueGene/L and BlueGene/P only
+
+SLURM performs resource allocation for the job, but initiation of tasks is
+performed using the mpirun command. SLURM has no concept of a job step
+on BlueGene/L or BlueGene/P systems.
+To reiterate: salloc or sbatch are used to create a job allocation, but
+mpirun is used to launch the parallel tasks.
+The script that you submit to SLURM can contain multiple invocations of mpirun
+as well as any desired commands for pre- and post-processing.
The mpirun command will get its bgblock information from the
-MPIRUN_PARTITION as set by SLURM. A sample script is shown below.
+MPIRUN_PARTITION as set by SLURM. A sample script is shown below.
#!/bin/bash
# pre-processing
@@ -80,51 +85,50 @@
mpirun -exec /home/user/prog -cwd /home/user -args 124
# post-processing
date
-
-
-
-The naming of base partitions includes a three-digit suffix representing the its
-coordinates in the X, Y and Z dimensions with a zero origin.
-For example, "bg012" represents the base partition whose coordinate is at X=0, Y=1 and Z=2. In a system
-configured with small blocks (any block less than a full base partition) there will be divisions
-into the base partition notation. For example, if there were 64 psets in the
-configuration, bg012[0-15] represents
-the first quarter or first 16 ionodes of a midplane. In BlueGene/L
-this would be 128 c-node block. To represent the first nodecard in the
-second quarter or ionodes 16-19 the notation would be bg012[16-19], or
-a 32 c-node block.
-Since jobs must allocate consecutive base partitions in all three dimensions, we have developed
-an abbreviated format for describing the base partitions in one of these three-dimensional blocks.
-The base partition has a prefix determined from the system which is followed by the end-points
-of the block enclosed in square-brackets and separated by an "x".
-For example, "bg[620x731]" is used to represent the eight base partitions enclosed in a block
-with end-points and bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721,
-bg730 and bg731).
-
-
-IMPORTANT: SLURM version 1.2 or higher can handle a bluegene system of
-sizes up to 36x36x36. To try to keep with the 'three-digit suffix
-representing the its coordinates in the X, Y and Z dimensions with a
-zero origin', we now support A-Z as valid numbers. This makes it so
-the prefix must always be lower case, and any letters in the
-three-digit suffix must always be upper case. This schema
-should be used in your slurm.conf file and in your bluegene.conf file
-if you put a prefix there even though it is not necessary there. This
-schema should also be used to specify midplanes or locations in
-configure mode of smap.
+
+
+The naming of base partitions includes a numeric suffix representing its
+coordinates with a zero origin. The suffix contains three digits on BlueGene/L
+and BlueGene/P systems, while four digits are required for the BlueGene/Q
+systems. For example, "bgp012" represents the base partition whose coordinate
+is at X=0, Y=1 and Z=2.
+SLURM uses an abbreviated format for describing base partitions in which the
+end-points of the block enclosed are in square-brackets and separated by an "x".
+For example, "bgp[620x731]" is used to represent the eight base partitions
+enclosed in a block with end-points and bgp620 and bgp731 (bgp620, bgp621,
+bgp630, bgp631, bgp720, bgp721, bgp730 and bgp731).
+
+IMPORTANT: SLURM can support up to 36 elements in each
+BlueGene dimension by supporting "A-Z" as valid numbers. SLURM requires the
+prefix to be lower case and any letters in the suffix must always be upper
+case. This schema must be used in both the slurm.conf and bluegene.conf
+configuration files when specifying midplane/node names (the prefix is
+optional). This schema should also be used to specify midplanes or locations
+in configure mode of smap:
-valid: bgl[000xC44] bgl000 bglZZZ
+valid: bgl[000xC44], bgl000, bglZZZ
-invalid: BGL[000xC44] BglC00 bglb00 Bglzzz
+invalid: BGL[000xC44], BglC00, bglb00, Bglzzz
-One new tool provided is smap.
-As of SLURM version 1.2, sview is
-another new tool offering even more viewing and configuring options.
-Smap is aware of system topography and provides a map of what base partitions
-are allocated to jobs, partitions, etc.
-See the smap man page for details.
+
In a system configured with small blocks (any block less
+than a full base partition) there will be divisions in the base partition
+notation. On BlueGene/L and BlueGene/P systems, the base partition name may
+be followed by a square bracket enclosing ID numbers of the IO nodes associated
+with the block. For example, if there are 64 psets in a BlueGene/L
+configuration, "bgl012[0-15]" represents the first quarter or first 16 IO nodes
+of a midplane. In BlueGene/L this would be 128 c-node block. To represent
+the first nodecard in the second quarter or IO nodes 16-19, the notation would
+be "bgl012[16-19]", or a 32 c-node block. On BlueGene/Q systems, the specific
+c-nodes would be identified in square brackets using their five digit
+coordinates. For example "bgq0123[00000x11111]" would represent the 32 c-nodes
+in midplane "bgq0123" having coordinates (within that midplane) from zero to
+one in each of the five dimensions.
+
+Two topology-aware graphical user interfaces are provided: smap and
+sview (sview provides more viewing and configuring options).
+See each command's man page for details.
A sample of smap output is provided below showing the location of five jobs.
Note the format of the list of base partitions allocated to each job.
Also note that idle (unassigned) base partitions are indicated by a period.
@@ -226,14 +230,17 @@
The slurmctld daemon should execute on the system's service node.
If an optional backup daemon is used, it must be in some location where
it is capable of executing Bridge APIs.
-One slurmd daemon should be configured to execute on one of the front end nodes.
-That one slurmd daemon represents communications channel for every base partition.
-You can use the scontrol command to drain individual nodes as desired and
-return them to service.
-
-The slurm.conf (configuration) file needs to have the value of InactiveLimit
-set to zero or not specified (it defaults to a value of zero).
-This is because there are no job steps and we don't want to purge jobs prematurely.
+The slurmd daemons executes the user scripts and there must be at least one
+front end node configured for this purpose. Multiple front end nodes may be
+configured for slurmd use to improve performance and fault tolerance.
+Each slurmd can execute jobs for every base partition and the work will be
+distributed among the slurmd daemons to balance the workload.
+You can use the scontrol command to drain individual compute nodes as desired
+and return them to service.
+
+The slurm.conf (configuration) file needs to have the value of
+InactiveLimit set to zero or not specified (it defaults to a value of zero).
+This is because if there are no job steps, we don't want to purge jobs prematurely.
The value of SelectType must be set to "select/bluegene" in order to have
node selection performed using a system aware of the system's topography
and interfaces.
@@ -248,7 +255,7 @@
The prolog and epilog programs are used to insure proper synchronization
between the slurmctld daemon, the user job, and MMCS.
A multitude of other functions may also be placed into the prolog and
-epilog as desired (e.g. enabling/disabling user logins, puring file systmes,
+epilog as desired (e.g. enabling/disabling user logins, purging file systems,
etc.). Sample prolog and epilog scripts follow.
@@ -257,8 +264,9 @@
#
# Wait for bgblock to be ready for this job's use
/usr/sbin/slurm_prolog
+
-
+
#!/bin/bash
# Sample BlueGene Epilog script
#
@@ -294,31 +302,47 @@
SLURM node and partition descriptions should make use of the
naming conventions described above. For example,
-"NodeName=bg[000x733] NodeAddr=frontend0 NodeHostname=frontend0 Procs=1024"
+"NodeName=bg[000x733] CPUs=1024"
is used in slurm.conf to define a BlueGene system with 128 midplanes
-in an 8 by 4 by 4 matrix.
+in an 8 by 4 by 4 matrix and each midplane is configured with 1024 processors
+(cores).
The node name prefix of "bg" defined by NodeName can be anything you want,
but needs to be consistent throughout the slurm.conf file.
-Note that the values of both NodeAddr and NodeHostname for all
-128 base partitions is the name of the front-end node executing
-the slurmd daemon.
No computer is actually expected to a hostname of "bg000" and no
-attempt will be made to route message traffic to this address.
+attempt will be made to route message traffic to this address.
-While users are unable to initiate SLURM job steps on BlueGene systems,
-this restriction does not apply to user root or SlurmUser.
-Be advised that the one slurmd supporting all nodes is unable to manage a
-large number of job steps, so this ability should be used only to verify normal
-SLURM operation.
+
Front end nodes used for executing the slurmd daemons must also be defined
+in the slurm.conf file.
+It is recommended that at least two front end nodes be dedicated to use by
+the slurmd daemons for fault tolerance.
+For example:
+"FrontendName=frontend[00-03] State=UNKNOWN"
+is used to define four front end nodes for running slurmd daemons.
+
+
+# Portion of slurm.conf for BlueGene system
+InactiveLimit=0
+SelectType=select/bluegene
+Prolog=/usr/sbin/prolog
+Epilog=/usr/sbin/epilog
+#
+FrontendName=frontend[00-01] State=UNKNOWN
+NodeName=bg[000x733] CPUs=1024 State=UNKNOWN
+
+
+While users are unable to initiate SLURM job steps on BlueGene/L or BlueGene/P
+systems, this restriction does not apply to user root or SlurmUser.
+Be advised that the slurmd daemon is unable to manage a large number of job
+steps, so this ability should be used only to verify normal SLURM operation.
If large numbers of job steps are initiated by slurmd, expect the daemon to
fail due to lack of memory or other resources.
-It is best to minimize other work on the front-end node executing slurmd
+It is best to minimize other work on the front end nodes executing slurmd
so as to maximize its performance and minimize other risk factors.
Bluegene.conf File Creation
In addition to the normal slurm.conf file, a new
bluegene.conf configuration file is required with information pertinent
-to the sytem.
+to the system.
Put bluegene.conf into the SLURM configuration directory with
slurm.conf.
A sample file is installed in bluegene.conf.example.
@@ -326,7 +350,7 @@
configuration file for static partitioning.
Note that smap -Dc can be run without the SLURM daemons
active to establish the initial configuration.
-Note that the defined bgblocks may not overlap (except for the
+Note that the bgblocks defined using smap may not overlap (except for the
full-system bgblock, which is implicitly created).
See the smap man page for more information.
@@ -371,7 +395,7 @@
Dynamic partitioning was developed primarily for smaller BlueGene systems,
but can be used on larger systems.
Dynamic partitioning may introduce fragmentation of resources.
-This fragementaiton may be severe since SLURM will run a job anywhere
+This fragmentation may be severe since SLURM will run a job anywhere
resources are available with little thought of the future.
As with overlap partitioning, use dynamic partitioning with
caution!
@@ -383,17 +407,17 @@
Blocks can be freed or set in an error state with scontrol,
(i.e. "scontrol update BlockName=RMP0 state=error").
This will end any job on the block and set the state of the block to ERROR
-making it so no job will run on the block. To set it back to a useable
-state set the state to free (i.e.
+making it so no job will run on the block. To set it back to a usable
+state, set the state to free (i.e.
"scontrol update BlockName=RMP0 state=free").
Alternatively, if only part of a base partition needs to be put
into an error state which isn't already in a block of the size you
-need, you can set a set of ionodes into an error state with scontrol,
+need, you can set a collection of IO nodes into an error state using scontrol
(i.e. "scontrol update subbpname=bg000[0-3] state=error").
This will end any job on the nodes listed, create a block there, and set
the state of the block to ERROR making it so no job will run on the
-block. To set it back to a useable state set the state to free (i.e.
+block. To set it back to a usable state, set the state to free (i.e.
"scontrol update BlockName=RMP0 state=free" or
"scontrol update subbpname=bg000[0-3] state=free"). This is
helpful to allow other jobs to run on the unaffected nodes in
@@ -411,14 +435,19 @@
Note that the Numpsets values defined in
bluegene.conf is used only when SLURM creates bgblocks this
-determines if the system is IO rich or not. For most bluegene/L
+determines if the system is IO rich or not. For most BlueGene/L
systems this value is either 8 (for IO poor systems) or 64 (for IO rich
-systems).
-
The Images can change during job start based on input from
-the user.
+systems).
+
+The Images file specifications identify which images are used when
+booting a bgblock and the valid images are different for each BlueGene system
+type (e.g. L, P and Q). Their values can change during job allocation based on
+input from the user.
If you change the bgblock layout, then slurmctld and slurmd should
-both be cold-started (e.g. /etc/init.d/slurm startclean).
-If you wish to modify the Numpsets values
+both be cold-started (without preserving any state information,
+"/etc/init.d/slurm startclean").
+
+If you wish to modify the Numpsets values
for existing bgblocks, either modify them manually or destroy the bgblocks
and let SLURM recreate them.
Note that in addition to the bgblocks defined in bluegene.conf, an
@@ -429,7 +458,7 @@
A sample bluegene.conf file is shown below.
###############################################################################
-# Global specifications for BlueGene system
+# Global specifications for a BlueGene/L system
#
# BlrtsImage: BlrtsImage used for creation of all bgblocks.
# LinuxImage: LinuxImage used for creation of all bgblocks.
@@ -531,7 +560,7 @@
# volume = 1x1x1 = 1
BPs=[000x000] Type=TORUS # 1x1x1 = 1 midplane
BPs=[001x001] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1x1x1 = 4-Nodecard sized
- # cnode blocks 3-Base
+ # c-node blocks 3-Base
# Partition Quarter sized
# c-node blocks
@@ -539,8 +568,8 @@
The above bluegene.conf file defines multiple bgblocks to be
created in a single midplane (see the "SMALL" option).
-Using this mechanism, up to 32 independent jobs each consisting of 1
- 32 cnodes can be executed
+Using this mechanism, up to 32 independent jobs each consisting of
+32 c-nodes can be executed
simultaneously on a one-rack BlueGene system.
If defining bgblocks of Type=SMALL, the SLURM partition
containing them as defined in slurm.conf must have the
@@ -552,9 +581,10 @@
As in all SLURM configuration files, parameters and values
are case insensitive.
- With a BlueGene/P system the image names are different. The
- correct image names are CnloadImage, MloaderImage, and IoloadImage.
- You can also use alternate images just the same as discribed above.
+
The valid image names on a BlueGene/P system are CnloadImage, MloaderImage,
+and IoloadImage. The only image name on BlueGene/Q systems is MloaderImage.
+Alternate images may be specified as described above for all BlueGene system
+types.
One more thing is required to support SLURM interactions with
the DB2 database (at least as of the time this was written).
@@ -601,9 +631,9 @@
A system administrator should address the problem before returning
the base partitions to service.
-If you cold-start slurmctld (/etc/init.d/slurm startclean
-or slurmctld -c) it is recommended that you also cold-start
-the slurmd at the same time.
+
If the slurmctld daemon is cold-started (/etc/init.d/slurm startclean
+or slurmctld -c) it is recommended that the slurmd daemon(s) be
+cold-started at the same time.
Failure to do so may result in errors being reported by both slurmd
and slurmctld due to bgblocks that previously existed being deleted.
@@ -614,20 +644,32 @@
Resource Reservations
-This reservation mechanism for less than an entire midplane is still
-under development.
+SLURM's advance reservation mechanism can accept a node count specification
+as input rather than identification of specific nodes/midplanes. In that case,
+SLURM may reserve nodes/midplanes which may not be formed into an appropriate
+bgblock. Work is planned for SLURM version 2.4 to remedy this problem. Until
+that time, identifying the specific nodes/midplanes to be included in an
+advanced reservation may be necessary.
SLURM's advance reservation mechanism is designed to reserve resources
at the level of whole nodes, which on a BlueGene systems would represent
whole midplanes. In order to support advanced reservations with a finer
grained resolution, you can configure one license per c-node on the system
-and reserve c-nodes instead of entire midplanes. For example, in slurm.conf
-specify something of this sort: "Licenses=cnode*512". Then create an
-advanced reservation with a command like this:
-"scontrol create reservation licenses="cnode*32" starttime=now duration=30:00 users=joe".
+and reserve c-nodes instead of entire midplanes. Note that reserved licenses
+are treated somewhat differently than reserved nodes. When nodes are reserved
+then jobs using that reservation can use only those nodes. Reserved licenses
+can only be used by jobs associated with that reservation, but licenses not
+explicitly reserved are available to any job.
+
+For example, in slurm.conf specify something of this sort:
+"Licenses=cnode*512". Then create an advanced reservation with a
+command like this:
+"scontrol create reservation licenses="cnode*32" starttime=now duration=30:00 users=joe".
+Jobs run in this reservation will then have at least 32 c-nodes
+available for their use, but could use more given an appropriate workload.
There is also a job_submit/cnode plugin available for use that will
-automatically set a job's license specification to match his c-node request
+automatically set a job's license specification to match its c-node request
(i.e. a command like
"sbatch -N32 my.sh" would automatically be translated to
"sbatch -N32 --licenses=cnode*32 my.sh" by the slurmctld daemon.
@@ -639,7 +681,7 @@
All of the testing and debugging guidance provided in
Quick Start Administrator Guide
apply to BlueGene systems.
-One can start the slurmctld and slurmd in the foreground
+One can start the slurmctld and slurmd daemons in the foreground
with extensive debugging to establish basic functionality.
Once running in production, the configured SlurmctldLog and
SlurmdLog files will provide historical system information.
@@ -659,18 +701,22 @@
This will define "HAVE_BG", "HAVE_BGL", and "HAVE_FRONT_END" in the
config.h file.
You can also emulate a BlueGene/P system with
- the --enable-bgp-emulation option.
+the --enable-bgp-emulation option.
This will define "HAVE_BG", "HAVE_BGP", and "HAVE_FRONT_END" in the
config.h file.
+You can also emulate a BlueGene/Q system using
+the --enable-bgq-emulation option.
+This will define "HAVE_BG", "HAVE_BGQ", and "HAVE_FRONT_END" in the
+config.h file.
Then execute make normally.
These variables will build the code as if it were running
on an actual BlueGene computer, but avoid making calls to the
-Bridge libary (that is controlled by the variable "HAVE_BG_FILES",
+Bridge library (that is controlled by the variable "HAVE_BG_FILES",
which is left undefined). You can use this to test configurations,
scheduling logic, etc.
-Last modified 17 March 2009
+Last modified 16 August 2011
Binary files /tmp/jKTrgHjCbo/slurm-llnl-2.2.7/doc/html/bull.jpg and /tmp/eIE_aeUSMX/slurm-llnl-2.3.2/doc/html/bull.jpg differ
diff -Nru slurm-llnl-2.2.7/doc/html/checkpoint_plugins.shtml slurm-llnl-2.3.2/doc/html/checkpoint_plugins.shtml
--- slurm-llnl-2.2.7/doc/html/checkpoint_plugins.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/checkpoint_plugins.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -22,7 +22,6 @@
Berkeley Lab Checkpoint/Restart (BLCR)
noneNo job checkpoint.
ompiOpenMPI checkpoint (requires OpenMPI version 1.3 or higher).
-xlchXLCH
The plugin_name and
diff -Nru slurm-llnl-2.2.7/doc/html/configurator.html.in slurm-llnl-2.3.2/doc/html/configurator.html.in
--- slurm-llnl-2.2.7/doc/html/configurator.html.in 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/configurator.html.in 2011-12-05 17:20:08.000000000 +0000
@@ -1,11 +1,11 @@
+
+
CPU Management User and Administrator Guide
+
+Overview
+The purpose of this guide is to assist SLURM users and administrators in selecting configuration options
+and composing command lines to manage the use of CPU resources by jobs, steps and tasks. The document
+is divided into the following sections:
+
+
+CPU Management through user commands is constrained by the configuration parameters
+chosen by the SLURM administrator. The interactions between different CPU management options are complex
+and often difficult to predict. Some experimentation may be required to discover the exact combination
+of options needed to produce a desired outcome. Users and administrators should refer to the man pages
+for slurm.conf, cgroup.conf
+salloc,
+sbatch and srun for detailed explanations of each
+option. The following html documents may also be useful:
+
+
+Consumable Resources in SLURM
+Sharing Consumable Resources
+Support for Multi-core/Multi-thread
+Architectures
+Plane distribution
+
+This document describes SLURM CPU management for conventional Linux clusters only. For
+information on Cray and IBM BlueGene systems, please refer to the appropriate documents.
+The information and examples in this document have been verified on SLURM version 2.3.0. Some
+information may not be valid for previous SLURM versions.
+
+CPU Management Steps performed by SLURM
+SLURM uses four basic steps to manage CPU resources for a job/step:
+
+
+Step 1: Selection of Nodes
+In Step 1, SLURM selects the set of nodes from which CPU resources are to be allocated to a job or
+job step. Node selection is therefore influenced by many of the configuration and command line options
+that control the allocation of CPUs (Step 2 below).
+If
+SelectType=select/linear is configured, all resources on the selected nodes will be allocated
+to the job/step. If SelectType=select/cons_res is configured,
+individual sockets, cores and threads may be allocated from the selected nodes as
+consumable resources. The consumable resource type is defined by
+SelectTypeParameters.
+
+
+Step 1 is performed by slurmctld and the select plugin.
+
+
+
+ slurm.conf options that control Step 1
+
+
+
+
+
+ slurm.conf
+ parameter
+ |
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ NodeName
+ |
+
+ <name of the node>
+ Plus additional parameters. See man page for details.
+ |
+
+ Defines
+ a node. This includes the number and layout of sockets, cores,
+ threads and processors (logical CPUs) on the node.
+ |
+
+
+
+
+ PartitionName
+ |
+
+ <name of the partition>
+ Plus additional parameters. See man page for details.
+ |
+
+ Defines
+ a partition. Several parameters of the partition definition
+ affect the selection of nodes (e.g., Nodes,
+ Shared, MaxNodes)
+ |
+
+
+
+
+ FastSchedule
+ |
+
+ 0 | 1 | 2
+ |
+
+ Controls
+ how the information in a node definition is used.
+ |
+
+
+
+
+ SelectType
+ |
+
+
+ select/linear | select/cons_res
+ |
+
+ Controls
+ whether CPU resources are allocated to jobs and job steps in
+ units of whole nodes or as consumable resources (sockets, cores
+ or threads).
+ |
+
+
+
+
+ SelectTypeParameters
+ |
+
+ CR_CPU | CR_CPU_Memory | CR_Core |
+CR_Core_Memory | CR_Socket | CR_Socket_Memory
Plus additional options. See man page for details.
+ |
+
+ Defines
+ the consumable resource type and controls other aspects of CPU
+ resource allocation by the select plugin.
+ |
+
+
+
+
+
+
+ srun/salloc/sbatch command line options that control Step 1
+
+
+
+
+
+
+ Command
+ line option
+ |
+
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ -B, --extra-node-info
+
+ |
+
+
+ <sockets[:cores[:threads]]>
+ |
+
+ Restricts
+ node selection to nodes with a specified layout of sockets, cores
+ and threads.
+ |
+
+
+
+
+ -C, --constraint
+ |
+
+ <list>
+
+ |
+
+ Restricts
+ node selection to nodes with specified attributes
+ |
+
+
+
+
+ --contiguous
+ |
+
+ N/A
+ |
+
+ Restricts
+ node selection to contiguous nodes
+ |
+
+
+
+
+ --cores-per-socket
+ |
+
+ <cores>
+ |
+
+ Restricts
+ node selection to nodes with at least the specified number of cores per socket
+ |
+
+
+
+
+ -c, --cpus-per-task
+ |
+
+ <ncpus>
+ |
+
+ Controls
+ the number of CPUs allocated per task
+ |
+
+
+
+
+ --exclusive
+ |
+
+ N/A
+ |
+
+ Prevents
+ sharing of allocated nodes with other jobs. Suballocates CPUs to job steps.
+ |
+
+
+
+
+ -F, --nodefile
+ |
+
+ <node file>
+ |
+
+ File
+ containing a list of specific nodes to be selected for the job (salloc and sbatch only)
+ |
+
+
+
+
+ --hint
+ |
+
+ compute_bound |
+ memory_bound | [no]multithread
+ |
+
+ Additional
+ controls on allocation of CPU resources
+ |
+
+
+
+
+ --mincpus
+ |
+
+ <n>
+ |
+
+ Controls
+ the minimum number of CPUs allocated per node
+ |
+
+
+
+
+ -N, --nodes
+ |
+
+
+ <minnodes[-maxnodes]>
+ |
+
+ Controls
+ the minimum/maximum number of nodes allocated to the job
+ |
+
+
+
+
+ -n, --ntasks
+ |
+
+ <number>
+ |
+
+ Controls
+ the number of tasks to be created for the job
+ |
+
+
+
+
+ --ntasks-per-core
+ |
+
+ <number>
+ |
+
+ Controls
+ the maximum number of tasks per allocated core
+ |
+
+
+
+
+ --ntasks-per-socket
+ |
+
+ <number>
+ |
+
+ Controls
+ the maximum number of tasks per allocated socket
+ |
+
+
+
+
+ --ntasks-per-node
+ |
+
+ <number>
+ |
+
+ Controls
+ the maximum number of tasks per allocated node
+ |
+
+
+
+
+ -O, --overcommit
+ |
+
+ N/A
+ |
+
+ Allows
+ fewer CPUs to be allocated than the number of tasks
+ |
+
+
+
+
+ -p, --partition
+ |
+
+
+ <partition_names>
+ |
+
+ Controls
+ which partition is used for the job
+ |
+
+
+
+
+ -s, --share
+ |
+
+ N/A
+ |
+
+ Allows
+ sharing of allocated nodes with other jobs
+ |
+
+
+
+
+ --sockets-per-node
+ |
+
+ <sockets>
+ |
+
+ Restricts
+ node selection to nodes with at least the specified number of sockets
+ |
+
+
+
+
+ --threads-per-core
+ |
+
+ <threads>
+ |
+
+ Restricts
+ node selection to nodes with at least the specified number of threads per core
+ |
+
+
+
+
+ -w, --nodelist
+ |
+
+
+ <host1,host2,... or filename>
+ |
+
+ List
+ of specific nodes to be allocated to the job
+ |
+
+
+
+
+ -x, --exclude
+ |
+
+
+ <host1,host2,... or filename>
+ |
+
+ List
+ of specific nodes to be excluded from allocation to the job
+ |
+
+
+
+
+ -Z, --no-allocate
+ |
+
+ N/A
+ |
+
+ Bypass
+ normal allocation (privileged option available to users
+ “SlurmUser” and “root” only)
+ |
+
+
+
+
+
+Step 2: Allocation of CPUs from the selected Nodes
+In Step 2, SLURM allocates CPU resources to a job/step from the set of nodes selected
+in Step 1. CPU allocation is therefore influenced by the configuration and command line options
+that relate to node selection.
+If
+SelectType=select/linear is configured, all resources on the selected nodes will be allocated
+to the job/step. If SelectType=select/cons_res is configured,
+individual sockets, cores and threads may be allocated from the selected nodes as
+consumable resources. The consumable resource type is defined by
+SelectTypeParameters.
+
+
When using SelectType=select/cons_res,
+the default allocation method across nodes is block allocation (allocate all available CPUs in
+a node before using another node). The default allocation method within a node is cyclic
+allocation (allocate available CPUs in a round-robin fashion across the sockets within a node).
+Users may override the default behavior using the appropriate command
+line options described below. The choice of allocation methods may influence which specific
+CPUs are allocated to the job/step.
+
+Step 2 is performed by slurmctld and the select plugin.
+
+
+
+ slurm.conf options that control Step 2
+
+
+
+
+
+ slurm.conf
+ parameter
+ |
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ NodeName
+ |
+
+ <name of the node>
+ Plus additional parameters. See man page for details.
+ |
+
+ Defines
+ a node. This includes the number and layout of sockets, cores,
+ threads and processors (logical CPUs) on the node.
+ |
+
+
+
+
+ PartitionName
+ |
+
+ <name of the partition>
+ Plus additional parameters. See man page for details.
+ |
+
+ Defines
+ a partition. Several parameters of the partition definition
+ affect the allocation of CPU resources to jobs (e.g., Nodes,
+ Shared, MaxNodes)
+ |
+
+
+
+
+ FastSchedule
+ |
+
+ 0 | 1 | 2
+ |
+
+ Controls
+ how the information in a node definition is used.
+ |
+
+
+
+
+ SelectType
+ |
+
+
+ select/linear | select/cons_res
+ |
+
+ Controls
+ whether CPU resources are allocated to jobs and job steps in
+ units of whole nodes or as consumable resources (sockets, cores
+ or threads).
+ |
+
+
+
+
+ SelectTypeParameters
+ |
+
+ CR_CPU | CR_CPU_Memory | CR_Core |
+CR_Core_Memory | CR_Socket | CR_Socket_Memory
Plus additional options. See man page for details.
+ |
+
+ Defines
+ the consumable resource type and controls other aspects of CPU
+ resource allocation by the select plugin.
+ |
+
+
+
+
+
+
+ srun/salloc/sbatch command line options that control Step 2
+
+
+
+
+
+
+ Command
+ line option
+ |
+
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ -B, --extra-node-info
+
+ |
+
+
+ <sockets[:cores[:threads]]>
+ |
+
+ Restricts
+ node selection to nodes with a specified layout of sockets, cores
+ and threads.
+ |
+
+
+
+
+ -C, --constraint
+ |
+
+ <list>
+
+ |
+
+ Restricts
+ node selection to nodes with specified attributes
+ |
+
+
+
+
+ --contiguous
+ |
+
+ N/A
+ |
+
+ Restricts
+ node selection to contiguous nodes
+ |
+
+
+
+
+ --cores-per-socket
+ |
+
+ <cores>
+ |
+
+ Restricts
+ node selection to nodes with at least the specified number of cores per socket
+ |
+
+
+
+
+ -c, --cpus-per-task
+ |
+
+ <ncpus>
+ |
+
+ Controls
+ the number of CPUs allocated per task
+ |
+
+
+
+
+ --distribution, -m
+ |
+
+
+ block|cyclic |arbitrary|plane=<options>[:block|cyclic]
+ |
+
+ The second specified distribution (after the ":")
+ can be used to override the default allocation method within nodes
+ |
+
+
+
+
+ --exclusive
+ |
+
+ N/A
+ |
+
+ Prevents
+ sharing of allocated nodes with other jobs
+ |
+
+
+
+
+ -F, --nodefile
+ |
+
+ <node file>
+ |
+
+ File
+ containing a list of specific nodes to be selected for the job (salloc and sbatch only)
+ |
+
+
+
+
+ --hint
+ |
+
+ compute_bound |
+ memory_bound | [no]multithread
+ |
+
+ Additional
+ controls on allocation of CPU resources
+ |
+
+
+
+
+ --mincpus
+ |
+
+ <n>
+ |
+
+ Controls
+ the minimum number of CPUs allocated per node
+ |
+
+
+
+
+ -N, --nodes
+ |
+
+
+ <minnodes[-maxnodes]>
+ |
+
+ Controls
+ the minimum/maximum number of nodes allocated to the job
+ |
+
+
+
+
+ -n, --ntasks
+ |
+
+ <number>
+ |
+
+ Controls
+ the number of tasks to be created for the job
+ |
+
+
+
+
+ --ntasks-per-core
+ |
+
+ <number>
+ |
+
+ Controls
+ the maximum number of tasks per allocated core
+ |
+
+
+
+
+ --ntasks-per-socket
+ |
+
+ <number>
+ |
+
+ Controls
+ the maximum number of tasks per allocated socket
+ |
+
+
+
+
+ --ntasks-per-node
+ |
+
+ <number>
+ |
+
+ Controls
+ the maximum number of tasks per allocated node
+ |
+
+
+
+
+ -O, --overcommit
+ |
+
+ N/A
+ |
+
+ Allows
+ fewer CPUs to be allocated than the number of tasks
+ |
+
+
+
+
+ -p, --partition
+ |
+
+
+ <partition_names>
+ |
+
+ Controls
+ which partition is used for the job
+ |
+
+
+
+
+ -s, --share
+ |
+
+ N/A
+ |
+
+ Allows
+ sharing of allocated nodes with other jobs
+ |
+
+
+
+
+ --sockets-per-node
+ |
+
+ <sockets>
+ |
+
+ Restricts
+ node selection to nodes with at least the specified number of sockets
+ |
+
+
+
+
+ --threads-per-core
+ |
+
+ <threads>
+ |
+
+ Restricts
+ node selection to nodes with at least the specified number of threads per core
+ |
+
+
+
+
+ -w, --nodelist
+ |
+
+
+ <host1,host2,... or filename>
+ |
+
+ List
+ of specific nodes to be allocated to the job
+ |
+
+
+
+
+ -x, --exclude
+ |
+
+
+ <host1,host2,... or filename>
+ |
+
+ List
+ of specific nodes to be excluded from allocation to the job
+ |
+
+
+
+
+ -Z, --no-allocate
+ |
+
+ N/A
+ |
+
+ Bypass
+ normal allocation (privileged option available to users
+ “SlurmUser” and “root” only)
+ |
+
+
+
+
+
+Step 3: Distribution of Tasks to the selected Nodes
+In Step 3, SLURM distributes tasks to the nodes that were selected for
+the job/step in Step 1. Each task is distributed to only one node, but more than one
+task may be distributed to each node. Unless overcommitment of CPUs to tasks is
+specified for the job, the number of tasks distributed to a node is
+constrained by the number of CPUs allocated on the node and the number of CPUs per
+task. If consumable resources is configured, or resource sharing is allowed, tasks from
+more than one job/step may run on the same node concurrently.
+
+Step 3 is performed by slurmctld.
+
+
+
+ slurm.conf options that control Step 3
+
+
+
+
+
+ slurm.conf
+ parameter
+ |
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ MaxTasksPerNode
+ |
+
+ <number>
+ |
+
+
+ Controls the maximum number of tasks that a job step can spawn on a single node
+
+ |
+
+
+
+
+
+
+ srun/salloc/sbatch command line options that control Step 3
+
+
+
+
+
+
+ Command
+ line option
+ |
+
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ --distribution, -m
+ |
+
+
+ block|cyclic |arbitrary|plane=<options>[:block|cyclic]
+ |
+
+ The first specified distribution (before the ":")
+ controls the sequence in which tasks are distributed to each of the selected nodes. Note that
+ this option does not affect the number of tasks distributed to each node, but only the sequence of
+ distribution.
+ |
+
+
+
+
+ --ntasks-per-core
+ |
+
+ <number>
+
+ |
+
+
+ Controls the maximum number of tasks per allocated core
+ |
+
+
+
+
+ --ntasks-per-socket
+ |
+
+ <number>
+ |
+
+
+ Controls the maximum number of tasks per allocated socket
+ |
+
+
+
+
+ --ntasks-per-node
+ |
+
+ <number>
+ |
+
+
+ Controls the maximum number of tasks per allocated node
+ |
+
+
+
+
+ -r, --relative
+ |
+
+ N/A
+ |
+
+ Controls
+ which node is used for a job step
+ |
+
+
+
+
+
+Step 4: Optional Distribution and Binding of Tasks to CPUs within a Node
+In optional Step 4, SLURM distributes and binds each task to a specified subset of
+the allocated CPUs on the node to which the task was distributed in Step 3. Different
+tasks distributed to the same node may be bound to the same subset of CPUs or to
+different subsets. This step is known as task affinity or task/CPU binding.
+
+Step 4 is performed by slurmd and the task plugin.
+
+
+
+ slurm.conf options that control Step 4
+
+
+
+
+
+ slurm.conf
+ parameter
+ |
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ TaskPlugin
+ |
+
+
+ task/none | task/affinity | task/cgroup
+ |
+
+
+ Controls whether this step is enabled and which task plugin to use
+
+ |
+
+
+
+
+ TaskPluginParam
+ |
+
+ See man page
+ |
+
+
+ For task/affinity, controls the binding unit (sockets, cores or threads) and the
+ binding method (sched or cpusets)
+ |
+
+
+
+
+
+ cgroup.conf options that control Step 4 (task/cgroup plugin only)
+
+
+
+
+
+ cgroup.conf
+ parameter
+ |
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ ConstrainCores
+ |
+
+
+ yes|no
+ |
+
+
+ Controls whether jobs are constrained to their allocated CPUs
+
+ |
+
+
+
+
+ TaskAffinity
+ |
+
+
+ yes|no
+ |
+
+
+ Controls whether task-to-CPU binding is enabled
+ |
+
+
+
+
+
+
+ srun/salloc/sbatch command line options that control Step 4
+
+
+
+
+
+
+ Command
+ line option
+ |
+
+
+ Possible values
+ |
+
+ Description
+ |
+
+
+
+
+ --cpu_bind
+ |
+
+
+ See man page
+ |
+
+ Controls binding of tasks to CPUs
+ |
+
+
+
+
+ --ntasks-per-core
+ |
+
+ <number>
+
+ |
+
+
+ Controls the maximum number of tasks per allocated core
+ |
+
+
+
+
+ --distribution, -m
+ |
+
+
+ block|cyclic |arbitrary|plane=<options>[:block|cyclic]
+ |
+
+
+ The second specified distribution (after the ":") controls the sequence in which tasks are
+ distributed to allocated CPUs within a node for binding of tasks to CPUs
+ |
+
+
+
+
+
+Additional Notes on CPU Management Steps
+For consumable resources, it is important for users to understand the difference between
+cpu allocation (Step 2) and task affinity/binding (Step 4). Exclusive (unshared) allocation
+of CPUs as consumable resources limits the number of jobs/steps/tasks that
+can use a node concurrently. But it does not limit the set of CPUs on the node that each
+task distributed to the node can use. Unless some form of CPU/task binding is used
+(e.g., a task or spank plugin), all tasks distributed to a node can use all of
+the CPUs on the node, including CPUs not allocated to their job/step. This may have
+unexpected adverse effects on performance, since it allows one job to use CPUs allocated
+exclusively to another job. For this reason, it may not be advisable to configure
+consumable resources without also configuring task affinity. Note that task affinity
+can also be useful when select/linear (whole node allocation) is configured, to improve
+performance by restricting each task to a particular socket or other subset of CPU
+resources on a node.
+
+
+Getting Information about CPU usage by Jobs/Steps/Tasks
+There is no easy way to generate a comprehensive set of CPU management information
+for a job/step (allocation, distribution and binding). However, several
+commands/options provide limited information about CPU usage.
+
+
+
+
+
+ Command/Option
+ |
+
+ Information
+ |
+
+
+
+
+scontrol show job option:
+--details
+ |
+
+
+This option provides a list of the nodes selected for the job and the CPU ids allocated to the job on each
+node. Note that the CPU ids reported by this command are SLURM abstract CPU ids, not Linux/hardware CPU ids
+(as reported by, for example, /proc/cpuinfo).
+
+ |
+
+
+
+
+Linux command: env
+ |
+
+
+Many SLURM environment variables provide information related to node and CPU usage:
+
+
+SLURM_JOB_CPUS_PER_NODE
+SLURM_CPUS_PER_TASK
+SLURM_CPU_BIND
+SLURM_DISTRIBUTION
+SLURM_NODELIST
+SLURM_TASKS_PER_NODE
+SLURM_STEP_NODELIST
+SLURM_STEP_NUM_NODES
+SLURM_STEP_NUM_TASKS
+SLURM_STEP_TASKS_PER_NODE
+SLURM_NNODES
+SLURM_NTASKS
+SLURM_NPROCS
+SLURM_CPUS_ON_NODE
+SLURM_NODEID
+SLURMD_NODENAME
+
+
+ |
+
+
+
+
+srun/salloc/sbatch option:
+--cpu_bind=verbose
+ |
+
+
+This option provides a list of the CPU masks used by task affinity to bind tasks to CPUs.
+Note that the CPU ids represented by these masks are Linux/hardware CPU ids, not SLURM
+abstract CPU ids as reported by scontrol, etc.
+
+ |
+
+
+
+
+srun/salloc/sbatch option:
+-l
+ |
+
+
+This option adds the task id as a prefix to each line of output from a task sent to stdout/stderr.
+This can be useful for distinguishing node-related and CPU-related information by task id
+for multi-task jobs/steps.
+
+ |
+
+
+
+
+Linux command:
+cat /proc/<pid>/status | grep Cpus_allowed_list
+ |
+
+
+Given a task's pid (or "self" if the command is executed by the task itself), this command
+produces a list of the CPU ids bound to the task. This is the same information that is
+provided by --cpu_bind=verbose, but in a more readable format.
+
+ |
+
+
+
+A Note on CPU Numbering
+The number and layout of logical CPUs known to SLURM is described in the node definitions in slurm.conf. This may
+differ from the physical CPU layout on the actual hardware. For this reason, SLURM generates its own internal, or
+"abstract", CPU numbers. These numbers may not match the physical, or "machine", CPU numbers known to Linux.
+A utility to convert between SLURM abstract CPU numbers and physical machine CPU numbers is provided by SLURM. See
+module src/slurmd/slurmd/get_mach_stat.c for details.
+
+
+CPU Management and SLURM Accounting
+CPU management by SLURM users is subject to limits imposed by SLURM Accounting. Accounting limits may be applied on CPU
+usage at the level of users, groups and clusters. For details, see the sacctmgr man page.
+
+
+CPU Management Examples
+The following examples illustrate some scenarios for managing CPU
+resources using SLURM. Many additional scenarios are possible. In
+each example, it is assumed that all CPUs on each node are available
+for allocation.
+
+
+Example Node and Partition Configuration
+For these examples, the SLURM cluster contains the following nodes:
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+ n3
+ |
+
+
+
+ Number
+ of Sockets
+ |
+
+ 2
+ |
+
+ 2
+ |
+
+ 2
+ |
+
+ 2
+ |
+
+
+
+ Number
+ of Cores per Socket
+ |
+
+ 4
+ |
+
+ 4
+ |
+
+ 4
+ |
+
+ 4
+ |
+
+
+
+ Total
+ Number of Cores
+ |
+
+ 8
+ |
+
+ 8
+ |
+
+ 8
+ |
+
+ 8
+ |
+
+
+
+ Number
+ of Threads (CPUs) per Core
+ |
+
+ 1
+ |
+
+ 1
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+
+
+ Total
+ Number of CPUs
+ |
+
+ 8
+ |
+
+ 8
+ |
+
+ 8
+ |
+
+ 16
+ |
+
+
+
+And the following partitions:
+
+
+
+
+
+
+
+ PartitionName
+ |
+
+ regnodes
+ |
+
+ hypernode
+ |
+
+
+
+ Nodes
+ |
+
+ n0
+ n1 n2
+ |
+
+ n3
+ |
+
+
+
+ Default
+ |
+
+ YES
+ |
+
+ -
+ |
+
+
+
+These entities are defined in slurm.conf as follows:
+Nodename=n0 NodeAddr=node0 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8
+Nodename=n1 NodeAddr=node1 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
+Nodename=n2 NodeAddr=node2 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
+Nodename=n3 NodeAddr=node3 Sockets=2 CoresPerSocket=4 ThreadsPerCore=2 Procs=16 State=IDLE
+PartitionName=regnodes Nodes=n0,n1,n2 Shared=YES Default=YES State=UP
+PartitionName=hypernode Nodes=n3 State=UP
+
+
+
+Example 1: Allocation of whole nodes
+Allocate a minimum of two whole nodes to a job.
+slurm.conf options:
+SelectType=select/linear
+
+Command line:
+srun --nodes=2 ...
+
+Comments:
+The SelectType=select/linear
+configuration option specifies allocation in units of whole nodes.
+The --nodes=2 srun option causes
+SLURM to allocate at least 2 nodes to the job.
+
+
+Example 2: Simple allocation of cores as consumable resources
+A job requires 6 CPUs (2 tasks and 3 CPUs per task with no overcommitment). Allocate the 6 CPUs as consumable resources
+from a single node in the default partition.
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+
+Command line:
+srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 ...
+
+Comments:
+The SelectType configuration options define cores as consumable resources.
+The --nodes=1-1 srun option
+ restricts the job to a single node. The following table shows a possible pattern of allocation
+ for this job.
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Number
+ of Allocated CPUs
+ |
+
+ 6
+ |
+
+ 0
+ |
+
+ 0
+ |
+
+
+
+ Number
+ of Tasks
+ |
+
+ 2
+ |
+
+ 0
+ |
+
+ 0
+ |
+
+
+
+
+
+Example 3: Consumable resources with balanced allocation across nodes
+A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment).
+Allocate 3 CPUs from each of the 3 nodes in the default partition.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+
+Command line:
+srun --nodes=3-3 --ntasks=3 --cpus-per-task=3 ...
+
+Comments:
+The options specify the following conditions for the job: 3 tasks, 3 unique CPUs
+ per task, using exactly 3 nodes. To satisfy these conditions, SLURM must
+ allocate 3 CPUs from each node. The following table shows the allocation
+ for this job.
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Number
+ of Allocated CPUs
+ |
+
+ 3
+ |
+
+ 3
+ |
+
+ 3
+ |
+
+
+
+ Number
+ of Tasks
+ |
+
+ 1
+ |
+
+ 1
+ |
+
+ 1
+ |
+
+
+
+
+
+Example 4: Consumable resources with minimization of resource fragmentation
+A job requires 12 CPUs (12 tasks and 1 CPU per task with no overcommitment). Allocate
+CPUs using the minimum number of nodes and the minimum number of sockets required for
+the job in order to minimize fragmentation of allocated/unallocated CPUs in the cluster.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+
+Command line:
+srun --ntasks=12 ...
+
+Comments:
+The default allocation method across nodes is block. This minimizes the number of nodes
+ used for the job. The configuration option
+ CR_CORE_DEFAULT_DIST_BLOCK sets the default allocation method within a
+ node to block. This minimizes the number of sockets used for the job within a node.
+ The combination of these two methods causes SLURM to allocate the 12 CPUs using the
+ minimum required number of nodes (2 nodes) and sockets (3 sockets). The following
+ table shows a possible pattern of allocation for this job.
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 4
+ |
+
+ 4
+ |
+
+ 4
+ |
+
+ 0
+ |
+
+ 0
+ |
+
+ 0
+ |
+
+
+
+ Number of
+ Tasks
+ |
+
+ 8
+ |
+
+ 4
+ |
+
+ 0
+ |
+
+
+
+
+
+Example 5: Consumable resources with cyclic distribution of tasks to nodes
+A job requires 12 CPUs (6 tasks and 2 CPUs per task with no overcommitment). Allocate
+6 CPUs from each of 2 nodes in the default partition. Distribute tasks to nodes cyclically.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+
+Command line:
+srun --nodes=2-2 --ntasks-per-node=3 --distribution=cyclic
+--ntasks=6 --cpus-per-task=2 ...
+
+Comments:
+The options specify the following conditions for the job: 6 tasks, 2 unique CPUs per task,
+using exactly 2 nodes, and with 3 tasks per node. To satisfy these conditions, SLURM
+must allocate 6 CPUs from each of 2 nodes. The
+--distribution=cyclic option causes the tasks to be distributed to the nodes in a
+round-robin fashion. The following table shows a possible pattern of allocation and
+distribution for this job.
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 6
+ |
+
+ 6
+ |
+
+ 0
+ |
+
+
+
+ Number of
+ Tasks
+ |
+
+ 3
+ |
+
+ 3
+ |
+
+ 0
+ |
+
+
+
+ Distribution
+ of Tasks to Nodes, by Task id
+ |
+
+
+ 0 2 4 |
+
+
+ 1 3 5 |
+
+ -
+ |
+
+
+
+
+
+Example 6: Consumable resources with default allocation and plane distribution of tasks to nodes
+A job requires 16 CPUs (8 tasks and 2 CPUs per task with no overcommitment).
+Use all 3 nodes in the default partition. Distribute tasks to each node in blocks of two in a round-robin fashion.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+
+Command line:
+srun --nodes=3-3 --distribution=plane=2 --ntasks=8 --cpus-per-task=2 ...
+
+Comments:
+The options specify the following conditions for the job: 8 tasks, 2 unique CPUs
+per task, using all 3 nodes in the partition. To satisfy these conditions using
+the default allocation method across nodes (block), SLURM allocates 8 CPUs from
+the first node, 6 CPUs from the second node and 2 CPUs from the third node.
+The --distribution=plane=2 option causes SLURM
+to distribute tasks in blocks of two to each of the nodes in a round-robin fashion,
+subject to the number of CPUs allocated on each node. So, for example, only 1 task
+is distributed to the third node because only 2 CPUs were allocated on that node and
+each task requires 2 CPUs. The following table shows a possible pattern of allocation
+and distribution for this job.
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 8
+ |
+
+ 6
+ |
+
+ 2
+ |
+
+
+
+ Number of
+ Tasks
+ |
+
+ 4
+ |
+
+ 3
+ |
+
+ 1
+ |
+
+
+
+ Distribution
+ of Tasks to Nodes, by Task id
+ |
+
+ 0
+ 1 5 6
+ |
+
+ 2
+ 3 7
+ |
+
+ 4
+
+ |
+
+
+
+
+
+Example 7: Consumable resources with overcommitment of CPUs to tasks
+A job has 20 tasks. Run the job in a single node.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+
+Command line:
+srun --nodes=1-1 --ntasks=20 --overcommit ...
+
+Comments:
+The
+--overcommit option allows the job to
+run in only one node by overcommitting CPUs to tasks. The following table shows
+ a possible pattern of allocation and distribution for this job.
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 8
+ |
+
+ 0
+ |
+
+ 0
+ |
+
+
+
+ Number of
+ Tasks
+ |
+
+ 20
+ |
+
+ 0
+ |
+
+ 0
+ |
+
+
+
+ Distribution
+ of Tasks to Nodes, by Task id
+ |
+
+ 0
+ - 19
+ |
+
+ -
+ |
+
+ -
+ |
+
+
+
+
+
+Example 8: Consumable resources with resource sharing between jobs
+2 jobs each require 6 CPUs (6 tasks per job with no overcommitment).
+Run both jobs simultaneously in a single node.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+
+Command line:
+srun --nodes=1-1 --nodelist=n0 --ntasks=6 --share ...
+srun --nodes=1-1 --nodelist=n0 --ntasks=6 --share ...
+
+Comments:
+The --nodes=1-1 and -w n0
+srun options together restrict both jobs to node n0. The
+Shared=YES option in the partition definition plus
+the --share srun option allows the two
+jobs to share CPUs on the node.
+
+
+Example 9: Consumable resources on multithreaded node, allocating only one thread per core
+A job requires 8 CPUs (8 tasks with no overcommitment). Run the job on node n3,
+allocating only one thread per core.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_CPU
+
+Command line:
+srun --partition=hypernode --ntasks=8 --hint=nomultithread ...
+
+Comments:
+The CR_CPU configuration
+option enables the allocation of only one thread per core.
+The --hint=nomultithread
+srun option causes SLURM to allocate only one thread from each core to
+this job. The following table shows a possible pattern of allocation
+for this job.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n3
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Core id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+ 8
+ |
+
+ 9
+ |
+
+ 10
+ |
+
+ 11
+ |
+
+ 12
+ |
+
+ 13
+ |
+
+ 14
+ |
+
+ 15
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 4
+ |
+
+ 4
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0
+ 2 4 6
+ |
+
+ 8
+ 10 12 14
+ |
+
+
+
+
+
+Example 10: Consumable resources with task affinity and core binding
+A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a
+single node in the default partition. Apply core binding to each task.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+
+Command line:
+srun --nodes=1-1 --ntasks=6 --cpu_bind=cores ...
+
+Comments:
+Using the default allocation method within nodes (cyclic), SLURM allocates
+3 CPUs on each socket of 1 node. Using the default distribution method
+within nodes (cyclic), SLURM distributes and binds each task to an allocated
+core in a round-robin fashion across the sockets. The following table shows
+a possible pattern of allocation, distribution and binding for this job.
+For example, task id 2 is bound to CPU id 1.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 3
+ |
+
+ 3
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2
+ |
+
+ 4 5 6
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task id
+ |
+
+ 0
+ |
+
+ 2
+ |
+
+ 4
+ |
+
+ -
+ |
+
+ 1
+ |
+
+ 3
+ |
+
+ 5
+ |
+
+ -
+ |
+
+
+
+
+
+Example 11: Consumable resources with task affinity and socket binding, Case 1
+A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in
+a single node in the default partition. Apply socket binding to each task.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+
+Command line:
+srun --nodes=1-1 --ntasks=6 --cpu_bind=sockets ...
+
+Comments:
+Using the default allocation method within nodes (cyclic), SLURM allocates 3
+CPUs on each socket of 1 node. Using the default distribution method within nodes
+(cyclic), SLURM distributes and binds each task to all of the allocated CPUs in
+one socket in a round-robin fashion across the sockets. The following table shows
+a possible pattern of allocation, distribution and binding for this job. For
+example, task ids 1, 3 and 5 are all bound to CPU ids 4, 5 and 6.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 3
+ |
+
+ 3
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2
+ |
+
+ 4 5 6
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task ids
+ |
+
+ 0
+ 2 4
+ |
+
+ -
+ |
+
+ 1
+ 3 5
+ |
+
+ -
+ |
+
+
+
+
+
+Example 12: Consumable resources with task affinity and socket binding, Case 2
+A job requires 6 CPUs (2 tasks with 3 cpus per task and no overcommitment). Run the job in
+a single node in the default partition. Allocate cores using the block allocation method.
+Distribute cores using the block distribution method. Apply socket binding to each task.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+
+Command line:
+srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 --cpu_bind=sockets
+--distribution=block:block ...
+
+Comments:
+Using the block allocation method, SLURM allocates 4
+CPUs on one socket and 2 CPUs on the other socket of one node. Using the block distribution method within
+nodes, SLURM distributes 3 CPUs to each task. Applying socket binding, SLURM binds each task to all
+allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows
+a possible pattern of allocation, distribution and binding for this job. In this example, using the
+block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on
+socket id 1. Using the block distribution method, CPU ids 0-2 were distributed to task id 0, and CPU ids
+3-5 were distributed to task id 1. Applying socket binding, task id 0 is therefore bound to the allocated
+CPUs on socket 0, and task id 1 is bound to the allocated CPUs on both sockets.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 4
+ |
+
+ 2
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2 3
+ |
+
+ 4 5
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task ids
+ |
+
+ 0 1
+ |
+
+ 1
+ |
+
+ -
+ |
+
+
+
+
+
+Example 13: Consumable resources with task affinity and socket binding, Case 3
+A job requires 6 CPUs (2 tasks with 3 cpus per task and no overcommitment). Run the job in
+a single node in the default partition. Allocate cores using the block allocation method.
+Distribute cores using the cyclic distribution method. Apply socket binding to each task.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+
+Command line:
+srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 --cpu_bind=sockets
+--distribution=block:cyclic ...
+
+Comments:
+Using the block allocation method, SLURM allocates 4
+CPUs on one socket and 2 CPUs on the other socket of one node. Using the cyclic distribution method within
+nodes, SLURM distributes 3 CPUs to each task. Applying socket binding, SLURM binds each task to all
+allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows
+a possible pattern of allocation, distribution and binding for this job. In this example, using the
+block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on
+socket id 1. Using the cyclic distribution method, CPU ids 0, 1 and 4 were distributed to task id 0, and CPU ids
+2, 3 and 5 were distributed to task id 1. Applying socket binding, both tasks are therefore bound to the
+allocated CPUs on both sockets.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 4
+ |
+
+ 2
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2 3
+ |
+
+ 4 5
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task ids
+ |
+
+ 0 1
+ |
+
+ 0 1
+ |
+
+ -
+ |
+
+
+
+
+
+Example 14: Consumable resources with task affinity and customized allocation and distribution
+A job requires 18 CPUs (18 tasks with no overcommitment). Run the job in the
+default partition. Allocate 6 CPUs on each node using block allocation within
+nodes. Use cyclic distribution of tasks to nodes and block distribution of
+tasks for CPU binding.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+
+Command line:
+srun --nodes=3-3 --ntasks=18 --ntasks-per-node=6
+--distribution=cyclic:block --cpu_bind=cores ...
+
+Comments:
+This example shows the use of task affinity with customized allocation of CPUs and
+distribution of tasks across nodes and within nodes for binding. The srun options
+specify the following conditions for the job: 18 tasks, 1 unique CPU per task, using
+all 3 nodes in the partition, with 6 tasks per node.
+The CR_CORE_DEFAULT_DIST_BLOCK
+configuration option specifies block allocation within nodes. To satisfy these
+conditions, SLURM allocates 6 CPUs on each node, with 4 CPUs allocated on one socket
+and 2 CPUs on the other socket. The
+--distribution=cyclic:block option specifies cyclic distribution of
+tasks to nodes and block distribution of tasks to CPUs within nodes for binding.
+The following table shows a possible pattern of allocation, distribution and binding
+for this job. For example, task id 10 is bound to CPU id 3 on node n1.
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 4
+ |
+
+ 2
+ |
+
+ 4
+ |
+
+ 2
+ |
+
+ 4
+ |
+
+ 2
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2 3 4 5
+ |
+
+ 0 1 2 3 4 5
+ |
+
+ 0 1 2 3 4 5
+ |
+
+
+
+ Number of
+ Tasks
+ |
+
+ 6
+ |
+
+ 6
+ |
+
+ 6
+ |
+
+
+
+ Distribution
+ of Tasks to Nodes, by Task id
+ |
+
+ 0
+ 3
+ 6
+ 9
+ 12
+ 15
+
+ |
+
+ 1
+ 4
+ 7
+ 10
+ 13
+ 16
+
+ |
+
+ 2
+ 5
+ 8
+ 11
+ 14
+ 17
+
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task id
+ |
+
+ 0
+ |
+
+ 3
+ |
+
+ 6
+ |
+
+ 9
+ |
+
+ 12
+ |
+
+ 15
+ |
+
+ -
+ |
+
+ -
+ |
+
+ 1
+ |
+
+ 4
+ |
+
+ 7
+ |
+
+ 10
+ |
+
+ 13
+ |
+
+ 16
+ |
+
+ -
+ |
+
+ -
+ |
+
+ 2
+ |
+
+ 5
+ |
+
+ 8
+ |
+
+ 11
+ |
+
+ 14
+ |
+
+ 17
+ |
+
+ -
+ |
+
+ -
+ |
+
+
+
+
+
+Example 15: Consumable resources with task affinity to optimize the performance of a multi-task,
+multi-thread job
+A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment). Run
+the job in the default partition, managing the CPUs to optimize the performance
+of the job.
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+
+Command line:
+srun --ntasks=3 --cpus-per-task=3 --ntasks-per-node=1 --cpu_bind=cores ...
+
+Comments:
+To optimize the performance of this job, the user wishes to allocate 3 CPUs from each of
+3 sockets and bind each task to the 3 CPUs in a single socket. The
+SelectTypeParameters configuration option specifies
+a consumable resource type of cores and block allocation within nodes. The
+TaskPlugin
+and TaskPluginParam
+configuration options enable task affinity. The srun options specify the following
+conditions for the job: 3 tasks, with 3 unique CPUs per task, with 1 task per node. To satisfy
+these conditions, SLURM allocates 3 CPUs from one socket in each of the 3 nodes in the default partition. The
+--cpu_bind=cores option causes SLURM to bind
+each task to the 3 allocated CPUs on the node to which it is distributed. The
+following table shows a possible pattern of allocation, distribution and binding
+for this job. For example, task id 2 is bound to CPU ids 0, 1 and 2 on socket id 0 of node n2.
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+ n1
+ |
+
+ n2
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 3
+ |
+
+ 0
+ |
+
+ 3
+ |
+
+ 0
+ |
+
+ 3
+ |
+
+ 0
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2
+ |
+
+ 0 1 2
+ |
+
+ 0 1 2
+ |
+
+
+
+ Number of
+ Tasks
+ |
+
+ 1
+ |
+
+ 1
+ |
+
+ 1
+ |
+
+
+
+ Distribution
+ of Tasks to Nodes, by Task id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task id
+ |
+
+ 0
+ |
+
+ -
+ |
+
+ 1
+ |
+
+ -
+ |
+
+ 2
+ |
+
+ --
+ |
+
+
+
+
+
+Example 16: Consumable resources with task cgroup and core binding
+A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a
+single node in the default partition. Apply core binding to each task using the task/cgroup plugin.
+
+slurm.conf options:
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+TaskPlugin=task/cgroup
+
+cgroup.conf options:
+ConstrainCores=yes
+TaskAffinity=yes
+
+Command line:
+srun --nodes=1-1 --ntasks=6 --cpu_bind=cores ...
+
+Comments:
+The task/cgroup plugin currently supports only the block method for
+allocating cores within nodes and distributing tasks to CPUs for binding.
+The following table shows a possible pattern of allocation, distribution
+and binding for this job. For example, task id 2 is bound to CPU id 2.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodename
+ |
+
+ n0
+ |
+
+
+
+ Socket id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+
+
+ Number of
+ Allocated CPUs
+ |
+
+ 4
+ |
+
+ 2
+ |
+
+
+
+ Allocated
+ CPU ids
+ |
+
+ 0 1 2 3
+ |
+
+ 4 5
+ |
+
+
+
+ Binding of
+ Tasks to CPUs
+ |
+
+ CPU id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ 6
+ |
+
+ 7
+ |
+
+
+
+ Task id
+ |
+
+ 0
+ |
+
+ 1
+ |
+
+ 2
+ |
+
+ 3
+ |
+
+ 4
+ |
+
+ 5
+ |
+
+ -
+ |
+
+ -
+ |
+
+
+
+
+
+
+Last modified 26 September 2011
+
+
diff -Nru slurm-llnl-2.2.7/doc/html/cray.shtml slurm-llnl-2.3.2/doc/html/cray.shtml
--- slurm-llnl-2.2.7/doc/html/cray.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/cray.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -2,144 +2,637 @@
SLURM User and Administrator Guide for Cray systems
-NOTE: As of January 2009, the SLURM interface to Cray systems is incomplete.
-
User Guide
-This document describes the unique features of SLURM on
-Cray computers.
+
This document describes the unique features of SLURM on Cray computers.
You should be familiar with the SLURM's mode of operation on Linux clusters
-before studying the relatively few differences in Cray system
-operation described in this document.
+before studying the differences in Cray system operation described in this
+document.
+
+SLURM version 2.3 is designed to operate as a job scheduler over Cray's
+Application Level Placement Scheduler (ALPS).
+Use SLURM's sbatch or salloc commands to create a resource
+allocation in ALPS.
+Then use ALPS' aprun command to launch parallel jobs within the resource
+allocation.
+The resource allocation is terminated once the batch script or the
+salloc command terminates.
+Alternately there is an aprun wrapper distributed with SLURM in
+contribs/cray/srun which will translate srun options
+into the equivalent aprun options. This wrapper will also execute
+salloc as needed to create a job allocation in which to run the
+aprun command. The srun script contains two new options:
+--man will print a summary of the options including notes about which
+srun options are not supported and --alps=" which can be used
+to specify aprun options which lack an equivalent within srun.
+For example, srun --alps="-a xt" -n 4 a.out.
+Since aprun is used to launch tasks (the equivalent of a SLURM
+job step), the job steps will not be visible using SLURM commands.
+Other than SLURM's srun command being replaced by aprun
+and the job steps not being visible, all other SLURM commands will operate
+as expected. Note that in order to build and install the aprun wrapper
+described above, execute "configure" with the --with-srun2aprun
+option or add %_with_srun2aprun 1 to your ~/.rpmmacros file.
+
+Node naming and node geometry on Cray XT/XE systems
+SLURM node names will be of the form "nid#####" where "#####" is a five-digit sequence number.
+ Other information available about the node includes its XYZ coordinate in the node's NodeAddr
+ field and its component label in the HostNodeName field.
+ The format of the component label is "c#-#c#s#n#" where the "#" fields represent in order:
+ cabinet, row, cage, blade or slot, and node.
+ For example "c0-1c2s5n3" is cabinet 0, row 1, cage 2, slot 5 and node 3.
+
+Cray XT/XE systems come with a 3D torus by default. On smaller systems the cabling in X dimension is
+ omitted, resulting in a two-dimensional torus (1 x Y x Z). On Gemini/XE systems, pairs of adjacent nodes
+ (nodes 0/1 and 2/3 on each blade) share one network interface each. This causes the same Y coordinate to
+ be assigned to those nodes, so that the number of distinct torus coordinates is half the number of total
+ nodes.
+The SLURM smap and sview tools can visualize node torus positions. Clicking on a particular
+ node shows its NodeAddr field, which is its (X,Y,Z) torus coordinate base-36 encoded as a 3-character
+ string. For example, a NodeAddr of '07A' corresponds to the coordinates X = 0, Y = 7, Z = 10.
+ The NodeAddr of a node can also be shown using 'scontrol show node nid#####'.
+
+Please note that the sbatch/salloc options "--geometry" and "--no-rotate" are BlueGene-specific
+ and have no impact on Cray systems. Topological node placement depends on what Cray makes available via the
+ ALPS_NIDORDER configuration option (see below).
+
+Specifying thread depth
+For threaded applications, use the --cpus-per-task/-c parameter of sbatch/salloc to set
+ the thread depth per node. This corresponds to mppdepth in PBS and to the aprun -d parameter. Please
+ note that SLURM does not set the OMP_NUM_THREADS environment variable. Hence, if an application spawns
+ 4 threads, an example script would look like
+
+ #SBATCH --comment="illustrate the use of thread depth and OMP_NUM_THREADS"
+ #SBATCH --ntasks=3
+ #SBATCH -c 4
+ export OMP_NUM_THREADS=4
+ aprun -n 3 -d $OMP_NUM_THREADS ./my_exe
+
+
+Specifying number of tasks per node
+SLURM uses the same default as ALPS, assigning each task to a single core/CPU. In order to
+ make more resources available per task, you can reduce the number of processing elements
+ per node (aprun -N parameter, mppnppn in PBS) with the
+ --ntasks-per-node option of sbatch/salloc.
+ This is in particular necessary when tasks require more memory than the per-CPU default.
+
+Specifying per-task memory
+In Cray terminology, a task is also called a "processing element" (PE), hence below we
+ refer to the per-task memory and "per-PE" memory interchangeably. The per-PE memory
+ requested through the batch system corresponds to the aprun -m parameter.
+
+Due to the implicit default assumption that 1 task runs per core/CPU, the default memory
+ available per task is the per-CPU share of node_memory / number_of_cores. For
+ example, on a XT5 system with 16000MB per 12-core node, the per-CPU share is 1333MB.
+
+If nothing else is specified, the --mem option to sbatch/salloc can only be used to
+ reduce the per-PE memory below the per-CPU share. This is also the only way that
+ the --mem-per-cpu option can be applied (besides, the --mem-per-cpu option
+ is ignored if the user forgets to set --ntasks/-n).
+ Thus, the preferred way of specifying memory is the more general --mem option.
+
+To increase the per-PE memory settable via the --mem option requires making
+ more per-task resources available using the --ntasks-per-node option to sbatch/salloc.
+ This allows --mem to request up to node_memory / ntasks_per_node MegaBytes.
+
+When --ntasks-per-node is 1, the entire node memory may be requested by the application.
+ Setting --ntasks-per-node to the number of cores per node yields the default per-CPU share
+ minimum value.
+
+For all cases in between these extremes, set --mem=per_task_memory and
+
+ --ntasks-per-node=floor(node_memory / per_task_memory)
+
+whenever per_task_memory needs to be larger than the per-CPU share.
+
+Example: An application with 64 tasks needs 7500MB per task on a cluster with 32000MB and 24 cores
+ per node. Hence ntasks_per_node = floor(32000/7500) = 4.
+
+ #SBATCH --comment="requesting 7500MB per task on 32000MB/24-core nodes"
+ #SBATCH --ntasks=64
+ #SBATCH --ntasks-per-node=4
+ #SBATCH --mem=7500
+
+If you would like to fine-tune the memory limit of your application, you can set the same parameters in
+ a salloc session and then check directly, using
+
+ apstat -rvv -R $BASIL_RESERVATION_ID
+
+to see how much memory has been requested.
-SLURM's primary mode of operation is designed for use on clusters with
-nodes configured in a one-dimensional space.
-Minor changes were required for the smap and sview tools
-to map nodes in a three-dimensional space.
-Some changes are also desirable to optimize job placement in three-dimensional
-space.
-
-SLURM has added an interface to Cray's Application Level Placement Scheduler
-(ALPS). The ALPS aprun command must used for task launch rather than SLURM's
-srun command. You should create a resource reservation using SLURM's
-salloc or sbatch command and execute aprun from within
-that allocation. /p>
+
Using aprun -B
+CLE 3.x allows a nice aprun shortcut via the -B option, which
+ reuses all the batch system parameters (--ntasks, --ntasks-per-node,
+ --cpus-per-task, --mem) at application launch, as if the corresponding
+ (-n, -N, -d, -m) parameters had been set; see the aprun(1) manpage
+ on CLE 3.x systems for details.
+
+Node ordering options
+SLURM honours the node ordering policy set for Cray's Application Level Placement Scheduler (ALPS). Node
+ ordering is a configurable system option (ALPS_NIDORDER in /etc/sysconfig/alps). The current
+ setting is reported by 'apstat -svv' (look for the line starting with "nid ordering option") and
+ can not be changed at runtime. The resulting, effective node ordering is revealed by 'apstat -no'
+ (if no special node ordering has been configured, 'apstat -no' shows the
+ same order as 'apstat -n').
+
+SLURM uses exactly the same order as 'apstat -no' when selecting
+ nodes for a job. With the --contiguous option to sbatch/salloc
+ you can request a contiguous (relative to the current ALPS nid ordering) set
+ of nodes. Note that on a busy system there is typically more fragmentation,
+ hence it may take longer (or even prove impossible) to allocate contiguous
+ sets of a larger size.
+
+Cray/ALPS node ordering is a topic of ongoing work, some information can be found in the CUG-2010 paper
+ "ALPS, Topology, and Performance" by Carl Albing and Mark Baker.
Administrator Guide
-Cray/ALPS configuration
+Install supporting rpms
+
+The build requires a few -devel RPMs listed below. You can obtain these from
+SuSe/Novell.
+
+- CLE 2.x uses SuSe SLES 10 packages (rpms may be on the normal isos)
+- CLE 3.x uses Suse SLES 11 packages (rpms are on the SDK isos, there
+are two SDK iso files for SDK)
+
+
+You can check by logging onto the boot node and running
+
+boot: # xtopview
+default: # rpm -qa
+
+
+The list of packages that should be installed is:
+
+- expat-2.0.xxx
+- libexpat-devel-2.0.xxx
+- cray-MySQL-devel-enterprise-5.0.64 (this should be on the Cray iso)
+
+
+For example, loading MySQL can be done like this:
+
+smw: # mkdir mnt
+smw: # mount -o loop,ro xe-sles11sp1-trunk.201107070231a03.iso mnt
+smw: # find mnt -name cray-MySQL-devel-enterprise\*
+mnt/craydist/xt-packages/cray-MySQL-devel-enterprise-5.0.64.1.0000.2899.19.2.x86_64.rpm
+smw: # scp mnt/craydist/xt-packages/cray-MySQL-devel-enterprise-5.0.64.1.0000.2899.19.2.x86_64
+
+
+Then switch to boot node and run:
+
+boot: # xtopview
+default: # rpm -ivh /software/cray-MySQL-devel-enterprise-5.0.64.1.0000.2899.19.2.x86_64.rpm
+default: # exit
+
+
+All Cray-specific PrgEnv and compiler modules should be removed and root
+privileges will be required to install these files.
+
+Create a build root
+
+The build is done on a normal service node, where you like
+(e.g. /ufs/slurm/build would work).
+Most scripts check for the environment variable LIBROOT.
+You can either edit the scripts or export this variable. Easiest way:
+
+
+login: # export LIBROOT=/ufs/slurm/build
+login: # mkdir -vp $LIBROOT
+login: # cd $LIBROOT
+
+
+Install SLURM modulefile
+
+This file is distributed as part the SLURM tar-ball in
+contribs/cray/opt_modulefiles_slurm. Install it as
+/opt/modulefiles/slurm (or anywhere else in your module path).
+It means that you can use Munge as soon as it is built.
+
+login: # scp ~/slurm/contribs/cray/opt_modulefiles_slurm root@boot:/rr/current/software/
+
+
+Build and install Munge
+
+Note the Munge installation process on Cray systems differs
+somewhat from that described in the
+
+MUNGE Installation Guide.
+
+Munge is the authentication daemon and needed by SLURM. Download
+munge-0.5.10.tar.bz2 or newer from
+
+http://code.google.com/p/munge/downloads/list. This is how one
+can build on a login node and install it.
+
+login: # cd $LIBROOT
+login: # cp ~/slurm/contribs/cray/munge_build_script.sh $LIBROOT
+login: # mkdir -p ${LIBROOT}/munge/zip
+login: # curl -O http://munge.googlecode.com/files/munge-0.5.10.tar.bz2
+login: # cp munge-0.5.10.tar.bz2 ${LIBROOT}/munge/zip
+login: # chmod u+x ${LIBROOT}/munge/zip/munge_build_script.sh
+login: # ${LIBROOT}/munge/zip/munge_build_script.sh
+(generates lots of output and generates a tar-ball called
+$LIBROOT/munge_build-.*YYYY-MM-DD.tar.gz)
+login: # scp munge_build-2011-07-12.tar.gz root@boot:/rr/current/software
+
+
+Install the tar-ball on the boot node and build an encryption
+key file by executing:
+
+boot: # xtopview
+default: # tar -zxvf $LIBROOT/munge_build-*.tar.gz -C /rr/current /
+default: # dd if=/dev/urandom bs=1 count=1024 >/opt/slurm/munge/etc/munge.key
+default: # chmod go-rxw /opt/slurm/munge/etc/munge.key
+default: # exit
+
+
+Configure Munge
+
+The following steps apply to each login node and the sdb, where
+
+- The slurmd or slurmctld daemon will run and/or
+- Users will be submitting jobs
+
+
+
+login: # mkdir --mode=0711 -vp /var/lib/munge
+login: # mkdir --mode=0700 -vp /var/log/munge
+login: # mkdir --mode=0755 -vp /var/run/munge
+login: # module load slurm
+
+
+sdb: # mkdir --mode=0711 -vp /var/lib/munge
+sdb: # mkdir --mode=0700 -vp /var/log/munge
+sdb: # mkdir --mode=0755 -vp /var/run/munge
+
+
+Start the munge daemon and test it.
+
+login: # munged --key-file /opt/slurm/munge/etc/munge.key
+login: # munge -n
+MUNGE:AwQDAAAEy341MRViY+LacxYlz+mchKk5NUAGrYLqKRUvYkrR+MJzHTgzSm1JALqJcunWGDU6k3vpveoDFLD7fLctee5+OoQ4dCeqyK8slfAFvF9DT5pccPg=:
+
+
+When done, verify network connectivity by executing:
+
+- munge -n | ssh other-login-host /opt/slurm/munge/bin/unmunge
+
+
+
+If you decide to keep the installation, you may be interested in automating
+the process using an init.d script distributed with the Munge. This
+should be installed on all nodes running munge, e.g., 'xtopview -c login' and
+'xtopview -n sdbNodeID'
+
+
+boot: # xtopview -c login
+login: # cp /software/etc_init_d_munge /etc/init.d/munge
+login: # chmod u+x /etc/init.d/munge
+login: # chkconfig munge on
+login: # exit
+boot: # xtopview -n 31
+node/31: # cp /software/etc_init_d_munge /etc/init.d/munge
+node/31: # chmod u+x /etc/init.d/munge
+node/31: # chkconfig munge on
+node/31: # exit
+
+
+Enable the Cray job service
+
+This is a common dependency on Cray systems. ALPS relies on the Cray job service to
+ generate cluster-unique job container IDs (PAGG IDs). These identifiers are used by
+ ALPS to track running (aprun) job steps. The default (session IDs) is not unique
+ across multiple login nodes. This standard procedure is described in chapter 9 of
+ S-2393 and takes only two
+ steps, both to be done on all 'login' class nodes (xtopview -c login):
+
+ - make sure that the /etc/init.d/job service is enabled (chkconfig) and started
+ - enable the pam_job.so module from /opt/cray/job/default in /etc/pam.d/common-session
+ (NB: the default pam_job.so is very verbose, a simpler and quieter variant is provided
+ in contribs/cray.)
+
+The latter step is required only if you would like to run interactive
+ salloc sessions.
+
+boot: # xtopview -c login
+login: # chkconfig job on
+login: # emacs -nw /etc/pam.d/common-session
+(uncomment the pam_job.so line)
+session optional /opt/cray/job/default/lib64/security/pam_job.so
+login: # exit
+boot: # xtopview -n 31
+node/31:# chkconfig job on
+node/31:# emacs -nw /etc/pam.d/common-session
+(uncomment the pam_job.so line as shown above)
+
+
+Build and Configure SLURM
-Node names must have a three-digit suffix describing their
-zero-origin position in the X-, Y- and Z-dimension respectively (e.g.
-"tux000" for X=0, Y=0, Z=0; "tux123" for X=1, Y=2, Z=3).
-Rectangular prisms of nodes can be specified in SLURM commands and
-configuration files using the system name prefix with the end-points
-enclosed in square brackets and separated by an "x".
-For example "tux[620x731]" is used to represent the eight nodes in a
-block with endpoints at "tux620" and "tux731" (tux620, tux621, tux630,
-tux631, tux720, tux721, tux730, tux731).
-NOTE: We anticipate that Cray will provide node coordinate
-information via the ALPS interface in the future, which may result
-in a more flexible node naming convention.
-
-In ALPS, configure each node to be scheduled using SLURM as type
-BATCH.
-
-SLURM configuration
-
-Four variables must be defined in the config.h file:
-APBASIL_LOC (location of the apbasil command),
-HAVE_FRONT_END, HAVE_CRAY and HAVE_3D.
-The apbasil command should automatically be found.
-If that is not the case, please notify us of its location on your system
-and we will add that to the search paths tested at configure time.
-The other variable definitions can be initiated in several different
-ways depending upon how SLURM is being built.
-
-- Execute the configure command with the option
---enable-cray-xt OR
-- Execute the rpmbuild command with the option
---with cray_xt OR
-- Add %with_cray_xt 1 to your ~/.rpmmacros file.
-
-
-One slurmd will be used to run all of the batch jobs on
-the system. It is from here that users will execute aprun
-commands to launch tasks.
+
SLURM can be built and installed as on any other computer as described
+in the Quick Start Administrator Guide.
+An example of building and installing SLURM version 2.3.0 is shown below.
+
+
+login: # mkdir build && cd build
+login: # slurm/configure \
+ --prefix=/opt/slurm/2.3.0 \
+ --with-munge=/opt/slurm/munge/ \
+ --with-mysql_config=/opt/cray/MySQL/5.0.64-1.0000.2899.20.2.gem/bin \
+ --with-srun2aprun
+login: # make -j
+login: # mkdir install
+login: # make DESTDIR=/tmp/slurm/build/install install
+login: # make DESTDIR=/tmp/slurm/build/install install-contrib
+login: # cd install
+login: # tar czf slurm_opt.tar.gz opt
+login: # scp slurm_opt.tar.gz boot:/rr/current/software
+
+
+
+boot: # xtopview
+default: # tar xzf /software/slurm_opt.tar.gz -C /
+default: # cd /opt/slurm/
+default: # ln -s 2.3.0 default
+
+
+When building SLURM's slurm.conf configuration file, use the
+NodeName parameter to specify all batch nodes to be scheduled.
+If nodes are defined in ALPS, but not defined in the slurm.conf file, a
+complete list of all batch nodes configured in ALPS will be logged by
+the slurmctld daemon when it starts.
+One would typically use this information to modify the slurm.conf file
+and restart the slurmctld daemon.
+Note that the NodeAddr and NodeHostName fields should not be
+configured, but will be set by SLURM using data from ALPS.
+NodeAddr will be set to the node's XYZ coordinate and be used by SLURM's
+smap and sview commands.
+NodeHostName will be set to the node's component label.
+The format of the component label is "c#-#c#s#n#" where the "#" fields
+represent in order: cabinet, row, cage, blade or slot, and node.
+For example "c0-1c2s5n3" is cabinet 0, row 1, cage 2, slot 5 and node 3.
+
+The slurmd daemons will not execute on the compute nodes, but will
+execute on one or more front end nodes.
+It is from here that batch scripts will execute aprun commands to
+launch tasks.
This is specified in the slurm.conf file by using the
-NodeName field to identify the compute nodes and both the
-NodeAddr and NodeHostname fields to identify the
-computer when slurmd runs (normally some sort of front-end node)
+FrontendName and optionally the FrontEndAddr fields
as seen in the examples below.
-Next you need to select from two options for the resource selection
-plugin (the SelectType option in SLURM's slurm.conf configuration
-file):
-
-- select/cons_res - Performs a best-fit algorithm based upon a
-one-dimensional space to allocate whole nodes, sockets, or cores to jobs
-based upon other configuration parameters.
-- select/linear - Performs a best-fit algorithm based upon a
-one-dimensional space to allocate whole nodes to jobs.
-
-
-In order for select/cons_res or select/linear to
-allocate resources physically nearby in three-dimensional space, the
-nodes be specified in SLURM's slurm.conf configuration file in
-such a fashion that those nearby in slurm.conf (one-dimensional
-space) are also nearby in the physical three-dimensional space.
-If the definition of the nodes in SLURM's slurm.conf configuration
-file are listed on one line (e.g. NodeName=tux[000x333]),
-SLURM will automatically perform that conversion using a
-Hilbert curve.
-Otherwise you may construct your own node name ordering and list them
-one node per line in slurm.conf.
-Note that each node must be listed exactly once and consecutive
-nodes should be nearby in three-dimensional space.
-Also note that each node must be defined individually rather than using
-a hostlist expression in order to preserve the ordering (there is no
-problem using a hostlist expression in the partition specification after
-the nodes have already been defined).
-The open source code used by SLURM to generate the Hilbert curve is
-included in the distribution at contribs/skilling.c in the event
-that you wish to experiment with it to generate your own node ordering.
-Two examples of SLURM configuration files are shown below:
-
-
-# slurm.conf for Cray XT system of size 4x4x4
-# Parameters removed here
-SelectType=select/linear
-NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
-NodeName=tux[000x333] NodeAddr=front_end NodeHostname=front_end
-PartitionName=debug Nodes=tux[000x333] Default=Yes State=UP
-
-
-
-# slurm.conf for Cray XT system of size 4x4x4
-# Parameters removed here
-SelectType=select/linear
-NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
-NodeName=tux000 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux100 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux110 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux010 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux011 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux111 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux101 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux001 NodeAddr=front_end NodeHostname=front_end
-PartitionName=debug Nodes=tux[000x111] Default=Yes State=UP
-
-
-In both of the examples above, the node names output by the
-scontrol show nodes will be ordered as defined (sequentially
-along the Hilbert curve or per the ordering in the slurm.conf file)
-rather than in numeric order (e.g. "tux001" follows "tux101" rather
-than "tux000").
-SLURM partitions should contain nodes which are defined sequentially
-by that ordering for optimal performance.
+Note that SLURM will by default kill running jobs when a node goes DOWN,
+while a DOWN node in ALPS only prevents new jobs from being scheduled on the
+node. To help avoid confusion, we recommend that SlurmdTimeout in the
+slurm.conf file be set to the same value as the suspectend
+parameter in ALPS' nodehealth.conf file.
+
+You need to specify the appropriate resource selection plugin (the
+SelectType option in SLURM's slurm.conf configuration file).
+Configure SelectType to select/cray. The select/cray
+plugin provides an interface to ALPS plus issues calls to the
+select/linear, which selects resources for jobs using a best-fit
+algorithm to allocate whole nodes to jobs (rather than individual sockets,
+cores or threads).
+
+Note that the system topology is based upon information gathered from
+the ALPS database and is based upon the ALPS_NIDORDER configuration in
+/etc/sysconfig/alps. Excerpts of a slurm.conf file for
+use on a Cray systems follow:
+
+
+#---------------------------------------------------------------------
+# SLURM USER
+#---------------------------------------------------------------------
+# SLURM user on cray systems must be root
+# This requirement derives from Cray ALPS:
+# - ALPS reservations can only be created by the job owner or root
+# (confirmation may be done by other non-privileged users)
+# - Freeing a reservation always requires root privileges
+SlurmUser=root
+
+#---------------------------------------------------------------------
+# PLUGINS
+#---------------------------------------------------------------------
+# Network topology (handled internally by ALPS)
+TopologyPlugin=topology/none
+
+# Scheduling
+SchedulerType=sched/backfill
+
+# Node selection: use the special-purpose "select/cray" plugin.
+# Internally this uses select/linear, i.e. nodes are always allocated
+# in units of nodes (other allocation is currently not possible, since
+# ALPS does not yet allow to run more than 1 executable on the same
+# node, see aprun(1), section LIMITATIONS).
+#
+# Add CR_memory as parameter to support --mem/--mem-per-cpu.
+SelectType=select/cray
+SelectTypeParameters=CR_Memory
+
+# Proctrack plugin: only/default option is proctrack/sgi_job
+# ALPS requires cluster-unique job container IDs and thus the /etc/init.d/job
+# service needs to be started on all slurmd and login nodes, as described in
+# S-2393, chapter 9. Due to this requirement, ProctrackType=proctrack/sgi_job
+# is the default on Cray and need not be specified explicitly.
+
+#---------------------------------------------------------------------
+# PATHS
+#---------------------------------------------------------------------
+SlurmdSpoolDir=/ufs/slurm/spool
+StateSaveLocation=/ufs/slurm/spool/state
+
+# main logfile
+SlurmctldLogFile=/ufs/slurm/log/slurmctld.log
+# slurmd logfiles (using %h for hostname)
+SlurmdLogFile=/ufs/slurm/log/%h.log
+
+# PIDs
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+
+#---------------------------------------------------------------------
+# COMPUTE NODES
+#---------------------------------------------------------------------
+# Return DOWN nodes to service when e.g. slurmd has been unresponsive
+ReturnToService=1
+
+# Configure the suspectend parameter in ALPS' nodehealth.conf file to the same
+# value as SlurmdTimeout for consistent behavior (e.g. "suspectend: 600")
+SlurmdTimeout=600
+
+# Controls how a node's configuration specifications in slurm.conf are
+# used.
+# 0 - use hardware configuration (must agree with slurm.conf)
+# 1 - use slurm.conf, nodes with fewer resources are marked DOWN
+# 2 - use slurm.conf, but do not mark nodes down as in (1)
+FastSchedule=2
+
+# Per-node configuration for PALU AMD G34 dual-socket "Magny Cours"
+# Compute Nodes. We deviate from slurm's idea of a physical socket
+# here, since the Magny Cours hosts two NUMA nodes each, which is
+# also visible in the ALPS inventory (4 Segments per node, each
+# containing 6 'Processors'/Cores).
+NodeName=DEFAULT Sockets=4 CoresPerSocket=6 ThreadsPerCore=1
+NodeName=DEFAULT RealMemory=32000 State=UNKNOWN
+
+# List the nodes of the compute partition below (service nodes are not
+# allowed to appear)
+NodeName=nid00[002-013,018-159,162-173,178-189]
+
+# Frontend nodes: these should not be available to user logins, but
+# have all filesystems mounted that are also
+# available on a login node (/scratch, /home, ...).
+FrontendName=palu[7-9]
+
+#---------------------------------------------------------------------
+# ENFORCING LIMITS
+#---------------------------------------------------------------------
+# Enforce the use of associations: {associations, limits, wckeys}
+AccountingStorageEnforce=limits
+
+# Do not propagate any resource limits from the user's environment to
+# the slurmd
+PropagateResourceLimits=NONE
+
+#---------------------------------------------------------------------
+# Resource limits for memory allocation:
+# * the Def/Max 'PerCPU' and 'PerNode' variants are mutually exclusive;
+# * use the 'PerNode' variant for both default and maximum value, since
+# - slurm will automatically adjust this value depending on
+# --ntasks-per-node
+# - if using a higher per-cpu value than possible, salloc will just
+# block.
+#--------------------------------------------------------------------
+# XXX replace both values below with your values from 'xtprocadmin -A'
+DefMemPerNode=32000
+MaxMemPerNode=32000
+
+#---------------------------------------------------------------------
+# PARTITIONS
+#---------------------------------------------------------------------
+# defaults common to all partitions
+PartitionName=DEFAULT Nodes=nid00[002-013,018-159,162-173,178-189]
+PartitionName=DEFAULT MaxNodes=178
+PartitionName=DEFAULT Shared=EXCLUSIVE State=UP DefaultTime=60
+
+# "User Support" partition with a higher priority
+PartitionName=usup Hidden=YES Priority=10 MaxTime=720 AllowGroups=staff
+
+# normal partition available to all users
+PartitionName=day Default=YES Priority=1 MaxTime=01:00:00
+
+
+SLURM supports an optional cray.conf file containing Cray-specific
+configuration parameters. This file is NOT needed for production systems,
+but is provided for advanced configurations. If used, cray.conf must be
+located in the same directory as the slurm.conf file. Configuration
+parameters supported by cray.conf are listed below.
+
+
+- apbasil
+- Fully qualified pathname to the apbasil command.
+The default value is /usr/bin/apbasil.
+- apkill
+- Fully qualified pathname to the apkill command.
+The default value is /usr/bin/apkill.
+- SDBdb
+- Name of the ALPS database.
+The default value is XTAdmin.
+- SDBhost
+- Hostname of the database server.
+The default value is based upon the contents of the 'my.cnf' file used to
+store default database access information and that defaults to host 'sdb'.
+- SDBpass
+- Password used to access the ALPS database.
+The default value is based upon the contents of the 'my.cnf' file used to
+store default database access information and that defaults to user 'basic'.
+- SDBport
+- Port used to access the ALPS database.
+The default value is 0.
+- SDBuser
+- Name of user used to access the ALPS database.
+The default value is based upon the contents of the 'my.cnf' file used to
+store default database access information and that defaults to user 'basic'.
+
+
+
+# Example cray.conf file
+apbasil=/opt/alps_simulator_40_r6768/apbasil.sh
+SDBhost=localhost
+SDBuser=alps_user
+SDBdb=XT5istanbul
+
+
+One additional configuration script can be used to insure that the slurmd
+daemons execute with the highest resource limits possible, overriding default
+limits on Suse systems. Depending upon what resource limits are propagated
+from the user's environment, lower limits may apply to user jobs, but this
+script will insure that higher limits are possible. Copy the file
+contribs/cray/etc_sysconfig_slurm into /etc/sysconfig/slurm
+for these limits to take effect. This script is executed from
+/etc/init.d/slurm, which is typically executed to start the SLURM
+daemons. An excerpt of contribs/cray/etc_sysconfig_slurm is shown
+below.
+
+
+#
+# /etc/sysconfig/slurm for Cray XT/XE systems
+#
+# Cray is SuSe-based, which means that ulimits from
+# /etc/security/limits.conf will get picked up any time SLURM is
+# restarted e.g. via pdsh/ssh. Since SLURM respects configured limits,
+# this can mean that for instance batch jobs get killed as a result
+# of configuring CPU time limits. Set sane start limits here.
+#
+# Values were taken from pam-1.1.2 Debian package
+ulimit -t unlimited # max amount of CPU time in seconds
+ulimit -d unlimited # max size of a process's data segment in KB
+
+
+SLURM's init.d script should also be installed to automatically
+start SLURM daemons when nodes boot as shown below. Be sure to edit the script
+as appropriate to reference the proper file location (modify the variable
+PREFIX).
+
+
+login: # scp /home/crayadm/ben/slurm/etc/init.d.slurm boot:/rr/current/software/
+
+
+Now create the needed directories for logs and state files then start the
+daemons on the sdb and login nodes as shown below.
+
+
+sdb: # mkdir -p /ufs/slurm/log
+sdb: # mkdir -p /ufs/slurm/spool
+sdb: # /etc/init.d/slurm start
+
+
+
+login: # /etc/init.d/slurm start
+
+
+Srun wrapper configuration
+
+The srun wrapper to aprun might require modification to run
+as desired. Specifically the $aprun variable could be set to the
+absolute pathname of that executable file. Without that modification, the
+aprun command executed will depend upon the user's search path.
+
+In order to debug the srun wrapper, uncomment the line
+
+print "comment=$command\n"
+
+If the srun wrapper is executed from
+within an existing SLURM job allocation (i.e. within salloc or an
+sbatch script), then it just executes the aprun command with
+appropriate options. If executed without an allocation, the wrapper executes
+salloc, which then executes the srun wrapper again. This second
+execution of the srun wrapper is required in order to process environment
+variables that are set by the salloc command based upon the resource
+allocation.
-Last modified 9 January 2009
+Last modified 1 August 2011
diff -Nru slurm-llnl-2.2.7/doc/html/disclaimer.shtml slurm-llnl-2.3.2/doc/html/disclaimer.shtml
--- slurm-llnl-2.2.7/doc/html/disclaimer.shtml 1970-01-01 00:00:00.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/disclaimer.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -0,0 +1,90 @@
+
+
+Legal Notices
+
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General
+Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+NO WARRANTY
+The following is an excerpt from the GNU General Public License.
+
+BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+Copyrights
+
+SLURM represents the collaborative efforts of roughly 100 people representing
+roughly 40 different organizations world-wide. A current list of contributors
+can be found at the SLURM Team web page.
+
+While many organizations contributed code and/or documentation without
+including a copyright notice, the following copyright notices are found in
+SLURM's code and documentation files:
+Copyright (C) 2011 Trinity Centre for High Performance Computing
+Copyright (C) 2010-2011 SchedMD LLC
+Copyright (C) 2009 CEA/DAM/DIF
+Copyright (C) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+Copyright (C) 2008-2011 Lawrence Livermore National Security
+Copyright (C) 2008 Vijay Ramasubramanian
+Copyright (C) 2007-2008 Red Hat, Inc.
+Copyright (C) 2007-2009 National University of Defense Technology, China
+Copyright (C) 2007-2011 Bull
+Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.
+Copyright (C) 2004-2009, Marcus Holland-Moritz
+Copyright (C) 2002-2007 The Regents of the University of California
+Copyright (C) 2002-2003 Linux NetworX
+Copyright (C) 2002 University of Chicago
+Copyright (C) 2001 Paul Marquess
+Copyright (C) 2000 Markus Friedl
+Copyright (C) 1999 Kenneth Albanowski
+Copyright (C) 1998 Todd C. Miller
+Copyright (C) 1996-2003 Maximum Entropy Data Consultants Ltd,
+Copyright (C) 1995 Tatu Ylonen , Espoo, Finland
+Copyright (C) 1989-1994, 1996-1999, 2001 Free Software Foundation, Inc.
+
+Much of the work was performed under the auspices of the U.S. Department of
+Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344.
+This work was sponsored by an agency of the United States government.
+Neither the United States Government nor Lawrence Livermore National
+Security, LLC, nor any of their employees, makes any warranty, express
+or implied, or assumes any liability or responsibility for the accuracy,
+completeness, or usefulness of any information, apparatus, product, or
+process disclosed, or represents that its use would not infringe privately
+owned rights. References herein to any specific commercial products, process,
+or services by trade names, trademark, manufacturer or otherwise does not
+necessarily constitute or imply its endorsement, recommendation, or
+favoring by the United States Government or the Lawrence Livermore National
+Security, LLC. The views and opinions of authors expressed herein do not
+necessarily state or reflect those of the United States government or
+Lawrence Livermore National Security, LLC, and shall not be used for
+advertising or product endorsement purposes.
+
+Last modified 27 June 2010
+
+
+
diff -Nru slurm-llnl-2.2.7/doc/html/dist_plane.shtml slurm-llnl-2.3.2/doc/html/dist_plane.shtml
--- slurm-llnl-2.2.7/doc/html/dist_plane.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/dist_plane.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -17,7 +17,7 @@
On One (1) node: srun -N 1-1 -n 21 -m plane=4 <...>.
The distribution results in a plane distribution with plane_size 21.
-Even thought the user specified a plane_size of 4 the final plane
+Even though the user specified a plane_size of 4 the final plane
distribution results in a plane_size of 21.
@@ -114,7 +114,7 @@
On One (1) node:
srun -N 1-1 -n 21 -m plane=4 --cpu_bind=core <...>.
-Even thought the user specified a plane_size of 4 the final plane
+Even though the user specified a plane_size of 4 the final plane
distribution results in a plane distribution with plane_size=8.
diff -Nru slurm-llnl-2.2.7/doc/html/documentation.shtml slurm-llnl-2.3.2/doc/html/documentation.shtml
--- slurm-llnl-2.2.7/doc/html/documentation.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/documentation.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -7,6 +7,8 @@
SLURM Users
- Quick Start User Guide
+- Man Pages
+- CPU Management User and Administrator Guide
- MPI Use Guide
- Support for Multi-core/Multi-threaded Architectures
- Multi-Cluster Operation
@@ -23,6 +25,7 @@
SLURM Administrators
- Quick Start Administrator Guide
+- CPU Management User and Administrator Guide
- Configuration Tool
- Troubleshooting Guide
- Large Cluster Administration Guide
@@ -32,7 +35,7 @@
- Consumable Resources Guide
- Gang Scheduling
-- Generic Resource (Gres) Scheduling
+- Generic Resource (GRES) Scheduling
- High Throughput Computing Guide
- Multifactor Job Priority
- Preemption
@@ -61,6 +64,12 @@
- Programmer Guide
- Application Programmer Interface (API) Guide
+- Design Information
+
- Plugin Programmer Guide
- Plugin Interface Details
-Last modified 27 August 2010
+Last modified 31 May 2011
diff -Nru slurm-llnl-2.2.7/doc/html/download.shtml slurm-llnl-2.3.2/doc/html/download.shtml
--- slurm-llnl-2.2.7/doc/html/download.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/download.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -1,15 +1,28 @@
Download
-
-SLURM source can be downloaded from
-
-http://sourceforge.net/projects/slurm/
+
SLURM source can be downloaded from
+
+http://www.schedmd.com/#repos
SLURM has also been packaged for
Debian and
Ubuntu
(both named slurm-llnl).
+A SLURM simulator
+is available to assess various scheduling policies.
+Under simulation jobs are not actually executed. Instead a job execution trace
+from a real system or a synthetic trace are used.
+
+
+
Related software available from various sources include:
@@ -40,7 +53,7 @@
See our Accounting web page for more information.
- Debuggers and debugging tools
@@ -68,7 +81,7 @@
is an implementation of Open Grid Forum
DRMAA 1.0 (Distributed Resource Management Application API)
specification for submission
-and control of jobs to SLURM.
+and control of jobs to SLURM.
Using DRMAA, grid applications builders, portal developers and ISVs can use
the same high-level API to link their software with different cluster/resource
management systems.
@@ -88,8 +101,8 @@
plugin also requires the libelanhosts library and
a corresponding /etc/elanhosts configuration file, used to map
hostnames to Elan IDs. The libelanhosts source is available from
-
-https://sourceforge.net/projects/slurm/.
+
+http://www.schedmd.com/download/extras/libelanhosts-0.9-1.tgz.
- I/O Watchdog
@@ -118,15 +131,15 @@
- Quadrics MPI
-- PAM Modules (pam_slurm)
+ - PAM Module (pam_slurm)
Pluggable Authentication Module (PAM) for restricting access to compute nodes
where SLURM performs resource management. Access to the node is restricted to
user root and users who have been allocated resources on that node.
NOTE: pam_slurm is included within the SLURM distribution for version 2.1
or higher.
For earlier SLURM versions, pam_slurm is available for download from
-
-https://sourceforge.net/projects/slurm/
+
+http://www.schedmd.com/download/extras/pam_slurm-1.6.tar.bz2
SLURM's PAM module has also been packaged for
Debian and
Ubuntu
@@ -176,20 +189,6 @@
repository with the following command:
svn checkout http://slurm-spank-plugins.googlecode.com/svn/trunk/ slurm-plugins
-- PAM Module (pam_slurm)
-Pluggable Authentication Module (PAM) for restricting access to compute nodes
-where SLURM performs resource management. Access to the node is restricted to
-user root and users who have been allocated resources on that node.
-NOTE: pam_slurm is included within the SLURM distribution for version 2.1
-or higher.
-For earlier SLURM versions, pam_slurm is available for download from
-
-https://sourceforge.net/projects/slurm/
-SLURM's PAM module has also been packaged for
-Debian and
-Ubuntu
-(both named libpam-slurm).
-
- Sqlog
A set of scripts that leverages SLURM's job completion logging facility
in provide information about what jobs were running at any point in the
@@ -204,6 +203,6 @@
-Last modified 20 December 2010
+Last modified 24 May 2011
diff -Nru slurm-llnl-2.2.7/doc/html/faq.shtml slurm-llnl-2.3.2/doc/html/faq.shtml
--- slurm-llnl-2.2.7/doc/html/faq.shtml 2011-06-10 16:57:27.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/faq.shtml 2011-12-05 17:20:08.000000000 +0000
@@ -44,6 +44,8 @@
the slurm-dev mailing list?
- Can I change my job's size after it has started
running?
+- Why is my MPICH2 or MVAPICH2 job not running with
+SLURM? Why does the DAKOTA program not run with SLURM?
For Administrators
@@ -86,7 +88,7 @@
core files?
- Is resource limit propagation
useful on a homogeneous cluster?
-- Do I need to maintain synchronized clocks
+
- Do I need to maintain synchronized clocks
on the cluster?
- Why are "Invalid job credential" errors
generated?
@@ -300,9 +302,9 @@
- Job: ExcNodeList=NULL
- Job: Contiguous=0
- Job: Features=NULL
-- Job: MinProcs, MinMemory, and MinTmpDisk satisfied by all nodes in
+
- Job: MinCPUs, MinMemory, and MinTmpDisk satisfied by all nodes in
the partition
-- Job: MinProcs or MinNodes not to exceed partition's MaxNodes
+- Job: MinCPUs or MinNodes not to exceed partition's MaxNodes
If the partitions specifications differ from those listed above,
+no jobs in that partition will be scheduled by the backfill scheduler.
@@ -646,9 +648,22 @@
24. Can I change my job's size after it has started
running?
-Beginning with Slurm version 2.2 it is possible to decrease a job's size after
-it has started, but it is not currently possible to increase a job's size.
-Use the scontrol command to change a job's size either by specifying
+Support to decrease the size of a running job was added to SLURM version 2.2.
+The ability to increase the size of a running job was added to SLURM version 2.3.
+While the size of a pending job may be changed with few restrictions, several
+significant restrictions apply to changing the size of a running job as noted
+below:
+
+- Support is not available on BlueGene or Cray systems due to limitations
+in the software underlying SLURM.
+- Job(s) changing size must not be in a suspended state, including jobs
+suspended for gang scheduling. The jobs must be in a state of pending or
+running. We plan to modify the gang scheduling logic in the future to
+concurrently schedule a job to be used for expanding another job and the
+job to be expanded.
+
+
+Use the scontrol command to change a job's size either by specifying
a new node count (NumNodes=) for the job or identify the specific nodes
(NodeList=) that you want the job to retain.
Any job steps running on the nodes which are relinquished by the job will be
@@ -658,11 +673,13 @@
should either be removed or altered (e.g. SLURM_NNODES, SLURM_NODELIST and
SLURM_NPROCS).
The scontrol command will generate a script that can be executed to
-reset local environment variables
+reset local environment variables.
You must retain the SLURM_JOBID environment variable in order for the
srun command to gather information about the job's current state and
-specify the desired node and/or task count in subsequent srun invocations.
-An example is shown below.
+specify the desired node and/or task count in subsequent srun invocations.
+A new accounting record is generated when a job is resized showing the job to
+have been resubmitted and restarted at the new size.
+An example is shown below.
#!/bin/bash
srun my_big_job
@@ -672,6 +689,114 @@
rm slurm_job_${SLURM_JOBID}_resize.*
+Increasing a job's size
+Directly increasing the size of a running job would adversely affect the
+scheduling of pending jobs.
+For the sake of fairness in job scheduling, expanding a running job requires
+the user to submit a new job, but specify the option
+--dependency=expand:<jobid>.
+This option tells SLURM that the job, when scheduled, can be used to expand
+the specified jobid.
+Other job options would be used to identify the required resources
+(e.g. task count, node count, node features, etc.).
+This new job's time limit will be automatically set to reflect the end time of
+the job being expanded.
+This new job's generic resources specification will be automatically set
+equal to that of the job being merged to. This is due to the current SLURM
+restriction of all nodes associated with a job needing to have the same
+generic resource specification (i.e. a job can not have one GPU on one
+node and two GPUs on another node), although this restriction may be removed
+in the future. This restriction can pose some problems when both jobs can be
+allocated resources on the same node, in which case the generic resources
+allocated to the new job will be released. If the jobs are allocated resources
+on different nodes, the generic resources associated with the resulting job
+allocation after the merge will be consistent as expected.
+Any licenses associated with the new job will be added to those available in
+the job being merged to.
+Note that partition and Quality Of Service (QOS) limits will be applied
+independently to the new job allocation so the expanded job may exceed size
+limits configured for an individual job.
+
+After the new job is allocated resources, merge that job's allocation
+into that of the original job by executing:
+scontrol update jobid=<jobid> NumNodes=0
+The jobid above is that of the job to relinquish its resources.
+To provide more control over when the job expansion occurs, the resources are
+not merged into the original job until explicitly requested.
+These resources will be transferred to the original job and the scontrol
+command will generate a script to reset variables in the second
+job's environment to reflect its modified resource allocation (which would
+be no resources).
+One would normally exit this second job at this point, since it has no
+associated resources.
+In order to generate a script to modify the environment variables for the
+expanded job, execute:
+scontrol update jobid=<jobid> NumNodes=ALL
+Then execute the script generated.
+Note that this command does not change the original job's size, but only
+generates the script to change its environment variables.
+Until the environment variables are modified (e.g. the job's node count,
+CPU count, hostlist, etc.), any srun command will only consider the resources
+in the original resource allocation.
+Note that the original job may have active job steps at the time of its
+expansion, but they will not be affected by the change.
+An example of the procedure is shown below in which the original job
+allocation waits until the second resource allocation request can be
+satisfied. The job requesting additional resources could also use the sbatch
+command and permit the original job to continue execution at its initial size.
+Note that the development of additional user tools to manage SLURM resource
+allocations is planned in the future to make this process both simpler and
+more flexible.
+
+
+$ salloc -N4 bash
+salloc: Granted job allocation 65542
+$ srun hostname
+icrm1
+icrm2
+icrm3
+icrm4
+
+$ salloc -N4 --dependency=expand:$SLURM_JOBID bash
+salloc: Granted job allocation 65543
+$ scontrol update jobid=$SLURM_JOBID NumNodes=0
+To reset SLURM environment variables, execute
+ For bash or sh shells: . ./slurm_job_65543_resize.sh
+ For csh shells: source ./slurm_job_65543_resize.csh
+$ exit
+exit
+salloc: Relinquishing job allocation 65543
+
+$ scontrol update jobid=$SLURM_JOBID NumNodes=ALL
+To reset SLURM environment variables, execute
+ For bash or sh shells: . ./slurm_job_65542_resize.sh
+ For csh shells: source ./slurm_job_65542_resize.csh
+$ . ./slurm_job_$SLURM_JOBID_resize.sh
+
+$ srun hostname
+icrm1
+icrm2
+icrm3
+icrm4
+icrm5
+icrm6
+icrm7
+icrm8
+$ exit
+exit
+salloc: Relinquishing job allocation 65542
+
+
+41. Why is my MPICH2 or MVAPICH2 job not running with
+SLURM? Why does the DAKOTA program not run with SLURM?
+The SLURM library used to support MPICH2 or MVAPICH2 references a variety of
+symbols. If those symbols resolve to functions or variables in your program
+rather than the appropriate library, the application will fail. In the case of
+DAKOTA, it contains a function named
+regcomp, which will get used rather than the POSIX regex functions.
+Rename DAKOTA's function and references from regcomp to something else to make
+it work properly.
+
@@ -953,7 +1078,7 @@
resources than physically exist on the node?
Yes in SLURM version 1.2 or higher.
In the slurm.conf file, set FastSchedule=2 and specify
-any desired node resource specifications (Procs, Sockets,
+any desired node resource specifications (CPUs, Sockets,
CoresPerSocket, ThreadsPerCore, and/or TmpDisk).
SLURM will use the resource specification for each node that is
given in slurm.conf and will not check these specifications
@@ -1269,7 +1394,7 @@
communication doesn't work. Solution is to either remove this line or
set a different nodeaddr that is known by your other nodes.
-38. How can I stop SLURM from scheduling jobs?
+
39. How can I stop SLURM from scheduling jobs?
You can stop SLURM from scheduling jobs on a per partition basis by setting
that partition's state to DOWN. Set its state UP to resume scheduling.
For example:
@@ -1278,7 +1403,7 @@
$ scontrol update PartitionName=bar State=UP
-39. Can I update multiple jobs with a
+40. Can I update multiple jobs with a
single scontrol command?
No, but you can probably use squeue to build the script taking
advantage of its filtering and formatting options. For example:
@@ -1286,7 +1411,7 @@
$ squeue -tpd -h -o "scontrol update jobid=%i priority=1000" >my.script
-40. Can SLURM be used to run jobs on
+41. Can SLURM be used to run jobs on
Amazon's EC2?
Yes, here is a description of SLURM use with
Amazon's EC2 courtesy of
@@ -1310,7 +1435,7 @@
all other instances. This way I have persistent home directories and a very
simple first-login script that configures the virtual cluster for me.
-41. If a SLURM daemon core dumps, where can I find the
+42. If a SLURM daemon core dumps, where can I find the
core file?
For slurmctld the core file will be in the same directory as its
log files (SlurmctldLogFile) if configured using a fully qualified
@@ -1326,7 +1451,7 @@
occurs. It will either be in the spawned job's working directory or in the same
location as that described above for the slurmd daemon.
-42. How can TotalView be configured to operate with
+43. How can TotalView be configured to operate with
SLURM?
The following lines should also be added to the global .tvdrc file
for TotalView to operate with SLURM:
@@ -1344,6 +1469,6 @@
-Last modified 6 January 2011
+Last modified 4 September 2011
diff -Nru slurm-llnl-2.2.7/doc/html/footer.txt slurm-llnl-2.3.2/doc/html/footer.txt
--- slurm-llnl-2.2.7/doc/html/footer.txt 2011-06-10 16:55:37.000000000 +0000
+++ slurm-llnl-2.3.2/doc/html/footer.txt 2011-12-05 17:20:08.000000000 +0000
@@ -1,32 +1,15 @@
-
-
-