ports/100629: New port: sysutils/torque - Open source resource manager
trasz
trasz at pin.if.uz.zgora.pl
Thu Jul 20 19:40:25 UTC 2006
>Number: 100629
>Category: ports
>Synopsis: New port: sysutils/torque - Open source resource manager
>Confidential: no
>Severity: non-critical
>Priority: low
>Responsible: freebsd-ports-bugs
>State: open
>Quarter:
>Keywords:
>Date-Required:
>Class: change-request
>Submitter-Id: current-users
>Arrival-Date: Thu Jul 20 19:40:16 GMT 2006
>Closed-Date:
>Last-Modified:
>Originator: trasz
>Release:
>Organization:
>Environment:
>Description:
# This is a shell archive. Save it in a file, remove anything before
# this line, and then unpack it by entering "sh file". Note, it may
# create directories; files and directories will be owned by you and
# have default permissions.
#
# This archive contains:
#
# torque
# torque/Makefile
# torque/distinfo
# torque/pkg-descr
# torque/pkg-plist
# torque/files
# torque/files/pkg-message.in
# torque/files/pbs_sched.sh.in
# torque/files/pbs_server.sh.in
# torque/files/patch-mom_mach.c
# torque/files/pbs_mom.sh.in
# torque/files/patch-fifo-Makefile.in
# torque/files/patch-pbs_mkdirs.in
#
# Create the port's top-level directory and write torque/Makefile.
# Payload lines inside the here-document carry an "X" prefix that the
# sed command strips on unpacking.
# Fix: the archived MAINTAINER line was mangled by the mailing-list
# software ("trasz at pin..."); restored to a valid address.
echo c - torque
mkdir -p torque > /dev/null 2>&1
echo x - torque/Makefile
sed 's/^X//' >torque/Makefile << 'END-of-torque/Makefile'
X# New ports collection makefile for: torque
X# Date created: 20 Jul 2006
X# Whom: trasz
X#
X# $FreeBSD$
X#
X
XPORTNAME= torque
XPORTVERSION= 2.1.1
XPORTREVISION= 1
XCATEGORIES= sysutils parallel
XMASTER_SITES= http://www.clusterresources.com/downloads/torque/
X
XMAINTAINER= trasz@pin.if.uz.zgora.pl
XCOMMENT= Open source resource manager
X
XRESTRICTED= Redistribution is only permitted for non-commercial, non-profit purposes
XGNU_CONFIGURE= yes
XCONFIGURE_ARGS= --with-rcp=scp
XUSE_GMAKE= yes
XUSE_RC_SUBR= pbs_server.sh pbs_sched.sh pbs_mom.sh
XSUB_FILES= pkg-message
XPKGMESSAGE= ${WRKDIR}/pkg-message
X
XMAN1= qalter.1 qdel.1 qhold.1 qmove.1 qmsg.1 qorder.1 qrerun.1 \
X qrls.1 qselect.1 qsig.1 qstat.1 qsub.1 pbs.1 pbsdsh.1 \
X nqs2pbs.1 basl2c.1 xpbs.1 xpbsmon.1 qmgr.1
X
XMAN3= rpp.3 tm.3 pbs_alterjob.3 pbs_connect.3 pbs_default.3 \
X pbs_deljob.3 pbs_disconnect.3 pbs_geterrmsg.3 pbs_holdjob.3 \
X pbs_locate.3 pbs_manager.3 pbs_movejob.3 pbs_msgjob.3 \
X pbs_orderjob.3 pbs_rerunjob.3 pbs_rescquery.3 \
X pbs_rescreserve.3 pbs_rlsjob.3 pbs_runjob.3 pbs_selectjob.3 \
X pbs_selstat.3 pbs_sigjob.3 pbs_stagein.3 pbs_statjob.3 \
X pbs_statnode.3 pbs_statque.3 pbs_statserver.3 pbs_submit.3 \
X pbs_terminate.3
X
XMAN7= pbs_job_attributes.7 pbs_queue_attributes.7 \
X pbs_server_attributes.7 pbs_resources_aix4.7 \
X pbs_resources_digitalunix.7 pbs_resources_aix5.7 \
X pbs_resources_darwin.7 pbs_resources_freebsd.7 \
X pbs_resources_fujitsu.7 pbs_resources_hpux10.7 \
X pbs_resources_hpux11.7 pbs_resources_irix5.7 \
X pbs_resources_irix6.7 pbs_resources_irix6array.7 \
X pbs_resources_linux.7 pbs_resources_netbsd.7 \
X pbs_resources_solaris5.7 pbs_resources_solaris7.7 \
X pbs_resources_sp2.7 pbs_resources_sunos4.7 \
X pbs_resources_unicos8.7 pbs_resources_unicosmk2.7
X
XMAN8= pbs_mom.8 pbsnodes.8 pbs_sched_basl.8 pbs_sched_cc.8 \
X pbs_sched_tcl.8 pbs_server.8 qdisable.8 qenable.8 qrun.8 \
X qstart.8 qstop.8 qterm.8
X
Xpost-install:
X ${INSTALL_SCRIPT} ${WRKSRC}/torque.setup ${EXAMPLESDIR}
X @${CAT} ${PKGMESSAGE}
X
X.include <bsd.port.mk>
END-of-torque/Makefile
# Write torque/distinfo: checksum and size data for the distfile.
# These values verify torque-2.1.1.tar.gz and must never be edited by
# hand; regenerate with "make makesum" if the distfile changes.
echo x - torque/distinfo
sed 's/^X//' >torque/distinfo << 'END-of-torque/distinfo'
XMD5 (torque-2.1.1.tar.gz) = c3032a22a03be995946d7fe71e19c6ee
XSHA256 (torque-2.1.1.tar.gz) = 7927b56f1733192eb862d8537e46afc733ac3f1461c8671fcd00c46fe09579ae
XSIZE (torque-2.1.1.tar.gz) = 2651760
END-of-torque/distinfo
# Write torque/pkg-descr, the one-paragraph port description.
# Fix: stray space before the comma after "USC" in the archived text.
echo x - torque/pkg-descr
# Idempotent; lets this archive section unpack standalone.
mkdir -p torque > /dev/null 2>&1
sed 's/^X//' >torque/pkg-descr << 'END-of-torque/pkg-descr'
XTORQUE is an open source resource manager providing control over
Xbatch jobs and distributed compute nodes. It is a community effort
Xbased on the original *PBS project and, with more than 1,200 patches,
Xhas incorporated significant advances in the areas of scalability,
Xfault tolerance, and feature extensions contributed by NCSA, OSC,
XUSC, the U.S. Dept of Energy, Sandia, PNNL, U of Buffalo, TeraGrid,
Xand many other leading edge HPC organizations.
X
XWWW: http://www.clusterresources.com/pages/products/torque-resource-manager.php
END-of-torque/pkg-descr
# Write torque/pkg-plist, the package file/directory manifest.
# Fix: the mailing-list archiver mangled the "@dirrm" plist directives
# into " at dirrm", which breaks deinstallation; restored to "@dirrm".
echo x - torque/pkg-plist
# Idempotent; lets this archive section unpack standalone.
mkdir -p torque > /dev/null 2>&1
sed 's/^X//' >torque/pkg-plist << 'END-of-torque/pkg-plist'
Xinclude/pbs_error.h
Xinclude/pbs_ifl.h
Xinclude/tm.h
Xinclude/tm_.h
Xinclude/rpp.h
Xinclude/rm.h
Xlib/libtorque.so.0
Xlib/libtorque.la
Xlib/libtorque.a
Xsbin/pbs_server
Xsbin/pbs_sched
Xsbin/pbs_mom
Xsbin/pbs_iff
Xbin/qalter
Xbin/qdel
Xbin/qhold
Xbin/qmove
Xbin/qorder
Xbin/qmsg
Xbin/qrerun
Xbin/qrls
Xbin/qselect
Xbin/qsig
Xbin/qstat
Xbin/qsub
Xbin/pbsdsh
Xbin/qdisable
Xbin/qenable
Xbin/qrun
Xbin/qstart
Xbin/qstop
Xbin/qterm
Xbin/pbsnodes
Xbin/qmgr
Xbin/nqs2pbs
Xsbin/pbs_demux
Xsbin/momctl
Xbin/chk_tree
Xbin/hostn
Xbin/printjob
Xbin/printtracking
Xbin/tracejob
Xshare/examples/torque/torque.setup
Xshare/examples/torque/var/spool/torque/pbs_environment
Xshare/examples/torque/var/spool/torque/server_name
Xshare/examples/torque/var/spool/torque/sched_priv/sched_config
Xshare/examples/torque/var/spool/torque/sched_priv/resource_group
Xshare/examples/torque/var/spool/torque/sched_priv/holidays
Xshare/examples/torque/var/spool/torque/sched_priv/dedicated_time
X@dirrm share/examples/torque/var/spool/torque/aux
X@dirrm share/examples/torque/var/spool/torque/checkpoint
X@dirrm share/examples/torque/var/spool/torque/mom_logs
X@dirrm share/examples/torque/var/spool/torque/mom_priv/jobs
X@dirrm share/examples/torque/var/spool/torque/mom_priv
X@dirrm share/examples/torque/var/spool/torque/sched_logs
X@dirrm share/examples/torque/var/spool/torque/sched_priv
X@dirrm share/examples/torque/var/spool/torque/server_logs
X@dirrm share/examples/torque/var/spool/torque/server_priv/jobs
X@dirrm share/examples/torque/var/spool/torque/server_priv/queues
X@dirrm share/examples/torque/var/spool/torque/server_priv/acl_svr
X@dirrm share/examples/torque/var/spool/torque/server_priv/acl_hosts
X@dirrm share/examples/torque/var/spool/torque/server_priv/acl_users
X@dirrm share/examples/torque/var/spool/torque/server_priv/acl_groups
X@dirrm share/examples/torque/var/spool/torque/server_priv/accounting
X@dirrm share/examples/torque/var/spool/torque/server_priv
X@dirrm share/examples/torque/var/spool/torque/undelivered
X@dirrm share/examples/torque/var/spool/torque/spool
X@dirrm share/examples/torque/var/spool/torque
X@dirrm share/examples/torque/var/spool
X@dirrm share/examples/torque/var
X@dirrm share/examples/torque
END-of-torque/pkg-plist
# Create torque/files and write pkg-message.in, the post-install
# message shown to the user after "pkg_add"/"make install".
# %%PREFIX%% placeholders are expanded by the ports framework via
# SUB_FILES (see the port Makefile).
echo c - torque/files
mkdir -p torque/files > /dev/null 2>&1
echo x - torque/files/pkg-message.in
sed 's/^X//' >torque/files/pkg-message.in << 'END-of-torque/files/pkg-message.in'
X***************************************************************
XYou can find example configuration in
X
X %%PREFIX%%/share/examples/torque/var/spool/torque
X
XOn all machines, both server and computing nodes, copy it into
X
X /var/spool/torque/
X
Xand put hostname of the server into
X
X /var/spool/torque/server_name
X
XOn computing nodes, put '$pbsserver ip.address.of.server' into
X
X /var/spool/torque/mom_priv/config
X
XOn server, put hostnames of computing nodes, one per line, into
X
X /var/spool/torque/server_priv/nodes
X
XFor both server and computing nodes, add the following line
Xto /etc/rc.conf:
X
X pbs_mom_enable="YES"
X
XFor this machine to work as TORQUE server, add the following
Xlines to /etc/rc.conf:
X
X pbs_server_enable="YES"
X pbs_sched_enable="YES"
X
XTo configure queues on server, execute
X
X %%PREFIX%%/share/examples/torque/torque.setup <admin>
X
XFor further information, see
X
X http://www.clusterresources.com/torquedocs21/
X
X***************************************************************
END-of-torque/files/pkg-message.in
# Write the rc(8) startup-script template for the pbs_sched daemon.
# %%RC_SUBR%% and %%PREFIX%% are substituted at build time by the
# ports framework (USE_RC_SUBR in the port Makefile).  The pidfile
# path matches the lock file pbs_sched itself creates.
# NOTE(review): defaults use ${var="NO"} rather than the customary
# ${var:="NO"}; identical unless the rc.conf variable is set but
# empty — confirm intended.
echo x - torque/files/pbs_sched.sh.in
sed 's/^X//' >torque/files/pbs_sched.sh.in << 'END-of-torque/files/pbs_sched.sh.in'
X#!/bin/sh
X
X# PROVIDE: pbs_sched
X# REQUIRE: pbs_server
X#
X# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
X# to enable this service:
X#
X# pbs_sched_enable (bool): Set to NO by default.
X# Set it to YES to enable pbs_sched.
X#
X
X. %%RC_SUBR%%
X
Xname="pbs_sched"
Xrcvar=${name}_enable
X
Xpidfile=/var/spool/torque/sched_priv/sched.lock
Xcommand=%%PREFIX%%/sbin/${name}
X
Xload_rc_config $name
X
X: ${pbs_sched_enable="NO"}
X
Xcommand_args=""
X
Xrun_rc_command "$1"
END-of-torque/files/pbs_sched.sh.in
# Write the rc(8) startup-script template for the pbs_server daemon.
# Same structure as pbs_sched.sh.in; no REQUIRE dependency because
# pbs_server is the first TORQUE daemon to start (pbs_sched REQUIREs
# it in turn).
echo x - torque/files/pbs_server.sh.in
sed 's/^X//' >torque/files/pbs_server.sh.in << 'END-of-torque/files/pbs_server.sh.in'
X#!/bin/sh
X
X# PROVIDE: pbs_server
X# REQUIRE:
X#
X# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
X# to enable this service:
X#
X# pbs_server_enable (bool): Set to NO by default.
X# Set it to YES to enable pbs_server.
X#
X
X. %%RC_SUBR%%
X
Xname="pbs_server"
Xrcvar=${name}_enable
X
Xpidfile=/var/spool/torque/server_priv/server.lock
Xcommand=%%PREFIX%%/sbin/${name}
X
Xload_rc_config $name
X
X: ${pbs_server_enable="NO"}
X
Xcommand_args=""
X
Xrun_rc_command "$1"
END-of-torque/files/pbs_server.sh.in
echo x - torque/files/patch-mom_mach.c
sed 's/^X//' >torque/files/patch-mom_mach.c << 'END-of-torque/files/patch-mom_mach.c'
X--- src.orig/resmom/freebsd/mom_mach.c Thu Jul 20 21:04:54 2006
X+++ src/resmom/freebsd/mom_mach.c Thu Jul 20 21:05:39 2006
X@@ -370,40 +370,10 @@
X continue;
X
X nps++;
X- cputime += tvk(pp->kp_proc.p_rtime);
X
X- if (pp->kp_proc.p_ru == NULL) {
X- struct pstats ps;
X-
X- DBPRT(("%s: p_stats 0x%lx\n", id,
X- (u_long)pp->kp_proc.p_stats))
X- if (pp->kp_proc.p_stats == NULL)
X- continue;
X-
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_stats, &ps,
X- sizeof(ps)) != sizeof(ps)) {
X- log_err(errno, id, "kvm_read(pstats)");
X- continue;
X- }
X- cputime += tv(ps.p_ru.ru_utime) +
X- tv(ps.p_ru.ru_stime) +
X- tv(ps.p_cru.ru_utime) +
X- tv(ps.p_cru.ru_stime);
X- }
X- else {
X- struct rusage ru;
X-
X- DBPRT(("%s: p_ru 0x%lx\n", id,
X- (u_long)pp->kp_proc.p_ru))
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_ru, &ru,
X- sizeof(ru)) != sizeof(ru)) {
X- log_err(errno, id, "kvm_read(session)");
X- continue;
X- }
X- cputime += tv(ru.ru_utime) + tv(ru.ru_stime);
X- }
X+ cputime += tv(pp->ki_rusage.ru_utime) + tv(pp->ki_rusage.ru_stime);
X DBPRT(("%s: ses %d pid %d cputime %d\n", id,
X- sess_tbl[i], pp->kp_proc.p_pid, cputime))
X+ sess_tbl[i], pp->ki_pid, cputime))
X }
X
X if (nps == 0)
X@@ -434,11 +404,9 @@
X if (!injob(pjob, sess_tbl[i]))
X continue;
X
X- memsize += ctob(pp->kp_eproc.e_vm.vm_tsize +
X- pp->kp_eproc.e_vm.vm_dsize +
X- pp->kp_eproc.e_vm.vm_ssize);
X+ memsize += ctob(pp->ki_tsize + pp->ki_dsize + pp->ki_ssize);
X DBPRT(("%s: ses %d pid=%d totmem=%lu\n", id,
X- sess_tbl[i], pp->kp_proc.p_pid, memsize))
X+ sess_tbl[i], pp->ki_pid, memsize))
X }
X
X return (memsize);
X@@ -461,10 +429,10 @@
X if (!injob(pjob, sess_tbl[i]))
X continue;
X
X- memsize += ctob(pp->kp_eproc.e_vm.vm_rssize);
X+ memsize += ctob(pp->ki_rssize);
X DBPRT(("%s: pid=%d ses=%d mem=%d totmem=%d\n", id,
X- pp->kp_proc.p_pid, sess_tbl[i],
X- pp->kp_eproc.e_vm.vm_rssize, memsize))
X+ pp->ki_pid, sess_tbl[i],
X+ pp->ki_rssize, memsize))
X }
X
X return (memsize);
X@@ -485,9 +453,7 @@
X if (!injob(pjob, sess_tbl[i]))
X continue;
X
X- if (ctob(pp->kp_eproc.e_vm.vm_tsize +
X- pp->kp_eproc.e_vm.vm_dsize +
X- pp->kp_eproc.e_vm.vm_ssize) > limit)
X+ if (ctob(pp->ki_tsize + pp->ki_dsize + pp->ki_ssize) > limit)
X return (TRUE);
X }
X
X@@ -732,7 +698,7 @@
X const void *b)
X
X {
X- return((int)((struct kinfo_proc *)a)->kp_eproc.e_paddr - (int)((struct kinfo_proc *)b)->kp_eproc.e_paddr);
X+ return((int)((struct kinfo_proc *)a)->ki_paddr - (int)((struct kinfo_proc *)b)->ki_paddr);
X }
X
X int bs_cmp(
X@@ -741,7 +707,7 @@
X const void *member)
X
X {
X- return((int)((struct session *)key)->s_leader - (int)((struct kinfo_proc *)member)->kp_eproc.e_paddr);
X+ return((int)((struct session *)key)->s_leader - (int)((struct kinfo_proc *)member)->ki_paddr);
X }
X
X
X@@ -786,24 +752,10 @@
X
X qsort(proc_tbl, nproc, sizeof(struct kinfo_proc), qs_cmp);
X
X- for (i=0, kp=proc_tbl; i<nproc; i++, kp++) {
X- if (kvm_read(kd, (u_long)kp->kp_eproc.e_sess, &ss, sizeof(ss))
X- != sizeof(ss)) {
X- sprintf(log_buffer,
X- "kvm_read: %s", kvm_geterr(kd));
X- log_err(errno, id, log_buffer);
X- return (PBSE_SYSTEM);
X- }
X- if (ss.s_leader == kp->kp_eproc.e_paddr ||
X- ss.s_leader == NULL) {
X- sid = kp->kp_proc.p_pid;
X- }
X- else {
X- leader = bsearch(&ss, proc_tbl, nproc,
X- sizeof(struct kinfo_proc), bs_cmp);
X- sid = leader ? leader->kp_proc.p_pid : 0;
X- }
X- sess_tbl[i] = sid;
X+ for (i=0; i<nproc; i++) {
X+ struct kinfo_proc *pp = &proc_tbl[i];
X+
X+ sess_tbl[i] = pp->ki_sid;
X }
X
X return (PBSE_NONE);
X@@ -1012,8 +964,8 @@
X continue;
X
X DBPRT(("%s: send signal %d to pid %d\n", id,
X- sig, pp->kp_proc.p_pid))
X- (void)kill(pp->kp_proc.p_pid, sig);
X+ sig, pp->ki_pid))
X+ (void)kill(pp->ki_pid, sig);
X ++ct;
X }
X return ct;
X@@ -1118,36 +1070,10 @@
X if (jobid != sess_tbl[i])
X continue;
X
X- cputime += tvk(pp->kp_proc.p_rtime);
X-
X- if (pp->kp_proc.p_ru == NULL) {
X- struct pstats ps;
X-
X- if (pp->kp_proc.p_stats == NULL)
X- continue;
X-
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_stats, &ps,
X- sizeof(ps)) != sizeof(ps)) {
X- log_err(errno, id, "kvm_read(pstats)");
X- continue;
X- }
X- cputime += tv(ps.p_ru.ru_utime) +
X- tv(ps.p_ru.ru_stime) +
X- tv(ps.p_cru.ru_utime) +
X- tv(ps.p_cru.ru_stime);
X- }
X- else {
X- struct rusage ru;
X+ cputime += tv(pp->ki_rusage.ru_utime) + tv(pp->ki_rusage.ru_stime);
X
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_ru, &ru,
X- sizeof(ru)) != sizeof(ru)) {
X- log_err(errno, id, "kvm_read(session)");
X- continue;
X- }
X- cputime += tv(ru.ru_utime) + tv(ru.ru_stime);
X- }
X DBPRT(("%s: ses %d pid %d cputime %d\n", id,
X- jobid, pp->kp_proc.p_pid, cputime))
X+ jobid, pp->ki_pid, cputime))
X
X }
X
X@@ -1166,37 +1092,11 @@
X for (i=0; i<nproc; i++) {
X struct kinfo_proc *pp = &proc_tbl[i];
X
X- if (pid != pp->kp_proc.p_pid)
X+ if (pid != pp->ki_pid)
X continue;
X
X- cputime = tvk(pp->kp_proc.p_rtime);
X-
X- if (pp->kp_proc.p_ru == NULL) {
X- struct pstats ps;
X-
X- if (pp->kp_proc.p_stats == NULL)
X- break;
X-
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_stats, &ps,
X- sizeof(ps)) != sizeof(ps)) {
X- log_err(errno, id, "kvm_read(pstats)");
X- break;
X- }
X- cputime += tv(ps.p_ru.ru_utime) +
X- tv(ps.p_ru.ru_stime) +
X- tv(ps.p_cru.ru_utime) +
X- tv(ps.p_cru.ru_stime);
X- }
X- else {
X- struct rusage ru;
X+ cputime += tv(pp->ki_rusage.ru_utime) + tv(pp->ki_rusage.ru_stime);
X
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_ru, &ru,
X- sizeof(ru)) != sizeof(ru)) {
X- log_err(errno, id, "kvm_read(session)");
X- break;
X- }
X- cputime += tv(ru.ru_utime) + tv(ru.ru_stime);
X- }
X DBPRT(("%s: pid %d cputime %d\n", id, pid, cputime))
X
X sprintf(ret_string, "%.2f", (double)cputime * cputfactor);
X@@ -1261,9 +1161,7 @@
X continue;
X
X found = 1;
X- addmem = pp->kp_eproc.e_vm.vm_tsize +
X- pp->kp_eproc.e_vm.vm_dsize +
X- pp->kp_eproc.e_vm.vm_ssize;
X+ addmem = pp->ki_tsize + pp->ki_dsize + pp->ki_ssize;
X memsize += addmem;
X }
X if (found) {
X@@ -1289,12 +1187,10 @@
X for (i=0; i<nproc; i++) {
X struct kinfo_proc *pp = &proc_tbl[i];
X
X- if (pid != pp->kp_proc.p_pid)
X+ if (pid != pp->ki_pid)
X continue;
X
X- memsize = pp->kp_eproc.e_vm.vm_tsize +
X- pp->kp_eproc.e_vm.vm_dsize +
X- pp->kp_eproc.e_vm.vm_ssize;
X+ memsize = pp->ki_tsize + pp->ki_dsize + pp->ki_ssize;
X sprintf(ret_string, "%ukb", ctob(memsize) >> 10); /* KB */
X return ret_string;
X }
X@@ -1358,7 +1254,7 @@
X continue;
X
X found = 1;
X- resisize += pp->kp_eproc.e_vm.vm_rssize;
X+ resisize += pp->ki_rssize;
X }
X if (found) {
X sprintf(ret_string, "%ukb", ctob(resisize) >> 10); /* KB */
X@@ -1385,10 +1281,10 @@
X for (i=0; i<nproc; i++) {
X struct kinfo_proc *pp = &proc_tbl[i];
X
X- if (pid != pp->kp_proc.p_pid)
X+ if (pid != pp->ki_pid)
X continue;
X
X- resisize = pp->kp_eproc.e_vm.vm_rssize;
X+ resisize = pp->ki_rssize;
X sprintf(ret_string, "%ukb", ctob(resisize) >> 10); /* KB */
X return ret_string;
X }
X@@ -1462,12 +1358,12 @@
X for (i=0; i<nproc; i++) {
X struct kinfo_proc *pp = &proc_tbl[i];
X
X- if (pp->kp_eproc.e_pcred.p_ruid == 0)
X+ if (pp->ki_ruid == 0)
X continue;
X
X jobid = sess_tbl[i];
X DBPRT(("%s: pid %d sid %u\n",
X- id, (int)pp->kp_proc.p_pid, jobid))
X+ id, (int)pp->ki_pid, jobid))
X for (j=0; j<njids; j++) {
X if (jids[j] == jobid)
X break;
X@@ -1548,12 +1444,12 @@
X struct kinfo_proc *pp = &proc_tbl[i];
X
X DBPRT(("%s[%d]: pid %d sid %u\n",
X- id, num_pids, pp->kp_proc.p_pid, sess_tbl[i]))
X+ id, num_pids, pp->ki_pid, sess_tbl[i]))
X if (jobid != sess_tbl[i])
X continue;
X
X checkret(&fmt, 100);
X- sprintf(fmt, " %d", pp->kp_proc.p_pid);
X+ sprintf(fmt, " %d", pp->ki_pid);
X fmt += strlen(fmt);
X num_pids++;
X }
X@@ -1591,11 +1487,11 @@
X for (i=0; i<nproc; i++) {
X struct kinfo_proc *pp = &proc_tbl[i];
X
X- if ((uid = pp->kp_eproc.e_pcred.p_ruid) == 0)
X+ if ((uid = pp->ki_ruid) == 0)
X continue;
X
X DBPRT(("%s: pid %d uid %u\n",
X- id, (int)pp->kp_proc.p_pid, uid))
X+ id, (int)pp->ki_pid, uid))
X for (j=0; j<nuids; j++) {
X if (uids[j] == uid)
X break;
X@@ -1884,23 +1780,12 @@
X continue;
X }
X else {
X- if (value != pp->kp_proc.p_pid)
X+ if (value != pp->ki_pid)
X continue;
X }
X
X- if (pp->kp_proc.p_stats == NULL) {
X- rm_errno = RM_ERR_SYSTEM;
X- return NULL;
X- }
X-
X- if (kvm_read(kd, (u_long)pp->kp_proc.p_stats, &ps,
X- sizeof(ps)) != sizeof(ps)) {
X- log_err(errno, id, "kvm_read(pstats)");
X- rm_errno = RM_ERR_SYSTEM;
X- return NULL;
X- }
X found = 1;
X- start = MIN(start, ps.p_start.tv_sec);
X+ start = MIN(start, pp->ki_start.tv_sec);
X }
X if (found) {
X sprintf(ret_string, "%ld", (long)((double)(now - start) * wallfactor));
X@@ -2035,7 +1920,7 @@
X dirdev = sb.st_dev;
X DBPRT(("dir has devnum %d\n", dirdev))
X
X- if (setfsent() == NULL) {
X+ if (setfsent() == 0) {
X log_err(errno, id, "setfsent");
X rm_errno = RM_ERR_SYSTEM;
X return NULL;
END-of-torque/files/patch-mom_mach.c
# Write the rc(8) startup-script template for the pbs_mom (compute
# node) daemon.  Mirrors pbs_server.sh.in: no REQUIRE dependency,
# since a compute node may run pbs_mom without a local server.
echo x - torque/files/pbs_mom.sh.in
sed 's/^X//' >torque/files/pbs_mom.sh.in << 'END-of-torque/files/pbs_mom.sh.in'
X#!/bin/sh
X
X# PROVIDE: pbs_mom
X# REQUIRE:
X#
X# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
X# to enable this service:
X#
X# pbs_mom_enable (bool): Set to NO by default.
X# Set it to YES to enable pbs_mom.
X#
X
X. %%RC_SUBR%%
X
Xname="pbs_mom"
Xrcvar=${name}_enable
X
Xpidfile=/var/spool/torque/mom_priv/mom.lock
Xcommand=%%PREFIX%%/sbin/${name}
X
Xload_rc_config $name
X
X: ${pbs_mom_enable="NO"}
X
Xcommand_args=""
X
Xrun_rc_command "$1"
END-of-torque/files/pbs_mom.sh.in
# Write files/patch-fifo-Makefile.in: a port patch that redirects the
# fifo scheduler's config-file install directory from the runtime
# PBS_SERVER_HOME into the port's share/examples tree.
# The diff payload below must stay byte-identical or patch(1) will
# fail to apply it during the build.
echo x - torque/files/patch-fifo-Makefile.in
sed 's/^X//' >torque/files/patch-fifo-Makefile.in << 'END-of-torque/files/patch-fifo-Makefile.in'
X--- src.orig/scheduler.cc/samples/fifo/Makefile.in Wed Jul 19 22:47:42 2006
X+++ src/scheduler.cc/samples/fifo/Makefile.in Wed Jul 19 22:59:44 2006
X@@ -132,7 +132,7 @@
X XPBSMON_DIR = $(libdir)/xpbsmon
X
X # Support files used by the scheduler
X-schedprivdir = $(PBS_SERVER_HOME)/sched_priv
X+schedprivdir = ${prefix}/share/examples/torque/var/spool/torque/sched_priv
X schedpriv_DATA = sched_config resource_group holidays dedicated_time
X
X EXTRA_DIST = $(schedpriv_DATA)
END-of-torque/files/patch-fifo-Makefile.in
# Write files/patch-pbs_mkdirs.in: a port patch that points
# PBS_SERVER_HOME at the examples tree during "make install" so the
# skeleton spool layout lands under share/examples instead of /var.
# NOTE(review): the replacement path hardcodes /usr/local instead of
# respecting the user's PREFIX (the sibling fifo patch uses
# ${prefix}); confirm whether this breaks non-default-PREFIX builds.
# The diff payload below must stay byte-identical to apply cleanly.
echo x - torque/files/patch-pbs_mkdirs.in
sed 's/^X//' >torque/files/patch-pbs_mkdirs.in << 'END-of-torque/files/patch-pbs_mkdirs.in'
Xdiff -urN buildutils.orig/pbs_mkdirs.in buildutils/pbs_mkdirs.in
X--- buildutils.orig/pbs_mkdirs.in Wed Jul 19 23:10:12 2006
X+++ buildutils/pbs_mkdirs.in Wed Jul 19 23:10:34 2006
X@@ -112,7 +112,7 @@
X # PBS_SERVER_HOME is defined without DESTDIR so that DEFAULT_FILE and EVIRON
X # can be based on SERVER_HOME. SERVER_HOME is defined a second time to pick
X # up DESTDIR
X-test -n "$PBS_SERVER_HOME" || PBS_SERVER_HOME=@PBS_SERVER_HOME@
X+test -n "$PBS_SERVER_HOME" || PBS_SERVER_HOME=/usr/local/share/examples/torque/var/spool/torque
X test -n "$PBS_DEFAULT_FILE" || PBS_DEFAULT_FILE=${DESTDIR}@PBS_DEFAULT_FILE@
X test -n "$PBS_ENVIRON" || PBS_ENVIRON=${DESTDIR}@PBS_ENVIRON@
X
END-of-torque/files/patch-pbs_mkdirs.in
exit
>How-To-Repeat:
>Fix:
>Release-Note:
>Audit-Trail:
>Unformatted:
More information about the freebsd-ports-bugs
mailing list