# ---- apt ----
#!/bin/sh
#set -e
#
# This file understands the following apt configuration variables:
# Values here are the default.
# Create /etc/apt/apt.conf.d/02periodic file to set your preference (a sketch follows this comment block).
#
# Dir "/";
# - RootDir for all configuration files
#
# Dir::Cache "var/apt/cache/";
# - Set apt package cache directory
#
# Dir::Cache::Archives "archives/";
# - Set package archive directory
#
# APT::Periodic::Enable "1";
# - Enable the update/upgrade script (0=disable)
#
# APT::Periodic::BackupArchiveInterval "0";
# - Backup after n-days if archive contents changed (0=disable)
#
# APT::Periodic::BackupLevel "3";
# - Backup level (0=disable); a value of 1 is invalid.
#
# Dir::Cache::Backup "backup/";
# - Set periodic package backup directory
#
# APT::Archives::MaxAge "0"; (old, deprecated)
# APT::Periodic::MaxAge "0"; (new)
# - Set maximum allowed age of a cache package file. If a cache
# package file is older it is deleted (0=disable)
#
# APT::Archives::MinAge "2"; (old, deprecated)
# APT::Periodic::MinAge "2"; (new)
# - Set minimum age of a package file. If a file is younger it
# will not be deleted (0=disable). Useful to prevent races
# and to keep backups of the packages for emergencies.
#
# APT::Archives::MaxSize "0"; (old, deprecated)
# APT::Periodic::MaxSize "0"; (new)
# - Set maximum size of the cache in MB (0=disable). If the cache
# is bigger, cached package files are deleted until the size
# requirement is met (the biggest packages will be deleted
# first).
#
# APT::Periodic::Update-Package-Lists "0";
# - Do "apt-get update" automatically every n-days (0=disable)
#
# APT::Periodic::Download-Upgradeable-Packages "0";
# - Do "apt-get upgrade --download-only" every n-days (0=disable)
#
# APT::Periodic::Download-Upgradeable-Packages-Debdelta "1";
# - Use debdelta-upgrade to download updates if available (0=disable)
#
# APT::Periodic::Unattended-Upgrade "0";
# - Run the "unattended-upgrade" security upgrade script
# every n-days (0=disabled)
# Requires the package "unattended-upgrades" and will write
# a log in /var/log/unattended-upgrades
#
# APT::Periodic::AutocleanInterval "0";
# - Do "apt-get autoclean" every n-days (0=disable)
#
# APT::Periodic::Verbose "0";
# - Send report mail to root
# 0: no report (or null string)
# 1: progress report (actually any string)
# 2: + command outputs (remove -qq, remove 2>/dev/null, add -d)
# 3: + trace on
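#
# A minimal sketch of an /etc/apt/apt.conf.d/02periodic file, using only the
# options documented above; the values are illustrative choices, not the
# shipped defaults:
#
#   APT::Periodic::Enable "1";
#   APT::Periodic::Update-Package-Lists "1";
#   APT::Periodic::Download-Upgradeable-Packages "1";
#   APT::Periodic::Unattended-Upgrade "1";
#   APT::Periodic::AutocleanInterval "7";
#   APT::Periodic::Verbose "1";
#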
check_stamp()
{
stamp="$1"
interval="$2"
if [ $interval -eq 0 ]; then
debug_echo "check_stamp: interval=0"
# treat as no time has passed
return 1
fi
if [ ! -f $stamp ]; then
debug_echo "check_stamp: missing time stamp file: $stamp."
# treat as enough time has passed
return 0
fi
# compare midnight today to midnight the day the stamp was updated
stamp_file="$stamp"
stamp=$(date --date=$(date -r $stamp_file --iso-8601) +%s 2>/dev/null)
if [ "$?" != "0" ]; then
# Due to some timezones returning 'invalid date' for midnight on
# certain dates (eg America/Sao_Paulo), if date returns with error
# remove the stamp file and return 0. See coreutils bug:
# http://lists.gnu.org/archive/html/bug-coreutils/2007-09/msg00176.html
rm -f "$stamp_file"
return 0
fi
now=$(date --date=$(date --iso-8601) +%s 2>/dev/null)
if [ "$?" != "0" ]; then
# As above, due to some timezones returning 'invalid date' for midnight
# on certain dates (eg America/Sao_Paulo), if date returns with error
# return 0.
return 0
fi
delta=$(($now-$stamp))
# interval is in days, convert to sec.
interval=$(($interval*60*60*24))
debug_echo "check_stamp: interval=$interval, now=$now, stamp=$stamp, delta=$delta (sec)"
# remove timestamps a day (or more) in the future and force re-check
if [ $stamp -gt $(($now+86400)) ]; then
echo "WARNING: file $stamp_file has a timestamp in the future: $stamp"
rm -f "$stamp_file"
return 0
fi
if [ $delta -ge $interval ]; then
return 0
fi
return 1
}
update_stamp()
{
stamp="$1"
touch $stamp
}
# we check here whether autoclean was sufficient size-wise
check_size_constraints()
{
MaxAge=0
eval $(apt-config shell MaxAge APT::Archives::MaxAge)
eval $(apt-config shell MaxAge APT::Periodic::MaxAge)
MinAge=2
eval $(apt-config shell MinAge APT::Archives::MinAge)
eval $(apt-config shell MinAge APT::Periodic::MinAge)
MaxSize=0
eval $(apt-config shell MaxSize APT::Archives::MaxSize)
eval $(apt-config shell MaxSize APT::Periodic::MaxSize)
Cache="/var/cache/apt/archives/"
eval $(apt-config shell Cache Dir::Cache::archives/d)
# sanity check
if [ -z "$Cache" ]; then
echo "empty Dir::Cache::archives, exiting"
exit
fi
# check age
if [ ! $MaxAge -eq 0 ] && [ ! $MinAge -eq 0 ]; then
debug_echo "aged: ctime <$MaxAge and mtime <$MaxAge and ctime>$MinAge and mtime>$MinAge"
find $Cache -name "*.deb" \( -mtime +$MaxAge -and -ctime +$MaxAge \) -and -not \( -mtime -$MinAge -or -ctime -$MinAge \) -print0 | xargs -r -0 rm -f
elif [ ! $MaxAge -eq 0 ]; then
debug_echo "aged: ctime <$MaxAge and mtime <$MaxAge only"
find $Cache -name "*.deb" -ctime +$MaxAge -and -mtime +$MaxAge -print0 | xargs -r -0 rm -f
else
debug_echo "skip aging since MaxAge is 0"
fi
# check size
if [ ! $MaxSize -eq 0 ]; then
# MaxSize is given in MB; du -s reports KiB, so convert
MaxSize=$(($MaxSize*1024))
#get current time
now=$(date --date=$(date --iso-8601) +%s)
MinAge=$(($MinAge*24*60*60))
# iterate oldest first by mtime (ls -rt)
for file in $(ls -rt $Cache/*.deb 2>/dev/null); do
du=$(du -s $Cache)
size=${du%%/*}
# check if the cache is small enough
if [ $size -lt $MaxSize ]; then
debug_echo "end remove by archive size: size=$size < $MaxSize"
break
fi
# check for MinAge of the file
if [ $MinAge -ne 0 ]; then
# check both ctime and mtime
mtime=$(stat -c %Y $file)
ctime=$(stat -c %Z $file)
if [ $mtime -gt $ctime ]; then
delta=$(($now-$mtime))
else
delta=$(($now-$ctime))
fi
if [ $delta -le $MinAge ]; then
debug_echo "skip remove by archive size: $file, delta=$delta < $MinAgeSec"
break
else
# delete oldest file
debug_echo "remove by archive size: $file, delta=$delta >= $MinAgeSec (sec), size=$size >= $MaxSize"
rm -f $file
fi
fi
done
fi
}
# deal with the Apt::Periodic::BackupArchiveInterval
do_cache_backup()
{
BackupArchiveInterval="$1"
if [ $BackupArchiveInterval -eq 0 ]; then
return
fi
# Set default values and normalize
CacheDir="/var/cache/apt"
eval $(apt-config shell CacheDir Dir::Cache/d)
CacheDir=${CacheDir%/}
if [ -z "$CacheDir" ]; then
debug_echo "practically empty Dir::Cache, exiting"
return 0
fi
Cache="${CacheDir}/archives/"
eval $(apt-config shell Cache Dir::Cache::Archives/d)
if [ -z "$Cache" ]; then
debug_echo "practically empty Dir::Cache::archives, exiting"
return 0
fi
BackupLevel=3
eval $(apt-config shell BackupLevel APT::Periodic::BackupLevel)
if [ $BackupLevel -le 1 ]; then
BackupLevel=2 ;
fi
Back="${CacheDir}/backup/"
eval $(apt-config shell Back Dir::Cache::Backup/d)
if [ -z "$Back" ]; then
echo "practically empty Dir::Cache::Backup, exiting" 1>&2
return
fi
CacheArchive="$(basename "${Cache}")"
test -n "${CacheArchive}" || CacheArchive="archives"
BackX="${Back}${CacheArchive}/"
for x in $(seq 0 1 $((${BackupLevel}-1))); do
eval "Back${x}=${Back}${x}/"
done
# backup after n-days if archive contents changed.
# (This uses hardlink to save disk space)
BACKUP_ARCHIVE_STAMP=/var/lib/apt/periodic/backup-archive-stamp
if check_stamp $BACKUP_ARCHIVE_STAMP $BackupArchiveInterval; then
if [ $({(cd $Cache 2>/dev/null; find . -name "*.deb"); (cd $Back0 2>/dev/null;find . -name "*.deb") ;}| sort|uniq -u|wc -l) -ne 0 ]; then
mkdir -p $Back
rm -rf $Back$((${BackupLevel}-1))
for y in $(seq $((${BackupLevel}-1)) -1 1); do
eval BackY=${Back}$y
eval BackZ=${Back}$(($y-1))
if [ -e $BackZ ]; then
mv -f $BackZ $BackY ;
fi
done
cp -la $Cache $Back ; mv -f $BackX $Back0
update_stamp $BACKUP_ARCHIVE_STAMP
debug_echo "backup with hardlinks. (success)"
else
debug_echo "skip backup since same content."
fi
else
debug_echo "skip backup since too new."
fi
}
# sleep for a random interval of time (default 30min)
# (some code taken from cron-apt, thanks)
random_sleep()
{
RandomSleep=1800
eval $(apt-config shell RandomSleep APT::Periodic::RandomSleep)
if [ $RandomSleep -eq 0 ]; then
return
fi
if [ -z "$RANDOM" ] ; then
# A fix for shells that do not have this bash feature.
RANDOM=$(dd if=/dev/urandom count=1 2> /dev/null | cksum | cut -c"1-5")
fi
TIME=$(($RANDOM % $RandomSleep))
debug_echo "sleeping for $TIME seconds"
sleep $TIME
}
debug_echo()
{
# Display message if $VERBOSE >= 1
if [ "$VERBOSE" -ge 1 ]; then
echo $1 1>&2
fi
}
# ------------------------ main ----------------------------
# Back up the last 7 versions of APT's extended_states file
# shameless copy from dpkg cron
if cd /var/backups ; then
if ! cmp -s apt.extended_states.0 /var/lib/apt/extended_states; then
cp -p /var/lib/apt/extended_states apt.extended_states
savelog -c 7 apt.extended_states >/dev/null
fi
fi
# check that apt-config exists
if ! which apt-config >/dev/null ; then
exit 0
fi
# check if the user really wants to do something
AutoAptEnable=1 # default is yes
eval $(apt-config shell AutoAptEnable APT::Periodic::Enable)
if [ $AutoAptEnable -eq 0 ]; then
exit 0
fi
# Set VERBOSE mode from apt-config (or inherit from environment)
VERBOSE=0
eval $(apt-config shell VERBOSE APT::Periodic::Verbose)
debug_echo "verbose level $VERBOSE"
if [ "$VERBOSE" -le 2 ]; then
# quiet for 0,1,2
XSTDOUT=">/dev/null"
XSTDERR="2>/dev/null"
XAPTOPT="-qq"
XUUPOPT=""
else
XSTDOUT=""
XSTDERR=""
XAPTOPT=""
XUUPOPT="-d"
fi
if [ "$VERBOSE" -ge 3 ]; then
# trace output
set -x
fi
# laptop check, on_ac_power returns:
# 0 (true) System is on main power
# 1 (false) System is not on main power
# 255 (false) Power status could not be determined
# Desktop systems always return 255 it seems
if which on_ac_power >/dev/null; then
on_ac_power
POWER=$?
if [ $POWER -eq 1 ]; then
debug_echo "exit: system NOT on main power"
exit 0
elif [ $POWER -ne 0 ]; then
debug_echo "power status ($POWER) undetermined, continuing"
fi
debug_echo "system is on main power."
fi
# check if we can lock the cache and if the cache is clean
if which apt-get >/dev/null && ! eval apt-get check -f $XAPTOPT $XSTDERR ; then
debug_echo "error encountered in cron job with \"apt-get check\"."
exit 0
fi
# Global current time in seconds since 1970-01-01 00:00:00 UTC
now=$(date +%s)
# Support old Archive for compatibility.
# Document only Periodic for all controlling parameters of this script.
UpdateInterval=0
eval $(apt-config shell UpdateInterval APT::Periodic::Update-Package-Lists)
DownloadUpgradeableInterval=0
eval $(apt-config shell DownloadUpgradeableInterval APT::Periodic::Download-Upgradeable-Packages)
UnattendedUpgradeInterval=0
eval $(apt-config shell UnattendedUpgradeInterval APT::Periodic::Unattended-Upgrade)
AutocleanInterval=0
eval $(apt-config shell AutocleanInterval APT::Periodic::AutocleanInterval)
BackupArchiveInterval=0
eval $(apt-config shell BackupArchiveInterval APT::Periodic::BackupArchiveInterval)
Debdelta=1
eval $(apt-config shell Debdelta APT::Periodic::Download-Upgradeable-Packages-Debdelta)
# check if we actually have to do anything that requires locking the cache
if [ $UpdateInterval -eq 0 ] &&
[ $DownloadUpgradeableInterval -eq 0 ] &&
[ $UnattendedUpgradeInterval -eq 0 ] &&
[ $BackupArchiveInterval -eq 0 ] &&
[ $AutocleanInterval -eq 0 ]; then
# check cache size
check_size_constraints
exit 0
fi
# deal with BackupArchiveInterval
do_cache_backup $BackupArchiveInterval
# sleep random amount of time to avoid hitting the
# mirrors at the same time
random_sleep
# include default system language so that "apt-get update" will
# fetch the right translated package descriptions
if [ -r /etc/default/locale ]; then
. /etc/default/locale
export LANG LANGUAGE LC_MESSAGES LC_ALL
fi
# update package lists
UPDATED=0
UPDATE_STAMP=/var/lib/apt/periodic/update-stamp
if check_stamp $UPDATE_STAMP $UpdateInterval; then
if eval apt-get $XAPTOPT -y update $XSTDERR; then
debug_echo "download updated metadata (success)."
if which dbus-send >/dev/null && pidof dbus-daemon >/dev/null; then
if dbus-send --system / app.apt.dbus.updated boolean:true ; then
debug_echo "send dbus signal (success)"
else
debug_echo "send dbus signal (error)"
fi
else
debug_echo "dbus signal not send (command not available)"
fi
update_stamp $UPDATE_STAMP
UPDATED=1
else
debug_echo "download updated metadata (error)"
fi
else
debug_echo "download updated metadata (not run)."
fi
# download all upgradeable packages (if it is requested)
DOWNLOAD_UPGRADEABLE_STAMP=/var/lib/apt/periodic/download-upgradeable-stamp
if [ $UPDATED -eq 1 ] && check_stamp $DOWNLOAD_UPGRADEABLE_STAMP $DownloadUpgradeableInterval; then
if [ $Debdelta -eq 1 ]; then
debdelta-upgrade >/dev/null 2>&1 || true
fi
if eval apt-get $XAPTOPT -y -d dist-upgrade $XSTDERR; then
update_stamp $DOWNLOAD_UPGRADEABLE_STAMP
debug_echo "download upgradable (success)"
else
debug_echo "download upgradable (error)"
fi
else
debug_echo "download upgradable (not run)"
fi
# auto upgrade all upgradeable packages
UPGRADE_STAMP=/var/lib/apt/periodic/upgrade-stamp
if [ $UPDATED -eq 1 ] && which unattended-upgrade >/dev/null && check_stamp $UPGRADE_STAMP $UnattendedUpgradeInterval; then
if unattended-upgrade $XUUPOPT; then
update_stamp $UPGRADE_STAMP
debug_echo "unattended-upgrade (success)"
else
debug_echo "unattended-upgrade (error)"
fi
else
debug_echo "unattended-upgrade (not run)"
fi
# autoclean package archive
AUTOCLEAN_STAMP=/var/lib/apt/periodic/autoclean-stamp
if check_stamp $AUTOCLEAN_STAMP $AutocleanInterval; then
if eval apt-get $XAPTOPT -y autoclean $XSTDERR; then
debug_echo "autoclean (success)."
update_stamp $AUTOCLEAN_STAMP
else
debug_echo "autoclean (error)"
fi
else
debug_echo "autoclean (not run)"
fi
# check cache size
check_size_constraints
#
# vim: set sts=4 ai :
#
# ---- samba ----
#!/bin/sh
#
# cron script to save a backup copy of /etc/samba/smbpasswd in /var/backups.
#
# Written by Eloy A. Paris for the Debian project.
#
BAK=/var/backups
umask 022
if cd $BAK; then
# Make sure /etc/samba/smbpasswd exists
if [ -f /etc/samba/smbpasswd ]; then
cmp -s smbpasswd.bak /etc/samba/smbpasswd || cp -p /etc/samba/smbpasswd smbpasswd.bak
fi
fi
# ---- sendmail ----
#!/bin/sh
set +e;
#------------------------------------------------------------------------------
# Autoconf variables - in a form suitable for sh, perl
# Generated automatically from autoconf.sh.in by configure.
#------------------------------------------------------------------------------
# Variables for, and by, Autoconf (Don't touch these! edit config step)
PACKAGE_NAME="Sendmail";
PACKAGE_VERSION="8.14.3";
prefix="/usr";
exec_prefix="/usr";
bindir="/usr/bin";
sbindir="/usr/sbin";
libexecdir="/usr/lib/sm.bin";
datadir="/usr/share";
sysconfdir="/etc";
sharedstatedir="/usr/com";
localstatedir="/var";
libdir="/usr/lib";
includedir="/usr/include";
infodir="/usr/share/info";
mandir="/usr/share/man";
docdir="/usr/share/doc";
srcdir=".";
copies=2;
# backup "/etc/mail/sendmail.mc"
if [ -f ${sysconfdir}/mail/sendmail.mc ]; then
if [ ! -f ${localstatedir}/backups/sendmail.mc.bak ]; then
cp -pf ${sysconfdir}/mail/sendmail.mc \
${localstatedir}/backups/sendmail.mc.bak;
fi;
if ! cmp -s ${localstatedir}/backups/sendmail.mc.bak \
${sysconfdir}/mail/sendmail.mc ; then
cd ${localstatedir}/backups;
/usr/bin/savelog -p -c $copies sendmail.mc.bak > /dev/null;
cp -pf ${sysconfdir}/mail/sendmail.mc \
${localstatedir}/backups/sendmail.mc.bak;
fi;
fi;
# backup "/etc/mail/submit.mc"
if [ -f ${sysconfdir}/mail/submit.mc ]; then
if [ ! -f ${localstatedir}/backups/submit.mc.bak ]; then
cp -pf ${sysconfdir}/mail/submit.mc \
${localstatedir}/backups/submit.mc.bak;
fi;
if ! cmp -s ${localstatedir}/backups/submit.mc.bak \
${sysconfdir}/mail/submit.mc ; then
cd ${localstatedir}/backups;
/usr/bin/savelog -p -c $copies submit.mc.bak > /dev/null;
cp -pf ${sysconfdir}/mail/submit.mc \
${localstatedir}/backups/submit.mc.bak;
fi;
fi;
# while we're here, might as well do sendmail.cf
if [ -f ${sysconfdir}/mail/sendmail.cf ]; then
if [ ! -f ${localstatedir}/backups/sendmail.cf.bak ]; then
cp -pf ${sysconfdir}/mail/sendmail.cf \
${localstatedir}/backups/sendmail.cf.bak;
fi;
if ! cmp -s ${localstatedir}/backups/sendmail.cf.bak \
${sysconfdir}/mail/sendmail.cf ; then
cd ${localstatedir}/backups;
# save previous generation only if differences are significant
diff -bBwI "^\#\#\#\#\# " \
${localstatedir}/backups/sendmail.cf.bak \
${sysconfdir}/mail/sendmail.cf > /dev/null;
if [ $? -ne 0 ]; then
/usr/bin/savelog -p -c $copies sendmail.cf.bak \
> /dev/null;
fi;
cp -pf ${sysconfdir}/mail/sendmail.cf \
${localstatedir}/backups/sendmail.cf.bak;
fi;
fi;
# while we're here, might as well do submit.cf
if [ -f ${sysconfdir}/mail/submit.cf ]; then
if [ ! -f ${localstatedir}/backups/submit.cf.bak ]; then
cp -pf ${sysconfdir}/mail/submit.cf \
${localstatedir}/backups/submit.cf.bak;
fi;
if ! cmp -s ${localstatedir}/backups/submit.cf.bak \
${sysconfdir}/mail/submit.cf ; then
cd ${localstatedir}/backups;
# save previous generation only if differences are significant
diff -bBwI "^\#\#\#\#\# " \
${localstatedir}/backups/submit.cf.bak \
${sysconfdir}/mail/submit.cf > /dev/null;
if [ $? -ne 0 ]; then
/usr/bin/savelog -p -c $copies submit.cf.bak \
> /dev/null;
fi;
cp -pf ${sysconfdir}/mail/submit.cf \
${localstatedir}/backups/submit.cf.bak;
fi;
fi;
# ---- ntp ----
#!/bin/sh
# The default Debian ntp.conf enables logging of various statistics to
# the /var/log/ntpstats directory. The daemon automatically changes
# to a new datestamped set of files at midnight, so all we need to do
# is delete old ones, and compress the ones we're keeping so disk
# usage is controlled.
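# The statsdir parsed out below normally comes from a line like the following
# in the default Debian /etc/ntp.conf (shown here for illustration):
#
#   statsdir /var/log/ntpstats/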
statsdir=$(cat /etc/ntp.conf | grep -v '^#' | sed -n 's/statsdir \([^ ][^ ]*\)/\1/p')
if [ -n "$statsdir" ] && [ -d "$statsdir" ]; then
# only keep a week's depth of these
find "$statsdir" -type f -mtime +7 -exec rm {} \;
# compress whatever is left to save space
cd "$statsdir"
ls loopstats.???????? peerstats.???????? > /dev/null 2>&1
if [ $? -eq 0 ]; then
# Note that gzip won't compress the file names that
# are hard links to the live/current files, so this
# compresses yesterday and previous, leaving the live
# log alone. We suppress the warnings gzip issues
# about not compressing the linked file.
gzip --best --quiet loopstats.???????? peerstats.????????
return=$?
case $return in
2)
exit 0 # squash all warnings
;;
*)
exit $return # but let real errors through
;;
esac
fi
fi
# ---- mlocate ----
#! /bin/bash
set -e
[ -x /usr/bin/updatedb.mlocate ] || exit 0
if which on_ac_power >/dev/null 2>&1; then
ON_BATTERY=0
on_ac_power >/dev/null 2>&1 || ON_BATTERY=$?
if [ "$ON_BATTERY" -eq 1 ]; then
exit 0
fi
fi
##
LOCKFILE="/var/lib/mlocate/daily.lock"
trap "rm -f $LOCKFILE" EXIT
if [ -e "$LOCKFILE" ]; then
echo >&2 "Warning: $LOCKFILE present, not running updatedb."
exit 1
else
touch "$LOCKFILE"
fi
##
# See ionice(1)
if [ -x /usr/bin/ionice ] &&
/usr/bin/ionice -c3 true 2>/dev/null; then
IONICE="/usr/bin/ionice -c3"
fi
$IONICE /usr/bin/updatedb.mlocate
# ---- passwd ----
#!/bin/sh
cd /var/backups || exit 0
for FILE in passwd group shadow gshadow; do
test -f /etc/$FILE || continue
cmp -s $FILE.bak /etc/$FILE && continue
cp -p /etc/$FILE $FILE.bak && chmod 600 $FILE.bak
done
# ---- logrotate ----
#!/bin/sh
test -x /usr/sbin/logrotate || exit 0
/usr/sbin/logrotate /etc/logrotate.conf
# ---- standard ----
#!/bin/sh
# /etc/cron.daily/standard: standard daily maintenance script
# Written by Ian A. Murdock
# Modified by Ian Jackson
# Modified by Steve Greenland
# Start in the root filesystem, make SElinux happy
cd /
bak=/var/backups
LOCKFILE=/var/lock/cron.daily
umask 022
#
# Avoid running more than one at a time
#
if [ -x /usr/bin/lockfile-create ] ; then
lockfile-create $LOCKFILE
if [ $? -ne 0 ] ; then
# lock acquisition failed; most likely a previous instance is still running
echo "Could not acquire lockfile $LOCKFILE; is a previous instance still running?" >&2
exit 1
fi
# keep the lockfile fresh while this script runs
lockfile-touch $LOCKFILE &
LOCKTOUCHPID="$!"
fi
#
# Look for files in the lost+found directories of locally mounted filesystems
#
lost_found=
no_lost_found=
lfdirs=`df --local --portability 2>/dev/null |
awk '/\/dev\// { print }' | sed -e 's/ [[:space:]]*/ /g' |
while read mount block used avail perc mp; do
[ "$mp" = "/" ] && mp=""
echo "$mp/lost+found"
done`
# Don't use space as a field separator
oldifs="$IFS"
IFS=`printf '\n\t'`
for lfdir in $lfdirs; do
# In each directory, look for files
if [ -d "$lfdir" ] ; then
more_lost_found=`ls -1 "$lfdir" 2>/dev/null | grep -v 'lost+found$' | sed 's/^/ /'`
if [ -n "$more_lost_found" ] ; then
lost_found="$lost_found
$lfdir:
$more_lost_found"
# NOTE: above weird line breaks in string are intentional!
fi
else
# Do nothing for XFS filesystems; they do not need to
# have a lost+found dir
fs=`cat /proc/mounts | grep " ${lfdir%/lost+found} "`
case "$fs" in
ext*)
no_lost_found="$no_lost_found
$lfdir"
;;
*)
;;
esac
fi
done
# Restore IFS
IFS="$oldifs"
unset oldifs
# NOTE: This might need to be made configurable if many systems
# out there lack lost+found, to prevent giving out this warning
# every day.
if [ -n "$lost_found" ]; then
cat << EOF
Files were found in lost+found directories. This is probably
the result of a crash or bad shutdown, or possibly of a disk
problem. These files may contain important information. You
should examine them, and move them out of lost+found or delete
them if they are not important.
The following files were found:
$lost_found
EOF
fi
if [ -n "$no_lost_found" ]; then
cat << EOF
Some local filesystems do not have lost+found directories. This
means that these filesystems will not be able to recover
lost files when the filesystem is checked after a crash.
Consider creating a lost+found directory with mklost+found(8).
The following lost+found directories were not available:
$no_lost_found
EOF
fi
#
# Clean up lockfile
#
if [ -x /usr/bin/lockfile-create ] ; then
kill $LOCKTOUCHPID
lockfile-remove $LOCKFILE
fi
# ---- man-db ----
#!/bin/sh
#
# man-db cron daily
set -e
iosched_idle=
# Don't try to change I/O priority in a vserver or OpenVZ.
if ! egrep -q '(envID|VxID):.*[1-9]' /proc/self/status && \
([ ! -d /proc/vz ] || [ -d /proc/bc ]); then
dpkg_version="$(dpkg-query -W -f '${Version}' dpkg)"
if dpkg --compare-versions "$dpkg_version" ge 1.15.0; then
iosched_idle='--iosched idle'
fi
fi
if ! [ -d /var/cache/man ]; then
# Recover from deletion, per FHS.
mkdir -p /var/cache/man
chown man:root /var/cache/man || true
chmod 2755 /var/cache/man
fi
# expunge old catman pages which have not been read in a week
if [ -d /var/cache/man ]; then
cd /
if ! dpkg-statoverride --list /var/cache/man >/dev/null 2>&1; then
chown -R man /var/cache/man || true # just in case
fi
start-stop-daemon --start --pidfile /dev/null --startas /bin/sh \
--oknodo --chuid man $iosched_idle -- -c \
"find /var/cache/man -type f -name '*.gz' -atime +6 -print0 | \
xargs -r0 rm -f"
fi
# regenerate man database
if [ -x /usr/bin/mandb ]; then
# --pidfile /dev/null so it always starts; mandb isn't really a daemon,
# but we want to start it like one.
start-stop-daemon --start --pidfile /dev/null \
--startas /usr/bin/mandb --oknodo --chuid man \
$iosched_idle \
-- --no-purge --quiet
fi
exit 0
# ---- dpkg ----
#!/bin/sh
# Back up the last 7 versions of dpkg's status file
if cd /var/backups ; then
if ! cmp -s dpkg.status.0 /var/lib/dpkg/status ; then
cp -p /var/lib/dpkg/status dpkg.status
savelog -c 7 dpkg.status >/dev/null
fi
fi
# ---- aptitude ----
#!/bin/sh
bak=/var/backups
# Shamelessly ripped from /etc/cron.daily/standard
if test -f /var/lib/aptitude/pkgstates && cd $bak ; then
if ! cmp -s aptitude.pkgstates.0 /var/lib/aptitude/pkgstates ; then
cp -p /var/lib/aptitude/pkgstates aptitude.pkgstates
savelog -c 7 aptitude.pkgstates > /dev/null
fi
fi
# ---- locate ----
#! /bin/sh
set -e
# cron script to update the `locatedb' database.
#
# Written by Ian A. Murdock and
# Kevin Dalley
# Please consult updatedb(1) and /usr/share/doc/locate/README.Debian
[ -e /usr/bin/updatedb.findutils ] || exit 0
if [ "$(id -u)" != "0" ]; then
echo "You must be root."
exit 1
fi
# Global options for invocations of find(1)
FINDOPTIONS='-ignore_readdir_race'
# filesystems which are pruned from updatedb database
PRUNEFS="NFS nfs nfs4 afs binfmt_misc proc smbfs autofs iso9660 ncpfs coda devpts ftpfs devfs mfs shfs sysfs cifs lustre_lite tmpfs usbfs udf ocfs2"
# paths which are pruned from updatedb database
PRUNEPATHS="/tmp /usr/tmp /var/tmp /afs /amd /alex /var/spool /sfs /media /var/lib/schroot/mount"
# netpaths which are added
NETPATHS=""
# run find as this user
LOCALUSER="nobody"
# cron.daily/find: run at this priority -- higher number means lower priority
# (this is relative to the default which cron sets, which is usually +5)
NICE=10
# I/O priority
# 1 for real time, 2 for best-effort, 3 for idle ("3" only allowed for root)
IONICE_CLASS=3
# 0-7 (only valid for IONICE_CLASS 1 and 2), 0=highest, 7=lowest
IONICE_PRIORITY=7
# allow keeping local customizations in a separate file
if [ -r /etc/updatedb.findutils.cron.local ] ; then
. /etc/updatedb.findutils.cron.local
fi
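# A minimal sketch of /etc/updatedb.findutils.cron.local, overriding the
# variables defined above; the values (and the /srv/mirror path) are
# illustrative assumptions, not recommendations:
#
#   NICE=19
#   PRUNEPATHS="$PRUNEPATHS /srv/mirror"
#   LOCALUSER="nobody"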
export FINDOPTIONS PRUNEFS PRUNEPATHS NETPATHS LOCALUSER
# Set the task to run with desired I/O priority if possible
# Linux supports io scheduling priorities and classes since
# 2.6.13 with the CFQ io scheduler
if [ -x /usr/bin/ionice ] && [ "${UPDATDB_NO_IONICE}" = "" ]; then
# don't run ionice if kernel version < 2.6.13
KVER=$(uname -r)
case "$KVER" in
2.[012345]*) ;;
2.6.[0-9]) ;;
2.6.[0-9].*) ;;
2.6.1[012]*) ;;
*)
# Avoid providing "-n" when IONICE_CLASS isn't 1 or 2
case "$IONICE_CLASS" in
1|2) priority="-n ${IONICE_PRIORITY:-7}" ;;
*) priority="" ;;
esac
ionice -c $IONICE_CLASS $priority -p $$ > /dev/null 2>&1 || true
;;
esac
fi
if getent passwd $LOCALUSER > /dev/null ; then
cd / && nice -n ${NICE:-10} updatedb.findutils 2>/dev/null
else
echo "User $LOCALUSER does not exist."
exit 1
fi
# ---- apache2 ----
#!/bin/sh
# run htcacheclean
set -e
set -u
[ -e /usr/sbin/htcacheclean ] || exit 0
[ -e /etc/default/apache2 ] || exit 0
# edit /etc/default/apache2 to change this
HTCACHECLEAN_MODE=daemon
HTCACHECLEAN_RUN=auto
HTCACHECLEAN_SIZE=300M
HTCACHECLEAN_PATH=/var/cache/apache2/mod_disk_cache
HTCACHECLEAN_OPTIONS=""
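# A sketch of the /etc/default/apache2 settings this script reacts to; the
# values are illustrative, and only HTCACHECLEAN_MODE=cron lets the
# htcacheclean run below happen:
#
#   HTCACHECLEAN_MODE=cron
#   HTCACHECLEAN_RUN=auto
#   HTCACHECLEAN_SIZE=512M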
. /etc/default/apache2
[ "$HTCACHECLEAN_MODE" = "cron" ] || exit 0
[ "$HTCACHECLEAN_RUN" = "yes" ] ||
( [ "$HTCACHECLEAN_RUN" = "auto" ] && \
[ -e /etc/apache2/mods-enabled/disk_cache.load ] ) || exit 0
/usr/sbin/htcacheclean ${HTCACHECLEAN_OPTIONS} \
-p${HTCACHECLEAN_PATH} \
-l${HTCACHECLEAN_SIZE}
# ---- cracklib-runtime ----
#!/bin/sh
set -e
if [ -x /usr/sbin/update-cracklib -a -r /etc/cracklib/cracklib.conf ]
then
status="$(/usr/sbin/update-cracklib)"
if [ -n "${status}" ]
then
/usr/bin/logger -p cron.info -t cracklib "updated dictionary (read/written words: ${status})."
else
/usr/bin/logger -p cron.info -t cracklib "no dictionary update necessary."
fi
fi
exit 0
# ---- .placeholder ----
# DO NOT EDIT OR REMOVE
# This file is a simple placeholder to keep dpkg from removing this directory
# ---- maldet ----
#!/usr/bin/env bash
export PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:$PATH
export LMDCRON=1
inspath='/usr/local/maldetect'
intcnf="$inspath/internals/internals.conf"
if [ -f "$intcnf" ]; then
source $intcnf
else
echo "\$intcnf not found."
exit 1
fi
if [ -f "$cnf" ]; then
source $cnf
if [ -f "$compatcnf" ]; then
source $compatcnf
fi
else
echo "could not find \$cnf, fatal error, bye."
exit 1
fi
if [ -f "/etc/sysconfig/maldet" ]; then
. /etc/sysconfig/maldet
elif [ -f "/etc/default/maldet" ]; then
. /etc/default/maldet
fi
if [ -f "$cron_custom_conf" ]; then
. $cron_custom_conf
fi
if [ -z "$scan_days" ]; then
scan_days=1
fi
if [ -z "$cron_prune_days" ]; then
cron_prune_days=21
fi
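# Illustrative overrides that could be placed in /etc/default/maldet (or the
# file named by $cron_custom_conf); values are examples, not defaults:
#
#   cron_daily_scan=1
#   scan_days=2
#   cron_prune_days=14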
if [ "$find" ]; then
# prune any quarantine/session/tmp data older than $cron_prune_days days (21 by default)
tmpdirs="$tmpdir $varlibpath/sess $varlibpath/quarantine $varlibpath/pub"
for dir in $tmpdirs; do
if [ -d "$dir" ]; then
$find $dir -type f -mtime +${cron_prune_days} -print0 | xargs -0 rm -f >> /dev/null 2>&1
fi
done
fi
if [ "$autoupdate_version" == "1" ] || [ "$autoupdate_signatures" == "1" ]; then
# sleep for random 1-999s interval to better distribute upstream load
sleep $(echo $RANDOM | cut -c1-3) >> /dev/null 2>&1
fi
if [ "$autoupdate_version" == "1" ]; then
# check for new release version
$inspath/maldet -d >> /dev/null 2>&1
fi
if [ "$autoupdate_signatures" == "1" ]; then
# check for new definition set
$inspath/maldet -u >> /dev/null 2>&1
fi
# if we're running inotify monitoring, send daily hit summary
if [ "$(ps -A --user root -o "cmd" | grep -E maldetect | grep -E inotifywait)" ]; then
$inspath/maldet --monitor-report >> /dev/null 2>&1
elif [ "$cron_daily_scan" == "1" ]; then
if [ -d "/home/virtual" ] && [ -d "/usr/lib/opcenter" ]; then
# ensim
$inspath/maldet -b -r /home/virtual/?/fst/var/www/html/,/home/virtual/?/fst/home/?/public_html/ $scan_days >> /dev/null 2>&1
elif [ -d "/etc/psa" ] && [ -d "/var/lib/psa" ]; then
# psa
$inspath/maldet -b -r /var/www/vhosts/?/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/local/directadmin" ]; then
# DirectAdmin
$inspath/maldet -b -r /home?/?/domains/?/public_html/,/var/www/html/?/ $scan_days >> /dev/null 2>&1
elif [ -d "/var/www/clients" ]; then
# ISPConfig
$inspath/maldet -b -r /var/www/clients/?/web?/web,/var/www/clients/?/web?/subdomains,/var/www $scan_days >> /dev/null 2>&1
elif [ -d "/etc/webmin/virtual-server" ]; then
# Virtualmin
$inspath/maldet -b -r /home/?/public_html/,/home/?/domains/?/public_html/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/local/ispmgr" ] || [ -d "/usr/local/mgr5" ]; then
# ISPmanager
$inspath/maldet -b -r /var/www/?/data/,/home/?/data/ $scan_days >> /dev/null 2>&1
elif [ -d "/var/customers/webs" ]; then
# froxlor
$inspath/maldet -b -r /var/customers/webs/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/local/vesta" ]; then
# VestaCP
$inspath/maldet -b -r /home/?/web/?/public_html/,/home/?/web/?/public_shtml/,/home/?/tmp/,/home/?/web/?/private/ $scan_days >> /dev/null 2>&1
elif [ -d "/usr/share/dtc" ]; then
# DTC
if [ -f /var/lib/dtc/saved_install_config ]; then
. /var/lib/dtc/saved_install_config
fi
$inspath/maldet -b -r ${conf_hosting_path:-/var/www/sites}/?/?/subdomains/?/html/ $scan_days >> /dev/null 2>&1
else
# cpanel, interworx and other standard home/user/public_html setups
$inspath/maldet -b -r /home?/?/public_html/,/var/www/html/,/usr/local/apache/htdocs/ $scan_days >> /dev/null 2>&1
fi
fi
if [ -f "$cron_custom_exec" ]; then
. $cron_custom_exec
fi
# ---- squirrelmail ----
#!/bin/sh
set -e
test -d /var/spool/squirrelmail/attach || exit 0
cd /var/spool/squirrelmail/attach
find \
-maxdepth 2 \
-xdev \
-type f \
\! -name '*.*' \
\! -name '*_*' \
-atime +2 \
-print0 \
| xargs \
-0 \
--no-run-if-empty \
rm --
# ---- dovecot ----
#!/bin/sh
[ -x /usr/sbin/dovecot ] || exit 0
[ -x /usr/lib/dovecot/expire-tool.sh ] || exit 0
[ -e /etc/default/dovecot ] || exit 0
EXPIRE_CRON=""
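# The expire run below only happens when /etc/default/dovecot sets, for
# example (illustrative):
#
#   EXPIRE_CRON=daily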
. /etc/default/dovecot
[ "x$EXPIRE_CRON" = "xdaily" ] || exit 0
/usr/sbin/dovecot --exec-mail ext /usr/lib/dovecot/expire-tool.sh
# ---- sysklogd ----
#! /bin/sh
# sysklogd Cron script to rotate system log files daily.
#
# If you want to rotate other logfiles daily, edit
# this script. An easy way is to add files manually,
# to add -a (for all log files) to syslogd-listfiles and
# add some grep stuff, or use the -s pattern argument to
# specify files that must not be listed.
#
# This is a configuration file. You are invited to edit
# it and maintain it on your own. You'll have to do
# that if you don't like the default policy
# wrt. rotating logfiles (i.e. with large logfiles
# weekly and daily rotation may interfere). If you edit
# this file and don't let dpkg upgrade it, you have full
# control over it. Please read the manpage to
# syslogd-listfiles.
#
# Written by Martin Schulze.
# $Id: cron.daily,v 1.14 2007-05-28 16:33:34 joey Exp $
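# For example, a locally maintained copy might rotate all syslog-managed
# files while skipping the news logs, using the -a and -s options mentioned
# above (a sketch, not the shipped behaviour):
#
#   logs=$(syslogd-listfiles -a -s '/var/log/news/.*')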
test -x /usr/sbin/syslogd-listfiles || exit 0
test -x /sbin/syslogd || exit 0
test -f /usr/share/sysklogd/dummy || exit 0
set -e
cd /var/log
logs=$(syslogd-listfiles)
test -n "$logs" || exit 0
for LOG in $logs
do
if [ -s $LOG ]; then
savelog -g adm -m 640 -u root -c 7 $LOG >/dev/null
fi
done
# Restart syslogd
#
/etc/init.d/sysklogd reload-or-restart > /dev/null
# ---- bsdmainutils ----
#!/bin/sh
# /etc/cron.daily/calendar: BSD mainutils calendar daily maintenance script
# Written by Austin Donnelly
# Comment the following line if you'd like all of your users'
# ~/calendar files to be checked daily. Calendar will send them mail
# to remind them of upcoming events. See calendar(1) for more details.
exit 0
[ -x /usr/sbin/sendmail ] || exit 0
if [ ! -x /usr/bin/cpp ]; then
echo "The cpp package is needed to run calendar."
exit 1
fi
/usr/bin/calendar -a
# ---- quota ----
#! /bin/sh
# check if quota package is available
test -x /usr/sbin/warnquota || exit 0
# check if warnquota run is configured
test -f /etc/default/quota || exit 0
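# warnquota is only invoked when /etc/default/quota contains, for example
# (illustrative):
#
#   run_warnquota=true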
. /etc/default/quota
if [ "$run_warnquota" = "true" ]; then
# check if quotas are enabled
if grep -q '^[^#]*quota' /etc/fstab; then
/usr/sbin/warnquota
fi
fi
exit 0
# ---- exim4-base ----
#!/bin/sh
if [ -n "$EX4DEBUG" ]; then
echo "now debugging $0 $@"
set -x
fi
# set this to some other value if you don't want the panic log to be
# watched by this script, for example when you're using your own log
# checking mechanisms or don't care.
E4BCD_DAILY_REPORT_TO=""
E4BCD_DAILY_REPORT_OPTIONS=""
E4BCD_WATCH_PANICLOG="yes"
# Number of lines of paniclog quoted in warning email.
E4BCD_PANICLOG_LINES="10"
E4BCD_PANICLOG_NOISE=""
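# These defaults can be overridden from /etc/default/exim4, sourced below;
# a sketch with illustrative values:
#
#   E4BCD_DAILY_REPORT_TO="root"
#   E4BCD_WATCH_PANICLOG="once"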
# Only do anything if exim4 is actually installed
if [ ! -x /usr/lib/exim4/exim4 ]; then
exit 0
fi
[ -f /etc/default/exim4 ] && . /etc/default/exim4
SPOOLDIR="$(exim4 -bP spool_directory | sed 's/.*=[[:space:]]\(.*\)/\1/')"
# The log processing code used in this cron script is not very
# sophisticated. It relies on this cron job being executed earlier than
# the log rotation job, and will have false results if the log is not
# rotated exactly once daily in the daily cron processing. Even in the
# default configuration, it will ignore log entries made between this
# cron job and the log rotation job.
# Patches for more sophisticated processing are appreciated via the
# Debian BTS.
E4BCD_MAINLOG_NOISE="^[[:digit:][:space:]:-]\{20\}\(\(Start\|End\) queue run: pid=[[:digit:]]\+\|exim [[:digit:]\.]\+ daemon started: pid=[[:digit:]]\+, .*\)$"
if [ -n "$E4BCD_DAILY_REPORT_TO" ]; then
if [ -x "$(command -v eximstats)" ] && [ -x "$(command -v mail)" ]; then
if [ "$(< /var/log/exim4/mainlog grep -v "$E4BCD_MAINLOG_NOISE" | wc -l)" -gt "0" ]; then
< /var/log/exim4/mainlog grep -v "$E4BCD_MAINLOG_NOISE" \
| eximstats $E4BCD_DAILY_REPORT_OPTIONS \
| mail $E4BCD_DAILY_REPORT_TO -s"$(hostname --fqdn) Daily e-mail activity report"
else
echo "no mail activity in this interval" \
| mail $E4BCD_DAILY_REPORT_TO -s"$(hostname --fqdn) Daily e-mail activity report"
fi
else
echo "The exim4 cron job is configured to send a daily report, but eximstats"
echo "and/or mail cannot be found. Please check and make sure that these two"
echo "binaries are available"
fi
fi
log_this() {
TEXT="$@"
if ! logger -t exim4 -p mail.alert $TEXT; then
RET="$?"
echo >&2 "ALERT: could not syslog $TEXT, logger return value $RET"
fi
}
if [ "$E4BCD_WATCH_PANICLOG" != "no" ]; then
if [ -s "/var/log/exim4/paniclog" ]; then
if [ -x "/usr/local/lib/exim4/nonzero_paniclog_hook" ]; then
/usr/local/lib/exim4/nonzero_paniclog_hook
fi
if [ -z "$E4BCD_PANICLOG_NOISE" ] || grep -vq "$E4BCD_PANICLOG_NOISE" /var/log/exim4/paniclog; then
log_this "ALERT: exim paniclog /var/log/exim4/paniclog has non-zero size, mail system possibly broken"
if ! printf "Subject: exim paniclog on %s has non-zero size\nTo: root\n\nexim paniclog /var/log/exim4/paniclog on %s has non-zero size, mail system might be broken. The last ${E4BCD_PANICLOG_LINES} lines are quoted below.\n\n%s\n" \
"$(hostname --fqdn)" "$(hostname --fqdn)" \
"$(tail -n "${E4BCD_PANICLOG_LINES}" /var/log/exim4/paniclog)" \
| exim4 root; then
log_this "PANIC: sending out e-mail warning has failed, exim has non-zero return code"
fi
if [ "$E4BCD_WATCH_PANICLOG" = "once" ]; then
logrotate -f /etc/logrotate.d/exim4-paniclog
fi
fi
fi
fi
# run tidydb as Debian-exim:Debian-exim.
if [ -x /usr/sbin/exim_tidydb ]; then
cd $SPOOLDIR/db || exit 1
if ! find $SPOOLDIR/db -maxdepth 1 -name '*.lockfile' -or -name 'log.*' \
-or -type f -printf '%f\0' | \
xargs -0r -n 1 \
start-stop-daemon --start --exec /usr/sbin/exim_tidydb \
--chuid Debian-exim:Debian-exim -- $SPOOLDIR > /dev/null; then
# if we reach this, invoking exim_tidydb from start-stop-daemon has
# failed, most probably because of libpam-tmpdir being in use
# (see #373786 and #376165)
find $SPOOLDIR/db -maxdepth 1 -name '*.lockfile' -or -name 'log.*' \
-or -type f -printf '%f\0' | \
su - --shell /bin/bash \
--command "xargs -0r -n 1 /usr/sbin/exim_tidydb $SPOOLDIR > /dev/null" \
Debian-exim
fi
fi