summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--doc/setup_howto137
-rw-r--r--helper/distribution.inc56
-rw-r--r--helper/fileutil.inc199
-rw-r--r--helper/string.inc8
-rwxr-xr-xmltk228
-rwxr-xr-xopenslx255
-rw-r--r--remote/includes/binutil.inc (renamed from helper/binutil.inc)94
-rw-r--r--remote/includes/distribution.inc53
-rw-r--r--remote/includes/downloader.inc (renamed from helper/downloader.inc)0
-rw-r--r--remote/includes/kernel.inc (renamed from helper/kernel.inc)117
-rw-r--r--remote/includes/keyvalueutil.inc (renamed from helper/keyvalueutil.inc)0
-rw-r--r--remote/includes/packagemanager.inc138
-rw-r--r--remote/includes/system.inc (renamed from helper/system.inc)5
-rw-r--r--remote/includes/useradd.inc (renamed from helper/useradd.inc)5
-rwxr-xr-xremote/setup_target45
-rwxr-xr-xserver/export_target44
-rw-r--r--server/includes/packing.inc35
17 files changed, 796 insertions, 623 deletions
diff --git a/doc/setup_howto b/doc/setup_howto
index 9ed7f1d1..e520973e 100644
--- a/doc/setup_howto
+++ b/doc/setup_howto
@@ -2,11 +2,11 @@ This is a little howto to get people started on openSLX. Please expand!
-1. Prerequisites
+1. Client (template) prerequisites
Hard disk space: As the tm-scripts directory will expand considerably while
- building openSLX (to 5-6 GB), we recommend to allocate around 8-10 GB
- disk space. OpenSLX will install some packages into the base system
+ building mini-linux (to 5-6 GB), we recommend to allocate around 8-10 GB
+ disk space. mltk will install some packages into the base system
depending on chosen modules.
Currently supported distributions:
@@ -25,7 +25,7 @@ There are some other git repositories needed by the build process, but they
will be automatically checked out, e.g. busybox or printergui.
-3. Server prerequisites
+3. Deployment server prerequisites
Needed services: dhcp, tftpd, httpd, NFS and/or dnbd3.
@@ -35,12 +35,10 @@ Needed services: dhcp, tftpd, httpd, NFS and/or dnbd3.
checkout openSLX git repository:
# git clone git://git.openslx.org/openslx-ng/tm-scripts.git
-There are some other git repositories needed by the build process, but they
-are automatically checked out, e.g. busybox or printergui.
-
5. Getting started
+On your client machine that serves as the template for the final system:
Change into directory tm-scripts, and execute the mltk script ('mini linux
toolkit') without parameters (or use -h, --help) to see possible options
including some examples.
@@ -57,112 +55,101 @@ stderr.log). Detailed information can also be obtained using the '-d'
kernel options arise, if no value was being given through 'make oldconfig',
as without '-d' the system will assume the default answer is correct.
-Please take note that mltk functions are divided in two parts, somewhat
-misleadingly named 'remote' and 'server' (second parameter choice). As
-rule of thumb it may be said that 'remote' applies to building and 'server'
-applies to packaging the built system in appropriate ways (initramfs, sqfs)
-for delivery.
-
-
6. Building
Build Stage31:
-# ./mltk remote stage31 -c -b (-d -> debug when appropriate)
+# ./mltk stage31 -c -b (-d -> debug when appropriate)
-... this will take quite some time, mostly due to kernel compiling.
+... this will take quite some time, the first time mostly due to kernel compiling.
Build Stage32:
-# ./mltk remote stage32 -c -b (-d )
+# ./mltk stage32 -c -b (-d )
-Build Stage32 for openSuse:
-# ./mltk remote stage32-opensuse -c -b
+Build Stage32 for openSuse: (not really needed, should be identical to stage32)
+# ./mltk stage32-opensuse -c -b
... this will take some time, mostly due to compiling a couple of packages.
-Building a single module:
-# ./mltk remote stage32 -c [module] -b [module] (-d)
+(Re)building a single module:
+# ./mltk stage32 -c [module] -b [module] (-d)
Building a single module for openSuse:
-./mltk remote stage32-opensuse -c [module] [module] -b (-d)
+./mltk stage32-opensuse -c [module] [module] -b (-d)
Build addons (vmware etc.)
-# ./mltk remote vmware -c -b
-# ./mltk remote vbox -c -b
-# ./mltk remote debug -c -b (as always: -d -> debug when appropriate)
+# ./mltk vmware -c -b
+# ./mltk vbox -c -b
+# ./mltk debug -c -b (as always: -d -> debug when appropriate)
7. Packaging
-When using the parameter 'server' either an IP adress or 'local' is expected.
-If the building machine is also used to deliver the built boot images 'local'
-should be used.
-
-If another machine is used to deliver built images (by http etc.) the IP
-adress of the build machine has to be be used. In that case mltk needs to be
-present on the server machine.
-
-Please note that the remote machine (the machine on which the build process
-runs) needs to export the build structure (option remote -n, see
-mltk --help). This option executes a bind mount of the local build directory
-to a standardized place, /export/build, which can be accessed later from the
-server machine via rsync. To facilitate this rsync-ing it may be wise to
-add the ssh key to the build machine (authorized_keys), as then no password
+This should be done on the 'packaging server' which creates the files
+required for booting from the remote template machine from above.
+You can do this on the same machine you were building mini-linux on,
+but it might lead to problems when building stage 4 later.
+For this you need the openslx script from the tm-scripts repo, which
+also needs to be run as root (for proper rsync).
+
+Please note that the remote machine (the machine on which the build process
+ran) needs to export the build structure (option -n, see
+mltk --help). This option executes a bind mount of the local build directory
+to a standardized place, /export/build, which can be accessed later from the
+server machine via rsync. To facilitate this rsync-ing it may be wise to
+add the ssh key to the build machine (authorized_keys), as then no password
has to be given when syncing from the server machine.
So, remember to execute
-# ./mltk remote -n
-on the build machine, as the build is usually going to by synchronized to a
-dedicated server machine.
+# ./mltk -n
+on the build machine once after bootup, as the build is usually going to
+be synchronized to a dedicated server machine for packing.
7.1 Packaging locally (build and server machine are the same machine)
-Even though the usual way to go is using dedicated machines to build and to
-serve it is possible to package locally, e.g. for testing purposes. So, to
-package stages and addons (for example vmware), presuming the same machine
+Even though the usual way to go is using dedicated machines to build and to
+serve it is possible to package locally, e.g. for testing purposes. So, to
+package stages and addons (for example vmware), presuming the same machine
is used for building and serving:
-# ./mltk server local stage31 -e stage31
-# ./mltk server local stage32 -e stage32
+# ./openslx local stage31 -e cpio
+# ./openslx local stage32 -e sqfs
(Use this call for openSuse:)
-# ./mltk server local stage32-opensuse -e stage32 (for target opensuse)
+# ./openslx local stage32-opensuse -e sqfs (for target opensuse)
-To package addons the parameter 'addons' has to be used:
-# ./mltk server local vmware -e addons
-... other addons likewise.
+Addons can be packed the same way:
+# ./openslx local vmware -e sqfs
7.2 Remote packaging (needed if build and server machine not identical)
First, do a
-# ./mltk server [IP a build machine] -s
+# ./openslx <IP of build machine> -s
-to synchronize all stage/addon builds in one pass. This synchronizes the
-complete build directories from the remote (build) machine to the server.
-It is possible to synchronize several build machines (thus different
-flavours) to one server. The IP adresses of build machines are used in
-server directory structure to distinguish builds; therefore the option
+to synchronize all stage/addon builds in one pass. This synchronizes the
+complete build directories from the remote (build) machine to the server.
+It is possible to synchronize several build machines (thus different
+flavours) to one server. The IP addresses of build machines are used in
+server directory structure to distinguish builds; therefore the option
'local' should be used with care.
The stages and addons may be packed in analogue to the 'local case'
-mentioned above:
+mentioned above:
-# ./mltk server [IP of build machine] stage31 -e stage31
-# ./mltk server [IP of build machine] stage32 -e stage32
+# ./openslx <IP of build machine> stage31 -e cpio
+# ./openslx <IP of build machine> stage32 -e sqfs
Use this call for openSuse:
-# ./mltk server [IP of build machine] stage32-opensuse -e stage32
+# ./openslx <IP of build machine> stage32-opensuse -e sqfs
-For packaging addons the parameter 'addons' should be used:
-# ./mltk server [IP of build machine] vmware -e addons
-... other addons likewise.
+Addons:
+# ./openslx <IP of build machine> vmware -e sqfs
-Please note that stages/addons can be synchronized independently, if
-needed:
-# ./mltk server [IP of build machine] stage31 -e stage31 -s
+You can synchronize and pack at the same time:
+# ./openslx <IP of build machine> stage31 -e cpio -s
# [...]
-# ./mltk server [IP of build machine] vmware -e addons -s
+# ./openslx <IP of build machine> vmware -e sqfs -s
8. Preparing for client boot
@@ -227,30 +214,30 @@ the script clone_stage4 rejects 'local' as IP parameter.
To use Stage 4 a nfs export will be necessary, as later on the files of stage4
will be accessed client-side by nfs. Please keep in mind that
-"./mltk remote -n" has to be executed on the build machine before cloning
+"./mltk -n" has to be executed on the build machine before cloning
Stage 4.
Then, be sure all builds are synced to the server machine, if that has not
happened before:
-# ./mltk server [IP of build machine] -s
+# ./openslx <IP of build machine> -s
... or, if wanted, sync just some parts, if you know what you're doing.
Stage31 makes most sense so far, so to say. So, for example:
-# ./mltk server [IP] stage31 -e stage31 -s
+# ./openslx <IP> stage31 -e cpio -s
etc.
Well, then do the cloning work:
-# ./scripts/clone_stage4 [IP of build machine] stage32
- /path/to/your/nfs/share/stage4 (this is one line!)
+# ./scripts/clone_stage4 [IP of build machine] stage32 \
+ /path/to/your/nfs/share/stage4 # (this is one line!)
To use Stage 4 the clients need the nfs mount information. This is handled via
a configuration variable (please consult doc/boot_config_vars for a full
-summary) called SLX_STAGE4_NFS.
+summary) called SLX_STAGE4.
So now would be a good time to check (or re-check) your base config file in
the client directory you chose above (see 8. Preparing for client boot)
contains a line
-SLX_STAGE4_NFS=[IP of service computer]:/path/to/your/nfs/share/stage4
+SLX_STAGE4=[IP of service computer]:/path/to/your/nfs/share/stage4
You should see Stage 4 working after rebooting the client. The Stage 4 entries
should be placed above the list of virtual machines.
diff --git a/helper/distribution.inc b/helper/distribution.inc
index 817baa53..51a01eae 100644
--- a/helper/distribution.inc
+++ b/helper/distribution.inc
@@ -1,54 +1,2 @@
-
-
-detect_distribution () {
- # Set up distribution and package management
- # Lowercase distributor ID is what we use as the general distribution name
- SYS_DISTRIBUTION=$(lsb_release -is | tolower | sed -r 's/[^a-z0-9]//g;s/project$//g;s/scientificsl$/scientific/g')
- # Then determine packet manager
- case "$SYS_DISTRIBUTION" in
- ubuntu)
- PACKET_MANAGER="apt"
- PACKET_HANDLER="dpkg"
- detect_ubuntu_lts
- ;;
- debian)
- PACKET_MANAGER="apt"
- PACKET_HANDLER="dpkg"
- ;;
- opensuse)
- PACKET_MANAGER="zypper"
- PACKET_HANDLER="rpm"
- ;;
- scientific)
- PACKET_MANAGER="yum"
- PACKET_HANDLER="rpm"
- ;;
- *)
- perror "Unknown Distribution: $SYS_DISTRIBUTION - Please specify its packet manager in remote/setup_target"
- ;;
- esac
- # Get version - we mangle this quite a bit. first make sure it has no spaces, then split version at period (.), underscore (_) and dash (-)
- SYS_VERSION=$(lsb_release -rs | tolower)
- local VERSION=$(echo $SYS_VERSION | sed -r 's/\s//g;s/[\._]/ /g;s/-//g')
- local STRTMP=""
- PRINT_SYS_VERSIONS="*.conf.$SYS_DISTRIBUTION"
- SYS_VERSIONS="$SYS_DISTRIBUTION"
- for PART in $VERSION; do
- [ -z "$PART" ] && continue
- STRTMP+=".$PART"
- SYS_VERSIONS="${SYS_DISTRIBUTION}${STRTMP} $SYS_VERSIONS"
- PRINT_SYS_VERSIONS="*.conf.${SYS_DISTRIBUTION}${STRTMP} $PRINT_SYS_VERSIONS"
- done
- pinfo "Config source order: *.conf first, then the first one of these (if found)"
- pinfo "$PRINT_SYS_VERSIONS"
-}
-
-detect_ubuntu_lts () {
- local TMP=$(dpkg -S /usr/bin/Xorg)
- [[ "$TMP" == xserver-xorg* ]] || perror "Could not detect xserver package version (returned: $TMP)"
- TMP=${TMP%: *}
- TMP=${TMP#xserver-xorg-core}
- pinfo "Ubuntu LTS Xorg suffix: $TMP"
- UBUNTU_XORG_PKG_SUFFIX="$TMP"
-}
-
+# Get simple distribution name
+SYS_DISTRIBUTION=$(lsb_release -is | tr '[A-Z]' '[a-z]' | sed -r 's/[^a-z0-9]//g;s/project$//g;s/scientificsl$/scientific/g')
diff --git a/helper/fileutil.inc b/helper/fileutil.inc
index aae654bc..525e46a7 100644
--- a/helper/fileutil.inc
+++ b/helper/fileutil.inc
@@ -29,203 +29,4 @@ tarcopy () {
[ "x${PS[1]}" != "x0" ] && perror "unpacking-part of tar-copy from '$SHORT' to '$TO' failed. (${PS[1]})"
}
-# get all files of required packages by a module
-list_packet_files() {
- [ -z "$REQUIRED_CONTENT_PACKAGES" ] && pinfo "No required packages for $TOOL" && return 1
- local PACKAGE=""
- for PACKAGE in $REQUIRED_CONTENT_PACKAGES; do
- local OPTIONAL="$(echo "$PACKAGE" | cut -c 1)"
- [ "x$OPTIONAL" = "x@" ] && PACKAGE="$(echo "$PACKAGE" | cut -c 2-)"
- local FILES=""
- if [ "$PACKET_HANDLER" = "dpkg" ]; then
- PACKAGECOMMAND="dpkg -L"
- elif [ "$PACKET_HANDLER" = "rpm" ]; then
- PACKAGECOMMAND="rpm -ql"
- fi
- if [ -n "$REQUIRED_PACKET_FILES_BLACKLIST" ]; then
- FILES="$($PACKAGECOMMAND "$PACKAGE" | grep "^/" | \
- grep -v "$REQUIRED_PACKET_FILES_BLACKLIST" | \
- grep -v -E 'share/(man|doc)|/var/run|/var/log'; \
- echo ":###:${PIPESTATUS[0]}")"
- else
- FILES="$($PACKAGECOMMAND "$PACKAGE" | grep "^/" | grep -v -E 'share/(man|doc)|/var/run|/var/log'; echo ":###:${PIPESTATUS[0]}")"
- fi
-# FILES="$(rpm -ql "$PACKAGE" | grep "^/" | grep -v -E 'share/(man|doc)|/var/run|/var/log'; echo ":###:${PIPESTATUS[0]}")"
-
- # ugly hack to get our return value
- #local LPRET=$(echo "$FILES" | tail -1 | sed 's/^.*:###:\([0-9]*\)$/\1/g')
- #FILES=$(echo "$FILES" | sed 's/^\(.*\):###:[0-9]*$/\1/g')
- local LPRET=$(echo "$FILES" | awk -F ':###:' '{printf $2}')
- FILES=$(echo "$FILES" | awk -F ':###:' '{print $1}')
- if [ "x$LPRET" != "x0" -a "x$OPTIONAL" != "x@" ]; then
- pdebug "FILES: '$FILES'"
- perror "dpkg/rpm exited with code '$LPRET' for required package ${PACKAGE}."
- fi
- [ "x$LPRET" != "x0" ] && pwarning "dpkg/rpm exited with code '$LPRET' for optional package ${PACKAGE}." && continue
- [ -z "$FILES" ] && pwarning "list_packet_files empty for packet ${PACKAGE}." && continue
- pdebug "Packet $PACKAGE has $(echo $FILES | wc -w) files..."
- for FILE in $FILES; do
- [ ! -d "$FILE" ] && echo "$FILE"
- done
- done
-}
-#
-# Conveniance function
-#
-# install all dependencies of a module
-# goes through all package as given in the variable REQUIRED_INSTALLED_PACKAGES
-install_dependencies() {
- [ -z "$REQUIRED_INSTALLED_PACKAGES" ] && return
- install_packages "$REQUIRED_INSTALLED_PACKAGES"
-}
-#
-# install given packet through system's packet manager
-# uses PACKET_HANDLER as determined in helper/system.inc
-#
-install_packages() {
- [ $# -eq 0 ] && perror "Sanity check failed: no argument given to install_package"
- local PACKAGE_LIST="$@"
- local INSTALLED_PACKAGES=""
-
- for PKG in ${PACKAGE_LIST}; do
- # check if installed
- if [ "x$PACKET_HANDLER" == "xdpkg" ]; then
- dpkg -l ${PKG} > /dev/null 2>&1
- elif [ "x$PACKET_HANDLER" == "xrpm" ]; then
- rpm -ql ${PKG} > /dev/null 2>&1
- else
- perror "No packet manager / handler determined, this should not happen!"
- fi
-
- local LRET=$?
- if [ "x$LRET" == "x0" ]; then
- # check if it is completly installed,
- # not just leftover configuration files
- local INSTALL_STATUS=$(dpkg -l $PKG | grep $PKG | cut -c1-2)
- if [[ $INSTALL_STATUS != "ii" ]]; then
- pinfo "$PKG not installed!"
- install_package $PKG
- else
- # package installed
- pdebug "$PKG installed!"
- fi
- else
- # package not installed
- pdebug "$PKG not installed!"
- install_package $PKG
- fi
- done
- [ ! -z "$INSTALLED_PACKAGES" ] && pinfo "Packages installed: ${INSTALLED_PACKAGES}"
-}
-
-#
-# install individual package depending on package manager
-#
-install_package() {
- if [ "$#" -ne 1 ]; then
- perror "Only call install_package with one argument!"
- fi
-
- if [ "x$PACKET_MANAGER" == "xapt" ]; then
- apt-get install -y ${PKG}
- local IRET=$?
- if [ "x$IRET" == "x0" ]; then
- # $PGK was installed successfully
- INSTALLED_PACKAGES+="$PKG "
- else
- # PKG was not installed
- # TODO error handling
- perror "install_packages: apt-get failed with '$IRET' for package '$PKG'"
- fi
- elif [ "x$PACKET_MANAGER" == "xzypper" ]; then
- zypper --no-refresh --non-interactive install ${PKG}
- local IRET=$?
- if [ "x$IRET" == "x0" ]; then
- # $PGK was installed successfully
- INSTALLED_PACKAGES+="$PKG "
- else
- # PKG was not installed
- # TODO error handling
- perror "install_packages: zypper failed with '$IRET' for package '$PKG'"
- fi
- elif [ "x$PACKET_MANAGER" == "xyum" ]; then
- yum --assumeyes install ${PKG}
- local IRET=$?
- if [ "x$IRET" == "x0" ]; then
- # $PGK was installed successfully
- INSTALLED_PACKAGES+="$PKG "
- else
- # PKG was not installed
- # TODO error handling
- perror "install_packages: yum failed with '$IRET' for package '$PKG'"
- fi
- else
- perror "No packet manager determined, this should not happen!"
- fi
-}
-
-#
-# copies static data files from <MODULE>/data/ to <TARGET_BUILD_DIR>
-#
-copy_static_data() {
- [ ! -d "${MODULE_DIR}/data" ] && pinfo "${MODULE} has no static 'data' directory." && return
- cp -r "${MODULE_DIR}/data/"* ${TARGET_BUILD_DIR} || perror "Could not copy static data of ${MODULE}"
-}
-
-copy_system_files() {
- [ ! -z "$REQUIRED_SYSTEM_FILES" ] && tarcopy "$REQUIRED_SYSTEM_FILES" "$TARGET_BUILD_DIR"
-}
-
-calc_size() {
-
- local CURRENT_BUILD_SIZE=$(du -bc "${TARGET_BUILD_DIR}" | awk 'END {print $1}')
-
- [ ! -z "${BUILD_SIZE[$MODULE]}" ] && local OLD_MODULE_SIZE=${BUILD_SIZE[$MODULE]} || local OLD_MODULE_SIZE=0
- local diff=$((CURRENT_BUILD_SIZE-TARGET_BUILD_SIZE+OLD_MODULE_SIZE))
-
- if [ -z "${BUILD_SIZE[$MODULE]}" ]; then
- echo "BUILD_SIZE[$MODULE]=${diff}" >> "${ROOT_DIR}/logs/${TARGET}.size"
- else
- sed -i "s/^BUILD_SIZE\[${MODULE}\]=.*$/BUILD_SIZE\[${MODULE}\]=${diff}/g" "${ROOT_DIR}/logs/${TARGET}.size"
- fi
-
- MODULE_BUILD_SIZE=$(echo $diff | awk '{ sum=$1; hum[1024^3]="GB"; hum[1024^2]="MB"; hum[1024]="KB";
- for (x=1024^3; x>=1024; x/=1024){
- if (sum>=x) { printf "%.2f %s\n",sum/x,hum[x]; break }
- }
- }')
-}
-
-#
-# generate initramfs of directory
-# usage:
-# generate_initramfs <source_dir> <files> <destination_dir/filename>
-# example:
-# generate_initramfs "./server/boot/stage32_sqfs" "./mnt/openslx.sqfs" "./server/boot/initramfs2"
-# generate_initramfs "./server/build/stage31" "." "./server/boot/initramfs"
-generate_initramfs() {
- [ $# -ne 3 ] && perror "Sanity check failed: generate_initramfs needs exactly two params, but $# were given."
- cd "$1" || perror "Cannot cd to '$1'"
-
- find $2 | cpio --format="newc" --create | gzip -9 > "$3"
- local PS=(${PIPESTATUS[*]})
- [ "x${PS[0]}" != "x0" ] && perror "'find $2' in '$(pwd)' failed."
- [ "x${PS[1]}" != "x0" ] && perror "cpio create failed."
- [ "x${PS[2]}" != "x0" ] && perror "gzip to '$3' failed."
- cd - &> /dev/null
- pinfo "Created initramfs of $1 at $3"
- pinfo "Size: $(du -bsh "$3" | awk 'END {print $1}')"
-}
-
-# generates squashfs of directory
-# usage:
-# generate_squashfs <source_dir> <destination_dir/filename>
-generate_squashfs() {
- [ $# -ne 2 ] && perror "Sanity check failed: generate_squashfs needs exactly two params, but $# were given."
- [ -d "$1" ] || perror "$1 is not a directory."
- mksquashfs "$1" "$2" -comp xz -b 1M -no-recovery >&6 \
- || perror "mksquashfs failed ($?)."
- pinfo "Created squashfs of $1 at $2"
- pinfo "Size: $(du -bsh "$2" | awk 'END {print $1}')"
-}
diff --git a/helper/string.inc b/helper/string.inc
index 4c6b9778..75baf2ca 100644
--- a/helper/string.inc
+++ b/helper/string.inc
@@ -1,10 +1,10 @@
# usage: VAR=$(trim " string ")
trim() {
- local var=$1
- var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters
- var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters
- echo -n "$var"
+ local var=$1
+ var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters
+ var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters
+ echo -n "$var"
}
# Inline version of trim, use when piping
diff --git a/mltk b/mltk
index 753af701..ca736592 100755
--- a/mltk
+++ b/mltk
@@ -1,6 +1,6 @@
#!/bin/bash
# -----------------------------------------------------------------------------
-#
+#
# Copyright (c) 2011 - OpenSLX GmbH
#
# This program is free software distributed under the GPL version 2.
@@ -11,12 +11,13 @@
#
# General information about OpenSLX can be found at http://openslx.org/
# -----------------------------------------------------------------------------
-#
+#
# Mini-Linux Toolkit
-#
+#
# -----------------------------------------------------------------------------
-SELF="$(readlink -f $0)"
+ARG0="$0"
+SELF="$(readlink -f "$ARG0")"
ROOT_DIR="$(dirname "${SELF}")"
MLTK_PID="$$"
@@ -31,14 +32,24 @@ qnd_exit() {
exit 1
}
-# source all helper functions files that are found under helper/*.inc
-for HELPER in $ROOT_DIR/helper/*.inc; do
+# source all helper unit files that are found under helper/*.inc
+# these are general and used by "mltk" and "openslx"
+for HELPER in "$ROOT_DIR"/helper/*.inc; do
+ . "$HELPER" && continue
+ unset_quiet
+ echo "Could not source $HELPER"
+ qnd_exit
+done
+
+# source specific units only used by mltk
+for HELPER in "$ROOT_DIR"/remote/includes/*.inc; do
. "$HELPER" && continue
unset_quiet
echo "Could not source $HELPER"
qnd_exit
done
+
banner () {
echo -e "\033[38;5;202m\t __ __ __ "
echo -e "\033[38;5;202m\t.--------.| | | |_| |--."
@@ -47,63 +58,43 @@ banner () {
echo -e "\033[38;5;214m\t "
echo -e "\033[38;5;220m\t ** OpenSLX Project // 2013 **"
echo -e "\033[38;5;226m\t http://lab.openslx.org/"
- echo -e "\033[0m"
+ echo -e "\033[00m"
}
print_usage() {
echo -e ""
echo -e "Toolkit for creating preboot mini-linux for OpenSLX NG (mltk)"
- echo -e "Usage: $(basename ${SELF}) remote <target> [-d] [-c [module]*] [-b [module]*] [-p profile]"
- echo -e " $(basename ${SELF}) server <remotehost> [-s] [<target>] [-e stage31|stage32|addons] [-d] [-c]"
- echo -e ""
- echo -e " Mode:"
- echo -e " server \t server mode: packs stage3.1, stage3.2 or addons as initramfs/squashfs."
- echo -e " remote \t remote mode: builds a minimal rootfs based on the activated modules in <target>"
+ #echo -e "Usage: $(basename ${SELF}) <target> [-d] [-c [module]*] [-b [module]*] [-p profile]"
+ echo -e "Usage: $ARG0 <target> [-d] [-c [module]*] [-b [module]*] [-n]"
echo -e ""
- echo -e " Global options (valid for all modes):"
- echo -e " -d activates debug output for the task"
- echo -e " "
- echo -e " Options for 'remote'-mode:"
+ echo -e " Options:"
+	echo -e "    -d      activates debug output for the task (spammy)"
echo -e " -b build module(s) and copy them to the target build directory"
- echo -e " -c clean build directory of module(s)."
- echo -e " -p profile build all modules from given profile (DEPRECATED?)"
- echo -e " -n bind mount all the local builds under remote/builds to /export/builds."
- echo -e " Allows for syncing from the server machine using the '-s'-flag."
- echo -e ""
- echo -e " In mode 'remote', you can pass names of specific modules to clean/build."
- echo -e " Otherwise, all modules will be built/cleaned."
+ echo -e " -c clean build directory of module(s) and target dir"
+ #echo -e " -p profile build all modules from given profile (DEPRECATED?)"
+ echo -e " -n bind mount all the local builds (remote/builds) to /export/builds"
echo -e ""
- echo -e " Options for 'server'-mode:"
- echo -e " -e type export target as 'type'. type can be of 'stage31' (simple initramfs), 'stage32' or 'addons' (both beeing squashfs)"
- echo -e " -s sync 'builds' directory of remote host"
- echo -e " -c clean target in remote_builds/ and corresponding files under boot/"
- echo -e ""
- echo -e "--------------------------------------------------------------------------------------------------------------------------------------"
+ echo -e " You can pass names of specific modules to clean/build (-c / -b)."
+ echo -e " Otherwise, all modules will be cleaned/built."
echo -e ""
echo -e " Examples:"
- echo -e " remote stage31 -c -b (clean all modules and build all linked modules in remote/targets/stage31)"
- echo -e " remote stage32 -c -b rootfs_stage31 sshd (clean all modules, build base, policykit and sshd in remote/builds/stage32)"
- echo -e " remote stage32 -c base sshd -b sshd ldm -d (clean base and sshd, build sshd and ldm, be verbose)"
- echo -e " remote -n (provides bind mount at /export/build for synchronizing)"
- echo -e " server 1.2.3.4 -s (sync all builds from remote system 1.2.3.4)"
- echo -e " server 1.2.3.4 stage32 -e stage32 (packs stage32 as squashfs from remote system 1.2.3.4)"
- echo -e " server 1.2.3.4 stage31 -c (clean stage31 build under server/remote_builds and initramfs under server/boot)"
- echo -e " server 1.2.3.4 addons-eexam -e addons (pack eexam-addons as squashfs)"
- echo -e " server 1.2.3.4 -k [configname] (generate config file configs/config.tgz)"
- echo -e ""
- echo -e " Existing targets for remote are:"
- echo -e " $(echo $(ls ${ROOT_DIR}/remote/targets 2>/dev/null || echo "No targets for remote found."))"
+ echo -e " $ARG0 stage31 -c -b"
+ echo -e " (clean and build all modules symlinked in remote/targets/stage31)"
+ echo -e " $ARG0 stage32 -c base sshd -b sshd ldm -d"
+ echo -e " (clean base and sshd, build sshd and ldm, be verbose)"
+ echo -e " $ARG0 -n"
+ echo -e " (provides bind mount at /export/build for synchronizing)"
echo -e ""
- echo -e " NOTE: Use 'local' as the remote host if you're running the server part on the same machine as the remote part."
+ echo -e " Existing targets are:"
+ echo -e " $(echo $(ls "${ROOT_DIR}/remote/targets" 2>/dev/null || echo "No targets for remote found."))"
echo -e ""
}
-
check_devtools() {
# Checking for needed development tools, compilers etc.
# Required: m4 make gcc g++ binutils
# no qmake here, qmake (libqt4-devel) should be installed in a module!
- local DEV_TOOLS="gcc c++ g++ make m4 strip git depmod patch mksquashfs pkg-config" # 'strip' as marker for binutils
+ local DEV_TOOLS="gcc g++ make m4 strip git depmod patch pkg-config" # 'strip' as marker for binutils
# DEV_CHECK marks missing dev-tools
local DEV_TOOLS_MISSING=""
@@ -116,19 +107,21 @@ check_devtools() {
pinfo "Missing tools are: $DEV_TOOLS_MISSING"
pinfo "For $SYS_DISTRIBUTION you probably need to run:"
case $SYS_DISTRIBUTION in
- ubuntu | debian) pinfo "apt-get install build-essential m4 squashfs-tools"
- pinfo "to install missing development tools."
- ;;
- # FIXME: Don't know how precise this filter works so we might want to have a better matching ...
- opensuse) pinfo "zypper install gcc gcc-c++ make m4 binutils git module-init-tools patch squashfs pkg-config"
- pinfo "to install missing development tools."
- ;;
- *) perror "Cannot determine SYS_DISTRIBUTION: $SYS_DISTRIBUTION unknown!"
- ;;
+ ubuntu | debian)
+ pinfo "apt-get install build-essential m4 squashfs-tools"
+ pinfo "to install missing development tools."
+ ;;
+ # FIXME: Don't know how precise this filter works so we might want to have a better matching ...
+ opensuse)
+ pinfo "zypper install gcc gcc-c++ make m4 binutils git module-init-tools patch squashfs pkg-config"
+ pinfo "to install missing development tools."
+ ;;
+ *)
+ perror "No installation help for $SYS_DISTRIBUTION available."
+ ;;
esac
perror "Please install missing dependencies (see above) and try again!"
-
- fi
+ fi
}
initial_checks() {
@@ -138,50 +131,40 @@ initial_checks() {
else
banner
fi
- pinfo "Arch triplet of this machine: $ARCH_TRIPLET"
+ pdebug "Arch triplet of this machine: $ARCH_TRIPLET"
- # source the 2 central scripts:
+ # source the central script:
# setup_target
# - builds all modules activated under the target directory
# (basic targets are 'stage31' and 'stage32')
- # export_target
- # - pack given target as a cpio or squashfs depending on
- # the export type:
- # stage31 (cpio archive of the complete stage31-filesystem)
- # stage32 (cpio archive containing the squashfs'ed stage32-filesystem)
REMOTE_SETUP_TARGET="${ROOT_DIR}/remote/setup_target"
- SERVER_EXPORT_TARGET="${ROOT_DIR}/server/export_target"
[ ! -e "${REMOTE_SETUP_TARGET}" ] && perror "Missing script remote/setup_target. Exiting."
- [ ! -e "${SERVER_EXPORT_TARGET}" ] && perror "Missing script server/export_target. Exiting."
}
read_params() {
- local MODE=""
local SUBMODE=""
- # mltk knows two modes in which to run:
- # - remote: functions to be run on the reference system
- # from which to build the mini-linux
- # - server: functions for rsyncing the generated stage31/stage32
- # to a server, exporting the stage31-/stage32-filesystems
case "$1" in
server)
- MODE="SERVER"
- [ "$#" -lt "2" ] && perror "Missing argument to server-mode flag 'server' (expecting remote host)"
- REMOTE_IP="$2"
- shift
+ pwarning " ** WARNING: mltk has been split up into 'mltk' and 'openslx'"
+ pwarning " ** mltk now only has the old 'mltk remote' functionality, while"
+ pwarning " ** openslx provides the 'mltk server' functionality. Please"
+ pwarning " ** use 'openslx' instead of 'mltk server'."
+ pwarning " ** Run 'mltk' or 'openslx' without any arguments for help."
+ if [ -x "./openslx" ]; then
+ pwarning " ** Calling 'openslx' in 5 seconds for compatibility...."
+ sleep 5
+ shift
+ exec ./openslx $@
+ fi
;;
remote)
- MODE="REMOTE"
- ;;
- *)
- pwarning "Unknown mode: $1"
- print_usage
- check_devtools
- exit 1
+ pwarning " ** Passing 'remote' as the first argument is not required"
+ pwarning " ** anymore. mltk now only provides the 'remote' functionality"
+ pwarning " ** while the 'server' functionality has been moved to 'openslx'."
+ shift
;;
esac
- shift
# A target represents a set of modules to be build for a specific stage.
# i.e. a very basic stage31 target should includes:
@@ -197,10 +180,10 @@ read_params() {
# - kdm
# - plymouth
TARGET=""
- if [[ $1 != "-"* ]]; then
- TARGET=$1
+ if [[ "$1" != -* ]]; then
+ TARGET="$1"
shift
- elif [[ $1 != "-s" && $1 != "-n" && $1 != "-k" ]]; then
+ elif [[ "$1" != "-n" ]]; then
perror "A target is required. None given."
fi
@@ -210,63 +193,30 @@ read_params() {
shift
# options to current target
- if [[ "$PARAM" == "-"* ]]; then
+ if [[ "$PARAM" == -* ]]; then
case "$PARAM" in
- -k)
- [ "$MODE" != "SERVER" ] && perror "-k can only be used in server mode"
- # NOTE: check for validity of config is done in 'inital_checks' of server/export_target
- SERVER_CONFIG_TYPE="$1"
- SERVER_CONFIG="1"
- shift
- continue
- ;;
- -s)
- SERVER_SYNC="1"
- continue
- ;;
-c)
SUBMODE="CLEAN"
;;
-b)
- [ "$MODE" != "REMOTE" ] && perror "-b can only be used in remote mode"
SUBMODE="BUILD"
;;
-d)
- eval ${MODE}_DEBUG="1"
+ REMOTE_DEBUG=1
continue
;;
- -p)
+ -p) # Profile mode - currently not supported
[ "$#" -lt "1" ] && perror "Missing argument to -p"
- [ "$MODE" != "REMOTE" ] && perror "-p can only be used in remote mode"
- . "remote/profiles/${1}.profile" || perror "Profile '$1' not found in remote/profiles/"
+ . "remote/profiles/${PARAM}.profile" || perror "Profile '$PARAM' not found in remote/profiles/"
REMOTE_BUILD="1"
REMOTE_LIST_BUILD="$REMOTE_LIST_BUILD $MODULES"
unset MODULES
shift
continue
;;
- -e)
- [ "$#" -lt "1" ] && perror "Missing argument to -e"
- #TODO: rework these types, since now we only have initramfs or squashfs. This makes a distinction between
- # stage32 and addons obsolete.
- [[ "stage31|stage32|addons" != *"$1"* ]] && perror "Wrong type specified. Muste be either 'stage31', 'stage32' or 'addons'"
- SERVER_EXPORT="1"
- SERVER_EXPORT_TYPE="$1"
- shift
- continue
- ;;
-n)
- if [ "$MODE" == "REMOTE" ]; then
- [ "x" != "x$1" ] && perror "'-n' accepts no parameters. Given: $1"
- REMOTE_EXPORT="1"
- fi
- continue
- ;;
- -a)
- if [ "$MODE" == "REMOTE" ]; then
- [ "x" != "x$1" ] && perror "'-a' accepts no parameters. Given: $1"
- REMOTE_AUTOMATIC_BUILD="1"
- fi
+ [ "x" != "x$1" ] && perror "'-n' accepts no parameters. Given: $1"
+ REMOTE_EXPORT="1"
continue
;;
*)
@@ -275,19 +225,17 @@ read_params() {
exit 1
;;
esac
- eval ${MODE}_${SUBMODE}="1"
+ eval REMOTE_${SUBMODE}=1
continue
fi
# module name
- [[ $MODE != REMOTE ]] && pwarning "You cannot specify module names in server mode." && print_usage && exit 1
- [[ $SUBMODE != CLEAN && $SUBMODE != BUILD ]] && pwarning "Module name given in remote mode, but no action specified (eg. build)" && print_usage && exit 1
- eval "${MODE}_LIST_${SUBMODE}=\"\$${MODE}_LIST_${SUBMODE} \$PARAM\""
+ [[ "$SUBMODE" != "CLEAN" && "$SUBMODE" != "BUILD" ]] && pwarning "Module name given, but no action specified (eg. build)" && print_usage && exit 1
+ eval "REMOTE_LIST_${SUBMODE}=\"\$REMOTE_LIST_${SUBMODE} \$PARAM\""
done
-
+
# exit if no command
-
- [[ $SERVER_CLEAN == 0 && $SERVER_EXPORT == 0 && $REMOTE_CLEAN == 0 && $REMOTE_BUILD == 0 && $SERVER_SYNC == 0 && $REMOTE_EXPORT == 0 && $SERVER_CONFIG == 0 ]] && print_usage && exit 1
+ [[ "$REMOTE_CLEAN" == 0 && "$REMOTE_BUILD" == 0 && "$REMOTE_EXPORT" == 0 ]] && print_usage && exit 1
}
run() {
@@ -298,22 +246,8 @@ run() {
[[ $REMOTE_BUILD == 1 ]] && generate_target $TARGET $REMOTE_LIST_BUILD
[[ $REMOTE_EXPORT == 1 ]] && export_builds
fi
- if [[ $SERVER_CLEAN == 1 || $SERVER_EXPORT == 1 || $SERVER_SYNC == 1 || $SERVER_CONFIG == 1 ]]; then
- [[ $SERVER_DEBUG == 1 ]] && unset_quiet || set_quiet
- . "${SERVER_EXPORT_TARGET}" || perror "Cannot source ${SERVER_EXPORT_TARGET}"
- [[ $SERVER_SYNC == 1 ]] && sync_remote
- [[ $SERVER_CLEAN == 1 ]] && clean_target $TARGET
- [[ $SERVER_EXPORT == 1 ]] && export_target $TARGET $SERVER_EXPORT_TYPE
- [[ $SERVER_CONFIG == 1 ]] && generate_config $SERVER_CONFIG_TYPE
- fi
}
-SERVER_DEBUG="0"
-SERVER_EXPORT="0"
-SERVER_CLEAN="0"
-SERVER_EXPORT_TYPE=""
-SERVER_SYNC="0"
-SERVER_CONFIG="0"
REMOTE_DEBUG="0"
REMOTE_CLEAN="0"
REMOTE_BUILD="0"
@@ -327,3 +261,5 @@ read_params $@
check_devtools
run
+exit 0
+
diff --git a/openslx b/openslx
new file mode 100755
index 00000000..ca79800a
--- /dev/null
+++ b/openslx
@@ -0,0 +1,255 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+#
+# Copyright (c) 2011 - OpenSLX GmbH
+#
+# This program is free software distributed under the GPL version 2.
+# See http://openslx.org/COPYING
+#
+# If you have any feedback please consult http://openslx.org/feedback and
+# send your suggestions, praise, or complaints to feedback@openslx.org
+#
+# General information about OpenSLX can be found at http://openslx.org/
+# -----------------------------------------------------------------------------
+#
+# OpenSLX server tool for packing up files from remote machines
+#
+# -----------------------------------------------------------------------------
+
+SELF="$(readlink -f $0)"
+ROOT_DIR="$(dirname "${SELF}")"
+OPENSLX_PID="$$"
+
+qnd_exit() {
+ unset_quiet
+ kill "$OPENSLX_PID"
+ [ $# -ge 1 ] && kill "$1"
+ exit 1
+}
+
+# source all helper unit files that are found under helper/*.inc
+# these are general and used by "mltk" and "openslx"
+for HELPER in "$ROOT_DIR"/helper/*.inc; do
+ . "$HELPER" && continue
+ unset_quiet
+ echo "Could not source $HELPER"
+ qnd_exit
+done
+
+# source specific units only used by openslx
+for HELPER in "$ROOT_DIR"/server/includes/*.inc; do
+ . "$HELPER" && continue
+ unset_quiet
+ echo "Could not source $HELPER"
+ qnd_exit
+done
+
+
+banner () {
+# (
+# ( )\ )
+# ( ` ) ))\ ( ( ((_)( /(
+# )\ /(/( /((_) )\ ) )\ _ )\())
+# ((_)((_)_\ (_)) _(_/( ((_)| |((_)\
+#/ _ \| '_ \)/ -_)| ' \))(_-<| |\ \ /
+#\___/| .__/ \___||_||_| /__/|_|/_\_\
+# |_|
+# From the category: one can also overdo things
+ echo -e "\033[38;5;202m\t ( "
+ echo -e "\033[38;5;202m\t ( )\\ ) "
+ echo -e "\033[38;5;208m\t ( \` ) ))\\ ( ( ((_)( /( "
+ echo -e "\033[38;5;208m\t )\\ /(/( /((_) )\\ ) )\\ \033[38;5;220m_\033[38;5;208m )\\()) "
+ echo -e "\033[38;5;208m\t ((\033[38;5;220m_\033[38;5;208m)((\033[38;5;220m_\033[38;5;208m)\033[38;5;220m_\033[38;5;208m\\ (\033[38;5;220m_\033[38;5;208m)) \033[38;5;220m_\033[38;5;208m(\033[38;5;220m_\033[38;5;208m/( ((\033[38;5;220m_\033[38;5;208m)\033[38;5;220m| |\033[38;5;208m((\033[38;5;220m_\033[38;5;208m)\\ "
+ echo -e "\033[38;5;220m\t/ _ \\| '_ \\ \033[38;5;208m)\033[38;5;220m/ -_)| ' \\ \033[38;5;208m))\033[38;5;220m(_-<| |\\ \\ / "
+ echo -e "\033[38;5;226m\t\\___/| .__/ \\___||_||_| /__/|_|/_\\_\\ "
+ echo -e "\033[38;5;226m\t | | "
+ echo -e "\033[38;5;220m\t | | ** OpenSLX Project // 2013 **"
+ echo -e "\033[38;5;226m\t |_| http://lab.openslx.org/"
+ echo -e "\033[00m"
+}
+
+print_usage() {
+ echo -e ""
+ echo -e "Toolkit for packing mini linux generated by mltk"
+ echo -e "Usage: $(basename ${SELF}) <remotehost> [-s] [<target>] [-e cpio|sqfs] [-d] [-c] [-k config]"
+ echo -e ""
+ echo -e " Options:"
+ echo -e " -d activates debug output for the task"
+ echo -e " -e type export target as 'type'. can be 'cpio' (simple initramfs) or 'sqfs' (squashfs)"
+ echo -e " -s sync 'builds' from remote host"
+ echo -e " -c clean target in server/local_builds/ and corresponding files under boot/"
+ echo -e ""
+ echo -e " Examples:"
+ echo -e " server 1.2.3.4 -s"
+ echo -e " (sync all builds from remote system 1.2.3.4)"
+ echo -e " server 1.2.3.4 stage32 -e sqfs"
+ echo -e " (packs stage32 as squashfs from remote system 1.2.3.4)"
+ echo -e " server 1.2.3.4 stage31 -c"
+ echo -e " (clean stage31 build under server/local_builds and initramfs under server/boot)"
+ echo -e " server 1.2.3.4 addons-eexam -e sqfs -s"
+ echo -e " (pack eexam-addons as squashfs, sync from remote before packing)"
+ echo -e " server 1.2.3.4 -k <configname>"
+ echo -e " (generate config file server/boot/<remotehost>/configs/<configname>/config.tgz)"
+ echo -e ""
+ echo -e " NOTE: Use 'local' as the remote host if you're running on the same machine as mltk."
+ echo -e " (for testing only, not recommended for actual use)"
+ echo -e ""
+}
+
+
+check_devtools() {
+ # Checking for needed development tools.
+ local DEV_TOOLS="mksquashfs rsync"
+
+ # DEV_CHECK marks missing dev-tools
+ local DEV_TOOLS_MISSING=""
+ for i in $DEV_TOOLS; do
+ which "$i" 2>/dev/null 1>&2 || DEV_TOOLS_MISSING+="$i "
+ done
+
+ if [ -n "$DEV_TOOLS_MISSING" ]; then
+ pinfo "You appear to be missing following development tools."
+ pinfo "Missing tools are: $DEV_TOOLS_MISSING"
+ pinfo "For $SYS_DISTRIBUTION you probably need to run:"
+ case $SYS_DISTRIBUTION in
+ ubuntu | debian)
+ pinfo "apt-get install squashfs-tools rsync"
+ pinfo "to install missing development tools."
+ ;;
+ # FIXME: Don't know how precise this filter works so we might want to have a better matching ...
+ opensuse)
+ pinfo "zypper install squashfs rsync"
+ pinfo "to install missing development tools."
+ ;;
+ *)
+ perror "No help for $SYS_DISTRIBUTION available."
+ ;;
+ esac
+ perror "Please install missing dependencies (see above) and try again!"
+
+ fi
+}
+
+initial_checks() {
+ if [ "x$(whoami)" != "xroot" ]; then
+ perror "ERROR: You need to have root rights to install packages."
+ exit 1
+ else
+ banner
+ fi
+
+ # source the central script:
+ # export_target
+ # - pack given target as a cpio or squashfs depending on
+ # the export type:
+ # stage31 (cpio archive of the complete stage31-filesystem)
+ # stage32 (cpio archive containing the squashfs'ed stage32-filesystem)
+ SERVER_EXPORT_TARGET="${ROOT_DIR}/server/export_target"
+
+ [ ! -e "${SERVER_EXPORT_TARGET}" ] && perror "Missing script server/export_target. Exiting."
+ return 0
+}
+
+read_params() {
+ local SUBMODE=""
+
+ REMOTE_IP="$1"
+ shift
+
+ TARGET=""
+ if [[ $1 != -* ]]; then
+ TARGET="$1"
+ shift
+ elif [[ "$1" != "-s" && "$1" != "-k" ]]; then
+ perror "A target is required. None given."
+ fi
+
+ # handle rest of arguments
+ while [ "$#" -gt "0" ]; do
+ local PARAM="$1"
+ shift
+
+ # options to current target
+ if [[ "$PARAM" == "-"* ]]; then
+ case "$PARAM" in
+ -k)
+				# NOTE: check for validity of config is done in 'initial_checks' of server/export_target
+ SERVER_CONFIG_TYPE="$1"
+ SERVER_CONFIG="1"
+ shift
+ continue
+ ;;
+ -s)
+ [ "$REMOTE_IP" != "local" ] && SERVER_SYNC="1"
+ continue
+ ;;
+ -c)
+ SUBMODE="CLEAN"
+ ;;
+ -d)
+ eval SERVER_DEBUG="1"
+ continue
+ ;;
+ -e)
+ [ "$#" -lt "1" ] && perror "Missing argument to -e"
+ case "$1" in
+ cpio|stage31) # stage31 for compat
+ SERVER_EXPORT_TYPE="cpio"
+ ;;
+ sqfs|stage32|addons)
+ SERVER_EXPORT_TYPE="sqfs"
+ ;;
+ *)
+ perror "Wrong type specified. Must be either 'cpio' or 'sqfs'"
+ ;;
+ esac
+ SERVER_EXPORT="1"
+ shift
+ continue
+ ;;
+ *)
+ pwarning "Unknown flag to target: $PARAM"
+ print_usage
+ exit 1
+ ;;
+ esac
+ eval SERVER_${SUBMODE}="1"
+ continue
+ fi
+
+ pwarning "Unexpected '$PARAM'" && print_usage && exit 1
+ done
+
+ # exit if no command
+ [[ "$SERVER_CLEAN" == 0 && "$SERVER_EXPORT" == 0 && "$SERVER_SYNC" == 0 && "$SERVER_CONFIG" == 0 ]] && print_usage && exit 1
+ return 0
+}
+
+run() {
+ if [[ "$SERVER_CLEAN" == 1 || "$SERVER_EXPORT" == 1 || "$SERVER_SYNC" == 1 || "$SERVER_CONFIG" == 1 ]]; then
+ [[ "$SERVER_DEBUG" == 1 ]] && unset_quiet || set_quiet
+ . "${SERVER_EXPORT_TARGET}" || perror "Cannot source ${SERVER_EXPORT_TARGET}"
+ [[ "$SERVER_SYNC" == 1 ]] && sync_remote
+ [[ "$SERVER_CLEAN" == 1 ]] && clean_target "$TARGET"
+ [[ "$SERVER_EXPORT" == 1 ]] && export_target "$TARGET" "$SERVER_EXPORT_TYPE"
+ [[ "$SERVER_CONFIG" == 1 ]] && generate_config "$SERVER_CONFIG_TYPE"
+ pinfo "Exiting normally"
+ fi
+ return 0
+}
+
+SERVER_DEBUG="0"
+SERVER_EXPORT="0"
+SERVER_CLEAN="0"
+SERVER_EXPORT_TYPE=""
+SERVER_SYNC="0"
+SERVER_CONFIG="0"
+
+initial_checks || perror "initial_checks failed."
+read_params $@ || perror "read_params failed."
+
+check_devtools || perror "check_devtools failed."
+
+run || perror "run failed."
+exit 0
+
diff --git a/helper/binutil.inc b/remote/includes/binutil.inc
index aa793e08..906b6cc2 100644
--- a/helper/binutil.inc
+++ b/remote/includes/binutil.inc
@@ -1,18 +1,18 @@
#!/bin/bash
-#
+#
# Common functions to copy binaries and their dependancies.
#
############################################################
# This will parse the output of ldd on given binaries
# and echo the location of these libs to STDOUT
# The output of this function has to be used in some
-# way, it only echos!
+# way, it only echos!
#
# About local search:
# It is required that we can search for the dynamic
# libraries in a specific directory, namely the one
-# where we (potentially) built the binary. If a
-# corresponding library is found, it should take
+# where we (potentially) built the binary. If a
+# corresponding library is found, it should take
# precedence over ones found on the system.
# This can be done by using the '-l' switch, see below.
#
@@ -30,13 +30,13 @@ CURRENT_BLACKLIST=$(echo ${BLACKLIST} | sed 's/ /\\|/g')
############################################################
#
-# Usage:
+# Usage:
# get_dynamic_dependencies [-l <searchdir>] <binary_list>
# * the list must be seperated by spaces
# * the search for lib needed by a binary can be done locally,
# using the -l <searchdir> option
#
-# Ouput:
+# Output:
# Will simply echo list of required libraries
get_dynamic_dependencies() {
@@ -48,13 +48,13 @@ get_dynamic_dependencies() {
[ ! -d "$1" ] && perror "Directory '$1' does not exist, exiting."
LOCALSEARCHDIR="$1"
shift
- fi
-
+ fi
+
# main loop over the list of binaries
while [ $# != 0 ]; do
local BINARY="$1"
shift
-
+
# now run ldd on it and save the output in $LDD_OUT
local LDD_OUT="ldd_output"
if ldd "$BINARY" > "$LDD_OUT"; then
@@ -73,10 +73,10 @@ get_dynamic_dependencies() {
pdebug "\t\t\t(Not a dynamic.)"
rm -f "$LDD_OUT"
continue
- fi
+ fi
rm -f "$LDD_OUT"
done
-
+
}
############################################################
@@ -94,38 +94,38 @@ get_dynamic_dependencies() {
# this will search for the library in LOCALSEARCHDIR first.
# If its not found, then it will look system-wide.
lib_search() {
-
+
# if activated, start by searching the lib locally
if [ "x$LOCALSEARCH" == "x1" ]; then
cd "$LOCALSEARCHDIR"
- local LOCAL_MATCHES=$(find . -name "${LIBLINK[0]}") # | awk -F '.' '{print $1}')".so\*)
+ local LOCAL_MATCHES=$(find . -name "${LIBLINK[0]}") # | awk -F '.' '{print $1}')".so\*)
cd - >/dev/null
- if [ "x${LOCAL_MATCHES}" != "x" ]; then
- for LOCALLIB in ${LOCAL_MATCHES}; do
+ if [ "x${LOCAL_MATCHES}" != "x" ]; then
+ for LOCALLIB in ${LOCAL_MATCHES}; do
get_link_chain "${LOCALSEARCHDIR}/${LOCALLIB}" "${LOCALSEARCHDIR}"
get_dynamic_dependencies -l "${LOCALSEARCHDIR}" "${LOCALLIB}"
CURRENT_BLACKLIST+="\|${LIBLINK[0]}"
- done
+ done
# found the libs, we are done
return
- fi
+ fi
# mark local search as done
fi
-
+
# search the lib on the system since it was not found earlier
if [ ! -z ${LIBLINK[1]} ] && [ "x${LIBLINK[1]}" != "xnot" ]; then
# get chain of symlink for that lib
- get_link_chain "${LIBLINK[1]}"
+ get_link_chain "${LIBLINK[1]}"
CURRENT_BLACKLIST+="\|${LIBLINK[1]}"
- else
- pwarning "\t\tLib '${LIBLINK[0]}' from required dir '$ENTRY' neither found in build directory nor on this system."
- pwarning "\t\tIf this lib is not supplied by another module, this module will probably fail in your final system"
- fi
+ else
+ pwarning "\t\tLib '${LIBLINK[0]}' from required dir '$ENTRY' neither found in build directory nor on this system."
+ pwarning "\t\tIf this lib is not supplied by another module, this module will probably fail in your final system"
+ fi
}
############################################################
#
# Usage:
-# get_link_chain <link> [prefix]
+# get_link_chain <link> [prefix]
# * <link> must be in absolute form-
# * [prefix] is the prefix to strip from the ouput.
#
@@ -133,7 +133,7 @@ lib_search() {
# Lists the symlink chain until a hardlink is found.
#
get_link_chain() {
-
+
# sanity checks
if [[ "$1" == /* ]]; then
[ -e $1 ] || perror "get_link_chain: no such file: $1"
@@ -148,12 +148,12 @@ get_link_chain() {
# mark prefix as not set
local PREFIX="notset"
fi
-
+
# canonalize
local LINK=$(canonicalize $1)
-
+
local CHAIN="$LINK"
-
+
# write the first link in the chain
if [ "x$PREFIX" != "x" -a "x$PREFIX" != "xnotset" ]; then
if [ "x${LINK#$PREFIX}" == "x${LINK}" ]; then
@@ -162,17 +162,17 @@ get_link_chain() {
else
# prefix was in the link
echo ./"${LINK#$PREFIX}"
- fi
+ fi
else
# no prefix, copy like it is
echo "$LINK"
fi
-
+
# now we check for symlinks
local TRY=0
while [ -L "$LINK" ] && [ $TRY -lt 10 ]; do
let TRY=TRY+1
-
+
# save the directory prefix
CURRENTDIR=$(dirname "${LINK}")
# first follow the link
@@ -181,18 +181,18 @@ get_link_chain() {
# $LINK can be absolute or relative, check cases
[[ "$LINK" == /* ]] || LINK=$(canonicalize "$CURRENTDIR"/"${LINK}")
# write the first link in the chain
- if [ "x$PREFIX" != "x" -a "x$PREFIX" != "xnotset" ]; then
- if [ "x${LINK#$PREFIX}" == "x${LINK}" ]; then
- # prefix was not in the link
- echo "$LINK"
- else
- # prefix was in the link
- echo ./"${LINK#$PREFIX}"
- fi
- else
- # no prefix, copy like it is
- echo "$LINK"
- fi
+ if [ "x$PREFIX" != "x" -a "x$PREFIX" != "xnotset" ]; then
+ if [ "x${LINK#$PREFIX}" == "x${LINK}" ]; then
+ # prefix was not in the link
+ echo "$LINK"
+ else
+ # prefix was in the link
+ echo ./"${LINK#$PREFIX}"
+ fi
+ else
+ # no prefix, copy like it is
+ echo "$LINK"
+ fi
done
pdebug "\t\t$CHAIN"
}
@@ -206,9 +206,9 @@ get_link_chain() {
# - libc.so, ld-linux.so
#
list_basic_libs() {
- for i in $(ldd ${SHELL})
- do
- [ $(echo $i | grep '^/' | grep -c ld) -eq 1 -o $(echo $i | grep '^/' | grep -c libc.so) -eq 1 ] && get_link_chain $i
- done
+ for i in $(ldd ${SHELL})
+ do
+ [ $(echo $i | grep '^/' | grep -c ld) -eq 1 -o $(echo $i | grep '^/' | grep -c libc.so) -eq 1 ] && get_link_chain $i
+ done
}
diff --git a/remote/includes/distribution.inc b/remote/includes/distribution.inc
new file mode 100644
index 00000000..64190986
--- /dev/null
+++ b/remote/includes/distribution.inc
@@ -0,0 +1,53 @@
+
+
+detect_distribution () {
+ # Set up distribution and package management
+ [ -z "$SYS_DISTRIBUTION" ] && perror "SYS_DISTRIBUTION not set (should be done by helper/distribution.inc)"
+ # Then determine packet manager
+ case "$SYS_DISTRIBUTION" in
+ ubuntu)
+ PACKET_MANAGER="apt"
+ PACKET_HANDLER="dpkg"
+ detect_ubuntu_lts
+ ;;
+ debian)
+ PACKET_MANAGER="apt"
+ PACKET_HANDLER="dpkg"
+ ;;
+ opensuse)
+ PACKET_MANAGER="zypper"
+ PACKET_HANDLER="rpm"
+ ;;
+ scientific)
+ PACKET_MANAGER="yum"
+ PACKET_HANDLER="rpm"
+ ;;
+ *)
+ perror "Unknown Distribution: $SYS_DISTRIBUTION - Please specify its packet manager in remote/setup_target"
+ ;;
+ esac
+ # Get version - we mangle this quite a bit. first make sure it has no spaces, then split version at period (.), underscore (_) and dash (-)
+ SYS_VERSION=$(lsb_release -rs | tolower)
+ local VERSION=$(echo $SYS_VERSION | sed -r 's/\s//g;s/[\._]/ /g;s/-//g')
+ local STRTMP=""
+ PRINT_SYS_VERSIONS="*.conf.$SYS_DISTRIBUTION"
+ SYS_VERSIONS="$SYS_DISTRIBUTION"
+ for PART in $VERSION; do
+ [ -z "$PART" ] && continue
+ STRTMP+=".$PART"
+ SYS_VERSIONS="${SYS_DISTRIBUTION}${STRTMP} $SYS_VERSIONS"
+ PRINT_SYS_VERSIONS="*.conf.${SYS_DISTRIBUTION}${STRTMP} $PRINT_SYS_VERSIONS"
+ done
+ pinfo "Config source order: *.conf first, then the first one of these (if found)"
+ pinfo "$PRINT_SYS_VERSIONS"
+}
+
+detect_ubuntu_lts () {
+ local TMP=$(dpkg -S /usr/bin/Xorg)
+ [[ "$TMP" == xserver-xorg* ]] || perror "Could not detect xserver package version (returned: $TMP)"
+ TMP=${TMP%: *}
+ TMP=${TMP#xserver-xorg-core}
+ pinfo "Ubuntu LTS Xorg suffix: $TMP"
+ UBUNTU_XORG_PKG_SUFFIX="$TMP"
+}
+
diff --git a/helper/downloader.inc b/remote/includes/downloader.inc
index e12c8a02..e12c8a02 100644
--- a/helper/downloader.inc
+++ b/remote/includes/downloader.inc
diff --git a/helper/kernel.inc b/remote/includes/kernel.inc
index 61fa3efd..ae5791df 100644
--- a/helper/kernel.inc
+++ b/remote/includes/kernel.inc
@@ -1,4 +1,4 @@
-#
+#
# Common functions to copy kernel related files
#
############################################################
@@ -13,33 +13,30 @@
# must be given as kernel/fs/nfs/nfs.ko
#
-# this code depends on KERNEL_CURRENT_VERSION, this file needs to be sourced after helper/system.inc!
-# (TODO: maybe source system.inc if KERNEL_CURRENT_VERSION
-if [ -z "${KERNEL_CURRENT_VERSION}" ]; then
- . ${ROOT_DIR}/helper/system.inc
- [ -z "${KERNEL_CURRENT_VERSION}" ] && perror "KERNEL_CURRENT_VERSION still not set after sourcing, something is very wrong..."
-fi
+# determine kernel version
+KERNEL_CURRENT_VERSION=$(uname -r)
+[ -z "$KERNEL_CURRENT_VERSION" ] && pwarning "Could not determine kernel version."
# set global KERNEL_TARGET_NAME
KERNEL_TARGET_NAME="kernel"
check_kernel_build_dir() {
[ -d "${MODULES_DIR}/kernel/build" ] && KERNEL_BUILD_DIR="${MODULES_DIR}/kernel/build" \
- || perror "No build directory set for the kernel. Was is built?"
+		|| perror "No build directory set for the kernel. Was it built?"
# hack to get the real path of the installed modules
KERNEL_NEW_VERSION=$(ls ${KERNEL_BUILD_DIR}/lib/modules)
}
copy_kernel_modules() {
- pinfo "Copying kernel modules for kernel ${KERNEL_CURRENT_VERSION}..."
+ pinfo "Copying kernel modules for kernel ${KERNEL_CURRENT_VERSION}..."
[ -z "${REQUIRED_KERNEL_MODULES}" ] && perror "REQUIRED_KERNEL_MODULES is empty. Check your config file."
check_kernel_build_dir
#
- # process modules list
- #
+ # process modules list
+ #
# search for modules in KERNEL_BUILD_DIR
cd "${KERNEL_BUILD_DIR}" || perror "Could not cd to ${KERNEL_BUILD_DIR}"
-
+
local KERNEL_MODULES_DIR="lib/modules/${KERNEL_NEW_VERSION}"
local KERNEL_MODULES_LIST=""
local REQUIRED_KERNEL_MODULES_EXPANDED=""
@@ -57,19 +54,19 @@ copy_kernel_modules() {
done
cd -
pinfo "Expanded the list of $(echo "$REQUIRED_KERNEL_MODULES" | wc -w) required kernel modules to $(echo "$REQUIRED_KERNEL_MODULES_EXPANDED" | wc -w)"
-
- for KERNEL_MODULE in ${REQUIRED_KERNEL_MODULES_EXPANDED}; do
- local KERNEL_MODULE_PATH="${KERNEL_MODULES_DIR}/${KERNEL_MODULE}"
+
+ for KERNEL_MODULE in ${REQUIRED_KERNEL_MODULES_EXPANDED}; do
+ local KERNEL_MODULE_PATH="${KERNEL_MODULES_DIR}/${KERNEL_MODULE}"
if grep "^${KERNEL_MODULE}$" "${KERNEL_BUILD_DIR}/${KERNEL_MODULES_DIR}/modules.builtin" >/dev/null; then
pdebug "Already built-in ${KERNEL_MODULE}."
- elif [ -e "${KERNEL_MODULE_PATH}" ]; then
- pdebug "Copying '${KERNEL_MODULE_PATH}'"
- KERNEL_MODULES_LIST+=" ${KERNEL_MODULE_PATH}"
+ elif [ -e "${KERNEL_MODULE_PATH}" ]; then
+ pdebug "Copying '${KERNEL_MODULE_PATH}'"
+ KERNEL_MODULES_LIST+=" ${KERNEL_MODULE_PATH}"
else
- pwarning "Module ${KERNEL_MODULE} not found. Skipping. (might cause problems on certain clients!)"
- continue
- fi
-
+ pwarning "Module ${KERNEL_MODULE} not found. Skipping. (might cause problems on certain clients!)"
+ continue
+ fi
+
# check for dependencies
local DEPS=$(grep "${KERNEL_MODULE}:" "${KERNEL_BUILD_DIR}/${KERNEL_MODULES_DIR}/modules.dep" | cut -d ":" -f2-)
if [ ! -z "$DEPS" ]; then
@@ -80,42 +77,42 @@ copy_kernel_modules() {
else
pdebug "${KERNEL_MODULE} has no dependencies."
fi
- done
-
- if [ ! -z "${KERNEL_MODULES_LIST}" ]; then
- local COUNT=$(echo "${KERNEL_MODULES_LIST}" | wc -w)
- pinfo "Copying $COUNT modules to target directory."
- tarcopy "${KERNEL_MODULES_LIST}" "${TARGET_BUILD_DIR}"
- fi
-
+ done
+
+ if [ ! -z "${KERNEL_MODULES_LIST}" ]; then
+ local COUNT=$(echo "${KERNEL_MODULES_LIST}" | wc -w)
+ pinfo "Copying $COUNT modules to target directory."
+ tarcopy "${KERNEL_MODULES_LIST}" "${TARGET_BUILD_DIR}"
+ fi
+
#
# generate modules map files
#
- # first strip modules.order of all the modules we don't use
- cat "${KERNEL_MODULES_DIR}/modules.order" | grep -E $(echo ${REQUIRED_KERNEL_MODULES} | tr '\ ' '|') \
- >> "${TARGET_BUILD_DIR}/${KERNEL_MODULES_DIR}/modules.order"
- # copy list of builtin kernel modules
- cp "${KERNEL_MODULES_DIR}/modules.builtin" "${TARGET_BUILD_DIR}/${KERNEL_MODULES_DIR}"
- # with modules.order and modules.builtin, we can run depmod for the rest of the files
- depmod -b "${TARGET_BUILD_DIR}" -a "${KERNEL_NEW_VERSION}"
-
+ # first strip modules.order of all the modules we don't use
+ cat "${KERNEL_MODULES_DIR}/modules.order" | grep -E $(echo ${REQUIRED_KERNEL_MODULES} | tr '\ ' '|') \
+ >> "${TARGET_BUILD_DIR}/${KERNEL_MODULES_DIR}/modules.order"
+ # copy list of builtin kernel modules
+ cp "${KERNEL_MODULES_DIR}/modules.builtin" "${TARGET_BUILD_DIR}/${KERNEL_MODULES_DIR}"
+ # with modules.order and modules.builtin, we can run depmod for the rest of the files
+ depmod -b "${TARGET_BUILD_DIR}" -a "${KERNEL_NEW_VERSION}"
+
cd - >/dev/null
}
copy_firmware() {
-
+
pinfo "Copying firmware for kernel ${KERNEL_CURRENT_VERSION}..."
[ -z "${REQUIRED_FIRMWARE}" ] && perror "REQUIRED_FIRMWARE is empty. Check your config file."
local OLD_DIR=$(pwd)
check_kernel_build_dir
#
- # process firmware list
+ # process firmware list
#
cd "${KERNEL_BUILD_DIR}" || perror "Could not cd!"
- local FIRMWARE_DIR="lib/firmware"
+ local FIRMWARE_DIR="lib/firmware"
local FIRMWARE_LIST=""
- for FIRMWARE in ${REQUIRED_FIRMWARE}; do
- local FOUND=0
+ for FIRMWARE in ${REQUIRED_FIRMWARE}; do
+ local FOUND=0
# check for firmware in the build directory of the kernel
for CANDIDATE in "${FIRMWARE_DIR}/${FIRMWARE}" "${FIRMWARE_DIR}/${KERNEL_NEW_VERSION}/${FIRMWARE}"; do
if [ -e "${CANDIDATE}" ]; then
@@ -124,7 +121,7 @@ copy_firmware() {
FOUND=1
fi
done
-
+
# if we didn't found it in the kernel build directory, check for firmware in the system firmware directory
if [ $FOUND -ne 1 ]; then
for CANDIDATE in "/${FIRMWARE_DIR}/${FIRMWARE}" "/${FIRMWARE_DIR}/${KERNEL_CURRENT_VERSION}/${FIRMWARE}"; do
@@ -136,21 +133,21 @@ copy_firmware() {
pdebug "Copying from system: '${CANDIDATE}' to ${FIRMWARE_DIR}/${KERNEL_NEW_VERSION}/${FIRMWARE}"
FIRMWARE_LIST+=" /${FIRMWARE_DIR}/${KERNEL_CURRENT_VERSION}/${FIRMWARE}"
fi
- FOUND=1
+ FOUND=1
fi
done
fi
-
- [ $FOUND -ne 1 ] && pwarning "Neither '${FIRMWARE_DIR}/${FIRMWARE}' nor '${FIRMWARE_DIR}/${KERNEL_NEW_VERSION}/${FIRMWARE}' "\
- " was found on the system. Skipping. (might cause problems on certain clients!)"
- done
-
- if [ ! -z "${FIRMWARE_LIST}" ]; then
- local COUNT=$(echo "${FIRMWARE_LIST}" | wc -w)
- pinfo "Copying $COUNT firmware to target directory."
- tarcopy "${FIRMWARE_LIST}" "${TARGET_BUILD_DIR}"
- fi
-
+
+ [ $FOUND -ne 1 ] && pwarning "Neither '${FIRMWARE_DIR}/${FIRMWARE}' nor '${FIRMWARE_DIR}/${KERNEL_NEW_VERSION}/${FIRMWARE}' "\
+ " was found on the system. Skipping. (might cause problems on certain clients!)"
+ done
+
+ if [ ! -z "${FIRMWARE_LIST}" ]; then
+ local COUNT=$(echo "${FIRMWARE_LIST}" | wc -w)
+ pinfo "Copying $COUNT firmware to target directory."
+ tarcopy "${FIRMWARE_LIST}" "${TARGET_BUILD_DIR}"
+ fi
+
# post-process to fix the path of the firmwares found on the system unter /lib/firmware/$(uname -r)
# which have to be copied to /lib/firmware/${KERNEL_NEW_VERSION}
if [ -d "${TARGET_BUILD_DIR}/lib/firmware/${KERNEL_CURRENT_VERSION}" ]; then
@@ -162,14 +159,14 @@ copy_firmware() {
else
pdebug "No ${TARGET_BUILD_DIR}/lib/firmware/${KERNEL_CURRENT_VERSION} directory, skipping the merge."
fi
-
+
cd "$OLD_DIR"
}
copy_kernel() {
- check_kernel_build_dir
-
- local TOOL_STR="$TOOL_STR copy_kernel:"
+ check_kernel_build_dir
+
+ local TOOL_STR="$TOOL_STR copy_kernel:"
local KERNEL_DIR="${MODE_DIR}/builds/kernel"
pinfo "Copying '${KERNEL_TARGET_NAME}' to '${KERNEL_DIR}'."
[ -d "${KERNEL_DIR}" ] || mkdir -p "${KERNEL_DIR}"
diff --git a/helper/keyvalueutil.inc b/remote/includes/keyvalueutil.inc
index a0a89db7..a0a89db7 100644
--- a/helper/keyvalueutil.inc
+++ b/remote/includes/keyvalueutil.inc
diff --git a/remote/includes/packagemanager.inc b/remote/includes/packagemanager.inc
new file mode 100644
index 00000000..6f0d86dd
--- /dev/null
+++ b/remote/includes/packagemanager.inc
@@ -0,0 +1,138 @@
+#
+# get all files of required packages by a module
+#
+list_packet_files() {
+ [ -z "$REQUIRED_CONTENT_PACKAGES" ] && pinfo "No required packages for $TOOL" && return 1
+ local PACKAGE=""
+ for PACKAGE in $REQUIRED_CONTENT_PACKAGES; do
+ local OPTIONAL="$(echo "$PACKAGE" | cut -c 1)"
+ [ "x$OPTIONAL" = "x@" ] && PACKAGE="$(echo "$PACKAGE" | cut -c 2-)"
+ local FILES=""
+ if [ "$PACKET_HANDLER" = "dpkg" ]; then
+ PACKAGECOMMAND="dpkg -L"
+ elif [ "$PACKET_HANDLER" = "rpm" ]; then
+ PACKAGECOMMAND="rpm -ql"
+ fi
+
+ if [ -n "$REQUIRED_PACKET_FILES_BLACKLIST" ]; then
+ FILES="$($PACKAGECOMMAND "$PACKAGE" | grep "^/" | \
+ grep -v "$REQUIRED_PACKET_FILES_BLACKLIST" | \
+ grep -v -E 'share/(man|doc)|/var/run|/var/log'; \
+ echo ":###:${PIPESTATUS[0]}")"
+ else
+ FILES="$($PACKAGECOMMAND "$PACKAGE" | grep "^/" | grep -v -E 'share/(man|doc)|/var/run|/var/log'; echo ":###:${PIPESTATUS[0]}")"
+ fi
+# FILES="$(rpm -ql "$PACKAGE" | grep "^/" | grep -v -E 'share/(man|doc)|/var/run|/var/log'; echo ":###:${PIPESTATUS[0]}")"
+
+ # ugly hack to get our return value
+ local LPRET=$(echo "$FILES" | awk -F ':###:' '{printf $2}')
+ FILES=$(echo "$FILES" | awk -F ':###:' '{print $1}')
+ if [ "x$LPRET" != "x0" -a "x$OPTIONAL" != "x@" ]; then
+ pdebug "FILES: '$FILES'"
+ perror "dpkg/rpm exited with code '$LPRET' for required package ${PACKAGE}."
+ fi
+ [ "x$LPRET" != "x0" ] && pwarning "dpkg/rpm exited with code '$LPRET' for optional package ${PACKAGE}." && continue
+ [ -z "$FILES" ] && pwarning "list_packet_files empty for packet ${PACKAGE}." && continue
+ pdebug "Packet $PACKAGE has $(echo $FILES | wc -w) files..."
+ for FILE in $FILES; do
+ [ ! -d "$FILE" ] && echo "$FILE"
+ done
+ done
+}
+
+#
+# Convenience function
+#
+# install all dependencies of a module
+# goes through all package as given in the variable REQUIRED_INSTALLED_PACKAGES
+install_dependencies() {
+ [ -z "$REQUIRED_INSTALLED_PACKAGES" ] && return
+ install_packages "$REQUIRED_INSTALLED_PACKAGES"
+}
+
+#
+# install given packet through system's packet manager
+# uses PACKET_HANDLER as determined in helper/system.inc
+#
+install_packages() {
+ [ $# -eq 0 ] && perror "Sanity check failed: no argument given to install_package"
+ local PACKAGE_LIST="$@"
+ local INSTALLED_PACKAGES=""
+
+ for PKG in ${PACKAGE_LIST}; do
+ # check if installed
+ if [ "x$PACKET_HANDLER" == "xdpkg" ]; then
+ dpkg -l ${PKG} > /dev/null 2>&1
+ elif [ "x$PACKET_HANDLER" == "xrpm" ]; then
+ rpm -ql ${PKG} > /dev/null 2>&1
+ else
+ perror "No packet manager / handler determined, this should not happen!"
+ fi
+
+ local LRET=$?
+ if [ "x$LRET" == "x0" ]; then
+			# check if it is completely installed,
+ # not just leftover configuration files
+ local INSTALL_STATUS=$(dpkg -l $PKG | grep $PKG | cut -c1-2)
+ if [[ $INSTALL_STATUS != "ii" ]]; then
+ pinfo "$PKG not installed!"
+ install_package $PKG
+ else
+ # package installed
+ pdebug "$PKG installed!"
+ fi
+ else
+ # package not installed
+ pdebug "$PKG not installed!"
+ install_package $PKG
+ fi
+ done
+ [ ! -z "$INSTALLED_PACKAGES" ] && pinfo "Packages installed: ${INSTALLED_PACKAGES}"
+}
+
+#
+# install individual package depending on package manager
+#
+install_package() {
+ if [ "$#" -ne 1 ]; then
+ perror "Only call install_package with one argument!"
+ fi
+
+ if [ "x$PACKET_MANAGER" == "xapt" ]; then
+ apt-get install -y ${PKG}
+ local IRET=$?
+ if [ "x$IRET" == "x0" ]; then
+			# $PKG was installed successfully
+ INSTALLED_PACKAGES+="$PKG "
+ else
+ # PKG was not installed
+ # TODO error handling
+ perror "install_packages: apt-get failed with '$IRET' for package '$PKG'"
+ fi
+ elif [ "x$PACKET_MANAGER" == "xzypper" ]; then
+ zypper --no-refresh --non-interactive install ${PKG}
+ local IRET=$?
+ if [ "x$IRET" == "x0" ]; then
+			# $PKG was installed successfully
+ INSTALLED_PACKAGES+="$PKG "
+ else
+ # PKG was not installed
+ # TODO error handling
+ perror "install_packages: zypper failed with '$IRET' for package '$PKG'"
+ fi
+ elif [ "x$PACKET_MANAGER" == "xyum" ]; then
+ yum --assumeyes install ${PKG}
+ local IRET=$?
+ if [ "x$IRET" == "x0" ]; then
+			# $PKG was installed successfully
+ INSTALLED_PACKAGES+="$PKG "
+ else
+ # PKG was not installed
+ # TODO error handling
+ perror "install_packages: yum failed with '$IRET' for package '$PKG'"
+ fi
+ else
+ perror "No packet manager determined, this should not happen!"
+ fi
+}
+
diff --git a/helper/system.inc b/remote/includes/system.inc
index 806799ad..bcdaed23 100644
--- a/helper/system.inc
+++ b/remote/includes/system.inc
@@ -11,10 +11,6 @@ ARCH_TRIPLET=$(ldd $SHELL|grep "libc.so" | awk -F "/" '{print $3}')
ARCH_LIB_DIR=$(ldd $SHELL | grep "libc.so" | sed -r 's#^.*(/lib.*)/libc.so.*$#\1#g')
[ -z "$ARCH_LIB_DIR" -o ! -d "$ARCH_LIB_DIR" ] && perror "Could not determine arch dependent lib dir (where libc.so resides)"
-# determine kernel version
-KERNEL_CURRENT_VERSION=$(uname -r)
-[ ! -z $KERNEL_CURRENT_VERSION ] || pwarning "Could not determine kernel version."
-
# determine number of CPU cores
CPU_CORES=$(cat /proc/cpuinfo | grep processor | wc -l)
export MAKEFLAGS="-j$CPU_CORES"
@@ -22,7 +18,6 @@ export MAKEFLAGS="-j$CPU_CORES"
# Determine if we have lib64
if [ "$(uname -m)x" = "x86_64x" ]; then
# Setting LIB64, as openSuse differentiates but Ubuntu does not:
- SYS_DISTRIBUTION=$(lsb_release -is | tr '[A-Z]' '[a-z]' | sed -r 's/[^a-z0-9]//g;s/project$//g;s/scientificsl$/scientific/g')
case $SYS_DISTRIBUTION in
ubuntu | debian) LIB64="lib" ;;
opensuse) LIB64="lib64" ;;
diff --git a/helper/useradd.inc b/remote/includes/useradd.inc
index 1e35c571..352da606 100644
--- a/helper/useradd.inc
+++ b/remote/includes/useradd.inc
@@ -1,3 +1,8 @@
+# This helper is stupid because it reimplements what useradd etc. do.
+# They could do the job just fine by using the -R option - maybe change this
+# some day.
+#
+
# Add a user to the system
#
# Usage:
diff --git a/remote/setup_target b/remote/setup_target
index 8f9b9708..1d5fb921 100755
--- a/remote/setup_target
+++ b/remote/setup_target
@@ -181,7 +181,6 @@ copy_files_with_deps () {
fi
for FILE in $FINAL_LIST; do
pdebug "* $FILE"
- strip "$FILE" || pdebug "Could not strip '${FILE}'"
get_link_chain "${MODULE_BUILD_DIR}/${FILE}" "${MODULE_BUILD_DIR}" >> "${COPYFILES_LIST}"
get_dynamic_dependencies -l "${MODULE_BUILD_DIR}" "${FILE}" >> "${COPYFILES_LIST}"
done
@@ -195,7 +194,6 @@ copy_files_with_deps () {
[ -z "$FILE_CANDIDATES" ] && perror "Cannot find required library $LIB"
for LOCATION in $FILE_CANDIDATES; do
pdebug "* $LOCATION"
- strip "$LOCATION" || pdebug "Could not strip '${LOCATION}'"
get_link_chain "${MODULE_BUILD_DIR}/${LOCATION}" "${MODULE_BUILD_DIR}" >> "${COPYFILES_LIST}"
get_dynamic_dependencies -l "${MODULE_BUILD_DIR}" "${LOCATION}" >> "${COPYFILES_LIST}"
done
@@ -212,7 +210,6 @@ copy_files_with_deps () {
echo "${ENTRY}" >> "${COPYFILES_LIST}"
for BIN in $(find "${ENTRY}" -type f -a \( -executable -o -name '*.so*' \) -a -not -name '*.a'); do
#pdebug "\tSearching libs for ${BIN}..."
- strip "$BIN" || pdebug "Could not strip '${LOCATION}'"
get_link_chain "${MODULE_BUILD_DIR}/${BIN}" "${MODULE_BUILD_DIR}" >> "${COPYFILES_LIST}"
get_dynamic_dependencies -l "${MODULE_BUILD_DIR}" "${BIN}" >> "${COPYFILES_LIST}"
done
@@ -398,6 +395,7 @@ process_module() {
pinfo "## Building"
cd "${MODULE_DIR}" || perror "cd to '${MODULE_DIR}' failed."
build # calls perror if something fails, no need to do that here
+ strip_recursive "$MODULE_BUILD_DIR"
touch "$BUILD_FLAG" || pwarning "Error setting built-flag"
fi
# Remove *.la files as they might confuse libtool/linker of other tool packages
@@ -548,3 +546,44 @@ clean_kernel_module() {
fi
pinfo "Done cleaning kernel."
}
+
+# Recursively strip binaries and libraries in the given directory
+strip_recursive() {
+ local DIR="$1"
+ [ -n "$DIR" -a -d "$DIR" ] || perror "strip_recursive(): No such directory: '$DIR'"
+ # Will try to strip shell scripts too but shouldn't do any harm
+ find "$DIR" -type f -a \( -executable -o -name "*.so*" \) -exec strip {} \; 2> /dev/null
+}
+
+# copies static data files from <MODULE>/data/ to <TARGET_BUILD_DIR>
+copy_static_data() {
+ [ ! -d "${MODULE_DIR}/data" ] && pinfo "${MODULE} has no static 'data' directory." && return
+ cp -r "${MODULE_DIR}/data/"* ${TARGET_BUILD_DIR} || perror "Could not copy static data of ${MODULE}"
+}
+
+# Copies files with their absolute paths in $REQUIRED_SYSTEM_FILES to $TARGET_BUILD_DIR
+copy_system_files() {
+ [ ! -z "$REQUIRED_SYSTEM_FILES" ] && tarcopy "$REQUIRED_SYSTEM_FILES" "$TARGET_BUILD_DIR"
+}
+
+# Tries to calculate the size of modules - doesn't seem to work all the time
+calc_size() {
+
+ local CURRENT_BUILD_SIZE=$(du -bc "${TARGET_BUILD_DIR}" | awk 'END {print $1}')
+
+ [ ! -z "${BUILD_SIZE[$MODULE]}" ] && local OLD_MODULE_SIZE=${BUILD_SIZE[$MODULE]} || local OLD_MODULE_SIZE=0
+ local diff=$((CURRENT_BUILD_SIZE-TARGET_BUILD_SIZE+OLD_MODULE_SIZE))
+
+ if [ -z "${BUILD_SIZE[$MODULE]}" ]; then
+ echo "BUILD_SIZE[$MODULE]=${diff}" >> "${ROOT_DIR}/logs/${TARGET}.size"
+ else
+ sed -i "s/^BUILD_SIZE\[${MODULE}\]=.*$/BUILD_SIZE\[${MODULE}\]=${diff}/g" "${ROOT_DIR}/logs/${TARGET}.size"
+ fi
+
+ MODULE_BUILD_SIZE=$(echo $diff | awk '{ sum=$1; hum[1024^3]="GB"; hum[1024^2]="MB"; hum[1024]="KB";
+ for (x=1024^3; x>=1024; x/=1024){
+ if (sum>=x) { printf "%.2f %s\n",sum/x,hum[x]; break }
+ }
+ }')
+}
+
diff --git a/server/export_target b/server/export_target
index 906c5cdc..c5fae7f4 100755
--- a/server/export_target
+++ b/server/export_target
@@ -39,7 +39,7 @@ copy_kernel() {
pinfo "Copying kernel from ${REMOTE_IP} to ${SERVER_BOOT_DIR}/kernel/"
if [ -d ${SERVER_BUILD_DIR}/kernel ]; then
cd ${SERVER_BUILD_DIR}
- tarcopy kernel ${SERVER_BOOT_DIR}
+ tarcopy "kernel" "${SERVER_BOOT_DIR}"
cd - &> /dev/null
fi
}
@@ -59,27 +59,6 @@ sync_remote() {
fi
}
-generate_stage32() {
- local TOOL_STR="${TOOL_STR} generate_stage32:"
- rm -f "${SERVER_BOOT_DIR}/${TARGET}.sqfs"
- pinfo "Writing '${TARGET}.sqfs' to '${SERVER_BOOT_DIR}/${TARGET}.sqfs'"
- mksquashfs "${SERVER_BUILD_DIR}/${TARGET}/" "${SERVER_BOOT_DIR}/${TARGET}.sqfs" -comp xz -b 1M -no-recovery >&6 || perror "mksquashfs failed ($?)."
- pinfo "Created '${SERVER_BOOT_DIR}/${TARGET}.sqfs'."
-}
-
-generate_stage31() {
- local TOOL_STR="${TOOL_STR} generate_stage31:"
- pinfo "Writing 'initramfs-${TARGET}' to '${SERVER_BOOT_DIR}'"
- generate_initramfs "${SERVER_BUILD_DIR}/${TARGET}" "." "${SERVER_BOOT_DIR}/initramfs-${TARGET}"
-}
-
-generate_addons() {
- local TOOL_STR="${TOOL_STR} generate_addons:"
- pinfo "Writing '${TARGET}.sqfs' to '${SERVER_BOOT_DIR}/${TARGET}.sqfs'"
- [ -e "${SERVER_BOOT_DIR}/${TARGET}.sqfs" ] && rm "${SERVER_BOOT_DIR}/${TARGET}.sqfs"
- mksquashfs "${SERVER_BUILD_DIR}/${TARGET}/" "${SERVER_BOOT_DIR}/${TARGET}.sqfs" -comp xz -b 1M -no-recovery >&6 || perror "mksquashfs failed ($?)."
-}
-
generate_config() {
# generate config from the target directory
local TOOL_STR="${TOOL_STR} generate_config:"
@@ -159,19 +138,24 @@ export_target() {
copy_kernel
TARGET=$1
- [ -d ${SERVER_BUILD_DIR}/${TARGET} ] || perror "Given target directory does not exist: ${SERVER_BUILD_DIR}/${TARGET}"
+ [ -z "$TARGET" ] && perror "No target passed to export_target()"
+ [ -d "${SERVER_BUILD_DIR}/${TARGET}" ] || perror "Given target directory does not exist: ${SERVER_BUILD_DIR}/${TARGET}"
case "$2" in
- stage31)
- generate_stage31
+ cpio)
+ local TOOL_STR="${TOOL_STR} generate_initramfs:"
+ pinfo "Writing 'initramfs-${TARGET}' to '${SERVER_BOOT_DIR}/'"
+ generate_initramfs "${SERVER_BUILD_DIR}/${TARGET}/" "." "${SERVER_BOOT_DIR}/initramfs-${TARGET}"
;;
- stage32)
- generate_stage32
+ sqfs)
+ local TOOL_STR="${TOOL_STR} generate_squashfs:"
+ pinfo "Creating '${TARGET}.sqfs' in '${SERVER_BOOT_DIR}/'"
+ generate_squashfs "${SERVER_BUILD_DIR}/${TARGET}/" "${SERVER_BOOT_DIR}/${TARGET}.sqfs"
;;
- addons)
- generate_addons
+ *)
+ perror "Invalid export format: $2"
;;
- esac
+ esac
}
diff --git a/server/includes/packing.inc b/server/includes/packing.inc
new file mode 100644
index 00000000..84b47262
--- /dev/null
+++ b/server/includes/packing.inc
@@ -0,0 +1,35 @@
+
+#
+# generate initramfs of directory
+# usage:
+# generate_initramfs <source_dir> <files> <destination_dir/filename>
+# example:
+# generate_initramfs "./server/boot/stage32_sqfs" "./mnt/openslx.sqfs" "./server/boot/initramfs2"
+# generate_initramfs "./server/build/stage31" "." "./server/boot/initramfs"
+generate_initramfs() {
+	[ $# -ne 3 ] && perror "Sanity check failed: generate_initramfs needs exactly three params, but $# were given."
+ cd "$1" || perror "Cannot cd to '$1'"
+ rm -f -- "$3"
+
+ find $2 | cpio --format="newc" --create | gzip -9 > "$3"
+ local PS=(${PIPESTATUS[*]})
+ [ "x${PS[0]}" != "x0" ] && perror "'find $2' in '$(pwd)' failed."
+ [ "x${PS[1]}" != "x0" ] && perror "cpio create failed."
+ [ "x${PS[2]}" != "x0" ] && perror "gzip to '$3' failed."
+ cd - > /dev/null
+ pinfo "Created initramfs of $1 at $3"
+ pinfo "Size: $(du -bsh "$3" | awk 'END {print $1}')"
+}
+
+# generates squashfs of directory
+# usage:
+# generate_squashfs <source_dir> <destination_dir/filename>
+generate_squashfs() {
+ [ $# -ne 2 ] && perror "Sanity check failed: generate_squashfs needs exactly two params, but $# were given."
+ [ -d "$1" ] || perror "$1 is not a directory."
+ mksquashfs "$1" "$2" -comp xz -b 1M -no-recovery >&6 \
+ || perror "mksquashfs failed ($?)."
+ pinfo "Created squashfs of $1 at $2"
+ pinfo "Size: $(du -bsh "$2" | awk 'END {print $1}')"
+}
+