From c66c0a0cb85e5b690d4f631d8b714b5c86548638 Mon Sep 17 00:00:00 2001 From: Lars Erik Wik Date: Thu, 19 Mar 2026 15:32:48 +0100 Subject: [PATCH] Added container-based CFEngine package builder Introduced build-in-container, a Python/Docker-based build system that builds CFEngine packages inside containers using the existing build scripts. Supports incremental builds via named Docker volumes, interactive configuration with persistent defaults, and per-platform dependency caching. Ticket: ENT-13777 Signed-off-by: Lars Erik Wik --- build-in-container | 467 ++++++++++++++++++++++++++++++++++++ build-in-container-inner | 183 ++++++++++++++ build-in-container.md | 164 +++++++++++++ container/Dockerfile.debian | 47 ++++ 4 files changed, 861 insertions(+) create mode 100755 build-in-container create mode 100755 build-in-container-inner create mode 100644 build-in-container.md create mode 100644 container/Dockerfile.debian diff --git a/build-in-container b/build-in-container new file mode 100755 index 000000000..37d21a301 --- /dev/null +++ b/build-in-container @@ -0,0 +1,467 @@ +#!/usr/bin/env python3 +"""Container-based CFEngine package builder. + +Builds CFEngine packages inside Docker containers using the existing build +scripts. Supports incremental builds via named Docker volumes and interactive +configuration with persistent defaults. 
+""" + +import argparse +import hashlib +import json +import logging +import os +import subprocess +import sys +from pathlib import Path + +log = logging.getLogger("build-in-container") + +PLATFORMS = { + "ubuntu-20": { + "base_image": "ubuntu:20.04", + "dockerfile": "Dockerfile.debian", + "extra_build_args": {"NCURSES_PKGS": "libncurses5 libncurses5-dev"}, + }, + "ubuntu-22": { + "base_image": "ubuntu:22.04", + "dockerfile": "Dockerfile.debian", + "extra_build_args": {}, + }, + "ubuntu-24": { + "base_image": "ubuntu:24.04", + "dockerfile": "Dockerfile.debian", + "extra_build_args": {}, + }, + "debian-11": { + "base_image": "debian:11", + "dockerfile": "Dockerfile.debian", + "extra_build_args": {}, + }, + "debian-12": { + "base_image": "debian:12", + "dockerfile": "Dockerfile.debian", + "extra_build_args": {}, + }, +} + +CONFIG_DIR = Path.home() / ".config" / "build-in-container" +CONFIG_FILE = CONFIG_DIR / "last-config.json" + +HARDCODED_DEFAULTS = { + "platform": "ubuntu-20", + "project": "community", + "role": "agent", + "build_type": "DEBUG", +} + +VALID_STEPS = [ + "autogen", + "install-dependencies", + "mission-portal-deps", + "configure", + "compile", + "package", +] + + +def load_last_config(): + """Load last-used config, returning {} on missing/corrupt file.""" + try: + return json.loads(CONFIG_FILE.read_text()) + except (FileNotFoundError, json.JSONDecodeError): + return {} + + +def save_last_config(config): + """Persist the resolved config for next run.""" + CONFIG_DIR.mkdir(parents=True, exist_ok=True) + CONFIG_FILE.write_text(json.dumps(config, indent=2) + "\n") + + +def prompt_choice(name, choices, default=None): + """Prompt the user to pick from a list of choices.""" + choices_str = ", ".join(choices) + prompt = f"{name} [{choices_str}]" + if default: + prompt += f" ({default})" + prompt += ": " + while True: + value = input(prompt).strip() + if not value and default: + return default + if value in choices: + return value + print(f" Invalid choice 
'{value}'. Options: {choices_str}") + + +def detect_source_dir(): + """Find the root directory containing all repos (parent of buildscripts/).""" + script_dir = Path(__file__).resolve().parent + # The script lives in buildscripts/, so the source dir is one level up + source_dir = script_dir.parent + if not (source_dir / "buildscripts").is_dir(): + log.error(f"Cannot find buildscripts/ in {source_dir}") + sys.exit(1) + return source_dir + + +def prompt_config(args, source_dir): + """Fill in missing config options interactively, then save.""" + last = load_last_config() + interactive = not args.non_interactive and sys.stdin.isatty() + + # Platform + if args.platform is None: + default = last.get("platform", HARDCODED_DEFAULTS["platform"]) + if interactive: + args.platform = prompt_choice("Platform", list(PLATFORMS.keys()), default) + else: + args.platform = default + + # Project + if args.project is None: + default = last.get("project", HARDCODED_DEFAULTS["project"]) + if interactive: + args.project = prompt_choice("Project", ["community", "nova"], default) + else: + args.project = default + + # Role + if args.role is None: + default = last.get("role", HARDCODED_DEFAULTS["role"]) + if interactive: + args.role = prompt_choice("Role", ["agent", "hub"], default) + else: + args.role = default + + # Build type + if args.build_type is None: + default = last.get("build_type", HARDCODED_DEFAULTS["build_type"]) + if interactive: + args.build_type = prompt_choice( + "Build type", ["DEBUG", "RELEASE"], default + ) + else: + args.build_type = default + + # Save resolved config + save_last_config( + { + "platform": args.platform, + "project": args.project, + "role": args.role, + "build_type": args.build_type, + } + ) + + +def dockerfile_hash(dockerfile_path): + """Compute SHA256 hash of a Dockerfile.""" + return hashlib.sha256(dockerfile_path.read_bytes()).hexdigest() + + +def image_needs_rebuild(image_tag, current_hash): + """Check if the Docker image needs rebuilding based on 
Dockerfile hash.""" + result = subprocess.run( + ["docker", "inspect", "--format", "{{index .Config.Labels \"dockerfile-hash\"}}", image_tag], + capture_output=True, + text=True, + ) + if result.returncode != 0: + return True # Image doesn't exist + stored_hash = result.stdout.strip() + return stored_hash != current_hash + + +def build_image(platform_name, platform_config, script_dir, rebuild=False): + """Build the Docker image for the given platform.""" + image_tag = f"cfengine-builder-{platform_name}" + dockerfile_name = platform_config["dockerfile"] + dockerfile_path = script_dir / "container" / dockerfile_name + current_hash = dockerfile_hash(dockerfile_path) + + if not rebuild and not image_needs_rebuild(image_tag, current_hash): + log.info(f"Docker image {image_tag} is up to date.") + return image_tag + + log.info(f"Building Docker image {image_tag}...") + cmd = [ + "docker", + "build", + "-f", + str(dockerfile_path), + "--build-arg", + f"BASE_IMAGE={platform_config['base_image']}", + "--build-arg", + f"HOST_UID={os.getuid()}", + "--build-arg", + f"HOST_GID={os.getgid()}", + "--label", + f"dockerfile-hash={current_hash}", + "-t", + image_tag, + ] + + for key, value in platform_config.get("extra_build_args", {}).items(): + cmd.extend(["--build-arg", f"{key}={value}"]) + + if rebuild: + cmd.append("--no-cache") + + cmd.extend(["--network", "host"]) + + # Build context is the container/ directory + cmd.append(str(script_dir / "container")) + + result = subprocess.run(cmd) + if result.returncode != 0: + log.error("Docker image build failed.") + sys.exit(1) + + return image_tag + + +def run_container(args, image_tag, source_dir, script_dir): + """Run the build inside a Docker container.""" + volume_name = f"cfengine-build-{args.platform}" + output_dir = Path(args.output_dir).resolve() + cache_dir = Path(args.cache_dir).resolve() + + # Pre-create host directories so Docker doesn't create them as root + output_dir.mkdir(parents=True, exist_ok=True) + 
cache_dir.mkdir(parents=True, exist_ok=True) + + cmd = ["docker", "run", "--rm", "--network", "host"] + + if args.shell: + cmd.extend(["-it"]) + + # Mounts + # Two named volumes persist build state across container restarts: + # - build volume: source copies, step markers, compiled objects + # - prefix volume: installed dependencies at /var/cfengine (BUILDPREFIX) + prefix_volume = f"cfengine-prefix-{args.platform}" + cmd.extend([ + "-v", f"{source_dir}:/srv/source:ro", + "--mount", f"type=volume,src={volume_name},dst=/home/builder/build", + "--mount", f"type=volume,src={prefix_volume},dst=/var/cfengine", + "-v", f"{cache_dir}:/home/builder/.cache/buildscripts_cache", + "-v", f"{output_dir}:/output", + ]) + + # Environment variables + # JOB_BASE_NAME is used by deps-packaging/pkg-cache to derive the cache + # label. Format: "label=". Without it, all platforms share NO_LABEL. + cache_label = f"label=container_{args.platform}" + cmd.extend([ + "-e", f"PROJECT={args.project}", + "-e", f"BUILD_TYPE={args.build_type}", + "-e", f"EXPLICIT_ROLE={args.role}", + "-e", f"BUILD_NUMBER={args.build_number}", + "-e", f"JOB_BASE_NAME={cache_label}", + "-e", "CACHE_IS_ONLY_LOCAL=yes", + ]) + + if args.version: + cmd.extend(["-e", f"EXPLICIT_VERSION={args.version}"]) + + if args.step: + cmd.extend(["-e", f"RESTART_FROM_STEP={args.step}"]) + + cmd.append(image_tag) + + if args.shell: + cmd.append("/bin/bash") + else: + cmd.append(str(Path("/srv/source/buildscripts/build-in-container-inner"))) + + result = subprocess.run(cmd) + return result.returncode + + +def remove_volume(volume_name): + """Remove a single Docker volume.""" + result = subprocess.run( + ["docker", "volume", "rm", volume_name], + capture_output=True, + text=True, + ) + if result.returncode == 0: + log.info(f" Removed {volume_name}.") + elif "No such volume" in result.stderr: + log.info(f" {volume_name} does not exist (nothing to clean).") + else: + log.warning(f" Failed to remove {volume_name}: 
{result.stderr.strip()}") + + +def clean_volume(platform_name): + """Remove the named build volumes for a platform.""" + log.info(f"Removing build volumes for {platform_name}...") + remove_volume(f"cfengine-build-{platform_name}") + remove_volume(f"cfengine-prefix-{platform_name}") + + +def main(): + parser = argparse.ArgumentParser( + description="Build CFEngine packages in Docker containers." + ) + parser.add_argument( + "--platform", + choices=list(PLATFORMS.keys()), + help="Target platform", + ) + parser.add_argument( + "--project", + choices=["community", "nova"], + help="CFEngine edition (default: auto-detect from source dirs)", + ) + parser.add_argument( + "--role", + choices=["agent", "hub"], + help="Component to build (default: agent)", + ) + parser.add_argument( + "--build-type", + dest="build_type", + choices=["DEBUG", "RELEASE"], + help="Build type (default: DEBUG)", + ) + parser.add_argument( + "--source-dir", + help="Root directory containing repos (default: auto-detect)", + ) + parser.add_argument( + "--output-dir", + default="./output", + help="Output directory for packages (default: ./output)", + ) + parser.add_argument( + "--cache-dir", + default=str(Path.home() / ".cache" / "buildscripts_cache"), + help="Dependency cache directory", + ) + parser.add_argument( + "--rebuild-image", + action="store_true", + help="Force rebuild of Docker image (--no-cache)", + ) + parser.add_argument( + "--shell", + action="store_true", + help="Drop into container shell for debugging", + ) + parser.add_argument( + "--list-platforms", + action="store_true", + help="List available platforms and exit", + ) + parser.add_argument( + "--build-number", + default="1", + help="Build number for package versioning (default: 1)", + ) + parser.add_argument( + "--version", + help="Override version string", + ) + parser.add_argument( + "--step", + choices=VALID_STEPS, + help="Re-run from a specific build step", + ) + parser.add_argument( + "--clean", + action="store_true", + 
help="Remove build volume and start from scratch", + ) + parser.add_argument( + "--non-interactive", + action="store_true", + help="Never prompt; use defaults for unspecified options", + ) + + args = parser.parse_args() + + logging.basicConfig( + level=logging.INFO, + format="%(message)s", + ) + + # --list-platforms: print and exit + if args.list_platforms: + print("Available platforms:") + for name, config in PLATFORMS.items(): + print(f" {name:15s} ({config['base_image']})") + sys.exit(0) + + # Detect source directory + if args.source_dir: + source_dir = Path(args.source_dir).resolve() + else: + source_dir = detect_source_dir() + + script_dir = source_dir / "buildscripts" + + # Interactive configuration + if args.shell: + # Shell mode only needs platform + last = load_last_config() + if args.platform is None: + interactive = not args.non_interactive and sys.stdin.isatty() + default = last.get("platform", HARDCODED_DEFAULTS["platform"]) + if interactive: + args.platform = prompt_choice( + "Platform", list(PLATFORMS.keys()), default + ) + else: + args.platform = default + # Fill in defaults for the rest (needed for env vars) + if args.project is None: + args.project = last.get("project", HARDCODED_DEFAULTS["project"]) + if args.role is None: + args.role = last.get("role", HARDCODED_DEFAULTS["role"]) + if args.build_type is None: + args.build_type = last.get("build_type", HARDCODED_DEFAULTS["build_type"]) + else: + prompt_config(args, source_dir) + + # Validate platform + if args.platform not in PLATFORMS: + log.error(f"Unknown platform '{args.platform}'") + sys.exit(1) + + platform_config = PLATFORMS[args.platform] + + # Handle --clean + if args.clean: + clean_volume(args.platform) + + # Build Docker image + image_tag = build_image( + args.platform, platform_config, script_dir, rebuild=args.rebuild_image + ) + + if not args.shell: + log.info(f"Building {args.project} {args.role} for {args.platform} ({args.build_type})...") + + # Run the container + rc = 
run_container(args, image_tag, source_dir, script_dir) + + if rc != 0: + log.error(f"Build failed (exit code {rc}).") + sys.exit(rc) + + if not args.shell: + output_dir = Path(args.output_dir).resolve() + packages = list(output_dir.glob("*.deb")) + list(output_dir.glob("*.rpm")) + list(output_dir.glob("*.pkg.tar.gz")) + if packages: + log.info("Output packages:") + for p in sorted(packages): + log.info(f" {p}") + else: + log.warning("No packages found in output directory.") + + +if __name__ == "__main__": + main() diff --git a/build-in-container-inner b/build-in-container-inner new file mode 100755 index 000000000..8d8d5624e --- /dev/null +++ b/build-in-container-inner @@ -0,0 +1,183 @@ +#!/bin/bash +set -e + +# Configuration via environment variables: +# PROJECT, BUILD_TYPE, EXPLICIT_ROLE, BUILD_NUMBER, EXPLICIT_VERSION +# RESTART_FROM_STEP - if set, clear this step's marker and all subsequent ones + +BASEDIR=/home/builder/build +STEP_DIR="$BASEDIR/.build-steps" +export BASEDIR +export AUTOBUILD_PATH="$BASEDIR/buildscripts" + +mkdir -p "$BASEDIR" "$STEP_DIR" + +# The bind mount for the dep cache may create ~/.cache/ as root. +# Fix ownership so we can create subdirectories (composer, npm, etc). +sudo chown "$(id -u):$(id -g)" "$HOME/.cache" + +# Prevent git "dubious ownership" errors +git config --global --add safe.directory '*' + +# === Auto-clean on config change === +CURRENT_CONFIG="PROJECT=$PROJECT BUILD_TYPE=$BUILD_TYPE ROLE=$EXPLICIT_ROLE" +CONFIG_FILE="$BASEDIR/.build-config" +if [ -f "$CONFIG_FILE" ]; then + PREV_CONFIG=$(cat "$CONFIG_FILE") + if [ "$PREV_CONFIG" != "$CURRENT_CONFIG" ]; then + echo "=== Config changed, cleaning build directory ===" + echo " was: $PREV_CONFIG" + echo " now: $CURRENT_CONFIG" + # Remove all build artifacts (repos will be re-synced from source mount). + # Keep only .build-steps and .build-config themselves. + find "$BASEDIR" -mindepth 1 -maxdepth 1 \ + ! -name '.build-steps' ! 
-name '.build-config' \ + -exec rm -rf {} + + rm -f "$STEP_DIR"/*.done + fi +fi +echo "$CURRENT_CONFIG" > "$CONFIG_FILE" + +# === Sync source repos (always, to pick up fixes) === +repos="buildscripts core masterfiles" +if [ "$PROJECT" = "nova" ]; then + repos="$repos enterprise nova" + if [ "$EXPLICIT_ROLE" = "hub" ]; then + repos="$repos mission-portal" + fi +fi + +for repo in $repos; do + src="/srv/source/$repo" + # Use rsync -aL to follow symlinks during copy. + # The source dir may use symlinks (e.g., core -> cfengine/core/). + # -L resolves them at copy time, so the destination gets real files + # regardless of the host directory layout. + if [ -d "$src" ] || [ -L "$src" ]; then + echo "Syncing $repo..." + rsync -aL --exclude='config.cache' "$src/" "$BASEDIR/$repo/" + else + echo "ERROR: Required repository $repo not found" >&2 + exit 1 + fi +done + +# === Step runner with failure reporting === +# Disable set -e so we can capture exit codes and report which step failed. +set +e +run_step() { + local name="$1" + shift + if [ -f "$STEP_DIR/$name.done" ]; then + echo "=== Skipping $name (already completed) ===" + return 0 + fi + echo "=== Running $name ===" + "$@" + local rc=$? + if [ $rc -eq 0 ]; then + touch "$STEP_DIR/$name.done" + else + echo "" + echo "=== FAILED: $name (exit code $rc) ===" + echo "Re-run the same command to retry from this step." + echo "Use --clean to start from scratch." + exit $rc + fi +} + +# === Handle --step (restart from a specific step) === +# RESTART_FROM_STEP is passed by the Python orchestrator when --step is used. +# It maps friendly names to step prefixes. When set, we remove the marker for +# that step and all subsequent ones so the build re-runs from that point. 
+if [ -n "$RESTART_FROM_STEP" ]; then + STEP_MAP="autogen:01 install-dependencies:03 mission-portal-deps:04 configure:05 compile:06 package:07" + STEP_NUM="" + for entry in $STEP_MAP; do + name="${entry%%:*}" + num="${entry##*:}" + if [ "$name" = "$RESTART_FROM_STEP" ]; then + STEP_NUM="$num" + break + fi + done + if [ -z "$STEP_NUM" ]; then + echo "ERROR: Unknown step '$RESTART_FROM_STEP'" >&2 + echo "Valid steps: autogen, install-dependencies, mission-portal-deps, configure, compile, package" >&2 + exit 1 + fi + echo "=== Restarting from step $RESTART_FROM_STEP ===" + for marker in "$STEP_DIR"/*.done; do + [ -f "$marker" ] || continue + marker_num=$(basename "$marker" | grep -o '^[0-9]*') + if [ "$marker_num" -ge "$STEP_NUM" ]; then + echo " Removing marker: $(basename "$marker")" + rm -f "$marker" + fi + done +fi + +# === Build steps === +run_step "01-autogen" "$BASEDIR/buildscripts/build-scripts/autogen" + +# clean-buildmachine is skipped in container builds. It removes leftover packages +# and processes from a shared build machine -- in a fresh container this is +# unnecessary and can fail on operations that assume a full init system. + +run_step "03-install-dependencies" "$BASEDIR/buildscripts/build-scripts/install-dependencies" + +# bootstrap-tarballs is skipped in container builds. It generates source +# tarballs (for CI distribution) and installs Mission Portal dependencies. +# We only need the latter, handled by install-mission-portal-deps below. + +install_mission_portal_deps() ( + set -e + + if [ -f "$BASEDIR/mission-portal/public/scripts/package.json" ]; then + echo "Installing npm dependencies..." + npm ci --prefix "$BASEDIR/mission-portal/public/scripts/" + echo "Building react components..." + npm run build --prefix "$BASEDIR/mission-portal/public/scripts/" + rm -rf "$BASEDIR/mission-portal/public/scripts/node_modules" + fi + + if [ -f "$BASEDIR/mission-portal/composer.json" ]; then + echo "Installing Mission Portal PHP dependencies..." 
+ (cd "$BASEDIR/mission-portal" && php /usr/bin/composer.phar install --no-dev --ignore-platform-reqs) + fi + + if [ -f "$BASEDIR/nova/api/http/composer.json" ]; then + echo "Installing Nova API PHP dependencies..." + (cd "$BASEDIR/nova/api/http" && php /usr/bin/composer.phar install --no-dev --ignore-platform-reqs) + fi + + if [ -f "$BASEDIR/mission-portal/public/themes/default/bootstrap/cfengine_theme.less" ]; then + echo "Compiling Mission Portal styles..." + (cd "$BASEDIR/mission-portal/public/themes/default/bootstrap" && \ + lessc --compress ./cfengine_theme.less ./compiled/css/cfengine.less.css) + fi + + if [ -f "$BASEDIR/mission-portal/ldap/composer.json" ]; then + echo "Installing LDAP API PHP dependencies..." + (cd "$BASEDIR/mission-portal/ldap" && php /usr/bin/composer.phar install --no-dev --ignore-platform-reqs) + fi +) + +if [ "$EXPLICIT_ROLE" = "hub" ]; then + run_step "04-mission-portal-deps" install_mission_portal_deps +fi +run_step "05-configure" "$BASEDIR/buildscripts/build-scripts/configure" +run_step "06-compile" "$BASEDIR/buildscripts/build-scripts/compile" +run_step "07-package" "$BASEDIR/buildscripts/build-scripts/package" + +# === Copy output packages === +# Packages are created under $BASEDIR// by dpkg-buildpackage / rpmbuild. +# Exclude deps-packaging to avoid copying dependency packages. +find "$BASEDIR" -maxdepth 4 \ + -path "$BASEDIR/buildscripts/deps-packaging" -prune -o \ + \( -name '*.deb' -o -name '*.rpm' -o -name '*.pkg.tar.gz' \) -print \ + -exec cp {} /output/ \; + +echo "" +echo "=== Build complete ===" +ls -lh /output/ diff --git a/build-in-container.md b/build-in-container.md new file mode 100644 index 000000000..7e782aa8c --- /dev/null +++ b/build-in-container.md @@ -0,0 +1,164 @@ +# build-in-container + +Build CFEngine packages inside Docker containers using the existing build +scripts. Requires only Docker and Python 3 on the host. 
+ +## Quick start + +```bash +# Build a community agent .deb for Ubuntu 22 +./build-in-container --platform ubuntu-22 --project community --role agent + +# Or run interactively (prompts for any unspecified options) +./build-in-container +``` + +Output packages are written to `./output/`. + +## Usage + +``` +./build-in-container [OPTIONS] +``` + +| Option | Default | Description | +|--------|---------|-------------| +| `--platform` | prompted | Target platform (e.g. `ubuntu-22`, `debian-12`) | +| `--project` | `community` | `community` or `nova` | +| `--role` | `agent` | `agent` or `hub` | +| `--build-type` | `DEBUG` | `DEBUG` or `RELEASE` | +| `--output-dir` | `./output` | Where to write output packages | +| `--cache-dir` | `~/.cache/buildscripts_cache` | Dependency cache directory | +| `--build-number` | `1` | Build number for package versioning | +| `--version` | auto | Override version string | +| `--step STEP` | | Re-run from a specific step (clears that step and all subsequent) | +| `--clean` | | Remove build volume and start from scratch | +| `--rebuild-image` | | Force rebuild of Docker image (bypasses Docker layer cache) | +| `--shell` | | Drop into a bash shell inside the container for debugging | +| `--list-platforms` | | List available platforms and exit | +| `--source-dir` | auto-detect | Root directory containing repos | +| `--non-interactive` | | Never prompt; use defaults for unspecified options | + +## Supported platforms + +| Name | Base image | +|------|------------| +| `ubuntu-20` | `ubuntu:20.04` | +| `ubuntu-22` | `ubuntu:22.04` | +| `ubuntu-24` | `ubuntu:24.04` | +| `debian-11` | `debian:11` | +| `debian-12` | `debian:12` | + +Adding a new Debian/Ubuntu platform requires only a new entry in the +`PLATFORMS` dict in `build-in-container`. Adding RHEL/CentOS requires a new +`container/Dockerfile.rhel` plus platform entries. + +## How it works + +The system has three components: + +1. 
**`build-in-container`** (Python) -- the orchestrator that runs on the host. + Parses arguments, builds the Docker image, and launches the container with + the correct mounts and environment variables. + +2. **`build-in-container-inner`** (Bash) -- runs inside the container. Syncs + source repos from the read-only mount into the build volume, then calls the + existing build scripts in order. + +3. **`container/Dockerfile.debian`** -- parameterized Dockerfile shared by all + Debian/Ubuntu platforms via a `BASE_IMAGE` build arg. + +### Container mounts + +| Host path | Container path | Mode | Purpose | +|-----------|---------------|------|---------| +| Source repos (parent of `buildscripts/`) | `/srv/source` | read-only | Protects host repos from modification | +| Named volume `cfengine-build-{platform}` | `/home/builder/build` | read-write | Persists build state for incremental builds | +| `~/.cache/buildscripts_cache/` | `/home/builder/.cache/buildscripts_cache` | read-write | Dependency cache shared across builds | +| `./output/` | `/output` | read-write | Output packages copied here | + +### Build steps + +The inner script runs these steps in order: + +1. **autogen** -- runs `autogen.sh` in each repo +2. **install-dependencies** -- builds and installs bundled dependencies +3. **mission-portal-deps** -- (hub only) installs PHP/npm/LESS assets +4. **configure** -- runs `./configure` with platform-appropriate flags +5. **compile** -- compiles and installs to the dist tree +6. **package** -- creates `.deb` or `.rpm` packages + +## Incremental builds + +Build state lives on a named Docker volume (`cfengine-build-{platform}`) that +persists between runs. Each completed step writes a marker file. 
On re-run:
+
+- Source repos are re-synced via rsync (fast for unchanged files)
+- Steps with existing markers are skipped
+- The first step without a marker runs (i.e., the one that failed or hasn't run)
+
+This means fixing a compile error and re-running picks up right where it left
+off -- no need to rebuild dependencies.
+
+### Retrying from a specific step
+
+```bash
+# Re-run configure and everything after it
+./build-in-container --platform ubuntu-22 --step configure
+```
+
+`--step` removes the marker for that step and all subsequent ones.
+
+### Starting fresh
+
+```bash
+# Remove the build and prefix volumes entirely
+./build-in-container --platform ubuntu-22 --clean
+```
+
+The dependency cache (bind-mounted from the host) is unaffected by `--clean`,
+so dependency rebuilds are still avoided.
+
+### Auto-clean on config change
+
+If the project, role, or build type changes between runs on the same platform,
+the inner script detects the mismatch and automatically cleans the build
+directory. No manual `--clean` needed.
+
+## Interactive configuration
+
+When options are omitted, the script prompts for them interactively. Each prompt
+defaults to the last-used value (persisted to
+`~/.config/build-in-container/last-config.json`). Press Enter to accept the
+default.
+
+```
+$ ./build-in-container
+
+Platform [ubuntu-20, ubuntu-22, ubuntu-24, debian-11, debian-12] (ubuntu-22):
+Project [community, nova] (nova):
+Role [agent, hub] (hub):
+Build type [DEBUG, RELEASE] (DEBUG):
+```
+
+If stdin is not a TTY or `--non-interactive` is passed, defaults are used
+without prompting.
+
+## Docker image management
+
+The Docker image is tagged `cfengine-builder-{platform}` and rebuilt
+automatically when the Dockerfile changes (tracked via a content hash stored as
+an image label). Use `--rebuild-image` to force a full rebuild bypassing the
+Docker layer cache (useful when upstream packages change).
+ +## Debugging + +```bash +# Drop into a shell inside the container +./build-in-container --platform ubuntu-22 --shell +``` + +The shell session has the same mounts and environment as a build run. The build +volume is mounted, so you can inspect build state, run individual build scripts, +or poke around. The container is ephemeral (`--rm`), so anything outside the +named volume is lost on exit. diff --git a/container/Dockerfile.debian b/container/Dockerfile.debian new file mode 100644 index 000000000..137f6d55c --- /dev/null +++ b/container/Dockerfile.debian @@ -0,0 +1,47 @@ +ARG BASE_IMAGE=ubuntu:20.04 +FROM ${BASE_IMAGE} + +ENV DEBIAN_FRONTEND=noninteractive + +# Build tools extracted from ci/cfengine-build-host-setup.cf (debian|ubuntu section) +RUN apt-get update && apt-get install -y \ + autoconf automake binutils bison build-essential curl debhelper \ + dpkg-dev expat fakeroot flex gdb git libexpat1-dev \ + libmodule-load-conditional-perl libpam0g-dev libtool \ + pkg-config psmisc python3-pip rsync sudo systemd-coredump wget \ + && rm -rf /var/lib/apt/lists/* + +# ncurses: ubuntu-20 uses libncurses5/libncurses5-dev, newer use libncurses6/libncurses-dev +ARG NCURSES_PKGS="libncurses6 libncurses-dev" +RUN apt-get update && apt-get install -y ${NCURSES_PKGS} \ + && rm -rf /var/lib/apt/lists/* + +# Hub build tools: Node.js 20 LTS (system nodejs is too old for modern npm +# packages that use the node: protocol), PHP, and Composer +RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \ + && apt-get install -y nodejs php-cli \ + && rm -rf /var/lib/apt/lists/* +RUN npm install -g less +RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/bin --filename=composer.phar + +# Remove system dev libraries that conflict with bundled deps +# (from cfengine-build-host-setup.cf lines 53-59) +RUN apt-get purge -y \ + libattr1-dev libssl-dev libpcre2-dev libacl1-dev \ + libyaml-dev libxml2-dev librsync-dev 2>/dev/null || true + +# Create build user 
with UID/GID matching the host user. +# This ensures bind-mounted directories (cache, output) have correct ownership. +# HOST_UID and HOST_GID are passed as build args by the Python orchestrator. +ARG HOST_UID=1000 +ARG HOST_GID=1000 +RUN groupadd -g ${HOST_GID} builder \ + && useradd -m -s /bin/bash -u ${HOST_UID} -g ${HOST_GID} builder \ + && echo "builder ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/builder + +USER builder +WORKDIR /home/builder + +# Pre-create the build directory so that when a named volume is mounted here, +# Docker initializes it with the correct ownership (builder:builder). +RUN mkdir -p /home/builder/build