From a0b24f0810fefcdf34c807203c1cdb2e6abba990 Mon Sep 17 00:00:00 2001 From: rht-jbittner <40197105+rht-jbittner@users.noreply.github.com> Date: Fri, 14 Nov 2025 13:25:27 +0100 Subject: [PATCH 01/13] added option to skip debian package installation in installing_deps.sh script --- installing_deps.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/installing_deps.sh b/installing_deps.sh index 6a705c52..377ef5cd 100755 --- a/installing_deps.sh +++ b/installing_deps.sh @@ -8,6 +8,7 @@ # SKIP_KVROCKS=1 # SKIP_GEN_CERT=1 # SKIP_DB_SETUP=1 +# SKIP_LNX_PKG_INSTALL=1 # # Example: SKIP_REDIS=1 SKIP_YARA=1 ./install.sh # @@ -18,6 +19,7 @@ set -e ## bash debug mode toggle below #set -x +if [ -z "$SKIP_LNX_PKG_INSTALL" ]; then sudo apt-get update sudo apt-get install python3-pip virtualenv python3-dev python3-tk libfreetype6-dev \ @@ -54,6 +56,10 @@ sudo apt-get install build-essential libffi-dev autoconf -qq # sflock, gz requirement sudo apt-get install p7zip-full -qq # TODO REMOVE ME +else + echo "--- Skipping Linux packages installation ---" +fi + # SUBMODULES # git submodule update --init --recursive From 94b6b2f4d7d8f54b05cf1f3c70131845187885e0 Mon Sep 17 00:00:00 2001 From: rht-jbittner <40197105+rht-jbittner@users.noreply.github.com> Date: Fri, 14 Nov 2025 14:08:04 +0100 Subject: [PATCH 02/13] Archived legacy docker configs and initial release of new dockerfile. 
--- .../{docker => docker-legacy}/.dockerignore | 0 other_installers/docker-legacy/Dockerfile | 40 + other_installers/docker-legacy/README.md | 45 + .../docker-compose.yml | 0 .../docker-legacy/docker_start.sh | 21 + .../pystemon/config.cfg | 0 .../pystemon/install.sh | 0 .../pystemon/proxies.txt | 0 .../pystemon/pystemon.yaml | 0 other_installers/docker/Dockerfile | 150 ++- other_installers/docker/README.md | 46 +- other_installers/docker/core.cfg | 310 ++++++ other_installers/docker/kvrocks_6383.conf | 915 ++++++++++++++++++ other_installers/docker/update.cfg | 4 + 14 files changed, 1455 insertions(+), 76 deletions(-) rename other_installers/{docker => docker-legacy}/.dockerignore (100%) create mode 100644 other_installers/docker-legacy/Dockerfile create mode 100644 other_installers/docker-legacy/README.md rename other_installers/{docker => docker-legacy}/docker-compose.yml (100%) create mode 100644 other_installers/docker-legacy/docker_start.sh rename other_installers/{docker => docker-legacy}/pystemon/config.cfg (100%) rename other_installers/{docker => docker-legacy}/pystemon/install.sh (100%) mode change 100755 => 100644 rename other_installers/{docker => docker-legacy}/pystemon/proxies.txt (100%) rename other_installers/{docker => docker-legacy}/pystemon/pystemon.yaml (100%) create mode 100644 other_installers/docker/core.cfg create mode 100644 other_installers/docker/kvrocks_6383.conf create mode 100644 other_installers/docker/update.cfg diff --git a/other_installers/docker/.dockerignore b/other_installers/docker-legacy/.dockerignore similarity index 100% rename from other_installers/docker/.dockerignore rename to other_installers/docker-legacy/.dockerignore diff --git a/other_installers/docker-legacy/Dockerfile b/other_installers/docker-legacy/Dockerfile new file mode 100644 index 00000000..3f63372b --- /dev/null +++ b/other_installers/docker-legacy/Dockerfile @@ -0,0 +1,40 @@ +FROM ubuntu:22.04 +ARG tz_buildtime=Europe/Rome +ENV TZ=$tz_buildtime +RUN ln 
-snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +# Make sure that all updates are in place +RUN apt-get clean && apt-get update -y && apt-get upgrade -y \ + && apt-get dist-upgrade -y && apt-get autoremove -y + +# Install needed packages +RUN apt-get install git python3-dev build-essential \ + libffi-dev libssl-dev libfuzzy-dev wget sudo -y + +# Adding sudo command +RUN useradd -m docker && echo "docker:docker" | chpasswd && adduser docker sudo +RUN echo "root ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +# Installing AIL dependencies +RUN mkdir /opt/AIL +ADD . /opt/AIL +WORKDIR /opt/AIL +RUN ./installing_deps.sh +WORKDIR /opt/AIL + +# Default to UTF-8 file.encoding +ENV LANG C.UTF-8 +ENV AIL_HOME /opt/AIL +ENV AIL_BIN ${AIL_HOME}/bin +ENV AIL_FLASK ${AIL_HOME}/var/www +ENV AIL_REDIS ${AIL_HOME}/redis/src +ENV AIL_ARDB ${AIL_HOME}/ardb/src +ENV AIL_VENV ${AIL_HOME}/AILENV + +ENV PATH ${AIL_VENV}/bin:${AIL_HOME}:${AIL_REDIS}:${AIL_ARDB}:${AIL_BIN}:${AIL_FLASK}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + +RUN ./pystemon/install.sh +RUN pip install -r /opt/pystemon/requirements.txt + +COPY docker_start.sh /docker_start.sh +ENTRYPOINT ["/bin/bash", "docker_start.sh"] diff --git a/other_installers/docker-legacy/README.md b/other_installers/docker-legacy/README.md new file mode 100644 index 00000000..018ba146 --- /dev/null +++ b/other_installers/docker-legacy/README.md @@ -0,0 +1,45 @@ +Docker Quick Start (Ubuntu 18.04 LTS) +------------ + +:warning: +This Docker is not maintained at the moment. +If you are interested to contribute, please submit a Pull Request + + +1. Install Docker +```bash +sudo su +apt-get install -y curl +curl https://get.docker.com | /bin/bash +``` + +2. 
Type these commands to build the Docker image: +```bash +git clone https://github.com/ail-project/ail-framework.git +cd AIL-framework +cp -r ./other_installers/docker/Dockerfile ./other_installers/docker/docker_start.sh ./other_installers/docker/pystemon ./ +cp ./configs/update.cfg.sample ./configs/update.cfg +vim/nano ./configs/update.cfg (set auto_update to False) +docker build --build-arg tz_buildtime=YOUR_GEO_AREA/YOUR_CITY -t ail-framework . +``` +3. To start AIL on port 7000, type the following command below: +``` +docker run -p 7000:7000 ail-framework +``` + +4. To debug the running container, type the following command and note the container name or identifier: +```bash +docker ps +``` + +After getting the name or identifier type the following commands: +```bash +docker exec -it CONTAINER_NAME_OR_IDENTIFIER bash +cd /opt/ail +``` + +Install using Ansible +--------------------- + +Please check the [Ansible readme](ansible/README.md). + diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker-legacy/docker-compose.yml similarity index 100% rename from other_installers/docker/docker-compose.yml rename to other_installers/docker-legacy/docker-compose.yml diff --git a/other_installers/docker-legacy/docker_start.sh b/other_installers/docker-legacy/docker_start.sh new file mode 100644 index 00000000..5b0e1b06 --- /dev/null +++ b/other_installers/docker-legacy/docker_start.sh @@ -0,0 +1,21 @@ +#!/bin/bash +signalListener() { + "$@" & + pid="$!" 
+ trap "echo 'Stopping'; kill -SIGTERM $pid" SIGINT SIGTERM + + while kill -0 $pid > /dev/null 2>&1; do + wait + done +} + + +source ./AILENV/bin/activate +cd bin +./LAUNCH.sh -l +./LAUNCH.sh -c +./LAUNCH.sh -f + +signalListener tail -f /dev/null $@ + +./LAUNCH.sh -k diff --git a/other_installers/docker/pystemon/config.cfg b/other_installers/docker-legacy/pystemon/config.cfg similarity index 100% rename from other_installers/docker/pystemon/config.cfg rename to other_installers/docker-legacy/pystemon/config.cfg diff --git a/other_installers/docker/pystemon/install.sh b/other_installers/docker-legacy/pystemon/install.sh old mode 100755 new mode 100644 similarity index 100% rename from other_installers/docker/pystemon/install.sh rename to other_installers/docker-legacy/pystemon/install.sh diff --git a/other_installers/docker/pystemon/proxies.txt b/other_installers/docker-legacy/pystemon/proxies.txt similarity index 100% rename from other_installers/docker/pystemon/proxies.txt rename to other_installers/docker-legacy/pystemon/proxies.txt diff --git a/other_installers/docker/pystemon/pystemon.yaml b/other_installers/docker-legacy/pystemon/pystemon.yaml similarity index 100% rename from other_installers/docker/pystemon/pystemon.yaml rename to other_installers/docker-legacy/pystemon/pystemon.yaml diff --git a/other_installers/docker/Dockerfile b/other_installers/docker/Dockerfile index 3f63372b..02ec4296 100644 --- a/other_installers/docker/Dockerfile +++ b/other_installers/docker/Dockerfile @@ -1,40 +1,128 @@ -FROM ubuntu:22.04 -ARG tz_buildtime=Europe/Rome -ENV TZ=$tz_buildtime -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -# Make sure that all updates are in place -RUN apt-get clean && apt-get update -y && apt-get upgrade -y \ - && apt-get dist-upgrade -y && apt-get autoremove -y - -# Install needed packages -RUN apt-get install git python3-dev build-essential \ - libffi-dev libssl-dev libfuzzy-dev wget sudo -y - -# Adding sudo 
command -RUN useradd -m docker && echo "docker:docker" | chpasswd && adduser docker sudo -RUN echo "root ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - -# Installing AIL dependencies -RUN mkdir /opt/AIL -ADD . /opt/AIL -WORKDIR /opt/AIL -RUN ./installing_deps.sh -WORKDIR /opt/AIL - -# Default to UTF-8 file.encoding +ARG BASE_IMAGE=ubuntu +FROM $BASE_IMAGE + +# Installing_deps.sh script arguments +ARG SKIP_REDIS +ARG SKIP_TLSH +ARG SKIP_PGPDUMP +ARG SKIP_YARA +ARG SKIP_KVROCKS +ARG SKIP_GEN_CERT +ARG SKIP_DB_SETUP +ARG SKIP_LNX_PKG_INSTALL + +# Installing_deps.sh script env (arg to env mapping) +ENV SKIP_REDIS=$SKIP_REDIS +ENV SKIP_TLSH=$SKIP_TLSH +ENV SKIP_PGPDUMP=$SKIP_PGPDUMP +ENV SKIP_YARA=$SKIP_YARA +ENV SKIP_KVROCKS=$SKIP_KVROCKS +ENV SKIP_GEN_CERT=$SKIP_GEN_CERT +ENV SKIP_DB_SETUP=$SKIP_DB_SETUP +ENV SKIP_LNX_PKG_INSTALL=$SKIP_LNX_PKG_INSTALL + +# AIL runtime env variables ENV LANG C.UTF-8 -ENV AIL_HOME /opt/AIL +ENV AIL_HOME /home/ail/ail-framework ENV AIL_BIN ${AIL_HOME}/bin ENV AIL_FLASK ${AIL_HOME}/var/www ENV AIL_REDIS ${AIL_HOME}/redis/src ENV AIL_ARDB ${AIL_HOME}/ardb/src ENV AIL_VENV ${AIL_HOME}/AILENV -ENV PATH ${AIL_VENV}/bin:${AIL_HOME}:${AIL_REDIS}:${AIL_ARDB}:${AIL_BIN}:${AIL_FLASK}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +#ENV PATH ${AIL_VENV}/bin:${AIL_HOME}:${AIL_REDIS}:${AIL_ARDB}:${AIL_BIN}:${AIL_FLASK}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + +# Build Dependencies +RUN apt-get update && \ + apt-get install -y \ + sudo \ + wget \ + git \ + python3-dev \ + g++ \ + cmake \ + automake \ + libtool \ + make \ + gcc \ + pkg-config \ + build-essential \ + autoconf \ + virtualenv \ + unzip \ + libsnappy-dev \ + libssl-dev \ + libfreetype6-dev \ + protobuf-compiler \ + libprotobuf-dev \ + libadns1-dev \ + libev-dev \ + libgmp-dev \ + libfuzzy-dev \ + libffi-dev && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Runtime Dependencies +RUN apt-get update && \ + apt-get install -y \ + python3-pip \ + 
python3-tk \ + screen \ + python3-numpy \ + python3-opencv \ + libzbar0 \ + libadns1 \ + graphviz \ + p7zip-full && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Replacements of Comiles/Builds +RUN wget https://github.com/RocksLabs/kvrocks-fpm/releases/download/202502091/kvrocks_2.11.1-1_amd64.deb && \ + apt-get update && \ + apt-get install -y \ + pgpdump \ + tlsh-tools \ + yara \ + ./kvrocks_2.11.1-1_amd64.deb && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Create ail user +RUN useradd -m -s /bin/bash ail + +# Create ail framework work directory +RUN mkdir -p /home/ail/ail-framework + +# Change to ail work directory +WORKDIR /home/ail/ail-framework + +# Copy AIL files +COPY bin configs doc files logs samples tests tools update var install_virtualenv.sh installing_deps.sh requirements.txt reset_AIL.sh ./ + +# Start AIL installers +RUN apt-get update && \ + chmod +x ./installing_deps.sh && \ + sed -i 's/^sudo *//' ./installing_deps.sh && \ + ./installing_deps.sh && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Copy Docker modified files +COPY other_installers/docker/update.cfg configs/update.cfg + +COPY other_installers/docker/core.cfg configs/core.cfg + +COPY other_installers/docker/kvrocks_6383.conf configs/6383.conf + +COPY other_installers/docker/docker_start.sh /usr/local/sbin/ + +RUN chmod +x /usr/local/sbin/docker_start.sh + +RUN chown -R ail:ail /home/ail/ -RUN ./pystemon/install.sh -RUN pip install -r /opt/pystemon/requirements.txt +# Change user +USER ail -COPY docker_start.sh /docker_start.sh -ENTRYPOINT ["/bin/bash", "docker_start.sh"] +ENTRYPOINT ["/bin/bash", "/usr/local/sbin/docker_start.sh"] \ No newline at end of file diff --git a/other_installers/docker/README.md b/other_installers/docker/README.md index 018ba146..0e8521fa 100644 --- a/other_installers/docker/README.md +++ b/other_installers/docker/README.md @@ -1,45 +1 @@ -Docker Quick Start (Ubuntu 18.04 LTS) ------------- - -:warning: -This Docker is not 
maintained at the moment. -If you are interested to contribute, please submit a Pull Request - - -1. Install Docker -```bash -sudo su -apt-get install -y curl -curl https://get.docker.com | /bin/bash -``` - -2. Type these commands to build the Docker image: -```bash -git clone https://github.com/ail-project/ail-framework.git -cd AIL-framework -cp -r ./other_installers/docker/Dockerfile ./other_installers/docker/docker_start.sh ./other_installers/docker/pystemon ./ -cp ./configs/update.cfg.sample ./configs/update.cfg -vim/nano ./configs/update.cfg (set auto_update to False) -docker build --build-arg tz_buildtime=YOUR_GEO_AREA/YOUR_CITY -t ail-framework . -``` -3. To start AIL on port 7000, type the following command below: -``` -docker run -p 7000:7000 ail-framework -``` - -4. To debug the running container, type the following command and note the container name or identifier: -```bash -docker ps -``` - -After getting the name or identifier type the following commands: -```bash -docker exec -it CONTAINER_NAME_OR_IDENTIFIER bash -cd /opt/ail -``` - -Install using Ansible ---------------------- - -Please check the [Ansible readme](ansible/README.md). 
- +TBA \ No newline at end of file diff --git a/other_installers/docker/core.cfg b/other_installers/docker/core.cfg new file mode 100644 index 00000000..66b7b034 --- /dev/null +++ b/other_installers/docker/core.cfg @@ -0,0 +1,310 @@ +[Directories] +bloomfilters = Blooms +dicofilters = Dicos +pastes = PASTES +hash = HASHS +crawled = crawled +har = CRAWLED_SCREENSHOT +screenshot = CRAWLED_SCREENSHOT/screenshot +images = IMAGES +favicons = FAVICONS + +wordtrending_csv = var/www/static/csv/wordstrendingdata +wordsfile = files/wordfile + +protocolstrending_csv = var/www/static/csv/protocolstrendingdata +protocolsfile = files/protocolsfile + +tldstrending_csv = var/www/static/csv/tldstrendingdata +tldsfile = faup/src/data/mozilla.tlds + +domainstrending_csv = var/www/static/csv/domainstrendingdata + +sentiment_lexicon_file = sentiment/vader_lexicon.zip/vader_lexicon/vader_lexicon.txt + +[Pystemon] +dir = /home/pystemon/pystemon/ +redis_host = ail-redis-cache-master +redis_port = 6379 +redis_db = 10 + +##### Logs ###### +[Logs] +# activate syslog +ail_logs_syslog = False +ail_logs_syslog_server = +# default=514 +ail_logs_syslog_port = +# ['auth', 'authpriv', 'cron', 'daemon', 'ftp', 'kern', 'lpr', 'mail', 'news', 'syslog', 'user', 'uucp', 'local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7'] +ail_logs_syslog_facility = +# ['DEBUG', 'INFO', 'NOTICE', 'WARNING', 'ERROR', 'CRITICAL'] +ail_logs_syslog_level = + +##### Notifications ###### +[Notifications] +ail_domain = https://localhost:7000 +sender = sender@example.com +sender_host = smtp.example.com +sender_port = 1337 +sender_pw = None +# Only needed for SMTP over SSL if the mail server don't support TLS (used by default). use this option to validate the server certificate. 
+cert_required = False +# Only needed for SMTP over SSL if you want to validate your self signed certificate for SSL +ca_file = +# Only needed when the credentials for email server needs a username instead of an email address +#sender_user = sender +sender_user = + +# optional for using with authenticated SMTP over SSL +# sender_pw = securepassword + +##### Flask ##### +[Flask] +#Proxying requests to the app +baseUrl = / +#Host to bind to +host = 0.0.0.0 +#Flask server port +port = 7000 +#Number of logs to display in the dashboard +max_dashboard_logs = 15 +#Maximum number of character to display in the toolip +max_preview_char = 250 +#Maximum number of character to display in the modal +max_preview_modal = 800 +#Default number of header to display in trending graphs +default_display = 10 +#Number of minutes displayed for the number of processed pastes. +minute_processed_paste = 10 +#Maximum line length authorized to make a diff between duplicates +DiffMaxLineLength = 10000 + +##### Images ##### +[Images] +ollama_url = http://127.0.0.1:11434 +ollama_enabled = True + +##### Users ##### +[Users] +force_2fa = False +2fa_name = AIL + +[AIL_2_AIL] +server_host = 0.0.0.0 +server_port = 4443 +local_addr = + +#### Modules #### +[BankAccount] +max_execution_time = 60 + +[Categ] +#Minimum number of match between the paste and the category file +matchingThreshold=1 + +[Credential] +#Minimum length that a credential must have to be considered as such +minimumLengthThreshold=3 +#Will be pushed as alert if the number of credentials is greater to that number +criticalNumberToAlert=8 +#Will be considered as false positive if less that X matches from the top password list +minTopPassList=5 + +[Decoder] +max_execution_time_base64 = 60 +max_execution_time_binary = 60 +max_execution_time_hexadecimal = 60 + +[Onion] +save_i2p = False +max_execution_time = 180 + +[PgpDump] +max_execution_time = 60 + +[Modules_Duplicates] +#Number of month to look back +maximum_month_range = 3 +#The value 
where two pastes are considerate duplicate for ssdeep. +threshold_duplicate_ssdeep = 50 +#The value where two pastes are considerate duplicate for tlsh. +threshold_duplicate_tlsh = 52 +#Minimum size of the paste considered +min_paste_size = 0.3 + +[Module_ModuleInformation] +#Threshold to deduce if a module is stuck or not, in seconds. +threshold_stucked_module=600 + +[Module_Mixer] +#Define the configuration of the mixer, possible value: 1, 2 or 3 +operation_mode = 3 +#Define the time that a paste will be considerate duplicate. in seconds (1day = 86400) +ttl_duplicate = 86400 +default_unnamed_feed_name = unnamed_feeder + +[Tracker_Term] +max_execution_time = 120 + +[Tracker_Regex] +max_execution_time = 60 + +##### Redis ##### +[Redis_Cache] +#host = localhost +host = ail-redis-cache-master +port = 6379 +db = 0 + +[Redis_Log] +host = ail-redis-log-master +port = 6379 +db = 0 + +[Redis_Log_submit] +host = ail-redis-log-master +port = 6379 +db = 1 + +[Redis_Queues] +host = ail-redis-queues-master +port = 6379 +db = 0 + +[Redis_Process] +host = ail-redis-queues-master +port = 6379 +db = 2 + +[Redis_Mixer_Cache] +host = ail-redis-queues-master +port = 6379 +db = 1 + +##### KVROCKS ##### + +[Kvrocks_DB] +host = localhost +port = 6383 +password = ail + +[Kvrocks_Duplicates] +host = localhost +port = 6383 +password = ail_dups + +[Kvrocks_Correlations] +host = localhost +port = 6383 +password = ail_correls + +[Kvrocks_Crawler] +host = localhost +port = 6383 +password = ail_crawlers + +[Kvrocks_Languages] +host = localhost +port = 6383 +password = ail_langs + +[Kvrocks_Objects] +host = localhost +port = 6383 +password = ail_objs + +[Kvrocks_Relationships] +host = localhost +port = 6383 +password = ail_rels + +[Kvrocks_Searchs] +host = localhost +port = 6383 +password = ail_searchs + +[Kvrocks_Timeline] +host = localhost +port = 6383 +password = ail_tls + +[Kvrocks_Stats] +host = localhost +port = 6383 +password = ail_stats + +[Kvrocks_Tags] +host = localhost +port = 6383 
+password = ail_tags + +[Kvrocks_Trackers] +host = localhost +port = 6383 +password = ail_trackers + +##### - ##### + +[Url] +cc_critical = DE + +[DomClassifier] +#cc = DE +#cc_tld = r'\.de$' +cc = +cc_tld = +dns = 8.8.8.8 + + +[Mail] +dns = 8.8.8.8 + +# Indexer configuration +[Indexer] +meilisearch = False +meilisearch_url = http://localhost:7700 +meilisearch_key = ailmeilisearchpassword + +[ailleakObject] +maxDuplicateToPushToMISP=10 + +############################################################################### + +# For multiple feed, add them with "," without space +# e.g.: tcp://127.0.0.1:5556,tcp://127.0.0.1:5557 +[ZMQ_Global] +# address = tcp://127.0.0.1:5556,tcp://crf.circl.lu:5556 +address = tcp://127.0.0.1:5556 +channel = 102 +bind = tcp://127.0.0.1:5556 + +[RedisPubSub] +host = ail-redis-queues-master +port = 6379 +db = 0 + +[Crawler] +activate_crawler = False +default_depth_limit = 1 +default_har = True +default_screenshot = True +onion_proxy = onion.foundation +ail_url_to_push_onion_discovery = +ail_key_to_push_onion_discovery = + +[Translation] +libretranslate = + +[IP] +# list of comma-separated CIDR that you wish to be alerted for. e.g: +#networks = 192.168.34.0/24,10.0.0.0/8,192.168.33.0/24 +networks = + +[SubmitPaste] +# 1 Mb Max text paste size for text submission +TEXT_MAX_SIZE = 1000000 +# 1 Gb Max file size for file submission +FILE_MAX_SIZE = 1000000000 +# Managed file extenions for file submission, comma separated +# TODO add zip, gz and tar.gz +FILE_ALLOWED_EXTENSIONS = txt,sh,pdf,html,json diff --git a/other_installers/docker/kvrocks_6383.conf b/other_installers/docker/kvrocks_6383.conf new file mode 100644 index 00000000..1949f1e7 --- /dev/null +++ b/other_installers/docker/kvrocks_6383.conf @@ -0,0 +1,915 @@ +################################ GENERAL ##################################### + +# By default kvrocks listens for connections from localhost interface. 
+# It is possible to listen to just one or multiple interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# bind 0.0.0.0 +bind 127.0.0.1 + +# Unix socket. +# +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so kvrocks will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/kvrocks.sock +# unixsocketperm 777 + +# Accept connections on the specified port, default is 6666. +port 6383 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# The number of worker's threads, increase or decrease would affect the performance. +workers 8 + +# By default, kvrocks does not run as a daemon. Use 'yes' if you need it. +# Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized +daemonize no + +# Kvrocks implements the cluster solution that is similar to the Redis cluster solution. +# You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is +# adapted to redis-cli, redis-benchmark, Redis cluster SDK, and Redis cluster proxy. +# But kvrocks doesn't support communicating with each other, so you must set +# cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219. +# +# PLEASE NOTE: +# If you enable cluster, kvrocks will encode key with its slot id calculated by +# CRC16 and modulo 16384, encoding key with its slot id makes it efficient to +# migrate keys based on the slot. So if you enabled at first time, cluster mode must +# not be disabled after restarting, and vice versa. That is to say, data is not +# compatible between standalone mode with cluster mode, you must migrate data +# if you want to change mode, otherwise, kvrocks will make data corrupt. 
+# +# Default: no + +cluster-enabled no + +# By default, namespaces are stored in the configuration file and won't be replicated +# to replicas. This option allows to change this behavior, so that namespaces are also +# propagated to slaves. Note that: +# 1) it won't replicate the 'masterauth' to prevent breaking master/replica replication +# 2) it will overwrite replica's namespace with master's namespace, so be careful of in-using namespaces +# 3) cannot switch off the namespace replication once it's enabled +# +# Default: no +repl-namespace-enabled no + +# Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration +# takes effect only if the cluster mode was enabled. +# +# If yes, it will try to load the cluster topology from the local file when starting, +# and dump the cluster nodes into the file if it was changed. +# +# Default: yes +persist-cluster-nodes-enabled yes + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients. However, if the server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# +# Once the limit is reached the server will close all the new connections sending +# an error 'max number of clients reached'. +# +maxclients 10000 + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running kvrocks. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since kvrocks is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. 
+# +requirepass ail + +# If the master is password protected (using the "masterauth" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process. Otherwise, the master will +# refuse the slave request. +# +# masterauth foobared + +# Master-Salve replication would check db name is matched. if not, the slave should +# refuse to sync the db from master. Don't use the default value, set the db-name to identify +# the cluster. +db-name change.me.db + +# The working directory +# +# The DB will be written inside this directory +# Note that you must specify a directory here, not a file name. +dir DATA_KVROCKS + +# You can configure where to store your server logs by the log-dir. +# If you don't specify one, we will use the above `dir` as our default log directory. +# We also can send logs to stdout/stderr is as simple as: +# +# log-dir stdout + +# Log level +# Possible values: info, warning, error, fatal +# Default: info +log-level info + +# You can configure log-retention-days to control whether to enable the log cleaner +# and the maximum retention days that the INFO level logs will be kept. +# +# if set to -1, that means to disable the log cleaner. +# if set to 0, all previous INFO level logs will be immediately removed. +# if set to between 0 to INT_MAX, that means it will retent latest N(log-retention-days) day logs. + +# By default the log-retention-days is -1. +log-retention-days -1 + +# When running in daemonize mode, kvrocks writes a PID file in ${CONFIG_DIR}/kvrocks.pid by +# default. You can specify a custom pid file location here. +# pidfile /var/run/kvrocks.pid +pidfile DATA_KVROCKS/kvrocks.pid + +# You can configure a slave instance to accept writes or not. 
Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +slave-read-only yes + +# The slave priority is an integer number published by Kvrocks in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. +# +# A slave with a low priority number is considered better for promotion, so +# for instance if there are three slave with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a slave with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +slave-priority 100 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to Get the desired effect. +tcp-backlog 511 + +# If the master is an old version, it may have specified replication threads +# that use 'port + 1' as listening port, but in new versions, we don't use +# extra port to implement replication. In order to allow the new replicas to +# copy old masters, you should indicate that the master uses replication port +# or not. +# If yes, that indicates master uses replication port and replicas will connect +# to 'master's listening port + 1' when synchronization. +# If no, that indicates master doesn't use replication port and replicas will +# connect 'master's listening port' when synchronization. 
+master-use-repl-port no + +# Currently, master only checks sequence number when replica asks for PSYNC, +# that is not enough since they may have different replication histories even +# the replica asking sequence is in the range of the master current WAL. +# +# We design 'Replication Sequence ID' PSYNC, we add unique replication id for +# every write batch (the operation of each command on the storage engine), so +# the combination of replication id and sequence is unique for write batch. +# The master can identify whether the replica has the same replication history +# by checking replication id and sequence. +# +# By default, it is not enabled since this stricter check may easily lead to +# full synchronization. +use-rsid-psync no + +# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of +# another kvrocks server. A few things to understand ASAP about kvrocks replication. +# +# 1) Kvrocks replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Kvrocks slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof +# slaveof 127.0.0.1 6379 + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out-of-date data, or the +# data set may just be empty if this is the first synchronization. 
+# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all kinds of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# To guarantee slave's data safe and serve when it is in full synchronization +# state, slave still keep itself data. But this way needs to occupy much disk +# space, so we provide a way to reduce disk occupation, slave will delete itself +# entire database before fetching files from master during full synchronization. +# If you want to enable this way, you can set 'slave-delete-db-before-fullsync' +# to yes, but you must know that database will be lost if master is down during +# full synchronization, unless you have a backup of database. +# +# This option is similar redis replicas RDB diskless load option: +# repl-diskless-load on-empty-db +# +# Default: no +slave-empty-db-before-fullsync no + +# A Kvrocks master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. 
The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +# If replicas need full synchronization with master, master need to create +# checkpoint for feeding replicas, and replicas also stage a checkpoint of +# the master. If we also keep the backup, it maybe occupy extra disk space. +# You can enable 'purge-backup-on-fullsync' if disk is not sufficient, but +# that may cause remote backup copy failing. +# +# Default: no +purge-backup-on-fullsync no + +# The maximum allowed rate (in MB/s) that should be used by replication. +# If the rate exceeds max-replication-mb, replication will slow down. +# Default: 0 (i.e. no limit) +max-replication-mb 0 + +# The maximum allowed aggregated write rate of flush and compaction (in MB/s). +# If the rate exceeds max-io-mb, io will slow down. +# 0 is no limit +# Default: 0 +max-io-mb 0 + +# The maximum allowed space (in GB) that should be used by RocksDB. +# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail. +# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization +# Default: 0 (i.e. no limit) +max-db-size 0 + +# The maximum backup to keep, server cron would run every minutes to check the num of current +# backup, and purge the old backup if exceed the max backup num to keep. If max-backup-to-keep +# is 0, no backup would be kept. But now, we only support 0 or 1. +max-backup-to-keep 1 + +# The maximum hours to keep the backup. If max-backup-keep-hours is 0, wouldn't purge any backup. +# default: 1 day +max-backup-keep-hours 24 + +# max-bitmap-to-string-mb use to limit the max size of bitmap to string transformation(MB). 
+# +# Default: 16 +max-bitmap-to-string-mb 16 + +# Whether to enable SCAN-like cursor compatible with Redis. +# If enabled, the cursor will be unsigned 64-bit integers. +# If disabled, the cursor will be a string. +# Default: no +redis-cursor-compatible yes + +# Whether to enable the RESP3 protocol. +# NOTICE: RESP3 is still under development, don't enable it in production environment. +# +# Default: no +# resp3-enabled no + +# Maximum nesting depth allowed when parsing and serializing +# JSON documents while using JSON commands like JSON.SET. +# Default: 1024 +json-max-nesting-depth 1024 + +# The underlying storage format of JSON data type +# NOTE: This option only affects newly written/updated key-values +# The CBOR format may reduce the storage size and speed up JSON commands +# Available values: json, cbor +# Default: json +json-storage-format json + +################################## TLS ################################### + +# By default, TLS/SSL is disabled, i.e. `tls-port` is set to 0. +# To enable it, `tls-port` can be used to define TLS-listening ports. +# tls-port 0 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. +# These files should be PEM formatted. +# +# tls-cert-file kvrocks.crt +# tls-key-file kvrocks.key + +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Kvrocks requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. 
+# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. +# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +# By default, a replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. 
+# +# tls-replication yes + +################################## SLOW LOG ################################### + +# The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Kvrocks +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that -1 value disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 100000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +# If you run kvrocks from upstart or systemd, kvrocks can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting kvrocks into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. 
+supervised no + +################################## PERF LOG ################################### + +# The Kvrocks Perf Log is a mechanism to log queries' performance context that +# exceeded a specified execution time. This mechanism uses rocksdb's +# Perf Context and IO Stats Context, Please see: +# https://github.com/facebook/rocksdb/wiki/Perf-Context-and-IO-Stats-Context +# +# This mechanism is enabled when profiling-sample-commands is not empty and +# profiling-sample-ratio greater than 0. +# It is important to note that this mechanism affects performance, but it is +# useful for troubleshooting performance bottlenecks, so it should only be +# enabled when performance problems occur. + +# The name of the commands you want to record. Must be original name of +# commands supported by Kvrocks. Use ',' to separate multiple commands and +# use '*' to record all commands supported by Kvrocks. +# Example: +# - Single command: profiling-sample-commands get +# - Multiple commands: profiling-sample-commands get,mget,hget +# +# Default: empty +# profiling-sample-commands "" + +# Ratio of the samples would be recorded. It is a number between 0 and 100. +# We simply use the rand to determine whether to record the sample or not. +# +# Default: 0 +profiling-sample-ratio 0 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the perf log with PERFLOG RESET. +# +# Default: 256 +profiling-sample-record-max-len 256 + +# profiling-sample-record-threshold-ms use to tell the kvrocks when to record. +# +# Default: 100 millisecond +profiling-sample-record-threshold-ms 100 + +################################## CRON ################################### + +# Compact Scheduler, auto compact at schedule time +# time expression format is the same as crontab(currently only support * and int) +# e.g. 
compact-cron 0 3 * * * 0 4 * * * +# would compact the db at 3am and 4am everyday +# compact-cron 0 3 * * * + +# The hour range that compaction checker would be active +# e.g. compaction-checker-range 0-7 means compaction checker would be worker between +# 0-7am every day. +compaction-checker-range 0-7 + +# When the compaction checker is triggered, the db will periodically pick the SST file +# with the highest "deleted percentage" (i.e. the percentage of deleted keys in the SST +# file) to compact, in order to free disk space. +# However, if a specific SST file was created more than "force-compact-file-age" seconds +# ago, and its percentage of deleted keys is higher than +# "force-compact-file-min-deleted-percentage", it will be forcely compacted as well. + +# Default: 172800 seconds; Range: [60, INT64_MAX]; +# force-compact-file-age 172800 +# Default: 10 %; Range: [1, 100]; +# force-compact-file-min-deleted-percentage 10 + +# Bgsave scheduler, auto bgsave at scheduled time +# time expression format is the same as crontab(currently only support * and int) +# e.g. bgsave-cron 0 3 * * * 0 4 * * * +# would bgsave the db at 3am and 4am every day + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance, the KEYS command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command KEYS b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command KEYS "" + +################################ MIGRATE ##################################### +# If the network bandwidth is completely consumed by the migration task, +# it will affect the availability of kvrocks. To avoid this situation, +# migrate-speed is adopted to limit the migrating speed. 
+# Migrating speed is limited by controlling the duration between sending data, +# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us). +# Value: [0,INT_MAX], 0 means no limit +# +# Default: 4096 +migrate-speed 4096 + +# In order to reduce data transmission times and improve the efficiency of data migration, +# pipeline is adopted to send multiple data at once. Pipeline size can be set by this option. +# Value: [1, INT_MAX], it can't be 0 +# +# Default: 16 +migrate-pipeline-size 16 + +# In order to reduce the write forbidden time during migrating slot, we will migrate the incremental +# data several times to reduce the amount of incremental data. Until the quantity of incremental +# data is reduced to a certain threshold, slot will be forbidden write. The threshold is set by +# this option. +# Value: [1, INT_MAX], it can't be 0 +# +# Default: 10000 +migrate-sequence-gap 10000 + +################################ ROCKSDB ##################################### + +# Specify the capacity of column family block cache. A larger block cache +# may make requests faster while more keys would be cached. Max Size is 400*1024. +# Default: 4096MB +rocksdb.block_cache_size 4096 + +# Specify the type of cache used in the block cache. +# Accept value: "lru", "hcc" +# "lru" stands for the cache with the LRU(Least Recently Used) replacement policy. +# +# "hcc" stands for the Hyper Clock Cache, a lock-free cache alternative +# that offers much improved CPU efficiency vs. LRU cache under high parallel +# load or high contention. +# +# default lru +rocksdb.block_cache_type lru + +# A global cache for table-level rows in RocksDB. If almost always point +# lookups, enlarging row cache may improve read performance. Otherwise, +# if we enlarge this value, we can lessen metadata/subkey block cache size. +# +# Default: 0 (disabled) +# Deprecated +#rocksdb.row_cache_size 0 + +# Number of open files that can be used by the DB. 
You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. For universal-style compaction, you can usually set it to -1. +# Default: 8096 +rocksdb.max_open_files 8096 + +# Amount of data to build up in memory (backed by an unsorted log +# on disk) before converting to a sorted on-disk file. +# +# Larger values increase performance, especially during bulk loads. +# Up to max_write_buffer_number write buffers may be held in memory +# at the same time, +# so you may wish to adjust this parameter to control memory usage. +# Also, a larger write buffer will result in a longer recovery time +# the next time the database is opened. +# +# Note that write_buffer_size is enforced per column family. +# See db_write_buffer_size for sharing memory across column families. + +# default is 64MB +rocksdb.write_buffer_size 64 + +# Target file size for compaction, target file size for Level N can be calculated +# by target_file_size_base * (target_file_size_multiplier ^ (L-1)) +# +# Default: 128MB +rocksdb.target_file_size_base 128 + +# The maximum number of write buffers that are built up in memory. +# The default and the minimum number is 2, so that when 1 write buffer +# is being flushed to storage, new writes can continue to the other +# write buffer. +# If max_write_buffer_number > 3, writing will be slowed down to +# options.delayed_write_rate if we are writing to the last write buffer +# allowed. +rocksdb.max_write_buffer_number 4 + +# Maximum number of concurrent background jobs (compactions and flushes). +# For backwards compatibility we will set `max_background_jobs = +# max_background_compactions + max_background_flushes` in the case where user +# sets at least one of `max_background_compactions` or `max_background_flushes` +# (we replace -1 by 1 in case one option is unset). 
+rocksdb.max_background_jobs 4 + +# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs +# Maximum number of concurrent background compaction jobs, submitted to +# the default LOW priority thread pool. +rocksdb.max_background_compactions -1 + +# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs +# Maximum number of concurrent background memtable flush jobs, submitted by +# default to the HIGH priority thread pool. If the HIGH priority thread pool +# is configured to have zero threads, flush jobs will share the LOW priority +# thread pool with compaction jobs. +rocksdb.max_background_flushes -1 + +# This value represents the maximum number of threads that will +# concurrently perform a compaction job by breaking it into multiple, +# smaller ones that are run simultaneously. +# Default: 2 +# Invalid config +#rocksdb.max_sub_compactions 2 + +# In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size +# as the trigger of column family flush. Once WALs exceed this size, RocksDB +# will start forcing the flush of column families to allow deletion of some +# oldest WALs. This config can be useful when column families are updated at +# non-uniform frequencies. If there's no size limit, users may need to keep +# really old WALs when the infrequently-updated column families hasn't flushed +# for a while. +# +# In kvrocks, we use multiple column families to store metadata, subkeys, etc. +# If users always use string type, but use list, hash and other complex data types +# infrequently, there will be a lot of old WALs if we don't set size limit +# (0 by default in rocksdb), because rocksdb will dynamically choose the WAL size +# limit to be [sum of all write_buffer_size * max_write_buffer_number] * 4 if set to 0. 
+# +# Moreover, you should increase this value if you already set rocksdb.write_buffer_size +# to a big value, to avoid influencing the effect of rocksdb.write_buffer_size and +# rocksdb.max_write_buffer_number. +# +# default is 512MB +rocksdb.max_total_wal_size 512 + +# We implement the replication with rocksdb WAL, it would trigger full sync when the seq was out of range. +# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted. +# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that +# are older than WAL_ttl_seconds will be deleted# +# +# Default: 3 Hours +rocksdb.wal_ttl_seconds 10800 + +# If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, +# WAL files will be checked every 10 min and if total size is greater +# then WAL_size_limit_MB, they will be deleted starting with the +# earliest until size_limit is met. All empty files will be deleted +# Default: 16GB +rocksdb.wal_size_limit_mb 16384 + +# Approximate size of user data packed per block. Note that the +# block size specified here corresponds to uncompressed data. The +# actual size of the unit read from disk may be smaller if +# compression is enabled. +# +# Default: 16KB +rocksdb.block_size 16384 + +# Indicating if we'd put index/filter blocks to the block cache +# +# Default: yes +rocksdb.cache_index_and_filter_blocks yes + +# Specify the compression to use. Only compress level greater +# than 2 to improve performance. +# Accept value: "no", "snappy", "lz4", "zstd", "zlib" +# default snappy +rocksdb.compression snappy + +# If non-zero, we perform bigger reads when doing compaction. If you're +# running RocksDB on spinning disks, you should set this to at least 2MB. +# That way RocksDB's compaction is doing sequential instead of random reads. +# When non-zero, we also force new_table_reader_for_compaction_inputs to +# true. 
+ +# Default: 2 MB +rocksdb.compaction_readahead_size 2097152 + +# The limited write rate to DB if soft_pending_compaction_bytes_limit or +# level0_slowdown_writes_trigger is triggered. + +# If the value is 0, we will infer a value from `rate_limiter` value +# if it is not empty, or 16MB if `rate_limiter` is empty. Note that +# if users change the rate in `rate_limiter` after DB is opened, +# `delayed_write_rate` won't be adjusted. +# +rocksdb.delayed_write_rate 0 +# If enable_pipelined_write is true, separate write thread queue is +# maintained for WAL write and memtable write. +# +# Default: no +rocksdb.enable_pipelined_write no + +# Soft limit on number of level-0 files. We start slowing down writes at this +# point. A value <0 means that no writing slow down will be triggered by +# number of files in level-0. +# +# Default: 20 +rocksdb.level0_slowdown_writes_trigger 20 + +# Maximum number of level-0 files. We stop writes at this point. +# +# Default: 40 +rocksdb.level0_stop_writes_trigger 40 + +# Number of files to trigger level-0 compaction. +# +# Default: 4 +rocksdb.level0_file_num_compaction_trigger 4 + +# if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec +# +# Default: 0 +rocksdb.stats_dump_period_sec 0 + +# if yes, the auto compaction would be disabled, but the manual compaction remain works +# +# Default: no +rocksdb.disable_auto_compactions no + +# BlobDB(key-value separation) is essentially RocksDB for large-value use cases. +# Since 6.18.0, The new implementation is integrated into the RocksDB core. +# When set, large values (blobs) are written to separate blob files, and only +# pointers to them are stored in SST files. This can reduce write amplification +# for large-value use cases at the cost of introducing a level of indirection +# for reads. Please see: https://github.com/facebook/rocksdb/wiki/BlobDB. +# +# Note that when enable_blob_files is set to yes, BlobDB-related configuration +# items will take effect.
+# +# Default: no +rocksdb.enable_blob_files no + +# The size of the smallest value to be stored separately in a blob file. Values +# which have an uncompressed size smaller than this threshold are stored alongside +# the keys in SST files in the usual fashion. +# +# Default: 4096 byte, 0 means that all values are stored in blob files +rocksdb.min_blob_size 4096 + +# The size limit for blob files. When writing blob files, a new file is +# opened once this limit is reached. +# +# Default: 268435456 bytes +rocksdb.blob_file_size 268435456 + +# Enables garbage collection of blobs. Valid blobs residing in blob files +# older than a cutoff get relocated to new files as they are encountered +# during compaction, which makes it possible to clean up blob files once +# they contain nothing but obsolete/garbage blobs. +# See also rocksdb.blob_garbage_collection_age_cutoff below. +# +# Default: yes +rocksdb.enable_blob_garbage_collection yes + +# The percentage cutoff in terms of blob file age for garbage collection. +# Blobs in the oldest N blob files will be relocated when encountered during +# compaction, where N = (garbage_collection_cutoff/100) * number_of_blob_files. +# Note that this value must belong to [0, 100]. +# +# Default: 25 +rocksdb.blob_garbage_collection_age_cutoff 25 + + +# The purpose of the following three options are to dynamically adjust the upper limit of +# the data that each layer can store according to the size of the different +# layers of the LSM. Enabling this option will bring some improvements in +# deletion efficiency and space amplification, but it will lose a certain +# amount of read performance. +# If you want to know more details about Levels' Target Size, you can read RocksDB wiki: +# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size +# +# Default: yes +rocksdb.level_compaction_dynamic_level_bytes yes + +# The total file size of level-1 sst. 
+ +# Default: 268435456 bytes +rocksdb.max_bytes_for_level_base 268435456 + +# Multiplication factor for the total file size of L(n+1) layers. +# This option is a double type number in RocksDB, but kvrocks does +# not support the double data type number yet, so we use integer +# number instead of double currently. +# +# Default: 10 +rocksdb.max_bytes_for_level_multiplier 10 + +# This feature only takes effect in Iterators and MultiGet. +# If yes, RocksDB will try to read asynchronously and in parallel as much as possible to hide IO latency. +# In iterators, it will prefetch data asynchronously in the background for each file being iterated on. +# In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible. + +# Default no +rocksdb.read_options.async_io no + +# If yes, the write will be flushed from the operating system +# buffer cache before the write is considered complete. +# If this flag is enabled, writes will be slower. +# If this flag is disabled, and the machine crashes, some recent +# writes may be lost. Note that if it is just the process that +# crashes (i.e., the machine does not reboot), no writes will be +# lost even if sync==false. +# +# Default: no +rocksdb.write_options.sync no + +# If yes, writes will not first go to the write ahead log, +# and the write may get lost after a crash. +# You must keep wal enabled if you use replication. +# +# Default: no +rocksdb.write_options.disable_wal no + +# If enabled and we need to wait or sleep for the write request, fails +# immediately. +# +# Default: no +rocksdb.write_options.no_slowdown no + +# If enabled, write requests are of lower priority if compaction is +# behind. In this case, no_slowdown = true, the request will be canceled +# immediately. Otherwise, it will be slowed down. +# The slowdown value is determined by RocksDB to guarantee +# it introduces minimum impacts to high priority writes.
+# +# Default: no +rocksdb.write_options.low_pri no + +# If enabled, this writebatch will maintain the last insert positions of each +# memtable as hints in concurrent write. It can improve write performance +# in concurrent writes if keys in one writebatch are sequential. +# +# Default: no +rocksdb.write_options.memtable_insert_hint_per_batch no + + +# Support RocksDB auto-tune rate limiter for the background IO +# if enabled, Rate limiter will limit the compaction write if flush write is high +# Please see https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html +# +# Default: yes +rocksdb.rate_limiter_auto_tuned yes + +# Enable this option will schedule the deletion of obsolete files in a background thread +# on iterator destruction. It can reduce the latency if there are many files to be removed. +# see https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io +# +# Default: yes +# rocksdb.avoid_unnecessary_blocking_io yes + +################################ NAMESPACE ##################################### +# namespace.test change.me + + +-# investigation -> db ???? +-# ail2ail -> a2a ???? 
+ + +backup-dir DATA_KVROCKS/backup +log-dir DATA_KVROCKS +namespace.cor ail_correls +namespace.crawl ail_crawlers +namespace.db ail_datas +namespace.dup ail_dups +namespace.lg ail_langs +namespace.obj ail_objs +namespace.rel ail_rels +namespace.se ail_searchs +namespace.stat ail_stats +namespace.tag ail_tags +namespace.tl ail_tls +namespace.track ail_trackers \ No newline at end of file diff --git a/other_installers/docker/update.cfg b/other_installers/docker/update.cfg new file mode 100644 index 00000000..0fea91c3 --- /dev/null +++ b/other_installers/docker/update.cfg @@ -0,0 +1,4 @@ +[Update] +auto_update = False +upstream = upstream +update-fork = False From 612f49412d17416d35a2a8783279f299e31ce1e4 Mon Sep 17 00:00:00 2001 From: rht-jbittner <40197105+rht-jbittner@users.noreply.github.com> Date: Fri, 14 Nov 2025 14:14:49 +0100 Subject: [PATCH 03/13] Fixed Typo in Dockerfile --- other_installers/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other_installers/docker/Dockerfile b/other_installers/docker/Dockerfile index 02ec4296..703a472a 100644 --- a/other_installers/docker/Dockerfile +++ b/other_installers/docker/Dockerfile @@ -11,7 +11,7 @@ ARG SKIP_GEN_CERT ARG SKIP_DB_SETUP ARG SKIP_LNX_PKG_INSTALL -Installing_deps.sh script env (arg to env mapping) +# Installing_deps.sh script env (arg to env mapping) ENV SKIP_REDIS=$SKIP_REDIS ENV SKIP_TLSH=$SKIP_TLSH ENV SKIP_PGPDUMP=$SKIP_PGPDUMP From e032fac671c45ff993818d1f219318a26616577e Mon Sep 17 00:00:00 2001 From: rht-jbittner <40197105+rht-jbittner@users.noreply.github.com> Date: Fri, 14 Nov 2025 14:19:25 +0100 Subject: [PATCH 04/13] added timezone and ubuntu version to image --- other_installers/docker/Dockerfile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/other_installers/docker/Dockerfile b/other_installers/docker/Dockerfile index 703a472a..8b42604e 100644 --- a/other_installers/docker/Dockerfile +++ b/other_installers/docker/Dockerfile @@ -1,4 
+1,4 @@ -ARG BASE_IMAGE=ubuntu +ARG BASE_IMAGE=ubuntu:24.04 FROM $BASE_IMAGE # Installing_deps.sh script arguments @@ -30,6 +30,11 @@ ENV AIL_REDIS ${AIL_HOME}/redis/src ENV AIL_ARDB ${AIL_HOME}/ardb/src ENV AIL_VENV ${AIL_HOME}/AILENV + +ARG tz_buildtime=Europe/Prague +ENV TZ=$tz_buildtime +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + #ENV PATH ${AIL_VENV}/bin:${AIL_HOME}:${AIL_REDIS}:${AIL_ARDB}:${AIL_BIN}:${AIL_FLASK}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin # Build Dependencies From bddfdf369f5568e776ba4d7de19e36c4a6e9401f Mon Sep 17 00:00:00 2001 From: rht-jbittner <40197105+rht-jbittner@users.noreply.github.com> Date: Fri, 14 Nov 2025 15:08:02 +0100 Subject: [PATCH 05/13] Added option to skip git submodule init --- installing_deps.sh | 4 ++++ other_installers/docker/Dockerfile | 2 ++ 2 files changed, 6 insertions(+) diff --git a/installing_deps.sh b/installing_deps.sh index 377ef5cd..a79152f9 100755 --- a/installing_deps.sh +++ b/installing_deps.sh @@ -61,7 +61,11 @@ else fi # SUBMODULES # +if [ -z "$SKIP_GIT_SUBMODULE" ]; then git submodule update --init --recursive +else + echo "--- Skipping git submodule init ---" +fi # REDIS # if [ -z "$SKIP_REDIS" ]; then diff --git a/other_installers/docker/Dockerfile b/other_installers/docker/Dockerfile index 8b42604e..b73be2ac 100644 --- a/other_installers/docker/Dockerfile +++ b/other_installers/docker/Dockerfile @@ -10,6 +10,7 @@ ARG SKIP_KVROCKS ARG SKIP_GEN_CERT ARG SKIP_DB_SETUP ARG SKIP_LNX_PKG_INSTALL +ARG SKIP_GIT_SUBMODULE # Installing_deps.sh script env (arg to env mapping) ENV SKIP_REDIS=$SKIP_REDIS @@ -20,6 +21,7 @@ ENV SKIP_KVROCKS=$SKIP_KVROCKS ENV SKIP_GEN_CERT=$SKIP_GEN_CERT ENV SKIP_DB_SETUP=$SKIP_DB_SETUP ENV SKIP_LNX_PKG_INSTALL=$SKIP_LNX_PKG_INSTALL +ENV SKIP_GIT_SUBMODULE=$SKIP_GIT_SUBMODULE # AIL runtime env variables ENV LANG C.UTF-8 From 444d378cd4bad20539f1320843033b2240784450 Mon Sep 17 00:00:00 2001 From: rht-jbittner
<40197105+rht-jbittner@users.noreply.github.com> Date: Fri, 14 Nov 2025 17:21:00 +0100 Subject: [PATCH 06/13] Minor changes and initial Readme --- other_installers/docker/Dockerfile | 16 ++++++---- other_installers/docker/README.md | 51 +++++++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 7 deletions(-) diff --git a/other_installers/docker/Dockerfile b/other_installers/docker/Dockerfile index b73be2ac..8ffc9254 100644 --- a/other_installers/docker/Dockerfile +++ b/other_installers/docker/Dockerfile @@ -100,13 +100,19 @@ RUN wget https://github.com/RocksLabs/kvrocks-fpm/releases/download/202502091/kv RUN useradd -m -s /bin/bash ail # Create ail framework work directory -RUN mkdir -p /home/ail/ail-framework +RUN mkdir -p ${AIL_HOME} # Change to ail work directory -WORKDIR /home/ail/ail-framework +WORKDIR ${AIL_HOME} + +# Copy AIL dirs +COPY --parents bin configs doc files logs samples tests tools update var ./ # Copy AIL files -COPY bin configs doc files logs samples tests tools update var install_virtualenv.sh installing_deps.sh requirements.txt reset_AIL.sh ./ +COPY install_virtualenv.sh installing_deps.sh requirements.txt reset_AIL.sh ./ + +# Copy Docker modified files part 1 - to pass installing_deps.sh check +COPY other_installers/docker/core.cfg configs/core.cfg # Start AIL installers RUN apt-get update && \ @@ -116,11 +122,9 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -# Copy Docker modified files +# Copy Docker modified files part 2 COPY other_installers/docker/update.cfg configs/update.cfg -COPY other_installers/docker/core.cfg configs/core.cfg - COPY other_installers/docker/kvrocks_6383.conf configs/6383.conf COPY other_installers/docker/docker_start.sh /usr/local/sbin/ diff --git a/other_installers/docker/README.md b/other_installers/docker/README.md index 0e8521fa..ce2287b6 100644 --- a/other_installers/docker/README.md +++ b/other_installers/docker/README.md @@ -1 +1,50 @@ -TBA \ No newline at end of file +# 
Container Installation + +Still unde development - mostly notes rather than comprehensive manual + + +## Build + +Get the source + +``` +git clone -b dev https://github.com/rht-jbittner/ail-framework.git +cd ail-framework +git submodule update --init --recursive +``` + +Build primary container. In this example we disable most dependency builds as we do not need it in pure AIL container. +``` +podman build -t ail\ + --build-arg "BASE_IMAGE=ubuntu:24.04" \ + --build-arg "http_proxy=$http_proxy" \ + --build-arg "https_proxy=$https_proxy" \ + --build-arg "no_proxy=$no_proxy" \ + --build-arg "SKIP_REDIS=1" \ + --build-arg "SKIP_PGPDUMP=1" \ + --build-arg "SKIP_YARA=1" \ + --build-arg "SKIP_KVROCKS=1" \ + --build-arg "SKIP_GEN_CERT=1" \ + --build-arg "SKIP_DB_SETUP=1" \ + --build-arg "SKIP_LNX_PKG_INSTALL=1" \ + --build-arg "SKIP_GIT_SUBMODULE=1" \ + -f other_installers/docker/Dockerfile . +``` + +Comment: skipping tlsh build (`--build-arg "SKIP_TLSH=1"`) is possible, but additional chages would need to be added to install_virtualenv.sh file. In this case it probably does not matter, but package python3-tlsh exists in Ubuntu. + +## Start Container + +Simple way to start AIL container. We disable redis and kvrock related stuff as we have it deployed separately (not covered in this document). +``` +podman run --rm -p 7000:7000 --name ail \ + -e SKIP_LAUNCH_REDIS=true \ + -e SKIP_LAUNCH_KVROCKS=false \ + -e SKIP_CHECK_REDIS=true \ + -e SKIP_CHECK_KVROCKS=true \ + localhost/ail +``` + +## Create default user + +podman exec -it ail /bin/bash -c ". 
./AILENV/bin/activate && cd var/www && python3 ./create_default_user.py" From e8ae8ff806fcf4e43a73a72311e58fc126e5345b Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Tue, 18 Nov 2025 11:48:41 +0100 Subject: [PATCH 07/13] initial configuration for kvrocks and new docker-compose files --- other_installers/docker/core.cfg | 24 +- other_installers/docker/docker-compose.yml | 21 + other_installers/docker/kvrocks.conf | 185 +++++ other_installers/docker/kvrocks_6383.conf | 915 --------------------- 4 files changed, 218 insertions(+), 927 deletions(-) create mode 100644 other_installers/docker/docker-compose.yml create mode 100644 other_installers/docker/kvrocks.conf delete mode 100644 other_installers/docker/kvrocks_6383.conf diff --git a/other_installers/docker/core.cfg b/other_installers/docker/core.cfg index 66b7b034..e886fe03 100644 --- a/other_installers/docker/core.cfg +++ b/other_installers/docker/core.cfg @@ -184,62 +184,62 @@ db = 1 ##### KVROCKS ##### [Kvrocks_DB] -host = localhost +host = kvrocks port = 6383 password = ail [Kvrocks_Duplicates] -host = localhost +host = kvrocks port = 6383 password = ail_dups [Kvrocks_Correlations] -host = localhost +host = kvrocks port = 6383 password = ail_correls [Kvrocks_Crawler] -host = localhost +host = kvrocks port = 6383 password = ail_crawlers [Kvrocks_Languages] -host = localhost +host = kvrocks port = 6383 password = ail_langs [Kvrocks_Objects] -host = localhost +host = kvrocks port = 6383 password = ail_objs [Kvrocks_Relationships] -host = localhost +host = kvrocks port = 6383 password = ail_rels [Kvrocks_Searchs] -host = localhost +host = kvrocks port = 6383 password = ail_searchs [Kvrocks_Timeline] -host = localhost +host = kvrocks port = 6383 password = ail_tls [Kvrocks_Stats] -host = localhost +host = kvrocks port = 6383 password = ail_stats [Kvrocks_Tags] -host = localhost +host = kvrocks port = 6383 password = ail_tags [Kvrocks_Trackers] -host = localhost +host = kvrocks port = 6383 password = ail_trackers 
diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker/docker-compose.yml new file mode 100644 index 00000000..54e41a90 --- /dev/null +++ b/other_installers/docker/docker-compose.yml @@ -0,0 +1,21 @@ +version: '3.8' + +services: + ail: + container_name: ail + image: localhost/ail + ports: + - "7000:7000" + environment: + - SKIP_LAUNCH_REDIS=true + - SKIP_LAUNCH_KVROCKS=false + - SKIP_CHECK_REDIS=true + - SKIP_CHECK_KVROCKS=true + depends_on: + - kvrocks + + kvrocks: + container_name: kvrocks + image: images.paas.redhat.com/it-cloud-ocp-proxy/apache/kvrocks:2.14.0 + volumes: + - ./kvrocks.conf:/var/lib/kvrocks/kvrocks.conf:Z \ No newline at end of file diff --git a/other_installers/docker/kvrocks.conf b/other_installers/docker/kvrocks.conf new file mode 100644 index 00000000..971c56fb --- /dev/null +++ b/other_installers/docker/kvrocks.conf @@ -0,0 +1,185 @@ +################################ GENERAL ##################################### + +# BINDING: Set to 0.0.0.0 to allow access from outside the Docker container. +bind 0.0.0.0 + +# PORT: Set to 6383 to match your original AIL configuration. +port 6383 + +# TIMEOUT: Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# WORKERS: The number of worker threads. +workers 8 + +# DAEMONIZE: Always 'no' for Docker. +daemonize no + +# SOCKET: Support for passing file descriptors (Standard in newer versions) +socket-fd -1 + +################################ AUTHENTICATION ############################## + +# PASSWORD: Set to 'ail' to match your original configuration. +requirepass ail + +# MASTERAUTH: If you use replication, set this to the master's password. 
+# masterauth ail + +################################ CLUSTER ##################################### + +cluster-enabled no +persist-cluster-nodes-enabled yes + +################################ REPLICATION ################################# + +# Namespace replication settings +repl-namespace-enabled no + +# Standard limits +maxclients 10000 +db-name change.me.db + +# DIRECTORY: Standard path for Docker persistence. +# Ensure your docker-compose maps a volume to this path. +dir /var/lib/kvrocks + +# LOGGING: Standard Docker logging (stdout) +log-dir stdout +log-level info +log-retention-days -1 + +# SLAVE SETTINGS +slave-read-only yes +slave-priority 100 +slave-serve-stale-data yes +slave-empty-db-before-fullsync no +purge-backup-on-fullsync no + +# REPLICATION TIMEOUTS (Newer defaults) +replication-connect-timeout-ms 3100 +replication-recv-timeout-ms 3200 +replication-group-sync no +replication-no-slowdown yes + +################################ LIMITS & PROTOCOLS ########################## + +# Max replication speed (0 = unlimited) +max-replication-mb 0 +max-io-mb 0 +max-db-size 0 + +# Backup retention +max-backup-to-keep 1 +max-backup-keep-hours 24 + +# Compatibility settings +redis-cursor-compatible yes +resp3-enabled yes +json-max-nesting-depth 1024 +json-storage-format json + +# Experimental Transactional Context (Default: no) +txn-context-enabled no +lua-strict-key-accessing no + +################################ SECURITY (TLS) ############################## + +# TLS is disabled by default (port 0) +tls-port 0 +tls-auth-clients no +tls-prefer-server-ciphers yes +tls-session-caching no +tls-replication no + +################################ LOGGING & MONITORING ######################## + +# SLOW LOG +slowlog-log-slower-than 100000 +slowlog-max-len 128 +slowlog-dump-logfile-level off + +# PERF LOG +profiling-sample-ratio 0 +profiling-sample-record-max-len 256 +profiling-sample-record-threshold-ms 100 + +################################ CRON (MAINTENANCE) 
########################## + +# Compaction Checker (Newer Syntax) +# Checks for compaction needs between 0:00 and 7:00 AM daily +compaction-checker-cron * 0-7 * * * + +# Auto-resize estimated keyspace +# dbsize-scan-cron 0 * * * * + +################################ MIGRATION ################################### + +migrate-type raw-key-value +migrate-speed 4096 +migrate-pipeline-size 16 +migrate-sequence-gap 10000 +migrate-batch-size-kb 16 +migrate-batch-rate-limit-mb 16 + +################################ ROCKSDB TUNING ############################## +# Modern defaults for performance + +rocksdb.block_cache_size 4096 +rocksdb.block_cache_type lru +rocksdb.max_open_files 8096 +rocksdb.write_buffer_size 64 +rocksdb.target_file_size_base 128 +rocksdb.max_write_buffer_number 4 +rocksdb.min_write_buffer_number_to_merge 1 +rocksdb.max_background_jobs 4 +rocksdb.max_subcompactions 2 +rocksdb.wal_compression no +rocksdb.max_total_wal_size 512 +rocksdb.dump_malloc_stats yes +rocksdb.wal_ttl_seconds 10800 +rocksdb.wal_size_limit_mb 16384 +rocksdb.block_size 16384 +rocksdb.cache_index_and_filter_blocks yes +rocksdb.compression snappy +rocksdb.compression_level 32767 +rocksdb.compaction_readahead_size 2097152 +rocksdb.compression_start_level 2 + +# ASYNC IO: Enabled for performance (Requires modern Kernel/Docker) +rocksdb.read_options.async_io yes + +rocksdb.write_options.sync no +rocksdb.write_options.disable_wal no +rocksdb.write_options.no_slowdown no +rocksdb.rate_limiter_auto_tuned yes +rocksdb.partition_filters yes + +# BLOB DB (Large value optimization) +rocksdb.enable_blob_files no +rocksdb.min_blob_size 4096 +rocksdb.blob_file_size 268435456 +rocksdb.enable_blob_garbage_collection yes +rocksdb.blob_garbage_collection_age_cutoff 25 +enable-blob-cache no + +# LEVEL COMPACTION +rocksdb.level_compaction_dynamic_level_bytes yes +rocksdb.max_bytes_for_level_base 268435456 +rocksdb.max_bytes_for_level_multiplier 10 + +################################ NAMESPACES 
################################## +# Imported from your AIL configuration + +namespace.cor ail_correls +namespace.crawl ail_crawlers +namespace.db ail_datas +namespace.dup ail_dups +namespace.lg ail_langs +namespace.obj ail_objs +namespace.rel ail_rels +namespace.se ail_searchs +namespace.stat ail_stats +namespace.tag ail_tags +namespace.tl ail_tls +namespace.track ail_trackers \ No newline at end of file diff --git a/other_installers/docker/kvrocks_6383.conf b/other_installers/docker/kvrocks_6383.conf deleted file mode 100644 index 1949f1e7..00000000 --- a/other_installers/docker/kvrocks_6383.conf +++ /dev/null @@ -1,915 +0,0 @@ -################################ GENERAL ##################################### - -# By default kvrocks listens for connections from localhost interface. -# It is possible to listen to just one or multiple interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# bind 0.0.0.0 -bind 127.0.0.1 - -# Unix socket. -# -# Specify the path for the unix socket that will be used to listen for -# incoming connections. There is no default, so kvrocks will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/kvrocks.sock -# unixsocketperm 777 - -# Accept connections on the specified port, default is 6666. -port 6383 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# The number of worker's threads, increase or decrease would affect the performance. -workers 8 - -# By default, kvrocks does not run as a daemon. Use 'yes' if you need it. -# Note that kvrocks will write a PID file in /var/run/kvrocks.pid when daemonized -daemonize no - -# Kvrocks implements the cluster solution that is similar to the Redis cluster solution. 
-# You can get cluster information by CLUSTER NODES|SLOTS|INFO command, it also is -# adapted to redis-cli, redis-benchmark, Redis cluster SDK, and Redis cluster proxy. -# But kvrocks doesn't support communicating with each other, so you must set -# cluster topology by CLUSTER SETNODES|SETNODEID commands, more details: #219. -# -# PLEASE NOTE: -# If you enable cluster, kvrocks will encode key with its slot id calculated by -# CRC16 and modulo 16384, encoding key with its slot id makes it efficient to -# migrate keys based on the slot. So if you enabled at first time, cluster mode must -# not be disabled after restarting, and vice versa. That is to say, data is not -# compatible between standalone mode with cluster mode, you must migrate data -# if you want to change mode, otherwise, kvrocks will make data corrupt. -# -# Default: no - -cluster-enabled no - -# By default, namespaces are stored in the configuration file and won't be replicated -# to replicas. This option allows to change this behavior, so that namespaces are also -# propagated to slaves. Note that: -# 1) it won't replicate the 'masterauth' to prevent breaking master/replica replication -# 2) it will overwrite replica's namespace with master's namespace, so be careful of in-using namespaces -# 3) cannot switch off the namespace replication once it's enabled -# -# Default: no -repl-namespace-enabled no - -# Persist the cluster nodes topology in local file($dir/nodes.conf). This configuration -# takes effect only if the cluster mode was enabled. -# -# If yes, it will try to load the cluster topology from the local file when starting, -# and dump the cluster nodes into the file if it was changed. -# -# Default: yes -persist-cluster-nodes-enabled yes - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients. 
However, if the server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# -# Once the limit is reached the server will close all the new connections sending -# an error 'max number of clients reached'. -# -maxclients 10000 - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running kvrocks. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since kvrocks is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -requirepass ail - -# If the master is password protected (using the "masterauth" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process. Otherwise, the master will -# refuse the slave request. -# -# masterauth foobared - -# Master-Salve replication would check db name is matched. if not, the slave should -# refuse to sync the db from master. Don't use the default value, set the db-name to identify -# the cluster. -db-name change.me.db - -# The working directory -# -# The DB will be written inside this directory -# Note that you must specify a directory here, not a file name. -dir DATA_KVROCKS - -# You can configure where to store your server logs by the log-dir. -# If you don't specify one, we will use the above `dir` as our default log directory. 
-# We also can send logs to stdout/stderr is as simple as: -# -# log-dir stdout - -# Log level -# Possible values: info, warning, error, fatal -# Default: info -log-level info - -# You can configure log-retention-days to control whether to enable the log cleaner -# and the maximum retention days that the INFO level logs will be kept. -# -# if set to -1, that means to disable the log cleaner. -# if set to 0, all previous INFO level logs will be immediately removed. -# if set to between 0 to INT_MAX, that means it will retent latest N(log-retention-days) day logs. - -# By default the log-retention-days is -1. -log-retention-days -1 - -# When running in daemonize mode, kvrocks writes a PID file in ${CONFIG_DIR}/kvrocks.pid by -# default. You can specify a custom pid file location here. -# pidfile /var/run/kvrocks.pid -pidfile DATA_KVROCKS/kvrocks.pid - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -slave-read-only yes - -# The slave priority is an integer number published by Kvrocks in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slave with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the replica as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# TCP listen() backlog. 
-# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to Get the desired effect. -tcp-backlog 511 - -# If the master is an old version, it may have specified replication threads -# that use 'port + 1' as listening port, but in new versions, we don't use -# extra port to implement replication. In order to allow the new replicas to -# copy old masters, you should indicate that the master uses replication port -# or not. -# If yes, that indicates master uses replication port and replicas will connect -# to 'master's listening port + 1' when synchronization. -# If no, that indicates master doesn't use replication port and replicas will -# connect 'master's listening port' when synchronization. -master-use-repl-port no - -# Currently, master only checks sequence number when replica asks for PSYNC, -# that is not enough since they may have different replication histories even -# the replica asking sequence is in the range of the master current WAL. -# -# We design 'Replication Sequence ID' PSYNC, we add unique replication id for -# every write batch (the operation of each command on the storage engine), so -# the combination of replication id and sequence is unique for write batch. -# The master can identify whether the replica has the same replication history -# by checking replication id and sequence. -# -# By default, it is not enabled since this stricter check may easily lead to -# full synchronization. -use-rsid-psync no - -# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of -# another kvrocks server. A few things to understand ASAP about kvrocks replication. 
-# -# 1) Kvrocks replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Kvrocks slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof -# slaveof 127.0.0.1 6379 - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out-of-date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all kinds of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# To guarantee slave's data safe and serve when it is in full synchronization -# state, slave still keep itself data. But this way needs to occupy much disk -# space, so we provide a way to reduce disk occupation, slave will delete itself -# entire database before fetching files from master during full synchronization. -# If you want to enable this way, you can set 'slave-delete-db-before-fullsync' -# to yes, but you must know that database will be lost if master is down during -# full synchronization, unless you have a backup of database. 
-# -# This option is similar redis replicas RDB diskless load option: -# repl-diskless-load on-empty-db -# -# Default: no -slave-empty-db-before-fullsync no - -# A Kvrocks master is able to list the address and port of the attached -# replicas in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover replica instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP address and port normally reported by a replica is -# obtained in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the replica to connect with the master. -# -# Port: The port is communicated by the replica during the replication -# handshake, and is normally the port that the replica is using to -# listen for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the replica may actually be reachable via different IP and port -# pairs. The following two options can be used by a replica in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# replica-announce-ip 5.5.5.5 -# replica-announce-port 1234 - -# If replicas need full synchronization with master, master need to create -# checkpoint for feeding replicas, and replicas also stage a checkpoint of -# the master. If we also keep the backup, it maybe occupy extra disk space. -# You can enable 'purge-backup-on-fullsync' if disk is not sufficient, but -# that may cause remote backup copy failing. -# -# Default: no -purge-backup-on-fullsync no - -# The maximum allowed rate (in MB/s) that should be used by replication. -# If the rate exceeds max-replication-mb, replication will slow down. 
-# Default: 0 (i.e. no limit) -max-replication-mb 0 - -# The maximum allowed aggregated write rate of flush and compaction (in MB/s). -# If the rate exceeds max-io-mb, io will slow down. -# 0 is no limit -# Default: 0 -max-io-mb 0 - -# The maximum allowed space (in GB) that should be used by RocksDB. -# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail. -# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization -# Default: 0 (i.e. no limit) -max-db-size 0 - -# The maximum backup to keep, server cron would run every minutes to check the num of current -# backup, and purge the old backup if exceed the max backup num to keep. If max-backup-to-keep -# is 0, no backup would be kept. But now, we only support 0 or 1. -max-backup-to-keep 1 - -# The maximum hours to keep the backup. If max-backup-keep-hours is 0, wouldn't purge any backup. -# default: 1 day -max-backup-keep-hours 24 - -# max-bitmap-to-string-mb use to limit the max size of bitmap to string transformation(MB). -# -# Default: 16 -max-bitmap-to-string-mb 16 - -# Whether to enable SCAN-like cursor compatible with Redis. -# If enabled, the cursor will be unsigned 64-bit integers. -# If disabled, the cursor will be a string. -# Default: no -redis-cursor-compatible yes - -# Whether to enable the RESP3 protocol. -# NOTICE: RESP3 is still under development, don't enable it in production environment. -# -# Default: no -# resp3-enabled no - -# Maximum nesting depth allowed when parsing and serializing -# JSON documents while using JSON commands like JSON.SET. 
-# Default: 1024 -json-max-nesting-depth 1024 - -# The underlying storage format of JSON data type -# NOTE: This option only affects newly written/updated key-values -# The CBOR format may reduce the storage size and speed up JSON commands -# Available values: json, cbor -# Default: json -json-storage-format json - -################################## TLS ################################### - -# By default, TLS/SSL is disabled, i.e. `tls-port` is set to 0. -# To enable it, `tls-port` can be used to define TLS-listening ports. -# tls-port 0 - -# Configure a X.509 certificate and private key to use for authenticating the -# server to connected clients, masters or cluster peers. -# These files should be PEM formatted. -# -# tls-cert-file kvrocks.crt -# tls-key-file kvrocks.key - -# If the key file is encrypted using a passphrase, it can be included here -# as well. -# -# tls-key-file-pass secret - -# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL -# clients and peers. Kvrocks requires an explicit configuration of at least one -# of these, and will not implicitly use the system wide configuration. -# -# tls-ca-cert-file ca.crt -# tls-ca-cert-dir /etc/ssl/certs - -# By default, clients on a TLS port are required -# to authenticate using valid client side certificates. -# -# If "no" is specified, client certificates are not required and not accepted. -# If "optional" is specified, client certificates are accepted and must be -# valid if provided, but are not required. -# -# tls-auth-clients no -# tls-auth-clients optional - -# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended -# that older formally deprecated versions are kept disabled to reduce the attack surface. -# You can explicitly specify TLS versions to support. -# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", -# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. 
-# To enable only TLSv1.2 and TLSv1.3, use: -# -# tls-protocols "TLSv1.2 TLSv1.3" - -# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information -# about the syntax of this string. -# -# Note: this configuration applies only to <= TLSv1.2. -# -# tls-ciphers DEFAULT:!MEDIUM - -# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more -# information about the syntax of this string, and specifically for TLSv1.3 -# ciphersuites. -# -# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 - -# When choosing a cipher, use the server's preference instead of the client -# preference. By default, the server follows the client's preference. -# -# tls-prefer-server-ciphers yes - -# By default, TLS session caching is enabled to allow faster and less expensive -# reconnections by clients that support it. Use the following directive to disable -# caching. -# -# tls-session-caching no - -# Change the default number of TLS sessions cached. A zero value sets the cache -# to unlimited size. The default size is 20480. -# -# tls-session-cache-size 5000 - -# Change the default timeout of cached TLS sessions. The default timeout is 300 -# seconds. -# -# tls-session-cache-timeout 60 - -# By default, a replica does not attempt to establish a TLS connection -# with its master. -# -# Use the following directive to enable TLS on replication links. -# -# tls-replication yes - -################################## SLOW LOG ################################### - -# The Kvrocks Slow Log is a mechanism to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). 
-# -# You can configure the slow log with two parameters: one tells Kvrocks -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that -1 value disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 100000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -# If you run kvrocks from upstart or systemd, kvrocks can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting kvrocks into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -################################## PERF LOG ################################### - -# The Kvrocks Perf Log is a mechanism to log queries' performance context that -# exceeded a specified execution time. This mechanism uses rocksdb's -# Perf Context and IO Stats Context, Please see: -# https://github.com/facebook/rocksdb/wiki/Perf-Context-and-IO-Stats-Context -# -# This mechanism is enabled when profiling-sample-commands is not empty and -# profiling-sample-ratio greater than 0. -# It is important to note that this mechanism affects performance, but it is -# useful for troubleshooting performance bottlenecks, so it should only be -# enabled when performance problems occur. 
- -# The name of the commands you want to record. Must be original name of -# commands supported by Kvrocks. Use ',' to separate multiple commands and -# use '*' to record all commands supported by Kvrocks. -# Example: -# - Single command: profiling-sample-commands get -# - Multiple commands: profiling-sample-commands get,mget,hget -# -# Default: empty -# profiling-sample-commands "" - -# Ratio of the samples would be recorded. It is a number between 0 and 100. -# We simply use the rand to determine whether to record the sample or not. -# -# Default: 0 -profiling-sample-ratio 0 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the perf log with PERFLOG RESET. -# -# Default: 256 -profiling-sample-record-max-len 256 - -# profiling-sample-record-threshold-ms use to tell the kvrocks when to record. -# -# Default: 100 millisecond -profiling-sample-record-threshold-ms 100 - -################################## CRON ################################### - -# Compact Scheduler, auto compact at schedule time -# time expression format is the same as crontab(currently only support * and int) -# e.g. compact-cron 0 3 * * * 0 4 * * * -# would compact the db at 3am and 4am everyday -# compact-cron 0 3 * * * - -# The hour range that compaction checker would be active -# e.g. compaction-checker-range 0-7 means compaction checker would be worker between -# 0-7am every day. -compaction-checker-range 0-7 - -# When the compaction checker is triggered, the db will periodically pick the SST file -# with the highest "deleted percentage" (i.e. the percentage of deleted keys in the SST -# file) to compact, in order to free disk space. -# However, if a specific SST file was created more than "force-compact-file-age" seconds -# ago, and its percentage of deleted keys is higher than -# "force-compact-file-min-deleted-percentage", it will be forcely compacted as well. 
- -# Default: 172800 seconds; Range: [60, INT64_MAX]; -# force-compact-file-age 172800 -# Default: 10 %; Range: [1, 100]; -# force-compact-file-min-deleted-percentage 10 - -# Bgsave scheduler, auto bgsave at scheduled time -# time expression format is the same as crontab(currently only support * and int) -# e.g. bgsave-cron 0 3 * * * 0 4 * * * -# would bgsave the db at 3am and 4am every day - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance, the KEYS command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command KEYS b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command KEYS "" - -################################ MIGRATE ##################################### -# If the network bandwidth is completely consumed by the migration task, -# it will affect the availability of kvrocks. To avoid this situation, -# migrate-speed is adopted to limit the migrating speed. -# Migrating speed is limited by controlling the duration between sending data, -# the duration is calculated by: 1000000 * migrate-pipeline-size / migrate-speed (us). -# Value: [0,INT_MAX], 0 means no limit -# -# Default: 4096 -migrate-speed 4096 - -# In order to reduce data transmission times and improve the efficiency of data migration, -# pipeline is adopted to send multiple data at once. Pipeline size can be set by this option. -# Value: [1, INT_MAX], it can't be 0 -# -# Default: 16 -migrate-pipeline-size 16 - -# In order to reduce the write forbidden time during migrating slot, we will migrate the incremental -# data several times to reduce the amount of incremental data. Until the quantity of incremental -# data is reduced to a certain threshold, slot will be forbidden write. 
The threshold is set by -# this option. -# Value: [1, INT_MAX], it can't be 0 -# -# Default: 10000 -migrate-sequence-gap 10000 - -################################ ROCKSDB ##################################### - -# Specify the capacity of column family block cache. A larger block cache -# may make requests faster while more keys would be cached. Max Size is 400*1024. -# Default: 4096MB -rocksdb.block_cache_size 4096 - -# Specify the type of cache used in the block cache. -# Accept value: "lru", "hcc" -# "lru" stands for the cache with the LRU(Least Recently Used) replacement policy. -# -# "hcc" stands for the Hyper Clock Cache, a lock-free cache alternative -# that offers much improved CPU efficiency vs. LRU cache under high parallel -# load or high contention. -# -# default lru -rocksdb.block_cache_type lru - -# A global cache for table-level rows in RocksDB. If almost always point -# lookups, enlarging row cache may improve read performance. Otherwise, -# if we enlarge this value, we can lessen metadata/subkey block cache size. -# -# Default: 0 (disabled) -# Deprecated -#rocksdb.row_cache_size 0 - -# Number of open files that can be used by the DB. You may need to -# increase this if your database has a large working set. Value -1 means -# files opened are always kept open. You can estimate number of files based -# on target_file_size_base and target_file_size_multiplier for level-based -# compaction. For universal-style compaction, you can usually set it to -1. -# Default: 8096 -rocksdb.max_open_files 8096 - -# Amount of data to build up in memory (backed by an unsorted log -# on disk) before converting to a sorted on-disk file. -# -# Larger values increase performance, especially during bulk loads. -# Up to max_write_buffer_number write buffers may be held in memory -# at the same time, -# so you may wish to adjust this parameter to control memory usage. -# Also, a larger write buffer will result in a longer recovery time -# the next time the database is opened. 
-# -# Note that write_buffer_size is enforced per column family. -# See db_write_buffer_size for sharing memory across column families. - -# default is 64MB -rocksdb.write_buffer_size 64 - -# Target file size for compaction, target file size for Level N can be calculated -# by target_file_size_base * (target_file_size_multiplier ^ (L-1)) -# -# Default: 128MB -rocksdb.target_file_size_base 128 - -# The maximum number of write buffers that are built up in memory. -# The default and the minimum number is 2, so that when 1 write buffer -# is being flushed to storage, new writes can continue to the other -# write buffer. -# If max_write_buffer_number > 3, writing will be slowed down to -# options.delayed_write_rate if we are writing to the last write buffer -# allowed. -rocksdb.max_write_buffer_number 4 - -# Maximum number of concurrent background jobs (compactions and flushes). -# For backwards compatibility we will set `max_background_jobs = -# max_background_compactions + max_background_flushes` in the case where user -# sets at least one of `max_background_compactions` or `max_background_flushes` -# (we replace -1 by 1 in case one option is unset). -rocksdb.max_background_jobs 4 - -# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs -# Maximum number of concurrent background compaction jobs, submitted to -# the default LOW priority thread pool. -rocksdb.max_background_compactions -1 - -# DEPRECATED: it is automatically decided based on the value of rocksdb.max_background_jobs -# Maximum number of concurrent background memtable flush jobs, submitted by -# default to the HIGH priority thread pool. If the HIGH priority thread pool -# is configured to have zero threads, flush jobs will share the LOW priority -# thread pool with compaction jobs. 
-rocksdb.max_background_flushes -1 - -# This value represents the maximum number of threads that will -# concurrently perform a compaction job by breaking it into multiple, -# smaller ones that are run simultaneously. -# Default: 2 -# Invalid config -#rocksdb.max_sub_compactions 2 - -# In order to limit the size of WALs, RocksDB uses DBOptions::max_total_wal_size -# as the trigger of column family flush. Once WALs exceed this size, RocksDB -# will start forcing the flush of column families to allow deletion of some -# oldest WALs. This config can be useful when column families are updated at -# non-uniform frequencies. If there's no size limit, users may need to keep -# really old WALs when the infrequently-updated column families hasn't flushed -# for a while. -# -# In kvrocks, we use multiple column families to store metadata, subkeys, etc. -# If users always use string type, but use list, hash and other complex data types -# infrequently, there will be a lot of old WALs if we don't set size limit -# (0 by default in rocksdb), because rocksdb will dynamically choose the WAL size -# limit to be [sum of all write_buffer_size * max_write_buffer_number] * 4 if set to 0. -# -# Moreover, you should increase this value if you already set rocksdb.write_buffer_size -# to a big value, to avoid influencing the effect of rocksdb.write_buffer_size and -# rocksdb.max_write_buffer_number. -# -# default is 512MB -rocksdb.max_total_wal_size 512 - -# We implement the replication with rocksdb WAL, it would trigger full sync when the seq was out of range. -# wal_ttl_seconds and wal_size_limit_mb would affect how archived logs will be deleted. 
-# If WAL_ttl_seconds is not 0, then WAL files will be checked every WAL_ttl_seconds / 2 and those that -# are older than WAL_ttl_seconds will be deleted# -# -# Default: 3 Hours -rocksdb.wal_ttl_seconds 10800 - -# If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, -# WAL files will be checked every 10 min and if total size is greater -# then WAL_size_limit_MB, they will be deleted starting with the -# earliest until size_limit is met. All empty files will be deleted -# Default: 16GB -rocksdb.wal_size_limit_mb 16384 - -# Approximate size of user data packed per block. Note that the -# block size specified here corresponds to uncompressed data. The -# actual size of the unit read from disk may be smaller if -# compression is enabled. -# -# Default: 16KB -rocksdb.block_size 16384 - -# Indicating if we'd put index/filter blocks to the block cache -# -# Default: yes -rocksdb.cache_index_and_filter_blocks yes - -# Specify the compression to use. Only compress level greater -# than 2 to improve performance. -# Accept value: "no", "snappy", "lz4", "zstd", "zlib" -# default snappy -rocksdb.compression snappy - -# If non-zero, we perform bigger reads when doing compaction. If you're -# running RocksDB on spinning disks, you should set this to at least 2MB. -# That way RocksDB's compaction is doing sequential instead of random reads. -# When non-zero, we also force new_table_reader_for_compaction_inputs to -# true. -# -# Default: 2 MB -rocksdb.compaction_readahead_size 2097152 - -# he limited write rate to DB if soft_pending_compaction_bytes_limit or -# level0_slowdown_writes_trigger is triggered. - -# If the value is 0, we will infer a value from `rater_limiter` value -# if it is not empty, or 16MB if `rater_limiter` is empty. Note that -# if users change the rate in `rate_limiter` after DB is opened, -# `delayed_write_rate` won't be adjusted. 
-# -rocksdb.delayed_write_rate 0 -# If enable_pipelined_write is true, separate write thread queue is -# maintained for WAL write and memtable write. -# -# Default: no -rocksdb.enable_pipelined_write no - -# Soft limit on number of level-0 files. We start slowing down writes at this -# point. A value <0 means that no writing slow down will be triggered by -# number of files in level-0. -# -# Default: 20 -rocksdb.level0_slowdown_writes_trigger 20 - -# Maximum number of level-0 files. We stop writes at this point. -# -# Default: 40 -rocksdb.level0_stop_writes_trigger 40 - -# Number of files to trigger level-0 compaction. -# -# Default: 4 -rocksdb.level0_file_num_compaction_trigger 4 - -# if not zero, dump rocksdb.stats to LOG every stats_dump_period_sec -# -# Default: 0 -rocksdb.stats_dump_period_sec 0 - -# if yes, the auto compaction would be disabled, but the manual compaction remain works -# -# Default: no -rocksdb.disable_auto_compactions no - -# BlobDB(key-value separation) is essentially RocksDB for large-value use cases. -# Since 6.18.0, The new implementation is integrated into the RocksDB core. -# When set, large values (blobs) are written to separate blob files, and only -# pointers to them are stored in SST files. This can reduce write amplification -# for large-value use cases at the cost of introducing a level of indirection -# for reads. Please see: https://github.com/facebook/rocksdb/wiki/BlobDB. -# -# Note that when enable_blob_files is set to yes, BlobDB-related configuration -# items will take effect. -# -# Default: no -rocksdb.enable_blob_files no - -# The size of the smallest value to be stored separately in a blob file. Values -# which have an uncompressed size smaller than this threshold are stored alongside -# the keys in SST files in the usual fashion. -# -# Default: 4096 byte, 0 means that all values are stored in blob files -rocksdb.min_blob_size 4096 - -# The size limit for blob files. 
When writing blob files, a new file is -# opened once this limit is reached. -# -# Default: 268435456 bytes -rocksdb.blob_file_size 268435456 - -# Enables garbage collection of blobs. Valid blobs residing in blob files -# older than a cutoff get relocated to new files as they are encountered -# during compaction, which makes it possible to clean up blob files once -# they contain nothing but obsolete/garbage blobs. -# See also rocksdb.blob_garbage_collection_age_cutoff below. -# -# Default: yes -rocksdb.enable_blob_garbage_collection yes - -# The percentage cutoff in terms of blob file age for garbage collection. -# Blobs in the oldest N blob files will be relocated when encountered during -# compaction, where N = (garbage_collection_cutoff/100) * number_of_blob_files. -# Note that this value must belong to [0, 100]. -# -# Default: 25 -rocksdb.blob_garbage_collection_age_cutoff 25 - - -# The purpose of the following three options are to dynamically adjust the upper limit of -# the data that each layer can store according to the size of the different -# layers of the LSM. Enabling this option will bring some improvements in -# deletion efficiency and space amplification, but it will lose a certain -# amount of read performance. -# If you want to know more details about Levels' Target Size, you can read RocksDB wiki: -# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size -# -# Default: yes -rocksdb.level_compaction_dynamic_level_bytes yes - -# The total file size of level-1 sst. -# -# Default: 268435456 bytes -rocksdb.max_bytes_for_level_base 268435456 - -# Multiplication factor for the total file size of L(n+1) layers. -# This option is a double type number in RocksDB, but kvrocks is -# not support the double data type number yet, so we use integer -# number instead of double currently. -# -# Default: 10 -rocksdb.max_bytes_for_level_multiplier 10 - -# This feature only takes effect in Iterators and MultiGet. 
-# If yes, RocksDB will try to read asynchronously and in parallel as much as possible to hide IO latency. -# In iterators, it will prefetch data asynchronously in the background for each file being iterated on. -# In MultiGet, it will read the necessary data blocks from those files in parallel as much as possible. - -# Default no -rocksdb.read_options.async_io no - -# If yes, the write will be flushed from the operating system -# buffer cache before the write is considered complete. -# If this flag is enabled, writes will be slower. -# If this flag is disabled, and the machine crashes, some recent -# rites may be lost. Note that if it is just the process that -# crashes (i.e., the machine does not reboot), no writes will be -# lost even if sync==false. -# -# Default: no -rocksdb.write_options.sync no - -# If yes, writes will not first go to the write ahead log, -# and the write may get lost after a crash. -# You must keep wal enabled if you use replication. -# -# Default: no -rocksdb.write_options.disable_wal no - -# If enabled and we need to wait or sleep for the write request, fails -# immediately. -# -# Default: no -rocksdb.write_options.no_slowdown no - -# If enabled, write requests are of lower priority if compaction is -# behind. In this case, no_slowdown = true, the request will be canceled -# immediately. Otherwise, it will be slowed down. -# The slowdown value is determined by RocksDB to guarantee -# it introduces minimum impacts to high priority writes. -# -# Default: no -rocksdb.write_options.low_pri no - -# If enabled, this writebatch will maintain the last insert positions of each -# memtable as hints in concurrent write. It can improve write performance -# in concurrent writes if keys in one writebatch are sequential. 
-# -# Default: no -rocksdb.write_options.memtable_insert_hint_per_batch no - - -# Support RocksDB auto-tune rate limiter for the background IO -# if enabled, Rate limiter will limit the compaction write if flush write is high -# Please see https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html -# -# Default: yes -rocksdb.rate_limiter_auto_tuned yes - -# Enable this option will schedule the deletion of obsolete files in a background thread -# on iterator destruction. It can reduce the latency if there are many files to be removed. -# see https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io -# -# Default: yes -# rocksdb.avoid_unnecessary_blocking_io yes - -################################ NAMESPACE ##################################### -# namespace.test change.me - - --# investigation -> db ???? --# ail2ail -> a2a ???? - - -backup-dir DATA_KVROCKS/backup -log-dir DATA_KVROCKS -namespace.cor ail_correls -namespace.crawl ail_crawlers -namespace.db ail_datas -namespace.dup ail_dups -namespace.lg ail_langs -namespace.obj ail_objs -namespace.rel ail_rels -namespace.se ail_searchs -namespace.stat ail_stats -namespace.tag ail_tags -namespace.tl ail_tls -namespace.track ail_trackers \ No newline at end of file From 160910ed1e3805fd0aa3d378926e2adb5134e632 Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Tue, 18 Nov 2025 12:05:00 +0100 Subject: [PATCH 08/13] added option to define images in docker-compose as env variables. 
--- other_installers/docker/docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker/docker-compose.yml index 54e41a90..5cea1b11 100644 --- a/other_installers/docker/docker-compose.yml +++ b/other_installers/docker/docker-compose.yml @@ -3,7 +3,7 @@ version: '3.8' services: ail: container_name: ail - image: localhost/ail + image: ${KVROCKS_IMAGE:-localhost/ail} ports: - "7000:7000" environment: @@ -16,6 +16,6 @@ services: kvrocks: container_name: kvrocks - image: images.paas.redhat.com/it-cloud-ocp-proxy/apache/kvrocks:2.14.0 + image: ${KVROCKS_IMAGE:-apache/kvrocks:2.14.0} volumes: - ./kvrocks.conf:/var/lib/kvrocks/kvrocks.conf:Z \ No newline at end of file From 8db2db632c9df7e97b48a9fc8006a6ba6e79636b Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Wed, 19 Nov 2025 13:05:46 +0100 Subject: [PATCH 09/13] Added valkey to docker compose and some tweaks. --- other_installers/docker/core.cfg | 19 ++-- other_installers/docker/docker-compose.yml | 102 +++++++++++++++++++-- 2 files changed, 103 insertions(+), 18 deletions(-) diff --git a/other_installers/docker/core.cfg b/other_installers/docker/core.cfg index e886fe03..e8f3df25 100644 --- a/other_installers/docker/core.cfg +++ b/other_installers/docker/core.cfg @@ -24,7 +24,7 @@ sentiment_lexicon_file = sentiment/vader_lexicon.zip/vader_lexicon/vader_lexicon [Pystemon] dir = /home/pystemon/pystemon/ -redis_host = ail-redis-cache-master +redis_host = cache redis_port = 6379 redis_db = 10 @@ -149,35 +149,34 @@ max_execution_time = 120 [Tracker_Regex] max_execution_time = 60 -##### Redis ##### +##### Redis / Valkey ##### [Redis_Cache] -#host = localhost -host = ail-redis-cache-master +host = cache port = 6379 db = 0 [Redis_Log] -host = ail-redis-log-mastert +host = log port = 6379 db = 0 [Redis_Log_submit] -host = ail-redis-log-master +host = log-submit port = 6379 db = 1 [Redis_Queues] -host = ail-redis-queues-master 
+host = queues port = 6379 db = 0 [Redis_Process] -host = ail-redis-queues-master +host = process port = 6379 db = 2 [Redis_Mixer_Cache] -host = ail-redis-queues-mastert +host = mixer-cache port = 6379 db = 1 @@ -279,7 +278,7 @@ channel = 102 bind = tcp://127.0.0.1:5556 [RedisPubSub] -host = ail-redis-queues-master +host = queues port = 6379 db = 0 diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker/docker-compose.yml index 5cea1b11..670c1ca9 100644 --- a/other_installers/docker/docker-compose.yml +++ b/other_installers/docker/docker-compose.yml @@ -1,21 +1,107 @@ version: '3.8' +# Define a reusable base for the valkey services to keep the config DRY +x-valkey-service: &valkey-service + image: ${VALKEY_IMAGE:-valkey/valkey:8} + restart: unless-stopped + networks: + - ail-network + healthcheck: + test: ["CMD", "valkey-cli", "ping"] + interval: 5s + timeout: 3s + retries: 5 + services: + kvrocks: + image: ${KVROCKS_IMAGE:-apache/kvrocks:2.14.0} + container_name: ail-kvrocks + restart: unless-stopped + networks: + - ail-network + volumes: + - kvrocks_data:/var/lib/kvrocks + - ./kvrocks.conf:/var/lib/kvrocks/kvrocks.conf:Z + healthcheck: + test: ["CMD", "redis-cli", "-h", "localhost", "-p", "6383", "-a", "ail", "ping"] + interval: 5s + timeout: 3s + retries: 5 + + cache: + <<: *valkey-service + container_name: ail-cache + volumes: + - cache_data:/data + + log: + <<: *valkey-service + container_name: ail-log + volumes: + - log_data:/data + + log-submit: + <<: *valkey-service + container_name: ail-log-submit + volumes: + - log-submit_data:/data + + queues: + <<: *valkey-service + container_name: ail-queues + volumes: + - queues_data:/data + + process: + <<: *valkey-service + container_name: ail-process + volumes: + - process_data:/data + + mixer-cache: + <<: *valkey-service + container_name: ail-mixer-cache + volumes: + - mixer-cache_data:/data + ail: + image: ${AIL_IMAGE:-localhost/ail} container_name: ail - image: 
${KVROCKS_IMAGE:-localhost/ail} + restart: unless-stopped ports: - "7000:7000" + networks: + - ail-network environment: - SKIP_LAUNCH_REDIS=true - - SKIP_LAUNCH_KVROCKS=false + - SKIP_LAUNCH_KVROCKS=true - SKIP_CHECK_REDIS=true - SKIP_CHECK_KVROCKS=true depends_on: - - kvrocks + kvrocks: + condition: service_healthy + cache: + condition: service_healthy + log: + condition: service_healthy + log-submit: + condition: service_healthy + queues: + condition: service_healthy + process: + condition: service_healthy + mixer_cache: + condition: service_healthy - kvrocks: - container_name: kvrocks - image: ${KVROCKS_IMAGE:-apache/kvrocks:2.14.0} - volumes: - - ./kvrocks.conf:/var/lib/kvrocks/kvrocks.conf:Z \ No newline at end of file +networks: + ail-network: + driver: bridge + +volumes: + kvrocks_data: + cache_data: + log_data: + log-submit_data: + queues_data: + process_data: + mixer-cache_data: \ No newline at end of file From 5a57faef66edfff166c3a1a928efd7148444d354 Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Wed, 19 Nov 2025 13:34:36 +0100 Subject: [PATCH 10/13] fixed hypen typo in docker-compose for mixer-cache --- other_installers/docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker/docker-compose.yml index 670c1ca9..0df85f21 100644 --- a/other_installers/docker/docker-compose.yml +++ b/other_installers/docker/docker-compose.yml @@ -90,7 +90,7 @@ services: condition: service_healthy process: condition: service_healthy - mixer_cache: + mixer-cache: condition: service_healthy networks: From 37e326c00e8b6dfc89dc7fc6d157a8e38a56ec63 Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Wed, 19 Nov 2025 13:47:35 +0100 Subject: [PATCH 11/13] added config to ail container --- other_installers/docker/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker/docker-compose.yml index 
0df85f21..675a9cf9 100644 --- a/other_installers/docker/docker-compose.yml +++ b/other_installers/docker/docker-compose.yml @@ -77,6 +77,8 @@ services: - SKIP_LAUNCH_KVROCKS=true - SKIP_CHECK_REDIS=true - SKIP_CHECK_KVROCKS=true + volumes: + - core.cfg:/home/ail/ail-framework/configs/core.cfg:Z depends_on: kvrocks: condition: service_healthy From 2b3d773579459607943d36c2113c3ba8cb642b85 Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Wed, 19 Nov 2025 13:49:06 +0100 Subject: [PATCH 12/13] fixed typo --- other_installers/docker/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other_installers/docker/docker-compose.yml b/other_installers/docker/docker-compose.yml index 675a9cf9..8a3174db 100644 --- a/other_installers/docker/docker-compose.yml +++ b/other_installers/docker/docker-compose.yml @@ -78,7 +78,7 @@ services: - SKIP_CHECK_REDIS=true - SKIP_CHECK_KVROCKS=true volumes: - - core.cfg:/home/ail/ail-framework/configs/core.cfg:Z + - ./core.cfg:/home/ail/ail-framework/configs/core.cfg:Z depends_on: kvrocks: condition: service_healthy From 75dfb01d3630243482872150983c729165a01298 Mon Sep 17 00:00:00 2001 From: Jakub Bittner Date: Wed, 19 Nov 2025 15:33:55 +0100 Subject: [PATCH 13/13] added Readme for docker containers --- other_installers/docker/README.md | 103 ++++++++++++++++++++++++------ 1 file changed, 84 insertions(+), 19 deletions(-) diff --git a/other_installers/docker/README.md b/other_installers/docker/README.md index ce2287b6..8587a803 100644 --- a/other_installers/docker/README.md +++ b/other_installers/docker/README.md @@ -1,25 +1,29 @@ -# Container Installation +# AIL Framework Docker Installation -Still unde development - mostly notes rather than comprehensive manual +This document provides instructions on how to build and run the AIL framework using Docker or Podman with `docker-compose` or `podman-compose`. 
+The provided setup is designed to run AIL in a multi-container environment, with separate containers for the AIL application, Valkey instances for caching and queues, and a Kvrocks instance for persistent storage. -## Build +## Prerequisites -Get the source +- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/getting-started/installation) +- `docker-compose` or `podman-compose` -``` -git clone -b dev https://github.com/rht-jbittner/ail-framework.git +## Build the AIL Container Image + +First, clone the AIL framework repository and initialize the submodules: + +```bash +git clone https://github.com/ail-project/ail-framework.git cd ail-framework git submodule update --init --recursive ``` -Build primary container. In this example we disable most dependency builds as we do not need it in pure AIL container. -``` -podman build -t ail\ +Next, build the AIL container image using the provided `Dockerfile`. The following command builds an image named `localhost/ail`, which is the default image used in the `docker-compose.yml` file. + +```bash +podman build -t localhost/ail \ --build-arg "BASE_IMAGE=ubuntu:24.04" \ - --build-arg "http_proxy=$http_proxy" \ - --build-arg "https_proxy=$https_proxy" \ - --build-arg "no_proxy=$no_proxy" \ --build-arg "SKIP_REDIS=1" \ --build-arg "SKIP_PGPDUMP=1" \ --build-arg "SKIP_YARA=1" \ @@ -31,20 +35,81 @@ podman build -t ail\ -f other_installers/docker/Dockerfile . ``` -Comment: skipping tlsh build (`--build-arg "SKIP_TLSH=1"`) is possible, but additional chages would need to be added to install_virtualenv.sh file. In this case it probably does not matter, but package python3-tlsh exists in Ubuntu. +**Note on Build Arguments:** The `SKIP_*` build arguments are used to disable the installation of dependencies that are provided by other containers in the compose setup (like Redis/Valkey and Kvrocks). This keeps the AIL container image smaller and more focused on the application itself. 
+ +## Running the AIL Framework + +The recommended way to run the AIL framework is by using `podman-compose` (or `docker-compose`) with the provided `docker-compose.yml` file. + +### 1. Launch the Services + +Navigate to the `other_installers/docker` directory and start all the services in the background: + +```bash +cd other_installers/docker +podman-compose up -d +``` + +This will start the following services: +- `ail`: The main AIL application container. +- `kvrocks`: The Kvrocks database for persistent storage. +- `cache`, `log`, `log-submit`, `queues`, `process`, `mixer-cache`: Six separate Valkey instances for various caching and queuing purposes. + +### 2. Create a Default User + +After the containers have started, you need to create an initial user to log in to the web interface. + +```bash +podman exec -it ail /bin/bash -c ". ./AILENV/bin/activate && cd var/www && python3 ./create_default_user.py" +``` + +Follow the prompts to create the user. + +### 3. Accessing AIL + +The AIL web interface will be available at `https://localhost:7000/`. + +### Managing the Services + +- **View Logs**: To view the logs of all running services, use: + ```bash + podman-compose logs -f + ``` + To view the logs of a specific service (e.g., `ail`): + ```bash + podman-compose logs -f ail + ``` + +- **Stop Services**: To stop and remove the containers and networks, use (add the `-v` flag to also remove the named data volumes): + ```bash + podman-compose down + ``` + +### Using Custom Images -## Start Container +You can override the default Valkey and Kvrocks images by setting environment variables before running `podman-compose up`: -Simple way to start AIL container. We disable redis and kvrock related stuff as we have it deployed separately (not covered in this document). +```bash + +KVROCKS_IMAGE=apache/kvrocks:2.14.0 VALKEY_IMAGE=valkey/valkey:8 podman-compose up -d ``` + +## Configuration + +The AIL framework can be configured by editing the `core.cfg` file located in the `other_installers/docker` directory.
This file is mounted into the `ail` container and will be applied on startup. + +## Running the AIL Container Standalone (for Debugging) + +It is also possible to run the AIL container by itself, but this is mainly intended for debugging and development. When running standalone, you will need to ensure that the AIL container can connect to externally running Redis/Valkey and Kvrocks instances. + +Here is a basic example of how to run the AIL container standalone: + +```bash podman run --rm -p 7000:7000 --name ail \ -e SKIP_LAUNCH_REDIS=true \ - -e SKIP_LAUNCH_KVROCKS=false \ + -e SKIP_LAUNCH_KVROCKS=true \ -e SKIP_CHECK_REDIS=true \ -e SKIP_CHECK_KVROCKS=true \ localhost/ail ``` -## Create default user - -podman exec -it ail /bin/bash -c ". ./AILENV/bin/activate && cd var/www && python3 ./create_default_user.py" +In this mode, you would need to modify `core.cfg` to point to your database instances.