diff --git a/.bashrc b/.bashrc new file mode 100644 index 0000000..f020123 --- /dev/null +++ b/.bashrc @@ -0,0 +1,181 @@ +# ~/.bashrc: executed by bash(1) for non-login shells. +# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) +# for examples + +# If not running interactively, don't do anything +case $- in + *i*) ;; + *) return;; +esac + +# don't put duplicate lines or lines starting with space in the history. +# See bash(1) for more options +HISTCONTROL=ignoreboth + +# append to the history file, don't overwrite it +shopt -s histappend + +# for setting history length see HISTSIZE and HISTFILESIZE in bash(1) +HISTSIZE=1000 +HISTFILESIZE=2000 + +# check the window size after each command and, if necessary, +# update the values of LINES and COLUMNS. +shopt -s checkwinsize + +# If set, the pattern "**" used in a pathname expansion context will +# match all files and zero or more directories and subdirectories. +#shopt -s globstar + +# make less more friendly for non-text input files, see lesspipe(1) +[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" + +# set variable identifying the chroot you work in (used in the prompt below) +if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then + debian_chroot=$(cat /etc/debian_chroot) +fi + +# set a fancy prompt (non-color, unless we know we "want" color) +case "$TERM" in + xterm-color|*-256color) color_prompt=yes;; +esac + +# uncomment for a colored prompt, if the terminal has the capability; turned +# off by default to not distract the user: the focus in a terminal window +# should be on the output of commands, not on the prompt +#force_color_prompt=yes + +if [ -n "$force_color_prompt" ]; then + if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then + # We have color support; assume it's compliant with Ecma-48 + # (ISO/IEC-6429). (Lack of such support is extremely rare, and such + # a case would tend to support setf rather than setaf.) + color_prompt=yes + else + color_prompt= + fi +fi + +if [ "$color_prompt" = yes ]; then + PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' +else + PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' +fi +unset color_prompt force_color_prompt + +# If this is an xterm set the title to user@host:dir +case "$TERM" in +xterm*|rxvt*) + PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" + ;; +*) + ;; +esac + +# enable color support of ls and also add handy aliases +if [ -x /usr/bin/dircolors ]; then + test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" + alias ls='ls --color=auto' + #alias dir='dir --color=auto' + #alias vdir='vdir --color=auto' + + alias grep='grep --color=auto' + alias fgrep='fgrep --color=auto' + alias egrep='egrep --color=auto' +fi + +# colored GCC warnings and errors +#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' + +# some more ls aliases +alias ll='ls -alF' +alias la='ls -A' +alias l='ls -CF' + +# Add an "alert" alias for long running commands. Use like so: +# sleep 10; alert +alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' + +# Alias definitions. +# You may want to put all your additions into a separate file like +# ~/.bash_aliases, instead of adding them here directly. +# See /usr/share/doc/bash-doc/examples in the bash-doc package. + +if [ -f ~/.bash_aliases ]; then + . 
~/.bash_aliases +fi + +# enable programmable completion features (you don't need to enable +# this, if it's already enabled in /etc/bash.bashrc and /etc/profile +# sources /etc/bash.bashrc). +if ! shopt -oq posix; then + if [ -f /usr/share/bash-completion/bash_completion ]; then + . /usr/share/bash-completion/bash_completion + elif [ -f /etc/bash_completion ]; then + . /etc/bash_completion + fi +fi + + + +dc() { + DC_FILE="${DOCKER_COMPOSE_FILE:-/home/ubuntu/deployment/docker-compose.yml}" + docker-compose -f "$DC_FILE" "$@" +} + +#alias dc="docker-compose -f $DC_FILE" +alias logs="docker-compose -f $DC_FILE logs" +alias n-logs="docker-compose -f $DC_FILE logs nginx" +alias ms-logs="docker-compose -f $DC_FILE logs ethereum-reader" +alias ew-logs="docker-compose -f $DC_FILE logs ethereum-writer" +alias s-logs="docker-compose -f $DC_FILE logs signer" +alias ms-exec="docker-compose -f $DC_FILE exec ethereum-reader sh" +alias ew-exec="docker-compose -f $DC_FILE exec ethereum-writer sh" +alias s-exec="docker-compose -f $DC_FILE exec signer sh" +alias docker=podman +alias sync="rsync -aq --progress --exclude='.venv' --exclude='.git' $HOME/orbs-node-on-host/ $ORBS_ROOT" + +# fix crontab -e to use VIM + +if [ "$color_prompt" = yes ]; then + # PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' + PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\] $(parse_git_branch)\$ ' +else + PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' +fi +unset color_prompt force_color_prompt + + +export EDITOR=vim +export VISUAL=vim + +# Enable history search with Up and Down arrows +bind '"\e[A": history-search-backward' +bind '"\e[B": history-search-forward' + +# need to install fzf - apt install fzf + +# Use fzf for history search +fzf-history-widget() { + local selected num + selected=$(history | fzf --tac +s --query="$READLINE_LINE") + if [[ -n $selected ]]; then + READLINE_LINE=$(echo $selected | sed -E 's/^[ ]*[0-9]+[ ]+//') + READLINE_POINT=${#READLINE_LINE} + fi +} + +bind -x '"\C-r": fzf-history-widget' + +parse_git_branch() { + git branch 2>/dev/null | sed -n '/\* /s///p' +} + +shopt -s histappend + +# Save each command to the history file immediately and reload the history +if [[ -n "$PROMPT_COMMAND" ]]; then + PROMPT_COMMAND="$PROMPT_COMMAND; history -a; history -c; history -r" +else + PROMPT_COMMAND="history -a; history -c; history -r" +fi diff --git a/.github/workflows/manager.yml b/.github/workflows/control.yml similarity index 64% rename from .github/workflows/manager.yml rename to .github/workflows/control.yml index 13aed01..c86c147 100644 --- a/.github/workflows/manager.yml +++ b/.github/workflows/control.yml @@ -1,4 +1,4 @@ -name: Manager validation +name: Control Validation on: pull_request: @@ -14,18 +14,18 @@ jobs: uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v6.1.0 with: - python-version: "3.10" + python-version: "3.11" - name: Install Poetry - uses: snok/install-poetry@v1 + uses: snok/install-poetry@v1.4.1 - name: Install dependencies with Poetry run: | poetry install --no-interaction - working-directory: manager + working-directory: control - name: Run pytest run: make test - working-directory: manager + working-directory: control diff --git a/.github/workflows/smoke-test.yml b/.github/workflows/smoke-test.yml index 3f52477..b66b57d 100644 --- a/.github/workflows/smoke-test.yml +++ b/.github/workflows/smoke-test.yml @@ -29,7 
+29,7 @@ jobs: env: ETH_ENDPOINT: ${{ secrets.ETH_ENDPOINT }} run: | - docker run -d -p 80:80 -e ETH_ENDPOINT=$ETH_ENDPOINT --privileged --name smoke_test test-ubuntu tail -f /dev/null + docker run -d -p 80:80 -e ETH_ENDPOINT=$ETH_ENDPOINT -e ORBS_ROOT="/home/ubuntu" --privileged --name smoke_test test-ubuntu tail -f /dev/null - name: Run install script run: | @@ -43,13 +43,13 @@ jobs: run: | curl -f http://localhost/service/ethereum-reader/status - - name: Check manager's status is being served using legacy name (boyar) + - name: Check control's status is being served using legacy name (boyar) run: | curl -f http://localhost/service/boyar/status - - name: Check manager's status is being served using v4 name + - name: Check control's status is being served using v4 name run: | - curl -f http://localhost/service/manager/status + curl -f http://localhost/service/control/status - name: Check logger's status is being served using legacy name (logs-service) run: | diff --git a/.gitignore b/.gitignore index 99ad11c..8f77e15 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,17 @@ config.json *venv +home/ +docs/ node_modules/ __pycache__/ +home/ ignore uv.sh .env log.txt +registry-data +node_exporter* + +**/.DS_Store diff --git a/Dockerfile b/Dockerfile index fb78fd4..77b4e41 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,9 @@ FROM ubuntu:22.04 ARG DEBIAN_FRONTEND=noninteractive # Add sudo to make more like EC2 instance -RUN apt-get update && apt-get install -y software-properties-common python3 python3-pip sudo locales vim +#RUN apt-get update && apt-get install -y software-properties-common python3 python3-pip sudo locales vim +# Add sudo to make more like EC2 instance +RUN apt-get update && apt-get install -y fzf software-properties-common python3 python3-pip sudo locales vim curl rsync git # EC2 instances usually have locale settings RUN locale-gen en_US.UTF-8 && update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 @@ -35,8 +37,8 @@ RUN echo 'alias ew-exec="docker-compose -f /home/ubuntu/deployment/docker-compos RUN echo 'alias s-exec="docker-compose -f /home/ubuntu/deployment/docker-compose.yml exec signer sh"' >> ~/.bashrc COPY --chown=ubuntu:ubuntu setup setup -COPY --chown=ubuntu:ubuntu manager manager +COPY --chown=ubuntu:ubuntu control control COPY --chown=ubuntu:ubuntu deployment deployment COPY --chown=ubuntu:ubuntu logging logging -CMD ["/bin/bash"] +CMD ["/bin/bash"] \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..49a0019 --- /dev/null +++ b/Makefile @@ -0,0 +1,28 @@ +registry: + @mkdir -p registry-data + @docker rm -f local-registry 2>/dev/null || true + docker run -d -p 6000:5000 -v $$(realpath registry-data):/var/lib/registry --name local-registry registry:2 + +build_docker_dev: + @docker buildx build --platform linux/arm64 -f local-dev.Dockerfile -t test-ubuntu_arm64 . 
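# Note: docker-compose-dev-local.yml pulls images such as
# host.docker.internal:6000/management-service:latest, so after `make registry`
# you are expected to tag and push any locally built service image into the
# local registry first, for example (the source image name is only an example):
#   docker tag management-service:latest localhost:6000/management-service:latest
#   docker push localhost:6000/management-service:latest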
+ +run_docker_dev: + @if [ -n "$(vol)" ]; then \ + docker run --privileged -v $$(pwd):/home/ubuntu/orbs-node-on-host -v $(vol):/home/ubuntu/orbs-node-remote-repo-simulate -p 80:80 --rm -it test-ubuntu_arm64; \ + else \ + docker run --privileged -v $$(pwd):/home/ubuntu/orbs-node-on-host -p 80:80 --rm -it test-ubuntu_arm64; \ + fi + #@docker run --privileged -v $$(pwd):/home/ubuntu/orbs-node-on-host -p 80:80 --rm -it test-ubuntu_arm64 + #@docker run --privileged -v $$(pwd):/opt/orbs-node -v $$(pwd)/control/.venvdocker:/home/ubuntu/control/.venv -v $$(pwd)/deployment:/home/ubuntu/deployment -v $$(pwd)/logging:/home/ubuntu/logging -v $$(pwd)/control:/home/ubuntu/control -v $$(pwd)/setup:/home/ubuntu/setup -p 80:80 --rm -it test-ubuntu_arm64 + #@docker run --privileged -v $$(pwd):/opt/orbs-node-on-host -v $$(pwd)/control/.venvdocker:/home/ubuntu/control/.venv -v $$(pwd)/deployment:/home/ubuntu/deployment -v $$(pwd)/logging:/home/ubuntu/logging -v $$(pwd)/control:/home/ubuntu/control -v $$(pwd)/setup:/home/ubuntu/setup -p 80:80 --rm -it test-ubuntu_arm64 + +run_docker_dev2: + @docker run --privileged -v $$(pwd)/control/.venvdocker:/home/ubuntu/control/.venv -v $$(pwd)/deployment:/home/ubuntu/deployment -v $$(pwd)/logging:/home/ubuntu/logging -v $$(pwd)/control:/home/ubuntu/control -v $$(pwd)/setup:/home/ubuntu/setup -p 81:80 --rm -it test-ubuntu_arm64 + +run_node: + . setup/scripts/base.sh && \ + docker-compose -f "$$DOCKER_COMPOSE_FILE" up -d + +stop_node: + . setup/scripts/base.sh && \ + docker-compose -f "$$DOCKER_COMPOSE_FILE" down diff --git a/README.md b/README.md index e7b876e..debc8ee 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,13 @@ ## What's this? -This repo is temporarily being used to hold all the Orbs v3 node validator install, manager and deployment files. In the future, they will be split into different repos +This repo is temporarily being used to hold all the Orbs v3 node validator install, control and deployment files. In the future, they will be split into different repos ## Folders -- `deployment` - Manifest files. These will eventually live at https://github.com/orbs-network/v3-deployment -- `manager` - Validator Python manager. These files will eventually live at https://github.com/orbs-network/v3-node-manager -- `setup` - Install scripts. These files will eventually live by themselves in this current repo (https://github.com/orbs-network/v3-node-setup) +- `deployment` - Manifest files. +- `control` - Validator Python control (manager) which replaced boyar. +- `setup` - Install scripts. - `logging` - A service to expose container logs. These files will also live elsewhere in the future TBD ## Developing @@ -24,7 +24,7 @@ This repo is temporarily being used to hold all the Orbs v3 node validator insta docker run \ -v $(pwd)/deployment:/home/ubuntu/deployment \ -v $(pwd)/logging:/home/ubuntu/logging \ - -v $(pwd)/manager:/home/ubuntu/manager \ + -v $(pwd)/control:/home/ubuntu/control \ -v $(pwd)/setup:/home/ubuntu/setup \ -p 80:80 --rm -it --privileged test-ubuntu ``` @@ -58,3 +58,32 @@ From Mac host, run `curl http://localhost/service/ethereum-reader/status` #### Healthcheck always shows "starting" [Podman uses systemd timers to run healtchecks periodically](https://github.com/containers/podman/issues/19326), which do not work in our dev Docker-in-Docker setup. As a workaround, you can run the command [`podman healthcheck run SERVICE`](https://docs.podman.io/en/v4.4/markdown/podman-healthcheck-run.1.html) to manually run a specific container healthcheck. 
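A minimal sketch of looping that manual workaround over the main services, assuming the `podman` CLI is available inside the dev container and that container names match the service names in `docker-compose.yml`:

```python
import subprocess

# Container names are assumed to match the compose service names
SERVICES = ["nginx", "ethereum-reader", "ethereum-writer", "matic-reader", "matic-writer", "signer", "logger"]

for service in SERVICES:
    # `podman healthcheck run` exits non-zero when the configured check fails
    result = subprocess.run(["podman", "healthcheck", "run", service])
    print(f"{service}: {'healthy' if result.returncode == 0 else 'unhealthy'}")
```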
+ + +# New version - jordan notes: - + +Run locally on MacOS: + +Build the dev guardian image: + +```make build_docker_dev``` + +Run the dev guardian image: + +```make run_docker_dev``` + +Run the installation script after getting docker container prompt: + +```source ./orbs-node-on-host/setup/install.sh --skip-req``` + +Inside the container, playing with docker-compose, make sure to run the dc command, not docker-compose, it's +an alias to docker-compose with mapping to the right docker-compose-dev.yml if any. + +```dc up -d``` for example. + +To synchronize changes made by local host to the container (e.g. after modifying a file in the host), run: + +```sync``` + + + diff --git a/containers.conf b/containers.conf new file mode 100644 index 0000000..220c1f8 --- /dev/null +++ b/containers.conf @@ -0,0 +1,12 @@ +[containers] +netns="host" +userns="host" +ipcns="host" +utsns="host" +cgroupns="host" +cgroups="disabled" +log_driver = "k8s-file" +[engine] +cgroup_manager = "cgroupfs" +events_logger="file" +runtime="crun" diff --git a/manager/.gitignore b/control/.gitignore similarity index 100% rename from manager/.gitignore rename to control/.gitignore diff --git a/manager/.pylintrc b/control/.pylintrc similarity index 100% rename from manager/.pylintrc rename to control/.pylintrc diff --git a/manager/.vscode/extensions.json b/control/.vscode/extensions.json similarity index 100% rename from manager/.vscode/extensions.json rename to control/.vscode/extensions.json diff --git a/manager/.vscode/settings.json b/control/.vscode/settings.json similarity index 95% rename from manager/.vscode/settings.json rename to control/.vscode/settings.json index 0fbc6f3..91df67d 100644 --- a/manager/.vscode/settings.json +++ b/control/.vscode/settings.json @@ -41,7 +41,7 @@ "editor.codeActionsOnSave": { "source.organizeImports": true }, - "editor.defaultFormatter": "ms-python.black-formatter", + "editor.defaultFormatter": "charliermarsh.ruff", "isort.args": [ "--profile", "black" diff --git a/manager/Makefile b/control/Makefile similarity index 71% rename from manager/Makefile rename to control/Makefile index 443c4a3..c22a050 100644 --- a/manager/Makefile +++ b/control/Makefile @@ -1,6 +1,12 @@ -include .makerc export +install_local_mac: + @POETRY_VIRTUALENVS_IN_PROJECT=false POETRY_VIRTUALENVS_PATH=./.venvmac poetry install + +use_local_mac: + @source .venvmac/bin/activate + install: poetry install diff --git a/manager/README.md b/control/README.md similarity index 60% rename from manager/README.md rename to control/README.md index b098290..460aed9 100644 --- a/manager/README.md +++ b/control/README.md @@ -1,10 +1,10 @@ -# v3-node-manager repo +# v3-node-control repo WIP ## What's this? -Python manager for Orbs v3 node validator +Python control for Orbs v3 node validator ### What does it do? @@ -17,6 +17,6 @@ Python manager for Orbs v3 node validator ## Developing -1. `make install` will install dependencies -2. `make shell` will activate virtual environment +1. `make shell` will activate virtual environment +2. `make install` will install dependencies 3. `make test` runs unit tests diff --git a/manager/poetry.lock b/control/poetry.lock similarity index 87% rename from manager/poetry.lock rename to control/poetry.lock index 5024756..7808ee4 100644 --- a/manager/poetry.lock +++ b/control/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
[[package]] name = "astroid" @@ -592,6 +592,68 @@ files = [ {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, ] +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = 
"PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + [[package]] name = "requests" version = "2.31.0" @@ -766,4 +828,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "56c40b8d25c1a0a6bcd736d843651e8e907d891dfcb5bc2b37bc460ae95ef1ea" +content-hash = "9a61577270018362cfc58806efc3f9f6f6277204c298a26db784cb60f02249e9" diff --git a/manager/poetry.toml b/control/poetry.toml similarity index 100% rename from manager/poetry.toml rename to control/poetry.toml diff --git a/manager/pyproject.toml b/control/pyproject.toml similarity index 77% rename from manager/pyproject.toml rename to control/pyproject.toml index c95679d..833a30b 100644 --- a/manager/pyproject.toml +++ b/control/pyproject.toml @@ -1,14 +1,16 @@ [tool.poetry] -name = "manager" +name = "control" version = "0.1.0" -description = "Python manager for Orbs v4 node validator" +description = "Python control for Orbs v4 node validator" authors = ["Luke Rogerson "] readme = "README.md" +package-mode = false [tool.poetry.dependencies] python = "^3.10" psutil = "^5.9.5" docker = "^6.1.3" +pyyaml = "^6.0.2" [tool.poetry.group.dev.dependencies] black = "^23.9.1" @@ -24,4 +26,4 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.black] -line-length=160 \ No newline at end of file +line-length=160 diff --git a/manager/setup.cfg b/control/setup.cfg similarity index 100% rename from manager/setup.cfg rename to control/setup.cfg diff --git a/control/src/__init__.py b/control/src/__init__.py new file mode 100644 index 0000000..a7d89be --- /dev/null +++ b/control/src/__init__.py @@ -0,0 +1,9 @@ +# Control package for Orbs v4 node validator +__version__ = "0.1.0" + +from .system_monitor import SystemMonitor +from .logger import logger +from .updater import updater +from .config import status_file + +__all__ = ["SystemMonitor", "logger", "status_file", "updater"] diff --git a/control/src/config.py b/control/src/config.py new file mode 100644 index 0000000..54b458d --- /dev/null +++ b/control/src/config.py @@ -0,0 +1,12 @@ +"""Configuration file for the control service.""" + +import os + +BASE_DIR = os.environ.get("BASE_DIR") or "/opt/orbs" +os.makedirs(f"{BASE_DIR}/control", exist_ok=True) + +CONTROL_DIR = os.path.join(BASE_DIR, "control") + +status_file = os.path.join(CONTROL_DIR, "status.json") +log_file = 
os.path.join(CONTROL_DIR, "log.txt") +errors_file = os.path.join(CONTROL_DIR, "errors.txt") diff --git a/control/src/docker-compose-healthchecks.py b/control/src/docker-compose-healthchecks.py new file mode 100755 index 0000000..94a7df6 --- /dev/null +++ b/control/src/docker-compose-healthchecks.py @@ -0,0 +1,77 @@ +import os + +import yaml +import subprocess +import sys + + +def load_docker_compose(file_path): + """Load the docker-compose.yml file.""" + try: + with open(file_path, 'r') as file: + return yaml.safe_load(file) + except Exception as e: + print(f"Error reading docker-compose.yml: {e}") + sys.exit(1) + + +def extract_healthchecks(compose_data): + """Extract healthcheck test commands from the docker-compose data.""" + healthchecks = {} + services = compose_data.get("services", {}) + + for service_name, service_config in services.items(): + healthcheck = service_config.get("healthcheck", {}) + test_command = healthcheck.get("test") + + if isinstance(test_command, str): # Ensure it's a string + healthchecks[service_name] = test_command + + return healthchecks + + +def execute_command_in_container(container_name, command): + """Execute the given command inside a running container.""" + try: + result = subprocess.run( + ["podman", "exec", container_name] + command.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + return result.returncode, result.stdout.strip(), result.stderr.strip() + except Exception as e: + return 1, "", f"Error executing command: {e}" + + +def main(): + compose_file = os.getenv('DOCKER_COMPOSE_FILE') + compose_data = load_docker_compose(compose_file) + healthchecks = extract_healthchecks(compose_data) + + if not healthchecks: + print("No healthchecks found in docker-compose.yml.") + return + + print("Found healthchecks:") + for service, command in healthchecks.items(): + print(f"Service: {service}, Command: {command}") + + print("\nExecuting healthcheck commands:") + for service, command in healthchecks.items(): + print(f"\n--- Service: {service} ---") + # Assume the container name matches the service name (may need adjustment) + container_name = service + exit_code, stdout, stderr = execute_command_in_container(container_name, command) + + print(f"Command: {command}") + print(f"Exit Code: {exit_code}") + print("Output:") + print(stdout or "No output") + if stderr: + print("Error Output:") + print(stderr) + + +if __name__ == "__main__": + main() diff --git a/manager/src/logger.py b/control/src/logger.py similarity index 90% rename from manager/src/logger.py rename to control/src/logger.py index bef31be..3068ceb 100644 --- a/manager/src/logger.py +++ b/control/src/logger.py @@ -1,4 +1,4 @@ -"""Logger module for the manager service. Outputs both to stdout and a log file.""" +"""Logger module for the control service. 
Outputs both to stdout and a log file.""" import logging diff --git a/control/src/main.py b/control/src/main.py new file mode 100755 index 0000000..b73a779 --- /dev/null +++ b/control/src/main.py @@ -0,0 +1,65 @@ +"""Main entry point of the control""" + +import docker +import sys +from config import status_file +from logger import logger +from system_monitor import SystemMonitor +import updater +from updater import get_guardian_node_id +from utils import run_command +from os import getenv + +system_monitor = SystemMonitor(client=docker.from_env()) + +data = { + "currentVersion": "0.0.0", + "scheduledVersion": None, + "updateScheduled": None, + "updateScheduledFor": None, +} + + +def main(): + """Main entry point of the control module""" + + cmd = None + if len(sys.argv) > 1: + cmd = sys.argv[1] + + logger.info(f"Running controll module on node [{get_guardian_node_id()}] ...") + updater.set_error("") + + # TODO - add back when we split into separate repos + # # Fetch all the tags from the remote repository + # run_command("git fetch origin --tags") + + # Get the latest tag + # latest_tag = run_command("git describe --tags $(git rev-list --tags --max-count=1)") + + if cmd == "poll": + try: + updater.compare() + except Exception as e: + updater.set_error(f"An error occurred while comparing: {e}") + + if cmd is None: + # hard coded for now + latest_tag = "0.0.1" + + # update control info + if latest_tag and latest_tag != data["currentVersion"]: + # checkout_command = f"git checkout {latest_tag}" + # run_command(checkout_command) # checkout the latest tag + docker_compose_file = getenv("DOCKER_COMPOSE_FILE") + + error = run_command(f"docker-compose -f {docker_compose_file} up -d") + if error: + print("Error running docker-compose") + + system_monitor.update() + system_monitor.persist(status_file) + + +if __name__ == "__main__": + main() diff --git a/manager/src/system_monitor.py b/control/src/system_monitor.py similarity index 71% rename from manager/src/system_monitor.py rename to control/src/system_monitor.py index a4154e4..a250342 100644 --- a/manager/src/system_monitor.py +++ b/control/src/system_monitor.py @@ -1,13 +1,15 @@ -""" A helper class for getting system metrics and status. 
""" +"""A helper class for getting system metrics and status.""" import json +import subprocess from datetime import datetime import docker import psutil from logger import logger -from system_monitor_types import Payload, Status +from system_monitor_types import Payload, Status, Version +from updater import get_error, get_status_for_ui, get_updating_state_for_ui class SystemMonitor: @@ -26,9 +28,11 @@ class SystemMonitor: timestamp: str = "" status: str = "" error: str = "" + extra: str = "" metrics: dict services: dict start_time: float + version: str = "" _client: docker.DockerClient @@ -37,14 +41,15 @@ def __init__(self, client: docker.DockerClient) -> None: self.metrics = {} self.services = {} + self.version = "" self.start_time = datetime.now().timestamp() self._client = client - def __str__(self): + def __str__(self) -> str: return self.__dump_json() - def __repr__(self): + def __repr__(self) -> str: return self.__dump_json() def get(self) -> Status: @@ -56,26 +61,85 @@ def get(self) -> Status: Timestamp=self.timestamp, Status=self.status, Error=self.error, - Payload=Payload(Metrics=self.metrics, Services=self.services), + Extra=self.extra, + Payload=Payload( + Version=Version(Semantic=self.version), + Metrics=self.metrics, + Services=self.services, + ), ) - def update(self): + # def set_status (self, status, error: str): + # if status == self.status and error == self.error: + # return + # + # if status == "": + # status = "OK" + # + # self.status = status + # self.error = error + # if error != "": + # logger.error("Status changed: "+status+ ", err:"+error) + # else: + # logger.info("Status changed: "+status+ ", err:"+error) + + def run_with_stderr(self, cmd: str) -> str: + # split cmd to list + cmd_list = cmd.split() + + result = subprocess.run( + cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True + ) + + return result.stdout.strip() + + def update(self) -> None: """Updates the status of the system""" logger.info("Updating system metrics and services info") now = datetime.now() metrics = self._get_metrics(now) + timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z" - self.timestamp = now.isoformat() - self.status = f"RAM = {round(metrics['MemoryUsedMBytes'], 2)}mb, CPU = {metrics['CPULoadPercent']}%" + # self.timestamp = now.isoformat() + self.timestamp = timestamp + # self.status = f"RAM = {round(metrics['MemoryUsedMBytes'], 2)}mb, CPU = {metrics['CPULoadPercent']}%" + self.status = get_status_for_ui() + self.error = get_error() # TODO: What exactly is an error in this context? - self.error = "" + # self.error = "" + # self.extra = get_status_for_ui() + # self.extra = "updating" + self.extra = get_updating_state_for_ui() self.metrics = metrics self.services = self._get_docker_service_info() + self.version = self._get_version() + + logger.info("System status updated.") + + def _get_version(self) -> str: + # Get current git commit and git tag if available and combine them to a single version string. + + commit = self.run_with_stderr("git rev-parse HEAD") + try: + tag = self.run_with_stderr("git describe --tags --exact-match") - def persist(self, status_file_path: str): + if tag == "": + tag = "untagged" + + if tag.find("fatal") > -1: + raise Exception(f"Git returned: {tag}") + + return f"{commit} / {tag}" + except Exception as e: + logger.error( + f"An error occurred while fetching the current git tag: {e}, using commit {commit} instead." 
+ ) + return f"{commit} / notag" + + def persist(self, status_file_path: str) -> None: """Persists the status of the system to a file""" logger.info("Persisting system status to file: %s", status_file_path) @@ -144,7 +208,7 @@ def _get_process_info(self) -> list[dict]: for proc in psutil.process_iter(["pid", "name", "cmdline", "memory_info"]): if proc.ppid() == 1: # Only include parent processes - memory_used_mb = "" + memory_used_mb = 0.0 if proc.info["memory_info"] is not None: memory_used_mb = self.__convert_bytes_to_mbytes( proc.info["memory_info"].rss @@ -177,15 +241,20 @@ def _get_docker_service_info(self) -> list[dict]: service_info = [] for container in self._client.containers.list(): + logger.info("Container: %s", container.name) container_attrs = container.attrs image = container_attrs.get("Image") if image is None: image = "(None)" + cmdConf = "" + if container.attrs["Config"]["Cmd"] is not None: + cmdConf = " ".join(container.attrs["Config"]["Cmd"]) + service_data = { "Name": container.name, "Image": image, - "Command": " ".join(container.attrs["Config"]["Cmd"]), + "Command": cmdConf, "Environment": self.__get_filtered_env_vars( container.attrs["Config"]["Env"] ), @@ -204,6 +273,8 @@ def _get_docker_service_info(self) -> list[dict]: service_info.append(service_data) + logger.info("Fetching done.") + return service_info def __is_not_blacklisted(self, env_var: str) -> bool: @@ -221,6 +292,6 @@ def __convert_bytes_to_mbytes(self, _bytes: int) -> float: """Converts bytes to megabytes""" return round(_bytes / 1024 / 1024, 6) - def __dump_json(self): + def __dump_json(self) -> str: """Returns a pretty-printed string representation of the status object""" return json.dumps(self.get(), indent=4) diff --git a/control/src/system_monitor_types.py b/control/src/system_monitor_types.py new file mode 100644 index 0000000..a0012c6 --- /dev/null +++ b/control/src/system_monitor_types.py @@ -0,0 +1,23 @@ +from typing import Any, Dict, TypedDict + + +class Version(TypedDict): + Semantic: str + + +class Payload(TypedDict): + """Further breakdown of the status object""" + + Version: Version + Metrics: Dict[str, Any] + Services: Dict[str, Any] + + +class Status(TypedDict): + """Corresponds to v2 status object""" + + Timestamp: str + Status: str + Error: str + Extra: str + Payload: Payload diff --git a/control/src/updater.py b/control/src/updater.py new file mode 100644 index 0000000..0281d79 --- /dev/null +++ b/control/src/updater.py @@ -0,0 +1,308 @@ +import hashlib +import os +import re +import subprocess +from datetime import datetime, timedelta + +import requests +import yaml +from logger import logger + +globalError = "" +statusForUi = [] +isInUpdatingState = False +updateTargetTime = 0 + +# ---- UPDATE-DESCRIPTOR-BEGIN ---- +# targetNodes: +# - id: 8c824c84e03de12e73fe286222c00faa3d8fd152 +# - id: 1c824c84e03de12e73fe286222c00faa3d8fd152 +# - id: * +# updateResolution: 1440 +# updateMode: immediate , scheduled +# updateInAction: false +# commit: 64816f4876aa1483ba79ee5e9b061985ccd2b6b1 +# ---- UPDATE-DESCRIPTOR-END ---- + + +def get_updating_state_for_ui(): + global isInUpdatingState, updateTargetTime + + if isInUpdatingState: + return '{"status": "updating", "time": ' + str(updateTargetTime) + "}" + else: + return "" + # return "updating" if isInUpdatingState else "" + + +def get_status_for_ui(): + global statusForUi + if len(statusForUi) == 0: + return "OK" + return ", ".join(statusForUi) + + +def set_status_for_ui(status): + global statusForUi + status = status.replace(",", " ") + if 
status != "OK" and status != "Error": + status = "• " + status + + if len(statusForUi) > 0 and statusForUi[len(statusForUi) - 1] == status: + return + + statusForUi.insert(0, status) + if len(statusForUi) > 5: + statusForUi = statusForUi[:5] + + +def set_error(error): + global globalError + if globalError == error: + return + + logger.error(f"Setting status error: {error}") + globalError = error + set_status_for_ui("Error") + + +def get_error(): + global globalError + return globalError + + +def fetch_remote_descriptor(): + # url = os.getenv('DOCKER_COMPOSE_DESCRIPTOR_URL', "https://raw.githubusercontent.com/orbs-network/v3-node-setup/refs/heads/main/deployment/docker-compose.yml") + url = os.getenv("DOCKER_COMPOSE_REMOTE_GIT_PATH", "origin/main:deployment/docker-compose.yml") + + try: + logger.info(f"Fetching remote descriptor from remote git {url}") + data = os.popen(f"git fetch origin").read() + logger.info(f"Fetch result: {data}") + data = os.popen(f"git show {url}").read() + # response = requests.get(url) + # response.raise_for_status() # Check for HTTP errors + # data = response.text + except requests.exceptions.RequestException as e: + logger.error(f"An error occurred while fetching the file: {e}") + data = None + + return data + + +def fetch_and_parse_metadata(): + # try: + logger.info("Fetching and parsing metadata...") + content = fetch_remote_descriptor() + + # Extract the metadata section + descriptor_begin = "# ---- UPDATE-DESCRIPTOR-BEGIN ----" + descriptor_end = "# ---- UPDATE-DESCRIPTOR-END ----" + + # Find the metadata section between begin and end markers + metadata_match = re.search(rf"{re.escape(descriptor_begin)}(.*?){re.escape(descriptor_end)}", content, re.DOTALL) + + if not metadata_match: + raise ValueError("Descriptor section not found in file") + + # Extract metadata content and strip the comments + metadata_content = metadata_match.group(1) + metadata_content = re.sub(r"^\s*#\s*", "", metadata_content, flags=re.MULTILINE).strip() + + # Parse as YAML and return + metadata_dict = yaml.safe_load(metadata_content) + return metadata_dict + + # except requests.exceptions.RequestException as e: + # print(f"An error occurred while fetching the file: {e}") + # return None + # except ValueError as e: + # print(f"Error: {e}") + # return None + # except Exception as e: + # print(f"An error occurred: {e}") + # return None + + +# def get_current_git_tag (): +# try: +# logger.info("Fetching current git tag...") +# tag = os.popen("git describe --tags $(git rev-list --tags --max-count=1)").read().strip() +# return tag +# except Exception as e: +# logger.error(f"An error occurred while fetching the current git tag: {e}") +# return None + + +def get_current_git_tag(): + try: + logger.info("Fetching current git tag...") + tag = os.popen("git describe --tags --exact-match").read().strip() + return tag + except Exception as e: + logger.error(f"An error occurred while fetching the current git tag: {e}") + return None + + +def get_current_git_commit_hash(): + try: + commit_hash = os.popen("git rev-parse HEAD").read().strip() + logger.info(f"Fetching current git commit hash {commit_hash}") + return commit_hash + except Exception as e: + logger.error(f"An error occurred while fetching the current git commit hash: {e}") + return None + + +def get_guardian_node_id(): + return os.getenv("NODE_ADDRESS", None) + + +def get_timestamp_of_commit_hash(commit_hash): + try: + unixtime = os.popen(f"git show -s --format=%ct {commit_hash}").read().strip() + timestamp = 
datetime.fromtimestamp(int(unixtime)) + logger.info(f"Baseline commit hash timestamp: {timestamp} for commit: {commit_hash}") + + return timestamp + + except Exception as e: + logger.error(f"An error occurred while fetching the timestamp of commit hash: {e}") + return None + + +def get_my_update_schedule_window_time(spread_minutes, commit_hash): + hash_value = get_guardian_node_id() + hash_int = int(hashlib.sha256(hash_value.encode()).hexdigest(), 16) + minute_of_day = hash_int % spread_minutes + # today = datetime.now().replace(second=0, microsecond=0) + baseline_time = get_timestamp_of_commit_hash(commit_hash) + # today = datetime.strptime(baseline_time, "%Y-%m-%d %H:%M:%S %z").replace(second=0, microsecond=0) + target_time = baseline_time + timedelta(minutes=minute_of_day) + + return target_time + + +def extract_branch_name(): + git_url = os.getenv("DOCKER_COMPOSE_REMOTE_GIT_PATH", "origin/main:deployment/docker-compose.yml") + + if ":" in git_url: + branch = git_url.split(":")[0] # Get the part before the colon + if "/" in branch: + return branch.split("/")[-1] # Get the part after the last '/' + return branch + return None # Return None if format is invalid + + +def get_remote_latest_commit_hash(): + # Step 1: Get the current branch name + branch_name = extract_branch_name() + + # Step 2: Get the latest commit ID from the remote for the current branch + commit_id = subprocess.check_output(["git", "ls-remote", "origin", branch_name], text=True).split()[0] + + logger.info(f"Latest commit hash for branch {branch_name}: {commit_id}") + + return commit_id + + +def compare(): + global isInUpdatingState, updateTargetTime + + logger.info("Comparing current state with metadata") + + metadata = fetch_and_parse_metadata() + guardian_node_id = get_guardian_node_id() + + if guardian_node_id is None: + logger.error("Guardian node ID not found") + return + + # Check if I'm in the target list in any way. + target_nodes = metadata.get("targetNodes", []) + am_i_a_target = False + for node in target_nodes: + if node.get("id") == guardian_node_id or node.get("id") == "*": + am_i_a_target = True + break + + logger.info(f"Am I a target node? {am_i_a_target}") + + if not am_i_a_target: + return + + # Check if the update is in action + update_in_action = metadata.get("updateInAction", False) + + if not update_in_action: + logger.info("Update is NOT in action, skipping") + return + + # Check if I need to update myself. 
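    # From here the node reports an "updating" state to the UI (see
    # get_updating_state_for_ui); the flag is only cleared at the very end of
    # compare(), so the early return below (scheduled window not reached yet)
    # leaves it set.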
+ + isInUpdatingState = True + + updateMode = metadata.get("updateMode", "immediate") + + current_git_tag = get_current_git_tag() + current_commit_hash = get_current_git_commit_hash() + scheduled_commit_hash = metadata.get("commit") + if scheduled_commit_hash == "latest": + scheduled_commit_hash = get_remote_latest_commit_hash() + + if updateMode == "scheduled": + logger.info("Scheduled update mode") + updateResolution = metadata.get("updateResolution", 1440) + logger.info(f"Update resolution: {updateResolution} minutes") + timeToUpdate = get_my_update_schedule_window_time(updateResolution, scheduled_commit_hash) + updateTargetTime = int(timeToUpdate.timestamp()) + + set_status_for_ui(f"Update scheduled for {timeToUpdate}") + + logger.info(f"Time to update: {timeToUpdate}") + if datetime.now() < timeToUpdate: + logger.info("Not my time to update") + return + + updateTargetTime = 0 + + if current_commit_hash.startswith(scheduled_commit_hash) or current_git_tag == scheduled_commit_hash: + logger.info(f"I'm up to date with commit hash: {current_commit_hash} / {current_git_tag}, scheduled commit hash: {scheduled_commit_hash}") + set_status_for_ui(f"I'm up to date with commit hash: {current_commit_hash} / {current_git_tag}") + else: + logger.info(f"I need to update, current commit hash: {current_commit_hash}, scheduled commit hash: {scheduled_commit_hash}") + set_status_for_ui(f"Update in progress for commit hash: {scheduled_commit_hash}") + trigger_update(scheduled_commit_hash) + + isInUpdatingState = False + + +def trigger_update(scheduled_commit_hash): + logger.info(f"Triggering update for commit hash: {scheduled_commit_hash}") + + if os.getenv("DONT_UPDATE", "false") == "true": + set_status_for_ui("DONT_UPDATE is set to true - skipping update") + logger.info("DONT_UPDATE is set to true, skipping update") + return + + # fetch latest changes. 
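    # The sequence below mirrors a manual operator update: fetch the remote,
    # stash any local edits so the checkout cannot conflict, check out the
    # scheduled commit, then re-run docker-compose so changed services are recreated.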
+ + logger.info("Fetching latest changes") + res = os.popen("git fetch origin").read() + logger.info(res) + + # stash any local changes + logger.info("Stashing any local changes") + res = os.popen("git stash").read() + logger.info(res) + + logger.info(f"Checking out commit {scheduled_commit_hash}") + res = os.popen(f"git checkout {scheduled_commit_hash}").read() + logger.info(res) + + docker_compose_file = os.getenv("DOCKER_COMPOSE_FILE") + logger.info(f"Running docker-compose -f {docker_compose_file} up -d") + res = os.popen(f"docker-compose -f {docker_compose_file} up -d").read() + logger.info(res) + + logger.info("Update completed") diff --git a/manager/src/utils.py b/control/src/utils.py similarity index 97% rename from manager/src/utils.py rename to control/src/utils.py index 574481d..1a14428 100644 --- a/manager/src/utils.py +++ b/control/src/utils.py @@ -1,8 +1,7 @@ """ -Various helper functions used in the node manager +Various helper functions used in the node control """ - import select import subprocess from typing import Optional diff --git a/control/tests/conftest.py b/control/tests/conftest.py new file mode 100644 index 0000000..53f3b60 --- /dev/null +++ b/control/tests/conftest.py @@ -0,0 +1,15 @@ +import os +import sys +import tempfile +from pathlib import Path + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +SRC_DIR = PROJECT_ROOT / "src" + +if str(SRC_DIR) not in sys.path: + sys.path.insert(0, str(SRC_DIR)) + +# Set a temporary BASE_DIR for the tests +if "BASE_DIR" not in os.environ: + os.environ["BASE_DIR"] = tempfile.mkdtemp(prefix="control-tests-") + diff --git a/control/tests/system_monitor_test.py b/control/tests/system_monitor_test.py new file mode 100644 index 0000000..1f953bd --- /dev/null +++ b/control/tests/system_monitor_test.py @@ -0,0 +1,25 @@ +"""SystemMonitor class tests""" + +from pytest_mock import MockerFixture + +from system_monitor import SystemMonitor + + +def test_get_initial_response(mocker: MockerFixture) -> None: + """Test that the initial get() response returns the correct values""" + + system_monitor = SystemMonitor(client=mocker.Mock()) + + status = system_monitor.get() + + assert status == { + "Timestamp": "", + "Status": "", + "Error": "", + "Extra": "", + "Payload": { + "Version": {"Semantic": ""}, + "Metrics": {}, + "Services": {}, + }, + } diff --git a/deployment/README.md b/deployment/README.md index 555ee71..f9e0153 100644 --- a/deployment/README.md +++ b/deployment/README.md @@ -7,7 +7,7 @@ Source of truth for manifest files for Orbs v3 validator nodes. ## how is it used `v3-node-setup` clones this repo to the target node -`v3-manager` is responsible to keep it up to date upon new releases +`v3-control` is responsible to keep it up to date upon new releases ## files - `docker-compose.yml` - descriptor of the nodes service topoly and image versions. 
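The update descriptor that `v3-control` acts on is embedded as a comment block at the top of the compose manifest (between the `UPDATE-DESCRIPTOR-BEGIN`/`END` markers). A minimal sketch of extracting it, mirroring the parsing done in `control/src/updater.py` (the function name here is illustrative):

```python
import re
import yaml  # PyYAML, already a dependency of the control package

DESCRIPTOR_BEGIN = "# ---- UPDATE-DESCRIPTOR-BEGIN ----"
DESCRIPTOR_END = "# ---- UPDATE-DESCRIPTOR-END ----"


def read_update_descriptor(compose_text: str) -> dict:
    """Return the update descriptor embedded in a docker-compose manifest."""
    match = re.search(
        rf"{re.escape(DESCRIPTOR_BEGIN)}(.*?){re.escape(DESCRIPTOR_END)}",
        compose_text,
        re.DOTALL,
    )
    if not match:
        raise ValueError("update descriptor not found in manifest")
    # Strip the leading '# ' so the commented block parses as plain YAML
    body = re.sub(r"^\s*#\s*", "", match.group(1), flags=re.MULTILINE)
    return yaml.safe_load(body)
```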
diff --git a/deployment/docker-compose-dev-local.yml b/deployment/docker-compose-dev-local.yml new file mode 100644 index 0000000..d315880 --- /dev/null +++ b/deployment/docker-compose-dev-local.yml @@ -0,0 +1,136 @@ +# ---- UPDATE-DESCRIPTOR-BEGIN ---- +# targetNodes: +# - id: 8c824c84e03de12e73fe286222c00faa3d8fd152 +# - id: 1c824c84e03de12e73fe286222c00faa3d8fd152 +# - id: "*" +# updateResolution: 1440 +# updateMode: immediate +# updateInAction: false +# commit: 656fdc20 +# ---- UPDATE-DESCRIPTOR-END ---- + +services: + nginx: + container_name: nginx + image: nginx:latest + #image: fabiocicerchia/nginx-lua:latest + ports: + - 80:80 + volumes: + - $ORBS_ROOT/deployment/nginx/conf.d:/etc/nginx/conf.d + - /opt/orbs:/opt/orbs + depends_on: + - logger + healthcheck: + test: service nginx status || exit 1 + + ethereum-reader: + container_name: ethereum-reader + image: host.docker.internal:6000/management-service:latest + command: "npm start" + volumes: + - /opt/orbs/ethereum-reader:/opt/orbs/status + - ${DOCKER_COMPOSE_FILE}:/opt/orbs/deployment.yml + environment: + - DOCKER_COMPOSE_DESCRIPTOR_URL + - INJECT_FAKE_GUARDIAN={"name":"jordalishNode","ip":"0x7F000001"} + - NODE_ADDRESS + - ETHEREUM_ENDPOINT + - CHAIN=ethereum + - MODE=dev + healthcheck: + test: node healthcheck.js || exit 1 + + ethereum-writer: + container_name: ethereum-writer + image: lukerogerson1/ethereum-writer:v1.7.3-main + command: "npm start" + volumes: + - /opt/orbs/ethereum-writer:/opt/orbs/status + environment: + - ETHEREUM_ELECTIONS_CONTRACT=0x02Ca9F2c5dD0635516241efD480091870277865b + - ETHEREUM_ENDPOINT + - NODE_PRIVATE_KEY + - SIGNER_ENDPOINT + - NODE_ADDRESS + - MANAGEMENT_SERVICE_ENDPOINT=http://nginx/service/ethereum-reader + healthcheck: + test: node healthcheck.js || exit 1 + + matic-reader: + container_name: matic-reader + image: host.docker.internal:6000/management-service:latest + command: "npm start" + volumes: + - /opt/orbs/matic-reader:/opt/orbs/status + - ${DOCKER_COMPOSE_FILE}:/opt/orbs/deployment.yml + environment: + - INJECT_FAKE_GUARDIAN={"name":"jordalishNode","ip":"0x7F000001"} + - NODE_ADDRESS + - ETHEREUM_GENESIS_CONTRACT=0x35eA0D75b2a3aB06393749B4651DfAD1Ffd49A77 + - ETHEREUM_ENDPOINT=${MATIC_ENDPOINT} + - ETHEREUM_FIRST_BLOCK=21700000 + - ETHEREUM_POLL_INTERVAL_SECONDS=300 + - CHAIN=polygon + - MODE=dev + healthcheck: + test: node healthcheck.js || exit 1 + + matic-writer: + container_name: matic-writer + image: lukerogerson1/ethereum-writer:v1.7.3-main + command: "npm start" + volumes: + - /opt/orbs/matic-writer:/opt/orbs/status + environment: + - ETHEREUM_ELECTIONS_CONTRACT=0x94f2da1ef22649c642500e8B1C3252A4670eE95b + - ETHEREUM_ENDPOINT=${MATIC_ENDPOINT} + - NODE_PRIVATE_KEY + - SIGNER_ENDPOINT + - NODE_ADDRESS + - MANAGEMENT_SERVICE_ENDPOINT=http://nginx/service/matic-reader + - ETHEREUM_DISCOUNT_GAS_PRICE_FACTOR=1 + healthcheck: + test: node healthcheck.js || exit 1 + + signer: + container_name: signer + image: orbsnetworkstaging/signer:v2.6.1-immediate + command: "/opt/orbs/orbs-signer" + volumes: + - /opt/orbs/signer:/opt/orbs/status + environment: + - NODE_PRIVATE_KEY + - NODE_ADDRESS + - HTTP_ADDRESS=:80 + healthcheck: + test: "/opt/orbs/healthcheck --url http://localhost:80 --output /opt/orbs/status/status.json" + + logger: + container_name: logger + image: orbsnetworkstaging/v4-logger:v0.0.1 + volumes: + - $PODMAN_SOCKET_PATH:/var/run/docker.sock + - /opt/orbs/logger:/opt/orbs/status + environment: + - STATUS_FILE_PATH=/opt/orbs/status/status.json + healthcheck: + # TODO: improve 
this healthcheck + test: ping -c 1 logger || exit 1 + + vm-l3-dummy-service: + container_name: vm-l3-dummy-service + image: host.docker.internal:6000/l3-dummy-service:latest + volumes: + - $PODMAN_SOCKET_PATH:/var/run/docker.sock + - /opt/orbs/vm-l3-dummy-service:/opt/orbs/status + environment: + - STATUS_FILE_PATH=/opt/orbs/status/status.json + healthcheck: + # TODO: improve this healthcheck + test: ping -c 1 logger || exit 1 + +networks: + default: + name: custom_network + external: true \ No newline at end of file diff --git a/deployment/docker-compose-dev-remote.yml b/deployment/docker-compose-dev-remote.yml new file mode 100644 index 0000000..9e43e7a --- /dev/null +++ b/deployment/docker-compose-dev-remote.yml @@ -0,0 +1,212 @@ +# ---- UPDATE-DESCRIPTOR-BEGIN ---- +# targetNodes: +# - id: 8c824c84e03de12e73fe286222c00faa3d8fd152 +# - id: 1c824c84e03de12e73fe286222c00faa3d8fd152 +# - id: "*" +# # used 1440 to spread over a day. +# updateResolution: 120 +# # can be immediate or scheduled +# updateMode: immediate +# updateInAction: true +# # can be commit id or latest +# commit: latest +# ---- UPDATE-DESCRIPTOR-END ---- + +x-logging: &default-logging + driver: "json-file" + options: + max-size: "200m" + max-file: "3" + +services: + nginx: + logging: *default-logging + container_name: nginx + image: nginx:latest + restart: unless-stopped + #image: fabiocicerchia/nginx-lua:latest + ports: + - 80:80 + volumes: + - $ORBS_ROOT/deployment/nginx/conf.d:/etc/nginx/conf.d + - /opt/orbs:/opt/orbs + depends_on: + - logger + healthcheck: + test: service nginx status || exit 1 + + ethereum-reader: + logging: *default-logging + networks: + default: + aliases: + - management-service + container_name: ethereum-reader + image: orbsnetworkstaging/management-service:1736783819 + restart: unless-stopped + command: "npm start" + volumes: + - /opt/orbs/ethereum-reader:/opt/orbs/status + - ${DOCKER_COMPOSE_FILE}:/opt/orbs/deployment.yml + environment: + - DOCKER_COMPOSE_DESCRIPTOR_URL + #- INJECT_FAKE_GUARDIAN={"name":"jordalishNode","ip":"0x7F000001"} + - NODE_ADDRESS + - ETHEREUM_ENDPOINT + - CHAIN=ethereum + - MODE=dev + healthcheck: + test: node healthcheck.js || exit 1 + + ethereum-writer: + logging: *default-logging + container_name: ethereum-writer + image: orbsnetworkstaging/ethereum-writer:v1.8.0-immediate + restart: unless-stopped + command: "npm start" + volumes: + - /opt/orbs/ethereum-writer:/opt/orbs/status + environment: + - ETHEREUM_ELECTIONS_CONTRACT=0x02Ca9F2c5dD0635516241efD480091870277865b + - ETHEREUM_ENDPOINT + - NODE_PRIVATE_KEY + - SIGNER_ENDPOINT + - NODE_ADDRESS + - MANAGEMENT_SERVICE_ENDPOINT=http://nginx/service/ethereum-reader + healthcheck: + test: node healthcheck.js || exit 1 + + matic-reader: + logging: *default-logging + container_name: matic-reader + image: orbsnetworkstaging/management-service:1736783819 + restart: unless-stopped + command: "npm start" + volumes: + - /opt/orbs/matic-reader:/opt/orbs/status + - ${DOCKER_COMPOSE_FILE}:/opt/orbs/deployment.yml + environment: + - NODE_ADDRESS + - ETHEREUM_GENESIS_CONTRACT=0x35eA0D75b2a3aB06393749B4651DfAD1Ffd49A77 + - ETHEREUM_ENDPOINT=${MATIC_ENDPOINT} + - ETHEREUM_FIRST_BLOCK=21700000 + - ETHEREUM_POLL_INTERVAL_SECONDS=300 + - CHAIN=polygon + - MODE=dev + healthcheck: + test: node healthcheck.js || exit 1 + + matic-writer: + logging: *default-logging + container_name: matic-writer + image: orbsnetworkstaging/ethereum-writer:v1.8.0-immediate + restart: unless-stopped + command: "npm start" + volumes: + - 
/opt/orbs/matic-writer:/opt/orbs/status + environment: + - ETHEREUM_ELECTIONS_CONTRACT=0x94f2da1ef22649c642500e8B1C3252A4670eE95b + - ETHEREUM_ENDPOINT=${MATIC_ENDPOINT} + - NODE_PRIVATE_KEY + - SIGNER_ENDPOINT + - NODE_ADDRESS + - MANAGEMENT_SERVICE_ENDPOINT=http://nginx/service/matic-reader + - ETHEREUM_DISCOUNT_GAS_PRICE_FACTOR=1 + healthcheck: + test: node healthcheck.js || exit 1 + + signer: + logging: *default-logging + container_name: signer + #image: orbsnetworkstaging/signer:v2.6.1-immediate + image: orbsnetworkstaging/signer:v2.6.0-59e1b92f + restart: unless-stopped + #command: "/opt/orbs/orbs-signer" + volumes: + - /opt/orbs/signer:/opt/orbs/status + environment: + - MULTILOG_DISABLED=1 + - NODE_PRIVATE_KEY + - NODE_ADDRESS + - HTTP_ADDRESS=:80 + healthcheck: + test: "/opt/orbs/healthcheck --url http://localhost:80 --output /opt/orbs/status/status.json" + + logger: + logging: *default-logging + container_name: logger + image: orbsnetworkstaging/v4-logger:v0.0.1 + restart: unless-stopped + volumes: + - $PODMAN_SOCKET_PATH:/var/run/docker.sock + - /opt/orbs/logger:/opt/orbs/status + environment: + - STATUS_FILE_PATH=/opt/orbs/status/status.json + healthcheck: + # TODO: improve this healthcheck + test: ping -c 1 logger || exit 1 + + + vm-notifications: + logging: *default-logging + restart: unless-stopped + container_name: vm-notifications + #image: orbsnetwork/vm-notifications:v1.0.2 + image: orbsnetwork/vm-notifications:v1.0.31 + entrypoint: ["/opt/orbs/service"] + #command: ["--config","/run/secrets/config.json","--config","/run/secrets/keys.json"] + volumes: + - $PODMAN_SOCKET_PATH:/var/run/docker.sock + - /opt/orbs/vm-notifications:/opt/orbs/status + environment: + - MULTILOG_DISABLED=1 + - STATUS_FILE_PATH=/opt/orbs/status/status.json + healthcheck: + # TODO: improve this healthcheck + test: ping -c 1 logger || exit 1 + + vm-lambda: + logging: *default-logging + restart: unless-stopped + container_name: vm-lambda + #image: orbsnetwork/vm-lambda:v0.0.27 + image: orbsnetworkstaging/vm-lambda:jordan9 + entrypoint: ["/opt/orbs/service"] + #command: ["--config","/run/secrets/config.json","--config","/run/secrets/keys.json"] + volumes: + - $PODMAN_SOCKET_PATH:/var/run/docker.sock + - /opt/orbs/vm-lambda:/opt/orbs/status + environment: + - NODE_ENV=stagingv4 + - MULTILOG_DISABLED=1 + - STATUS_FILE_PATH=/opt/orbs/status/status.json + # healthcheck: + # TODO: improve this healthcheck + #test: "/opt/orbs/healthcheck.sh --url http://localhost:80 --output /opt/orbs/status/status.json" + # depends_on: + #- ethereum-reader + + vm-twap: + logging: *default-logging + restart: unless-stopped + container_name: vm-twap + image: orbsnetwork/vm-twap:v1.19.31 + entrypoint: ["/opt/orbs/service"] + #command: ["--config","/run/secrets/config.json","--config","/run/secrets/keys.json"] + volumes: + - $PODMAN_SOCKET_PATH:/var/run/docker.sock + - /opt/orbs/vm-twap:/opt/orbs/status + environment: + - MULTILOG_DISABLED=1 + - STATUS_FILE_PATH=/opt/orbs/status/status.json + # healthcheck: + # TODO: improve this healthcheck + #test: "/opt/orbs/healthcheck.sh --url http://localhost:80 --output /opt/orbs/status/status.json" + depends_on: + - ethereum-reader + +networks: + default: + name: custom_network + external: true diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml index d6b719e..5b7a4cf 100644 --- a/deployment/docker-compose.yml +++ b/deployment/docker-compose.yml @@ -1,5 +1,3 @@ -version: "3.8" - services: nginx: container_name: nginx @@ -7,7 +5,7 @@ 
services: ports: - 80:80 volumes: - - $HOME/deployment/nginx/conf.d:/etc/nginx/conf.d + - $ORBS_ROOT/deployment/nginx/conf.d:/etc/nginx/conf.d - /opt/orbs:/opt/orbs depends_on: - logger @@ -21,6 +19,8 @@ services: volumes: - /opt/orbs/ethereum-reader:/opt/orbs/status environment: + - DOCKER_COMPOSE_DESCRIPTOR_URL + - INJECT_FAKE_GUARDIAN={"name":"jordalishNode","ip":"0x7F000001"} - NODE_ADDRESS - ETHEREUM_ENDPOINT healthcheck: @@ -49,6 +49,7 @@ services: volumes: - /opt/orbs/matic-reader:/opt/orbs/status environment: + - INJECT_FAKE_GUARDIAN={"name":"jordalishNode","ip":"0x7F000001"} - NODE_ADDRESS - ETHEREUM_GENESIS_CONTRACT=0x35eA0D75b2a3aB06393749B4651DfAD1Ffd49A77 - ETHEREUM_ENDPOINT=${MATIC_ENDPOINT} diff --git a/deployment/nginx/conf.d/default.conf b/deployment/nginx/conf.d/default.conf index 726c509..6c0d944 100644 --- a/deployment/nginx/conf.d/default.conf +++ b/deployment/nginx/conf.d/default.conf @@ -3,12 +3,12 @@ server { server_name _; # rewrite all plural service(s) and log(s) to a single form location ~* ^/services/(.*)$ { - rewrite ^/services/(.*)$ /service/$1 last; + rewrite ^/services/(.*)$ /service/$1 last; } - - # rewrite all "boyar" with "manager" + + # rewrite all "boyar" with "control" location ~* ^/service/boyar/(.*)$ { - rewrite ^/service/boyar/(.*)$ /service/manager/$1 last; + rewrite ^/service/boyar/(.*)$ /service/control/$1 last; } # rewrite all "log-service" with "logger" location ~* ^/service/logs-service/(.*)$ { @@ -18,20 +18,20 @@ server { location ~* ^/service/management-service/(.*)$ { rewrite ^/service/management-service/(.*)$ /service/ethereum-reader/$1 last; } - + # handle static log(s) from /opt/orbs for non container services - location ~* ^/service/(manager|updater|recovery)/logs?$ { + location ~* ^/service/(control|updater|recovery)/logs?$ { default_type text/plain; alias /opt/orbs/$1/log.txt; } # Expose container status pages - location ~* ^/service/([a-zA-Z_-]+)/status$ { + location ~* ^/service/([a-zA-Z0-9_-]+)/status$ { default_type application/json; alias /opt/orbs/$1/status.json; } - # manager log is a special case as it is not a service + # control log is a special case as it is not a vm-docker service # Expose container logs (rewrite request to always be in /log in single ) location ~ ^/service/([a-zA-Z0-9_.-]+)/logs?$ { diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000..329580b --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +echo "source ./orbs-node-on-host/setup/install.sh --skip-req" > /home/ubuntu/.bash_history + +echo "" +echo "Welcome to the development L3 node setup" +echo "----------------------------------------" +echo "Please run the following commands to setup the node (or arrow up to run the last command):" +echo "" +echo "source ./orbs-node-on-host/setup/install.sh --skip-req" +echo "" +exec /bin/bash diff --git a/local-dev.Dockerfile b/local-dev.Dockerfile new file mode 100644 index 0000000..6217746 --- /dev/null +++ b/local-dev.Dockerfile @@ -0,0 +1,102 @@ +# This creates a temp image simulating an Ubuntu EC2 to test the installer script + +FROM ubuntu:22.04 +ARG DEBIAN_FRONTEND=noninteractive +ARG UBUNTU_VERSION=22.04 + +# Add sudo to make more like EC2 instance +RUN apt-get update && apt-get install -y fzf software-properties-common python3 python3-pip sudo locales vim curl rsync git + +# EC2 instances usually have locale settings +RUN locale-gen en_US.UTF-8 && update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 +ENV LANG=en_US.UTF-8 \ + LANGUAGE=en_US:en \ + LC_ALL=en_US.UTF-8 + 
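+# A hedged usage sketch, not a documented workflow: the exact build/run commands below are
+# assumptions. The image simulates an EC2 host; mounting a checkout of this repo at
+# /home/ubuntu/orbs-node-on-host matches what entrypoint.sh and setup/scripts/base.sh expect:
+#   docker build -f local-dev.Dockerfile -t orbs-local-dev .
+#   docker run -it --privileged -v "$PWD":/home/ubuntu/orbs-node-on-host orbs-local-dev
+# (--privileged is assumed only because rootless podman inside a container commonly needs it)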
+# Needed to allow crons to run in the container +RUN echo "#!/bin/sh\nexit 0" > /usr/sbin/policy-rc.d + +# Use non-root user (Docker by default uses root) +RUN useradd -ms /bin/bash ubuntu && \ + echo "ubuntu ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/ubuntu && \ + chmod 0440 /etc/sudoers.d/ubuntu + +# Prepare to install podman 4.6.2 +RUN key_url="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${UBUNTU_VERSION}/Release.key" && \ +sources_url="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_${UBUNTU_VERSION}" && \ +echo "deb $sources_url/ /" | tee /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list && \ +curl -fsSL $key_url | gpg --dearmor | tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg && \ +apt-get update + +ARG INSTALL_PACKAGES="podman fuse-overlayfs openssh-client ucpp" + +# Update the package list and install required packages. +RUN apt-get install -y $INSTALL_PACKAGES && \ + ln -s /usr/bin/ucpp /usr/local/bin/cpp && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Prepare necessary stuff to let podman run as rootless. + +RUN echo "ubuntu:1:999\nubuntu:1001:64535" > /etc/subuid && \ + echo "ubuntu:1:999\nubuntu:1001:64535" > /etc/subgid + +ADD /containers.conf /etc/containers/containers.conf +ADD /podman-containers.conf /home/podman/.config/containers/containers.conf + +RUN mkdir -p /home/ubuntu/.local/share/containers && \ + chown ubuntu:ubuntu -R /home/ubuntu && \ + chmod 644 /etc/containers/containers.conf + +# Modify storage configuration for running with fuse-overlay storage inside the container +RUN sed -e 's|^#mount_program|mount_program|g' \ + -e '/additionalimage.*/a "/var/lib/shared",' \ + -e 's|^mountopt[[:space:]]*=.*$|mountopt = "nodev,fsync=0"|g' \ + /usr/share/containers/storage.conf \ + > /etc/containers/storage.conf + +# Setup internal Podman to pass subscriptions down from host to internal container +RUN printf '/run/secrets/etc-pki-entitlement:/run/secrets/etc-pki-entitlement\n/run/secrets/rhsm:/run/secrets/rhsm\n' > /etc/containers/mounts.conf + +# Define volumes for container storage +VOLUME /var/lib/containers +VOLUME /home/ubuntu/.local/share/containers + +# Create shared directories and locks +RUN mkdir -p /var/lib/shared/overlay-images \ + /var/lib/shared/overlay-layers \ + /var/lib/shared/vfs-images \ + /var/lib/shared/vfs-layers && \ + touch /var/lib/shared/overlay-images/images.lock && \ + touch /var/lib/shared/overlay-layers/layers.lock && \ + touch /var/lib/shared/vfs-images/images.lock && \ + touch /var/lib/shared/vfs-layers/layers.lock + +# Copy the entrypoint script and make it executable +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +COPY .bashrc /home/ubuntu/.bashrc +RUN chown ubuntu:ubuntu /home/ubuntu/.bashrc + +USER ubuntu + +WORKDIR /home/ubuntu + +## Shortcuts for docker-compose actions +#RUN echo 'alias dc="docker-compose"' >> ~/.bashrc +#RUN echo 'alias logs="docker-compose -f /home/ubuntu/deployment/docker-compose.yml logs"' >> ~/.bashrc +#RUN echo 'alias n-logs="docker-compose -f /home/ubuntu/deployment/docker-compose.yml logs nginx"' >> ~/.bashrc +#RUN echo 'alias ms-logs="docker-compose -f /home/ubuntu/deployment/docker-compose.yml logs ethereum-reader"' >> ~/.bashrc +#RUN echo 'alias ew-logs="docker-compose -f /home/ubuntu/deployment/docker-compose.yml logs ethereum-writer"' >> ~/.bashrc +#RUN echo 'alias s-logs="docker-compose -f /home/ubuntu/deployment/docker-compose.yml logs signer"' >> ~/.bashrc 
+#RUN echo 'alias ms-exec="docker-compose -f /home/ubuntu/deployment/docker-compose.yml exec ethereum-reader sh"' >> ~/.bashrc +#RUN echo 'alias ew-exec="docker-compose -f /home/ubuntu/deployment/docker-compose.yml exec ethereum-writer sh"' >> ~/.bashrc +#RUN echo 'alias s-exec="docker-compose -f /home/ubuntu/deployment/docker-compose.yml exec signer sh"' >> ~/.bashrc + +#COPY --chown=ubuntu:ubuntu setup setup +#COPY --chown=ubuntu:ubuntu control control +#COPY --chown=ubuntu:ubuntu deployment deployment +#COPY --chown=ubuntu:ubuntu logging logging + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/manager/src/config.py b/manager/src/config.py deleted file mode 100644 index aa80019..0000000 --- a/manager/src/config.py +++ /dev/null @@ -1,12 +0,0 @@ -""" Configuration file for the manager service. """ - -import os - -BASE_DIR = os.environ.get("BASE_DIR") or "/opt/orbs" -os.makedirs(f"{BASE_DIR}/manager", exist_ok=True) - -MANAGER_DIR = os.path.join(BASE_DIR, "manager") - -status_file = os.path.join(MANAGER_DIR, "status.json") -log_file = os.path.join(MANAGER_DIR, "log.txt") -errors_file = os.path.join(MANAGER_DIR, "errors.txt") diff --git a/manager/src/manager.py b/manager/src/manager.py deleted file mode 100755 index 3b6900e..0000000 --- a/manager/src/manager.py +++ /dev/null @@ -1,50 +0,0 @@ -""" Main entry point of the manager """ - -import docker - -from config import status_file -from logger import logger -from system_monitor import SystemMonitor -from utils import run_command - -system_monitor = SystemMonitor(client=docker.from_env()) - -data = { - "currentVersion": "0.0.0", - "scheduledVersion": None, - "updateScheduled": None, - "updateScheduledFor": None, -} - - -def main(): - """Main entry point of the manager""" - - logger.info("Running manager...") - - # TODO - add back when we split into separate repos - # # Fetch all the tags from the remote repository - # run_command("git fetch origin --tags") - - # Get the latest tag - # latest_tag = run_command("git describe --tags $(git rev-list --tags --max-count=1)") - - # hard coded for now - latest_tag = "0.0.1" - - # upddate manager info - if latest_tag and latest_tag != data["currentVersion"]: - # checkout_command = f"git checkout {latest_tag}" - # run_command(checkout_command) # checkout the latest tag - error = run_command( - "docker-compose -f $HOME/deployment/docker-compose.yml up -d" - ) - if error: - print("Error running docker-compose") - - system_monitor.update() - system_monitor.persist(status_file) - - -if __name__ == "__main__": - main() diff --git a/manager/src/system_monitor_types.py b/manager/src/system_monitor_types.py deleted file mode 100644 index c7bf59c..0000000 --- a/manager/src/system_monitor_types.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import TypedDict - - -class Status(TypedDict): - """Corresponds to v2 status object""" - - Timestamp: str - Status: str - Error: str - Payload: dict - - -class Payload(TypedDict): - """Further breakdown of the status object""" - - Metrics: dict - Services: dict diff --git a/manager/tests/system_monitor_test.py b/manager/tests/system_monitor_test.py deleted file mode 100644 index d8f5127..0000000 --- a/manager/tests/system_monitor_test.py +++ /dev/null @@ -1,16 +0,0 @@ -""" SystemMonitor class tests """ - -from system_monitor import SystemMonitor - - -def test_get_initial_response(mocker): - """Test that the initial get() response returns the correct values""" - - system_monitor = SystemMonitor(client=mocker.Mock()) - - status = system_monitor.get() - - assert 
status["Timestamp"] == "" - assert status["Status"] == "" - assert status["Error"] == "" - assert status["Payload"] == {"Metrics": {}, "Services": {}} diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..0065aed --- /dev/null +++ b/package-lock.json @@ -0,0 +1,6 @@ +{ + "name": "v3-node-setup", + "lockfileVersion": 2, + "requires": true, + "packages": {} +} diff --git a/podman-containers.conf b/podman-containers.conf new file mode 100644 index 0000000..2bdd95a --- /dev/null +++ b/podman-containers.conf @@ -0,0 +1,5 @@ +[containers] +volumes = [ + "/proc:/proc", +] +default_sysctls = [] diff --git a/setup/README.md b/setup/README.md index 767e6af..86d950a 100644 --- a/setup/README.md +++ b/setup/README.md @@ -9,4 +9,4 @@ Install scripts and related files for setting up a Orbs v3 validator node - installs prereqs - generates node address - or prompts for one - produces node.env file -- starts the manager +- starts the control module diff --git a/setup/deployment-poll.cron b/setup/deployment-poll.cron index 4ec20bb..5382dd2 100644 --- a/setup/deployment-poll.cron +++ b/setup/deployment-poll.cron @@ -1 +1,2 @@ -* * * * * cd $HOME/deployment && python3 $HOME/manager/src/manager.py +* * * * * bash -c 'set -a; source $ORBS_ROOT/deployment/.env; set +a; cd $ORBS_ROOT/deployment && $ORBS_ROOT/control/.venv/bin/python3 $ORBS_ROOT/control/src/main.py poll >> /tmp/control.out 2>&1' +# new line is necessary diff --git a/setup/install.sh b/setup/install.sh index 43f30e2..992eda6 100755 --- a/setup/install.sh +++ b/setup/install.sh @@ -1,6 +1,9 @@ #!/bin/bash +#set -x + +export SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + -source $HOME/setup/scripts/base.sh echo -e "${BLUE} ██████╗ ██████╗ ██████╗ ███████╗ @@ -11,19 +14,33 @@ echo -e "${BLUE} ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚══════╝ ${NC}" +# env vars (ORBS_ROOT/.env) +# script directories +source $SCRIPT_DIR/scripts/base.sh # Check minimum machine specs are met -source $HOME/setup/scripts/validate-min-specs.sh "$@" +source $ORBS_ROOT/setup/scripts/validate-min-specs.sh "$@" # Install necessary dependencies -source $HOME/setup/scripts/install-dependencies.sh "$@" -# Download required node repositories -source $HOME/setup/scripts/clone-repos.sh "$@" +source $ORBS_ROOT/setup/scripts/install-dependencies.sh "$@" +# Download required node repositories - aftre splitted to different repos? 
+# source $ORBS_ROOT/setup/scripts/clone-repos.sh "$@" # Generate node address keys -source $HOME/setup/scripts/handle-node-address.sh "$@" +source $ORBS_ROOT/setup/scripts/handle-node-address.sh "$@" # Collect Guardian details -source $HOME/setup/scripts/handle-guardian-info.sh "$@" -# Generate env files needed for manager -source $HOME/setup/scripts/generate-env-files.sh "$@" -# Setup manager -source $HOME/setup/scripts/setup-manager.sh "$@" +source $ORBS_ROOT/setup/scripts/handle-guardian-info.sh "$@" +# Generate env files needed for control +source $ORBS_ROOT/setup/scripts/generate-env-files.sh "$@" + +set -a # Automatically export all variables +source $ORBS_ROOT/deployment/.env +set +a + +# Setup control +source $ORBS_ROOT/setup/scripts/setup-control.sh "$@" + # Perform final health check -source $HOME/setup/scripts/health-check.sh "$@" +source $ORBS_ROOT/setup/scripts/health-check.sh "$@" + + diff --git a/setup/scripts/base.sh b/setup/scripts/base.sh index 6a89c01..39cd642 100755 --- a/setup/scripts/base.sh +++ b/setup/scripts/base.sh @@ -23,4 +23,38 @@ loginctl enable-linger $username # This errors when running in Docker container sudo mkdir -p /opt/orbs sudo chown -R $username:$username /opt/orbs/ -sudo chmod -R 755 /opt/orbs/ \ No newline at end of file +sudo chmod -R 755 /opt/orbs/ + +if [ -f /.dockerenv ]; then + export RUNNING_IN_DOCKER=true +fi + +if [ "$RUNNING_IN_DOCKER" = "true" ]; then + echo -e "${YELLOW} Running in Docker container ! ${NC}" + + export ORBS_ROOT=${ORBS_ROOT:-$HOME/orbs-node} # if ORBS_ROOT is set, use it, otherwise use default mapped to host directory + echo -e "${BLUE} ORBS_ROOT: $ORBS_ROOT ${NC}" + git clone $HOME/orbs-node-on-host $ORBS_ROOT + rsync -aq --progress --exclude='.venv' --exclude='.git' $HOME/orbs-node-on-host/ $ORBS_ROOT +else + echo -e "${YELLOW} Running on host ! ${NC}" + + export ORBS_ROOT=`git rev-parse --show-toplevel` +fi + +echo -e "${BLUE} ORBS_ROOT: $ORBS_ROOT ${NC}" + +export DOCKER_COMPOSE_FILE=$ORBS_ROOT/deployment/docker-compose.yml + +set -a # Automatically export all variables +if [ -f $ORBS_ROOT/deployment/.env ]; then + . $ORBS_ROOT/deployment/.env +fi +set +a + + + + + + + diff --git a/setup/scripts/clone-repos.sh b/setup/scripts/clone-repos.sh index 82659b9..7164551 100755 --- a/setup/scripts/clone-repos.sh +++ b/setup/scripts/clone-repos.sh @@ -6,15 +6,15 @@ # # TODO: this is the wrong repo # git clone https://github.com/orbs-network/v3-deployment.git deployment # # Disable detached head warning. This is fine as we are checking out tags -# cd $HOMEdeployment && git config advice.detachedHead false && cd .. +# cd $ORBS_ROOTdeployment && git config advice.detachedHead false && cd .. # echo -e "${GREEN}Node deployment files downloaded!${NC}" # echo "------------------------------------" -# # ----- CLONE MANAGER ----- +# # ----- CLONE CONTROL/MANAGER ----- # echo -e "${BLUE}Downloading node manager...${NC}" # git clone https://github.com/orbs-network/v3-node-manager.git manager # cd $HOMEmanager && git config advice.detachedHead false && cd .. 
-# echo -e "${GREEN}Node manager downloaded!${NC}" +# echo -e "${GREEN}Node control downloaded!${NC}" # echo "------------------------------------" \ No newline at end of file diff --git a/setup/scripts/generate-env-files.sh b/setup/scripts/generate-env-files.sh index e02e2bb..e63c413 100755 --- a/setup/scripts/generate-env-files.sh +++ b/setup/scripts/generate-env-files.sh @@ -2,13 +2,13 @@ echo -e "${BLUE}Populating required env files...${NC}" -chmod +x $HOME/setup/generate_env_files.py -env_dir=$HOME/deployment +chmod +x $ORBS_ROOT/setup/generate_env_files.py +env_dir=$ORBS_ROOT/deployment shared_name=shared.env env_file=.env if [[ ! -f "$env_dir/$env_file" || $* == *--new-keys* ]]; then - $HOME/setup/generate_env_files.py --keys $keys_path --env_dir $env_dir --env_file $env_file --shared $shared_name + $ORBS_ROOT/setup/generate_env_files.py --keys $keys_path --env_dir $env_dir --env_file $env_file --shared $shared_name if [ $? -eq 0 ]; then echo -e "${GREEN}Env files were successfully stored under $env_dir ${NC}" @@ -18,4 +18,4 @@ if [[ ! -f "$env_dir/$env_file" || $* == *--new-keys* ]]; then fi fi -echo "------------------------------------" \ No newline at end of file +echo "------------------------------------" diff --git a/setup/scripts/handle-node-address.sh b/setup/scripts/handle-node-address.sh index f1ab2be..9a0c2e1 100755 --- a/setup/scripts/handle-node-address.sh +++ b/setup/scripts/handle-node-address.sh @@ -5,17 +5,17 @@ keys_path=/opt/orbs/keys.json if [[ ! -f $keys_path || $* == *--new-keys* ]]; then echo -e "${BLUE}Node address generation${NC}" - chmod +x $HOME/setup/generate_wallet.py + chmod +x $ORBS_ROOT/setup/generate_wallet.py while true; do read -sp "Press [Enter] to create a new wallet or provide a private key you wish to import: " input if [[ -z "$input" ]]; then echo -e ${YELLOW}"\nYou chose to create a new wallet${NC}" - $HOME/setup/generate_wallet.py --path $keys_path --new_key + $ORBS_ROOT/setup/generate_wallet.py --path $keys_path --new_key break elif [[ $input =~ ^(0x)?[0-9a-fA-F]{64}$ ]]; then echo -e "${YELLOW}\nThe private key is valid. Importing the wallet...${NC}" - $HOME/setup/generate_wallet.py --path $keys_path --import_key $input + $ORBS_ROOT/setup/generate_wallet.py --path $keys_path --import_key $input break else echo -e "${YELLOW}\nInvalid input. A valid private key should be a 64-character hexadecimal string (optionally prefixed with '0x'). Please try again.${NC}" diff --git a/setup/scripts/health-check.sh b/setup/scripts/health-check.sh index 7bcd646..a03674d 100755 --- a/setup/scripts/health-check.sh +++ b/setup/scripts/health-check.sh @@ -1,14 +1,13 @@ #!/bin/bash check_services() { - compose_file="$HOME/deployment/docker-compose.yml" # Get the number of services defined in docker-compose file - num_services=$(docker-compose -f $compose_file config --services | wc -l) + num_services=$(docker-compose -f $DOCKER_COMPOSE_FILE config --services | wc -l) for i in {1..5} do # Get the number of services that are up - num_up=$(docker-compose -f $compose_file ps | grep "Up" | wc -l) + num_up=$(docker-compose -f $DOCKER_COMPOSE_FILE ps | grep "Up" | wc -l) if [ $num_up -eq $num_services ]; then echo "All services are up and running." return 0 @@ -26,9 +25,20 @@ if check_services; then if [ $mgmt_svs_status_code -eq 200 ]; then echo -e "${GREEN}Installation complete! 
🚀🚀🚀${NC}" echo "------------------------------------" - echo -e "\n👉👉👉 ${YELLOW}Please register your Guardian using the following website: https://guardians.orbs.network?name=$name&website=$website&ip=$myip&node_address=$public_add ${NC} 👈👈👈\n" # TODO: only show once - during first installation + echo -e "👉👉👉 ${YELLOW}Please register your Guardian using:" + echo -e "website: https://guardians.orbs.network?name=$name&website=$website&ip=$myip&node_address=$public_add=${NC}" + echo -e "👈👈👈" + echo -e "name: $name" + echo -e "node_address: $public_add" + echo -e "website: $website" + echo -e "ip: $myip" + echo -e "------------------------------------" else echo -e "${RED}Installation incomplete!${NC}" + # + echo -e "Please check the logs for more information." + # todo: add logs path + echo -e "------------------------------------" fi else echo -e "${RED}Installation incomplete!${NC}" diff --git a/setup/scripts/install-dependencies.sh b/setup/scripts/install-dependencies.sh index ac2d67b..d05cd2b 100755 --- a/setup/scripts/install-dependencies.sh +++ b/setup/scripts/install-dependencies.sh @@ -4,6 +4,14 @@ echo -e "${BLUE}Installing dependencies. Please be patient as this may take seve UBUNTU_VERSION='22.04' +ln -s $DOCKER_COMPOSE_FILE /opt/orbs/deployment.yml + +# Need to explicitly add docker.io registry +echo "[registries.search]" | sudo tee /etc/containers/registries.conf +echo "registries = ['docker.io']" | sudo tee -a /etc/containers/registries.conf +echo "[registries.insecure]" | sudo tee -a /etc/containers/registries.conf +echo "registries = ['host.docker.internal:6000']" | sudo tee -a /etc/containers/registries.conf + # TODO: I suspect it is dangerous to run upgrade each time installer script is run if [ -f /etc/needrestart/needrestart.conf ]; then sudo sed -i "s/#\$nrconf{restart} = 'i';/\$nrconf{restart} = 'a';/" /etc/needrestart/needrestart.conf # disables the restart modal @@ -22,7 +30,11 @@ sudo apt update -qq sudo apt-get install -qq -y software-properties-common podman curl git cron jq > "$redirect" 2>&1 echo -e "${BLUE}$(podman --version)${NC}" # https://docs.docker.com/compose/install/standalone/ -sudo curl -SL https://github.com/docker/compose/releases/download/v2.20.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose +if [ "$(arch)" == "x86_64" ]; then + sudo curl -SL https://github.com/docker/compose/releases/download/v2.30.2/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose +else + sudo curl -SL https://github.com/docker/compose/releases/download/v2.30.2/docker-compose-linux-aarch64 -o /usr/local/bin/docker-compose +fi sudo chmod +x /usr/local/bin/docker-compose echo -e "${BLUE}$(docker-compose --version)${NC}" @@ -43,7 +55,7 @@ else sudo sysctl -p # INSTALL NODE EXPORTER - cd $HOME + cd $ORBS_ROOT NODE_EXPORTER_VERSION="0.18.1" curl -L https://github.com/prometheus/node_exporter/releases/download/v${NODE_EXPORTER_VERSION}/node_exporter-${NODE_EXPORTER_VERSION}.linux-amd64.tar.gz -o node_exporter.tar.gz tar xvfz node_exporter.tar.gz && mv node_exporter-${NODE_EXPORTER_VERSION}.linux-amd64/node_exporter . @@ -56,7 +68,7 @@ Description=Node Exporter [Service] User=$(whoami) -ExecStart=$HOME/node_exporter --collector.tcpstat +ExecStart=$ORBS_ROOT/node_exporter --collector.tcpstat Restart=always StandardOutput=file:/var/log/node_exporter.log StandardError=file:/var/log/node_exporter.err.log @@ -71,6 +83,9 @@ EOF fi +# Create custom network for containers to be able to address to DNS 172.20.0.1 in nginx as a resolver. 
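+# Note (assumption): the deployment compose files mark this same network as external
+# (networks: default: name: custom_network, external: true), so it must exist before
+# docker-compose brings the services up; with the 172.20.0.0/16 subnet, podman's gateway
+# is expected to be 172.20.0.1, which is the nginx resolver address mentioned above.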
+podman network create --subnet 172.20.0.0/16 custom_network + echo "alias docker=podman" >> ~/.bashrc source ~/.bashrc @@ -101,21 +116,17 @@ else echo -e "${GREEN}Pip is already installed!${NC}" fi -sudo pip install -r $HOME/setup/requirements.txt +sudo pip install -r $ORBS_ROOT/setup/requirements.txt -# Install Poetry Python package manager (only temporarily needed for Manager until published as package) +# Install Poetry Python package manager (only temporarily needed for Control until published as package) sudo apt-get install -y python-is-python3 curl -sSL https://install.python-poetry.org | python3 - export PATH="/home/ubuntu/.local/bin:$PATH" >> ~/.bashrc source ~/.bashrc -# Install Manager dependencies with Poetry (only temporarily needed for Manager until published as package) -cd $HOME/manager && poetry install && cd $HOME +# Install Control dependencies with Poetry (only temporarily needed for Control until published as package) +cd $ORBS_ROOT/control && poetry install && cd $ORBS_ROOT sudo systemctl enable cron -# Need to explicitly add docker.io registry -echo "[registries.search]" | sudo tee /etc/containers/registries.conf -echo "registries = ['docker.io']" | sudo tee -a /etc/containers/registries.conf - -echo -e "${GREEN}Finished installing dependencies!${NC}" +#echo -e "${GREEN}Finished installing dependencies!${NC}" echo "------------------------------------" \ No newline at end of file diff --git a/setup/scripts/setup-control.sh b/setup/scripts/setup-control.sh new file mode 100755 index 0000000..8e23a94 --- /dev/null +++ b/setup/scripts/setup-control.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo -e "${BLUE}Starting control...${NC}" + +# Activate Python virtual environment (only temporarily needed for control until published as package) +source $ORBS_ROOT/control/.venv/bin/activate + +cp $ORBS_ROOT/setup/node-version.json /opt/orbs + +python3 $ORBS_ROOT/control/src/main.py + +echo -e "${GREEN}Control started!${NC}" +echo "------------------------------------" + +# ----- SETUP CONTROL CRON ----- +echo -e "${BLUE}Adding scheduled control run...${NC}" + +sudo crontab $ORBS_ROOT/setup/deployment-poll.cron -u $username +sudo service cron restart + +echo -e "${GREEN}Control schedule set!${NC}" +echo "------------------------------------" \ No newline at end of file diff --git a/setup/scripts/setup-manager.sh b/setup/scripts/setup-manager.sh deleted file mode 100755 index 59b6604..0000000 --- a/setup/scripts/setup-manager.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -echo -e "${BLUE}Starting manager...${NC}" - -# Activate Python virtual environment (only temporarily needed for Manager until published as package) -source $HOME/manager/.venv/bin/activate - -cp $HOME/setup/node-version.json /opt/orbs - -python3 $HOME/manager/src/manager.py - -echo -e "${GREEN}Manager started!${NC}" -echo "------------------------------------" - -# ----- SETUP MANAGER CRON ----- -echo -e "${BLUE}Adding scheduled manager run...${NC}" - -sudo crontab $HOME/setup/deployment-poll.cron -u $username -sudo service cron restart - -echo -e "${GREEN}Manager schedule set!${NC}" -echo "------------------------------------" \ No newline at end of file
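A quick post-install spot check (a sketch only; it assumes the cron entry, nginx rewrites and service names introduced above, that the node answers on localhost, and that the install ran as the current login user):

    sudo crontab -l -u "$(whoami)"      # deployment-poll entry installed by setup-control.sh
    tail -n 50 /tmp/control.out         # poll output path used in setup/deployment-poll.cron
    curl -s http://localhost/service/ethereum-reader/status | jq .   # status.json exposed via nginx from /opt/orbs
    curl -s http://localhost/service/control/logs                     # static control log served by the nginx rewrite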