diff --git a/.flake8 b/.flake8 deleted file mode 100644 index a34d601c..00000000 --- a/.flake8 +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 100 -exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,venv,helper-scripts,.venv diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000..5df55bfe --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,20 @@ +- always keep the changes minimal and purposeful +- focus on fixing the exact problem or implementing the exact feature +- keep the code simple, do not write defensive code +- do not describe your changes in details after you made changes, focus on writing code +- do not generate any documentation, the code should be self-explanatory +- do not generate any in-line comments +- for the new files, always add a license header, same format as in the existing files +- no commented out code +- no console logs in production code +- no unused imports +- no redundant code - move repeated logic into helper functions +- use type hints to specify the expected types of function arguments and return values + +- check `pyproject.toml` for formatting rules +- always lint changes using `uv run ruff check` +- tests should be placed in `tests/` directory, follow the existing structure and code style +- always use `uv` to run all commands in the repo (e.g., `uv run ruff`, `uv run pytest`, etc.) +- for running tests, export environment variables in the terminal before running the tests: `. 
./scripts/export_env.sh` + +- additional external context is located in context directory \ No newline at end of file diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index abdf44a9..dd86ba23 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,180 +1,78 @@ name: Build and publish + on: pull_request: types: [closed] branches: - - master - develop - beta - stable - 'v*.*.*' jobs: - create_release: + build_and_release: if: github.event.pull_request.merged - name: Create release + name: Build and create release runs-on: ubuntu-22.04 - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} - version: ${{ steps.export_outputs.outputs.version }} - branch: ${{ steps.export_outputs.outputs.branch }} + permissions: + contents: write steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true - - name: Checkout submodules - run: git submodule update --init - - - name: Install ubuntu dependencies - run: | - sudo apt-get update - sudo apt-get install python-setuptools + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: 3.13 - - name: Set Versions + - name: Calculate version + id: versioning run: | bash ./scripts/set_versions_ga.sh - - - name: Set release + + - name: Determine prerelease status + id: release_info run: | - if [[ "$BRANCH" == "stable" ]]; then - export PRERELEASE=false + if [[ "${{ env.BRANCH }}" == "stable" ]]; then + echo "prerelease=false" >> $GITHUB_OUTPUT else - export PRERELEASE=true + echo "prerelease=true" >> $GITHUB_OUTPUT fi - echo "PRERELEASE=$PRERELEASE" >> $GITHUB_ENV - - name: Create Release - id: create_release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build binaries + id: build + run: | + mkdir -p ${{ github.workspace }}/dist + docker build . 
-t node-cli-builder + + docker run --rm -v ${{ github.workspace }}/dist:/app/dist node-cli-builder \ + bash scripts/build.sh ${{ env.VERSION }} ${{ env.BRANCH }} skale + + docker run --rm -v ${{ github.workspace }}/dist:/app/dist node-cli-builder \ + bash scripts/build.sh ${{ env.VERSION }} ${{ env.BRANCH }} fair + + echo "dist_path=${{ github.workspace }}/dist" >> $GITHUB_OUTPUT + + - name: Generate checksums + run: | + cd ${{ steps.build.outputs.dist_path }} + for file in skale-*; do + sha512sum "$file" > "$file.sha512" + done + echo "Checksums generated:" + ls -l *.sha512 + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 with: tag_name: ${{ env.VERSION }} - release_name: ${{ env.VERSION }} + name: Release ${{ env.VERSION }} draft: false - prerelease: ${{ env.PRERELEASE }} - - name: Export outputs - id: export_outputs - run: | - echo "::set-output name=version::$VERSION" - echo "::set-output name=branch::$BRANCH" - - build_and_publish_normal: - if: github.event.pull_request.merged - needs: create_release - name: Build and publish for ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-22.04 - asset_name: skale-${{ needs.create_release.outputs.version }}-Linux-x86_64 - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.11 - uses: actions/setup-python@v1 - with: - python-version: 3.11 - - - name: Install ubuntu dependencies - if: matrix.os == 'ubuntu-22.04' - run: | - sudo apt-get update - - - name: Checkout submodules - run: git submodule update --init - - - name: Build normal binary - run: | - mkdir -p ./dist - docker build . 
-t node-cli-builder - docker run -v /home/ubuntu/dist:/app/dist node-cli-builder scripts/build.sh ${{ needs.create_release.outputs.version }} ${{ needs.create_release.outputs.branch }} normal - ls -altr /home/ubuntu/dist/ - docker rm -f $(docker ps -aq) - - - name: Save sha512sum - run: | - sudo sha512sum /home/ubuntu/dist/${{ matrix.asset_name }} | sudo tee > /dev/null /home/ubuntu/dist/sha512sum - - - name: Upload release binary - id: upload-release-asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create_release.outputs.upload_url }} - asset_path: /home/ubuntu/dist/${{ matrix.asset_name }} - asset_name: ${{ matrix.asset_name }} - asset_content_type: application/octet-stream - - - name: Upload release checksum - id: upload-release-checksum - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create_release.outputs.upload_url }} - asset_path: /home/ubuntu/dist/sha512sum - asset_name: ${{ matrix.asset_name }}.sha512 - asset_content_type: text/plain - - build_and_publish_sync: - if: github.event.pull_request.merged - needs: create_release - name: Build and publish for ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-22.04 - asset_name: skale-${{ needs.create_release.outputs.version }}-Linux-x86_64-sync - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.11 - uses: actions/setup-python@v1 - with: - python-version: 3.11 - - - name: Install ubuntu dependencies - if: matrix.os == 'ubuntu-22.04' - run: | - sudo apt-get update - - - name: Checkout submodules - run: git submodule update --init - - - name: Build sync release binary - run: | - mkdir -p ./dist - docker build . 
-t node-cli-builder - docker run -v /home/ubuntu/dist:/app/dist node-cli-builder scripts/build.sh ${{ needs.create_release.outputs.version }} ${{ needs.create_release.outputs.branch }} sync - ls -altr /home/ubuntu/dist/ - docker rm -f $(docker ps -aq) - - - name: Save sha512sum - run: | - sudo sha512sum /home/ubuntu/dist/${{ matrix.asset_name }} | sudo tee > /dev/null /home/ubuntu/dist/sha512sum - - - name: Upload release sync CLI - id: upload-sync-release-asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create_release.outputs.upload_url }} - asset_path: /home/ubuntu/dist/${{ matrix.asset_name }} - asset_name: ${{ matrix.asset_name }} - asset_content_type: application/octet-stream - - - name: Upload release sync CLI checksum - id: upload-sync-release-checksum - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create_release.outputs.upload_url }} - asset_path: /home/ubuntu/dist/sha512sum - asset_name: ${{ matrix.asset_name }}.sha512 - asset_content_type: text/plain + prerelease: ${{ steps.release_info.outputs.prerelease }} + generate_release_notes: true + files: | + ${{ steps.build.outputs.dist_path }}/skale-* + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 31625095..5f589d47 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,14 +1,14 @@ name: Test -on: [push, pull_request] +on: [push] jobs: test: runs-on: ubuntu-22.04 strategy: matrix: - python-version: [3.11] + python-version: ['3.13'] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: submodules: true @@ -16,10 +16,19 @@ jobs: run: git submodule update --init - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + - name: 
Install uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + + - name: Cache uv + uses: actions/cache@v4 + with: + path: ~/.cache/uv + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }} + - name: Install ubuntu dependencies run: | sudo apt-get update @@ -27,42 +36,50 @@ jobs: - name: Install python dependencies run: | - python -m pip install --upgrade pip - pip install -e .[dev] + uv venv + uv pip install -e ".[dev]" + + - name: Generate info + run: bash ./scripts/generate_info.sh 1.0.0 my-branch skale - - name: Lint with flake8 + - name: Check with ruff run: | - flake8 . + uv run ruff check - - name: Build binary - normal + - name: Build docker image + run: docker build . -t node-cli-builder + + - name: Build binary - skale run: | mkdir -p ./dist - docker build . -t node-cli-builder - docker run -v /home/ubuntu/dist:/app/dist node-cli-builder scripts/build.sh test test normal + docker run -v /home/ubuntu/dist:/app/dist node-cli-builder bash scripts/build.sh test test skale docker rm -f $(docker ps -aq) - - name: Check build - normal + - name: Check build - skale run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64 - - name: Build binary - sync + - name: Build binary - fair run: | mkdir -p ./dist - docker build . 
-t node-cli-builder - docker run -v /home/ubuntu/dist:/app/dist node-cli-builder scripts/build.sh test test sync + docker run -v /home/ubuntu/dist:/app/dist node-cli-builder bash scripts/build.sh test test fair docker rm -f $(docker ps -aq) - - name: Check build - sync - run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64-sync + - name: Check build - fair + run: sudo /home/ubuntu/dist/skale-test-Linux-x86_64-fair - name: Run prepare test build run: | - scripts/build.sh test test normal + uv run bash scripts/build.sh test test skale + + - name: Run redis + run: | + bash ./helper-scripts/redis/run.sh - name: Run tests run: | export PYTHONPATH=${PYTHONPATH}:/usr/lib/python3/dist-packages/ - bash ./scripts/run_tests.sh + uv run bash ./scripts/run_tests.sh - name: Run nftables tests run: | - scripts/run_nftables_test.sh + uv run scripts/run_nftables_test.sh diff --git a/.gitignore b/.gitignore index de02cf68..65232943 100644 --- a/.gitignore +++ b/.gitignore @@ -113,10 +113,16 @@ node_cli/cli/info.py meta.json -disk_mountpoint.txt +block_device.txt sgx_server_url.txt resource_allocation.json conf.json test-env -nginx.conf \ No newline at end of file +nginx.conf +tests/.skale/node_data/docker.json +tests/.skale/node_data/node_options.json +tests/.skale/config/nginx.conf.j2 + +.zed +uv.lock \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index c77efb4b..7fc37545 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,30 +1,32 @@ -FROM python:3.11-bookworm - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt install -y \ - git \ - build-essential \ - software-properties-common \ - zlib1g-dev \ - libssl-dev \ - libffi-dev \ - swig \ - iptables \ - nftables \ - python3-nftables \ - libxslt-dev \ - kmod - - -RUN mkdir /app +FROM python:3.13-slim-bookworm AS builder + +COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv + WORKDIR /app -COPY . . 
+COPY pyproject.toml ./ + +RUN uv pip install --system --no-cache ".[dev]" + +FROM python:3.13-slim-bookworm -ENV PATH=/app/buildvenv/bin:$PATH -ENV PYTHONPATH="{PYTHONPATH}:/usr/lib/python3/dist-packages" +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + git \ + iptables \ + nftables \ + python3-nftables \ + kmod \ + wget \ + binutils && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +COPY --from=builder /usr/local/lib/python3.13/site-packages /usr/local/lib/python3.13/site-packages +COPY --from=builder /usr/local/bin /usr/local/bin + +COPY . . -RUN python3.11 -m venv /app/buildvenv && \ - pip install --upgrade pip && \ - pip install wheel setuptools==63.2.0 && \ - pip install -e '.[dev]' +ENV PYTHONPATH="/app:/usr/lib/python3/dist-packages" +ENV COLUMNS=80 diff --git a/README.md b/README.md index 12741e27..5a2ef37f 100644 --- a/README.md +++ b/README.md @@ -4,64 +4,100 @@ ![Test](https://github.com/skalenetwork/node-cli/workflows/Test/badge.svg) [![Discord](https://img.shields.io/discord/534485763354787851.svg)](https://discord.gg/vvUtWJB) -SKALE Node CLI, part of the SKALE suite of validator tools, is the command line to setup, register and maintain your SKALE node. +SKALE Node CLI, part of the SKALE suite of validator tools, is the command line interface to setup, register and maintain your SKALE node. It comes in three distinct build types: Standard (for validator nodes), Passive (for dedicated sChain synchronization), and Fair. ## Table of Contents 1. [Installation](#installation) -2. [CLI usage](#cli-usage)\ - 2.1 [Top level commands](#top-level-commands)\ - 2.2 [Node](#node-commands)\ - 2.3 [Wallet](#wallet-commands)\ - 2.4 [sChains](#schain-commands)\ - 2.5 [Health](#health-commands)\ - 2.6 [SSL](#ssl-commands)\ - 2.7 [Logs](#logs-commands)\ - 2.8 [Resources allocation](#resources-allocation-commands)\ - 2.9 [Validate](#validate-commands) -3. 
[Sync CLI usage](#sync-cli-usage)\ - 3.1 [Top level commands](#top-level-commands-sync)\ - 3.2 [Sync node commands](#sync-node-commands) -4. [Exit codes](#exit-codes) -5. [Development](#development) + 1. [Standard Node Binary](#standard-node-binary) + 2. [Passive Node Binary](#passive-node-binary) + 3. [Fair Node Binary](#fair-node-binary) + 4. [Permissions and Testing](#permissions-and-testing) +2. [Standard Node Usage (`skale` - Normal Build)](#standard-node-usage-skale---normal-build) + 1. [Top level commands (Standard)](#top-level-commands-standard) + 2. [Node commands (Standard)](#node-commands-standard) + 3. [Wallet commands (Standard)](#wallet-commands-standard) + 4. [sChain commands (Standard)](#schain-commands-standard) + 5. [Health commands (Standard)](#health-commands-standard) + 6. [SSL commands (Standard)](#ssl-commands-standard) + 7. [Logs commands (Standard)](#logs-commands-standard) +3. [Passive Node Usage (`skale` - Passive Build)](#passive-node-usage-skale---passive-build) + 1. [Top level commands (Passive)](#top-level-commands-passive) + 2. [Passive node commands](#passive-node-commands) +4. [Fair Node Usage (`fair`)](#fair-node-usage-fair) + 1. [Top level commands (Fair)](#top-level-commands-fair) + 2. [Fair Boot commands](#fair-boot-commands) + 3. [Fair Node commands](#fair-node-commands) + 4. [Fair Chain commands](#fair-chain-commands) + 5. [Fair Wallet commands](#fair-wallet-commands) + 6. [Fair Logs commands](#fair-logs-commands) + 7. [Fair SSL commands](#fair-ssl-commands) + 8. [Fair Staking commands](#fair-staking-commands) + 9. [Passive Fair Node commands](#passive-fair-node-commands) +5. [Exit codes](#exit-codes) +6. 
[Development](#development) + +*** ## Installation -* Prerequisites +### Prerequisites -Ensure that the following package is installed: **docker**, **docker-compose** +Ensure that the following packages are installed: **docker**, **docker-compose** (1.27.4+) -* Download the executable +### SKALE Node Binary + +This binary (`skale-VERSION-OS`) is used for managing SKALE validator nodes. ```shell -VERSION_NUM={put the version number here} && sudo -E bash -c "curl -L https://github.com/skalenetwork/node-cli/releases/download/$VERSION_NUM/skale-$VERSION_NUM-`uname -s`-`uname -m` > /usr/local/bin/skale" +# Replace {version} with the desired release version (e.g., 3.0.0) +CLI_VERSION={version} && \ +sudo -E bash -c "curl -L https://github.com/skalenetwork/node-cli/releases/download/$CLI_VERSION/skale-$CLI_VERSION-`uname -s`-`uname -m` > /usr/local/bin/skale" ``` -For Sync node version: +### Fair Node Binary + +This binary (`skale-VERSION-OS-fair`) is used for managing nodes on the Fair network. 
```shell -VERSION_NUM={put the version number here} && sudo -E bash -c "curl -L https://github.com/skalenetwork/node-cli/releases/download/$VERSION_NUM/skale-$VERSION_NUM-`uname -s`-`uname -m`-sync > /usr/local/bin/skale" +# Replace {version} with the desired release version (e.g., 3.0.0) +CLI_VERSION={version} && \ +sudo -E bash -c "curl -L https://github.com/skalenetwork/node-cli/releases/download/$CLI_VERSION/skale-$CLI_VERSION-`uname -s`-`uname -m`-fair > /usr/local/bin/fair" ``` -* Apply executable permissions to the downloaded binary: +### Permissions and Testing + +Apply executable permissions to the downloaded binary (adjust name accordingly): ```shell -chmod +x /usr/local/bin/skale +# For Standard or Passive binary +sudo chmod +x /usr/local/bin/skale + +# For Fair binary +sudo chmod +x /usr/local/bin/fair ``` -* Test the installation +Test the installation: ```shell +# Standard or Passive build skale --help + +# Fair build +fair --help ``` -## CLI usage +*** + +## Standard Node Usage (`skale` - Normal Build) + +Commands available in the **standard `skale` binary** for managing nodes. -### Top level commands +### Top level commands (Standard) #### Info -Print build info +Print build info for the `skale` (normal) binary. ```shell skale info @@ -69,7 +105,7 @@ skale info #### Version -Print version number +Print version number for the `skale` (normal) binary. ```shell skale version @@ -79,13 +115,13 @@ Options: * `--short` - prints version only, without additional text. -### Node commands +### Node commands (Standard) > Prefix: `skale node` #### Node information -Get base info about SKALE node +Get base info about the standard SKALE node. ```shell skale node info @@ -93,13 +129,13 @@ skale node info Options: -`-f/--format json/text` - optional +* `-f/--format json/text` - optional. #### Node initialization -Initialize a SKALE node on current machine +Initialize a standard SKALE node on the current machine. 
-> :warning: **Please avoid re-initialization**: First run `skale node info` to confirm current state of intialization. +> :warning: **Avoid re-initializing a node that’s already initialized**: Run `skale node info` first to confirm the current initialization state. ```shell skale node init [ENV_FILE] @@ -107,29 +143,32 @@ skale node init [ENV_FILE] Arguments: -* `ENV_FILE` - path to .env file (required parameters are listed in the `skale node init` command) +* `ENV_FILE` - path to .env file (required). -You should specify the following environment variables: +Required environment variables in `ENV_FILE`: -* `SGX_SERVER_URL` - SGX server URL -* `DISK_MOUNTPOINT` - disk mount point for storing sChains data -* `DOCKER_LVMPY_STREAM` - stream of `docker-lvmpy` to use -* `CONTAINER_CONFIGS_STREAM` - stream of `skale-node` to use -* `ENDPOINT` - RPC endpoint of the node in the network where SKALE Manager is deployed -* `MANAGER_CONTRACTS_ABI_URL` - URL to SKALE Manager contracts ABI and addresses -* `IMA_CONTRACTS_ABI_URL` - URL to IMA contracts ABI and addresses -* `FILEBEAT_URL` - URL to the Filebeat log server -* `ENV_TYPE` - environement type (mainnet, testnet, etc) +* `SGX_SERVER_URL` - SGX server URL. +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g. /dev/sdc) +* `DOCKER_LVMPY_VERSION` - Version of `docker-lvmpy`. +* `NODE_VERSION` - Version of `skale-node`. +* `ENDPOINT` - RPC endpoint of the network where SKALE Manager is deployed. +* `MANAGER_CONTRACTS` - SKALE Manager `message_proxy_mainnet` contract alias or address. +* `IMA_CONTRACTS` - IMA `skale_manager` contract alias or address. +* `FILEBEAT_HOST` - URL of the Filebeat log server. +* `ENV_TYPE` - Environment type (e.g., 'mainnet', 'testnet', 'qanet', 'devnet'). + +> In `MANAGER_CONTRACTS` and `IMA_CONTRACTS` fields, if you are using a recognized network (e.g., 'Mainnet', 'Holesky', 'local'), you can use a recognized alias (e.g., 'production', 'grants'). 
You can check the list of recognized networks and aliases in [contract deployments](https://github.com/skalenetwork/skale-contracts/tree/deployments). +> :warning: If you are using a custom network or a contract which isn't recognized by the underlying `skale-contracts` library, you **MUST** provide a direct contract address. Optional variables: * `TG_API_KEY` - Telegram API key * `TG_CHAT_ID` - Telegram chat ID -* `MONITORING_CONTAINERS` - will enable monitoring containers (`filebeat`, `cadvisor`, `prometheus`) +* `MONITORING_CONTAINERS` - Enable monitoring containers (`cadvisor`, `node-exporter`). #### Node initialization from backup -Restore SKALE node on another machine +Restore a standard SKALE node on another machine from a backup. ```shell skale node restore [BACKUP_PATH] [ENV_FILE] @@ -137,139 +176,148 @@ skale node restore [BACKUP_PATH] [ENV_FILE] Arguments: -* `BACKUP_PATH` - path to the archive with backup data generated by `skale node backup` command -* `ENV_FILE` - path to .env file (required parameters are listed in the `skale node init` command) +* `BACKUP_PATH` - Path to the archive created by `skale node backup`. +* `ENV_FILE` - Path to .env file with configuration for the restored node. #### Node backup -Generate backup file to restore SKALE node on another machine +Generate a backup archive of the standard SKALE node's state. ```shell -skale node backup [BACKUP_FOLDER_PATH] [ENV_FILE] +skale node backup [BACKUP_FOLDER_PATH] ``` Arguments: -* `BACKUP_FOLDER_PATH` - path to the folder where the backup file will be saved +* `BACKUP_FOLDER_PATH` - Path to the folder where the backup file will be saved. #### Node Registration +Register the standard node with the SKALE Manager contract. 
+ ```shell -skale node register +skale node register --name --ip --domain [--port ] ``` Required arguments: -* `--ip` - public IP for RPC connections and consensus -* `--domain`/`-d` - SKALE node domain name -* `--name` - SKALE node name +* `--ip` - Public IP for RPC connections and consensus. +* `--domain`/`-d` - SKALE node domain name. +* `--name` - SKALE node name. Optional arguments: -* `--port` - public port - beginning of the port range for node SKALE Chains (default: `10000`) +* `--port` - Base port for node sChains (default: `10000`). #### Node update -Update SKALE node on current machine +Update the standard SKALE node software and configuration. ```shell -skale node update [ENV_FILEPATH] +skale node update [ENV_FILEPATH] [--yes] ``` -Options: - -* `--yes` - update without additional confirmation - Arguments: -* `ENV_FILEPATH` - path to env file where parameters are defined +* `ENV_FILEPATH` - Path to the .env file containing potentially updated parameters. + +Options: -You can also specify a file with environment variables -which will update parameters in env file used during skale node init. +* `--yes` - Update without confirmation prompt. #### Node turn-off -Turn-off SKALE node on current machine and optionally set it to the maintenance mode +Turn off the standard SKALE node containers. ```shell -skale node turn-off +skale node turn-off [--maintenance-on] [--yes] ``` Options: -* `--maintenance-on` - set SKALE node into maintenance mode before turning off -* `--yes` - turn off without additional confirmation +* `--maintenance-on` - Set node to maintenance mode before turning off. +* `--yes` - Turn off without confirmation. #### Node turn-on -Turn on SKALE node on current machine and optionally disable maintenance mode +Turn on the standard SKALE node containers. 
```shell -skale node turn-on [ENV_FILEPATH] +skale node turn-on [ENV_FILEPATH] [--maintenance-off] [--yes] ``` -Options: - -* `--maintenance-off` - turn off maintenance mode after turning on the node -* `--yes` - turn on without additional confirmation - Arguments: -* `ENV_FILEPATH` - path to env file where parameters are defined +* `ENV_FILEPATH` - Path to the .env file. -You can also specify a file with environment variables -which will update parameters in env file used during skale node init. +Options: + +* `--maintenance-off` - Turn off maintenance mode after turning on. +* `--yes` - Turn on without additional confirmation. #### Node maintenance -Set SKALE node into maintenance mode +Control the node's maintenance status in SKALE Manager. ```shell -skale node maintenance-on +# Set maintenance ON +skale node maintenance-on [--yes] + +# Set maintenance OFF +skale node maintenance-off ``` Options: -* `--yes` - set without additional confirmation +* `--yes` - Perform action without additional confirmation. -Switch off maintenance mode +#### Domain name + +Set the standard node's domain name. ```shell -skale node maintenance-off +skale node set-domain --domain [--yes] ``` -#### Domain name +Required Options: + +* `--domain`/`-d` - The new SKALE node domain name. + +Options: -Set SKALE node domain name +* `--yes` - Set without additional confirmation. + +#### Skale Node Signature + +Get the node signature for a validator ID. ```shell -skale node set-domain +skale node signature ``` -Options: +Arguments: -* `--domain`/`-d` - SKALE node domain name -* `--yes` - set without additional confirmation +* `VALIDATOR_ID` - The ID of the validator requesting the signature. -### Wallet commands +### Wallet commands (Standard) > Prefix: `skale wallet` -Commands related to Ethereum wallet associated with SKALE node +Commands related to the Ethereum wallet associated with the standard SKALE node. 
#### Wallet information ```shell -skale wallet info +skale wallet info [-f json/text] ``` Options: -`-f/--format json/text` - optional +* `-f/--format json/text` - optional. #### Wallet setting -Set local wallet for the SKALE node +Set the local wallet private key for the node. ```shell skale wallet set --private-key $ETH_PRIVATE_KEY @@ -277,319 +325,955 @@ skale wallet set --private-key $ETH_PRIVATE_KEY #### Send ETH tokens -Send ETH tokens from SKALE node wallet to specific address +Send ETH from the node's wallet. ```shell -skale wallet send [ADDRESS] [AMOUNT] +skale wallet send [--yes] ``` Arguments: -* `ADDRESS` - Ethereum receiver address -* `AMOUNT` - Amount of ETH tokens to send +* `RECEIVER_ADDRESS` - Ethereum receiver address. +* `AMOUNT_ETH` - Amount of ETH tokens to send. Optional arguments: -`--yes` - Send without additional confirmation +* `--yes` - Send without additional confirmation. -### sChain commands +### sChain commands (Standard) > Prefix: `skale schains` -#### SKALE Chain list +Commands for interacting with sChains managed by the standard node. -List of SKALE Chains served by connected node +#### List sChains + +List of SKALE Chains served by connected node. ```shell skale schains ls ``` -#### SKALE Chain configuration +#### Get sChain config + +Show the configuration for a specific SKALE Chain. ```shell -skale schains config SCHAIN_NAME +skale schains config ``` -#### SKALE Chain DKG status +#### Get DKG status -List DKG status for each SKALE Chain on the node +List DKG status for each SKALE Chain on the node. ```shell skale schains dkg ``` -#### SKALE Chain info +#### Get sChain info -Show information about SKALE Chain on node +Show information about a specific SKALE Chain on the node. ```shell -skale schains info SCHAIN_NAME +skale schains info [--json] ``` Options: -* `--json` - Show info in JSON format +* `--json` - Show info in JSON format. 
-#### SKALE Chain repair +#### Repair sChain -Turn on repair mode for SKALE Chain +Turn on repair mode for a specific SKALE Chain. ```shell -skale schains repair SCHAIN_NAME +skale schains repair ``` -### Health commands +### Health commands (Standard) > Prefix: `skale health` -#### SKALE containers +Commands to check the health of the standard node and its components. + +#### List containers -List all SKALE containers running on the connected node +List all SKALE containers running on the connected node. ```shell -skale health containers +skale health containers [-a/--all] ``` Options: -* `-a/--all` - list all containers (by default - only running) +* `-a/--all` - list all containers (by default - only running). -#### sChains healthchecks +#### Get sChains healthchecks -Show health check results for all SKALE Chains on the node +Show health check results for all SKALE Chains on the node. ```shell -skale health schains +skale health schains [--json] ``` Options: -* `--json` - Show data in JSON format +* `--json` - Show data in JSON format. -#### SGX +#### Check SGX server status Status of the SGX server. Returns the SGX server URL and connection status. ```shell -$ skale health sgx - -SGX server status: -┌────────────────┬────────────────────────────┐ -│ SGX server URL │ https://0.0.0.0:1026/ │ -├────────────────┼────────────────────────────┤ -│ Status │ CONNECTED │ -└────────────────┴────────────────────────────┘ +skale health sgx ``` -### SSL commands +### SSL commands (Standard) > Prefix: `skale ssl` -#### SSL Status +Manage SSL certificates for the standard node. + +#### Check SSL Status -Status of the SSL certificates on the node +Status of the SSL certificates on the node. ```shell skale ssl status ``` -Admin API URL: \[GET] `/api/ssl/status` +Admin API URL: `[GET] /api/ssl/status` #### Upload certificates -Upload new SSL certificates +Upload new SSL certificates. 
```shell -skale ssl upload +skale ssl upload -c -k [-f/--force] ``` -##### Options +Options: -* `-c/--cert-path` - Path to the certificate file -* `-k/--key-path` - Path to the key file -* `-f/--force` - Overwrite existing certificates +* `-c/--cert-path` - Path to the certificate file. +* `-k/--key-path` - Path to the key file. +* `-f/--force` - Overwrite existing certificates. -Admin API URL: \[GET] `/api/ssl/upload` +Admin API URL: `[POST] /api/ssl/upload` -#### Check ssl certificate +#### Check certificate -Check ssl certificate be connecting to healthcheck ssl server +Check SSL certificate by connecting to the health-check SSL server. ```shell -skale ssl check +skale ssl check [-c ] [-k ] [--type ] [--port ] [--no-client] ``` -##### Options +Options: -* `-c/--cert-path` - Path to the certificate file (default: uploaded using `skale ssl upload` certificate) -* `-k/--key-path` - Path to the key file (default: uploaded using `skale ssl upload` key) -* `--type/-t` - Check type (`openssl` - openssl cli check, `skaled` - skaled-based check, `all` - both) -* `--port/-p` - Port to start healthcheck server (defualt: `4536`) -* `--no-client` - Skip client connection (only make sure server started without errors) +* `-c/--cert-path` - Path to the certificate file (default: uploaded using `skale ssl upload` certificate). +* `-k/--key-path` - Path to the key file (default: uploaded using `skale ssl upload` key). +* `--type/-t` - Check type (`openssl` - openssl cli check, `skaled` - skaled-based check, `all` - both). +* `--port/-p` - Port to start healthcheck server (default: `4536`). +* `--no-client` - Skip client connection (only make sure server started without errors). -### Logs commands +### Logs commands (Standard) > Prefix: `skale logs` -#### CLI Logs +Access logs for the standard node. 
+ +#### Fetch CLI Logs Fetch node CLI logs: ```shell -skale logs cli +skale logs cli [--debug] ``` Options: -* `--debug` - show debug logs; more detailed output +* `--debug` - show debug logs; more detailed output. -#### Dump Logs +#### Dump All Node Logs Dump all logs from the connected node: ```shell -skale logs dump [PATH] +skale logs dump [TARGET_PATH] [-c/--container ] ``` -Optional arguments: +Arguments: -* `--container`, `-c` - Dump logs only from specified container +* `TARGET_PATH` - Optional path to save the log dump archive. -### Resources allocation commands +Options: + +* `--container`, `-c` - Dump logs only from specified container. + +*** + +## Passive Node Usage (`skale` - Passive Build) -> Prefix: `skale resources-allocation` +Commands available in the **passive `skale` binary** for managing dedicated Passive nodes. +Note that this binary contains a **different set of commands** compared to the standard build. -#### Show allocation file +### Top level commands (Passive) -Show resources allocation file: +#### Info (Passive) + +Print build info for the `skale` (passive) binary. ```shell -skale resources-allocation show +skale info ``` -#### Generate/update +#### Version (Passive) -Generate/update allocation file: +Print version number for the `skale` (passive) binary. ```shell -skale resources-allocation generate [ENV_FILE] +skale version +``` + +Options: + +* `--short` - prints version only, without additional text. + +### Passive node commands + +> Prefix: `skale passive-node` + +#### Passive node initialization + +Initialize a dedicated Passive node on the current machine. + +```shell +skale passive-node init [ENV_FILE] [--indexer | --archive] [--snapshot] [--snapshot-from ] [--yes] ``` Arguments: -* `ENV_FILE` - path to .env file (required parameters are listed in the `skale node init` command) +* `ENV_FILE` - path to .env file (required). 
+ +Required environment variables in `ENV_FILE`: + +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g. /dev/sdc). +* `DOCKER_LVMPY_VERSION` - Version of `docker-lvmpy`. +* `NODE_VERSION` - Version of `skale-node`. +* `ENDPOINT` - RPC endpoint of the network where SKALE Manager is deployed. +* `MANAGER_CONTRACTS` - SKALE Manager alias or address. +* `IMA_CONTRACTS` - IMA alias or address. +* `SCHAIN_NAME` - Name of the specific SKALE chain to sync. +* `ENV_TYPE` - Environment type (e.g., 'mainnet', 'testnet'). + +> In `MANAGER_CONTRACTS` and `IMA_CONTRACTS` fields, if you are using a recognized network (e.g., 'Mainnet', 'Holesky', 'local'), you can use a recognized alias (e.g., 'production', 'grants'). You can check the list of recognized networks and aliases in [contract deployments](https://github.com/skalenetwork/skale-contracts/tree/deployments). +> :warning: If you are using a custom network or a contract which isn't recognized by the underlying `skale-contracts` library, you **MUST** provide a direct contract address. Options: -* `--yes` - generate without additional confirmation -* `-f/--force` - rewrite allocation file if it exists +* `--indexer` - Run in indexer mode (disables block rotation). +* `--archive` - Run in archive mode (enable historic state and disable block rotation). +* `--snapshot` - Start sync node from snapshot. +* `--snapshot-from ` - Specify the IP of another node to download a snapshot from. +* `--yes` - Initialize without additional confirmation. + +#### Passive node update + +Update the Passive node software and configuration. + +```shell +skale passive-node update [ENV_FILEPATH] [--yes] +``` + +Arguments: + +* `ENV_FILEPATH` - Path to the .env file. + +Options: -### Validate commands +* `--yes` - Update without additional confirmation. -> Prefix: `skale validate` +> NOTE: You can just update a file with environment variables used during `skale passive-node init`. 
-#### Validate abi +#### Passive node cleanup -Check whether ABI files contain valid JSON data +Remove all data and containers for the Passive node. ```shell -skale validate abi +skale passive-node cleanup [--yes] ``` Options: -* `--json` - show validation result in json format +* `--yes` - Cleanup without confirmation. -## Sync CLI usage +> WARNING: This command removes all Passive node data. -### Top level commands sync +*** -#### Info +## Fair Node Usage (`fair`) + +Commands available in the **`fair` binary** for managing nodes on the Fair network. + +### Top level commands (Fair) + +#### Fair Info -Print build info +Print build info for the `fair` binary. ```shell -skale info +fair info ``` -#### Version +#### Fair Version -Print version number +Print version number for the `fair` binary. ```shell -skale version +fair version [--short] ``` Options: * `--short` - prints version only, without additional text. -### Sync node commands +### Fair Boot commands + +> Prefix: `fair boot` + +Commands for a Fair node in the Boot phase. + +#### Fair Boot Info + +Get information about the Fair node during boot phase. + +```shell +fair boot info [--format FORMAT] +``` + +Options: + +* `--format`/`-f` - Output format (`json` or `text`). + +#### Fair Boot Initialization + +Initialize the Fair node boot phase. + +```shell +fair boot init +``` + +Arguments: + +* `ENV_FILE` - Path to the environment file containing configuration. + +Required environment variables in `ENV_FILE`: + +* `SGX_SERVER_URL` - SGX server URL. +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g. /dev/sdc). +* `NODE_VERSION` - Version of `skale-node`. +* `ENDPOINT` - RPC endpoint of the network where Fair Manager is deployed. +* `MANAGER_CONTRACTS` - SKALE Manager alias or address. +* `IMA_CONTRACTS` - IMA alias or address (*Note: Required by boot service, may not be used by Fair itself*). +* `FILEBEAT_HOST` - URL/IP:Port of the Filebeat log server. 
+* `ENV_TYPE` - Environment type (e.g., 'mainnet', 'devnet'). + +Optional variables: + +* `MONITORING_CONTAINERS` - Enable monitoring containers (`cadvisor`, `node-exporter`). + +#### Fair Boot Registration + +Register the Fair node with Fair Manager *during* the boot phase. + +```shell +fair boot register --name --ip --domain [--port ] +``` + +Options: + +* `--name`/`-n` - Fair node name (required). +* `--ip` - Public IP for RPC connections & consensus (required). +* `--domain`/`-d` - Fair node domain name (e.g., `fair1.example.com`, required). +* `--port`/`-p` - Base port for node sChains (default: from configuration). + +#### Fair Boot Signature + +Get the node signature for a validator ID during boot phase. + +```shell +fair boot signature +``` + +Arguments: + +* `VALIDATOR_ID` - The ID of the validator requesting the signature. + +#### Fair Boot Update + +Update the Fair node software during boot phase. + +```shell +fair boot update [--yes] [--pull-config SCHAIN] +``` + +Arguments: + +* `ENV_FILE` - Path to the environment file for node configuration. + +Required environment variables in `ENV_FILE`: + +* `SGX_SERVER_URL` - SGX server URL. +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g. /dev/sdc). +* `NODE_VERSION` - Version of `skale-node`. +* `ENDPOINT` - RPC endpoint of the network where Fair Manager is deployed. +* `MANAGER_CONTRACTS` - SKALE Manager alias or address. +* `IMA_CONTRACTS` - IMA alias or address (*Note: Required by boot service, may not be used by Fair itself*). +* `FILEBEAT_HOST` - URL/IP:Port of the Filebeat log server. +* `ENV_TYPE` - Environment type (e.g., 'mainnet', 'devnet'). + +Optional variables: + +* `MONITORING_CONTAINERS` - Enable monitoring containers (`cadvisor`, `node-exporter`). + +Options: + +* `--yes` - Update without confirmation prompt. +* `--pull-config` - Pull configuration for specific sChain (hidden option). 
+ +### Fair Node commands + +> Prefix: `fair node` + +Commands for managing a Fair node during its regular operation (main phase). + +#### Fair Node Info + +Get information about the Fair node. + +```shell +fair node info [--format FORMAT] +``` + +Options: + +* `--format`/`-f` - Output format (`json` or `text`). + +#### Fair Node Initialization + +Initialize the regular operation phase of the Fair node. + +```shell +fair node init +``` + +Arguments: + +* `ENV_FILEPATH` - Path to the environment file for node configuration. + +Required environment variables in `ENV_FILEPATH`: + +* `FAIR_CONTRACTS` - Fair contracts alias or address (e.g., `mainnet`). +* `NODE_VERSION` - Version of `skale-node`. +* `BOOT_ENDPOINT` - RPC endpoint of the Fair network (e.g., `https://rpc.fair.cloud/`). +* `SGX_SERVER_URL` - SGX server URL (e.g., `https://127.0.0.1:1026/`). +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g., `/dev/sdc`). +* `ENV_TYPE` - Environment type (e.g., `mainnet`). + +Optional variables: + +* `ENFORCE_BTRFS` - Format existing filesystem on attached disk (`True`/`False`). +* `FILEBEAT_HOST` - URL of the Filebeat log server to send logs. + +#### Fair Node Registration + +Register the Fair node with the specified IP address. + +```shell +fair node register --ip +``` + +Options: + +* `--ip` - Public IP address for the Fair node (required). + +#### Fair Node Update + +Update the Fair node software. + +```shell +fair node update [--yes] [--force-skaled-start] +``` + +Arguments: + +* `ENV_FILEPATH` - Path to the environment file for node configuration. + +Required environment variables in `ENV_FILEPATH`: + +* `FAIR_CONTRACTS` - Fair contracts alias or address (e.g., `mainnet`). +* `NODE_VERSION` - Version of `skale-node`. +* `BOOT_ENDPOINT` - RPC endpoint of the Fair network (e.g., `https://rpc.fair.cloud/`). +* `SGX_SERVER_URL` - SGX server URL (e.g., `https://127.0.0.1:1026/`). 
+* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g., `/dev/sdc`). +* `ENV_TYPE` - Environment type (e.g., `mainnet`). + +Optional variables: + +* `ENFORCE_BTRFS` - Format existing filesystem on attached disk (`True`/`False`). +* `FILEBEAT_HOST` - URL of the Filebeat log server to send logs. + +Options: + +* `--yes` - Update without confirmation prompt. +* `--force-skaled-start` - Force skaled container to start (hidden option). + +#### Fair Node turn-off + +Turn off the Fair node containers. + +```shell +fair node turn-off [--yes] +``` + +Options: + +* `--yes` - Turn off without confirmation. + +#### Fair Node turn-on + +Turn on the Fair node containers. + +```shell +fair node turn-on [ENV_FILEPATH] [--yes] +``` + +Arguments: + +* `ENV_FILEPATH` - Path to the .env file. + +Options: + +* `--yes` - Turn on without additional confirmation. + +#### Fair Node Migrate + +Switch from boot phase to regular Fair node operation. + +```shell +fair node migrate [--yes] +``` + +Arguments: + +* `ENV_FILEPATH` - Path to the environment file for node configuration. + +Required environment variables in `ENV_FILEPATH`: + +* `FAIR_CONTRACTS` - Fair contracts alias or address (e.g., `mainnet`). +* `NODE_VERSION` - Version of `skale-node`. +* `BOOT_ENDPOINT` - RPC endpoint of the Fair network (e.g., `https://rpc.fair.cloud/`). +* `SGX_SERVER_URL` - SGX server URL (e.g., `https://127.0.0.1:1026/`). +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g., `/dev/sdc`). +* `ENV_TYPE` - Environment type (e.g., `mainnet`). + +Optional variables: + +* `ENFORCE_BTRFS` - Format existing filesystem on attached disk (`True`/`False`). +* `FILEBEAT_HOST` - URL of the Filebeat log server to send logs. + +Options: + +* `--yes` - Migrate without confirmation prompt. + +#### Fair Node Repair + +Toggle fair chain repair mode. 
+ +```shell +fair node repair [--snapshot-from SOURCE] [--yes] +``` + +Options: + +* `--snapshot-from` - Source for snapshots (`any` by default, hidden option). +* `--yes` - Proceed without confirmation prompt. + +#### Fair Node Backup + +Generate a backup archive of the Fair node's state. + +```shell +fair node backup +``` + +Arguments: + +* `BACKUP_FOLDER_PATH` - Path to the folder where the backup file will be saved. + +#### Fair Node Restore + +Restore a Fair node from a backup archive. + +```shell +fair node restore [--config-only] +``` + +Arguments: + +* `BACKUP_PATH` - Path to the backup archive. +* `ENV_FILE` - Path to the .env file for the restored node configuration. + +Options: + +* `--config-only` - Only restore configuration files. + +#### Fair Node Cleanup + +Cleanup Fair node data and configuration. + +```shell +fair node cleanup [--yes] +``` + +Options: + +* `--yes` - Cleanup without confirmation prompt. + +#### Fair Node Change IP + +Change the IP address of the Fair node. + +```shell +fair node change-ip +``` + +Arguments: + +* `IP_ADDRESS` - New public IP address for the Fair node. + +### Fair Chain commands + +> Prefix: `fair chain` + +Commands for managing and monitoring the Fair chain state and configuration. + +#### Fair Chain Record + +Get information about the Fair chain record, including chain name, configuration status, DKG status, and operational metadata. + +```shell +fair chain record [--json] +``` + +Options: + +* `--json` - Output in JSON format instead of formatted table. + +#### Fair Chain Checks + +Get the status of Fair chain checks, including configuration checks and skaled checks. + +```shell +fair chain checks [--json] +``` + +Options: + +* `--json` - Output in JSON format instead of formatted table. + +### Fair Wallet commands + +> Prefix: `fair wallet` + +Commands for managing the node wallet. + +#### Fair Wallet Info + +Get information about the SKALE node wallet. 
+ +```shell +fair wallet info [--format FORMAT] +``` + +Options: + +* `--format`/`-f` - Output format (`json` or `text`). + +#### Fair Wallet Send + +Send ETH from SKALE node wallet to an address. + +```shell +fair wallet send
[--yes] +``` + +Arguments: + +* `ADDRESS` - Destination address for ETH transfer. +* `AMOUNT` - Amount of ETH to send (as float). + +Options: + +* `--yes` - Send without confirmation prompt. + +### Fair Logs commands + +> Prefix: `fair logs` + +Commands for managing and accessing node logs. + +#### Fair CLI Logs + +Fetch the logs of the node-cli. + +```shell +fair logs cli [--debug] +``` + +Options: + +* `--debug` - Show debug logs instead of regular logs. + +#### Fair Logs Dump + +Dump all logs from the connected node. + +```shell +fair logs dump [--container CONTAINER] +``` + +Arguments: + +* `PATH` - Path where the logs dump will be saved. + +Options: + +* `--container`/`-c` - Dump logs only from specified container. + +### Fair SSL commands + +> Prefix: `fair ssl` + +Commands for managing SSL certificates for sChains. + +#### Fair SSL Status + +Check the status of SSL certificates on the node. + +```shell +fair ssl status +``` + +#### Fair SSL Upload -> Prefix: `skale sync-node` +Upload SSL certificate files to the node. -#### Sync node initialization +```shell +fair ssl upload --cert-path --key-path [--force] +``` + +Options: + +* `--cert-path`/`-c` - Path to the SSL certificate file (required). +* `--key-path`/`-k` - Path to the SSL private key file (required). +* `--force`/`-f` - Overwrite existing certificates. + +#### Fair SSL Check + +Check SSL certificate validity and connectivity. + +```shell +fair ssl check [--cert-path CERT_PATH] [--key-path KEY_PATH] [--port PORT] [--type TYPE] [--no-client] [--no-wss] +``` + +Options: + +* `--cert-path`/`-c` - Path to the certificate file (default: system default). +* `--key-path`/`-k` - Path to the key file (default: system default). +* `--port`/`-p` - Port to start SSL health check server (default: from configuration). +* `--type`/`-t` - Check type: `all`, `openssl`, or `skaled` (default: `all`). +* `--no-client` - Skip client connection for openssl check. +* `--no-wss` - Skip WSS server starting for skaled check. 
+ +### Fair Staking commands + +> Prefix: `fair staking` + +Commands for interacting with the Fair staking functionality. + +#### Add allowed receiver + +Allow an address to receive staking fees. + +```shell +fair staking add-receiver +``` + +Arguments: + +* `RECEIVER_ADDRESS` - Address to add to the allowed receivers list. + +#### Remove allowed receiver + +Remove an address from the allowed receivers list. + +```shell +fair staking remove-receiver +``` + +Arguments: + +* `RECEIVER_ADDRESS` - Address to remove from the allowed receivers list. + +Workflow (fees): request fees -> review exit requests -> claim request. + +#### Request fees + +Create a request to claim a specific amount of earned fees (FAIR). Use `--all` to request all. -Initialize full sync SKALE node on current machine +```shell +fair staking request-fees +fair staking request-fees --all +``` + +#### Request send fees + +Create a request to send a specific amount (or all) of earned fees to an address. ```shell -skale sync-node init [ENV_FILE] +fair staking request-send-fees +fair staking request-send-fees --all ``` Arguments: -* `ENV_FILE` - path to .env file (required parameters are listed in the `skale sync-node init` command) +* `TO_ADDRESS` - Destination address for the fee transfer. +* `AMOUNT` - Amount of fees to include in the request (FAIR). + +#### Claim request + +Claim a previously created request by its request ID once it is unlocked. + +```shell +fair staking claim-request +``` + +#### Get exit requests + +List exit (fee withdrawal) requests for the current wallet. Use `--json` for raw JSON output. + +```shell +fair staking exit-requests +fair staking exit-requests --json +``` -You should specify the following environment variables: +Default output (non-JSON) shows: `request_id`, `user`, `node_id`, `amount_wei`, `amount_fair`, `unlock_date (ISO)`. 
-* `DISK_MOUNTPOINT` - disk mount point for storing sChains data -* `DOCKER_LVMPY_STREAM` - stream of `docker-lvmpy` to use -* `CONTAINER_CONFIGS_STREAM` - stream of `skale-node` to use -* `ENDPOINT` - RPC endpoint of the node in the network where SKALE Manager is deployed -* `MANAGER_CONTRACTS_ABI_URL` - URL to SKALE Manager contracts ABI and addresses -* `IMA_CONTRACTS_ABI_URL` - URL to IMA contracts ABI and addresses -* `SCHAIN_NAME` - name of the SKALE chain to sync -* `ENV_TYPE` - environement type (mainnet, testnet, etc) +#### Get earned fee amount + +Get the currently earned (unrequested) fee amount. + +```shell +fair staking earned-fee-amount +``` + +#### Set fee rate + +Set the fee rate (uint16 value) used by the staking logic. + +```shell +fair staking set-fee-rate +``` + +Arguments: + +* `FEE_RATE` - Fee rate value as integer (uint16). + +### Passive Fair Node commands + +> Prefix: `fair passive-node` (passive Fair build) + +Commands for operating a passive Fair node (sync/indexer/archive). + +#### Passive Fair Node Initialization + +Initialize a passive Fair node. + +```shell +fair passive-node init --id [--indexer | --archive] [--snapshot ] +``` + +Arguments: + +* `ENV_FILEPATH` - Path to the environment file with configuration. + +Required environment variables in `ENV_FILEPATH`: + +* `FAIR_CONTRACTS` - Fair Manager contracts alias or address. +* `NODE_VERSION` - Version of `skale-node`. +* `BOOT_ENDPOINT` - RPC endpoint of Fair network. +* `BLOCK_DEVICE` - Absolute path to a dedicated raw block device (e.g., `/dev/sdc`). +* `ENV_TYPE` - Environment type (e.g., `mainnet`, `devnet`). 
Options: -* `--indexer` - run sync node in indexer mode (disable block rotation) -* `--archive` - enable historic state and disable block rotation (can't be used with `--indexer`) -* `--snapshot` - start sync node from snapshot -* `--snapshot-from` - specify the IP of the node to take snapshot from -* `--yes` - initialize without additional confirmation +* `--id` - Numerical node identifier (required). +* `--indexer` - Run in indexer mode (no block rotation). +* `--archive` - Run in archive mode (historical state kept; disables block rotation). Mutually exclusive with `--indexer`. +* `--snapshot ` - Start from provided snapshot URL or from any available source (not allowed together with `--indexer` or `--archive`). + +By default runs a regular sync node. + +#### Passive Fair Node Update + +Update software / configs for passive Fair node. -#### Sync node update +```shell +fair passive-node update [--yes] +``` -Update full sync SKALE node on current machine +#### Passive Fair Node turn-off + +Turn off the Fair passive node containers. ```shell -skale sync-node update [ENV_FILEPATH] +fair passive-node turn-off [--yes] ``` Options: -* `--yes` - update without additional confirmation +* `--yes` - Turn off without confirmation. + +#### Passive Fair Node turn-on + +Turn on the Fair passive node containers. + +```shell +fair passive-node turn-on [ENV_FILEPATH] [--yes] +``` Arguments: -* `ENV_FILEPATH` - path to env file where parameters are defined +* `ENV_FILEPATH` - Path to the .env file. + +Options: -> NOTE: You can just update a file with environment variables used during `skale sync-node init`. +* `--yes` - Turn on without additional confirmation. -#### Sync node cleanup +#### Passive Fair Node Cleanup -Cleanup full sync SKALE node on current machine +Remove all passive Fair node data and containers. 
```shell -skale sync-node cleanup +fair passive-node cleanup [--yes] ``` Options: -* `--yes` - cleanup without additional confirmation +* `--yes` - Proceed without confirmation. -> WARNING: This command will remove all data from the node. +*** ## Exit codes @@ -607,41 +1291,75 @@ Exit codes conventions for SKALE CLI tools `*` - `validator-cli` only\ `**` - `node-cli` only +*** + ## Development ### Setup repo +#### Dependencies + +* Python 3.11 +* Git + +#### Clone the repository + +Clone with HTTPS: + +```shell +git clone https://github.com/skalenetwork/node-cli.git +``` + +Or with SSH: + +```shell +git clone git@github.com:skalenetwork/node-cli.git +``` + +#### Create and source virtual environment + +```shell +python3.11 -m venv venv +source venv/bin/activate +``` + #### Install development dependencies ```shell -pip install -e .[dev] +pip install -e ".[dev]" ``` -##### Add flake8 git hook +#### Generate info.py locally -In file `.git/hooks/pre-commit` add: +Specify the build type (`normal`, `passive`, or `fair`): ```shell -#!/bin/sh -flake8 . +# Example for Standard build +./scripts/generate_info.sh 1.0.0 my-branch normal + +# Example for Passive build +./scripts/generate_info.sh 1.0.0 my-branch passive + +# Example for Fair build +./scripts/generate_info.sh 1.0.0 my-branch fair ``` -### Debugging +#### Add linting git hook -Run commands in dev mode: +In file `.git/hooks/pre-commit` add: ```shell -ENV=dev python main.py YOUR_COMMAND +#!/bin/sh +./venv/bin/ruff check . ``` -### Setting up Travis +> **Note:** This hook assumes your virtual environment is named 'venv' and is located at the root of the repository. 
-Required environment variables: +Make the hook executable: -* `ACCESS_KEY_ID` - DO Spaces/AWS S3 API Key ID -* `SECRET_ACCESS_KEY` - DO Spaces/AWS S3 Secret access key -* `GITHUB_EMAIL` - Email of GitHub user -* `GITHUB_OAUTH_TOKEN` - GitHub auth token +```shell +chmod +x .git/hooks/pre-commit +``` ## Contributing diff --git a/helper-scripts b/helper-scripts index 2541831d..c1f67269 160000 --- a/helper-scripts +++ b/helper-scripts @@ -1 +1 @@ -Subproject commit 2541831d3a8bf6691d994f37f379ac36d760c0a4 +Subproject commit c1f67269955126ac400c2445d2aa81f1a387d964 diff --git a/main.spec b/main.spec index e3844bc1..8b7da407 100644 --- a/main.spec +++ b/main.spec @@ -1,16 +1,15 @@ # -*- mode: python -*- -import importlib.util - +import os block_cipher = None a = Analysis( ['node_cli/main.py'], - pathex=['.'], + pathex=[SPECPATH], datas=[ - ("./text.yml", "data"), - ("./datafiles/skaled-ssl-test", "data/datafiles") + (os.path.join(SPECPATH, "text.yml"), "data"), + (os.path.join(SPECPATH, "datafiles/skaled-ssl-test"), "data/datafiles") ], hiddenimports=[], hookspath=[], diff --git a/node_cli/cli/__init__.py b/node_cli/cli/__init__.py index 1fe20f2e..6f899042 100644 --- a/node_cli/cli/__init__.py +++ b/node_cli/cli/__init__.py @@ -1,4 +1,6 @@ -__version__ = '2.6.3' +from importlib.metadata import version + +__version__ = version('node-cli') if __name__ == '__main__': print(__version__) diff --git a/node_cli/cli/chain.py b/node_cli/cli/chain.py new file mode 100644 index 00000000..10826236 --- /dev/null +++ b/node_cli/cli/chain.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import click + +from node_cli.fair.chain import get_chain_record, get_chain_checks + + +@click.group() +def chain_cli(): + pass + + +@chain_cli.group(help='Fair chain commands') +def chain(): + pass + + +@chain.command('record', help='Get Fair chain record information') +@click.option('--json', 'raw', is_flag=True, help='Output in JSON format') +def chain_record(raw: bool) -> None: + get_chain_record(raw=raw) + + +@chain.command('checks', help='Get Fair chain checks status') +@click.option('--json', 'raw', is_flag=True, help='Output in JSON format') +def chain_checks(raw: bool) -> None: + get_chain_checks(raw=raw) diff --git a/node_cli/cli/exit.py b/node_cli/cli/exit.py index 1ef0223a..c7f57db3 100644 --- a/node_cli/cli/exit.py +++ b/node_cli/cli/exit.py @@ -24,10 +24,10 @@ from node_cli.utils.print_formatters import print_exit_status from node_cli.utils.helper import error_exit, get_request, post_request, abort_if_false from node_cli.utils.exit_codes import CLIExitCodes -from node_cli.utils.texts import Texts +from node_cli.utils.texts import safe_load_texts logger = logging.getLogger(__name__) -TEXTS = Texts() +TEXTS = safe_load_texts() BLUEPRINT_NAME = 'node' @@ -36,20 +36,21 @@ def exit_cli(): pass -@exit_cli.group('exit', help="Exit commands") +@exit_cli.group('exit', help='Exit commands') def node_exit(): pass -@node_exit.command('start', help="Start exiting process") -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to destroy your SKALE node?') +@node_exit.command('start', help='Start exiting process') +@click.option( + 
'--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to destroy your SKALE node?', +) def start(): - status, payload = post_request( - blueprint=BLUEPRINT_NAME, - method='exit/start' - ) + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='exit/start') if status == 'ok': msg = TEXTS['exit']['start'] logger.info(msg) @@ -58,13 +59,10 @@ def start(): error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) -@node_exit.command('status', help="Get exit process status") +@node_exit.command('status', help='Get exit process status') @click.option('--format', '-f', type=click.Choice(['json', 'text'])) def status(format): - status, payload = get_request( - blueprint=BLUEPRINT_NAME, - method='exit/status' - ) + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='exit/status') if status == 'ok': exit_status = payload if format == 'json': @@ -75,6 +73,6 @@ def status(format): error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) -@node_exit.command('finalize', help="Finalize exit process") +@node_exit.command('finalize', help='Finalize exit process') def finalize(): pass diff --git a/node_cli/cli/fair_boot.py b/node_cli/cli/fair_boot.py new file mode 100644 index 00000000..d5ff6039 --- /dev/null +++ b/node_cli/cli/fair_boot.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. 
+# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import click + +from node_cli.configs import DEFAULT_NODE_BASE_PORT +from node_cli.core.node import get_node_info, get_node_signature +from node_cli.core.node import register_node as register +from node_cli.fair.boot import init, update +from node_cli.utils.helper import IP_TYPE, abort_if_false, error_exit, streamed_cmd + + +@click.group() +def fair_boot_cli(): + pass + + +@fair_boot_cli.group(help='Commands for the Fair Boot phase.') +def boot(): + pass + + +@boot.command('info', help='Get info about Fair node (Boot Phase).') +@click.option('--format', '-f', type=click.Choice(['json', 'text'])) +def fair_boot_info(format): + get_node_info(format) + + +@boot.command('init', help='Initialize Fair node (Boot Phase).') +@click.argument('env_file') +@streamed_cmd +def init_boot(env_file): + init(env_file) + + +@boot.command('register', help='Register Fair node in SKALE Manager (during Boot Phase).') +@click.option('--name', '-n', required=True, prompt='Enter fair node name', help='Fair node name') +@click.option( + '--ip', + prompt='Enter node public IP', + type=IP_TYPE, + help='Public IP for RPC connections & consensus (required)', +) +@click.option( + '--port', '-p', default=DEFAULT_NODE_BASE_PORT, type=int, help='Base port for node sChains' +) +@click.option('--domain', '-d', prompt='Enter node domain name', type=str, help='Node domain name') +@streamed_cmd +def register_boot(name, ip, port, domain): + register(name=name, p2p_ip=ip, public_ip=ip, port=port, domain_name=domain) + + +@boot.command('signature', help='Get fair node signature for a validator ID (during Boot Phase).') +@click.argument('validator_id') +def signature_boot(validator_id): + res = get_node_signature(validator_id) + if isinstance(res, dict) and 'error' in res: + error_exit(f'Error getting signature: {res.get("message", res)}') + print(f'Signature: {res}') + + 
+@boot.command('update', help='Update Fair node from .env file') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to update Fair node software?', +) +@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str) +@click.argument('env_file') +@streamed_cmd +def update_node(env_file, pull_config_for_schain): + update( + config_file=env_file, + pull_config_for_schain=pull_config_for_schain, + ) diff --git a/node_cli/cli/fair_node.py b/node_cli/cli/fair_node.py new file mode 100644 index 00000000..efdf3149 --- /dev/null +++ b/node_cli/cli/fair_node.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import click + +from node_cli.cli.info import TYPE +from node_cli.core.node import backup +from node_cli.fair.active import change_ip as change_ip_fair +from node_cli.fair.active import exit as exit_fair +from node_cli.fair.active import get_node_info, migrate_from_boot +from node_cli.fair.active import register as register_fair +from node_cli.fair.active import restore as restore_fair +from node_cli.fair.active import set_domain_name as set_domain_name_fair +from node_cli.fair.common import cleanup as cleanup_fair +from node_cli.fair.common import init as init_fair +from node_cli.fair.common import repair_chain +from node_cli.fair.common import turn_off as turn_off_fair +from node_cli.fair.common import turn_on as turn_on_fair +from node_cli.fair.common import update as update_fair +from node_cli.utils.helper import IP_TYPE, URL_OR_ANY_TYPE, abort_if_false, streamed_cmd +from node_cli.utils.node_type import NodeMode +from node_cli.utils.texts import safe_load_texts + +TEXTS = safe_load_texts() + + +@click.group() +def fair_node_cli(): + pass + + +@fair_node_cli.group(help='Commands for regular Fair Node operations.') +def node(): + pass + + +@node.command('info', help='Get info about Fair node.') +@click.option('--format', '-f', type=click.Choice(['json', 'text'])) +def fair_node_info(format): + get_node_info(format) + + +@node.command('init', help='Initialize regular Fair node') +@click.argument('config_file') +@streamed_cmd +def init_node(config_file: str): + init_fair(node_mode=NodeMode.ACTIVE, config_file=config_file) + + +@node.command('register', help=TEXTS['fair']['node']['register']['help']) +@click.option('--ip', required=True, type=IP_TYPE, help=TEXTS['fair']['node']['register']['ip']) +def register(ip: str) -> None: + register_fair(ip=ip) + + +@node.command('update', help='Update Fair node') +@click.argument('config_file') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to 
update Fair node software?', +) +@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str) +@click.option( + '--force-skaled-start', + 'force_skaled_start', + hidden=True, + type=bool, + default=False, + is_flag=True, +) +@streamed_cmd +def update_node(config_file: str, pull_config_for_schain, force_skaled_start: bool): + update_fair( + node_mode=NodeMode.ACTIVE, + config_file=config_file, + pull_config_for_schain=pull_config_for_schain, + force_skaled_start=force_skaled_start, + ) + + +@node.command('backup', help='Generate backup file for the Fair node.') +@click.argument('backup_folder_path') +@streamed_cmd +def backup_node(backup_folder_path): + backup(backup_folder_path) + + +@node.command('restore', help='Restore Fair node from a backup file.') +@click.argument('backup_path') +@click.argument('config_file') +@click.option( + '--config-only', + help='Only restore configuration files in .skale and artifacts', + is_flag=True, + hidden=True, +) +@streamed_cmd +def restore_node(backup_path, config_file, config_only): + restore_fair(backup_path, config_file, config_only) + + +@node.command('migrate', help='Switch from boot to regular Fair node.') +@click.argument('config_file') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to migrate to regular Fair node? 
The action cannot be undone', +) +@streamed_cmd +def migrate_node(config_file: str) -> None: + migrate_from_boot(config_file=config_file) + + +@node.command('repair', help='Toggle fair chain repair mode') +@click.option( + '--snapshot', + type=URL_OR_ANY_TYPE, + default='any', + hidden=True, + help=TEXTS['fair']['node']['repair']['snapshot'], +) +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt=TEXTS['fair']['node']['repair']['warning'], +) +@streamed_cmd +def repair(snapshot: str = 'any') -> None: + repair_chain(snapshot_from=snapshot) + + +@node.command('cleanup', help='Remove all FAIR node data and containers.') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to remove all FAIR node data and containers?', +) +@click.option('--prune', is_flag=True, help='Prune docker system.') +@streamed_cmd +def cleanup_node(prune): + cleanup_fair(node_mode=NodeMode.ACTIVE, prune=prune) + + +@node.command('change-ip', help=TEXTS['fair']['node']['change-ip']['help']) +@click.argument('ip', type=IP_TYPE) +@streamed_cmd +def change_ip(ip: str) -> None: + change_ip_fair(ip=ip) + + +@node.command('exit', help=TEXTS['fair']['node']['exit']['help']) +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt=TEXTS['fair']['node']['exit']['prompt'], +) +@streamed_cmd +def exit_node() -> None: + exit_fair() + + +@node.command('set-domain', help='Set node domain name') +@click.option('--domain', '-d', prompt='Enter node domain name', type=str, help='Node domain name') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to set domain name?', +) +@streamed_cmd +def set_domain_name(domain): + set_domain_name_fair(domain) + + +@node.command('turn-off', help='Turn off the node') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + 
expose_value=False, + prompt='Are you sure you want to turn off the node?', +) +@streamed_cmd +def turn_off_node() -> None: + turn_off_fair(node_type=TYPE) + + +@node.command('turn-on', help='Turn on the node') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to turn on the node?', +) +@click.argument('config_file') +@streamed_cmd +def turn_on_node(config_file: str) -> None: + turn_on_fair(env_file=config_file, node_type=TYPE) diff --git a/node_cli/cli/health.py b/node_cli/cli/health.py index fef51a3d..27607ecb 100644 --- a/node_cli/cli/health.py +++ b/node_cli/cli/health.py @@ -18,12 +18,12 @@ # along with this program. If not, see . import click -from node_cli.utils.texts import Texts +from node_cli.utils.texts import safe_load_texts from node_cli.core.health import get_containers, get_schains_checks, get_sgx_info -G_TEXTS = Texts() +G_TEXTS = safe_load_texts() TEXTS = G_TEXTS['health'] @@ -44,12 +44,7 @@ def containers(all): @health.command(help=TEXTS['schains_checks']['help']) -@click.option( - '--json', - 'json_format', - help=G_TEXTS['common']['json'], - is_flag=True -) +@click.option('--json', 'json_format', help=G_TEXTS['common']['json'], is_flag=True) def schains(json_format: bool) -> None: get_schains_checks(json_format) diff --git a/node_cli/cli/logs.py b/node_cli/cli/logs.py index 7472bcbf..de11e889 100644 --- a/node_cli/cli/logs.py +++ b/node_cli/cli/logs.py @@ -30,12 +30,12 @@ def logs_cli(): pass -@logs_cli.group(help="Logs commands") +@logs_cli.group(help='Logs commands') def logs(): pass -@logs.command(help="Fetch the logs of the node-cli") +@logs.command(help='Fetch the logs of the node-cli') @click.option('--debug', is_flag=True) def cli(debug): filepath = DEBUG_LOG_FILEPATH if debug else LOG_FILEPATH @@ -43,13 +43,8 @@ def cli(debug): print(fin.read()) -@logs.command(help="Dump all logs from the connected node") -@click.option( - '--container', - '-c', - help='Dump logs 
only from specified container', - default=None -) +@logs.command(help='Dump all logs from the connected node') +@click.option('--container', '-c', help='Dump logs only from specified container', default=None) @click.argument('path') def dump(container, path): res = create_logs_dump(path, container) diff --git a/node_cli/cli/lvmpy.py b/node_cli/cli/lvmpy.py index 473defa8..2e9772aa 100644 --- a/node_cli/cli/lvmpy.py +++ b/node_cli/cli/lvmpy.py @@ -20,11 +20,11 @@ import click from node_cli.utils.helper import abort_if_false -from node_cli.utils.texts import Texts +from node_cli.utils.texts import safe_load_texts from lvmpy.src.app import run as run_lvmpy from lvmpy.src.health import heal_service -G_TEXTS = Texts() +G_TEXTS = safe_load_texts() TEXTS = G_TEXTS['lvmpy'] @@ -44,7 +44,7 @@ def health(): is_flag=True, callback=abort_if_false, expose_value=False, - prompt=TEXTS['run']['prompt'] + prompt=TEXTS['run']['prompt'], ) def run(): run_lvmpy() @@ -56,7 +56,7 @@ def run(): is_flag=True, callback=abort_if_false, expose_value=False, - prompt=TEXTS['heal']['prompt'] + prompt=TEXTS['heal']['prompt'], ) def heal(): heal_service() diff --git a/node_cli/cli/node.py b/node_cli/cli/node.py index 8eee2d96..b34fd155 100644 --- a/node_cli/cli/node.py +++ b/node_cli/cli/node.py @@ -17,10 +17,15 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+from typing import get_args + import click -from node_cli.core.node import configure_firewall_rules +from skale_core.types import EnvType +from node_cli.cli.info import TYPE from node_cli.core.node import ( + cleanup as cleanup_skale, + configure_firewall_rules, get_node_signature, init, restore, @@ -33,18 +38,15 @@ turn_on, get_node_info, set_domain_name, - run_checks + run_checks, ) from node_cli.configs import DEFAULT_NODE_BASE_PORT -from node_cli.configs.env import ALLOWED_ENV_TYPES +from node_cli.core.node_options import upsert_node_mode from node_cli.utils.decorators import check_inited -from node_cli.utils.helper import ( - abort_if_false, - safe_load_texts, - streamed_cmd, - IP_TYPE -) -from node_cli.utils.meta import get_meta_info +from node_cli.utils.helper import abort_if_false, streamed_cmd, IP_TYPE +from node_cli.utils.node_type import NodeMode +from node_cli.utils.texts import safe_load_texts +from node_cli.utils.meta import CliMetaManager from node_cli.utils.print_formatters import print_meta_info @@ -56,70 +58,61 @@ def node_cli(): pass -@node_cli.group(help="SKALE node commands") +@node_cli.group(help='SKALE node commands') def node(): pass -@node.command('info', help="Get info about SKALE node") +@node.command('info', help='Get info about SKALE node') @click.option('--format', '-f', type=click.Choice(['json', 'text'])) def node_info(format): get_node_info(format) -@node.command('register', help="Register current node in the SKALE Manager") -@click.option( - '--name', '-n', - required=True, - prompt="Enter node name", - help='SKALE node name' -) +@node.command('register', help='Register current node in the SKALE Manager') +@click.option('--name', '-n', required=True, prompt='Enter node name', help='SKALE node name') @click.option( '--ip', - prompt="Enter node public IP", + prompt='Enter node public IP', type=IP_TYPE, - help='Public IP for RPC connections & consensus (required)' + help='Public IP for RPC connections & consensus (required)', ) 
@click.option( - '--port', '-p', - default=DEFAULT_NODE_BASE_PORT, - type=int, - help='Base port for node sChains' -) -@click.option( - '--domain', '-d', - prompt="Enter node domain name", - type=str, - help='Node domain name' + '--port', '-p', default=DEFAULT_NODE_BASE_PORT, type=int, help='Base port for node sChains' ) +@click.option('--domain', '-d', prompt='Enter node domain name', type=str, help='Node domain name') @streamed_cmd def register_node(name, ip, port, domain): register(name, ip, ip, port, domain) -@node.command('init', help="Initialize SKALE node") -@click.argument('env_file') +@node.command('init', help='Initialize SKALE node') +@click.argument('config_file') @streamed_cmd -def init_node(env_file): - init(env_file) +def init_node(config_file): + init(config_file=config_file, node_type=TYPE) @node.command('update', help='Update node from .env file') -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to update SKALE node software?') -@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str) @click.option( - '--unsafe', - 'unsafe_ok', - help='Allow unsafe update', - hidden=True, - is_flag=True + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to update SKALE node software?', ) -@click.argument('env_file') +@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str) +@click.option('--unsafe', 'unsafe_ok', help='Allow unsafe update', hidden=True, is_flag=True) +@click.argument('config_file') @streamed_cmd -def update_node(env_file, pull_config_for_schain, unsafe_ok): - update(env_file, pull_config_for_schain, unsafe_ok) +def update_node(config_file, pull_config_for_schain, unsafe_ok): + update( + node_mode=NodeMode.ACTIVE, + config_file=config_file, + pull_config_for_schain=pull_config_for_schain, + node_type=TYPE, + unsafe_ok=unsafe_ok, + ) @node.command('signature', help='Get node 
signature for given validator id') @@ -129,43 +122,50 @@ def signature(validator_id): print(f'Signature: {res}') -@node.command('backup', help="Generate backup file to restore SKALE node on another machine") +@node.command('backup', help='Generate backup file to restore SKALE node on another machine') @click.argument('backup_folder_path') @streamed_cmd def backup_node(backup_folder_path): backup(backup_folder_path) -@node.command('restore', help="Restore SKALE node on another machine") +@node.command('restore', help='Restore SKALE node on another machine') @click.argument('backup_path') @click.argument('env_file') @click.option( - '--no-snapshot', - help='Do not restore sChains from snapshot', - is_flag=True, - hidden=True + '--no-snapshot', help='Do not restore sChains from snapshot', is_flag=True, hidden=True ) @click.option( '--config-only', help='Only restore configuration files in .skale and artifacts', is_flag=True, - hidden=True + hidden=True, ) @streamed_cmd def restore_node(backup_path, env_file, no_snapshot, config_only): - restore(backup_path, env_file, no_snapshot, config_only) + restore( + backup_path=backup_path, + config_file=env_file, + no_snapshot=no_snapshot, + config_only=config_only, + node_type=TYPE, + ) -@node.command('maintenance-on', help="Set SKALE node into maintenance mode") -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to set SKALE node into maintenance mode?') +@node.command('maintenance-on', help='Set SKALE node into maintenance mode') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to set SKALE node into maintenance mode?', +) @streamed_cmd def set_node_in_maintenance(): set_maintenance_mode_on() -@node.command('maintenance-off', help="Remove SKALE node from maintenance mode") +@node.command('maintenance-off', help='Remove SKALE node from maintenance mode') @streamed_cmd def 
remove_node_from_maintenance(): set_maintenance_mode_off() @@ -173,92 +173,104 @@ def remove_node_from_maintenance(): @node.command('turn-off', help='Turn off the node') @click.option( - '--maintenance-on', - help='Set SKALE node into maintenance mode before turning off', - is_flag=True + '--maintenance-on', help='Set SKALE node into maintenance mode before turning off', is_flag=True ) -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to turn off the node?') @click.option( - '--unsafe', - 'unsafe_ok', - help='Allow unsafe turn-off', - hidden=True, - is_flag=True + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to turn off the node?', ) +@click.option('--unsafe', 'unsafe_ok', help='Allow unsafe turn-off', hidden=True, is_flag=True) @streamed_cmd def _turn_off(maintenance_on, unsafe_ok): - turn_off(maintenance_on, unsafe_ok) + turn_off(node_type=TYPE, maintenance_on=maintenance_on, unsafe_ok=unsafe_ok) @node.command('turn-on', help='Turn on the node') @click.option( - '--maintenance-off', - help='Turn off maintenance mode after turning on the node', - is_flag=True + '--maintenance-off', help='Turn off maintenance mode after turning on the node', is_flag=True ) @click.option( '--sync-schains', help='Run all sChains in the snapshot download mode', is_flag=True, - hidden=True + hidden=True, +) +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to turn on the node?', ) -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to turn on the node?') @click.argument('env_file') @streamed_cmd def _turn_on(maintenance_off, sync_schains, env_file): - turn_on(maintenance_off, sync_schains, env_file) + turn_on(maintenance_off, sync_schains, env_file, node_type=TYPE) -@node.command('set-domain', help="Set node domain name") 
+@node.command('set-domain', help='Set node domain name') +@click.option('--domain', '-d', prompt='Enter node domain name', type=str, help='Node domain name') @click.option( - '--domain', '-d', - prompt="Enter node domain name", - type=str, - help='Node domain name' + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to set domain name?', ) -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to set domain name?') @streamed_cmd def _set_domain_name(domain): set_domain_name(domain) -@node.command(help='Check if node meet network requirements') +@node.command(help='Check if node meets network requirements') @click.option( - '--network', '-n', - type=click.Choice(ALLOWED_ENV_TYPES), + '--network', + '-n', + type=click.Choice(get_args(EnvType)), default='mainnet', - help='Network to check' + help='Network to check', ) def check(network): - run_checks(network) + node_mode = upsert_node_mode() + run_checks(node_type=TYPE, node_mode=node_mode, network=network) @node.command(help='Reconfigure nftables rules') @click.option('--monitoring', is_flag=True) -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to reconfigure firewall rules?') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to reconfigure firewall rules?', +) def configure_firewall(monitoring): configure_firewall_rules(enable_monitoring=monitoring) @node.command(help='Show node version information') @check_inited -@click.option( - '--json', - 'raw', - is_flag=True, - help=TEXTS['common']['json'] -) +@click.option('--json', 'raw', is_flag=True, help=TEXTS['common']['json']) def version(raw: bool) -> None: - meta_info = get_meta_info(raw=raw) + meta_info = CliMetaManager().get_meta_info(raw=raw) if raw: print(meta_info) else: print_meta_info(meta_info) + + 
+@node.command('cleanup', help='Remove all SKALE node data and containers.') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to remove all SKALE node data and containers?', +) +@click.option('--prune', is_flag=True, help='Prune docker system.') +@streamed_cmd +def cleanup_node(prune): + cleanup_skale(node_mode=NodeMode.ACTIVE, prune=prune) diff --git a/node_cli/cli/passive_fair_node.py b/node_cli/cli/passive_fair_node.py new file mode 100644 index 00000000..7bac3581 --- /dev/null +++ b/node_cli/cli/passive_fair_node.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import click + +from node_cli.cli.info import TYPE +from node_cli.fair.common import cleanup as cleanup_fair +from node_cli.fair.common import init as init_fair +from node_cli.fair.common import turn_off as turn_off_fair +from node_cli.fair.common import turn_on as turn_on_fair +from node_cli.fair.common import update as update_fair +from node_cli.fair.passive import setup_fair_passive +from node_cli.utils.helper import ( + URL_OR_ANY_TYPE, + abort_if_false, + error_exit, + streamed_cmd, +) +from node_cli.utils.node_type import NodeMode +from node_cli.utils.texts import safe_load_texts + +TEXTS = safe_load_texts() + + +@click.group() +def passive_fair_node_cli(): + pass + + +@passive_fair_node_cli.group(help='Commands for passive Fair Node operations.') +def passive_node(): + pass + + +@passive_node.command('init', help='Initialize a passive Fair node') +@click.argument('config_file') +@click.option('--id', required=True, type=int, help=TEXTS['fair']['node']['setup']['id']) +@click.option('--indexer', help=TEXTS['passive_node']['init']['indexer'], is_flag=True) +@click.option('--archive', help=TEXTS['passive_node']['init']['archive'], is_flag=True) +@click.option( + '--snapshot', + type=URL_OR_ANY_TYPE, + default=None, + help=TEXTS['passive_node']['init']['snapshot_from'], +) +@streamed_cmd +def init_passive_node( + config_file: str, id: int, indexer: bool, archive: bool, snapshot: str | None +): + if indexer and archive: + error_exit('Cannot use both --indexer and --archive options') + if (indexer or archive) and snapshot == 'any': + error_exit('Cannot use any for indexer/archive node') + init_fair( + node_mode=NodeMode.PASSIVE, + config_file=config_file, + node_id=id, + indexer=indexer, + archive=archive, + snapshot=snapshot, + ) + + +@passive_node.command('update', help='Update Fair node') +@click.argument('config_file') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to update Fair 
node software?', +) +@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str) +@click.option( + '--force-skaled-start', + 'force_skaled_start', + hidden=True, + type=bool, + default=False, + is_flag=True, +) +@streamed_cmd +def update_node(config_file: str, pull_config_for_schain, force_skaled_start: bool): + update_fair( + node_mode=NodeMode.PASSIVE, + config_file=config_file, + pull_config_for_schain=pull_config_for_schain, + force_skaled_start=force_skaled_start, + ) + + +@passive_node.command('cleanup', help='Remove all FAIR node data and containers.') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to remove all FAIR node data and containers?', +) +@click.option('--prune', is_flag=True, help='Prune docker system.') +@streamed_cmd +def cleanup_node(prune): + cleanup_fair(node_mode=NodeMode.PASSIVE, prune=prune) + + +@passive_node.command('setup', help=TEXTS['fair']['node']['setup']['help']) +@click.option('--id', required=True, type=int, help=TEXTS['fair']['node']['setup']['id']) +def _setup(id: int) -> None: + setup_fair_passive(node_id=id) + + +@passive_node.command('turn-off', help='Turn off the node') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to turn off the node?', +) +@streamed_cmd +def turn_off_node() -> None: + turn_off_fair(node_type=TYPE) + + +@passive_node.command('turn-on', help='Turn on the node') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to turn on the node?', +) +@click.argument('config_file') +@streamed_cmd +def turn_on_node(config_file: str) -> None: + turn_on_fair(env_file=config_file, node_type=TYPE) diff --git a/node_cli/cli/sync_node.py b/node_cli/cli/passive_node.py similarity index 61% rename from node_cli/cli/sync_node.py rename to node_cli/cli/passive_node.py index 
9dc4333a..f86ba99b 100644 --- a/node_cli/cli/sync_node.py +++ b/node_cli/cli/passive_node.py @@ -21,32 +21,27 @@ import click -from node_cli.core.node import init_sync, update_sync, cleanup_sync -from node_cli.utils.helper import ( - abort_if_false, - error_exit, - safe_load_texts, - streamed_cmd, - URL_TYPE, -) -from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.core.node import init_passive, update_passive, cleanup as cleanup_skale +from node_cli.utils.helper import abort_if_false, error_exit, streamed_cmd, URL_TYPE +from node_cli.utils.node_type import NodeMode +from node_cli.utils.texts import safe_load_texts G_TEXTS = safe_load_texts() -TEXTS = G_TEXTS['sync_node'] +TEXTS = G_TEXTS['passive_node'] @click.group() -def sync_node_cli(): +def passive_node_cli(): pass -@sync_node_cli.group(help='SKALE sync node commands') -def sync_node(): +@passive_node_cli.group(help='SKALE passive node commands') +def passive_node(): pass -@sync_node.command('init', help=TEXTS['init']['help']) +@passive_node.command('init', help=TEXTS['init']['help']) @click.argument('env_file') @click.option('--indexer', help=TEXTS['init']['indexer'], is_flag=True) @click.option('--archive', help=TEXTS['init']['archive'], is_flag=True) @@ -55,18 +50,15 @@ def sync_node(): '--snapshot-from', type=URL_TYPE, default=None, hidden=True, help=TEXTS['init']['snapshot_from'] ) @streamed_cmd -def _init_sync( +def _init_passive( env_file, indexer: bool, archive: bool, snapshot: bool, snapshot_from: Optional[str] ) -> None: if indexer and archive: - error_exit( - 'Cannot use both --indexer and --archive options', - exit_code=CLIExitCodes.FAILURE, - ) - init_sync(env_file, indexer, archive, snapshot, snapshot_from) + error_exit('Cannot use both --indexer and --archive options') + init_passive(env_file, indexer, archive, snapshot, snapshot_from) -@sync_node.command('update', help='Update sync node from .env file') +@passive_node.command('update', help='Update passive node from .env file') 
@click.option( '--yes', is_flag=True, @@ -74,21 +66,21 @@ def _init_sync( expose_value=False, prompt='Are you sure you want to update SKALE node software?', ) -@click.option('--unsafe', 'unsafe_ok', help='Allow unsafe update', hidden=True, is_flag=True) @click.argument('env_file') @streamed_cmd -def _update_sync(env_file, unsafe_ok): - update_sync(env_file) +def _update_passive(env_file): + update_passive(env_file) -@sync_node.command('cleanup', help='Remove sync node data and containers') +@passive_node.command('cleanup', help='Remove all SKALE node data and containers.') @click.option( '--yes', is_flag=True, callback=abort_if_false, expose_value=False, - prompt='Are you sure you want to remove all node containers and data?', + prompt='Are you sure you want to remove all SKALE node data and containers?', ) +@click.option('--prune', is_flag=True, help='Prune docker system.') @streamed_cmd -def _cleanup_sync() -> None: - cleanup_sync() +def cleanup_node(prune): + cleanup_skale(node_mode=NodeMode.PASSIVE, prune=prune) \ No newline at end of file diff --git a/node_cli/cli/resources_allocation.py b/node_cli/cli/resources_allocation.py deleted file mode 100644 index c8ed758c..00000000 --- a/node_cli/cli/resources_allocation.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# -# This file is part of node-cli -# -# Copyright (C) 2019 SKALE Labs -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. 
If not, see . - -import json -import click - -from node_cli.core.resources import ( - get_resource_allocation_info, - generate_resource_allocation_config -) -from node_cli.utils.helper import abort_if_false, safe_load_texts - -TEXTS = safe_load_texts() - - -@click.group() -def resources_allocation_cli(): - pass - - -@resources_allocation_cli.group(help="Resources allocation commands") -def resources_allocation(): - pass - - -@resources_allocation.command('show', help="Show resources allocation file") -def show(): - resource_allocation_info = get_resource_allocation_info() - if resource_allocation_info: - print(json.dumps(resource_allocation_info, indent=4)) - else: - print('No resources allocation file on this machine') - - -@resources_allocation.command('generate', - help="Generate/update resources allocation file") -@click.argument('env_file') -@click.option( - '--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to generate/update resource allocation file?' 
-) -@click.option('--force', '-f', is_flag=True, - help='Rewrite if already exists') -def generate(env_file, force): - generate_resource_allocation_config(env_file=env_file, force=force) diff --git a/node_cli/cli/schains.py b/node_cli/cli/schains.py index c6ef4486..6bb1fce2 100644 --- a/node_cli/cli/schains.py +++ b/node_cli/cli/schains.py @@ -21,6 +21,8 @@ import click +from skale_core.settings import get_settings + from node_cli.utils.helper import abort_if_false, URL_TYPE from node_cli.core.schains import ( describe, @@ -30,8 +32,9 @@ show_config, show_dkg_info, show_schains, - toggle_schain_repair_mode + toggle_schain_repair_mode, ) +from node_cli.cli.info import TYPE @click.group() @@ -39,17 +42,13 @@ def schains_cli() -> None: pass -@schains_cli.group('schains', help="Node sChains commands") +@schains_cli.group('schains', help='Node sChains commands') def schains() -> None: pass -@schains.command(help="List of sChains served by connected node") -@click.option( - '-n', '--names', - help='Shows only chain names', - is_flag=True -) +@schains.command(help='List of sChains served by connected node') +@click.option('-n', '--names', help='Shows only chain names', is_flag=True) def ls(names: bool) -> None: if names: schains: str = get_schains_by_artifacts() @@ -58,17 +57,13 @@ def ls(names: bool) -> None: show_schains() -@schains.command(help="DKG statuses for each sChain on the node") -@click.option( - '--all', '-a', 'all_', - help='Shows active and deleted sChains', - is_flag=True -) +@schains.command(help='DKG statuses for each sChain on the node') +@click.option('--all', '-a', 'all_', help='Shows active and deleted sChains', is_flag=True) def dkg(all_: bool) -> None: show_dkg_info(all_) -@schains.command('config', help="sChain config") +@schains.command('config', help='sChain config') @click.argument('schain_name') def get_schain_config(schain_name: str) -> None: show_config(schain_name) @@ -82,15 +77,19 @@ def show_rules(schain_name: str) -> None: 
@schains.command('repair', help='Toggle schain repair mode') @click.argument('schain_name') -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure? Repair mode may corrupt working SKALE chain data.') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure? Repair mode may corrupt working SKALE chain data.', +) @click.option( '--snapshot-from', type=URL_TYPE, default=None, hidden=True, - help='Ip of the node from to download snapshot from' + help='Ip of the node from to download snapshot from', ) def repair(schain_name: str, snapshot_from: Optional[str] = None) -> None: toggle_schain_repair_mode(schain_name, snapshot_from=snapshot_from) @@ -98,12 +97,7 @@ def repair(schain_name: str, snapshot_from: Optional[str] = None) -> None: @schains.command('info', help='Show info about schain') @click.argument('schain_name') -@click.option( - '--json', - 'json_format', - help='Show info in JSON format', - is_flag=True -) +@click.option('--json', 'json_format', help='Show info in JSON format', is_flag=True) def info_(schain_name: str, json_format: bool) -> None: describe(schain_name, raw=json_format) @@ -112,11 +106,12 @@ def info_(schain_name: str, json_format: bool) -> None: @click.argument('schain_name') @click.argument('snapshot_path') @click.option('--schain-type', default='medium') -@click.option('--env-type', default=None) -def restore( - schain_name: str, - snapshot_path: str, - schain_type: str, - env_type: Optional[str] -) -> None: - restore_schain_from_snapshot(schain_name, snapshot_path) +def restore(schain_name: str, snapshot_path: str, schain_type: str) -> None: + settings = get_settings() + restore_schain_from_snapshot( + schain_name, + snapshot_path, + node_type=TYPE, + env_type=settings.env_type, + schain_type=schain_type, + ) diff --git a/node_cli/cli/ssl.py b/node_cli/cli/ssl.py index 89b81b51..c22ac226 100644 --- a/node_cli/cli/ssl.py +++ 
b/node_cli/cli/ssl.py @@ -21,7 +21,8 @@ from terminaltables import SingleTable from node_cli.utils.exit_codes import CLIExitCodes -from node_cli.utils.helper import safe_load_texts, error_exit +from node_cli.utils.helper import error_exit +from node_cli.utils.texts import safe_load_texts from node_cli.configs.ssl import DEFAULT_SSL_CHECK_PORT, SSL_CERT_FILEPATH, SSL_KEY_FILEPATH from node_cli.core.ssl import check_cert, upload_cert, cert_status @@ -35,12 +36,12 @@ def ssl_cli(): pass -@ssl_cli.group('ssl', help="sChains SSL commands") +@ssl_cli.group('ssl', help='sChains SSL commands') def ssl(): pass -@ssl.command(help="Status of the SSL certificates on the node") +@ssl.command(help='Status of the SSL certificates on the node') def status(): status, payload = cert_status() if status == 'ok': @@ -49,7 +50,7 @@ def status(): else: table_data = [ ['Issued to', payload['issued_to']], - ['Expiration date', payload['expiration_date']] + ['Expiration date', payload['expiration_date']], ] table = SingleTable(table_data) print('SSL certificates status:') @@ -58,69 +59,46 @@ def status(): error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) -@ssl.command(help="Upload new SSL certificates") +@ssl.command(help='Upload new SSL certificates') +@click.option('--key-path', '-k', prompt='Enter path to the key file', help='Path to the key file') @click.option( - '--key-path', '-k', - prompt="Enter path to the key file", - help='Path to the key file' -) -@click.option( - '--cert-path', '-c', - prompt="Enter path to the certificate file", - help='Path to the certificate file' + '--cert-path', + '-c', + prompt='Enter path to the certificate file', + help='Path to the certificate file', ) -@click.option('--force', '-f', is_flag=True, - help='Overwrite existing certificates') +@click.option('--force', '-f', is_flag=True, help='Overwrite existing certificates') def upload(key_path, cert_path, force): status, payload = upload_cert(cert_path, key_path, force) if status == 'ok': 
print(TEXTS['ssl']['uploaded']) else: - error_exit(payload, exit_code=CLIExitCodes.FAILURE) + error_exit(payload) -@ssl.command(help="Check certificates") -@click.option( - '--key-path', '-k', - help='Path to the key file', - default=SSL_KEY_FILEPATH -) -@click.option( - '--cert-path', '-c', - help='Path to the certificate file', - default=SSL_CERT_FILEPATH -) +@ssl.command(help='Check certificates') +@click.option('--key-path', '-k', help='Path to the key file', default=SSL_KEY_FILEPATH) +@click.option('--cert-path', '-c', help='Path to the certificate file', default=SSL_CERT_FILEPATH) @click.option( - '--port', '-p', - help='Port to start ssl healtcheck server', + '--port', + '-p', + help='Port to start ssl healthcheck server', type=int, - default=DEFAULT_SSL_CHECK_PORT + default=DEFAULT_SSL_CHECK_PORT, ) @click.option( - '--type', '-t', + '--type', + '-t', 'type_', help='Check type', type=click.Choice(['all', 'openssl', 'skaled']), - default='all' -) -@click.option( - '--no-client', - is_flag=True, - help='Skip client connection for openssl check' -) -@click.option( - '--no-wss', - is_flag=True, - help='Skip wss server starting for skaled check' + default='all', ) +@click.option('--no-client', is_flag=True, help='Skip client connection for openssl check') +@click.option('--no-wss', is_flag=True, help='Skip wss server starting for skaled check') def check(key_path, cert_path, port, no_client, type_, no_wss): status, payload = check_cert( - cert_path, - key_path, - port=port, - check_type=type_, - no_client=no_client, - no_wss=no_wss + cert_path, key_path, port=port, check_type=type_, no_client=no_client, no_wss=no_wss ) if status == 'ok': print(TEXTS['ssl']['check_passed']) diff --git a/node_cli/cli/staking.py b/node_cli/cli/staking.py new file mode 100644 index 00000000..8cb08d2d --- /dev/null +++ b/node_cli/cli/staking.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program 
is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import click + +from node_cli.fair.staking import ( + add_allowed_receiver, + remove_allowed_receiver, + set_fee_rate, + request_fees, + request_send_fees, + claim_request, + get_earned_fee_amount, + get_exit_requests, +) +from node_cli.utils.helper import abort_if_false + + +@click.group() +def staking_cli(): + pass + + +@staking_cli.group(help='Staking commands') +def staking(): + pass + + +@staking.command('add-receiver', help='Add allowed receiver') +@click.argument('receiver') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to add allowed receiver?', +) +def _add_allowed_receiver(receiver: str) -> None: + add_allowed_receiver(receiver) + + +@staking.command('remove-receiver', help='Remove allowed receiver') +@click.argument('receiver') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to remove allowed receiver?', +) +def _remove_allowed_receiver(receiver: str) -> None: + remove_allowed_receiver(receiver) + + +@staking.command('set-fee-rate', help='Set fee rate (uint16, basis points; 25 = 2.5%)') +@click.argument('fee_rate', type=int) +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to set fee rate?', +) +def 
_set_fee_rate(fee_rate: int) -> None: + set_fee_rate(fee_rate) + + +@staking.command('request-fees', help='Create a request to claim fees (FAIR) or all with --all') +@click.argument('amount', type=float, required=False) +@click.option('--all', 'request_all', is_flag=True, help='Request all fees') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to request fees?', +) +def _request_fees(amount: float | None, request_all: bool) -> None: + if amount is None and not request_all: + raise click.UsageError('Provide or use --all') + request_fees(None if request_all else amount) + + +@staking.command( + 'request-send-fees', + help='Create a request to send fees to address (or all with --all)', +) +@click.argument('to') +@click.argument('amount', type=float, required=False) +@click.option('--all', 'send_all', is_flag=True, help='Request to send all fees to address') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to request to send fees?', +) +def _request_send_fees(to: str, amount: float | None, send_all: bool) -> None: + if amount is None and not send_all: + raise click.UsageError('Provide or use --all') + request_send_fees(to, None if send_all else amount) + + +@staking.command('claim-request', help='Claim previously created request by request ID') +@click.argument('request_id', type=int) +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to claim this request?', +) +def _claim_request(request_id: int) -> None: + claim_request(request_id) + + +@staking.command('earned-fee-amount', help='Get earned fee amount') +def _get_earned_fee_amount() -> None: + get_earned_fee_amount() + + +@staking.command('exit-requests', help='Get exit requests for current wallet') +@click.option('--json', 'raw', is_flag=True, help='Output in JSON format') +def 
_get_exit_requests(raw: bool) -> None: + get_exit_requests(raw=raw) diff --git a/node_cli/cli/wallet.py b/node_cli/cli/wallet.py index c794f969..b8f4f6b5 100644 --- a/node_cli/cli/wallet.py +++ b/node_cli/cli/wallet.py @@ -32,22 +32,26 @@ def wallet_cli(): pass -@wallet_cli.group('wallet', help="Node wallet commands") +@wallet_cli.group('wallet', help='Node wallet commands') def wallet(): pass -@wallet.command('info', help="Get info about SKALE node wallet") +@wallet.command('info', help='Get info about SKALE node wallet') @click.option('--format', '-f', type=click.Choice(['json', 'text'])) def wallet_info(format): get_wallet_info(format) -@wallet.command('send', help="Send ETH from SKALE node wallet to address") +@wallet.command('send', help='Send ETH from SKALE node wallet to address') @click.argument('address') @click.argument('amount', type=float) -@click.option('--yes', is_flag=True, callback=abort_if_false, - expose_value=False, - prompt='Are you sure you want to send ETH tokens?') +@click.option( + '--yes', + is_flag=True, + callback=abort_if_false, + expose_value=False, + prompt='Are you sure you want to send ETH tokens?', +) def send(address, amount): send_eth(address, amount) diff --git a/node_cli/configs/__init__.py b/node_cli/configs/__init__.py index feb9507a..7ed6afee 100644 --- a/node_cli/configs/__init__.py +++ b/node_cli/configs/__init__.py @@ -19,13 +19,13 @@ import os import sys -from node_cli.utils.global_config import read_g_config +from pathlib import Path +from node_cli.utils.global_config import read_g_config GLOBAL_SKALE_DIR = os.getenv('GLOBAL_SKALE_DIR') or '/etc/skale' GLOBAL_SKALE_CONF_FILENAME = 'conf.json' -GLOBAL_SKALE_CONF_FILEPATH = os.path.join( - GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILENAME) +GLOBAL_SKALE_CONF_FILEPATH = os.path.join(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILENAME) GLOBAL_CONFIG = read_g_config(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH) G_CONF_USER = GLOBAL_CONFIG['user'] @@ -35,7 +35,7 @@ FILESTORAGE_MAPPING = 
os.path.join(SKALE_STATE_DIR, 'filestorage') SNAPSHOTS_SHARED_VOLUME = 'shared-space' SCHAINS_MNT_DIR_REGULAR = '/mnt' -SCHAINS_MNT_DIR_SYNC = '/var/lib/skale/schains' +SCHAINS_MNT_DIR_SINGLE_CHAIN = '/var/lib/skale/schains' VOLUME_GROUP = 'schains' SKALE_DIR = os.path.join(G_CONF_HOME, '.skale') @@ -44,29 +44,31 @@ NODE_DATA_PATH = os.path.join(SKALE_DIR, 'node_data') SCHAIN_NODE_DATA_PATH = os.path.join(NODE_DATA_PATH, 'schains') NODE_CLI_STATUS_FILENAME = 'node_cli.status' + +SETTINGS_DIR = Path(NODE_DATA_PATH) / 'settings' +NODE_SETTINGS_PATH = SETTINGS_DIR / 'node.toml' +INTERNAL_SETTINGS_PATH = SETTINGS_DIR / 'internal.toml' + NODE_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'node_config.json') CONTAINER_CONFIG_PATH = os.path.join(SKALE_DIR, 'config') CONTAINER_CONFIG_TMP_PATH = os.path.join(SKALE_TMP_DIR, 'config') CONTRACTS_PATH = os.path.join(SKALE_DIR, 'contracts_info') REPORTS_PATH = os.path.join(SKALE_DIR, 'reports') BACKUP_CONTRACTS_PATH = os.path.join(SKALE_DIR, '.old_contracts_info') -INIT_ENV_FILEPATH = os.path.join(SKALE_DIR, '.env') SKALE_RUN_DIR = '/var/run/skale' -SGX_CERTIFICATES_DIR_NAME = 'sgx_certs' - COMPOSE_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'docker-compose.yml') -SYNC_COMPOSE_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'docker-compose-sync.yml') +FAIR_COMPOSE_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'docker-compose-fair.yml') STATIC_PARAMS_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, 'static_params.yaml') +FAIR_STATIC_PARAMS_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, 'fair_static_params.yaml') NGINX_TEMPLATE_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, 'nginx.conf.j2') NGINX_CONFIG_FILEPATH = os.path.join(NODE_DATA_PATH, 'nginx.conf') -NGINX_CONTAINER_NAME = 'skale_nginx' +NGINX_CONTAINER_NAME = 'sk_nginx' LOG_PATH = os.path.join(NODE_DATA_PATH, 'log') REMOVED_CONTAINERS_FOLDER_NAME = '.removed_containers' -REMOVED_CONTAINERS_FOLDER_PATH = os.path.join( - LOG_PATH, REMOVED_CONTAINERS_FOLDER_NAME) 
+REMOVED_CONTAINERS_FOLDER_PATH = os.path.join(LOG_PATH, REMOVED_CONTAINERS_FOLDER_NAME) ETH_STATE_PATH = os.path.join(NODE_DATA_PATH, 'eth-state') NODE_CERTS_PATH = os.path.join(NODE_DATA_PATH, 'ssl') @@ -74,9 +76,6 @@ SGX_CERTS_PATH = os.path.join(NODE_DATA_PATH, 'sgx_certs') SCHAINS_DATA_PATH = os.path.join(NODE_DATA_PATH, 'schains') -CURRENT_FILE_LOCATION = os.path.dirname(os.path.realpath(__file__)) -DOTENV_FILEPATH = os.path.join(os.path.dirname(CURRENT_FILE_LOCATION), '.env') - SRC_FILEBEAT_CONFIG_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'filebeat.yml') FILEBEAT_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'filebeat.yml') @@ -96,16 +95,13 @@ IPTABLES_RULES_STATE_FILEPATH = os.path.join(IPTABLES_DIR, 'rules.v4') DEFAULT_SSH_PORT = 22 -FLASK_SECRET_KEY_FILENAME = 'flask_db_key.txt' -FLASK_SECRET_KEY_FILE = os.path.join(NODE_DATA_PATH, FLASK_SECRET_KEY_FILENAME) - DOCKER_CONFIG_FILEPATH = '/etc/docker/daemon.json' HIDE_STREAM_LOG = os.getenv('HIDE_STREAM_LOG') def _get_env(): try: - sys._MEIPASS + sys._MEIPASS # type: ignore except AttributeError: return 'dev' return 'prod' @@ -118,7 +114,7 @@ def _get_env(): PARDIR = os.path.join(CURRENT_FILE_LOCATION, os.pardir) PROJECT_DIR = os.path.join(PARDIR, os.pardir) else: - PARDIR = os.path.join(sys._MEIPASS, 'data') + PARDIR = os.path.join(sys._MEIPASS, 'data') # type: ignore PROJECT_DIR = PARDIR TEXT_FILE = os.path.join(PROJECT_DIR, 'text.yml') @@ -140,16 +136,14 @@ def _get_env(): TM_INIT_TIMEOUT = 20 RESTORE_SLEEP_TIMEOUT = 20 - -MANAGER_CONTRACTS_FILEPATH = os.path.join(CONTRACTS_PATH, 'manager.json') -IMA_CONTRACTS_FILEPATH = os.path.join(CONTRACTS_PATH, 'ima.json') +INIT_TIMEOUT = 20 META_FILEPATH = os.path.join(NODE_DATA_PATH, 'meta.json') SKALE_NODE_REPO_URL = 'https://github.com/skalenetwork/skale-node.git' DOCKER_LVMPY_REPO_URL = 'https://github.com/skalenetwork/docker-lvmpy.git' -DOCKER_DEAMON_CONFIG_PATH = '/etc/docker/daemon.json' +DOCKER_DAEMON_CONFIG_PATH = '/etc/docker/daemon.json' DOCKER_DAEMON_HOSTS 
= ('fd://', 'unix:///var/run/skale/docker.sock') DOCKER_SERVICE_CONFIG_DIR = '/etc/systemd/system/docker.service.d' DOCKER_SERVICE_CONFIG_PATH = '/etc/systemd/system/docker.service.d/no-host.conf' @@ -172,3 +166,6 @@ def _get_env(): UFW_CONFIG_PATH = '/etc/default/ufw' UFW_IPV6_BEFORE_INPUT_CHAIN = 'ufw6-before-input' + +REDIS_URI: str = os.getenv('REDIS_URI', 'redis://@127.0.0.1:6379') +DEFAULT_SKALED_BASE_PORT: int = 10000 diff --git a/node_cli/configs/env.py b/node_cli/configs/env.py deleted file mode 100644 index 7b6bf116..00000000 --- a/node_cli/configs/env.py +++ /dev/null @@ -1,80 +0,0 @@ -import os -from dotenv import load_dotenv -from node_cli.configs import SKALE_DIR, CONTAINER_CONFIG_PATH - - -SKALE_DIR_ENV_FILEPATH = os.path.join(SKALE_DIR, '.env') -CONFIGS_ENV_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, '.env') - - -ALLOWED_ENV_TYPES = ['mainnet', 'testnet', 'qanet', 'devnet'] - -REQUIRED_PARAMS = { - 'CONTAINER_CONFIGS_STREAM': '', - 'ENDPOINT': '', - 'MANAGER_CONTRACTS_ABI_URL': '', - 'IMA_CONTRACTS_ABI_URL': '', - 'FILEBEAT_HOST': '', - 'DISK_MOUNTPOINT': '', - 'SGX_SERVER_URL': '', - 'DOCKER_LVMPY_STREAM': '', - 'ENV_TYPE': '', -} - -REQUIRED_PARAMS_SYNC = { - 'SCHAIN_NAME': '', - 'CONTAINER_CONFIGS_STREAM': '', - 'ENDPOINT': '', - 'MANAGER_CONTRACTS_ABI_URL': '', - 'IMA_CONTRACTS_ABI_URL': '', - 'DISK_MOUNTPOINT': '', - 'DOCKER_LVMPY_STREAM': '', - 'ENV_TYPE': '' -} - -OPTIONAL_PARAMS = { - 'MONITORING_CONTAINERS': '', - 'TELEGRAF': '', - 'INFLUX_TOKEN': '', - 'INFLUX_URL': '', - 'TG_API_KEY': '', - 'TG_CHAT_ID': '', - 'CONTAINER_CONFIGS_DIR': '', - 'DISABLE_DRY_RUN': '', - 'DEFAULT_GAS_LIMIT': '', - 'DEFAULT_GAS_PRICE_WEI': '', - 'SKIP_DOCKER_CONFIG': '', - 'ENFORCE_BTRFS': '', - 'SKIP_DOCKER_CLEANUP': '' -} - - -def absent_params(params): - return list(filter( - lambda key: key not in OPTIONAL_PARAMS and not params[key], - params) - ) - - -def get_env_config(env_filepath: str = SKALE_DIR_ENV_FILEPATH, sync_node: bool = False): - 
load_dotenv(dotenv_path=env_filepath) - params = REQUIRED_PARAMS_SYNC.copy() if sync_node else REQUIRED_PARAMS.copy() - params.update(OPTIONAL_PARAMS) - for option_name in params: - env_param = os.getenv(option_name) - if env_param is not None: - params[option_name] = str(env_param) - validate_params(params) - return params - - -def validate_params(params): # todo: temporary fix - if params['ENV_TYPE'] not in ALLOWED_ENV_TYPES: - raise NotValidEnvParamsError( - f'Allowed ENV_TYPE values are {ALLOWED_ENV_TYPES}. ' - f'Actual: "{params["ENV_TYPE"]}"' - ) - - -class NotValidEnvParamsError(Exception): - """Raised when something is wrong with provided env params""" diff --git a/node_cli/configs/routes.py b/node_cli/configs/routes.py index 285e56bd..336fea3e 100644 --- a/node_cli/configs/routes.py +++ b/node_cli/configs/routes.py @@ -19,7 +19,6 @@ import os - CURRENT_API_VERSION = 'v1' API_PREFIX = '/api' @@ -41,12 +40,25 @@ 'schains': ['config', 'list', 'dkg-statuses', 'firewall-rules', 'repair', 'get'], 'ssl': ['status', 'upload'], 'wallet': ['info', 'send-eth'], + 'fair-node': ['info', 'register', 'set-domain-name', 'change-ip', 'exit'], + 'fair-chain': ['record', 'checks'], + 'fair-node-passive': ['setup'], + 'fair-staking': [ + 'add-receiver', + 'remove-receiver', + 'set-fee-rate', + 'request-fees', + 'request-send-fees', + 'claim-request', + 'get-earned-fee-amount', + 'get-exit-requests', + ], } } class RouteNotFoundException(Exception): - """Raised when requested route is not found in provided API version""" + """Raised when requested route is not found in provided API version.""" def route_exists(blueprint, method, api_version): diff --git a/node_cli/core/checks.py b/node_cli/core/checks.py index cb4bae86..0424577f 100644 --- a/node_cli/core/checks.py +++ b/node_cli/core/checks.py @@ -30,28 +30,39 @@ from collections import namedtuple from functools import wraps from typing import ( - Any, Callable, cast, - Dict, Iterable, Iterator, - List, Optional, - Tuple, 
TypeVar, Union, ) + Any, + Callable, + cast, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + TypeVar, + Union, +) import docker # type: ignore import psutil # type: ignore -import yaml from debian import debian_support from packaging.version import parse as version_parse +from skale_core.types import EnvType + from node_cli.configs import ( CHECK_REPORT_PATH, CONTAINER_CONFIG_PATH, DOCKER_CONFIG_FILEPATH, DOCKER_DAEMON_HOSTS, REPORTS_PATH, - STATIC_PARAMS_FILEPATH ) from node_cli.core.host import is_ufw_ipv6_chain_exists, is_ufw_ipv6_option_enabled from node_cli.core.resources import get_disk_size +from node_cli.core.static_config import get_static_params +from node_cli.utils.docker_utils import NodeType from node_cli.utils.helper import run_cmd, safe_mkdir +from node_cli.utils.node_type import NodeMode logger = logging.getLogger(__name__) @@ -68,27 +79,12 @@ FuncList = List[Func] -def get_static_params( - env_type: str = 'mainnet', - config_path: str = CONTAINER_CONFIG_PATH -) -> Dict: - status_params_filename = os.path.basename(STATIC_PARAMS_FILEPATH) - static_params_filepath = os.path.join(config_path, status_params_filename) - with open(static_params_filepath) as requirements_file: - ydata = yaml.load(requirements_file, Loader=yaml.Loader) - return ydata['envs'][env_type] - - def check_quietly(check: Func, *args, **kwargs) -> CheckResult: try: return check(*args, **kwargs) except Exception as err: logger.exception('%s check errored') - return CheckResult( - name=check.__name__, - status='error', - info=repr(err) - ) + return CheckResult(name=check.__name__, status='error', info=repr(err)) class CheckType(enum.Enum): @@ -117,13 +113,8 @@ def wrapper(*args, **kwargs) -> CheckResult: return cast(Func, wrapper) -def generate_report_from_result( - check_result: List[CheckResult] -) -> List[Dict]: - report = [ - {'name': cr.name, 'status': cr.status} - for cr in check_result - ] +def generate_report_from_result(check_result: List[CheckResult]) -> 
List[Dict]: + report = [{'name': cr.name, 'status': cr.status} for cr in check_result] return report @@ -144,10 +135,7 @@ def get_report(report_path: str = CHECK_REPORT_PATH) -> List[Dict]: return saved_report -def save_report( - new_report: List[Dict], - report_path: str = CHECK_REPORT_PATH -) -> None: +def save_report(new_report: List[Dict], report_path: str = CHECK_REPORT_PATH) -> None: safe_mkdir(REPORTS_PATH) with open(report_path, 'w') as report_file: json.dump(new_report, report_file, indent=4) @@ -157,28 +145,17 @@ def merge_reports( old_report: List[Dict], new_report: List[Dict], ) -> List[Dict]: - return list(dedup( - itertools.chain( - new_report, - old_report - ), - key=lambda r: r['name'] - )) + return list(dedup(itertools.chain(new_report, old_report), key=lambda r: r['name'])) class BaseChecker: - def _ok( - self, - name: str, - info: Optional[Union[str, Dict]] = None - ) -> CheckResult: + def __init__(self, requirements: Dict) -> None: + self.requirements = requirements + + def _ok(self, name: str, info: Optional[Union[str, Dict]] = None) -> CheckResult: return CheckResult(name=name, status='ok', info=info) - def _failed( - self, - name: str, - info: Optional[Union[str, Dict]] = None - ) -> CheckResult: + def _failed(self, name: str, info: Optional[Union[str, Dict]] = None) -> CheckResult: return CheckResult(name=name, status='failed', info=info) def get_checks(self, check_type: CheckType = CheckType.ALL) -> FuncList: @@ -189,8 +166,9 @@ def get_checks(self, check_type: CheckType = CheckType.ALL) -> FuncList: methods = inspect.getmembers( type(self), - predicate=lambda m: inspect.isfunction(m) and - getattr(m, '_check_type', None) in allowed_types + predicate=lambda m: inspect.isfunction(m) + and getattr(m, '_check_type', None) in allowed_types + and self.requirements.get(m.__name__, None) != 'disabled', ) return [functools.partial(m[1], self) for m in methods] @@ -209,13 +187,11 @@ def check(self) -> ResultList: class MachineChecker(BaseChecker): 
def __init__( - self, - requirements: Dict, - disk_device: str, - network_timeout: Optional[int] = None) -> None: - self.requirements = requirements + self, requirements: Dict, disk_device: str, network_timeout: Optional[int] = None + ) -> None: self.disk_device = disk_device self.network_timeout = network_timeout or NETWORK_CHECK_TIMEOUT + super().__init__(requirements=requirements) @preinstall def cpu_total(self) -> CheckResult: @@ -242,11 +218,10 @@ def cpu_physical(self) -> CheckResult: @preinstall def memory(self) -> CheckResult: name = 'memory' - mem_info = psutil.virtual_memory().total, - actual = mem_info[0] + actual = psutil.virtual_memory().total expected = self.requirements['memory'] - actual_gb = round(actual / 1024 ** 3, 2) - expected_gb = round(expected / 1024 ** 3, 2) + actual_gb = round(actual / 1024**3, 2) + expected_gb = round(expected / 1024**3, 2) info = f'Expected RAM {expected_gb} GB, actual {actual_gb} GB' if actual < expected: return self._failed(name=name, info=info) @@ -258,8 +233,8 @@ def swap(self) -> CheckResult: name = 'swap' actual = psutil.swap_memory().total expected = self.requirements['swap'] - actual_gb = round(actual / 1024 ** 3, 2) - expected_gb = round(expected / 1024 ** 3, 2) + actual_gb = round(actual / 1024**3, 2) + expected_gb = round(expected / 1024**3, 2) info = f'Expected swap memory {expected_gb} GB, actual {actual_gb} GB' if actual < expected: return self._failed(name=name, info=info) @@ -274,8 +249,8 @@ def disk(self) -> CheckResult: name = 'disk' actual = self._get_disk_size() expected = self.requirements['disk'] - actual_gb = round(actual / 1024 ** 3, 2) - expected_gb = round(expected / 1024 ** 3, 2) + actual_gb = round(actual / 1024**3, 2) + expected_gb = round(expected / 1024**3, 2) info = f'Expected disk size {expected_gb} GB, actual {actual_gb} GB' if actual < expected: return self._failed(name=name, info=info) @@ -288,7 +263,8 @@ def network(self) -> CheckResult: try: 
socket.setdefaulttimeout(self.network_timeout) socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect( - (CLOUDFLARE_DNS_HOST, CLOUDFLARE_DNS_HOST_PORT)) + (CLOUDFLARE_DNS_HOST, CLOUDFLARE_DNS_HOST_PORT) + ) return self._ok(name=name) except socket.error as err: info = f'Network checking returned error: {err}' @@ -297,26 +273,19 @@ def network(self) -> CheckResult: class PackageChecker(BaseChecker): def __init__(self, requirements: Dict) -> None: - self.requirements = requirements + super().__init__(requirements=requirements) - def _check_apt_package(self, package_name: str, - version: str = None) -> CheckResult: + def _check_apt_package(self, package_name: str, version: str | None = None) -> CheckResult: # TODO: check versions - dpkg_cmd_result = run_cmd( - ['dpkg', '-s', package_name], check_code=False) + dpkg_cmd_result = run_cmd(['dpkg', '-s', package_name], check_code=False) output = dpkg_cmd_result.stdout.decode('utf-8').strip() if dpkg_cmd_result.returncode != 0: return self._failed(name=package_name, info=output) actual_version = self._version_from_dpkg_output(output) expected_version = self.requirements[package_name] - info = { - 'expected_version': expected_version, - 'actual_version': actual_version - } - compare_result = debian_support.version_compare( - actual_version, expected_version - ) + info = {'expected_version': expected_version, 'actual_version': actual_version} + compare_result = debian_support.version_compare(actual_version, expected_version) if compare_result == -1: return self._failed(name=package_name, info=info) else: @@ -342,31 +311,22 @@ def psmisc(self) -> CheckResult: def ufw_ipv6_disabled(self) -> CheckResult: name = 'ufw-ipv6' if is_ufw_ipv6_option_enabled(): - return self._failed( - name=name, - info='ufw ipv6 configuration should be disabled' - ) + return self._failed(name=name, info='ufw ipv6 configuration should be disabled') elif is_ufw_ipv6_chain_exists(): - return self._failed( - name=name, - info='ufw should be reloaded 
to switch off ipv6' - ) + return self._failed(name=name, info='ufw should be reloaded to switch off ipv6') else: return self._ok(name=name) def _version_from_dpkg_output(self, output: str) -> str: info_lines = map(lambda s: s.strip(), output.split('\n')) - v_line = next(filter( - lambda s: s.startswith('Version'), - info_lines - )) + v_line = next(filter(lambda s: s.startswith('Version'), info_lines)) return v_line.split()[1] class DockerChecker(BaseChecker): def __init__(self, requirements: Dict) -> None: self.docker_client = docker.from_env() - self.requirements = requirements + super().__init__(requirements=requirements) def _check_docker_command(self) -> Optional[str]: return shutil.which('docker') @@ -386,17 +346,11 @@ def docker_engine(self) -> CheckResult: version_info = self._get_docker_version_info() if not version_info: - return self._failed( - name=name, - info='Docker api request failed. Is docker installed?' - ) + return self._failed(name=name, info='Docker api request failed. Is docker installed?') logger.debug('Docker version info %s', version_info) actual_version = self.docker_client.version()['Version'] expected_version = self.requirements['docker-engine'] - info = { - 'expected_version': expected_version, - 'actual_version': actual_version - } + info = {'expected_version': expected_version, 'actual_version': actual_version} if version_parse(actual_version) < version_parse(expected_version): return self._failed(name=name, info=info) else: @@ -410,17 +364,11 @@ def docker_api(self) -> CheckResult: version_info = self._get_docker_version_info() if not version_info: - return self._failed( - name=name, - info='Docker api request failed. Is docker installed?' - ) + return self._failed(name=name, info='Docker api request failed. 
Is docker installed?') logger.debug('Docker version info %s', version_info) actual_version = version_info['ApiVersion'] expected_version = self.requirements['docker-api'] - info = { - 'expected_version': expected_version, - 'actual_version': actual_version - } + info = {'expected_version': expected_version, 'actual_version': actual_version} if version_parse(actual_version) < version_parse(expected_version): return self._failed(name=name, info=info) else: @@ -435,13 +383,10 @@ def docker_compose(self) -> CheckResult: return self._failed(name=name, info=info) v_cmd_result = run_cmd( - ['docker', 'compose', 'version'], - check_code=False, - separate_stderr=True + ['docker', 'compose', 'version'], check_code=False, separate_stderr=True ) output = v_cmd_result.stdout.decode('utf-8').rstrip() if v_cmd_result.returncode != 0: - info = f'Checking docker compose version failed with: {output}' return self._failed(name=name, info=output) actual_version = output.split(',')[0].split()[-1].strip() @@ -468,10 +413,7 @@ def _get_docker_config(self) -> Dict: def _check_docker_alive_option(self, config: Dict) -> Tuple: actual_value = config.get('live-restore', None) if actual_value is not True: - info = ( - 'Docker daemon live-restore option ' - 'should be set as "true"' - ) + info = 'Docker daemon live-restore option should be set as "true"' return False, info else: info = 'Docker daemon live-restore option is set as "true"' @@ -509,37 +451,36 @@ def hosts_config(self) -> CheckResult: return self._failed(name=name, info=info) -def get_checks( - checkers: List[BaseChecker], - check_type: CheckType = CheckType.ALL -) -> FuncList: +def get_checks(checkers: List[BaseChecker], check_type: CheckType = CheckType.ALL) -> FuncList: return list( itertools.chain.from_iterable( - ( - checker.get_checks(check_type=check_type) - for checker in checkers - ) + (checker.get_checks(check_type=check_type) for checker in checkers) ) ) -def get_all_checkers(disk: str, requirements: Dict) -> 
List[BaseChecker]: - return [ - MachineChecker(requirements['server'], disk), +def get_all_checkers(disk: str, requirements: Dict, node_mode: NodeMode) -> List[BaseChecker]: + checkers = [ PackageChecker(requirements['package']), - DockerChecker(requirements['docker']) + DockerChecker(requirements['docker']), ] + if node_mode == NodeMode.ACTIVE: + checkers.append(MachineChecker(requirements['server'], disk)) + return checkers def run_checks( disk: str, - env_type: str = 'mainnet', + node_type: NodeType, + node_mode: NodeMode, + env_type: EnvType = 'mainnet', config_path: str = CONTAINER_CONFIG_PATH, - check_type: CheckType = CheckType.ALL + check_type: CheckType = CheckType.ALL, ) -> ResultList: logger.info('Executing checks. Type: %s', check_type) - requirements = get_static_params(env_type, config_path) - checkers = get_all_checkers(disk, requirements) + requirements = get_static_params(node_type, env_type, config_path) + + checkers = get_all_checkers(disk, requirements, node_mode) checks = get_checks(checkers, check_type) results = [check() for check in checks] diff --git a/node_cli/core/docker_config.py b/node_cli/core/docker_config.py index d5241be3..67cf3d86 100644 --- a/node_cli/core/docker_config.py +++ b/node_cli/core/docker_config.py @@ -1,16 +1,16 @@ -import grp import enum +import grp import json import logging import os import pathlib +import shutil import time import typing from typing import Optional, Tuple - from node_cli.configs import ( - DOCKER_DEAMON_CONFIG_PATH, + DOCKER_DAEMON_CONFIG_PATH, DOCKER_DAEMON_HOSTS, DOCKER_SERVICE_CONFIG_DIR, DOCKER_SERVICE_CONFIG_PATH, @@ -18,9 +18,8 @@ NODE_DOCKER_CONFIG_PATH, SKALE_RUN_DIR, ) -from node_cli.utils.helper import run_cmd from node_cli.utils.docker_utils import docker_client, get_containers - +from node_cli.utils.helper import run_cmd logger = logging.getLogger(__name__) @@ -73,7 +72,7 @@ class DockerConfigResult(enum.IntEnum): def ensure_docker_service_config_dir( - docker_service_dir: Path = 
DOCKER_SERVICE_CONFIG_DIR + docker_service_dir: Path = DOCKER_SERVICE_CONFIG_DIR, ) -> DockerConfigResult: logger.info('Ensuring docker service dir') if not os.path.isdir(docker_service_dir): @@ -84,8 +83,7 @@ def ensure_docker_service_config_dir( def ensure_service_overriden_config( - config_filepath: - Optional[Path] = DOCKER_SERVICE_CONFIG_PATH + config_filepath: Optional[Path] = DOCKER_SERVICE_CONFIG_PATH, ) -> DockerConfigResult: logger.info('Ensuring docker service override config') config = get_content(config_filepath) @@ -95,8 +93,8 @@ def ensure_service_overriden_config( '[Service]', 'ExecStart=', 'ExecStart=/usr/bin/dockerd', - f'ExecStartPre=/bin/mkdir -p {socket_dir}' - ] + f'ExecStartPre=/bin/mkdir -p {socket_dir}', + ] ) if not os.path.isfile(config_filepath): @@ -105,37 +103,28 @@ def ensure_service_overriden_config( config_file.write(expected_config) return DockerConfigResult.CHANGED elif config != expected_config: - raise OverridenConfigExsitsError( - f'{config_filepath} already exists' - ) + raise OverridenConfigExsitsError(f'{config_filepath} already exists') return DockerConfigResult.UNCHANGED def ensure_docker_daemon_config( - daemon_config_path: Path = DOCKER_DEAMON_CONFIG_PATH, - daemon_hosts: Path = DOCKER_DAEMON_HOSTS + daemon_config_path: Path = DOCKER_DAEMON_CONFIG_PATH, daemon_hosts: Path = DOCKER_DAEMON_HOSTS ) -> None: logger.info('Ensuring docker daemon config') config = {} if os.path.isfile(daemon_config_path): with open(daemon_config_path, 'r') as daemon_config: config = json.load(daemon_config) - if config.get('live-restore') is True and \ - config.get('hosts') == daemon_hosts: + if config.get('live-restore') is True and config.get('hosts') == daemon_hosts: return DockerConfigResult.UNCHANGED - config.update({ - 'live-restore': True, - 'hosts': daemon_hosts - }) + config.update({'live-restore': True, 'hosts': daemon_hosts}) logger.info('Updating docker daemon config') with open(daemon_config_path, 'w') as daemon_config: 
json.dump(config, daemon_config) return DockerConfigResult.CHANGED -def restart_docker_service( - docker_service_name: str = 'docker' -) -> DockerConfigResult: +def restart_docker_service(docker_service_name: str = 'docker') -> DockerConfigResult: logger.info('Executing daemon-reload') run_cmd(['systemctl', 'daemon-reload']) @@ -149,18 +138,14 @@ def is_socket_existed(socket_path: Path = DOCKER_SOCKET_PATH) -> bool: def wait_for_socket_initialization( - socket_path: Path = DOCKER_SOCKET_PATH, - allowed_time: int = 300 + socket_path: Path = DOCKER_SOCKET_PATH, allowed_time: int = 300 ) -> None: logger.info('Waiting for docker inititalization') start_ts = time.time() - while int(time.time() - start_ts) < allowed_time and \ - not is_socket_existed(socket_path): + while int(time.time() - start_ts) < allowed_time and not is_socket_existed(socket_path): time.sleep(2) if not is_socket_existed(socket_path): - raise SocketInitTimeoutError( - f'Socket was not able to init in {allowed_time}' - ) + raise SocketInitTimeoutError(f'Socket was not able to init in {allowed_time}') logger.info('Socket initialized successfully') @@ -172,16 +157,10 @@ def ensure_run_dir(run_dir: Path = SKALE_RUN_DIR) -> DockerConfigResult: def assert_no_containers(ignore: Tuple[str] = ()): - containers = [ - c.name - for c in get_containers() - if c.name not in ignore - ] + containers = [c.name for c in get_containers() if c.name not in ignore] if len(containers) > 0: logger.fatal('%s containers exist', ' '.join(containers)) - raise ContainersExistError( - f'Existed containers amount {len(containers)}' - ) + raise ContainersExistError(f'Existed containers amount {len(containers)}') def configure_docker() -> None: @@ -190,13 +169,12 @@ def configure_docker() -> None: ensure_run_dir, ensure_docker_service_config_dir, ensure_service_overriden_config, - ensure_docker_daemon_config + ensure_docker_daemon_config, ) results = (task() for task in pre_restart_tasks) results = list(results) logger.info('Docker 
config changes %s', results) - if not is_socket_existed() or \ - any(r == DockerConfigResult.CHANGED for r in results): + if not is_socket_existed() or any(r == DockerConfigResult.CHANGED for r in results): restart_docker_service() wait_for_socket_initialization() @@ -205,3 +183,49 @@ def configure_docker() -> None: save_docker_group_id(group_id) logger.info('Docker configuration finished') + + +def remove_docker_service_override_config() -> None: + if os.path.isfile(DOCKER_SERVICE_CONFIG_PATH): + logger.info('Removing docker service override config') + os.remove(DOCKER_SERVICE_CONFIG_PATH) + + +def reset_docker_daemon_config() -> None: + if os.path.isfile(DOCKER_DAEMON_CONFIG_PATH): + logger.info('Resetting docker daemon config') + with open(DOCKER_DAEMON_CONFIG_PATH, 'r') as daemon_config: + config = json.load(daemon_config) + + config.pop('live-restore', None) + config.pop('hosts', None) + + if config: + with open(DOCKER_DAEMON_CONFIG_PATH, 'w') as daemon_config: + json.dump(config, daemon_config) + else: + os.remove(DOCKER_DAEMON_CONFIG_PATH) + + +def remove_node_docker_config() -> None: + if os.path.isfile(NODE_DOCKER_CONFIG_PATH): + logger.info('Removing node docker config') + os.remove(NODE_DOCKER_CONFIG_PATH) + + +def remove_skale_run_dir() -> None: + if os.path.isdir(SKALE_RUN_DIR): + shutil.rmtree(SKALE_RUN_DIR) + logger.info('Removed SKALE run directory') + + +def cleanup_docker_configuration() -> None: + """Cleanup all skale specific docker configuration files and directories""" + logger.info('Cleaning up docker configuration') + + remove_docker_service_override_config() + reset_docker_daemon_config() + remove_node_docker_config() + remove_skale_run_dir() + restart_docker_service() + logger.info('Docker configuration cleanup finished') diff --git a/node_cli/core/health.py b/node_cli/core/health.py index 247831db..72eb23ed 100644 --- a/node_cli/core/health.py +++ b/node_cli/core/health.py @@ -20,10 +20,7 @@ import json from terminaltables import 
SingleTable -from node_cli.utils.print_formatters import ( - print_containers, - print_schains_healthchecks -) +from node_cli.utils.print_formatters import print_containers, print_schains_healthchecks from node_cli.utils.helper import error_exit, get_request from node_cli.utils.exit_codes import CLIExitCodes @@ -33,9 +30,7 @@ def get_containers(_all): status, payload = get_request( - blueprint=BLUEPRINT_NAME, - method='containers', - params={'all': _all} + blueprint=BLUEPRINT_NAME, method='containers', params={'all': _all} ) if status == 'ok': print_containers(payload) @@ -44,10 +39,7 @@ def get_containers(_all): def get_schains_checks(json_format: bool = False) -> None: - status, payload = get_request( - blueprint=BLUEPRINT_NAME, - method='schains' - ) + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='schains') if status == 'ok': if not payload: print('No sChains found') @@ -61,10 +53,7 @@ def get_schains_checks(json_format: bool = False) -> None: def get_sgx_info(): - status, payload = get_request( - blueprint=BLUEPRINT_NAME, - method='sgx' - ) + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='sgx') if status == 'ok': data = payload table_data = [ @@ -73,7 +62,7 @@ def get_sgx_info(): ['SGXWallet Version', data['sgx_wallet_version']], ['Node SGX keyname', data['sgx_keyname']], ['Status HTTPS', data['status_https']], - ['Status ZMQ', data['status_zmq']] + ['Status ZMQ', data['status_zmq']], ] table = SingleTable(table_data) print(table.table) diff --git a/node_cli/core/host.py b/node_cli/core/host.py index 0456ac89..a040b8c4 100644 --- a/node_cli/core/host.py +++ b/node_cli/core/host.py @@ -17,36 +17,46 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-import json import logging import os -from shutil import copyfile +from shutil import chown from urllib.parse import urlparse +from skale_core.types import EnvType + from node_cli.core.resources import update_resource_allocation +from node_cli.utils.helper import error_exit from node_cli.configs import ( - ADMIN_PORT, AUTOLOAD_KERNEL_MODULES_PATH, - BTRFS_KERNEL_MODULE, DEFAULT_URL_SCHEME, NODE_DATA_PATH, - SKALE_DIR, CONTAINER_CONFIG_PATH, CONTRACTS_PATH, - ETH_STATE_PATH, NODE_CERTS_PATH, SGX_CERTS_PATH, - REPORTS_PATH, REDIS_DATA_PATH, - SCHAINS_DATA_PATH, LOG_PATH, + ADMIN_PORT, + AUTOLOAD_KERNEL_MODULES_PATH, + BTRFS_KERNEL_MODULE, + DEFAULT_URL_SCHEME, + NODE_DATA_PATH, + SKALE_DIR, + CONTAINER_CONFIG_PATH, + CONTRACTS_PATH, + ETH_STATE_PATH, + NODE_CERTS_PATH, + SGX_CERTS_PATH, + REPORTS_PATH, + REDIS_DATA_PATH, + SETTINGS_DIR, + SCHAINS_DATA_PATH, + LOG_PATH, REMOVED_CONTAINERS_FOLDER_PATH, - IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH, - SKALE_RUN_DIR, SKALE_STATE_DIR, SKALE_TMP_DIR, - UFW_CONFIG_PATH, UFW_IPV6_BEFORE_INPUT_CHAIN -) -from node_cli.configs.resource_allocation import ( - RESOURCE_ALLOCATION_FILEPATH + SKALE_RUN_DIR, + SKALE_STATE_DIR, + SKALE_TMP_DIR, + UFW_CONFIG_PATH, + UFW_IPV6_BEFORE_INPUT_CHAIN, + NGINX_CONFIG_FILEPATH, ) from node_cli.configs.cli_logger import LOG_DATA_PATH -from node_cli.configs.env import SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH from node_cli.core.nftables import NFTablesManager from node_cli.utils.helper import safe_mkdir -from node_cli.utils.print_formatters import print_abi_validation_errors -from node_cli.utils.helper import safe_load_texts, validate_abi +from node_cli.utils.texts import safe_load_texts TEXTS = safe_load_texts() @@ -65,55 +75,44 @@ def fix_url(url): return False -def get_flask_secret_key(): - secret_key_filepath = os.path.join(NODE_DATA_PATH, 'flask_db_key.txt') - with open(secret_key_filepath) as key_file: - return key_file.read().strip() - - -def prepare_host( - env_filepath: str, - 
env_type: str, - allocation: bool = False -): - logger.info('Preparing host started') - make_dirs() - save_env_params(env_filepath) +def prepare_host(env_type: EnvType, allocation: bool = False) -> None: + try: + logger.info('Preparing host started') + make_dirs() + chown(REDIS_DATA_PATH, user=999, group=1000) - if allocation: - update_resource_allocation(env_type) + if allocation: + update_resource_allocation(env_type) + except Exception as e: + error_exit(f'Failed to prepare host: {str(e)}') -def is_node_inited(): - return os.path.isfile(RESOURCE_ALLOCATION_FILEPATH) +def is_node_inited() -> bool: + return os.path.isfile(NGINX_CONFIG_FILEPATH) def make_dirs(): for dir_path in ( - SKALE_DIR, NODE_DATA_PATH, CONTAINER_CONFIG_PATH, - CONTRACTS_PATH, ETH_STATE_PATH, NODE_CERTS_PATH, - REMOVED_CONTAINERS_FOLDER_PATH, - SGX_CERTS_PATH, SCHAINS_DATA_PATH, LOG_PATH, - REPORTS_PATH, REDIS_DATA_PATH, - SKALE_RUN_DIR, SKALE_STATE_DIR, SKALE_TMP_DIR + SKALE_DIR, + NODE_DATA_PATH, + CONTAINER_CONFIG_PATH, + CONTRACTS_PATH, + ETH_STATE_PATH, + NODE_CERTS_PATH, + REMOVED_CONTAINERS_FOLDER_PATH, + SGX_CERTS_PATH, + SCHAINS_DATA_PATH, + LOG_PATH, + REPORTS_PATH, + REDIS_DATA_PATH, + SETTINGS_DIR, + SKALE_RUN_DIR, + SKALE_STATE_DIR, + SKALE_TMP_DIR, ): safe_mkdir(dir_path) -def save_env_params(env_filepath): - copyfile(env_filepath, SKALE_DIR_ENV_FILEPATH) - - -def link_env_file(): - if not (os.path.islink(CONFIGS_ENV_FILEPATH) or - os.path.isfile(CONFIGS_ENV_FILEPATH)): - logger.info( - 'Creating symlink %s → %s', - SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH - ) - os.symlink(SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH) - - def init_logs_dir(): safe_mkdir(LOG_DATA_PATH) safe_mkdir(REMOVED_CONTAINERS_FOLDER_PATH) @@ -130,10 +129,7 @@ def is_btrfs_module_autoloaded(modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH): modules = set( map( lambda line: line.strip(), - filter( - lambda line: not line.startswith('#'), - modules_file.readlines() - ) + filter(lambda line: not 
line.startswith('#'), modules_file.readlines()), ) ) return BTRFS_KERNEL_MODULE in modules @@ -144,9 +140,7 @@ def add_btrfs_module_to_autoload(modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH): modules_file.write(f'{BTRFS_KERNEL_MODULE}\n') -def ensure_btrfs_kernel_module_autoloaded( - modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH -): +def ensure_btrfs_kernel_module_autoloaded(modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH): logger.debug('Checking if btrfs is in %s', modules_filepath) if not is_btrfs_module_autoloaded(modules_filepath): logger.info('Adding btrfs module to %s', modules_filepath) @@ -155,24 +149,6 @@ def ensure_btrfs_kernel_module_autoloaded( logger.debug('btrfs is already in %s', modules_filepath) -def validate_abi_files(json_result=False): - results = [ - validate_abi(abi_filepath) - for abi_filepath in [ - MANAGER_CONTRACTS_FILEPATH, - IMA_CONTRACTS_FILEPATH - ] - ] - if any(r['status'] == 'error' for r in results): - print('Some files do not exist or are incorrect') - print_abi_validation_errors(results, raw=json_result) - else: - if json_result: - print(json.dumps({'result': 'ok'})) - else: - print('All abi files are correct json files!') - - def is_ufw_ipv6_option_enabled() -> bool: """Check if UFW is enabled and IPv6 is configured.""" if os.path.isfile(UFW_CONFIG_PATH): diff --git a/node_cli/core/logs.py b/node_cli/core/logs.py index d92cb37c..1563ddfc 100644 --- a/node_cli/core/logs.py +++ b/node_cli/core/logs.py @@ -23,9 +23,7 @@ import datetime from node_cli.utils.helper import run_cmd, safe_mkdir -from node_cli.utils.docker_utils import ( - save_container_logs, get_containers -) +from node_cli.utils.docker_utils import save_container_logs, get_containers from node_cli.configs import REMOVED_CONTAINERS_FOLDER_PATH, SKALE_TMP_DIR from node_cli.configs.cli_logger import LOG_DATA_PATH @@ -44,7 +42,7 @@ def create_logs_dump(path, filter_container=None): if filter_container: containers = get_containers(filter_container) else: - containers = 
get_containers('skale') + containers = get_containers('sk_*') for container in containers: log_filepath = os.path.join(containers_logs_path, f'{container.name}.log') @@ -60,7 +58,7 @@ def create_logs_dump(path, filter_container=None): def create_dump_dir(): - time = datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S") + time = datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%d--%H-%M-%S') folder_name = f'skale-logs-dump-{time}' folder_path = os.path.join(SKALE_TMP_DIR, folder_name) containers_path = os.path.join(folder_path, 'containers') diff --git a/node_cli/core/nftables.py b/node_cli/core/nftables.py index f46568f3..e0059fff 100644 --- a/node_cli/core/nftables.py +++ b/node_cli/core/nftables.py @@ -22,9 +22,9 @@ import os import shutil import sys +from dataclasses import dataclass from pathlib import Path from typing import Optional -from dataclasses import dataclass from node_cli.configs import ( ENV, @@ -32,7 +32,7 @@ NFTABLES_CHAIN_FOLDER_PATH, NFTABLES_MAIN_CONFIG_PATH, NFTABLES_SKALE_BASE_CONFIG_PATH, - NFTABLES_USER_CONFIG_PATH + NFTABLES_USER_CONFIG_PATH, ) from node_cli.utils.helper import get_ssh_port, run_cmd @@ -44,7 +44,8 @@ class ServicePort: DNS: int = 53 CADVISOR: int = 9100 EXPORTER: int = 8080 - WATCHDOG: int = 3009 + WATCHDOG_HTTP: int = 3009 + WATCHDOG_HTTPS: int = 311 HTTPS: int = 443 HTTP: int = 80 @@ -95,13 +96,7 @@ def __post_init__(self): if self.first_port is not None and self.last_port is None: self.last_port = self.first_port if all( - val is None - for val in ( - self.first_port, - self.last_port, - self.protocol, - self.icmp_type - ) + val is None for val in (self.first_port, self.last_port, self.protocol, self.icmp_type) ): raise NFTablesError('Rule has no meaningful fields') @@ -179,9 +174,9 @@ def update_chain_policy( chain: str, policy: str = POLICY, family: Optional[str] = None, - table: Optional[str] = None + table: Optional[str] = None, ) -> None: - """Update specified chain if it exists. 
Otherwise do nothing""" + """Update specified chain if it exists. Otherwise do nothing.""" family = family or self.family table = table or self.table if self.chain_exists(chain, family=family): @@ -211,7 +206,7 @@ def table_exists(self) -> bool: return False def create_table_if_not_exists(self) -> None: - """Create table only if it doesn't exist""" + """Create table only if it doesn't exist.""" if not self.table_exists(): cmd = {'nftables': [{'add': {'table': {'family': self.family, 'name': self.table}}}]} self.execute_cmd(cmd) @@ -220,7 +215,7 @@ def create_table_if_not_exists(self) -> None: logger.info('Table already exists: %s', self.table) def get_rules(self, chain: str) -> list[dict]: - """Get existing rules for a chain""" + """Get existing rules for a chain.""" try: cmd = f'list chain {self.family} {self.table} {chain}' rc, output, error = self.nft.cmd(cmd) @@ -249,7 +244,6 @@ def rule_exists(self, chain: str, new_rule_expr: list[dict]) -> bool: return False def add_drop_rule(self, rule: Rule) -> None: - expr = [] if rule.first_port: @@ -395,7 +389,7 @@ def add_rule(self, rule: Rule) -> None: rule.chain, rule.protocol, rule.first_port, - rule.last_port + rule.last_port, ) else: logger.info( @@ -403,7 +397,7 @@ def add_rule(self, rule: Rule) -> None: rule.chain, rule.protocol, rule.first_port, - rule.last_port + rule.last_port, ) def remove_rule(self, rule: Rule) -> None: @@ -464,7 +458,7 @@ def remove_rule(self, rule: Rule) -> None: rule.chain, rule.protocol, rule.first_port, - rule.last_port + rule.last_port, ) else: logger.info( @@ -472,7 +466,7 @@ def remove_rule(self, rule: Rule) -> None: rule.chain, rule.protocol, rule.first_port, - rule.last_port + rule.last_port, ) def add_connection_tracking_rule(self, chain: str) -> None: @@ -535,7 +529,6 @@ def add_loopback_rule(self, chain) -> None: def get_base_ruleset(self) -> str: self.nft.set_json_output(False) - output = '' try: cmd = f'list chain {self.family} {self.table} {self.chain}' rc, output, error = 
self.nft.cmd(cmd) @@ -545,10 +538,9 @@ def get_base_ruleset(self) -> str: finally: self.nft.set_json_output(True) - return output def setup_firewall(self, enable_monitoring: bool = False) -> None: - """Setup firewall rules""" + """Setup firewall rules.""" logger.info('Configuring firewall rules') try: @@ -568,7 +560,8 @@ def setup_firewall(self, enable_monitoring: bool = False) -> None: ServicePort.DNS, ServicePort.HTTPS, ServicePort.HTTP, - ServicePort.WATCHDOG + ServicePort.WATCHDOG_HTTP, + ServicePort.WATCHDOG_HTTPS, ] if enable_monitoring: tcp_ports.extend([ServicePort.EXPORTER, ServicePort.CADVISOR]) @@ -587,17 +580,14 @@ def setup_firewall(self, enable_monitoring: bool = False) -> None: chain=self.chain, first_port=SGXPort.HTTPS, last_port=SGXPort.ZMQ, - protocol='tcp' + protocol='tcp', ) ) self.add_drop_rule(Rule(chain=self.chain, protocol='udp')) logger.info('Making sure legacy chain has default policy %s', POLICY) self.update_chain_policy( - chain=LEGACY_CHAIN, - policy=POLICY, - family=LEGACY_FAMILY, - table=LEGACY_TABLE + chain=LEGACY_CHAIN, policy=POLICY, family=LEGACY_FAMILY, table=LEGACY_TABLE ) except Exception as e: @@ -606,12 +596,13 @@ def setup_firewall(self, enable_monitoring: bool = False) -> None: logger.info('Firewall rules are configured') def cleanup_legacy_rules(self, ssh: bool = False, dns: bool = False) -> None: - """Cleanups all node-cli generated rules""" + """Cleans up all node-cli generated rules.""" self.remove_drop_rule('tcp') self.remove_drop_rule('udp') tcp_ports = [ ServicePort.HTTPS, - ServicePort.WATCHDOG, + ServicePort.WATCHDOG_HTTP, + ServicePort.WATCHDOG_HTTPS, ServicePort.EXPORTER, ServicePort.CADVISOR, ServicePort.DNS, # tcp is redundant, making sure it's removed @@ -624,7 +615,7 @@ def cleanup_legacy_rules(self, ssh: bool = False, dns: bool = False) -> None: self.remove_rule(Rule(chain=self.chain, protocol='udp', first_port=ServicePort.DNS)) def flush_chain(self, chain: str) -> None: - """Remove all rules from a specific 
chain""" + """Remove all rules from a specific chain.""" json_cmd = { 'nftables': [ {'flush': {'chain': {'family': self.family, 'table': self.table, 'name': chain}}} @@ -678,10 +669,7 @@ def create_user_config_path() -> None: def update_main_nftables_config() -> None: logger.info('Updating main nftables rules') - content = ( - f'#!/usr/sbin/nft -f\nflush ruleset\n' - f'include "{NFTABLES_SKALE_BASE_CONFIG_PATH}";' - ) + content = f'#!/usr/sbin/nft -f\nflush ruleset\ninclude "{NFTABLES_SKALE_BASE_CONFIG_PATH}";' with open(NFTABLES_MAIN_CONFIG_PATH, 'w') as f: f.write(content) diff --git a/node_cli/core/nginx.py b/node_cli/core/nginx.py index e87b17db..340e3299 100644 --- a/node_cli/core/nginx.py +++ b/node_cli/core/nginx.py @@ -20,11 +20,12 @@ import logging import os.path -from node_cli.utils.docker_utils import restart_nginx_container, docker_client +from node_cli.cli.info import TYPE from node_cli.configs import NODE_CERTS_PATH, NGINX_TEMPLATE_FILEPATH, NGINX_CONFIG_FILEPATH +from node_cli.utils.node_type import NodeType +from node_cli.utils.docker_utils import restart_nginx_container, docker_client from node_cli.utils.helper import process_template - logger = logging.getLogger(__name__) @@ -34,10 +35,12 @@ def generate_nginx_config() -> None: ssl_on = check_ssl_certs() + skale_node = is_skale_node_nginx() template_data = { 'ssl': ssl_on, + 'skale_node': skale_node, } - logger.info(f'Processing nginx template. ssl: {ssl_on}') + logger.info(f'Processing nginx template. 
ssl: {ssl_on}, skale_node: {skale_node}') process_template(NGINX_TEMPLATE_FILEPATH, NGINX_CONFIG_FILEPATH, template_data) @@ -47,6 +50,10 @@ def check_ssl_certs(): return os.path.exists(crt_path) and os.path.exists(key_path) +def is_skale_node_nginx() -> bool: + return TYPE == NodeType.SKALE + + def reload_nginx() -> None: dutils = docker_client() generate_nginx_config() diff --git a/node_cli/core/node.py b/node_cli/core/node.py index 5e1d44ec..7c9e2409 100644 --- a/node_cli/core/node.py +++ b/node_cli/core/node.py @@ -28,66 +28,77 @@ import docker +from node_cli.cli import __version__ from node_cli.configs import ( BACKUP_ARCHIVE_NAME, CONTAINER_CONFIG_PATH, FILESTORAGE_MAPPING, - INIT_ENV_FILEPATH, LOG_PATH, RESTORE_SLEEP_TIMEOUT, SCHAINS_MNT_DIR_REGULAR, - SCHAINS_MNT_DIR_SYNC, + SCHAINS_MNT_DIR_SINGLE_CHAIN, SKALE_DIR, SKALE_STATE_DIR, TM_INIT_TIMEOUT, ) -from node_cli.cli import __version__ -from node_cli.configs.env import get_env_config, SKALE_DIR_ENV_FILEPATH from node_cli.configs.cli_logger import LOG_DATA_PATH as CLI_LOG_DATA_PATH - -from node_cli.core.host import is_node_inited, save_env_params, get_flask_secret_key from node_cli.core.checks import run_checks as run_host_checks +from node_cli.core.host import is_node_inited from node_cli.core.resources import update_resource_allocation +from node_cli.core.node_options import ( + active_fair, + active_skale, + upsert_node_mode, + passive_skale, + passive_fair, +) +from node_cli.migrations.focal_to_jammy import migrate as migrate_2_6 from node_cli.operations import ( + cleanup_skale_op, configure_nftables, - update_op, init_op, + init_passive_op, + restore_op, turn_off_op, turn_on_op, - restore_op, - init_sync_op, - update_sync_op, - cleanup_sync_op, + update_op, + update_passive_op, ) -from node_cli.utils.print_formatters import ( - print_failed_requirements_checks, - print_node_cmd_error, - print_node_info, +from node_cli.utils.decorators import check_inited, check_not_inited, check_user +from 
node_cli.utils.docker_utils import ( + BASE_FAIR_BOOT_COMPOSE_SERVICES, + BASE_FAIR_COMPOSE_SERVICES, + BASE_SKALE_COMPOSE_SERVICES, + BASE_PASSIVE_COMPOSE_SERVICES, + BASE_PASSIVE_FAIR_COMPOSE_SERVICES, + is_admin_running, + is_api_running, ) +from node_cli.utils.exit_codes import CLIExitCodes from node_cli.utils.helper import ( error_exit, get_request, post_request, - extract_env_params, ) -from node_cli.utils.meta import get_meta_info -from node_cli.utils.texts import Texts -from node_cli.utils.exit_codes import CLIExitCodes -from node_cli.utils.decorators import check_not_inited, check_inited, check_user -from node_cli.utils.docker_utils import is_admin_running, is_api_running, is_sync_admin_running -from node_cli.migrations.focal_to_jammy import migrate as migrate_2_6 - +from node_cli.utils.meta import CliMetaManager +from node_cli.utils.node_type import NodeType, NodeMode +from node_cli.utils.print_formatters import ( + print_failed_requirements_checks, + print_node_cmd_error, + print_node_info, +) +from node_cli.utils.settings import validate_and_save_node_settings +from skale_core.settings import get_settings +from node_cli.utils.texts import safe_load_texts logger = logging.getLogger(__name__) -TEXTS = Texts() +TEXTS = safe_load_texts() -SYNC_BASE_CONTAINERS_AMOUNT = 2 -BASE_CONTAINERS_AMOUNT = 5 BLUEPRINT_NAME = 'node' class NodeStatuses(Enum): - """This class contains possible node statuses""" + """This class contains possible node statuses.""" ACTIVE = 0 LEAVING = 1 @@ -97,17 +108,20 @@ class NodeStatuses(Enum): NOT_CREATED = 5 -def is_update_safe(sync_node: bool = False) -> bool: - if not sync_node and not is_admin_running() and not is_api_running(): - return True - if sync_node and not is_sync_admin_running(): - return True +def is_update_safe(node_mode: NodeMode) -> bool: + if not is_admin_running(): + if node_mode == NodeMode.PASSIVE: + return True + elif not is_api_running(): + return True status, payload = get_request(BLUEPRINT_NAME, 
'update-safe') if status == 'error': return False - safe = payload['update_safe'] + if not isinstance(payload, dict): + return False + safe = bool(payload.get('update_safe')) if not safe: - logger.info('Locked schains: %s', payload['unsafe_chains']) + logger.info('Locked schains: %s', payload.get('unsafe_chains')) return safe @@ -138,74 +152,85 @@ def register_node(name, p2p_ip, public_ip, port, domain_name): @check_not_inited -def init(env_filepath): - env = compose_node_env(env_filepath) - if env is None: - return +def init(config_file: str, node_type: NodeType) -> None: + node_mode = NodeMode.ACTIVE + settings = validate_and_save_node_settings(config_file, node_type, node_mode) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) - inited_ok = init_op(env_filepath, env) - if not inited_ok: - error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) + init_op(settings=settings, compose_env=compose_env, node_mode=node_mode) logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) - if not is_base_containers_alive(): + if not is_base_containers_alive(node_type=node_type, node_mode=node_mode): error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) logger.info('Generating resource allocation file ...') - update_resource_allocation(env['ENV_TYPE']) + update_resource_allocation(settings.env_type) logger.info('Init procedure finished') @check_not_inited -def restore(backup_path, env_filepath, no_snapshot=False, config_only=False): - env = compose_node_env(env_filepath) - if env is None: - return - save_env_params(env_filepath) - env['SKALE_DIR'] = SKALE_DIR - - if not no_snapshot: - logger.info('Adding BACKUP_RUN to env ...') - env['BACKUP_RUN'] = 'True' # should be str - - restored_ok = restore_op(env, backup_path, config_only=config_only) +def restore( + backup_path: str, + config_file: str, + node_type: NodeType, + no_snapshot: bool = False, + config_only: 
bool = False, +): + node_mode = NodeMode.ACTIVE + settings = validate_and_save_node_settings(config_file, node_type, node_mode) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + + restored_ok = restore_op( + settings=settings, + compose_env=compose_env, + backup_path=backup_path, + node_type=node_type, + config_only=config_only, + backup_run=not no_snapshot, + ) if not restored_ok: error_exit('Restore operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) time.sleep(RESTORE_SLEEP_TIMEOUT) logger.info('Generating resource allocation file ...') - update_resource_allocation(env['ENV_TYPE']) + update_resource_allocation(settings.env_type) print('Node is restored from backup') @check_not_inited -def init_sync( - env_filepath: str, indexer: bool, archive: bool, snapshot: bool, snapshot_from: Optional[str] +def init_passive( + config_file: str, indexer: bool, archive: bool, snapshot: bool, snapshot_from: Optional[str] ) -> None: - env = compose_node_env(env_filepath, sync_node=True) - if env is None: - return - inited_ok = init_sync_op(env_filepath, env, indexer, archive, snapshot, snapshot_from) - if not inited_ok: - error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) + node_mode = NodeMode.PASSIVE + settings = validate_and_save_node_settings(config_file, NodeType.SKALE, node_mode) + compose_env = compose_node_env(node_type=NodeType.SKALE, node_mode=node_mode) + init_passive_op( + settings=settings, + compose_env=compose_env, + indexer=indexer, + archive=archive, + snapshot=snapshot, + snapshot_from=snapshot_from, + ) logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) - if not is_base_containers_alive(sync_node=True): + if not is_base_containers_alive(node_type=NodeType.SKALE, node_mode=node_mode): error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) - logger.info('Sync node initialized successfully') + logger.info('Passive node 
initialized successfully') @check_inited @check_user -def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None: +def update_passive(config_file: str) -> None: logger.info('Node update started') - prev_version = get_meta_info().version + prev_version = CliMetaManager().get_meta_info().version if (__version__ == 'test' or __version__.startswith('2.6')) and prev_version == '2.5.0': migrate_2_6() - env = compose_node_env(env_filepath, sync_node=True) - update_ok = update_sync_op(env_filepath, env) + settings = validate_and_save_node_settings(config_file, NodeType.SKALE, NodeMode.PASSIVE) + compose_env = compose_node_env(node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE) + update_ok = update_passive_op(settings=settings, compose_env=compose_env) if update_ok: logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) - alive = is_base_containers_alive(sync_node=True) + alive = is_base_containers_alive(node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE) if not update_ok or not alive: print_node_cmd_error() return @@ -213,70 +238,56 @@ def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None: logger.info('Node update finished') -@check_inited @check_user -def cleanup_sync() -> None: - env = compose_node_env(SKALE_DIR_ENV_FILEPATH, save=False, sync_node=True) - schain_name = env['SCHAIN_NAME'] - cleanup_sync_op(env, schain_name) - logger.info('Sync node was cleaned up, all containers and data removed') - - -def compose_node_env( - env_filepath, - inited_node=False, - sync_schains=None, - pull_config_for_schain=None, - sync_node=False, - save: bool = True, -): - if env_filepath is not None: - env_params = extract_env_params(env_filepath, sync_node=sync_node, raise_for_status=True) - if save: - save_env_params(env_filepath) - else: - env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=sync_node) +def cleanup(node_mode: NodeMode, prune: bool = False) -> None: + node_mode = upsert_node_mode(node_mode=node_mode) + env = 
compose_node_env(NodeType.SKALE, node_mode) + cleanup_skale_op(node_mode=node_mode, compose_env=env, prune=prune) + logger.info('SKALE node was cleaned up, all containers and data removed') + - mnt_dir = SCHAINS_MNT_DIR_SYNC if sync_node else SCHAINS_MNT_DIR_REGULAR +def compose_node_env(node_type: NodeType, node_mode: NodeMode) -> dict[str, str]: + st = get_settings() + if node_mode == NodeMode.PASSIVE or node_type == NodeType.FAIR: + mnt_dir = SCHAINS_MNT_DIR_SINGLE_CHAIN + else: + mnt_dir = SCHAINS_MNT_DIR_REGULAR env = { 'SKALE_DIR': SKALE_DIR, 'SCHAINS_MNT_DIR': mnt_dir, 'FILESTORAGE_MAPPING': FILESTORAGE_MAPPING, 'SKALE_LIB_PATH': SKALE_STATE_DIR, - **env_params, + 'FILEBEAT_HOST': st.filebeat_host, } - if inited_node and not sync_node: - flask_secret_key = get_flask_secret_key() - env['FLASK_SECRET_KEY'] = flask_secret_key - if sync_schains and not sync_node: - env['BACKUP_RUN'] = 'True' - if pull_config_for_schain: - env['PULL_CONFIG_FOR_SCHAIN'] = pull_config_for_schain return {k: v for k, v in env.items() if v != ''} @check_inited @check_user -def update(env_filepath: str, pull_config_for_schain: str, unsafe_ok: bool = False) -> None: - if not unsafe_ok and not is_update_safe(): +def update( + config_file: str, + pull_config_for_schain: Optional[str], + node_type: NodeType, + node_mode: NodeMode, + unsafe_ok: bool = False, +) -> None: + node_mode = upsert_node_mode(node_mode=node_mode) + + if not unsafe_ok and not is_update_safe(node_mode=node_mode): error_msg = 'Cannot update safely' error_exit(error_msg, exit_code=CLIExitCodes.UNSAFE_UPDATE) - prev_version = get_meta_info().version + prev_version = CliMetaManager().get_meta_info().version if (__version__ == 'test' or __version__.startswith('2.6')) and prev_version == '2.5.0': migrate_2_6() logger.info('Node update started') - env = compose_node_env( - env_filepath, - inited_node=True, - sync_schains=False, - pull_config_for_schain=pull_config_for_schain, - ) - update_ok = update_op(env_filepath, env) + 
settings = validate_and_save_node_settings(config_file, node_type, node_mode) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + update_ok = update_op(settings=settings, compose_env=compose_env, node_mode=node_mode) if update_ok: logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) - alive = is_base_containers_alive() + alive = is_base_containers_alive(node_type=node_type, node_mode=node_mode) if not update_ok or not alive: print_node_cmd_error() return @@ -299,7 +310,7 @@ def backup(path): def get_backup_filename(): - time = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S') + time = datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%d-%H-%M-%S') return f'{BACKUP_ARCHIVE_NAME}-{time}.tar.gz' @@ -326,7 +337,7 @@ def pack_dir(source: str, dest: str, exclude: Tuple[str] = ()): def logfilter(tarinfo): path = Path(tarinfo.name) for e in exclude: - logger.debug('Cheking if %s is parent of %s', e, tarinfo.name) + logger.debug('Checking if %s is parent of %s', e, tarinfo.name) try: path.relative_to(e) except ValueError: @@ -347,7 +358,7 @@ def create_backup_archive(backup_filepath): cli_log_path = CLI_LOG_DATA_PATH container_log_path = LOG_PATH pack_dir(SKALE_DIR, backup_filepath, exclude=(cli_log_path, container_log_path)) - print(f'Backup archive succesfully created {backup_filepath}') + print(f'Backup archive successfully created {backup_filepath}') def set_maintenance_mode_on(): @@ -378,24 +389,34 @@ def set_maintenance_mode_off(): @check_inited @check_user -def turn_off(maintenance_on: bool = False, unsafe_ok: bool = False) -> None: - if not unsafe_ok and not is_update_safe(): +def turn_off(node_type: NodeType, maintenance_on: bool = False, unsafe_ok: bool = False) -> None: + node_mode = upsert_node_mode() + if not unsafe_ok and not is_update_safe(node_mode=node_mode): error_msg = 'Cannot turn off safely' error_exit(error_msg, exit_code=CLIExitCodes.UNSAFE_UPDATE) if maintenance_on: 
set_maintenance_mode_on() - env = compose_node_env(SKALE_DIR_ENV_FILEPATH, save=False) - turn_off_op(env=env) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + turn_off_op(compose_env=compose_env, node_type=node_type, node_mode=node_mode) @check_inited @check_user -def turn_on(maintenance_off, sync_schains, env_file): - env = compose_node_env(env_file, inited_node=True, sync_schains=sync_schains) - turn_on_op(env) +def turn_on(maintenance_off: bool, sync_schains: bool, env_file: str, node_type: NodeType) -> None: + node_mode = upsert_node_mode() + settings = validate_and_save_node_settings(env_file, node_type, node_mode) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + backup_run = sync_schains and node_mode != NodeMode.PASSIVE + turn_on_op( + settings=settings, + compose_env=compose_env, + node_type=node_type, + node_mode=node_mode, + backup_run=backup_run, + ) logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) - if not is_base_containers_alive(): + if not is_base_containers_alive(node_type=node_type, node_mode=node_mode): print_node_cmd_error() return logger.info('Node turned on') @@ -403,12 +424,39 @@ def turn_on(maintenance_off, sync_schains, env_file): set_maintenance_mode_off() -def is_base_containers_alive(sync_node: bool = False): +def get_expected_container_names( + node_type: NodeType, + node_mode: NodeMode, + is_fair_boot: bool, +) -> list[str]: + if node_type == NodeType.FAIR and is_fair_boot: + services = BASE_FAIR_BOOT_COMPOSE_SERVICES + elif active_fair(node_type, node_mode): + services = BASE_FAIR_COMPOSE_SERVICES + elif passive_fair(node_type, node_mode): + services = BASE_PASSIVE_FAIR_COMPOSE_SERVICES + elif active_skale(node_type, node_mode): + services = BASE_SKALE_COMPOSE_SERVICES + elif passive_skale(node_type, node_mode): + services = BASE_PASSIVE_COMPOSE_SERVICES + return list(services.values()) + + +def is_base_containers_alive( + node_type: NodeType, + 
node_mode: NodeMode, + is_fair_boot: bool = False, +) -> bool: + base_container_names = get_expected_container_names(node_type, node_mode, is_fair_boot) + dclient = docker.from_env() - containers = dclient.containers.list() - skale_containers = list(filter(lambda c: c.name.startswith('skale_'), containers)) - containers_amount = SYNC_BASE_CONTAINERS_AMOUNT if sync_node else BASE_CONTAINERS_AMOUNT - return len(skale_containers) >= containers_amount + running_container_names = set(container.name for container in dclient.containers.list()) + + for base_container in base_container_names: + if base_container not in running_container_names: + return False + + return True def get_node_info_plain(): @@ -449,6 +497,8 @@ def set_domain_name(domain_name): def run_checks( + node_type: NodeType, + node_mode: NodeMode, network: str = 'mainnet', container_config_path: str = CONTAINER_CONFIG_PATH, disk: Optional[str] = None, @@ -458,13 +508,13 @@ def run_checks( return if disk is None: - env = get_env_config() - disk = env['DISK_MOUNTPOINT'] - failed_checks = run_host_checks(disk, network, container_config_path) + settings = get_settings() + disk = settings.block_device + failed_checks = run_host_checks(disk, node_type, node_mode, network, container_config_path) if not failed_checks: - print('Requirements checking succesfully finished!') + print('Requirements checking successfully finished!') else: - print('Node is not fully meet the requirements!') + print('Node does not fully meet the requirements!') print_failed_requirements_checks(failed_checks) diff --git a/node_cli/core/node_options.py b/node_cli/core/node_options.py index 70573a65..764e3b5f 100644 --- a/node_cli/core/node_options.py +++ b/node_cli/core/node_options.py @@ -19,17 +19,16 @@ import logging +from node_cli.utils.node_type import NodeMode, NodeType from node_cli.utils.helper import read_json, write_json, init_file from node_cli.configs.node_options import NODE_OPTIONS_FILEPATH + logger = 
logging.getLogger(__name__) class NodeOptions: - def __init__( - self, - filepath: str = NODE_OPTIONS_FILEPATH - ): + def __init__(self, filepath: str = NODE_OPTIONS_FILEPATH): self.filepath = filepath init_file(filepath, {}) @@ -44,7 +43,7 @@ def _set(self, field_name: str, field_value) -> None: @property def archive(self) -> bool: - return self._get('archive') + return self._get('archive') or False @archive.setter def archive(self, archive: bool) -> None: @@ -52,7 +51,7 @@ def archive(self, archive: bool) -> None: @property def catchup(self) -> bool: - return self._get('catchup') + return self._get('catchup') or False @catchup.setter def catchup(self, catchup: bool) -> None: @@ -60,11 +59,79 @@ def catchup(self, catchup: bool) -> None: @property def historic_state(self) -> bool: - return self._get('historic_state') + return self._get('historic_state') or False @historic_state.setter def historic_state(self, historic_state: bool) -> None: return self._set('historic_state', historic_state) + @property + def node_mode(self) -> NodeMode: + return NodeMode(self._get('node_mode')) + + @node_mode.setter + def node_mode(self, node_mode: NodeMode) -> None: + return self._set('node_mode', node_mode.value) + def all(self) -> dict: return read_json(self.filepath) + + +def mark_active_node() -> None: + node_options = NodeOptions() + node_options.node_mode = NodeMode.ACTIVE + logger.info('Node marked as active.') + + +def mark_passive_node() -> None: + node_options = NodeOptions() + node_options.node_mode = NodeMode.PASSIVE + logger.info('Node marked as passive.') + + +def set_passive_node_options( + archive: bool, + indexer: bool, +) -> None: + node_options = NodeOptions() + node_options.node_mode = NodeMode.PASSIVE + node_options.archive = archive or indexer + node_options.catchup = archive or indexer + node_options.historic_state = archive + logger.info('Node options set for passive mode.') + + +class NodeModeMismatchError(Exception): + pass + + +def 
upsert_node_mode(node_mode: NodeMode | None = None) -> NodeMode: + node_options = NodeOptions() + try: + options_mode = node_options.node_mode + if node_mode is not None and options_mode != node_mode: + raise NodeModeMismatchError( + f'Cannot change node mode from {options_mode} to {node_mode}' + ) + return options_mode + except ValueError: + if node_mode is None: + raise NodeModeMismatchError('Node mode is not set') + node_options.node_mode = node_mode + return node_mode + + +def active_skale(node_type: NodeType, node_mode: NodeMode) -> bool: + return node_mode == NodeMode.ACTIVE and node_type == NodeType.SKALE + + +def active_fair(node_type: NodeType, node_mode: NodeMode) -> bool: + return node_mode == NodeMode.ACTIVE and node_type == NodeType.FAIR + + +def passive_skale(node_type: NodeType, node_mode: NodeMode) -> bool: + return node_mode == NodeMode.PASSIVE and node_type == NodeType.SKALE + + +def passive_fair(node_type: NodeType, node_mode: NodeMode) -> bool: + return node_mode == NodeMode.PASSIVE and node_type == NodeType.FAIR diff --git a/node_cli/core/resources.py b/node_cli/core/resources.py index f47ef792..34f4cac5 100644 --- a/node_cli/core/resources.py +++ b/node_cli/core/resources.py @@ -24,16 +24,24 @@ import psutil +from skale_core.types import EnvType + +from node_cli.utils.settings import validate_and_save_node_settings from node_cli.utils.docker_utils import ensure_volume from node_cli.utils.schain_types import SchainTypes -from node_cli.utils.helper import ( - write_json, read_json, run_cmd, extract_env_params, safe_load_yml -) +from node_cli.utils.helper import write_json, read_json, run_cmd, safe_load_yml +from node_cli.utils.node_type import NodeType, NodeMode from node_cli.configs import ALLOCATION_FILEPATH, STATIC_PARAMS_FILEPATH, SNAPSHOTS_SHARED_VOLUME from node_cli.configs.resource_allocation import ( - RESOURCE_ALLOCATION_FILEPATH, TIMES, TIMEOUT, - TEST_DIVIDER, SMALL_DIVIDER, MEDIUM_DIVIDER, LARGE_DIVIDER, - MEMORY_FACTOR, 
MAX_CPU_SHARES + RESOURCE_ALLOCATION_FILEPATH, + TIMES, + TIMEOUT, + TEST_DIVIDER, + SMALL_DIVIDER, + MEDIUM_DIVIDER, + LARGE_DIVIDER, + MEMORY_FACTOR, + MAX_CPU_SHARES, ) logger = logging.getLogger(__name__) @@ -50,7 +58,7 @@ def __init__(self, value, fractional=False): 'test': value / TEST_DIVIDER, 'small': value / SMALL_DIVIDER, 'medium': value / MEDIUM_DIVIDER, - 'large': value / LARGE_DIVIDER + 'large': value / LARGE_DIVIDER, } if not fractional: for k in self.values: @@ -67,10 +75,7 @@ def get_resource_allocation_info(): return None -def compose_resource_allocation_config( - env_type: str, - params_by_env_type: Dict = None -) -> Dict: +def compose_resource_allocation_config(env_type: EnvType, params_by_env_type: Dict = None) -> Dict: params_by_env_type = params_by_env_type or safe_load_yml(STATIC_PARAMS_FILEPATH) common_config = params_by_env_type['common'] schain_cpu_alloc, ima_cpu_alloc = get_cpu_alloc(common_config) @@ -78,45 +83,40 @@ def compose_resource_allocation_config( schain_allocation_data = safe_load_yml(ALLOCATION_FILEPATH) return { - 'schain': { + 'skaled': { 'cpu_shares': schain_cpu_alloc.dict(), 'mem': schain_mem_alloc.dict(), 'disk': schain_allocation_data[env_type]['disk'], 'volume_limits': schain_allocation_data[env_type]['volume_limits'], # noqa - 'leveldb_limits': schain_allocation_data[env_type]['leveldb_limits'] # noqa + 'leveldb_limits': schain_allocation_data[env_type]['leveldb_limits'], # noqa }, - 'ima': { - 'cpu_shares': ima_cpu_alloc.dict(), - 'mem': ima_mem_alloc.dict() - } + 'ima': {'cpu_shares': ima_cpu_alloc.dict(), 'mem': ima_mem_alloc.dict()}, } -def generate_resource_allocation_config(env_file, force=False) -> None: +def generate_resource_allocation_config( + env_file: str, + node_type: NodeType, + node_mode: NodeMode, + force: bool = False, +) -> None: if not force and os.path.isfile(RESOURCE_ALLOCATION_FILEPATH): - msg = 'Resource allocation file is already exists' + msg = 'Resource allocation file already exists' 
logger.debug(msg) print(msg) return - env_params = extract_env_params(env_file) - if env_params is None: - return + settings = validate_and_save_node_settings(env_file, node_type, node_mode) logger.info('Generating resource allocation file ...') try: - update_resource_allocation( - env_params['ENV_TYPE'] - ) + update_resource_allocation(settings.env_type) except Exception as e: logger.exception(e) - print('Can\'t generate resource allocation file, check out CLI logs') + print("Can't generate resource allocation file, check out CLI logs") else: - print( - f'Resource allocation file generated: ' - f'{RESOURCE_ALLOCATION_FILEPATH}' - ) + print(f'Resource allocation file generated: {RESOURCE_ALLOCATION_FILEPATH}') -def update_resource_allocation(env_type: str) -> None: +def update_resource_allocation(env_type: EnvType) -> None: resource_allocation_config = compose_resource_allocation_config(env_type) write_json(RESOURCE_ALLOCATION_FILEPATH, resource_allocation_config) @@ -151,16 +151,10 @@ def get_cpu_alloc(common_config: Dict) -> ResourceAlloc: cpu_proportions = common_config['schain']['cpu'] schain_max_cpu_shares = int(cpu_proportions['skaled'] * MAX_CPU_SHARES) ima_max_cpu_shares = int(cpu_proportions['ima'] * MAX_CPU_SHARES) - return ( - ResourceAlloc(schain_max_cpu_shares), - ResourceAlloc(ima_max_cpu_shares) - ) + return ResourceAlloc(schain_max_cpu_shares), ResourceAlloc(ima_max_cpu_shares) -def verify_disk_size( - disk_device: str, - env_configs: dict, -) -> Dict: +def verify_disk_size(disk_device: str, env_configs: dict): disk_size = get_disk_size(disk_device) env_disk_size = env_configs['server']['disk'] check_disk_size(disk_size, env_disk_size) diff --git a/node_cli/core/schains.py b/node_cli/core/schains.py index 9b6c625c..8aac42f1 100644 --- a/node_cli/core/schains.py +++ b/node_cli/core/schains.py @@ -1,3 +1,22 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025 SKALE Labs +# +# This program is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + import glob import logging import os @@ -5,36 +24,47 @@ import shutil import time from pathlib import Path - from typing import Dict, Optional +from skale_core.types import EnvType + +from lvmpy.src.core import mount, volume_mountpoint from node_cli.configs import ( ALLOCATION_FILEPATH, - NODE_CONFIG_PATH, NODE_CLI_STATUS_FILENAME, + NODE_CONFIG_PATH, SCHAIN_NODE_DATA_PATH, - SCHAINS_MNT_DIR_SYNC, + SCHAINS_MNT_DIR_SINGLE_CHAIN, ) -from node_cli.configs.env import get_env_config - -from node_cli.utils.helper import get_request, error_exit, safe_load_yml +from node_cli.utils.docker_utils import ensure_volume, is_volume_exists from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import ( + cleanup_dir_content, + error_exit, + get_request, + is_btrfs_subvolume, + read_json, + run_cmd, + safe_load_yml, + save_json, +) +from node_cli.utils.node_type import NodeType from node_cli.utils.print_formatters import ( print_dkg_statuses, print_firewall_rules, print_schain_info, print_schains, ) -from node_cli.utils.docker_utils import ensure_volume, is_volume_exists -from node_cli.utils.helper import read_json, run_cmd, save_json -from lvmpy.src.core import mount, volume_mountpoint - logger = logging.getLogger(__name__) BLUEPRINT_NAME = 'schains' +class NoDataDirForChainError(Exception): + """Raised when no data directory is found""" + + def 
get_schain_firewall_rules(schain: str) -> None: status, payload = get_request( blueprint=BLUEPRINT_NAME, method='firewall-rules', params={'schain_name': schain} @@ -182,11 +212,12 @@ def fillin_snapshot_folder(src_path: str, block_number: int) -> None: def restore_schain_from_snapshot( - schain: str, snapshot_path: str, env_type: Optional[str] = None, schain_type: str = 'medium' + schain: str, + snapshot_path: str, + node_type: NodeType, + env_type: EnvType, + schain_type: str = 'medium', ) -> None: - if env_type is None: - env_config = get_env_config() - env_type = env_config['ENV_TYPE'] ensure_schain_volume(schain, schain_type, env_type) block_number = get_block_number_from_path(snapshot_path) if block_number == -1: @@ -209,12 +240,12 @@ def get_schains_by_artifacts() -> str: return '\n'.join(os.listdir(SCHAIN_NODE_DATA_PATH)) -def get_schain_volume_size(schain_type: str, env_type: str) -> int: +def get_schain_volume_size(schain_type: str, env_type: EnvType) -> int: alloc = safe_load_yml(ALLOCATION_FILEPATH) return alloc[env_type]['disk'][schain_type] -def ensure_schain_volume(schain: str, schain_type: str, env_type: str) -> None: +def ensure_schain_volume(schain: str, schain_type: str, env_type: EnvType) -> None: if not is_volume_exists(schain): size = get_schain_volume_size(schain_type, env_type) ensure_volume(schain, size) @@ -222,25 +253,62 @@ def ensure_schain_volume(schain: str, schain_type: str, env_type: str) -> None: logger.warning('Volume %s already exists', schain) -def cleanup_sync_datadir(schain_name: str, base_path: str = SCHAINS_MNT_DIR_SYNC) -> None: - base_path = os.path.join(base_path, schain_name) - regular_folders_pattern = f'{base_path}/[!snapshots]*' - logger.info('Removing regular folders') - for filepath in glob.glob(regular_folders_pattern): - if os.path.isdir(filepath): - logger.debug('Removing recursively %s', filepath) - shutil.rmtree(filepath) - if os.path.isfile(filepath): - os.remove(filepath) - - logger.info('Removing subvolumes') 
- subvolumes_pattern = f'{base_path}/snapshots/*/*' - for filepath in glob.glob(subvolumes_pattern): - logger.debug('Deleting subvolume %s', filepath) - if os.path.isdir(filepath): - rm_btrfs_subvolume(filepath) - else: - os.remove(filepath) - logger.info('Cleaning up snapshots folder') - if os.path.isdir(base_path): - shutil.rmtree(base_path) +def cleanup_datadir_content(datadir_path: str) -> None: + regular_folders_pattern = f'{datadir_path}/[!snapshots]*' + logger.info('Removing regular folders of %s', datadir_path) + for path in glob.glob(regular_folders_pattern): + logger.debug('Removing recursively %s', path) + if os.path.isfile(path): + logger.debug('Deleting file in datadir: %s', path) + os.remove(path) + if os.path.isdir(path): + logger.debug('Deleting folder in datadir: %s', path) + shutil.rmtree(path) + + logger.info('Removing subvolumes of %s', datadir_path) + subvolumes_pattern = f'{datadir_path}/snapshots/*/*' + for path in glob.glob(subvolumes_pattern): + if is_btrfs_subvolume(path): + logger.debug('Deleting subvolume %s', path) + rm_btrfs_subvolume(path) + if os.path.isfile(path): + logger.debug('Deleting file in snapshots directory: %s', path) + os.remove(path) + if os.path.isdir(path): + logger.debug('Deleting folder in snapshots directory %s', path) + shutil.rmtree(path) + + shutil.rmtree(os.path.join(datadir_path, 'snapshots'), ignore_errors=True) + + +def cleanup_no_lvm_datadir( + chain_name: str = '', base_path: str = SCHAINS_MNT_DIR_SINGLE_CHAIN +) -> None: + if chain_name: + folders = [chain_name] + else: + folders = [f for f in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, f))] + if not folders: + raise NoDataDirForChainError( + f'No data directory found in {base_path}. ' + 'Please check the path or specify a chain name.' 
+ ) + for folder_name in folders: + folder_path = os.path.join(base_path, folder_name) + if folder_name != 'shared-space': + logger.info('Removing datadir content for %s', folder_path) + cleanup_datadir_content(folder_path) + if os.path.isdir(folder_path): + shutil.rmtree(folder_path) + run_cmd(['umount', base_path]) + + +def cleanup_lvm_datadir(): + logger.info('Starting cleanup for active node...') + logger.info('Unmounting /mnt/schains-shared-space...') + run_cmd(['sudo', 'umount', '/mnt/schains-shared-space'], check_code=False) + logger.info('Cleaning up /mnt directory content...') + cleanup_dir_content('/mnt/') + logger.info('Removing LVM volume group "schains"...') + run_cmd(['sudo', 'lvremove', '-f', 'schains'], check_code=False) + logger.info('Active node cleanup finished.') diff --git a/node_cli/core/ssl/check.py b/node_cli/core/ssl/check.py index a5c7a0a5..327f72a6 100644 --- a/node_cli/core/ssl/check.py +++ b/node_cli/core/ssl/check.py @@ -29,7 +29,7 @@ DEFAULT_SSL_CHECK_PORT, SKALED_SSL_TEST_SCRIPT, SSL_CERT_FILEPATH, - SSL_KEY_FILEPATH + SSL_KEY_FILEPATH, ) @@ -42,13 +42,12 @@ def check_cert( port=DEFAULT_SSL_CHECK_PORT, check_type='all', no_client=False, - no_wss=False + no_wss=False, ): if check_type in ('all', 'openssl'): try: check_cert_openssl( - cert_path, key_path, - host='127.0.0.1', port=port, no_client=no_client + cert_path, key_path, host='127.0.0.1', port=port, no_client=no_client ) except Exception as err: logger.exception('Cerificate/key pair is incorrect') @@ -56,10 +55,7 @@ def check_cert( if check_type in ('skaled',): try: - check_cert_skaled( - cert_path, key_path, - host='127.0.0.1', port=port, no_wss=no_wss - ) + check_cert_skaled(cert_path, key_path, host='127.0.0.1', port=port, no_wss=no_wss) except Exception as err: logger.exception('Certificate/key pair is incorrect for skaled') return 'error', f'Skaled ssl check failed. 
{err}' @@ -73,12 +69,9 @@ def check_cert_openssl( host='127.0.0.1', port=DEFAULT_SSL_CHECK_PORT, no_client=False, - silent=False + silent=False, ): - with openssl_server( - host, port, cert_path, - key_path, silent=silent - ) as serv: + with openssl_server(host, port, cert_path, key_path, silent=silent) as serv: time.sleep(1) code = serv.poll() if code is not None: @@ -90,9 +83,7 @@ def check_cert_openssl( # Connect to ssl server if not no_client: if not check_endpoint(host, port): - raise SSLHealthcheckError( - f'Healthcheck port is closed on {host}:{port}' - ) + raise SSLHealthcheckError(f'Healthcheck port is closed on {host}:{port}') check_ssl_connection(host, port, silent=silent) logger.info('Healthcheck connection passed') @@ -100,28 +91,29 @@ def check_cert_openssl( @contextmanager def openssl_server(host, port, cert_path, key_path, silent=False): ssl_server_cmd = [ - 'openssl', 's_server', - '-cert', cert_path, - '-cert_chain', cert_path, - '-key', key_path, + 'openssl', + 's_server', + '-cert', + cert_path, + '-cert_chain', + cert_path, + '-key', + key_path, '-WWW', - '-accept', f'{host}:{port}', - '-verify_return_error', '-verify', '1' + '-accept', + f'{host}:{port}', + '-verify_return_error', + '-verify', + '1', ] logger.info(f'Staring healthcheck server on port {port} ...') expose_output = not silent - with detached_subprocess( - ssl_server_cmd, expose_output=expose_output - ) as dp: + with detached_subprocess(ssl_server_cmd, expose_output=expose_output) as dp: yield dp def check_cert_skaled( - cert_path, - key_path, - host='127.0.0.1', - port=DEFAULT_SSL_CHECK_PORT, - no_wss=False + cert_path, key_path, host='127.0.0.1', port=DEFAULT_SSL_CHECK_PORT, no_wss=False ): run_skaled_https_healthcheck(cert_path, key_path, host, port) if not no_wss: @@ -129,17 +121,18 @@ def check_cert_skaled( def run_skaled_https_healthcheck( - cert_path, - key_path, - host='127.0.0.1', - port=DEFAULT_SSL_CHECK_PORT + cert_path, key_path, host='127.0.0.1', 
port=DEFAULT_SSL_CHECK_PORT ): skaled_https_check_cmd = [ SKALED_SSL_TEST_SCRIPT, - '--ssl-cert', cert_path, - '--ssl-key', key_path, - '--bind', host, - '--port', str(port) + '--ssl-cert', + cert_path, + '--ssl-key', + key_path, + '--bind', + host, + '--port', + str(port), ] with detached_subprocess(skaled_https_check_cmd, expose_output=True) as dp: time.sleep(1) @@ -148,24 +141,23 @@ def run_skaled_https_healthcheck( logger.info('Skaled https check server successfully started') else: logger.error('Skaled https check server was failed to start') - raise SSLHealthcheckError( - 'Skaled https check was failed') + raise SSLHealthcheckError('Skaled https check was failed') -def run_skaled_wss_healthcheck( - cert_path, - key_path, - host='127.0.0.1', - port=DEFAULT_SSL_CHECK_PORT -): +def run_skaled_wss_healthcheck(cert_path, key_path, host='127.0.0.1', port=DEFAULT_SSL_CHECK_PORT): skaled_wss_check_cmd = [ SKALED_SSL_TEST_SCRIPT, - '--ssl-cert', cert_path, - '--ssl-key', key_path, - '--bind', host, - '--port', str(port), - '--proto', 'wss', - '--echo' + '--ssl-cert', + cert_path, + '--ssl-key', + key_path, + '--bind', + host, + '--port', + str(port), + '--proto', + 'wss', + '--echo', ] with detached_subprocess(skaled_wss_check_cmd, expose_output=True) as dp: @@ -173,8 +165,7 @@ def run_skaled_wss_healthcheck( code = dp.poll() if code is not None: logger.error('Skaled wss check server was failed to start') - raise SSLHealthcheckError( - 'Skaled wss check was failed') + raise SSLHealthcheckError('Skaled wss check was failed') else: logger.info('Skaled wss check server successfully started') @@ -196,9 +187,13 @@ def check_endpoint(host, port): def check_ssl_connection(host, port, silent=False): logger.info(f'Connecting to public ssl endpoint {host}:{port} ...') ssl_check_cmd = [ - 'openssl', 's_client', - '-connect', f'{host}:{port}', - '-verify_return_error', '-verify', '2' + 'openssl', + 's_client', + '-connect', + f'{host}:{port}', + '-verify_return_error', + 
'-verify', + '2', ] expose_output = not silent with detached_subprocess(ssl_check_cmd, expose_output=expose_output) as dp: diff --git a/node_cli/core/ssl/status.py b/node_cli/core/ssl/status.py index 31ab4b9a..9b035b1d 100644 --- a/node_cli/core/ssl/status.py +++ b/node_cli/core/ssl/status.py @@ -38,10 +38,9 @@ def cert_status(): if status == 'error': return err_result(CERTS_INVALID_FORMAT) else: - return ok_result(payload={ - 'issued_to': info['issued_to'], - 'expiration_date': info['expiration_date'] - }) + return ok_result( + payload={'issued_to': info['issued_to'], 'expiration_date': info['expiration_date']} + ) def get_cert_info(cert): @@ -50,14 +49,10 @@ def get_cert_info(cert): subject = crypto_cert.get_subject() issued_to = subject.CN expiration_date_raw = crypto_cert.get_notAfter() - expiration_date = parser.parse( - expiration_date_raw - ).strftime('%Y-%m-%dT%H:%M:%S') + expiration_date = parser.parse(expiration_date_raw).strftime('%Y-%m-%dT%H:%M:%S') except Exception as err: logger.exception('Error during parsing certs') return err_result(str(err)) - return ok_result({ - 'subject': subject, - 'issued_to': issued_to, - 'expiration_date': expiration_date - }) + return ok_result( + {'subject': subject, 'issued_to': issued_to, 'expiration_date': expiration_date} + ) diff --git a/node_cli/core/ssl/utils.py b/node_cli/core/ssl/utils.py index c11ebda8..c80b3a8a 100644 --- a/node_cli/core/ssl/utils.py +++ b/node_cli/core/ssl/utils.py @@ -17,13 +17,13 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+import logging import os import shutil -import logging import subprocess from contextlib import contextmanager -from node_cli.configs.ssl import SSL_CERT_FILEPATH, SSL_KEY_FILEPATH, SSL_FOLDER_PATH +from node_cli.configs.ssl import SSL_CERT_FILEPATH, SSL_FOLDER_PATH, SSL_KEY_FILEPATH logger = logging.getLogger(__name__) @@ -50,8 +50,10 @@ def detached_subprocess(cmd, expose_output=False): logger.debug(f'Starting detached subprocess: {cmd}') p = subprocess.Popen( cmd, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL, - encoding='utf-8' + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + stdin=subprocess.DEVNULL, + encoding='utf-8', ) try: yield p diff --git a/node_cli/core/static_config.py b/node_cli/core/static_config.py new file mode 100644 index 00000000..06e0e7f4 --- /dev/null +++ b/node_cli/core/static_config.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025 SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import os + +import yaml + +from node_cli.configs import ( + CONTAINER_CONFIG_PATH, + FAIR_STATIC_PARAMS_FILEPATH, + STATIC_PARAMS_FILEPATH, +) +from node_cli.utils.node_type import NodeType + +from skale_core.types import EnvType + + +def get_static_params( + node_type: NodeType, + env_type: EnvType = 'mainnet', + config_path: str = CONTAINER_CONFIG_PATH, +) -> dict: + if node_type == NodeType.FAIR: + static_params_base_filepath = FAIR_STATIC_PARAMS_FILEPATH + else: + static_params_base_filepath = STATIC_PARAMS_FILEPATH + + static_params_filename = os.path.basename(static_params_base_filepath) + static_params_filepath = os.path.join(config_path, static_params_filename) + with open(static_params_filepath) as requirements_file: + ydata = yaml.load(requirements_file, Loader=yaml.Loader) + return ydata['envs'][env_type] + + +def get_fair_chain_name(env_type: EnvType) -> str: + node_type = NodeType.FAIR + params = get_static_params(node_type, env_type) + return params['info']['chain_name'] diff --git a/node_cli/core/wallet.py b/node_cli/core/wallet.py index d7d129ec..467b675b 100644 --- a/node_cli/core/wallet.py +++ b/node_cli/core/wallet.py @@ -18,11 +18,13 @@ # along with this program. If not, see . 
import json +import logging -from node_cli.utils.print_formatters import print_wallet_info, TEXTS -from node_cli.utils.helper import error_exit, get_request, post_request, logger from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import error_exit, get_request, post_request +from node_cli.utils.print_formatters import TEXTS, print_fair_wallet_info, print_wallet_info +logger = logging.getLogger(__name__) BLUEPRINT_NAME = 'wallet' @@ -33,16 +35,19 @@ def get_wallet_info(_format): if _format == 'json': print(json.dumps(payload)) else: - print_wallet_info(payload) + if type(payload) is str: + print(payload) + elif type(payload) is dict: + if payload.get('skale_balance'): + print_wallet_info(payload) + else: + print_fair_wallet_info(payload) else: error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) def send_eth(address: str, amount: float): - json_data = { - 'address': address, - 'amount': amount - } + json_data = {'address': address, 'amount': amount} status, payload = post_request(BLUEPRINT_NAME, 'send-eth', json=json_data) if status == 'ok': msg = TEXTS['wallet']['successful_transfer'] diff --git a/tests/.skale/node_data/.gitkeep b/node_cli/fair/__init__.py similarity index 100% rename from tests/.skale/node_data/.gitkeep rename to node_cli/fair/__init__.py diff --git a/node_cli/fair/active.py b/node_cli/fair/active.py new file mode 100644 index 00000000..2ec42bde --- /dev/null +++ b/node_cli/fair/active.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + + +import logging +import time +from typing import cast + +from node_cli.configs import DEFAULT_SKALED_BASE_PORT, RESTORE_SLEEP_TIMEOUT +from node_cli.core.host import is_node_inited +from node_cli.core.node import compose_node_env, is_base_containers_alive +from node_cli.operations import ( + FairUpdateType, + restore_fair_op, + update_fair_op, +) +from node_cli.utils.decorators import check_inited, check_not_inited, check_user +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import error_exit, get_request, post_request +from node_cli.utils.node_type import NodeMode, NodeType +from node_cli.utils.print_formatters import print_node_cmd_error, print_node_info_fair +from node_cli.utils.settings import validate_and_save_node_settings +from node_cli.utils.texts import safe_load_texts + +logger = logging.getLogger(__name__) +TEXTS = safe_load_texts() + +BLUEPRINT_NAME = 'fair-node' + + +def get_node_info_plain() -> dict: + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='info') + node_payload: dict = cast(dict, payload) + if status == 'ok': + return node_payload['node'] + else: + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +def get_node_info(format): + node_info = get_node_info_plain() + if format == 'json': + print(node_info) + else: + print_node_info_fair(node_info) + + +@check_inited +@check_user +def migrate_from_boot( + config_file: str, +) -> None: + logger.info('Migrating from boot to fair node...') + settings = validate_and_save_node_settings(config_file, NodeType.FAIR, NodeMode.ACTIVE) + compose_env = 
compose_node_env(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + migrate_ok = update_fair_op( + settings=settings, + compose_env=compose_env, + node_mode=NodeMode.ACTIVE, + update_type=FairUpdateType.FROM_BOOT, + force_skaled_start=False, + ) + alive = is_base_containers_alive(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + if not migrate_ok or not alive: + print_node_cmd_error() + return + else: + logger.info('Migration from boot to fair completed successfully') + + +@check_inited +@check_user +def register(ip: str) -> None: + if not is_node_inited(): + print(TEXTS['fair']['node']['not_inited']) + return + + json_data = {'ip': ip, 'port': DEFAULT_SKALED_BASE_PORT} + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='register', json=json_data) + if status == 'ok': + msg = TEXTS['fair']['node']['registered'] + logger.info(msg) + print(msg) + else: + error_msg = payload + logger.error(f'Registration error {error_msg}') + error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +@check_inited +@check_user +def change_ip(ip: str) -> None: + if not is_node_inited(): + print(TEXTS['fair']['node']['not_inited']) + return + + json_data = {'ip': ip, 'port': DEFAULT_SKALED_BASE_PORT} + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='change-ip', json=json_data) + if status == 'ok': + msg = TEXTS['fair']['node']['ip_changed'] + logger.info(msg) + print(msg) + else: + error_msg = payload + logger.error(f'Change IP error {error_msg}') + error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +@check_inited +@check_user +def exit() -> None: + if not is_node_inited(): + print(TEXTS['fair']['node']['not_inited']) + return + + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='exit', json={}) + if status == 'ok': + msg = TEXTS['fair']['node']['exited'] + logger.info(msg) + print(msg) + else: + error_msg = payload + logger.error(f'Node exit error {error_msg}') + error_exit(error_msg, 
exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +@check_not_inited +def restore(backup_path: str, config_file: str, config_only: bool = False): + node_mode = NodeMode.ACTIVE + settings = validate_and_save_node_settings(config_file, NodeType.FAIR, node_mode) + compose_env = compose_node_env(node_type=NodeType.FAIR, node_mode=node_mode) + + restored_ok = restore_fair_op( + node_mode=node_mode, + settings=settings, + compose_env=compose_env, + backup_path=backup_path, + config_only=config_only, + ) + if not restored_ok: + error_exit('Restore operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) + time.sleep(RESTORE_SLEEP_TIMEOUT) + print('Fair node is restored from backup') + + +@check_inited +@check_user +def set_domain_name(domain_name): + if not is_node_inited(): + print(TEXTS['fair']['node']['not_inited']) + return + + status, payload = post_request( + blueprint=BLUEPRINT_NAME, method='set-domain-name', json={'domain_name': domain_name} + ) + if status == 'ok': + msg = TEXTS['node']['domain_name_changed'] + logger.info(msg) + print(msg) + else: + error_msg = payload + logger.error(f'Setting domain name error {error_msg}') + error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE) diff --git a/node_cli/fair/boot.py b/node_cli/fair/boot.py new file mode 100644 index 00000000..d9da98fa --- /dev/null +++ b/node_cli/fair/boot.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + + +import logging +import time + +from node_cli.configs import TM_INIT_TIMEOUT +from node_cli.core.node import compose_node_env, is_base_containers_alive +from node_cli.core.node_options import upsert_node_mode +from node_cli.operations import init_fair_boot_op, update_fair_boot_op +from node_cli.utils.decorators import check_inited, check_not_inited, check_user +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import error_exit +from node_cli.utils.node_type import NodeMode, NodeType +from node_cli.utils.print_formatters import print_node_cmd_error +from node_cli.utils.settings import validate_and_save_node_settings + +logger = logging.getLogger(__name__) + + +@check_not_inited +def init(config_file: str) -> None: + node_mode = NodeMode.ACTIVE + node_type = NodeType.FAIR + settings = validate_and_save_node_settings(config_file, node_type, node_mode) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + + init_fair_boot_op(settings=settings, compose_env=compose_env, node_mode=node_mode) + logger.info('Waiting for fair containers initialization') + time.sleep(TM_INIT_TIMEOUT) + if not is_base_containers_alive(node_type=node_type, node_mode=node_mode, is_fair_boot=True): + error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) + logger.info('Init fair procedure finished') + + +@check_inited +@check_user +def update(config_file: str, pull_config_for_schain: str) -> None: + logger.info('Fair boot node update started') + node_mode = upsert_node_mode(node_mode=NodeMode.ACTIVE) + settings = validate_and_save_node_settings(config_file, NodeType.FAIR, node_mode) + compose_env = compose_node_env(node_type=NodeType.FAIR, node_mode=node_mode) + migrate_ok = update_fair_boot_op( + settings=settings, + 
compose_env=compose_env, + node_mode=NodeMode.ACTIVE, + ) + if migrate_ok: + logger.info('Waiting for containers initialization') + time.sleep(TM_INIT_TIMEOUT) + alive = is_base_containers_alive( + node_type=NodeType.FAIR, node_mode=node_mode, is_fair_boot=True + ) + if not migrate_ok or not alive: + print_node_cmd_error() + return + else: + logger.info('Fair boot node update finished successfully!') diff --git a/node_cli/fair/chain.py b/node_cli/fair/chain.py new file mode 100644 index 00000000..8ca380ae --- /dev/null +++ b/node_cli/fair/chain.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import json +from typing import Any, Dict + +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import error_exit, get_request +from node_cli.utils.print_formatters import print_chain_record, print_chain_checks + +BLUEPRINT_NAME = 'fair-chain' + + +def get_chain_record_plain() -> Dict[str, Any]: + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='record') + if status == 'ok': + if isinstance(payload, dict): + return payload.get('record', {}) + else: + error_exit('Invalid response format', exit_code=CLIExitCodes.BAD_API_RESPONSE) + else: + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +def get_chain_record(raw: bool = False) -> None: + record = get_chain_record_plain() + if raw: + print(json.dumps(record, indent=4)) + else: + print_chain_record(record) + + +def get_chain_checks_plain() -> Dict[str, Any]: + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='checks') + if status == 'ok': + if isinstance(payload, dict): + return payload + else: + error_exit('Invalid response format', exit_code=CLIExitCodes.BAD_API_RESPONSE) + else: + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +def get_chain_checks(raw: bool = False) -> None: + checks = get_chain_checks_plain() + if raw: + print(json.dumps(checks, indent=4)) + else: + print_chain_checks(checks) diff --git a/node_cli/fair/common.py b/node_cli/fair/common.py new file mode 100644 index 00000000..98f10a7f --- /dev/null +++ b/node_cli/fair/common.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import logging +import time + +from node_cli.configs import INIT_TIMEOUT, TM_INIT_TIMEOUT +from node_cli.core.node import compose_node_env, is_base_containers_alive +from node_cli.core.node_options import upsert_node_mode +from node_cli.fair.passive import setup_fair_passive +from node_cli.operations import ( + FairUpdateType, + cleanup_fair_op, + init_fair_op, + repair_fair_op, + turn_off_op, + turn_on_op, + update_fair_op, +) +from node_cli.utils.decorators import check_inited, check_not_inited, check_user +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import error_exit +from node_cli.utils.node_type import NodeMode, NodeType +from node_cli.utils.print_formatters import print_node_cmd_error +from node_cli.utils.settings import validate_and_save_node_settings +from node_cli.utils.texts import safe_load_texts +from skale_core.settings import get_settings + +logger = logging.getLogger(__name__) +TEXTS = safe_load_texts() + + +@check_not_inited +def init( + node_mode: NodeMode, + config_file: str, + node_id: int | None = None, + indexer: bool = False, + archive: bool = False, + snapshot: str | None = None, +) -> None: + settings = validate_and_save_node_settings(config_file, NodeType.FAIR, node_mode) + compose_env = compose_node_env(node_type=NodeType.FAIR, node_mode=node_mode) + + init_ok = init_fair_op( + settings=settings, + compose_env=compose_env, + node_mode=node_mode, + indexer=indexer, + archive=archive, + snapshot=snapshot, + ) + if not init_ok: + error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) + 
time.sleep(INIT_TIMEOUT) + + if node_mode == NodeMode.PASSIVE and node_id is not None: + setup_fair_passive(node_id) + + print('Fair node is initialized') + + +@check_inited +@check_user +def cleanup(node_mode: NodeMode, prune: bool = False) -> None: + node_mode = upsert_node_mode(node_mode=node_mode) + compose_env = compose_node_env(node_type=NodeType.FAIR, node_mode=node_mode) + cleanup_fair_op(node_mode=node_mode, compose_env=compose_env, prune=prune) + logger.info('Fair node was cleaned up, all containers and data removed') + + +@check_inited +@check_user +def update( + node_mode: NodeMode, + config_file: str, + pull_config_for_schain: str | None = None, + force_skaled_start: bool = False, +) -> None: + logger.info( + 'Updating fair node: %s, pull_config_for_schain: %s, force_skaled_start: %s', + config_file, + pull_config_for_schain, + force_skaled_start, + ) + node_mode = upsert_node_mode(node_mode=node_mode) + + settings = validate_and_save_node_settings(config_file, NodeType.FAIR, node_mode) + compose_env = compose_node_env(node_type=NodeType.FAIR, node_mode=node_mode) + update_ok = update_fair_op( + settings=settings, + compose_env=compose_env, + node_mode=node_mode, + update_type=FairUpdateType.REGULAR, + force_skaled_start=force_skaled_start, + ) + alive = is_base_containers_alive(node_type=NodeType.FAIR, node_mode=node_mode) + if not update_ok or not alive: + print_node_cmd_error() + return + else: + logger.info('Fair update completed successfully') + + +def repair_chain(snapshot_from: str = 'any') -> None: + settings = get_settings() + repair_fair_op(env_type=settings.env_type, snapshot_from=snapshot_from) + + +@check_inited +@check_user +def turn_off(node_type: NodeType) -> None: + node_mode = upsert_node_mode() + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + turn_off_op(compose_env=compose_env, node_type=node_type, node_mode=node_mode) + + +@check_inited +@check_user +def turn_on(env_file: str, node_type: NodeType) -> 
None: + node_mode = upsert_node_mode() + settings = validate_and_save_node_settings(env_file, node_type, node_mode) + compose_env = compose_node_env(node_type=node_type, node_mode=node_mode) + turn_on_op(settings=settings, compose_env=compose_env, node_type=node_type, node_mode=node_mode) + logger.info('Waiting for containers initialization') + time.sleep(TM_INIT_TIMEOUT) + if not is_base_containers_alive(node_type=node_type, node_mode=node_mode): + print_node_cmd_error() + return + logger.info('Node turned on') diff --git a/node_cli/fair/passive.py b/node_cli/fair/passive.py new file mode 100644 index 00000000..dcd175d9 --- /dev/null +++ b/node_cli/fair/passive.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import logging + +from node_cli.core.host import is_node_inited +from node_cli.utils.texts import safe_load_texts +from node_cli.utils.helper import error_exit, post_request +from node_cli.utils.exit_codes import CLIExitCodes + +logger = logging.getLogger(__name__) +TEXTS = safe_load_texts() +BLUEPRINT_NAME = 'fair-node-passive' + + +def setup_fair_passive(node_id: int) -> None: + if not is_node_inited(): + print(TEXTS['fair']['node']['not_inited']) + return + + json_data = {'id': node_id} + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='setup', json=json_data) + if status == 'ok': + msg = TEXTS['fair']['node']['setup_complete'] + logger.info(msg) + print(msg) + else: + error_msg = payload + logger.error(f'Setup error {error_msg}') + error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE) diff --git a/node_cli/fair/record/__init__.py b/node_cli/fair/record/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/node_cli/fair/record/chain_record.py b/node_cli/fair/record/chain_record.py new file mode 100644 index 00000000..b6d48886 --- /dev/null +++ b/node_cli/fair/record/chain_record.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# +# This file is part of Node cli +# +# Copyright (C) 2025 SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ + +import logging +from typing import cast +from datetime import datetime + +from skale_core.types import EnvType + +from node_cli.core.static_config import get_fair_chain_name +from node_cli.fair.record.redis_record import FlatRedisRecord, FieldInfo + +logger = logging.getLogger(__name__) + + +CHAIN_RECORD_FIELDS: dict[str, FieldInfo] = { + 'name': FieldInfo('name', str, ''), + 'config_version': FieldInfo('config_version', str, '0.0.0'), + 'repair_date': FieldInfo('repair_date', datetime, datetime.fromtimestamp(0)), + 'repair_ts': FieldInfo('repair_ts', int, None), + 'snapshot_from': FieldInfo('snapshot_from', str, None), + 'force_skaled_start': FieldInfo('force_skaled_start', bool, False), +} + + +class ChainRecord(FlatRedisRecord): + def _record_fields(self) -> dict[str, FieldInfo]: + return CHAIN_RECORD_FIELDS + + @property + def config_version(self) -> str: + return cast(str, self._get_field('config_version')) + + @property + def repair_date(self) -> datetime: + return cast(datetime, self._get_field('repair_date')) + + @property + def snapshot_from(self) -> str | None: + return cast(str | None, self._get_field('snapshot_from')) + + @property + def repair_ts(self) -> int | None: + return cast(int | None, self._get_field('repair_ts')) + + @property + def force_skaled_start(self) -> bool: + return cast(bool, self._get_field('force_skaled_start')) + + def set_config_version(self, version: str) -> None: + self._set_field('config_version', version) + + def set_repair_date(self, date: datetime) -> None: + self._set_field('repair_date', date) + + def set_snapshot_from(self, value: str | None) -> None: + self._set_field('snapshot_from', value) + + def set_repair_ts(self, value: int | None) -> None: + self._set_field('repair_ts', value) + + def set_force_skaled_start(self, value: bool) -> None: + self._set_field('force_skaled_start', value) + + +def get_fair_chain_record(env_type: EnvType) -> ChainRecord: + return ChainRecord(get_fair_chain_name(env_type)) + + +def 
migrate_chain_record(env_type: EnvType, node_version: str) -> None: + logger.info('Migrating fair chain record, setting config version to %s', node_version) + record = get_fair_chain_record(env_type) + record.set_config_version(node_version) + + +def update_chain_record(env_type: EnvType, force_skaled_start: bool) -> None: + record = get_fair_chain_record(env_type) + record.set_force_skaled_start(force_skaled_start) + logger.info('Updated fair chain record with force_skaled_start=%s', force_skaled_start) diff --git a/node_cli/fair/record/redis_record.py b/node_cli/fair/record/redis_record.py new file mode 100644 index 00000000..f40aa898 --- /dev/null +++ b/node_cli/fair/record/redis_record.py @@ -0,0 +1,154 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import abc +import logging +from dataclasses import dataclass +from datetime import datetime +from typing import Any + +import redis + +from node_cli.configs import REDIS_URI + +logger = logging.getLogger(__name__) + +cpool: redis.ConnectionPool = redis.ConnectionPool.from_url(REDIS_URI) +rs: redis.Redis = redis.Redis(connection_pool=cpool) + + +@dataclass +class FieldInfo: + name: str + type: type + default: str | int | bool | datetime | None + + +class FlatRedisRecord: + def __init__(self, name: str): + self.name = name + if not self._exists(): + self._set_defaults() + self._save() + + def to_dict(self) -> dict: + return self.mget(*self._record_fields().keys()) + + def mget(self, *args) -> dict[str, Any]: + key_names = [self._get_field_key(field_name) for field_name in args] + raw_res = rs.mget(*key_names) + return { + key_name: self._deserialize_field(value, self._record_fields()[key_name].type) + for value, key_name in zip(raw_res, args) + } + + def mset(self, **kwargs) -> None: + key_names = [self._get_field_key(field_name) for field_name in kwargs.keys()] + values = [ + self._serialize_field(value, self._record_fields()[field_name].type) + for value, field_name in zip(kwargs.values(), kwargs.keys()) + ] + rs.mset(dict(zip(key_names, values))) + + def delete(self) -> None: + rs.delete(*self._key_names()) + + def _get_field_key(self, field_name: str) -> str: + return f'{self.name}_{field_name}' + + def _serialize_datetime(self, dt: datetime) -> str: + return dt.isoformat() + + def _deserialize_datetime(self, value: str) -> datetime: + return datetime.fromisoformat(value) + + def _get_field(self, field_name: str): + key = self._get_field_key(field_name) + value = rs.get(key) + return self._deserialize_field(value, self._record_fields()[field_name].type) + + def _set_field(self, field_name: str, value) -> None: + key = self._get_field_key(field_name) + serialized_value = self._serialize_field(value, self._record_fields()[field_name].type) + 
logger.info('Setting field %s to value %s', field_name, serialized_value)
+        rs.set(key, serialized_value)
+
+    def _deserialize_field(self, value, field_type: type):
+        if value is None:
+            return None
+        val = value.decode('utf-8')
+        if field_type is datetime:
+            return self._deserialize_datetime(val)
+        elif field_type is bool:
+            return bool(int(val))
+        elif field_type is int:
+            return int(val)
+        else:
+            return val
+
+    def _serialize_field(self, value, field_type: type):
+        if field_type is datetime:
+            return self._serialize_datetime(value)
+        elif field_type is bool:
+            return int(value)
+        elif field_type is int:
+            return value
+        else:
+            return str(value)
+
+    def _key_names(self) -> list[str]:
+        return [self._get_field_key(field_name) for field_name in self._record_fields().keys()]
+
+    def _set_defaults(self) -> None:
+        record_fields = self._record_fields()
+        defaults_to_set = {
+            field_name: field_info.default
+            for field_name, field_info in record_fields.items()
+            if field_info.default is not None
+        }
+        if defaults_to_set:
+            self.mset(**defaults_to_set)
+
+    def _exists(self) -> bool:
+        return rs.exists(self._get_field_key('name')) > 0
+
+    def _save(self) -> None:
+        self._set_field('name', self.name)
+
+    @abc.abstractmethod
+    def _record_fields(self) -> dict[str, FieldInfo]:
+        """Return a mapping of field names to FieldInfo objects describing the record's fields."""
+
+    @classmethod
+    def _redis_key_to_field_name(cls, key: bytes) -> str:
+        return key[:-5].decode('utf-8')
+
+    @classmethod
+    def find_all(cls) -> list['FlatRedisRecord']:
+        name_keys = rs.keys('*_name')
+        records = []
+        for key in name_keys:
+            chain_name = cls._redis_key_to_field_name(key)
+            records.append(cls(chain_name))
+        return records
+
+    def __eq__(self, other) -> bool:
+        if not isinstance(other, FlatRedisRecord):
+            return False
+        return self.name == other.name
diff --git a/node_cli/fair/staking.py b/node_cli/fair/staking.py
new file mode 100644
index 00000000..95b93adf
--- /dev/null
+++ 
b/node_cli/fair/staking.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +from typing import Any +import json +from datetime import datetime, timezone + +from node_cli.utils.decorators import check_inited +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import error_exit, post_request + +BLUEPRINT_NAME = 'fair-staking' + + +def _handle_response(status: str, payload: Any, success: str | None = None) -> None: + if status == 'ok': + print(success if success is not None else 'OK') + else: + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +@check_inited +def add_allowed_receiver(receiver: str) -> None: + status, payload = post_request( + blueprint=BLUEPRINT_NAME, method='add-receiver', json={'receiver': receiver} + ) + _handle_response(status, payload, success=f'Allowed receiver added: {receiver}') + + +@check_inited +def remove_allowed_receiver(receiver: str) -> None: + status, payload = post_request( + blueprint=BLUEPRINT_NAME, method='remove-receiver', json={'receiver': receiver} + ) + _handle_response(status, payload, success=f'Allowed receiver removed: {receiver}') + + +@check_inited +def request_fees(amount: float | None) -> None: + json_data: dict[str, Any] = {} + if amount is not None: + json_data['amount'] = 
amount + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='request-fees', json=json_data) + _handle_response( + status, + payload, + success='All fees requested' if amount is None else f'Fees requested: {amount}', + ) + + +@check_inited +def request_send_fees(to: str, amount: float | None) -> None: + json_data: dict[str, Any] = {'to': to} + if amount is not None: + json_data['amount'] = amount + status, payload = post_request( + blueprint=BLUEPRINT_NAME, method='request-send-fees', json=json_data + ) + _handle_response(status, payload, success=f'Fees request to send to {to} created') + + +@check_inited +def set_fee_rate(fee_rate: int) -> None: + status, payload = post_request( + blueprint=BLUEPRINT_NAME, method='set-fee-rate', json={'feeRate': fee_rate} + ) + _handle_response(status, payload, success=f'Fee rate set to {fee_rate}') + + +@check_inited +def claim_request(request_id: int) -> None: + status, payload = post_request( + blueprint=BLUEPRINT_NAME, method='claim-request', json={'requestId': request_id} + ) + _handle_response(status, payload, success=f'Request claimed: {request_id}') + + +@check_inited +def get_earned_fee_amount() -> None: + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='get-earned-fee-amount') + if status == 'ok' and isinstance(payload, dict): + amount_wei = payload.get('amount_wei') + amount_ether = payload.get('amount_ether') + print(f'Earned fee amount: {amount_wei} wei ({amount_ether} FAIR)') + return + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) + + +@check_inited +def get_exit_requests(raw: bool = False) -> None: + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='get-exit-requests') + if status == 'ok' and isinstance(payload, dict): + exit_requests = payload.get('exit_requests') + if not isinstance(exit_requests, list): + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) + if raw: + print(json.dumps(exit_requests, indent=2)) + return + for req in exit_requests: 
+ try: + request_id = req.get('request_id') + user = req.get('user') + node_id = req.get('node_id') + amount = req.get('amount') + unlock_date = req.get('unlock_date') + amount_fair = None + if isinstance(amount, int): + amount_fair = amount / 10**18 + unlock_iso = None + if isinstance(unlock_date, int): + unlock_iso = datetime.fromtimestamp(unlock_date, tz=timezone.utc).isoformat() + base = ( + f'request_id: {request_id} | user: {user} | node_id: {node_id} | ' + f'amount_wei: {amount} | amount_fair: {amount_fair} | ' + f'unlock_date: {unlock_date}' + ) + print(base + (f' ({unlock_iso})' if unlock_iso else '')) + except Exception: # noqa: BLE001 + print(req) + return + error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE) diff --git a/node_cli/main.py b/node_cli/main.py index 6fefa7b9..9016f79d 100644 --- a/node_cli/main.py +++ b/node_cli/main.py @@ -27,6 +27,7 @@ import click from node_cli.cli import __version__ +from node_cli.cli.exit import exit_cli from node_cli.cli.health import health_cli from node_cli.cli.info import BUILD_DATETIME, COMMIT, BRANCH, OS, VERSION, TYPE from node_cli.cli.logs import logs_cli @@ -35,14 +36,17 @@ from node_cli.cli.schains import schains_cli from node_cli.cli.wallet import wallet_cli from node_cli.cli.ssl import ssl_cli -from node_cli.cli.exit import exit_cli -from node_cli.cli.validate import validate_cli -from node_cli.cli.resources_allocation import resources_allocation_cli -from node_cli.cli.sync_node import sync_node_cli - -from node_cli.utils.helper import safe_load_texts, init_default_logger -from node_cli.configs import LONG_LINE +from node_cli.cli.passive_node import passive_node_cli +from node_cli.cli.fair_boot import fair_boot_cli +from node_cli.cli.fair_node import fair_node_cli +from node_cli.cli.passive_fair_node import passive_fair_node_cli +from node_cli.cli.chain import chain_cli +from node_cli.cli.staking import staking_cli from node_cli.core.host import init_logs_dir +from node_cli.utils.node_type import 
NodeType +from node_cli.configs import LONG_LINE +from node_cli.utils.helper import init_default_logger +from node_cli.utils.texts import safe_load_texts from node_cli.utils.helper import error_exit TEXTS = safe_load_texts() @@ -50,12 +54,23 @@ logger = logging.getLogger(__name__) -@click.group() -def cli(): - pass +@click.group(invoke_without_command=True) +@click.pass_context +def cli(ctx): + if ctx.invoked_subcommand is None: + print(ctx.get_help()) + ctx.exit(0) + + start_time = time.time() + init_logs_dir() + init_default_logger() + args = sys.argv + # todo: hide secret variables (passwords, private keys) + logger.debug(f'cmd: {" ".join(str(x) for x in args)}, v.{__version__}') + ctx.call_on_close(lambda: logger.debug('Execution time: %d seconds', time.time() - start_time)) -@cli.command('version', help="Show SKALE node CLI version") +@cli.command('version', help='Show SKALE node CLI version') @click.option('--short', is_flag=True) def version(short): if short: @@ -64,9 +79,10 @@ def version(short): print(f'SKALE Node CLI version: {VERSION}') -@cli.command('info', help="Show SKALE node CLI info") +@cli.command('info', help='Show SKALE node CLI info') def info(): - print(inspect.cleandoc(f''' + print( + inspect.cleandoc(f""" {LONG_LINE} Version: {__version__} Full version: {VERSION} @@ -75,26 +91,33 @@ def info(): Commit: {COMMIT} Git branch: {BRANCH} {LONG_LINE} - ''')) + """) + ) -def get_sources_list() -> List[click.MultiCommand]: - if TYPE == 'sync': - return [cli, sync_node_cli, ssl_cli] +def get_command_groups() -> List[click.Group]: + if TYPE == NodeType.FAIR: + return [ # type: ignore + logs_cli, + fair_boot_cli, + fair_node_cli, + passive_fair_node_cli, + chain_cli, + staking_cli, + wallet_cli, + ssl_cli, + ] else: - return [ - cli, + return [ # type: ignore health_cli, schains_cli, logs_cli, - resources_allocation_cli, node_cli, - sync_node_cli, + passive_node_cli, wallet_cli, ssl_cli, exit_cli, - validate_cli, - lvmpy_cli + lvmpy_cli, ] @@ -102,26 
+125,18 @@ def handle_exception(exc_type, exc_value, exc_traceback): if issubclass(exc_type, KeyboardInterrupt): sys.__excepthook__(exc_type, exc_value, exc_traceback) return - logger.error("Uncaught exception", - exc_info=(exc_type, exc_value, exc_traceback)) + logger.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback)) sys.excepthook = handle_exception if __name__ == '__main__': - start_time = time.time() - init_logs_dir() - init_default_logger() - args = sys.argv - # todo: hide secret variables (passwords, private keys) - logger.debug(f'cmd: {" ".join(str(x) for x in args)}, v.{__version__}') - sources = get_sources_list() - cmd_collection = click.CommandCollection(sources=sources) + for group in get_command_groups(): + for cmd_name, cmd_obj in group.commands.items(): + cli.add_command(cmd_obj, cmd_name) try: - cmd_collection() + cli() except Exception as err: traceback.print_exc() - logger.debug('Execution time: %d seconds', time.time() - start_time) error_exit(err) - logger.debug('Execution time: %d seconds', time.time() - start_time) diff --git a/node_cli/migrations/fair/from_boot.py b/node_cli/migrations/fair/from_boot.py new file mode 100644 index 00000000..784908f3 --- /dev/null +++ b/node_cli/migrations/fair/from_boot.py @@ -0,0 +1,63 @@ +import logging +import os +from pathlib import Path + +from node_cli.core.docker_config import restart_docker_service +from node_cli.utils.helper import run_cmd + +logger = logging.getLogger(__name__) + +NFT_CHAIN_BASE_PATH = '/etc/nft.conf.d/skale/chains' +NFT_COMMITTEE_SCOPE_CHAIN_NAME = 'fair-committee' + + +class NoLegacyNFTChainConfigError(Exception): + pass + + +def rename_chain_file(old_filepath: str, new_filepath: str) -> None: + old_path = Path(old_filepath) + new_path = Path(new_filepath) + if not old_path.exists(): + raise NoLegacyNFTChainConfigError(f'File {old_filepath} does not exists') + + old_path.rename(Path(new_path)) + + +def rename_chain_in_config(config_path: str, 
old_chain_name: str, new_chain_name: str) -> None: + content = '' + with open(config_path, 'r') as f: + content = f.read() + + updated_content = content.replace(old_chain_name, new_chain_name) + + with open(config_path, 'w') as f: + f.write(updated_content) + + +def migrate_nft_chain(chain_name: str) -> None: + after_boot_chain_path = os.path.join(NFT_CHAIN_BASE_PATH, f'skale-{chain_name}.conf') + new_chain_name = NFT_COMMITTEE_SCOPE_CHAIN_NAME + after_migration_chain_path = os.path.join( + NFT_CHAIN_BASE_PATH, f'{NFT_COMMITTEE_SCOPE_CHAIN_NAME}.conf' + ) + logger.debug('Renaming %s to %s', after_boot_chain_path, after_migration_chain_path) + if os.path.isfile(after_boot_chain_path): + rename_chain_in_config(after_boot_chain_path, f'skale-{chain_name}', new_chain_name) + if os.path.isfile(after_migration_chain_path): + os.remove(after_boot_chain_path) + else: + rename_chain_file(after_boot_chain_path, after_migration_chain_path) + + +def reload_nft(): + run_cmd(['nft', '-f', '/etc/nftables.conf']) + + +def migrate_nftables_from_boot(chain_name: str): + logger.info('Starting nftables migration from boot') + migrate_nft_chain(chain_name=chain_name) + logger.info('Reloading nftables rules') + reload_nft() + logger.info('Restart docker service') + restart_docker_service() diff --git a/node_cli/migrations/focal_to_jammy.py b/node_cli/migrations/focal_to_jammy.py index dae43820..030312c1 100644 --- a/node_cli/migrations/focal_to_jammy.py +++ b/node_cli/migrations/focal_to_jammy.py @@ -25,7 +25,7 @@ LEGACY_TABLE, POLICY, NFTablesManager, - remove_legacy_saved_rules + remove_legacy_saved_rules, ) from node_cli.utils.helper import run_cmd @@ -39,7 +39,7 @@ '443', # https '53', # dns '3009', # watchdog http - '9100' # node exporter + '9100', # node exporter ] ALLOWED_INCOMING_UDP_PORTS = [ diff --git a/node_cli/operations/__init__.py b/node_cli/operations/__init__.py index 5c53ec18..7a72b442 100644 --- a/node_cli/operations/__init__.py +++ b/node_cli/operations/__init__.py 
@@ -20,11 +20,21 @@ from node_cli.operations.base import ( # noqa update as update_op, init as init_op, - init_sync as init_sync_op, - update_sync as update_sync_op, + init_passive as init_passive_op, + update_passive as update_passive_op, turn_off as turn_off_op, turn_on as turn_on_op, restore as restore_op, - cleanup_sync as cleanup_sync_op, + cleanup as cleanup_skale_op, configure_nftables, ) +from node_cli.operations.fair import ( # noqa + init_fair_boot as init_fair_boot_op, + init as init_fair_op, + update_fair_boot as update_fair_boot_op, + update as update_fair_op, + FairUpdateType, + restore as restore_fair_op, + repair as repair_fair_op, + cleanup as cleanup_fair_op, +) diff --git a/node_cli/operations/base.py b/node_cli/operations/base.py index a4ab9b4d..803fe299 100644 --- a/node_cli/operations/base.py +++ b/node_cli/operations/base.py @@ -17,76 +17,82 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +import functools +import logging import time +from typing import Optional import distro -import functools -import logging -from typing import Dict, Optional -from node_cli.cli.info import VERSION +from skale_core.settings import BaseNodeSettings, SkalePassiveSettings, SkaleSettings, get_settings + +from node_cli.cli.info import TYPE, VERSION from node_cli.configs import ( CONTAINER_CONFIG_PATH, CONTAINER_CONFIG_TMP_PATH, - SKALE_DIR, GLOBAL_SKALE_DIR, + NFTABLES_CHAIN_FOLDER_PATH, + SKALE_DIR, ) +from node_cli.core.checks import CheckType +from node_cli.core.checks import run_checks as run_host_checks +from node_cli.core.docker_config import cleanup_docker_configuration, configure_docker from node_cli.core.host import ( ensure_btrfs_kernel_module_autoloaded, - link_env_file, prepare_host, ) - -from node_cli.core.docker_config import configure_docker -from node_cli.core.nginx import generate_nginx_config from node_cli.core.nftables import configure_nftables -from 
node_cli.core.node_options import NodeOptions -from node_cli.core.resources import update_resource_allocation, init_shared_space_volume - -from node_cli.operations.common import ( - backup_old_contracts, - download_contracts, - configure_filebeat, - configure_flask, - unpack_backup_archive, +from node_cli.core.nginx import generate_nginx_config +from node_cli.core.node_options import ( + mark_active_node, + set_passive_node_options, + upsert_node_mode, ) -from node_cli.operations.volume import ( - cleanup_volume_artifacts, - ensure_filestorage_mapping, - prepare_block_device, +from node_cli.core.resources import init_shared_space_volume, update_resource_allocation +from node_cli.core.schains import ( + cleanup_lvm_datadir, + cleanup_no_lvm_datadir, + update_node_cli_schain_status, ) -from node_cli.operations.docker_lvmpy import lvmpy_install # noqa -from node_cli.operations.skale_node import ( +from node_cli.operations.common import configure_filebeat, unpack_backup_archive +from node_cli.operations.config_repo import ( download_skale_node, sync_skale_node, update_images, ) -from node_cli.core.checks import CheckType, run_checks as run_host_checks -from node_cli.core.schains import ( - update_node_cli_schain_status, - cleanup_sync_datadir, +from node_cli.operations.docker_lvmpy import lvmpy_install +from node_cli.operations.volume import ( + cleanup_volume_artifacts, + ensure_filestorage_mapping, + prepare_block_device, ) from node_cli.utils.docker_utils import ( compose_rm, compose_up, docker_cleanup, remove_dynamic_containers, + system_prune, ) -from node_cli.utils.meta import get_meta_info, update_meta +from node_cli.utils.helper import cleanup_dir_content, rm_dir +from node_cli.utils.meta import CliMetaManager, FairCliMetaManager +from node_cli.utils.node_type import NodeMode, NodeType from node_cli.utils.print_formatters import print_failed_requirements_checks -from node_cli.utils.helper import str_to_bool, rm_dir - +from node_cli.utils.settings import 
save_internal_settings logger = logging.getLogger(__name__) def checked_host(func): @functools.wraps(func) - def wrapper(env_filepath: str, env: Dict, *args, **kwargs): - download_skale_node(env['CONTAINER_CONFIGS_STREAM'], env.get('CONTAINER_CONFIGS_DIR')) + def wrapper( + settings: BaseNodeSettings, compose_env: dict, node_mode: NodeMode, *args, **kwargs + ): + download_skale_node(settings.node_version, settings.container_configs_dir or None) failed_checks = run_host_checks( - env['DISK_MOUNTPOINT'], - env['ENV_TYPE'], + settings.block_device, + TYPE, + node_mode, + settings.env_type, CONTAINER_CONFIG_TMP_PATH, check_type=CheckType.PREINSTALL, ) @@ -94,13 +100,15 @@ def wrapper(env_filepath: str, env: Dict, *args, **kwargs): print_failed_requirements_checks(failed_checks) return False - result = func(env_filepath, env, *args, **kwargs) + result = func(settings, compose_env, node_mode, *args, **kwargs) if not result: return result failed_checks = run_host_checks( - env['DISK_MOUNTPOINT'], - env['ENV_TYPE'], + settings.block_device, + TYPE, + node_mode, + settings.env_type, CONTAINER_CONFIG_PATH, check_type=CheckType.POSTINSTALL, ) @@ -113,207 +121,268 @@ def wrapper(env_filepath: str, env: Dict, *args, **kwargs): @checked_host -def update(env_filepath: str, env: Dict) -> None: - compose_rm(env) +def update(settings: BaseNodeSettings, compose_env: dict, node_mode: NodeMode) -> bool: + compose_rm(node_type=NodeType.SKALE, node_mode=node_mode, env=compose_env) remove_dynamic_containers() sync_skale_node() ensure_btrfs_kernel_module_autoloaded() - if env.get('SKIP_DOCKER_CONFIG') != 'True': + if not settings.skip_docker_config: configure_docker() - enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) - configure_nftables(enable_monitoring=enable_monitoring) - - backup_old_contracts() - download_contracts(env) + configure_nftables(enable_monitoring=settings.monitoring_containers) - lvmpy_install(env) + lvmpy_install(settings.block_device) 
generate_nginx_config() - prepare_host(env_filepath, env['ENV_TYPE'], allocation=True) - init_shared_space_volume(env['ENV_TYPE']) + prepare_host(settings.env_type, allocation=True) + save_internal_settings(node_type=NodeType.SKALE, node_mode=node_mode) + init_shared_space_volume(settings.env_type) - current_stream = get_meta_info().config_stream - skip_cleanup = env.get('SKIP_DOCKER_CLEANUP') == 'True' - if not skip_cleanup and current_stream != env['CONTAINER_CONFIGS_STREAM']: + meta_manager = CliMetaManager() + current_stream = meta_manager.get_meta_info().config_stream + if not settings.skip_docker_cleanup and current_stream != settings.node_version: logger.info( 'Stream version was changed from %s to %s', current_stream, - env['CONTAINER_CONFIGS_STREAM'], + settings.node_version, ) docker_cleanup() - update_meta( + skale_settings = get_settings(SkaleSettings) + meta_manager.update_meta( VERSION, - env['CONTAINER_CONFIGS_STREAM'], - env['DOCKER_LVMPY_STREAM'], + settings.node_version, + skale_settings.docker_lvmpy_version, distro.id(), distro.version(), ) - update_images(env=env) - compose_up(env) + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.SKALE, + node_mode=node_mode, + ) + compose_up(env=compose_env, settings=settings, node_type=NodeType.SKALE, node_mode=node_mode) return True @checked_host -def init(env_filepath: str, env: dict) -> bool: +def init(settings: BaseNodeSettings, compose_env: dict, node_mode: NodeMode) -> None: sync_skale_node() - ensure_btrfs_kernel_module_autoloaded() - if env.get('SKIP_DOCKER_CONFIG') != 'True': + if not settings.skip_docker_config: configure_docker() - enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) - configure_nftables(enable_monitoring=enable_monitoring) + configure_nftables(enable_monitoring=settings.monitoring_containers) - prepare_host(env_filepath, env_type=env['ENV_TYPE']) - link_env_file() - download_contracts(env) 
+ prepare_host(env_type=settings.env_type) + save_internal_settings(node_type=NodeType.SKALE, node_mode=node_mode) + + mark_active_node() configure_filebeat() - configure_flask() generate_nginx_config() - lvmpy_install(env) - init_shared_space_volume(env['ENV_TYPE']) + lvmpy_install(settings.block_device) + init_shared_space_volume(settings.env_type) - update_meta( + skale_settings = get_settings(SkaleSettings) + meta_manager = CliMetaManager() + meta_manager.update_meta( VERSION, - env['CONTAINER_CONFIGS_STREAM'], - env['DOCKER_LVMPY_STREAM'], + settings.node_version, + skale_settings.docker_lvmpy_version, distro.id(), distro.version(), ) - update_resource_allocation(env_type=env['ENV_TYPE']) - update_images(env=env) - - compose_up(env) - return True + update_resource_allocation(env_type=settings.env_type) + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.SKALE, + node_mode=node_mode, + ) + compose_up(env=compose_env, settings=settings, node_type=NodeType.SKALE, node_mode=node_mode) -def init_sync( - env_filepath: str, - env: dict, +def init_passive( + settings: BaseNodeSettings, + compose_env: dict, indexer: bool, archive: bool, snapshot: bool, snapshot_from: Optional[str], -) -> bool: - cleanup_volume_artifacts(env['DISK_MOUNTPOINT']) - download_skale_node(env.get('CONTAINER_CONFIGS_STREAM'), env.get('CONTAINER_CONFIGS_DIR')) +) -> None: + cleanup_volume_artifacts(settings.block_device) + download_skale_node(settings.node_version, settings.container_configs_dir or None) sync_skale_node() - if env.get('SKIP_DOCKER_CONFIG') != 'True': + if not settings.skip_docker_config: configure_docker() - enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) - configure_nftables(enable_monitoring=enable_monitoring) + configure_nftables(enable_monitoring=settings.monitoring_containers) - prepare_host( - env_filepath, - env_type=env['ENV_TYPE'], + prepare_host(env_type=settings.env_type) 
+ save_internal_settings(node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE) + failed_checks = run_host_checks( + settings.block_device, + TYPE, + NodeMode.PASSIVE, + settings.env_type, + CONTAINER_CONFIG_PATH, + check_type=CheckType.PREINSTALL, ) + if failed_checks: + print_failed_requirements_checks(failed_checks) - node_options = NodeOptions() - node_options.archive = archive or indexer - node_options.catchup = archive or indexer - node_options.historic_state = archive + set_passive_node_options(archive=archive, indexer=indexer) ensure_filestorage_mapping() - link_env_file() - download_contracts(env) generate_nginx_config() - prepare_block_device(env['DISK_MOUNTPOINT'], force=env['ENFORCE_BTRFS'] == 'True') + passive_settings = get_settings(SkalePassiveSettings) + prepare_block_device(settings.block_device, force=passive_settings.enforce_btrfs) - update_meta( + meta_manager = CliMetaManager() + meta_manager.update_meta( VERSION, - env['CONTAINER_CONFIGS_STREAM'], - env['DOCKER_LVMPY_STREAM'], + settings.node_version, + None, distro.id(), distro.version(), ) - update_resource_allocation(env_type=env['ENV_TYPE']) + update_resource_allocation(env_type=settings.env_type) - schain_name = env['SCHAIN_NAME'] - if snapshot or snapshot_from: + if passive_settings.schain_name and (snapshot or snapshot_from): ts = int(time.time()) - update_node_cli_schain_status(schain_name, repair_ts=ts, snapshot_from=snapshot_from) - - update_images(env=env, sync_node=True) + update_node_cli_schain_status( + passive_settings.schain_name, repair_ts=ts, snapshot_from=snapshot_from + ) - compose_up(env, sync_node=True) - return True + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.SKALE, + node_mode=NodeMode.PASSIVE, + ) + compose_up( + env=compose_env, settings=settings, node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE + ) -def update_sync(env_filepath: str, env: Dict) -> bool: - compose_rm(env, sync_node=True) 
+def update_passive(settings: BaseNodeSettings, compose_env: dict) -> bool: + compose_rm(env=compose_env, node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE) remove_dynamic_containers() - cleanup_volume_artifacts(env['DISK_MOUNTPOINT']) - download_skale_node(env['CONTAINER_CONFIGS_STREAM'], env.get('CONTAINER_CONFIGS_DIR')) + cleanup_volume_artifacts(settings.block_device) + download_skale_node(settings.node_version, settings.container_configs_dir or None) sync_skale_node() - if env.get('SKIP_DOCKER_CONFIG') != 'True': + if not settings.skip_docker_config: configure_docker() - enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) - configure_nftables(enable_monitoring=enable_monitoring) + configure_nftables(enable_monitoring=settings.monitoring_containers) ensure_filestorage_mapping() - backup_old_contracts() - download_contracts(env) - prepare_block_device(env['DISK_MOUNTPOINT'], force=env['ENFORCE_BTRFS'] == 'True') + passive_settings = get_settings(SkalePassiveSettings) + prepare_block_device(settings.block_device, force=passive_settings.enforce_btrfs) generate_nginx_config() - prepare_host(env_filepath, env['ENV_TYPE'], allocation=True) + prepare_host(settings.env_type, allocation=True) + save_internal_settings(node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE) + + failed_checks = run_host_checks( + settings.block_device, + TYPE, + NodeMode.PASSIVE, + settings.env_type, + CONTAINER_CONFIG_PATH, + check_type=CheckType.PREINSTALL, + ) + if failed_checks: + print_failed_requirements_checks(failed_checks) - update_meta( + meta_manager = CliMetaManager() + meta_manager.update_meta( VERSION, - env['CONTAINER_CONFIGS_STREAM'], - env['DOCKER_LVMPY_STREAM'], + settings.node_version, + None, distro.id(), distro.version(), ) - update_images(env=env, sync_node=True) - - compose_up(env, sync_node=True) + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.SKALE, + 
node_mode=NodeMode.PASSIVE, + ) + compose_up( + env=compose_env, settings=settings, node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE + ) return True -def turn_off(env: dict, sync_node: bool = False) -> None: +def turn_off(compose_env: dict, node_type: NodeType, node_mode: NodeMode) -> None: logger.info('Turning off the node...') - compose_rm(env=env, sync_node=sync_node) + compose_rm(env=compose_env, node_type=node_type, node_mode=node_mode) remove_dynamic_containers() logger.info('Node was successfully turned off') -def turn_on(env: dict) -> None: +def turn_on( + settings: BaseNodeSettings, + compose_env: dict, + node_type: NodeType, + node_mode: NodeMode, + backup_run: bool = False, +) -> None: logger.info('Turning on the node...') - update_meta( - VERSION, - env['CONTAINER_CONFIGS_STREAM'], - env['DOCKER_LVMPY_STREAM'], - distro.id(), - distro.version(), - ) - if env.get('SKIP_DOCKER_CONFIG') != 'True': + if node_type == NodeType.FAIR: + meta_manager = FairCliMetaManager() + meta_manager.update_meta( + VERSION, + settings.node_version, + distro.id(), + distro.version(), + ) + else: + skale_settings = get_settings((SkaleSettings, SkalePassiveSettings)) + docker_lvmpy_version = ( + skale_settings.docker_lvmpy_version + if isinstance(skale_settings, SkaleSettings) + else None + ) + meta_manager = CliMetaManager() + meta_manager.update_meta( + VERSION, settings.node_version, docker_lvmpy_version, distro.id(), distro.version() + ) + if not settings.skip_docker_config: configure_docker() - enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) - configure_nftables(enable_monitoring=enable_monitoring) + configure_nftables(enable_monitoring=settings.monitoring_containers) + save_internal_settings(node_type=node_type, node_mode=node_mode, backup_run=backup_run) logger.info('Launching containers on the node...') - compose_up(env) + compose_up(env=compose_env, settings=settings, node_type=node_type, node_mode=node_mode) -def restore(env, 
backup_path, config_only=False): +def restore( + settings: BaseNodeSettings, + compose_env: dict, + backup_path: str, + node_type: NodeType, + config_only: bool = False, + backup_run: bool = False, +) -> bool: + node_mode = upsert_node_mode(node_mode=NodeMode.ACTIVE) unpack_backup_archive(backup_path) failed_checks = run_host_checks( - env['DISK_MOUNTPOINT'], - env['ENV_TYPE'], + settings.block_device, + TYPE, + node_mode, + settings.env_type, CONTAINER_CONFIG_PATH, check_type=CheckType.PREINSTALL, ) @@ -323,31 +392,32 @@ def restore(env, backup_path, config_only=False): ensure_btrfs_kernel_module_autoloaded() - if env.get('SKIP_DOCKER_CONFIG') != 'True': + if not settings.skip_docker_config: configure_docker() - enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) - configure_nftables(enable_monitoring=enable_monitoring) + configure_nftables(enable_monitoring=settings.monitoring_containers) - link_env_file() - lvmpy_install(env) - init_shared_space_volume(env['ENV_TYPE']) + lvmpy_install(settings.block_device) + init_shared_space_volume(settings.env_type) - update_meta( + skale_settings = get_settings(SkaleSettings) + meta_manager = CliMetaManager() + meta_manager.update_meta( VERSION, - env['CONTAINER_CONFIGS_STREAM'], - env['DOCKER_LVMPY_STREAM'], + settings.node_version, + skale_settings.docker_lvmpy_version, distro.id(), distro.version(), ) - update_resource_allocation(env_type=env['ENV_TYPE']) - + save_internal_settings(node_type=node_type, node_mode=node_mode, backup_run=backup_run) if not config_only: - compose_up(env) + compose_up(env=compose_env, settings=settings, node_type=node_type, node_mode=node_mode) failed_checks = run_host_checks( - env['DISK_MOUNTPOINT'], - env['ENV_TYPE'], + settings.block_device, + TYPE, + node_mode, + settings.env_type, CONTAINER_CONFIG_PATH, check_type=CheckType.POSTINSTALL, ) @@ -357,8 +427,24 @@ def restore(env, backup_path, config_only=False): return True -def cleanup_sync(env, schain_name: str) -> 
None: - turn_off(env, sync_node=True) - cleanup_sync_datadir(schain_name=schain_name) +def cleanup_passive(compose_env: dict, schain_name: str) -> None: + turn_off(compose_env, node_type=NodeType.SKALE, node_mode=NodeMode.PASSIVE) + cleanup_no_lvm_datadir(chain_name=schain_name) + rm_dir(GLOBAL_SKALE_DIR) + rm_dir(SKALE_DIR) + + +def cleanup( + node_mode: NodeMode, compose_env: dict, schain_name: Optional[str] = None, prune: bool = False +) -> None: + turn_off(compose_env, node_type=NodeType.SKALE, node_mode=node_mode) + if prune: + system_prune() + if node_mode == NodeMode.PASSIVE: + cleanup_no_lvm_datadir(chain_name=schain_name) + else: + cleanup_lvm_datadir() rm_dir(GLOBAL_SKALE_DIR) rm_dir(SKALE_DIR) + cleanup_dir_content(NFTABLES_CHAIN_FOLDER_PATH) + cleanup_docker_configuration() diff --git a/node_cli/operations/common.py b/node_cli/operations/common.py index cfe79b42..c595a3b8 100644 --- a/node_cli/operations/common.py +++ b/node_cli/operations/common.py @@ -17,65 +17,30 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+import logging import os +import shutil import stat import tarfile -import logging -import shutil -import secrets - -import urllib.request from shutil import copyfile -from distutils.dir_util import copy_tree from node_cli.configs import ( - CONTRACTS_PATH, - BACKUP_CONTRACTS_PATH, - G_CONF_HOME, FILEBEAT_CONFIG_PATH, - FLASK_SECRET_KEY_FILE, - IMA_CONTRACTS_FILEPATH, - MANAGER_CONTRACTS_FILEPATH, - SRC_FILEBEAT_CONFIG_PATH + G_CONF_HOME, + SRC_FILEBEAT_CONFIG_PATH, ) logger = logging.getLogger(__name__) -def backup_old_contracts(): - logging.info('Copying old contracts ABIs') - copy_tree(CONTRACTS_PATH, BACKUP_CONTRACTS_PATH) - - -def download_contracts(env): - urllib.request.urlretrieve(env['MANAGER_CONTRACTS_ABI_URL'], MANAGER_CONTRACTS_FILEPATH) - urllib.request.urlretrieve(env['IMA_CONTRACTS_ABI_URL'], IMA_CONTRACTS_FILEPATH) - - def configure_filebeat(): logger.info('Configuring filebeat...') copyfile(SRC_FILEBEAT_CONFIG_PATH, FILEBEAT_CONFIG_PATH) shutil.chown(FILEBEAT_CONFIG_PATH, user='root') - os.chmod( - FILEBEAT_CONFIG_PATH, - stat.S_IREAD | - stat.S_IWRITE | - stat.S_IEXEC - ) + os.chmod(FILEBEAT_CONFIG_PATH, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) logger.info('Filebeat configured') -def configure_flask(): - if os.path.isfile(FLASK_SECRET_KEY_FILE): - logger.info('Flask secret key already exists') - else: - logger.info('Generating Flask secret key...') - flask_secret_key = secrets.token_urlsafe(16) - with open(FLASK_SECRET_KEY_FILE, 'w') as f: - f.write(flask_secret_key) - logger.info('Flask secret key generated and saved') - - def unpack_backup_archive(backup_path: str) -> None: logger.info('Unpacking backup archive...') with tarfile.open(backup_path) as tar: diff --git a/node_cli/operations/config_repo.py b/node_cli/operations/config_repo.py new file mode 100644 index 00000000..19cf284a --- /dev/null +++ b/node_cli/operations/config_repo.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2021 
SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import logging +import os +import shutil +from typing import Optional + +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.helper import rm_dir, rsync_dirs, safe_mkdir, error_exit +from node_cli.utils.git_utils import clone_repo +from node_cli.utils.docker_utils import compose_pull, compose_build +from node_cli.configs import CONTAINER_CONFIG_PATH, CONTAINER_CONFIG_TMP_PATH, SKALE_NODE_REPO_URL +from node_cli.utils.node_type import NodeType, NodeMode + + +logger = logging.getLogger(__name__) + + +def update_images( + compose_env: dict, container_configs_dir: str, node_type: NodeType, node_mode: NodeMode +) -> None: + if container_configs_dir: + compose_build(env=compose_env, node_type=node_type, node_mode=node_mode) + else: + compose_pull(env=compose_env, node_type=node_type, node_mode=node_mode) + + +def download_skale_node(stream: Optional[str] = None, src: Optional[str] = None) -> None: + """Copies SKALE node config from local directory if present. 
If not, downloads it from repo.""" + if not src and not stream: + error_exit('Either src path or stream must be provided') + + try: + rm_dir(CONTAINER_CONFIG_TMP_PATH) + safe_mkdir(CONTAINER_CONFIG_TMP_PATH) + dest = CONTAINER_CONFIG_TMP_PATH + + if src: + if not os.path.isdir(src): + error_exit(f'Source directory does not exist: {src}') + logger.info(f'Syncing config files from {src}') + rsync_dirs(src, dest) + else: + if not stream: + error_exit('Stream must be provided if src is not specified in download_skale_node') + logger.info(f'Cloning config files from {SKALE_NODE_REPO_URL} ({stream})') + clone_repo(SKALE_NODE_REPO_URL, dest, stream) + + except (OSError, RuntimeError) as err: + rm_dir(CONTAINER_CONFIG_TMP_PATH) + error_exit( + f'Failed to download node configuration: {err}', + exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR, + ) + + +def sync_skale_node(): + if os.path.isdir(CONTAINER_CONFIG_PATH): + shutil.rmtree(CONTAINER_CONFIG_PATH) + shutil.move(CONTAINER_CONFIG_TMP_PATH, f'{CONTAINER_CONFIG_PATH}/') diff --git a/node_cli/operations/docker_lvmpy.py b/node_cli/operations/docker_lvmpy.py index 1810516b..eb307d55 100644 --- a/node_cli/operations/docker_lvmpy.py +++ b/node_cli/operations/docker_lvmpy.py @@ -33,15 +33,16 @@ LVMPY_CRON_LOG_PATH, LVMPY_CRON_SCHEDULE_MINUTES, SCHAINS_MNT_DIR_REGULAR, - VOLUME_GROUP + VOLUME_GROUP, ) +from node_cli.operations.volume import ensure_filestorage_mapping from lvmpy.src.install import setup as setup_lvmpy logger = logging.getLogger(__name__) def update_docker_lvmpy_env(env): - env['PHYSICAL_VOLUME'] = env['DISK_MOUNTPOINT'] + env['PHYSICAL_VOLUME'] = env['BLOCK_DEVICE'] env['VOLUME_GROUP'] = 'schains' env['FILESTORAGE_MAPPING'] = FILESTORAGE_MAPPING env['MNT_DIR'] = SCHAINS_MNT_DIR_REGULAR @@ -49,29 +50,16 @@ def update_docker_lvmpy_env(env): return env -def ensure_filestorage_mapping(mapping_dir=FILESTORAGE_MAPPING): - if not os.path.isdir(FILESTORAGE_MAPPING): - os.makedirs(FILESTORAGE_MAPPING) - - def 
sync_docker_lvmpy_repo(env): if os.path.isdir(DOCKER_LVMPY_PATH): shutil.rmtree(DOCKER_LVMPY_PATH) - sync_repo( - DOCKER_LVMPY_REPO_URL, - DOCKER_LVMPY_PATH, - env["DOCKER_LVMPY_STREAM"] - ) + sync_repo(DOCKER_LVMPY_REPO_URL, DOCKER_LVMPY_PATH, env['DOCKER_LVMPY_VERSION']) -def lvmpy_install(env): +def lvmpy_install(block_device: str) -> None: ensure_filestorage_mapping() logging.info('Configuring and starting lvmpy') - setup_lvmpy( - block_device=env['DISK_MOUNTPOINT'], - volume_group=VOLUME_GROUP, - exec_start=LVMPY_RUN_CMD - ) + setup_lvmpy(block_device=block_device, volume_group=VOLUME_GROUP, exec_start=LVMPY_RUN_CMD) init_healing_cron() logger.info('docker-lvmpy is configured and started') @@ -86,7 +74,5 @@ def init_healing_cron(): if legacy_line in jobs: c.remove_all(command=legacy_line) if cron_line not in jobs: - job = c.new( - command=cron_line - ) + job = c.new(command=cron_line) job.minute.every(LVMPY_CRON_SCHEDULE_MINUTES) diff --git a/node_cli/operations/fair.py b/node_cli/operations/fair.py new file mode 100644 index 00000000..3a48fdbd --- /dev/null +++ b/node_cli/operations/fair.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2021-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import logging +import time +from enum import Enum + +import distro + +from skale_core.settings import BaseNodeSettings, FairBaseSettings, FairSettings, get_settings +from skale_core.types import EnvType + +from node_cli.cli.info import TYPE, VERSION +from node_cli.configs import ( + CONTAINER_CONFIG_PATH, + GLOBAL_SKALE_DIR, + NFTABLES_CHAIN_FOLDER_PATH, + SKALE_DIR, +) +from node_cli.core.checks import CheckType +from node_cli.core.checks import run_checks as run_host_checks +from node_cli.core.docker_config import cleanup_docker_configuration, configure_docker +from node_cli.core.host import ensure_btrfs_kernel_module_autoloaded, prepare_host +from node_cli.core.nftables import configure_nftables +from node_cli.core.nginx import generate_nginx_config +from node_cli.core.schains import cleanup_no_lvm_datadir +from node_cli.core.static_config import get_fair_chain_name +from node_cli.core.node_options import mark_active_node, set_passive_node_options, upsert_node_mode +from node_cli.fair.record.chain_record import ( + get_fair_chain_record, + migrate_chain_record, + update_chain_record, +) +from node_cli.migrations.fair.from_boot import migrate_nftables_from_boot +from node_cli.operations.base import checked_host, turn_off +from node_cli.operations.common import configure_filebeat, unpack_backup_archive +from node_cli.operations.config_repo import ( + sync_skale_node, + update_images, +) +from node_cli.operations.volume import cleanup_volume_artifacts, prepare_block_device +from node_cli.utils.docker_utils import ( + BASE_PASSIVE_FAIR_COMPOSE_SERVICES, + REDIS_SERVICE_DICT, + REDIS_START_TIMEOUT, + compose_rm, + compose_up, + docker_cleanup, + is_admin_running, + remove_dynamic_containers, + start_container_by_name, + stop_container_by_name, + system_prune, + wait_for_container, +) +from node_cli.utils.helper import cleanup_dir_content, rm_dir +from node_cli.utils.meta import FairCliMetaManager +from node_cli.utils.print_formatters import 
print_failed_requirements_checks +from node_cli.utils.node_type import NodeMode, NodeType +from node_cli.utils.settings import save_internal_settings + +logger = logging.getLogger(__name__) + + +class FairUpdateType(Enum): + REGULAR = 'regular' + INFRA_ONLY = 'infra_only' + FROM_BOOT = 'from_boot' + + +@checked_host +def init_fair_boot( + settings: BaseNodeSettings, + compose_env: dict, + node_mode: NodeMode, +) -> None: + sync_skale_node() + cleanup_volume_artifacts(settings.block_device) + + ensure_btrfs_kernel_module_autoloaded() + if not settings.skip_docker_config: + configure_docker() + + configure_nftables(enable_monitoring=settings.monitoring_containers) + + prepare_host(env_type=settings.env_type) + save_internal_settings(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + mark_active_node() + + configure_filebeat() + generate_nginx_config() + fair_settings = get_settings((FairSettings, FairBaseSettings)) + prepare_block_device(settings.block_device, force=fair_settings.enforce_btrfs) + + meta_manager = FairCliMetaManager() + meta_manager.update_meta( + VERSION, + settings.node_version, + distro.id(), + distro.version(), + ) + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.FAIR, + node_mode=NodeMode.ACTIVE, + ) + + compose_up( + env=compose_env, + settings=settings, + node_type=NodeType.FAIR, + node_mode=NodeMode.ACTIVE, + is_fair_boot=True, + ) + + +@checked_host +def init( + settings: BaseNodeSettings, + compose_env: dict, + node_mode: NodeMode, + indexer: bool, + archive: bool, + snapshot: str | None, +) -> bool: + sync_skale_node() + ensure_btrfs_kernel_module_autoloaded() + cleanup_volume_artifacts(settings.block_device) + + if not settings.skip_docker_config: + configure_docker() + + configure_nftables() + configure_filebeat() + generate_nginx_config() + + prepare_host(env_type=settings.env_type) + save_internal_settings(node_type=NodeType.FAIR, node_mode=node_mode) + + 
fair_settings = get_settings((FairSettings, FairBaseSettings)) + prepare_block_device(settings.block_device, force=fair_settings.enforce_btrfs) + + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.FAIR, + node_mode=node_mode, + ) + compose_up( + env=compose_env, + settings=settings, + node_type=NodeType.FAIR, + node_mode=node_mode, + services=list(REDIS_SERVICE_DICT), + ) + + upsert_node_mode(node_mode=node_mode) + if node_mode == NodeMode.PASSIVE: + logger.info('Setting passive node options') + set_passive_node_options(archive=archive, indexer=indexer) + if snapshot: + logger.info('Waiting %s seconds for redis to start', REDIS_START_TIMEOUT) + time.sleep(REDIS_START_TIMEOUT) + trigger_skaled_snapshot_mode(env_type=settings.env_type, snapshot_from=snapshot) + + meta_manager = FairCliMetaManager() + meta_manager.update_meta( + VERSION, + settings.node_version, + distro.id(), + distro.version(), + ) + + compose_up(env=compose_env, settings=settings, node_type=NodeType.FAIR, node_mode=node_mode) + wait_for_container(BASE_PASSIVE_FAIR_COMPOSE_SERVICES['api']) + time.sleep(REDIS_START_TIMEOUT) + return True + + +@checked_host +def update_fair_boot( + settings: BaseNodeSettings, + compose_env: dict, + node_mode: NodeMode = NodeMode.ACTIVE, +) -> bool: + compose_rm(node_type=NodeType.FAIR, node_mode=node_mode, env=compose_env) + remove_dynamic_containers() + cleanup_volume_artifacts(settings.block_device) + + sync_skale_node() + ensure_btrfs_kernel_module_autoloaded() + + if not settings.skip_docker_config: + configure_docker() + + configure_nftables(enable_monitoring=settings.monitoring_containers) + + generate_nginx_config() + fair_settings = get_settings((FairSettings, FairBaseSettings)) + prepare_block_device(settings.block_device, force=fair_settings.enforce_btrfs) + + prepare_host(settings.env_type) + save_internal_settings(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + + meta_manager = 
FairCliMetaManager() + current_stream = meta_manager.get_meta_info().config_stream + if not settings.skip_docker_cleanup and current_stream != settings.node_version: + logger.info( + 'Stream version was changed from %s to %s', + current_stream, + settings.node_version, + ) + docker_cleanup() + + meta_manager.update_meta( + VERSION, + settings.node_version, + distro.id(), + distro.version(), + ) + update_images( + compose_env=compose_env, + container_configs_dir=settings.container_configs_dir, + node_type=NodeType.FAIR, + node_mode=NodeMode.ACTIVE, + ) + compose_up( + env=compose_env, + settings=settings, + node_type=NodeType.FAIR, + node_mode=NodeMode.ACTIVE, + is_fair_boot=True, + ) + return True + + +@checked_host +def update( + settings: BaseNodeSettings, + compose_env: dict, + node_mode: NodeMode, + update_type: FairUpdateType, + force_skaled_start: bool, +) -> bool: + compose_rm(node_type=NodeType.FAIR, node_mode=node_mode, env=compose_env) + if update_type not in (FairUpdateType.INFRA_ONLY, FairUpdateType.FROM_BOOT): + remove_dynamic_containers() + + sync_skale_node() + ensure_btrfs_kernel_module_autoloaded() + + if not settings.skip_docker_config: + configure_docker() + + configure_nftables() + generate_nginx_config() + + prepare_host(settings.env_type, allocation=True) + save_internal_settings(node_type=NodeType.FAIR, node_mode=node_mode) + meta_manager = FairCliMetaManager() + current_stream = meta_manager.get_meta_info().config_stream + if not settings.skip_docker_cleanup and current_stream != settings.node_version: + logger.info( + 'Stream version was changed from %s to %s', + current_stream, + settings.node_version, + ) + docker_cleanup() + + meta_manager.update_meta( + VERSION, + settings.node_version, + distro.id(), + distro.version(), + ) + + fair_chain_name = get_fair_chain_name(settings.env_type) + if update_type == FairUpdateType.FROM_BOOT: + migrate_nftables_from_boot(chain_name=fair_chain_name) + + update_images( + compose_env=compose_env, + 
container_configs_dir=settings.container_configs_dir, + node_type=NodeType.FAIR, + node_mode=node_mode, + ) + + compose_up( + env=compose_env, + settings=settings, + node_type=NodeType.FAIR, + node_mode=node_mode, + services=list(REDIS_SERVICE_DICT), + ) + wait_for_container(REDIS_SERVICE_DICT['redis']) + time.sleep(REDIS_START_TIMEOUT) + if update_type == FairUpdateType.FROM_BOOT: + migrate_chain_record(settings.env_type, settings.node_version) + update_chain_record(settings.env_type, force_skaled_start=force_skaled_start) + compose_up(env=compose_env, settings=settings, node_type=NodeType.FAIR, node_mode=node_mode) + return True + + +def restore( + node_mode: NodeMode, + settings: BaseNodeSettings, + compose_env: dict, + backup_path: str, + config_only: bool = False, +) -> bool: + unpack_backup_archive(backup_path) + failed_checks = run_host_checks( + settings.block_device, + TYPE, + node_mode, + settings.env_type, + CONTAINER_CONFIG_PATH, + check_type=CheckType.PREINSTALL, + ) + if failed_checks: + print_failed_requirements_checks(failed_checks) + return False + + ensure_btrfs_kernel_module_autoloaded() + + if not settings.skip_docker_config: + configure_docker() + + configure_nftables(enable_monitoring=settings.monitoring_containers) + + meta_manager = FairCliMetaManager() + meta_manager.update_meta( + VERSION, + settings.node_version, + distro.id(), + distro.version(), + ) + + if not config_only: + compose_up(env=compose_env, settings=settings, node_type=NodeType.FAIR, node_mode=node_mode) + + failed_checks = run_host_checks( + settings.block_device, + TYPE, + node_mode, + settings.env_type, + CONTAINER_CONFIG_PATH, + check_type=CheckType.POSTINSTALL, + ) + if failed_checks: + print_failed_requirements_checks(failed_checks) + return False + return True + + +def cleanup(node_mode: NodeMode, compose_env: dict, prune: bool = False) -> None: + turn_off(compose_env, node_type=NodeType.FAIR, node_mode=node_mode) + if prune: + system_prune() + 
cleanup_no_lvm_datadir() + rm_dir(GLOBAL_SKALE_DIR) + rm_dir(SKALE_DIR) + cleanup_dir_content(NFTABLES_CHAIN_FOLDER_PATH) + cleanup_docker_configuration() + + +def trigger_skaled_snapshot_mode(env_type: EnvType, snapshot_from: str = 'any') -> None: + record = get_fair_chain_record(env_type) + if not snapshot_from: + snapshot_from = 'any' + logger.info('Triggering skaled snapshot mode, snapshot_from: %s', snapshot_from) + record.set_snapshot_from(snapshot_from) + + +def repair(env_type: EnvType, snapshot_from: str = 'any') -> None: + logger.info('Starting fair node repair') + container_name = 'sk_admin' + if is_admin_running(): + logger.info('Stopping admin container') + stop_container_by_name(container_name=container_name) + logger.info('Removing chain container') + remove_dynamic_containers() + logger.info('Cleaning up datadir') + cleanup_no_lvm_datadir() + logger.info('Requesting fair node repair') + trigger_skaled_snapshot_mode(env_type=env_type, snapshot_from=snapshot_from) + logger.info('Starting admin') + start_container_by_name(container_name=container_name) + logger.info('Fair node repair completed successfully') diff --git a/node_cli/operations/skale_node.py b/node_cli/operations/skale_node.py deleted file mode 100644 index d91e4765..00000000 --- a/node_cli/operations/skale_node.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# -# This file is part of node-cli -# -# Copyright (C) 2021 SKALE Labs -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. 
-# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . - -import logging -import os -import shutil -from typing import Optional - -from node_cli.utils.helper import rm_dir, rsync_dirs, safe_mkdir -from node_cli.utils.git_utils import clone_repo -from node_cli.utils.docker_utils import compose_pull, compose_build -from node_cli.configs import ( - CONTAINER_CONFIG_PATH, - CONTAINER_CONFIG_TMP_PATH, - SKALE_NODE_REPO_URL -) - - -logger = logging.getLogger(__name__) - - -def update_images(env: dict, sync_node: bool = False) -> None: - local = env.get('CONTAINER_CONFIGS_DIR') != '' - if local: - compose_build(env=env, sync_node=sync_node) - else: - compose_pull(env=env, sync_node=sync_node) - - -def download_skale_node(stream: Optional[str], src: Optional[str]) -> None: - rm_dir(CONTAINER_CONFIG_TMP_PATH) - safe_mkdir(CONTAINER_CONFIG_TMP_PATH) - dest = CONTAINER_CONFIG_TMP_PATH - if src: - rsync_dirs(src, dest) - else: - clone_repo( - SKALE_NODE_REPO_URL, - CONTAINER_CONFIG_TMP_PATH, - stream - ) - - -def sync_skale_node(): - if os.path.isdir(CONTAINER_CONFIG_PATH): - shutil.rmtree(CONTAINER_CONFIG_PATH) - shutil.move(CONTAINER_CONFIG_TMP_PATH, f'{CONTAINER_CONFIG_PATH}/') diff --git a/node_cli/operations/volume.py b/node_cli/operations/volume.py index b6de918a..e944547a 100644 --- a/node_cli/operations/volume.py +++ b/node_cli/operations/volume.py @@ -30,8 +30,8 @@ DOCKER_LVMPY_REPO_URL, FILESTORAGE_MAPPING, SCHAINS_MNT_DIR_REGULAR, - SCHAINS_MNT_DIR_SYNC, - SKALE_STATE_DIR + SCHAINS_MNT_DIR_SINGLE_CHAIN, + SKALE_STATE_DIR, ) logger = logging.getLogger(__name__) @@ -42,7 +42,7 @@ class FilesystemExistsError(Exception): def update_docker_lvmpy_env(env): - env['PHYSICAL_VOLUME'] = env['DISK_MOUNTPOINT'] + env['PHYSICAL_VOLUME'] = env['BLOCK_DEVICE'] env['VOLUME_GROUP'] = 'schains' env['FILESTORAGE_MAPPING'] = FILESTORAGE_MAPPING env['SCHAINS_MNT_DIR'] = SCHAINS_MNT_DIR_REGULAR @@ -58,11 +58,7 @@ def 
ensure_filestorage_mapping(mapping_dir=FILESTORAGE_MAPPING): def sync_docker_lvmpy_repo(env): if os.path.isdir(DOCKER_LVMPY_PATH): shutil.rmtree(DOCKER_LVMPY_PATH) - sync_repo( - DOCKER_LVMPY_REPO_URL, - DOCKER_LVMPY_PATH, - env["DOCKER_LVMPY_STREAM"] - ) + sync_repo(DOCKER_LVMPY_REPO_URL, DOCKER_LVMPY_PATH, env['DOCKER_LVMPY_VERSION']) def docker_lvmpy_update(env): @@ -70,10 +66,7 @@ def docker_lvmpy_update(env): ensure_filestorage_mapping() logger.info('Running docker-lvmpy update script') update_docker_lvmpy_env(env) - run_cmd( - cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/update.sh'.split(), - env=env - ) + run_cmd(cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/update.sh'.split(), env=env) logger.info('docker-lvmpy update done') @@ -81,10 +74,7 @@ def docker_lvmpy_install(env): sync_docker_lvmpy_repo(env) ensure_filestorage_mapping() update_docker_lvmpy_env(env) - run_cmd( - cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/install.sh'.split(), - env=env - ) + run_cmd(cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/install.sh'.split(), env=env) logger.info('docker-lvmpy installed') @@ -147,7 +137,7 @@ def prepare_block_device(block_device, force=False): else: logger.info('%s contains %s filesystem', block_device, filesystem) format_as_btrfs(block_device) - mount_device(block_device, SCHAINS_MNT_DIR_SYNC) + mount_device(block_device, SCHAINS_MNT_DIR_SINGLE_CHAIN) def max_resize_btrfs(path): diff --git a/node_cli/utils/decorators.py b/node_cli/utils/decorators.py index 95c822e8..f72fea60 100644 --- a/node_cli/utils/decorators.py +++ b/node_cli/utils/decorators.py @@ -22,19 +22,22 @@ from node_cli.core.host import is_node_inited from node_cli.utils.global_config import get_system_user from node_cli.utils.helper import error_exit, is_user_valid, get_g_conf_user -from node_cli.utils.texts import Texts +from node_cli.utils.texts import safe_load_texts from node_cli.utils.exit_codes import CLIExitCodes -TEXTS = Texts() +TEXTS = safe_load_texts() def check_not_inited(f): + 
"""Decorator that checks if node is not already initialized.""" + @wraps(f) def inner(*args, **kwargs): if is_node_inited(): error_exit(TEXTS['node']['already_inited'], exit_code=CLIExitCodes.NODE_STATE_ERROR) return f(*args, **kwargs) + return inner @@ -44,6 +47,7 @@ def inner(*args, **kwargs): if not is_node_inited(): error_exit(TEXTS['node']['not_inited'], exit_code=CLIExitCodes.NODE_STATE_ERROR) return f(*args, **kwargs) + return inner @@ -53,8 +57,9 @@ def inner(*args, **kwargs): if not is_user_valid(): g_conf_user = get_g_conf_user() current_user = get_system_user() - error_msg = f'You couldn\'t execute this command from user {current_user}. \ -Allowed: {g_conf_user} or root.' + error_msg = f"You couldn't execute this command from user {current_user}. \ +Allowed: {g_conf_user} or root." error_exit(error_msg, exit_code=CLIExitCodes.BAD_USER_ERROR) return f(*args, **kwargs) + return inner diff --git a/node_cli/utils/docker_utils.py b/node_cli/utils/docker_utils.py index 2b75f9a3..98a4946c 100644 --- a/node_cli/utils/docker_utils.py +++ b/node_cli/utils/docker_utils.py @@ -19,45 +19,85 @@ import io import itertools -import os import logging +import os +import time from typing import Optional import docker from docker.client import DockerClient +from docker.errors import NotFound from docker.models.containers import Container -from node_cli.utils.helper import run_cmd, str_to_bool +from skale_core.settings import BaseNodeSettings + from node_cli.configs import ( COMPOSE_PATH, - SYNC_COMPOSE_PATH, - REMOVED_CONTAINERS_FOLDER_PATH, - SGX_CERTIFICATES_DIR_NAME, + FAIR_COMPOSE_PATH, NGINX_CONTAINER_NAME, + REMOVED_CONTAINERS_FOLDER_PATH, ) - +from node_cli.core.node_options import active_fair, active_skale, passive_fair, passive_skale +from node_cli.utils.helper import run_cmd +from node_cli.utils.node_type import NodeMode, NodeType logger = logging.getLogger(__name__) SCHAIN_REMOVE_TIMEOUT = 300 IMA_REMOVE_TIMEOUT = 20 TELEGRAF_REMOVE_TIMEOUT = 20 - 
-MAIN_COMPOSE_CONTAINERS = ('skale-api', 'bounty', 'skale-admin') -BASE_COMPOSE_SERVICES = ( - 'transaction-manager', - 'skale-admin', - 'skale-api', - 'bounty', - 'nginx', - 'redis', - 'watchdog', - 'filebeat', -) -MONITORING_COMPOSE_SERVICES = ( - 'node-exporter', - 'advisor', -) +REDIS_START_TIMEOUT = 10 + +REDIS_SERVICE_DICT = {'redis': 'sk_redis'} + +CORE_COMMON_COMPOSE_SERVICES = { + 'transaction-manager': 'sk_tm', + 'redis': 'sk_redis', + 'watchdog': 'sk_watchdog', + 'nginx': 'sk_nginx', + 'filebeat': 'sk_filebeat', +} + +BASE_SKALE_COMPOSE_SERVICES = { + **CORE_COMMON_COMPOSE_SERVICES, + 'admin': 'sk_admin', + 'api': 'sk_api', + 'bounty': 'sk_bounty', +} + +BASE_FAIR_COMPOSE_SERVICES = { + **CORE_COMMON_COMPOSE_SERVICES, + 'admin': 'sk_admin', + 'api': 'sk_api', +} + +BASE_FAIR_BOOT_COMPOSE_SERVICES = { + **CORE_COMMON_COMPOSE_SERVICES, + 'boot-admin': 'sk_boot_admin', + 'boot-api': 'sk_boot_api', +} + +BASE_PASSIVE_COMPOSE_SERVICES = { + 'admin': 'sk_admin', + 'nginx': 'sk_nginx', + 'api': 'sk_api', + 'watchdog': 'sk_watchdog', + **REDIS_SERVICE_DICT, +} + +BASE_PASSIVE_FAIR_COMPOSE_SERVICES = { + 'admin': 'sk_admin', + 'api': 'sk_api', + 'nginx': 'sk_nginx', + 'watchdog': 'sk_watchdog', + 'filebeat': 'sk_filebeat', + **REDIS_SERVICE_DICT, +} + +MONITORING_COMPOSE_SERVICES = { + 'node-exporter': 'monitor_node_exporter', + 'advisor': 'monitor_cadvisor', +} TELEGRAF_SERVICES = ('telegraf',) NOTIFICATION_COMPOSE_SERVICES = ('celery',) COMPOSE_TIMEOUT = 10 @@ -79,15 +119,18 @@ def get_sanitized_container_name(container_info: dict) -> str: def get_containers(container_name_filter=None, _all=True) -> list: - return docker_client().containers.list(all=_all) + filters = {} + if container_name_filter: + filters['name'] = container_name_filter + return docker_client().containers.list(all=_all, filters=filters) def get_all_schain_containers(_all=True) -> list: - return docker_client().containers.list(all=_all, filters={'name': 'skale_schain_*'}) + return 
docker_client().containers.list(all=_all, filters={'name': 'sk_skaled_*'}) def get_all_ima_containers(_all=True) -> list: - return docker_client().containers.list(all=_all, filters={'name': 'skale_ima_*'}) + return docker_client().containers.list(all=_all, filters={'name': 'sk_ima_*'}) def remove_dynamic_containers() -> None: @@ -133,7 +176,7 @@ def safe_rm(container: Container, timeout=DOCKER_DEFAULT_STOP_TIMEOUT, **kwargs) logger.info(f'Container removed: {container_name}') -def stop_container( +def stop_container_by_name( container_name: str, timeout: int = DOCKER_DEFAULT_STOP_TIMEOUT, dclient: Optional[DockerClient] = None, @@ -144,7 +187,7 @@ def stop_container( container.stop(timeout=timeout) -def rm_container( +def remove_container_by_name( container_name: str, timeout: int = DOCKER_DEFAULT_STOP_TIMEOUT, dclient: Optional[DockerClient] = None, @@ -153,29 +196,30 @@ def rm_container( container_names = [container.name for container in get_containers()] if container_name in container_names: container = dc.containers.get(container_name) - safe_rm(container) + safe_rm(container, timeout=timeout) -def start_container(container_name: str, dclient: Optional[DockerClient] = None) -> None: +def start_container_by_name(container_name: str, dclient: Optional[DockerClient] = None) -> None: dc = dclient or docker_client() container = dc.containers.get(container_name) logger.info('Starting container %s', container_name) container.start() -def remove_schain_container(schain_name: str, dclient: Optional[DockerClient] = None) -> None: - container_name = f'skale_schain_{schain_name}' - rm_container(container_name, timeout=SCHAIN_REMOVE_TIMEOUT, dclient=dclient) +def remove_schain_container_by_name( + schain_name: str, dclient: Optional[DockerClient] = None +) -> None: + container_name = f'sk_skaled_{schain_name}' + remove_container_by_name(container_name, timeout=SCHAIN_REMOVE_TIMEOUT, dclient=dclient) def backup_container_logs( container: Container, - head: int = 
DOCKER_DEFAULT_HEAD_LINES, - tail: int = DOCKER_DEFAULT_TAIL_LINES, + tail: int | str = DOCKER_DEFAULT_TAIL_LINES, ) -> None: logger.info(f'Going to backup container logs: {container.name}') logs_backup_filepath = get_logs_backup_filepath(container) - save_container_logs(container, logs_backup_filepath, tail) + save_container_logs(container, logs_backup_filepath, tail=tail) logger.info(f'Old container logs saved to {logs_backup_filepath}, tail: {tail}') @@ -183,7 +227,7 @@ def save_container_logs( container: Container, log_filepath: str, head: int = DOCKER_DEFAULT_HEAD_LINES, - tail: int = DOCKER_DEFAULT_TAIL_LINES, + tail: int | str = DOCKER_DEFAULT_TAIL_LINES, ) -> None: separator = b'=' * 80 + b'\n' tail_lines = container.logs(tail=tail) @@ -220,14 +264,14 @@ def is_volume_exists(name: str, dutils=None): dutils = dutils or docker_client() try: dutils.volumes.get(name) - except docker.errors.NotFound: + except NotFound: return False return True -def compose_rm(env={}, sync_node: bool = False): +def compose_rm(node_type: NodeType, node_mode: NodeMode, env={}): logger.info('Removing compose containers') - compose_path = get_compose_path(sync_node) + compose_path = get_compose_path(node_type, node_mode) run_cmd( cmd=( 'docker', @@ -243,49 +287,107 @@ def compose_rm(env={}, sync_node: bool = False): logger.info('Compose containers removed') -def compose_pull(env: dict, sync_node: bool = False): +def compose_pull(env: dict, node_type: NodeType, node_mode: NodeMode): logger.info('Pulling compose containers') - compose_path = get_compose_path(sync_node) + compose_path = get_compose_path(node_type, node_mode) run_cmd(cmd=('docker', 'compose', '-f', compose_path, 'pull'), env=env) -def compose_build(env: dict, sync_node: bool = False): +def compose_build(env: dict, node_type: NodeType, node_mode: NodeMode): logger.info('Building compose containers') - compose_path = get_compose_path(sync_node) + compose_path = get_compose_path(node_type, node_mode) run_cmd(cmd=('docker', 
'compose', '-f', compose_path, 'build'), env=env) -def get_up_compose_cmd(services): - return ('docker', 'compose', '-f', COMPOSE_PATH, 'up', '-d', *services) +def get_compose_path(node_type: NodeType, node_mode: NodeMode) -> str: + if node_type == NodeType.FAIR: + return FAIR_COMPOSE_PATH + return COMPOSE_PATH -def get_up_compose_sync_cmd(): - return ('docker', 'compose', '-f', SYNC_COMPOSE_PATH, 'up', '-d') +def get_compose_services(node_type: NodeType, node_mode: NodeMode) -> list[str]: + if passive_skale(node_type, node_mode): + return list(BASE_PASSIVE_COMPOSE_SERVICES) + elif active_fair(node_type, node_mode): + return list(BASE_FAIR_COMPOSE_SERVICES) + elif passive_fair(node_type, node_mode): + return list(BASE_PASSIVE_FAIR_COMPOSE_SERVICES) + return list(BASE_SKALE_COMPOSE_SERVICES) -def get_compose_path(sync_node: bool) -> str: - return SYNC_COMPOSE_PATH if sync_node else COMPOSE_PATH +def get_up_compose_cmd( + node_type: NodeType, node_mode: NodeMode, services: list[str] | None = None +) -> tuple: + compose_path = get_compose_path(node_type, node_mode) + if services is None: + services = get_compose_services(node_type, node_mode) -def compose_up(env, sync_node=False): - if sync_node: - logger.info('Running containers for sync node') - run_cmd(cmd=get_up_compose_sync_cmd(), env=env) - return + return ('docker', 'compose', '-f', compose_path, 'up', '-d', *services) - logger.info('Running base set of containers') - if 'SGX_CERTIFICATES_DIR_NAME' not in env: - env['SGX_CERTIFICATES_DIR_NAME'] = SGX_CERTIFICATES_DIR_NAME +def compose_up( + env, + settings: BaseNodeSettings, + node_type: NodeType, + node_mode: NodeMode, + is_fair_boot: bool = False, + services: list[str] | None = None, +): + env['PASSIVE_NODE'] = str(node_mode == NodeMode.PASSIVE) + if passive_skale(node_type, node_mode) or passive_fair(node_type, node_mode): + logger.info('Running containers for passive node') + run_cmd(cmd=get_up_compose_cmd(node_type=node_type, node_mode=node_mode), env=env) 
+ return - logger.debug('Launching containers with env %s', env) - run_cmd(cmd=get_up_compose_cmd(BASE_COMPOSE_SERVICES), env=env) - if str_to_bool(env.get('MONITORING_CONTAINERS', 'False')): + if active_fair(node_type, node_mode): + logger.info('Running fair base set of containers') + if is_fair_boot: + logger.debug('Launching fair boot containers with env %s', env) + run_cmd( + cmd=get_up_compose_cmd( + node_type=node_type, + node_mode=node_mode, + services=list(BASE_FAIR_BOOT_COMPOSE_SERVICES), + ), + env=env, + ) + else: + logger.debug('Launching fair containers with env %s', env) + run_cmd( + cmd=get_up_compose_cmd( + node_type=node_type, + node_mode=node_mode, + services=services, + ), + env=env, + ) + elif active_skale(node_type, node_mode): + logger.info('Running skale node base set of containers') + logger.debug('Launching skale node containers with env %s', env) + run_cmd(cmd=get_up_compose_cmd(node_type=node_type, node_mode=node_mode), env=env) + + if settings.tg_api_key and settings.tg_chat_id: + logger.info('Running containers for Telegram notifications') + run_cmd( + cmd=get_up_compose_cmd( + node_type=NodeType.SKALE, + node_mode=node_mode, + services=list(NOTIFICATION_COMPOSE_SERVICES), + ), + env=env, + ) + + if settings.monitoring_containers: logger.info('Running monitoring containers') - run_cmd(cmd=get_up_compose_cmd(MONITORING_COMPOSE_SERVICES), env=env) - if 'TG_API_KEY' in env and 'TG_CHAT_ID' in env: - logger.info('Running containers for Telegram notifications') - run_cmd(cmd=get_up_compose_cmd(NOTIFICATION_COMPOSE_SERVICES), env=env) + run_cmd( + cmd=get_up_compose_cmd( + node_type=NodeType.SKALE, + node_mode=node_mode, + services=list(MONITORING_COMPOSE_SERVICES), + ), + env=env, + ) def restart_nginx_container(dutils=None): @@ -318,20 +420,16 @@ def is_container_running(name: str, dclient: Optional[DockerClient] = None) -> b try: container = dc.containers.get(name) return container.status == 'running' - except docker.errors.NotFound: + 
except NotFound: return False -def is_admin_running(dclient: Optional[DockerClient] = None) -> bool: - return is_container_running(name='skale_admin', dclient=dclient) - - def is_api_running(dclient: Optional[DockerClient] = None) -> bool: - return is_container_running(name='skale_api', dclient=dclient) + return is_container_running(name='sk_api', dclient=dclient) -def is_sync_admin_running(dclient: Optional[DockerClient] = None) -> bool: - return is_container_running(name='skale_sync_admin', dclient=dclient) +def is_admin_running(dclient: Optional[DockerClient] = None) -> bool: + return is_container_running(name='sk_admin', dclient=dclient) def system_prune(): @@ -348,3 +446,19 @@ def docker_cleanup(dclient=None, ignore=None): system_prune() except Exception as e: logger.warning('Image cleanup errored with %s', e) + + +def wait_for_container(container_name: str, attempts: int = 10, interval: int = 3) -> bool: + logger.info('Waiting for container %s to be up', container_name) + dc = docker_client() + + for i in range(attempts): + try: + container = dc.containers.get(container_name) + if container.status == 'running': + logger.info('Container %s is up', container_name) + return True + except NotFound: + logger.warning('Container %s not found, retrying...', container_name) + time.sleep(interval) + return False diff --git a/node_cli/utils/exit_codes.py b/node_cli/utils/exit_codes.py index 85656fb1..cbe0c696 100644 --- a/node_cli/utils/exit_codes.py +++ b/node_cli/utils/exit_codes.py @@ -21,7 +21,8 @@ class CLIExitCodes(IntEnum): - """This class contains exit codes for SKALE CLI tools""" + """This class contains exit codes for SKALE CLI tools.""" + SUCCESS = 0 FAILURE = 1 BAD_API_RESPONSE = 3 diff --git a/node_cli/utils/git_utils.py b/node_cli/utils/git_utils.py index be1fd49d..38e5a11c 100644 --- a/node_cli/utils/git_utils.py +++ b/node_cli/utils/git_utils.py @@ -21,20 +21,12 @@ import logging from git.repo.base import Repo -from git.exc import GitCommandError - 
logger = logging.getLogger(__name__) def check_is_branch(repo: Repo, ref_name: str) -> bool: - try: - repo.git.show_ref('--verify', f'refs/heads/{ref_name}') - logger.debug(f'{ref_name} is branch') - return True - except GitCommandError: - logger.debug(f'{ref_name} is not branch') - return False + return ref_name in (branch.name for branch in repo.heads) def clone_repo(repo_url: str, repo_path: str, ref_name: str) -> None: @@ -44,6 +36,10 @@ def clone_repo(repo_url: str, repo_path: str, ref_name: str) -> None: def sync_repo(repo_url: str, repo_path: str, ref_name: str) -> None: + """ + Sync Git repository by cloning if it doesn't exist locally. If it exists, fetch latest changes. + """ + logger.info(f'Sync repo {repo_url} → {repo_path}') if not os.path.isdir(os.path.join(repo_path, '.git')): clone_repo(repo_url, repo_path, ref_name) @@ -52,11 +48,17 @@ def sync_repo(repo_url: str, repo_path: str, ref_name: str) -> None: def fetch_pull_repo(repo_path: str, ref_name: str) -> None: + """Fetch latest changes and checkout/pull specific git reference.""" + repo = Repo(repo_path) repo_name = os.path.basename(repo.working_dir) - logger.info(f'Fetching {repo_name} changes') + + logger.info(f'Fetching latest changes for {repo_name}') repo.remotes.origin.fetch() - logger.info(f'Checkouting {repo_path} to {ref_name}') + + logger.info(f'Checking out {ref_name} in {repo_name}') repo.git.checkout(ref_name) + if check_is_branch(repo, ref_name): + logger.info(f'Pulling latest changes for branch {ref_name}') repo.remotes.origin.pull() diff --git a/node_cli/utils/global_config.py b/node_cli/utils/global_config.py index 4a1c4eaa..974c573a 100644 --- a/node_cli/utils/global_config.py +++ b/node_cli/utils/global_config.py @@ -34,7 +34,7 @@ def get_home_dir() -> str: def read_g_config(g_skale_dir: str, g_skale_conf_filepath: str) -> dict: - """Read global SKALE config file, init if not exists""" + """Read global SKALE config file, init if it doesn't exist.""" if not 
os.path.isfile(g_skale_conf_filepath): return generate_g_config_file(g_skale_dir, g_skale_conf_filepath) with open(g_skale_conf_filepath, encoding='utf-8') as data_file: @@ -42,19 +42,20 @@ def read_g_config(g_skale_dir: str, g_skale_conf_filepath: str) -> dict: def generate_g_config_file(g_skale_dir: str, g_skale_conf_filepath: str) -> dict: - """Init global SKALE config file""" + """Init global SKALE config file.""" print('Generating global SKALE config file...') os.makedirs(g_skale_dir, exist_ok=True) - g_config = { - 'user': get_system_user(), - 'home_dir': get_home_dir() - } + g_config = {'user': get_system_user(), 'home_dir': get_home_dir()} print(f'{g_skale_conf_filepath} content: {g_config}') try: with open(g_skale_conf_filepath, 'w') as outfile: json.dump(g_config, outfile, indent=4) except PermissionError as e: logger.exception(e) - print('No permissions to write into /etc directory') + print(f'No permissions to write into {g_skale_dir} directory') + sys.exit(7) + except OSError as e: + logger.exception(e) + print(f'Error writing to {g_skale_conf_filepath}: {e}') sys.exit(7) return g_config diff --git a/node_cli/utils/helper.py b/node_cli/utils/helper.py index 7b6da7f2..8a642aee 100644 --- a/node_cli/utils/helper.py +++ b/node_cli/utils/helper.py @@ -19,58 +19,48 @@ import ipaddress import json +import logging +import logging.handlers as py_handlers import os import re -import socket -import sys -import uuid -from urllib.parse import urlparse -from typing import Optional - -import yaml import shutil -import requests +import socket import subprocess -import urllib.request - +import sys import urllib.parse +import urllib.request +import uuid +from pathlib import Path from functools import wraps - -import logging from logging import Formatter, StreamHandler -import logging.handlers as py_handlers - -import distutils -import distutils.util +from typing import Any, NoReturn, Optional +from urllib.parse import urlparse import click - +import requests +import 
yaml from jinja2 import Environment -from node_cli.utils.print_formatters import print_err_response -from node_cli.utils.exit_codes import CLIExitCodes -from node_cli.configs.env import absent_params as absent_env_params, get_env_config from node_cli.configs import ( - TEXT_FILE, ADMIN_HOST, ADMIN_PORT, - HIDE_STREAM_LOG, - GLOBAL_SKALE_DIR, - GLOBAL_SKALE_CONF_FILEPATH, DEFAULT_SSH_PORT, + GLOBAL_SKALE_CONF_FILEPATH, + GLOBAL_SKALE_DIR, + HIDE_STREAM_LOG, ) -from node_cli.configs.routes import get_route -from node_cli.utils.global_config import read_g_config, get_system_user - from node_cli.configs.cli_logger import ( + DEBUG_LOG_FILEPATH, FILE_LOG_FORMAT, LOG_BACKUP_COUNT, LOG_FILE_SIZE_BYTES, LOG_FILEPATH, STREAM_LOG_FORMAT, - DEBUG_LOG_FILEPATH, ) - +from node_cli.configs.routes import get_route +from node_cli.utils.exit_codes import CLIExitCodes +from node_cli.utils.global_config import get_system_user, read_g_config +from node_cli.utils.print_formatters import print_err_response logger = logging.getLogger(__name__) @@ -78,7 +68,7 @@ DEFAULT_ERROR_DATA = { 'status': 'error', - 'payload': 'Request failed. Check skale_api container logs', + 'payload': 'Request failed. 
Check API container logs', } @@ -96,13 +86,13 @@ def write_json(path: str, content: dict) -> None: json.dump(content, outfile, indent=4) -def save_json(path: str, content: dict) -> None: +def save_json(path: str | Path, content: dict) -> None: tmp_path = get_tmp_path(path) write_json(tmp_path, content) shutil.move(tmp_path, path) -def init_file(path, content=None): +def init_file(path: str | Path, content=None): if not os.path.exists(path): write_json(path, content) @@ -154,28 +144,24 @@ def get_username(): return os.environ.get('USERNAME') or os.environ.get('USER') -def extract_env_params(env_filepath, sync_node=False, raise_for_status=True): - env_params = get_env_config(env_filepath, sync_node=sync_node) - absent_params = ', '.join(absent_env_params(env_params)) - if absent_params: - click.echo( - f'Your env file({env_filepath}) have some absent params: ' - f'{absent_params}.\n' - f'You should specify them to make sure that ' - f'all services are working', - err=True, - ) - if raise_for_status: - raise InvalidEnvFileError(f'Missing params: {absent_params}') - return None - return env_params +def error_exit(error_payload: Any, exit_code: CLIExitCodes = CLIExitCodes.FAILURE) -> NoReturn: + """Print error message and exit the program with specified exit code. 
+ Args: + error_payload: Error message string or list of error messages + exit_code: Exit code to use when terminating the program (default: FAILURE) -def str_to_bool(val): - return bool(distutils.util.strtobool(val)) + Raises: + TypeError: If exit_code is not CLIExitCodes + Example: + >>> error_exit("Permission denied", CLIExitCodes.BAD_USER_ERROR) + Permission denied + + """ + if not isinstance(exit_code, CLIExitCodes): + raise TypeError('exit_code must be CLIExitCodes enum') -def error_exit(error_payload, exit_code=CLIExitCodes.FAILURE): print_err_response(error_payload) sys.exit(exit_code.value) @@ -188,14 +174,6 @@ def safe_get_config(config, key): return None -def safe_load_texts(): - with open(TEXT_FILE, 'r') as stream: - try: - return yaml.safe_load(stream) - except yaml.YAMLError as exc: - print(exc) - - def safe_load_yml(filepath): with open(filepath, 'r') as stream: try: @@ -220,21 +198,23 @@ def post_request(blueprint, method, json=None, files=None): response = requests.post(url, json=json, files=files) data = response.json() except Exception as err: - logger.error('Request failed', exc_info=err) + logger.exception('Request failed', exc_info=err) data = DEFAULT_ERROR_DATA status = data['status'] payload = data['payload'] return status, payload -def get_request(blueprint: str, method: str, params: Optional[dict] = None) -> tuple[str, str]: +def get_request( + blueprint: str, method: str, params: Optional[dict] = None +) -> tuple[str, str | dict]: route = get_route(blueprint, method) url = construct_url(route) try: response = requests.get(url, params=params) data = response.json() except Exception as err: - logger.error('Request failed', exc_info=err) + logger.exception('Request failed', exc_info=err) data = DEFAULT_ERROR_DATA status = data['status'] @@ -299,23 +279,8 @@ def to_camel_case(snake_str): return components[0] + ''.join(x.title() for x in components[1:]) -def validate_abi(abi_filepath: str) -> dict: - if not os.path.isfile(abi_filepath): - 
return {'filepath': abi_filepath, 'status': 'error', 'msg': 'No such file'} - try: - with open(abi_filepath) as abi_file: - json.load(abi_file) - except Exception: - return { - 'filepath': abi_filepath, - 'status': 'error', - 'msg': 'Failed to load abi file as json', - } - return {'filepath': abi_filepath, 'status': 'ok', 'msg': ''} - - def streamed_cmd(func): - """Decorator that allow function to print logs into stderr""" + """Decorator that allows function to print logs into stderr.""" @wraps(func) def wrapper(*args, **kwargs): @@ -354,20 +319,41 @@ def rm_dir(folder: str) -> None: logger.info(f"{folder} doesn't exist, skipping...") -def safe_mkdir(path: str, print_res: bool = False): +def cleanup_dir_content(folder: str) -> None: + if os.path.exists(folder): + logger.info('Removing contents of %s', folder) + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + +def safe_mkdir(path: str | Path, print_res: bool = False) -> None: if os.path.exists(path): + logger.debug(f'Directory {path} already exists') return + msg = f'Creating {path} directory...' 
logger.info(msg) if print_res: print(msg) + os.makedirs(path, exist_ok=True) def rsync_dirs(src: str, dest: str) -> None: - logger.info(f'Syncing {dest} with {src}') - run_cmd(['rsync', '-r', f'{src}/', dest]) - run_cmd(['rsync', '-r', f'{src}/.git', dest]) + logger.info(f'Syncing directory {dest} with {src}') + + try: + run_cmd(['rsync', '-r', f'{src}/', dest]) + run_cmd(['rsync', '-r', f'{src}/.git', dest]) + except subprocess.CalledProcessError as e: + logger.error(f'Rsync failed: {e}') + error_exit( + f'Failed to sync directories: {e}', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR + ) def ok_result(payload: dict = None): @@ -391,6 +377,15 @@ def convert(self, value, param, ctx): return value +class UrlOrAnyType(UrlType): + name = 'url' + + def convert(self, value, param, ctx): + if value == 'any': + return value + return super().convert(value, param, ctx) + + class IpType(click.ParamType): name = 'ip' @@ -403,11 +398,12 @@ def convert(self, value, param, ctx): URL_TYPE = UrlType() +URL_OR_ANY_TYPE = UrlOrAnyType() IP_TYPE = IpType() -def get_tmp_path(path: str) -> str: - base, ext = os.path.splitext(path) +def get_tmp_path(path: str | Path) -> str: + base, ext = os.path.splitext(str(path)) salt = uuid.uuid4().hex[:5] return base + salt + '.tmp' + ext @@ -418,3 +414,12 @@ def get_ssh_port(ssh_service_name='ssh'): except OSError: logger.exception('Cannot get ssh service port') return DEFAULT_SSH_PORT + + +def is_btrfs_subvolume(path: str) -> bool: + """Check if the given path is a Btrfs subvolume.""" + try: + output = run_cmd(['btrfs', 'subvolume', 'show', path], check_code=False) + return output.returncode == 0 + except subprocess.CalledProcessError: + return False diff --git a/node_cli/utils/meta.py b/node_cli/utils/meta.py index 69078af2..651efc1e 100644 --- a/node_cli/utils/meta.py +++ b/node_cli/utils/meta.py @@ -1,61 +1,152 @@ +import abc import json import os -from collections import namedtuple +from dataclasses import dataclass + from node_cli.configs 
import META_FILEPATH DEFAULT_VERSION = '1.0.0' DEFAULT_CONFIG_STREAM = '1.1.0' -DEFAULT_DOCKER_LVMPY_STREAM = '1.0.0' +DEFAULT_DOCKER_LVMPY_VERSION = '1.0.0' DEFAULT_OS_ID = 'ubuntu' DEFAULT_OS_VERSION = '18.04' -class CliMeta( - namedtuple( - 'Node', - ['version', 'config_stream', 'docker_lvmpy_stream', 'os_id', 'os_version'] - ) -): - __slots__ = () - - def __new__(cls, version=DEFAULT_VERSION, config_stream=DEFAULT_CONFIG_STREAM, - docker_lvmpy_stream=DEFAULT_DOCKER_LVMPY_STREAM, os_id=DEFAULT_OS_ID, - os_version=DEFAULT_OS_VERSION): - return super(CliMeta, cls).__new__( - cls, version, config_stream, docker_lvmpy_stream, os_id, os_version - ) +@dataclass +class CliMetaBase(abc.ABC): + version: str = DEFAULT_VERSION + config_stream: str = DEFAULT_CONFIG_STREAM + os_id: str = DEFAULT_OS_ID + os_version: str = DEFAULT_OS_VERSION + + @abc.abstractmethod + def asdict(self) -> dict: + pass + + +@dataclass +class CliMeta(CliMetaBase): + docker_lvmpy_version: str | None = DEFAULT_DOCKER_LVMPY_VERSION + + def asdict(self) -> dict: + return { + 'version': self.version, + 'config_stream': self.config_stream, + 'docker_lvmpy_version': self.docker_lvmpy_version, + 'os_id': self.os_id, + 'os_version': self.os_version, + } + + +@dataclass +class FairCliMeta(CliMetaBase): + def asdict(self) -> dict: + return { + 'version': self.version, + 'config_stream': self.config_stream, + 'os_id': self.os_id, + 'os_version': self.os_version, + } + +class BaseCliMetaManager(abc.ABC): + def __init__(self, meta_filepath: str = META_FILEPATH) -> None: + self.meta_filepath = meta_filepath -def get_meta_info(raw: bool = False) -> CliMeta: - if not os.path.isfile(META_FILEPATH): - return None - with open(META_FILEPATH) as meta_file: - plain_meta = json.load(meta_file) - if raw: - return plain_meta - return CliMeta(**plain_meta) + def _get_plain_meta(self) -> dict: + if not os.path.isfile(self.meta_filepath): + return {} + with open(self.meta_filepath) as meta_file: + return json.load(meta_file) 
+ @abc.abstractmethod + def get_meta_info(self, raw: bool = False) -> CliMetaBase | dict | None: + pass -def save_meta(meta: CliMeta) -> None: - with open(META_FILEPATH, 'w') as meta_file: - json.dump(meta._asdict(), meta_file) + def save_meta(self, meta: CliMetaBase) -> None: + with open(self.meta_filepath, 'w') as meta_file: + json.dump(meta.asdict(), meta_file) + @abc.abstractmethod + def compose_default_meta(self) -> CliMetaBase: + pass -def compose_default_meta() -> CliMeta: - return CliMeta(version=DEFAULT_VERSION, - docker_lvmpy_stream=DEFAULT_DOCKER_LVMPY_STREAM, - config_stream=DEFAULT_CONFIG_STREAM, os_id=DEFAULT_OS_ID, - os_version=DEFAULT_OS_VERSION) + def ensure_meta(self, meta: CliMetaBase | None = None) -> None: + if not self.get_meta_info(): + meta = meta or self.compose_default_meta() + self.save_meta(meta) + @abc.abstractmethod + def update_meta(self, *args, **kwargs) -> None: + pass -def ensure_meta(meta: CliMeta = None) -> None: - if not get_meta_info(): - meta = meta or compose_default_meta() - save_meta(meta) +class CliMetaManager(BaseCliMetaManager): + def get_meta_info(self, raw: bool = False) -> CliMeta | dict | None: + plain_meta = self._get_plain_meta() + if not raw and not plain_meta: + return None + allowed_fields = set(CliMeta.__dataclass_fields__.keys()) + clean_plain_meta = {k: v for k, v in plain_meta.items() if k in allowed_fields} + + if raw: + return clean_plain_meta + return CliMeta(**clean_plain_meta) + + def compose_default_meta(self) -> CliMeta: + return CliMeta( + version=DEFAULT_VERSION, + docker_lvmpy_version=DEFAULT_DOCKER_LVMPY_VERSION, + config_stream=DEFAULT_CONFIG_STREAM, + os_id=DEFAULT_OS_ID, + os_version=DEFAULT_OS_VERSION, + ) + + def update_meta( + self, + version: str, + config_stream: str, + docker_lvmpy_version: str | None, + os_id: str, + os_version: str, + ) -> None: + self.ensure_meta() + meta = CliMeta( + version, + config_stream, + os_id, + os_version, + docker_lvmpy_version, + ) + self.save_meta(meta) + 
+ +class FairCliMetaManager(BaseCliMetaManager): + def get_meta_info(self, raw: bool = False) -> FairCliMeta | dict | None: + plain_meta = self._get_plain_meta() + if not raw and not plain_meta: + return None + allowed_fields = set(FairCliMeta.__dataclass_fields__.keys()) + clean_plain_meta = {k: v for k, v in plain_meta.items() if k in allowed_fields} + if raw: + return clean_plain_meta + return FairCliMeta(**clean_plain_meta) + + def compose_default_meta(self) -> FairCliMeta: + return FairCliMeta( + version=DEFAULT_VERSION, + config_stream=DEFAULT_CONFIG_STREAM, + os_id=DEFAULT_OS_ID, + os_version=DEFAULT_OS_VERSION, + ) -def update_meta(version: str, config_stream: str, - docker_lvmpy_stream: str, os_id: str, os_version: str) -> None: - ensure_meta() - meta = CliMeta(version, config_stream, docker_lvmpy_stream, os_id, os_version) - save_meta(meta) + def update_meta( + self, + version: str, + config_stream: str, + os_id: str, + os_version: str, + ) -> None: + self.ensure_meta() + meta = FairCliMeta(version, config_stream, os_id, os_version) + self.save_meta(meta) diff --git a/node_cli/core/node_config.py b/node_cli/utils/node_type.py similarity index 72% rename from node_cli/core/node_config.py rename to node_cli/utils/node_type.py index c7050918..754a1d69 100644 --- a/node_cli/core/node_config.py +++ b/node_cli/utils/node_type.py @@ -2,7 +2,7 @@ # # This file is part of node-cli # -# Copyright (C) 2021 SKALE Labs +# Copyright (C) 2025-Present SKALE Labs # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by @@ -17,19 +17,14 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
+from enum import Enum -class NodeConfig: - def __init__(self, config_filepath, env_filepath=None): - pass - def load_env(self): - pass +class NodeType(str, Enum): + SKALE = 'skale' + FAIR = 'fair' - def validate_env(self): - pass - def load_config(self): - pass - - def validate_config(self): - pass +class NodeMode(str, Enum): + ACTIVE = 'active' + PASSIVE = 'passive' diff --git a/node_cli/utils/print_formatters.py b/node_cli/utils/print_formatters.py index 72225db9..1da07d51 100644 --- a/node_cli/utils/print_formatters.py +++ b/node_cli/utils/print_formatters.py @@ -17,30 +17,45 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -import os -import json import datetime +import inspect +import json +import os +from typing import Any + import texttable from dateutil import parser -import inspect - from node_cli.configs import LONG_LINE from node_cli.configs.cli_logger import DEBUG_LOG_FILEPATH from node_cli.utils.meta import CliMeta -from node_cli.utils.texts import Texts +from node_cli.utils.texts import safe_load_texts -TEXTS = Texts() +TEXTS = safe_load_texts() def print_wallet_info(wallet): - print(inspect.cleandoc(f''' + print( + inspect.cleandoc(f""" {LONG_LINE} Address: {wallet['address'].lower()} ETH balance: {wallet['eth_balance']} ETH SKALE balance: {wallet['skale_balance']} SKALE {LONG_LINE} - ''')) + """) + ) + + +def print_fair_wallet_info(wallet): + print( + inspect.cleandoc(f""" + {LONG_LINE} + Address: {wallet['address'].lower()} + Node balance: {wallet['fair_balance']} FAIR + Node balance WEI: {wallet['fair_balance_wei']} FAIR WEI + {LONG_LINE} + """) + ) def get_tty_width(): @@ -63,31 +78,21 @@ def table(self, headers, rows): def format_date(date): - return date.strftime("%b %d %Y %H:%M:%S") + return date.strftime('%b %d %Y %H:%M:%S') def print_containers(containers): - headers = [ - 'Name', - 'Status', - 'Started At', - 'Image' - ] + headers = ['Name', 'Status', 'Started At', 
'Image'] rows = [] for container in containers: - date = parser.parse(container["state"]["StartedAt"]) - status = container["state"]["Status"].capitalize() + date = parser.parse(container['state']['StartedAt']) + status = container['state']['Status'].capitalize() if not container['state']['Running']: - finished_date = parser.parse(container["state"]["FinishedAt"]) + finished_date = parser.parse(container['state']['FinishedAt']) status = f'{status} ({format_date(finished_date)})' - rows.append([ - container['name'], - status, - format_date(date), - container['image'] - ]) + rows.append([container['name'], status, format_date(date), container['image']]) print(Formatter().table(headers, rows)) @@ -106,38 +111,29 @@ def print_schains(schains): rows = [] for schain in schains: date = datetime.datetime.fromtimestamp(schain['start_date']) - rows.append([ - schain['name'], - schain['mainnet_owner'], - schain['part_of_node'], - schain['lifetime'], - format_date(date), - schain['deposit'], - schain['generation'], - schain['originator'], - schain['options']['allocation_type'] - ]) + rows.append( + [ + schain['name'], + schain['mainnet_owner'], + schain['part_of_node'], + schain['lifetime'], + format_date(date), + schain['deposit'], + schain['generation'], + schain['originator'], + schain['options']['allocation_type'], + ] + ) print(Formatter().table(headers, rows)) def print_dkg_statuses(statuses): - headers = [ - 'sChain Name', - 'DKG Status', - 'Added At', - 'sChain Status' - ] + headers = ['sChain Name', 'DKG Status', 'Added At', 'sChain Status'] rows = [] for status in statuses: date = datetime.datetime.fromtimestamp(status['added_at']) - schain_status = 'Deleted' \ - if status['is_deleted'] else 'Exists' - rows.append([ - status['name'], - status['dkg_status_name'], - format_date(date), - schain_status - ]) + schain_status = 'Deleted' if status['is_deleted'] else 'Exists' + rows.append([status['name'], status['dkg_status_name'], format_date(date), schain_status]) 
print(Formatter().table(headers, rows)) @@ -152,23 +148,25 @@ def print_schains_healthchecks(schains): 'IMA', 'Firewall', 'RPC', - 'Blocks' + 'Blocks', ] rows = [] for schain in schains: healthchecks = schain['healthchecks'] - rows.append([ - schain['name'], - healthchecks['config_dir'], - healthchecks['dkg'], - healthchecks['config'], - healthchecks['volume'], - healthchecks['skaled_container'], - healthchecks.get('ima_container', 'No IMA'), - healthchecks['firewall_rules'], - healthchecks['rpc'], - healthchecks['blocks'] - ]) + rows.append( + [ + schain['name'], + healthchecks['config_dir'], + healthchecks['dkg'], + healthchecks['config'], + healthchecks['volume'], + healthchecks['skaled_container'], + healthchecks.get('ima_container', 'No IMA'), + healthchecks['firewall_rules'], + healthchecks['rpc'], + healthchecks['blocks'], + ] + ) print(Formatter().table(headers, rows)) @@ -187,19 +185,11 @@ def print_schains_logs(schains_logs): def print_log_list(logs): - headers = [ - 'Name', - 'Size', - 'Created At' - ] + headers = ['Name', 'Size', 'Created At'] rows = [] for log in logs: date = datetime.datetime.fromtimestamp(log['created_at']) - rows.append([ - log['name'], - log['size'], - format_date(date) - ]) + rows.append([log['name'], log['size'], format_date(date)]) print(Formatter().table(headers, rows)) @@ -209,10 +199,7 @@ def print_dict(title, rows, headers=['Key', 'Value']): def print_exit_status(exit_status_info): - headers = [ - 'Schain name', - 'Status' - ] + headers = ['Schain name', 'Status'] logs = exit_status_info['data'] node_exit_status = exit_status_info['status'].lower() rows = [[log['name'], log['status'].lower()] for log in logs] @@ -230,20 +217,14 @@ def print_firewall_rules(rules, raw=False): print('No allowed endpoints') return if raw: - print(json.dumpes(rules)) - headers = [ - 'IP range', - 'Port' - ] + print(json.dumps(rules)) + headers = ['IP range', 'Port'] rows = [] for rule in sorted(rules, key=lambda r: r['port']): ip_range = 'All 
IPs' - if rule["first_ip"] and rule["last_ip"]: + if rule['first_ip'] and rule['last_ip']: ip_range = f'{rule["first_ip"]} - {rule["last_ip"]}' - rows.append([ - ip_range, - rule['port'] - ]) + rows.append([ip_range, rule['port']]) print(Formatter().table(headers, rows)) @@ -256,24 +237,13 @@ def print_schain_info(info: dict, raw: bool = False) -> None: print(Formatter().table(headers, [rows])) -def print_abi_validation_errors(info: list, raw: bool = False) -> None: - if not info: - return - if raw: - print(json.dumps(info)) - else: - headers = info[0].keys() - rows = [tuple(r.values()) for r in info] - headers = list(map(lambda h: h.capitalize(), headers)) - print(Formatter().table(headers, rows)) - - def print_node_cmd_error(): print(TEXTS['node']['cmd_failed'].format(DEBUG_LOG_FILEPATH)) def print_node_info(node, node_status): - print(inspect.cleandoc(f""" + print( + inspect.cleandoc(f""" {LONG_LINE} Node info Name: {node['name']} @@ -284,20 +254,51 @@ def print_node_info(node, node_status): Domain name: {node['domain_name']} Status: {node_status} {LONG_LINE} - """)) + """) + ) -def print_err_response(error_payload): - if isinstance(error_payload, list): - error_msg = '\n'.join(error_payload) - else: - error_msg = error_payload - - print('Command failed with following errors:') - print(LONG_LINE) - print(error_msg) - print(LONG_LINE) - print(f'You can find more info in {DEBUG_LOG_FILEPATH}') +def print_node_info_fair(node): + print( + inspect.cleandoc(f""" + {LONG_LINE} + Node info + ID: {node['id']} + IP: {node['ip_str']} + Port: {node['port']} + Domain name: {node['domain_name']} + {LONG_LINE} + """) + ) + + +def print_err_response(error_payload: Any) -> None: + """Print formatted error message from API response payload. + + Handles different types of error payloads (str, list, dict etc.) and formats them + into a user-friendly error message along with debug log file location. 
+ """ + try: + if isinstance(error_payload, (list, tuple)): + error_msg = '\n'.join(str(err) for err in error_payload) + elif isinstance(error_payload, dict): + error_msg = json.dumps(error_payload, indent=2) + else: + error_msg = str(error_payload) + + print('Command failed with following errors:') + print(LONG_LINE) + print(error_msg) + print(LONG_LINE) + print(f'You can find more info in {DEBUG_LOG_FILEPATH}') + + except Exception as e: + print('Error occurred while processing error payload:') + print(LONG_LINE) + print(f'Original error payload: {error_payload}') + print(f'Error while formatting: {str(e)}') + print(LONG_LINE) + print(f'Check logs at {DEBUG_LOG_FILEPATH} for more details') def print_failed_requirements_checks(failed_checks: list) -> None: @@ -313,10 +314,66 @@ def print_failed_requirements_checks(failed_checks: list) -> None: def print_meta_info(meta_info: CliMeta) -> None: - print(inspect.cleandoc(f""" + print( + inspect.cleandoc(f""" {LONG_LINE} Version: {meta_info.version} Config Stream: {meta_info.config_stream} - Lvmpy stream: {meta_info.docker_lvmpy_stream} + Lvmpy stream: {meta_info.docker_lvmpy_version} + {LONG_LINE} + """) + ) + + +def format_timestamp(value): + if value is None or value == 'N/A' or value == 0 or value == 0.0: + return 'N/A' + try: + timestamp = float(value) + if timestamp == 0: + return 'N/A' + dt = datetime.datetime.fromtimestamp(timestamp) + human_date = dt.strftime('%Y-%m-%d %H:%M:%S') + return f'{human_date} ({timestamp})' + except (ValueError, TypeError): + return str(value) + + +def print_chain_record(record): + print( + inspect.cleandoc(f""" + {LONG_LINE} + Fair Chain Record + Chain Name: {record.get('name', 'N/A')} + Config Version: {record.get('config_version', 'N/A')} + Sync Config Run: {record.get('sync_config_run', 'N/A')} + First Run: {record.get('first_run', 'N/A')} + Backup Run: {record.get('backup_run', 'N/A')} + Restart Count: {record.get('restart_count', 'N/A')} + Failed RPC Count: 
{record.get('failed_rpc_count', 'N/A')} + Monitor Last Seen: {format_timestamp(record.get('monitor_last_seen', 'N/A'))} + SSL Change Date: {format_timestamp(record.get('ssl_change_date', 'N/A'))} + Repair Date: {format_timestamp(record.get('repair_date', 'N/A'))} + DKG Status: {record.get('dkg_status', 'N/A')} + Repair Timestamp: {format_timestamp(record.get('repair_ts', 'N/A'))} + Snapshot From: {record.get('snapshot_from', 'N/A')} + Restart Timestamp: {format_timestamp(record.get('restart_ts', 'N/A'))} + Force Skaled Start: {record.get('force_skaled_start', 'N/A')} {LONG_LINE} - """)) + """) + ) + + +def print_chain_checks(checks): + def format_checks(check_dict, title): + print(f'\n{title}:') + for name, result in check_dict.items(): + status = 'PASS' if result else 'FAIL' + print(f' {name}: {status}') + + print(f'{LONG_LINE}') + print('Fair Chain Checks') + print(f'{LONG_LINE}') + format_checks(checks['config_checks'], 'Config Checks') + format_checks(checks['skaled_checks'], 'Skaled Checks') + print(f'{LONG_LINE}') diff --git a/node_cli/utils/settings.py b/node_cli/utils/settings.py new file mode 100644 index 00000000..8e7f957c --- /dev/null +++ b/node_cli/utils/settings.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2026 SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import tomllib + +from dotenv.main import DotEnv + +from skale_core.settings import ( + SETTINGS_MAP, + BaseNodeSettings, + FairBaseSettings, + FairSettings, + InternalSettings, + SkalePassiveSettings, + SkaleSettings, + write_internal_settings_file, + write_node_settings_file, +) + +from node_cli.configs import INTERNAL_SETTINGS_PATH, NODE_SETTINGS_PATH, SKALE_DIR +from node_cli.utils.node_type import NodeMode, NodeType + +InternalSettings.model_config['toml_file'] = INTERNAL_SETTINGS_PATH +SkaleSettings.model_config['toml_file'] = NODE_SETTINGS_PATH +SkalePassiveSettings.model_config['toml_file'] = NODE_SETTINGS_PATH +FairSettings.model_config['toml_file'] = NODE_SETTINGS_PATH +FairBaseSettings.model_config['toml_file'] = NODE_SETTINGS_PATH + + +def load_config_file(filepath: str) -> dict: + if filepath.endswith('.toml'): + with open(filepath, 'rb') as f: + return tomllib.load(f) + return {k.lower(): v for k, v in DotEnv(filepath).dict().items()} + + +def validate_and_save_node_settings( + config_filepath: str, + node_type: NodeType, + node_mode: NodeMode, +) -> BaseNodeSettings: + data = load_config_file(config_filepath) + settings_type = SETTINGS_MAP[(node_type.value, node_mode.value)] + write_node_settings_file(path=NODE_SETTINGS_PATH, settings_type=settings_type, data=data) + return settings_type() + + +def save_internal_settings( + node_type: NodeType, + node_mode: NodeMode, + backup_run: bool = False, + pull_config_for_schain: str | None = None, +) -> None: + data = { + 'node_type': node_type.value, + 'node_mode': node_mode.value, + 'skale_dir_host': str(SKALE_DIR), + 'backup_run': backup_run, + 'pull_config_for_schain': pull_config_for_schain, + } + write_internal_settings_file(path=INTERNAL_SETTINGS_PATH, data=data) diff --git a/node_cli/utils/texts.py b/node_cli/utils/texts.py index d4813d27..78102692 100644 --- a/node_cli/utils/texts.py +++ b/node_cli/utils/texts.py @@ -1,26 +1,29 @@ # -*- coding: utf-8 -*- # # This file is part of node-cli +# +# 
Copyright (C) 2025-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -import yaml -from node_cli.configs import TEXT_FILE +from typing import Dict +import yaml -class Texts: - def __init__(self): - self._texts = self._load() +from node_cli.configs import TEXT_FILE - def __getitem__(self, key): - return self._texts.get(key) - def _load(self): - with open(TEXT_FILE, 'r') as stream: - try: - return yaml.safe_load(stream) - except yaml.YAMLError as exc: - print(exc) +def safe_load_texts() -> Dict: + with open(TEXT_FILE, 'r') as stream: + return yaml.safe_load(stream) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..4ab3258d --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,89 @@ +[build-system] +requires = ["setuptools>=75", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "node-cli" +version = "3.2.0" +description = "Node CLI tools" +readme = "README.md" +requires-python = ">=3.13" +license = { file = "LICENSE" } +keywords = ["skale", "cli"] +authors = [{ name = "SKALE Labs", email = "support@skalelabs.com" }] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: GNU Affero General Public License v3", + "Natural Language :: English", + "Programming Language :: Python :: 3.13", +] + +dependencies = [ + "click==8.3.1", + "distro==1.9.0", + 
"docker==7.1.0", + "texttable==1.7.0", + "python-dateutil==2.9.0.post0", + "Jinja2==3.1.6", + "psutil==7.1.3", + "python-dotenv==1.2.1", + "terminaltables==3.1.10", + "requests==2.32.5", + "GitPython==3.1.45", + "packaging==25.0", + "python-debian==1.0.1", + "PyYAML==6.0.3", + "pyOpenSSL==25.3.0", + "MarkupSafe==3.0.3", + "Flask==3.1.2", + "itsdangerous==2.2.0", + "cryptography==46.0.5", + "filelock==3.20.0", + "sh==2.2.2", + "python-crontab==3.3.0", + "requests-mock==1.12.1", + "redis==7.1.1", + "PyInstaller==6.18.0", + "skale.py-core==7.13.dev1", +] + +[project.urls] +Homepage = "https://github.com/skalenetwork/node-cli" + +[project.optional-dependencies] +dev = [ + "ruff==0.14.6", + "bumpversion==0.6.0", + "pytest==9.0.1", + "pytest-cov==7.0.0", + "twine==6.2.0", + "mock==5.2.0", + "freezegun==1.5.5", +] + +[tool.setuptools] +package-dir = { "" = "." } +include-package-data = true + +[tool.setuptools.packages.find] +where = ["."] +exclude = ["tests"] + +[tool.ruff] +line-length = 100 +target-version = "py313" + +[tool.ruff.format] +quote-style = "single" + +[tool.uv] +prerelease = "allow" + + +[tool.pytest.ini_options] +log_cli = false +log_cli_level = "INFO" +log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)" +log_cli_date_format = "%Y-%m-%d %H:%M:%S" +filterwarnings = ["ignore::DeprecationWarning"] diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 5785bf3f..00000000 --- a/pytest.ini +++ /dev/null @@ -1,6 +0,0 @@ -[pytest] -log_cli = 0 -log_cli_level = INFO -log_cli_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s) -log_cli_date_format=%Y-%m-%d %H:%M:%S -filterwarnings = ignore::DeprecationWarning diff --git a/ruff.toml b/ruff.toml deleted file mode 100644 index d823f4d6..00000000 --- a/ruff.toml +++ /dev/null @@ -1,4 +0,0 @@ -line-length = 100 - -[format] -quote-style = "single" diff --git a/scripts/build.sh b/scripts/build.sh index 3f334169..8e26577a 100755 --- a/scripts/build.sh 
+++ b/scripts/build.sh @@ -24,7 +24,7 @@ fi if [ -z "$3" ] then - (>&2 echo 'You should provide type: normal or sync') + (>&2 echo 'You should provide type: skale, or fair') echo $USAGE_MSG exit 1 fi @@ -33,22 +33,13 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" PARENT_DIR="$(dirname "$DIR")" OS=`uname -s`-`uname -m` -#CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD) -LATEST_COMMIT=$(git rev-parse HEAD) -CURRENT_DATETIME="`date "+%Y-%m-%d %H:%M:%S"`"; -DIST_INFO_FILEPATH=$PARENT_DIR/node_cli/cli/info.py - -touch $DIST_INFO_FILEPATH - -echo "BUILD_DATETIME = '$CURRENT_DATETIME'" > $DIST_INFO_FILEPATH -echo "COMMIT = '$LATEST_COMMIT'" >> $DIST_INFO_FILEPATH -echo "BRANCH = '$BRANCH'" >> $DIST_INFO_FILEPATH -echo "OS = '$OS'" >> $DIST_INFO_FILEPATH -echo "VERSION = '$VERSION'" >> $DIST_INFO_FILEPATH -echo "TYPE = '$TYPE'" >> $DIST_INFO_FILEPATH - -if [ "$TYPE" = "sync" ]; then - EXECUTABLE_NAME=skale-$VERSION-$OS-sync + +# Use the new generate_info.sh script +bash "${DIR}/generate_info.sh" "$VERSION" "$BRANCH" "$TYPE" + + +if [ "$TYPE" = "fair" ]; then + EXECUTABLE_NAME=skale-$VERSION-$OS-fair else EXECUTABLE_NAME=skale-$VERSION-$OS fi diff --git a/scripts/export_env.sh b/scripts/export_env.sh new file mode 100644 index 00000000..af30b4ab --- /dev/null +++ b/scripts/export_env.sh @@ -0,0 +1,8 @@ +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +PROJECT_DIR=$(dirname $DIR) + +export LVMPY_LOG_DIR="$PROJECT_DIR/tests/" +export HIDE_STREAM_LOG=true +export TEST_HOME_DIR="$PROJECT_DIR/tests/" +export GLOBAL_SKALE_DIR="$PROJECT_DIR/tests/etc/skale" +export DOTENV_FILEPATH='tests/test-env' \ No newline at end of file diff --git a/scripts/generate_info.sh b/scripts/generate_info.sh new file mode 100644 index 00000000..67ee6aef --- /dev/null +++ b/scripts/generate_info.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +set -e + +VERSION=$1 +BRANCH=$2 +TYPE_STR=$3 + +USAGE_MSG='Usage: generate_info.sh [VERSION] [BRANCH] [TYPE]' + 
+if [ -z "$VERSION" ]; then + (>&2 echo 'You should provide version') + echo $USAGE_MSG + exit 1 +fi +if [ -z "$BRANCH" ]; then + (>&2 echo 'You should provide git branch') + echo $USAGE_MSG + exit 1 +fi +if [ -z "$TYPE_STR" ]; then + (>&2 echo 'You should provide type: skale or fair') + echo $USAGE_MSG + exit 1 +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +PARENT_DIR="$(dirname "$DIR")" +DIST_INFO_FILEPATH=$PARENT_DIR/node_cli/cli/info.py + +LATEST_COMMIT=$(git rev-parse HEAD) +CURRENT_DATETIME="$(date "+%Y-%m-%d %H:%M:%S")" +OS="$(uname -s)-$(uname -m)" + +case "$TYPE_STR" in + skale) + TYPE_ENUM="NodeType.SKALE" + ;; + fair) + TYPE_ENUM="NodeType.FAIR" + ;; + *) + (>&2 echo "Error: Invalid type '$TYPE_STR'. Must be 'skale', or 'fair'") + exit 1 + ;; +esac + +rm -f "$DIST_INFO_FILEPATH" +touch "$DIST_INFO_FILEPATH" + +echo "from node_cli.utils.node_type import NodeType" >> "$DIST_INFO_FILEPATH" +echo "" >> "$DIST_INFO_FILEPATH" + +echo "BUILD_DATETIME = '$CURRENT_DATETIME'" >> "$DIST_INFO_FILEPATH" +echo "COMMIT = '$LATEST_COMMIT'" >> "$DIST_INFO_FILEPATH" +echo "BRANCH = '$BRANCH'" >> "$DIST_INFO_FILEPATH" +echo "OS = '$OS'" >> "$DIST_INFO_FILEPATH" +echo "VERSION = '$VERSION'" >> "$DIST_INFO_FILEPATH" +echo "TYPE = $TYPE_ENUM" >> "$DIST_INFO_FILEPATH" \ No newline at end of file diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh index 97676b9a..1d3a3fe6 100755 --- a/scripts/run_tests.sh +++ b/scripts/run_tests.sh @@ -3,9 +3,6 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" PROJECT_DIR=$(dirname $DIR) -LVMPY_LOG_DIR="$PROJECT_DIR/tests/" \ - HIDE_STREAM_LOG=true \ - TEST_HOME_DIR="$PROJECT_DIR/tests/" \ - GLOBAL_SKALE_DIR="$PROJECT_DIR/tests/etc/skale" \ - DOTENV_FILEPATH='tests/test-env' \ - py.test --cov=$PROJECT_DIR/ --ignore=tests/core/nftables_test.py --ignore=tests/core/migration_test.py tests $@ +. 
"$DIR/export_env.sh" + +py.test --cov=$PROJECT_DIR/ --ignore=tests/core/nftables_test.py --ignore=tests/core/migration_test.py tests/ $@ diff --git a/scripts/set_versions_ga.sh b/scripts/set_versions_ga.sh index ddcc7aaa..7333357f 100644 --- a/scripts/set_versions_ga.sh +++ b/scripts/set_versions_ga.sh @@ -7,7 +7,7 @@ echo PROJECT_DIR: $GITHUB_WORKSPACE export BRANCH=${GITHUB_REF##*/} echo "Branch $BRANCH" -export VERSION=$(python setup.py --version) +export VERSION=$(python -c "import tomllib; print(tomllib.load(open('pyproject.toml', 'rb'))['project']['version'])") export VERSION=$(bash ./helper-scripts/calculate_version.sh) echo "VERSION=$VERSION" >> $GITHUB_ENV diff --git a/setup.py b/setup.py deleted file mode 100644 index f335705d..00000000 --- a/setup.py +++ /dev/null @@ -1,89 +0,0 @@ -import os -import re -from setuptools import find_packages, setup - - -def read(*parts): - path = os.path.join(os.path.dirname(__file__), *parts) - f = open(path, "r") - return f.read() - - -def find_version(*file_paths): - version_file = read(*file_paths) - version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", - version_file, re.M) - if version_match: - return version_match.group(1) - raise RuntimeError("Couldn't parse version from file.") - - -extras_require = { - 'linter': [ - "flake8==7.1.1", - "isort>=4.2.15,<5.10.2", - ], - 'dev': [ - "bumpversion==0.6.0", - "pytest==8.3.2", - "pytest-cov==5.0.0", - "twine==4.0.2", - "mock==4.0.3", - "freezegun==1.2.2" - ] -} - -extras_require['dev'] = ( - extras_require['linter'] + extras_require['dev'] -) - - -setup( - name='node-cli', - # *IMPORTANT*: Don't manually change the version here. - # Use the 'bumpversion' utility instead. 
- version=find_version("node_cli", "cli", "__init__.py"), - include_package_data=True, - description='SKALE client tools', - long_description_markdown_filename='README.md', - author='SKALE Labs', - author_email='support@skalelabs.com', - url='https://github.com/skalenetwork/node-cli', - install_requires=[ - "click==8.1.7", - "PyInstaller==5.12.0", - "distro==1.9.0", - "docker==6.0.1", - "texttable==1.6.7", - "python-dateutil==2.8.2", - "Jinja2==3.1.4", - "psutil==5.9.4", - "python-dotenv==0.21.0", - "terminaltables==3.1.10", - "requests==2.28.1", - "GitPython==3.1.41", - "packaging==23.0", - "python-debian==0.1.49", - "PyYAML==6.0", - "pyOpenSSL==24.2.1", - "MarkupSafe==3.0.2", - 'Flask==2.3.3', - 'itsdangerous==2.1.2', - "cryptography==42.0.4", - "filelock==3.0.12", - 'sh==1.14.2', - 'python-crontab==2.6.0' - ], - python_requires='>=3.8,<4', - extras_require=extras_require, - - keywords=['skale', 'cli'], - packages=find_packages(exclude=['tests']), - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: GNU Affero General Public License v3', - 'Natural Language :: English', - 'Programming Language :: Python :: 3.11', - ], -) diff --git a/tests/.skale/config/docker-compose-fair.yml b/tests/.skale/config/docker-compose-fair.yml new file mode 100644 index 00000000..c09f2c08 --- /dev/null +++ b/tests/.skale/config/docker-compose-fair.yml @@ -0,0 +1,5 @@ +services: + test: + container_name: test + image: alpine:latest + network_mode: host diff --git a/tests/.skale/config/nginx.conf.j2 b/tests/.skale/config/nginx.conf.j2 deleted file mode 100644 index dc264362..00000000 --- a/tests/.skale/config/nginx.conf.j2 +++ /dev/null @@ -1,47 +0,0 @@ -limit_req_zone $binary_remote_addr zone=one:10m rate=7r/s; - -server { - listen 3009; - - {% if ssl %} - listen 311 ssl; - ssl_certificate /ssl/ssl_cert; - ssl_certificate_key /ssl/ssl_key; - {% endif %} - - proxy_read_timeout 500s; - proxy_connect_timeout 
500s; - proxy_send_timeout 500s; - - error_log /var/log/nginx/error.log warn; - client_max_body_size 20m; - - server_name localhost; - limit_req zone=one burst=10; - - location / { - include uwsgi_params; - uwsgi_read_timeout 500s; - uwsgi_socket_keepalive on; - uwsgi_pass 127.0.0.1:3010; - } -} - -server { - listen 80; - - {% if ssl %} - listen 443 ssl; - ssl_certificate /ssl/ssl_cert; - ssl_certificate_key /ssl/ssl_key; - {% endif %} - - error_log /var/log/nginx/error.log warn; - client_max_body_size 20m; - server_name localhost; - limit_req zone=one burst=50; - - location / { - root /filestorage; - } -} \ No newline at end of file diff --git a/tests/cli/exit_test.py b/tests/cli/exit_test.py index ae6e9530..a3b26ed0 100644 --- a/tests/cli/exit_test.py +++ b/tests/cli/exit_test.py @@ -5,17 +5,14 @@ def test_exit_status(): - payload = { - 'status': 'ACTIVE', - 'data': [{'name': 'test', 'status': 'ACTIVE'}], - 'exit_time': 0 - } + payload = {'status': 'ACTIVE', 'data': [{'name': 'test', 'status': 'ACTIVE'}], 'exit_time': 0} - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} - ) + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock( - 'node_cli.utils.helper.requests.get', resp_mock, status, ['--format', 'json']) + 'node_cli.utils.helper.requests.get', resp_mock, status, ['--format', 'json'] + ) assert result.exit_code == 0 - assert result.output == "{'status': 'ACTIVE', 'data': [{'name': 'test', 'status': 'ACTIVE'}], 'exit_time': 0}\n" # noqa + assert ( + result.output + == "{'status': 'ACTIVE', 'data': [{'name': 'test', 'status': 'ACTIVE'}], 'exit_time': 0}\n" + ) # noqa diff --git a/tests/cli/fair_cli_test.py b/tests/cli/fair_cli_test.py new file mode 100644 index 00000000..14e2aee9 --- /dev/null +++ b/tests/cli/fair_cli_test.py @@ -0,0 +1,141 @@ +import pathlib +from unittest import mock + +from click.testing import CliRunner +from 
node_cli.cli.fair_boot import ( + init_boot, + register_boot, + signature_boot, +) +from node_cli.cli.fair_node import ( + backup_node, + cleanup_node, + migrate_node, + exit_node, + restore_node, +) +from node_cli.configs import SKALE_DIR +from node_cli.utils.node_type import NodeMode +from node_cli.utils.meta import CliMeta +from tests.helper import run_command, subprocess_run_mock +from tests.resources_test import BIG_DISK_SIZE + + +@mock.patch('node_cli.cli.fair_node.restore_fair') +def test_fair_node_restore(mock_restore_core, valid_env_file, tmp_path): + runner = CliRunner() + backup_file = tmp_path / 'backup.tar.gz' + backup_file.touch() + backup_path = str(backup_file) + + result = runner.invoke(restore_node, [backup_path, valid_env_file]) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_restore_core.assert_called_once_with(backup_path, valid_env_file, False) + + +@mock.patch('node_cli.cli.fair_node.restore_fair') +def test_fair_node_restore_config_only(mock_restore_core, valid_env_file, tmp_path): + runner = CliRunner() + backup_file = tmp_path / 'backup_config.tar.gz' + backup_file.touch() + backup_path = str(backup_file) + + result = runner.invoke(restore_node, [backup_path, valid_env_file, '--config-only']) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_restore_core.assert_called_once_with(backup_path, valid_env_file, True) + + +@mock.patch('node_cli.cli.fair_node.backup') +def test_fair_node_backup(mock_backup_core, tmp_path): + runner = CliRunner() + backup_folder = str(tmp_path / 'backups') + pathlib.Path(backup_folder).mkdir(exist_ok=True) + + result = runner.invoke(backup_node, [backup_folder]) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_backup_core.assert_called_once_with(backup_folder) + + +@mock.patch('node_cli.cli.fair_boot.register') +def test_fair_boot_register(mock_register_core): + 
runner = CliRunner() + name = 'test-boot-node' + ip = '1.2.3.4' + port = 10001 + domain = 'boot.skale.test' + + result = runner.invoke( + register_boot, ['--name', name, '--ip', ip, '--port', str(port), '--domain', domain] + ) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_register_core.assert_called_once_with( + name=name, p2p_ip=ip, public_ip=ip, port=port, domain_name=domain + ) + + +@mock.patch('node_cli.cli.fair_boot.get_node_signature') +def test_fair_boot_signature(mock_signature_core): + runner = CliRunner() + validator_id = '101' + signature_val = '0xdef456' + mock_signature_core.return_value = signature_val + + result = runner.invoke(signature_boot, [validator_id]) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_signature_core.assert_called_once_with(validator_id) + assert f'Signature: {signature_val}' in result.output + + +@mock.patch('node_cli.cli.fair_boot.init') +def test_fair_boot_init(mock_init_core, valid_env_file): + runner = CliRunner() + result = runner.invoke(init_boot, [valid_env_file]) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_init_core.assert_called_once_with(valid_env_file) + + +@mock.patch('node_cli.cli.fair_node.migrate_from_boot') +def test_fair_node_migrate(mock_migrate_core, valid_env_file): + runner = CliRunner() + result = runner.invoke(migrate_node, ['--yes', valid_env_file]) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_migrate_core.assert_called_once_with(config_file=valid_env_file) + + +@mock.patch('node_cli.cli.fair_node.exit_fair') +def test_fair_node_exit(mock_exit_core): + runner = CliRunner() + result = runner.invoke(exit_node, ['--yes']) + + assert result.exit_code == 0, f'Output: {result.output}\nException: {result.exception}' + mock_exit_core.assert_called_once() + + +def test_cleanup_node(mocked_g_config, 
inited_node): + pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) + + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.fair.common.cleanup_fair_op') as cleanup_mock, + mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), + mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + mock.patch('node_cli.operations.base.configure_nftables'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), + mock.patch('node_cli.fair.common.compose_node_env', return_value={'SCHAIN_NAME': 'test'}), + mock.patch( + 'node_cli.core.node.CliMetaManager.get_meta_info', + return_value=CliMeta(version='2.6.0', config_stream='3.0.2'), + ), + ): + result = run_command(cleanup_node, ['--yes']) + assert result.exit_code == 0 + cleanup_mock.assert_called_once_with( + node_mode=NodeMode.ACTIVE, prune=False, compose_env={'SCHAIN_NAME': 'test'} + ) diff --git a/tests/cli/fair_passive_node_test.py b/tests/cli/fair_passive_node_test.py new file mode 100644 index 00000000..00c61ca0 --- /dev/null +++ b/tests/cli/fair_passive_node_test.py @@ -0,0 +1,107 @@ +import logging +import pathlib + +import mock + +from node_cli.cli.passive_fair_node import cleanup_node, init_passive_node, update_node +from node_cli.configs import NODE_DATA_PATH, SKALE_DIR +from node_cli.utils.helper import init_default_logger +from node_cli.utils.meta import CliMeta +from node_cli.utils.node_type import NodeMode +from tests.helper import run_command, subprocess_run_mock +from tests.resources_test import BIG_DISK_SIZE + +logger = logging.getLogger(__name__) +init_default_logger() + + +def test_init_fair_passive(mocked_g_config, fair_passive_settings, fair_passive_user_conf): + pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.fair.common.init_fair_op', return_value=True), + 
mock.patch('node_cli.fair.common.compose_node_env', return_value={}), + mock.patch('node_cli.fair.passive.setup_fair_passive'), + mock.patch('node_cli.fair.common.time.sleep'), + mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), + mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + mock.patch('node_cli.operations.base.configure_nftables'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False), + ): + result = run_command( + init_passive_node, + [ + fair_passive_user_conf.as_posix(), + '--id', + '1', + ], + ) + assert result.exit_code == 0 + + +def test_init_fair_passive_snapshot_any( + mocked_g_config, fair_passive_settings, fair_passive_user_conf +): + pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.fair.common.init_fair_op', return_value=True), + mock.patch('node_cli.fair.common.compose_node_env', return_value={}), + mock.patch('node_cli.fair.passive.setup_fair_passive'), + mock.patch('node_cli.fair.common.time.sleep'), + mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), + mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + mock.patch('node_cli.operations.base.configure_nftables'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False), + ): + result = run_command( + init_passive_node, + [ + fair_passive_user_conf.as_posix(), + '--id', + '2', + '--snapshot', + 'any', + ], + ) + assert result.exit_code == 0 + + +def test_update_fair_passive( + mocked_g_config, fair_passive_settings, fair_passive_user_conf, clean_node_options +): + pathlib.Path(NODE_DATA_PATH).mkdir(parents=True, exist_ok=True) + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.fair.common.update_fair_op', return_value=True), + mock.patch('node_cli.fair.common.compose_node_env', return_value={}), + 
mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), + mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + mock.patch('node_cli.operations.base.configure_nftables'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), + ): + result = run_command(update_node, [fair_passive_user_conf.as_posix(), '--yes']) + assert result.exit_code == 0 + + +def test_cleanup_node(mocked_g_config): + pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) + + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.fair.common.cleanup_fair_op') as cleanup_mock, + mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), + mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + mock.patch('node_cli.operations.base.configure_nftables'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), + mock.patch('node_cli.fair.common.compose_node_env', return_value={'SCHAIN_NAME': 'test'}), + mock.patch( + 'node_cli.core.node.CliMetaManager.get_meta_info', + return_value=CliMeta(version='2.6.0', config_stream='3.0.2'), + ), + ): + result = run_command(cleanup_node, ['--yes']) + assert result.exit_code == 0 + cleanup_mock.assert_called_once_with( + node_mode=NodeMode.PASSIVE, compose_env={'SCHAIN_NAME': 'test'}, prune=False + ) diff --git a/tests/cli/health_test.py b/tests/cli/health_test.py index b2f2fa3f..404d9a73 100644 --- a/tests/cli/health_test.py +++ b/tests/cli/health_test.py @@ -6,80 +6,88 @@ OK_LS_RESPONSE_DATA = { 'status': 'ok', - 'payload': - [ - { - 'image': 'skalenetwork/schain:1.46-develop.21', - 'name': 'skale_schain_shapely-alfecca-meridiana', - 'state': { - 'Status': 'running', 'Running': True, - 'Paused': False, 'Restarting': False, - 'OOMKilled': False, 'Dead': False, - 'Pid': 232, 'ExitCode': 0, - 'Error': '', - 'StartedAt': '2020-07-31T11:56:35.732888232Z', - 'FinishedAt': '0001-01-01T00:00:00Z' 
- } + 'payload': [ + { + 'image': 'skalenetwork/schain:1.46-develop.21', + 'name': 'sk_skaled_shapely-alfecca-meridiana', + 'state': { + 'Status': 'running', + 'Running': True, + 'Paused': False, + 'Restarting': False, + 'OOMKilled': False, + 'Dead': False, + 'Pid': 232, + 'ExitCode': 0, + 'Error': '', + 'StartedAt': '2020-07-31T11:56:35.732888232Z', + 'FinishedAt': '0001-01-01T00:00:00Z', + }, + }, + { + 'image': 'skale-admin:latest', + 'name': 'sk_api', + 'state': { + 'Status': 'running', + 'Running': True, + 'Paused': False, + 'Restarting': False, + 'OOMKilled': False, + 'Dead': False, + 'Pid': 6710, + 'ExitCode': 0, + 'Error': '', + 'StartedAt': '2020-07-31T11:55:17.28700307Z', + 'FinishedAt': '0001-01-01T00:00:00Z', }, - { - 'image': 'skale-admin:latest', 'name': 'skale_api', - 'state': { - 'Status': 'running', - 'Running': True, 'Paused': False, - 'Restarting': False, 'OOMKilled': False, - 'Dead': False, 'Pid': 6710, 'ExitCode': 0, - 'Error': '', - 'StartedAt': '2020-07-31T11:55:17.28700307Z', - 'FinishedAt': '0001-01-01T00:00:00Z' - } - } - ] + }, + ], } def test_containers(): - resp_mock = response_mock( - requests.codes.ok, - json_data=OK_LS_RESPONSE_DATA - ) - result = run_command_mock('node_cli.utils.helper.requests.get', - resp_mock, containers) + resp_mock = response_mock(requests.codes.ok, json_data=OK_LS_RESPONSE_DATA) + result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, containers) assert result.exit_code == 0 - assert result.output == ' Name Status Started At Image \n-------------------------------------------------------------------------------------------------------------\nskale_schain_shapely-alfecca-meridiana Running Jul 31 2020 11:56:35 skalenetwork/schain:1.46-develop.21\nskale_api Running Jul 31 2020 11:55:17 skale-admin:latest \n' # noqa + assert ( + result.output + == ' Name Status Started At Image 
\n----------------------------------------------------------------------------------------------------------\nsk_skaled_shapely-alfecca-meridiana Running Jul 31 2020 11:56:35 skalenetwork/schain:1.46-develop.21\nsk_api Running Jul 31 2020 11:55:17 skale-admin:latest \n' # noqa + ) def test_checks(): payload = [ { - "name": "test_schain", - "healthchecks": { - "config_dir": True, - "dkg": False, - "config": False, - "volume": False, - "skaled_container": False, - "ima_container": False, - "firewall_rules": False, - "rpc": False, - "blocks": False - } + 'name': 'test_schain', + 'healthchecks': { + 'config_dir': True, + 'dkg': False, + 'config': False, + 'volume': False, + 'skaled_container': False, + 'ima_container': False, + 'firewall_rules': False, + 'rpc': False, + 'blocks': False, + }, } ] - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} - ) - result = run_command_mock('node_cli.utils.helper.requests.get', - resp_mock, schains) + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) + result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, schains) assert result.exit_code == 0 - assert result.output == 'sChain Name Config directory DKG Config file Volume Container IMA Firewall RPC Blocks\n-------------------------------------------------------------------------------------------------------------\ntest_schain True False False False False False False False False \n' # noqa + assert ( + result.output + == 'sChain Name Config directory DKG Config file Volume Container IMA Firewall RPC Blocks\n-------------------------------------------------------------------------------------------------------------\ntest_schain True False False False False False False False False \n' # noqa + ) - result = run_command_mock('node_cli.utils.helper.requests.get', - resp_mock, schains, ['--json']) + result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, 
schains, ['--json']) assert result.exit_code == 0 - assert result.output == '[{"name": "test_schain", "healthchecks": {"config_dir": true, "dkg": false, "config": false, "volume": false, "skaled_container": false, "ima_container": false, "firewall_rules": false, "rpc": false, "blocks": false}}]\n' # noqa + assert ( + result.output + == '[{"name": "test_schain", "healthchecks": {"config_dir": true, "dkg": false, "config": false, "volume": false, "skaled_container": false, "ima_container": false, "firewall_rules": false, "rpc": false, "blocks": false}}]\n' # noqa + ) def test_sgx_status(): @@ -88,14 +96,13 @@ def test_sgx_status(): 'sgx_wallet_version': '1.50.1-stable.0', 'sgx_keyname': 'test_keyname', 'status_zmq': True, - 'status_https': True + 'status_https': True, } - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} - ) - result = run_command_mock( - 'node_cli.utils.helper.requests.get', resp_mock, sgx) + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) + result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, sgx) assert result.exit_code == 0 - assert result.output == '\x1b(0lqqqqqqqqqqqqqqqqqqqwqqqqqqqqqqqqqqqqqqqqqqqqk\x1b(B\n\x1b(0x\x1b(B SGX info \x1b(0x\x1b(B \x1b(0x\x1b(B\n\x1b(0tqqqqqqqqqqqqqqqqqqqnqqqqqqqqqqqqqqqqqqqqqqqqu\x1b(B\n\x1b(0x\x1b(B Server URL \x1b(0x\x1b(B https://127.0.0.1:1026 \x1b(0x\x1b(B\n\x1b(0x\x1b(B SGXWallet Version \x1b(0x\x1b(B 1.50.1-stable.0 \x1b(0x\x1b(B\n\x1b(0x\x1b(B Node SGX keyname \x1b(0x\x1b(B test_keyname \x1b(0x\x1b(B\n\x1b(0x\x1b(B Status HTTPS \x1b(0x\x1b(B True \x1b(0x\x1b(B\n\x1b(0x\x1b(B Status ZMQ \x1b(0x\x1b(B True \x1b(0x\x1b(B\n\x1b(0mqqqqqqqqqqqqqqqqqqqvqqqqqqqqqqqqqqqqqqqqqqqqj\x1b(B\n' # noqa + assert ( + result.output + == '\x1b(0lqqqqqqqqqqqqqqqqqqqwqqqqqqqqqqqqqqqqqqqqqqqqk\x1b(B\n\x1b(0x\x1b(B SGX info \x1b(0x\x1b(B 
\x1b(0x\x1b(B\n\x1b(0tqqqqqqqqqqqqqqqqqqqnqqqqqqqqqqqqqqqqqqqqqqqqu\x1b(B\n\x1b(0x\x1b(B Server URL \x1b(0x\x1b(B https://127.0.0.1:1026 \x1b(0x\x1b(B\n\x1b(0x\x1b(B SGXWallet Version \x1b(0x\x1b(B 1.50.1-stable.0 \x1b(0x\x1b(B\n\x1b(0x\x1b(B Node SGX keyname \x1b(0x\x1b(B test_keyname \x1b(0x\x1b(B\n\x1b(0x\x1b(B Status HTTPS \x1b(0x\x1b(B True \x1b(0x\x1b(B\n\x1b(0x\x1b(B Status ZMQ \x1b(0x\x1b(B True \x1b(0x\x1b(B\n\x1b(0mqqqqqqqqqqqqqqqqqqqvqqqqqqqqqqqqqqqqqqqqqqqqj\x1b(B\n' # noqa + ) diff --git a/tests/cli/logs_test.py b/tests/cli/logs_test.py index b292a848..c6fa37d2 100644 --- a/tests/cli/logs_test.py +++ b/tests/cli/logs_test.py @@ -23,11 +23,11 @@ from node_cli.configs import G_CONF_HOME from tests.helper import run_command -from tests.core.core_logs_test import backup_func, CURRENT_DATETIME, TEST_ARCHIVE_PATH # noqa +from tests.core.core_logs_test import backup_func, CURRENT_DATETIME, TEST_ARCHIVE_PATH # noqa @freezegun.freeze_time(CURRENT_DATETIME) -def test_dump(backup_func, removed_containers_folder): # noqa +def test_dump(backup_func, removed_containers_folder): # noqa result = run_command(dump, [G_CONF_HOME]) assert result.exit_code == 0 assert result.output == f'Logs dump created: {TEST_ARCHIVE_PATH}\n' diff --git a/tests/cli/main_test.py b/tests/cli/main_test.py index 5ce570ad..2d0e74c1 100644 --- a/tests/cli/main_test.py +++ b/tests/cli/main_test.py @@ -18,7 +18,7 @@ # along with this program. If not, see . 
-from node_cli.main import version +from node_cli.main import version, info from tests.helper import run_command @@ -28,3 +28,15 @@ def test_version(): assert result.output == expected result = run_command(version, ['--short']) assert result.output == 'test\n' + + +def test_info_command(): + result = run_command(info, []) + + assert result.exit_code == 0 + + expected_line = 'Full version: test' + assert expected_line in result.output + + assert 'Version:' in result.output + assert 'Build time:' in result.output diff --git a/tests/cli/node_test.py b/tests/cli/node_test.py index 7e206db1..a0c064f0 100644 --- a/tests/cli/node_test.py +++ b/tests/cli/node_test.py @@ -17,31 +17,33 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +import logging import pathlib +from unittest.mock import MagicMock, patch import mock -from unittest.mock import MagicMock, patch +import pytest import requests -import logging -from node_cli.configs import SKALE_DIR, G_CONF_HOME from node_cli.cli.node import ( + _set_domain_name, + _turn_off, + _turn_on, + backup_node, + cleanup_node, node_info, register_node, - signature, - backup_node, + remove_node_from_maintenance, restore_node, set_node_in_maintenance, - remove_node_from_maintenance, + signature, version, - _turn_off, - _turn_on, - _set_domain_name, ) +from node_cli.configs import G_CONF_HOME, SKALE_DIR from node_cli.utils.exit_codes import CLIExitCodes from node_cli.utils.helper import init_default_logger from node_cli.utils.meta import CliMeta - +from node_cli.utils.node_type import NodeType, NodeMode from tests.helper import ( response_mock, run_command, @@ -54,7 +56,7 @@ init_default_logger() -def test_register_node(resource_alloc, mocked_g_config): +def test_register_node(inited_node, resource_alloc, mocked_g_config): resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None}) with mock.patch('node_cli.utils.decorators.is_node_inited', 
return_value=True): result = run_command_mock( @@ -70,7 +72,7 @@ def test_register_node(resource_alloc, mocked_g_config): ) # noqa -def test_register_node_with_error(resource_alloc, mocked_g_config): +def test_register_node_with_error(inited_node, resource_alloc, mocked_g_config): resp_mock = response_mock( requests.codes.ok, {'status': 'error', 'payload': ['Strange error']}, @@ -83,10 +85,13 @@ def test_register_node_with_error(resource_alloc, mocked_g_config): ['--name', 'test-node2', '--ip', '0.0.0.0', '--port', '80', '-d', 'skale.test'], ) assert result.exit_code == 3 - assert (result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n') # noqa + assert ( + result.output + == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa + ) -def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config): +def test_register_node_with_prompted_ip(inited_node, resource_alloc, mocked_g_config): resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None}) with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): result = run_command_mock( @@ -97,10 +102,13 @@ def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config): input='0.0.0.0\n', ) assert result.exit_code == 0 - assert (result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n') # noqa + assert ( + result.output + == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa + ) -def test_register_node_with_default_port(resource_alloc, 
mocked_g_config): +def test_register_node_with_default_port(inited_node, resource_alloc, mocked_g_config): resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None}) with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): result = run_command_mock( @@ -111,7 +119,10 @@ def test_register_node_with_default_port(resource_alloc, mocked_g_config): input='0.0.0.0\n', ) assert result.exit_code == 0 - assert (result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n') # noqa + assert ( + result.output + == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa + ) def test_register_with_no_alloc(mocked_g_config): @@ -124,7 +135,10 @@ def test_register_with_no_alloc(mocked_g_config): input='0.0.0.0\n', ) assert result.exit_code == 8 - assert (result.output == f"Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn't been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n") # noqa + assert ( + result.output + == f"Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn't been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n" # noqa + ) def test_node_info_node_info(): @@ -149,7 +163,10 @@ def test_node_info_node_info(): resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info) assert result.exit_code == 0 - assert (result.output == 
'--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n') # noqa + assert ( + result.output + == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n' # noqa + ) def test_node_info_node_info_not_created(): @@ -199,7 +216,10 @@ def test_node_info_node_info_frozen(): resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info) assert result.exit_code == 0 - assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n') # noqa + assert ( + result.output + == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n' # noqa + ) def test_node_info_node_info_left(): @@ -224,7 +244,10 @@ def test_node_info_node_info_left(): resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info) assert result.exit_code == 0 - assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n') # noqa + assert ( + result.output + == '--------------------------------------------------\nNode 
info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n' # noqa + ) def test_node_info_node_info_leaving(): @@ -249,7 +272,10 @@ def test_node_info_node_info_leaving(): resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info) assert result.exit_code == 0 - assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n') # noqa + assert ( + result.output + == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n' # noqa + ) def test_node_info_node_info_in_maintenance(): @@ -274,7 +300,10 @@ def test_node_info_node_info_in_maintenance(): resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info) assert result.exit_code == 0 - assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n') # noqa + assert ( + result.output + == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n' # noqa + ) def test_node_signature(): @@ -290,52 +319,45 @@ def test_backup(): pathlib.Path(SKALE_DIR).mkdir(parents=True, 
exist_ok=True) result = run_command(backup_node, ['/tmp']) assert result.exit_code == 0 - print(result.output) - assert 'Backup archive succesfully created ' in result.output + assert 'Backup archive successfully created ' in result.output -def test_restore(mocked_g_config): +@pytest.mark.parametrize( + 'node_type,node_mode,test_user_conf', + [ + (NodeType.SKALE, NodeMode.ACTIVE, 'regular_user_conf'), + (NodeType.FAIR, NodeMode.ACTIVE, 'fair_user_conf'), + ], +) +def test_restore(request, node_type, node_mode, test_user_conf, mocked_g_config, tmp_path): pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) - result = run_command(backup_node, ['/tmp']) + result = run_command(backup_node, [tmp_path]) backup_path = result.output.replace('Backup archive successfully created: ', '').replace( '\n', '' ) - with patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op, patch( - 'subprocess.run', new=subprocess_run_mock - ), patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), patch( - 'node_cli.utils.decorators.is_node_inited', return_value=False - ), patch( - 'node_cli.core.node.get_meta_info', - return_value=CliMeta(version='2.4.0', config_stream='3.0.2'), - ), patch('node_cli.operations.base.configure_nftables'): - result = run_command(restore_node, [backup_path, './tests/test-env']) + with ( + patch('node_cli.cli.node.TYPE', node_type), + patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op, + patch('subprocess.run', new=subprocess_run_mock), + patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + patch('node_cli.utils.decorators.is_node_inited', return_value=False), + patch( + 'node_cli.core.node.CliMetaManager.get_meta_info', + return_value=CliMeta(version='2.4.0', config_stream='3.0.2'), + ), + patch('node_cli.operations.base.configure_nftables'), + ): + user_conf_path = request.getfixturevalue(test_user_conf).as_posix() + result = run_command(restore_node, [backup_path, 
user_conf_path]) assert result.exit_code == 0 assert 'Node is restored from backup\n' in result.output # noqa + assert mock_restore_op.call_args.kwargs.get('backup_run') is True - assert mock_restore_op.call_args[0][0].get('BACKUP_RUN') == 'True' - - -def test_restore_no_snapshot(mocked_g_config): - pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) - result = run_command(backup_node, ['/tmp']) - backup_path = result.output.replace('Backup archive successfully created: ', '').replace( - '\n', '' - ) - - with patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op, patch( - 'subprocess.run', new=subprocess_run_mock - ), patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), patch( - 'node_cli.utils.decorators.is_node_inited', return_value=False - ), patch( - 'node_cli.core.node.get_meta_info', - return_value=CliMeta(version='2.4.0', config_stream='3.0.2'), - ), patch('node_cli.operations.base.configure_nftables'): - result = run_command(restore_node, [backup_path, './tests/test-env', '--no-snapshot']) + result = run_command(restore_node, [backup_path, user_conf_path, '--no-snapshot']) assert result.exit_code == 0 assert 'Node is restored from backup\n' in result.output # noqa - - assert mock_restore_op.call_args[0][0].get('BACKUP_RUN') is None + assert mock_restore_op.call_args.kwargs.get('backup_run') is False def test_maintenance_on(): @@ -347,7 +369,7 @@ def test_maintenance_on(): assert ( result.output == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n' - ) # noqa + ) def test_maintenance_off(mocked_g_config): @@ -359,28 +381,30 @@ def test_maintenance_off(mocked_g_config): assert ( result.output == 'Setting maintenance mode off...\nNode is successfully removed from maintenance mode\n' - ) # noqa + ) -def test_turn_off_maintenance_on(mocked_g_config): +def test_turn_off_maintenance_on(mocked_g_config, regular_user_conf, active_node_option, skale_active_settings): resp_mock = 
response_mock(requests.codes.ok, {'status': 'ok', 'payload': None}) - with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch( - 'node_cli.core.node.turn_off_op' - ), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.core.node.turn_off_op'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), + mock.patch('node_cli.cli.node.TYPE', NodeType.SKALE), + ): result = run_command_mock( 'node_cli.utils.helper.requests.post', resp_mock, _turn_off, ['--maintenance-on', '--yes'], ) + assert ( result.output == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n' - ) # noqa + ) assert result.exit_code == 0 - with mock.patch( - 'node_cli.utils.docker_utils.is_container_running', return_value=True - ): + with mock.patch('node_cli.utils.docker_utils.is_container_running', return_value=True): result = run_command_mock( 'node_cli.utils.helper.requests.post', resp_mock, @@ -391,25 +415,27 @@ def test_turn_off_maintenance_on(mocked_g_config): assert result.exit_code == CLIExitCodes.UNSAFE_UPDATE -def test_turn_on_maintenance_off(mocked_g_config): +def test_turn_on_maintenance_off(mocked_g_config, regular_user_conf, active_node_option, skale_active_settings): resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None}) - with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch( - 'node_cli.core.node.get_flask_secret_key' - ), mock.patch('node_cli.core.node.turn_on_op'), mock.patch( - 'node_cli.core.node.is_base_containers_alive' - ), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.core.node.turn_on_op'), + mock.patch('node_cli.core.node.is_base_containers_alive'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), + 
mock.patch('node_cli.cli.node.TYPE', NodeType.SKALE), + ): result = run_command_mock( 'node_cli.utils.helper.requests.post', resp_mock, _turn_on, - ['./tests/test-env', '--maintenance-off', '--sync-schains', '--yes'], + [regular_user_conf.as_posix(), '--maintenance-off', '--sync-schains', '--yes'], ) assert result.exit_code == 0 assert ( result.output == 'Setting maintenance mode off...\nNode is successfully removed from maintenance mode\n' - ) # noqa, tmp fix + ) def test_set_domain_name(): @@ -425,18 +451,42 @@ def test_set_domain_name(): assert result.exit_code == 0 assert ( result.output == 'Setting new domain name: skale.test\nDomain name successfully changed\n' - ) # noqa + ) def test_node_version(meta_file_v2): with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): result = run_command(version) assert result.exit_code == 0 - assert (result.output == '--------------------------------------------------\nVersion: 0.1.1\nConfig Stream: develop\nLvmpy stream: 1.1.2\n--------------------------------------------------\n') # noqa + assert ( + result.output + == '--------------------------------------------------\nVersion: 0.1.1\nConfig Stream: develop\nLvmpy stream: 1.1.2\n--------------------------------------------------\n' # noqa + ) result = run_command(version, ['--json']) assert result.exit_code == 0 assert ( result.output - == "{'version': '0.1.1', 'config_stream': 'develop', 'docker_lvmpy_stream': '1.1.2'}\n" - ) # noqa + == "{'version': '0.1.1', 'config_stream': 'develop', 'docker_lvmpy_version': '1.1.2'}\n" + ) + + +def test_cleanup_node(mocked_g_config): + pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) + + with ( + mock.patch('subprocess.run', new=subprocess_run_mock), + mock.patch('node_cli.core.node.cleanup_skale_op') as cleanup_mock, + mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), + mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), + 
mock.patch('node_cli.operations.base.configure_nftables'), + mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), + mock.patch('node_cli.core.node.compose_node_env', return_value={}), + mock.patch( + 'node_cli.core.node.CliMetaManager.get_meta_info', + return_value=CliMeta(version='2.6.0', config_stream='3.0.2'), + ), + ): + result = run_command(cleanup_node, ['--yes']) + assert result.exit_code == 0 + cleanup_mock.assert_called_once_with(node_mode=NodeMode.ACTIVE, prune=False, compose_env={}) diff --git a/tests/cli/sync_node_test.py b/tests/cli/passive_node_test.py similarity index 75% rename from tests/cli/sync_node_test.py rename to tests/cli/passive_node_test.py index 88803d4f..e8419220 100644 --- a/tests/cli/sync_node_test.py +++ b/tests/cli/passive_node_test.py @@ -17,36 +17,36 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . +import logging import pathlib import mock -import logging -from node_cli.configs import SKALE_DIR, NODE_DATA_PATH +from node_cli.cli.passive_node import cleanup_node, _init_passive, _update_passive +from node_cli.configs import NODE_DATA_PATH, SKALE_DIR from node_cli.core.node_options import NodeOptions -from node_cli.cli.sync_node import _init_sync, _update_sync, _cleanup_sync -from node_cli.utils.meta import CliMeta from node_cli.utils.helper import init_default_logger - +from node_cli.utils.meta import CliMeta +from node_cli.utils.node_type import NodeType, NodeMode +from tests.conftest import set_env_var from tests.helper import run_command, subprocess_run_mock from tests.resources_test import BIG_DISK_SIZE -from tests.conftest import set_env_var logger = logging.getLogger(__name__) init_default_logger() -def test_init_sync(mocked_g_config): +def test_init_passive(mocked_g_config, clean_node_options, passive_user_conf): pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) with ( mock.patch('subprocess.run', new=subprocess_run_mock), - 
mock.patch('node_cli.core.node.init_sync_op'), + mock.patch('node_cli.core.node.init_passive_op'), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False), ): - result = run_command(_init_sync, ['./tests/test-env']) + result = run_command(_init_passive, [passive_user_conf.as_posix()]) node_options = NodeOptions() assert not node_options.archive @@ -56,9 +56,8 @@ def test_init_sync(mocked_g_config): assert result.exit_code == 0 -def test_init_sync_archive(mocked_g_config, clean_node_options): +def test_init_passive_archive(mocked_g_config, clean_node_options, passive_user_conf): pathlib.Path(NODE_DATA_PATH).mkdir(parents=True, exist_ok=True) - # with mock.patch('subprocess.run', new=subprocess_run_mock), \ with ( mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch('node_cli.operations.base.cleanup_volume_artifacts'), @@ -66,20 +65,22 @@ def test_init_sync_archive(mocked_g_config, clean_node_options): mock.patch('node_cli.operations.base.sync_skale_node'), mock.patch('node_cli.operations.base.configure_docker'), mock.patch('node_cli.operations.base.prepare_host'), + mock.patch('node_cli.operations.base.save_internal_settings'), + mock.patch('node_cli.operations.base.run_host_checks', return_value=[]), mock.patch('node_cli.operations.base.ensure_filestorage_mapping'), - mock.patch('node_cli.operations.base.link_env_file'), - mock.patch('node_cli.operations.base.download_contracts'), mock.patch('node_cli.operations.base.generate_nginx_config'), + mock.patch('node_cli.operations.base.get_settings'), mock.patch('node_cli.operations.base.prepare_block_device'), - mock.patch('node_cli.operations.base.update_meta'), + mock.patch('node_cli.operations.base.CliMetaManager.update_meta'), 
mock.patch('node_cli.operations.base.update_resource_allocation'), mock.patch('node_cli.operations.base.update_images'), mock.patch('node_cli.operations.base.compose_up'), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False), + mock.patch('node_cli.cli.node.TYPE', NodeType.SKALE), ): - result = run_command(_init_sync, ['./tests/test-env', '--archive']) + result = run_command(_init_passive, [passive_user_conf.as_posix(), '--archive']) node_options = NodeOptions() assert node_options.archive @@ -93,7 +94,7 @@ def test_init_archive_indexer_fail(mocked_g_config, clean_node_options): pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) with ( mock.patch('subprocess.run', new=subprocess_run_mock), - mock.patch('node_cli.core.node.init_sync_op'), + mock.patch('node_cli.core.node.init_passive_op'), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch('node_cli.operations.base.configure_nftables'), @@ -101,45 +102,48 @@ def test_init_archive_indexer_fail(mocked_g_config, clean_node_options): mock.patch('node_cli.core.node.compose_node_env', return_value={}), set_env_var('ENV_TYPE', 'devnet'), ): - result = run_command(_init_sync, ['./tests/test-env', '--archive', '--indexer']) + result = run_command(_init_passive, ['./tests/test-env', '--archive', '--indexer']) assert result.exit_code == 1 assert 'Cannot use both' in result.output -def test_update_sync(mocked_g_config): +def test_update_passive(passive_user_conf, mocked_g_config): pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) with ( mock.patch('subprocess.run', new=subprocess_run_mock), - mock.patch('node_cli.core.node.update_sync_op'), + mock.patch('node_cli.core.node.update_passive_op'), 
mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), mock.patch( - 'node_cli.core.node.get_meta_info', + 'node_cli.core.node.CliMetaManager.get_meta_info', return_value=CliMeta(version='2.6.0', config_stream='3.0.2'), ), ): - result = run_command(_update_sync, ['./tests/test-env', '--yes']) + result = run_command(_update_passive, [passive_user_conf.as_posix(), '--yes']) assert result.exit_code == 0 -def test_cleanup_sync(mocked_g_config): +def test_cleanup_node(mocked_g_config): pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True) with ( mock.patch('subprocess.run', new=subprocess_run_mock), - mock.patch('node_cli.core.node.cleanup_sync_op'), + mock.patch('node_cli.core.node.cleanup_skale_op') as cleanup_mock, mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True), mock.patch('node_cli.core.node.compose_node_env', return_value={'SCHAIN_NAME': 'test'}), mock.patch( - 'node_cli.core.node.get_meta_info', + 'node_cli.core.node.CliMetaManager.get_meta_info', return_value=CliMeta(version='2.6.0', config_stream='3.0.2'), ), ): - result = run_command(_cleanup_sync, ['--yes']) + result = run_command(cleanup_node, ['--yes']) assert result.exit_code == 0 + cleanup_mock.assert_called_once_with( + node_mode=NodeMode.PASSIVE, prune=False, compose_env={'SCHAIN_NAME': 'test'} + ) diff --git a/tests/cli/resources_allocation_test.py b/tests/cli/resources_allocation_test.py deleted file mode 100644 index d84d9fc3..00000000 --- a/tests/cli/resources_allocation_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- 
coding: utf-8 -*- -# -# This file is part of node-cli -# -# Copyright (C) 2019 SKALE Labs -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . - -import os -import json -import mock -import requests - -import pytest - -from node_cli.configs.resource_allocation import ( - RESOURCE_ALLOCATION_FILEPATH, NODE_DATA_PATH -) -from node_cli.utils.helper import safe_mkdir, write_json -from tests.helper import response_mock, run_command_mock - -from node_cli.cli.resources_allocation import show, generate - -from tests.resources_test import BIG_DISK_SIZE - - -TEST_CONFIG = {'test': 1} - - -@pytest.fixture -def resource_alloc_config(): - write_json(RESOURCE_ALLOCATION_FILEPATH, TEST_CONFIG) - yield RESOURCE_ALLOCATION_FILEPATH - os.remove(RESOURCE_ALLOCATION_FILEPATH) - - -def test_show(resource_alloc_config): - resp_mock = response_mock(requests.codes.created) - write_json(RESOURCE_ALLOCATION_FILEPATH, TEST_CONFIG) - result = run_command_mock( - 'node_cli.utils.helper.post_request', - resp_mock, - show - ) - assert result.output == json.dumps(TEST_CONFIG, indent=4) + '\n' - assert result.exit_code == 0 - - -def test_generate(): - safe_mkdir(NODE_DATA_PATH) - resp_mock = response_mock(requests.codes.created) - with mock.patch('node_cli.core.resources.get_disk_size', - return_value=BIG_DISK_SIZE): - result = run_command_mock( - 'node_cli.utils.helper.post_request', - resp_mock, - generate, - 
['./tests/test-env', '--yes'] - ) - assert result.output == (f'Resource allocation file generated: ' - f'{RESOURCE_ALLOCATION_FILEPATH}\n') - assert result.exit_code == 0 - - -def test_generate_already_exists(resource_alloc_config): - resp_mock = response_mock(requests.codes.created) - with mock.patch('node_cli.core.resources.get_disk_size', - return_value=BIG_DISK_SIZE): - result = run_command_mock( - 'node_cli.utils.helper.post_request', - resp_mock, - generate, - ['./tests/test-env', '--yes'] - ) - assert result.output == 'Resource allocation file is already exists\n' - assert result.exit_code == 0 - - result = run_command_mock( - 'node_cli.utils.helper.post_request', - resp_mock, - generate, - ['./tests/test-env', '--yes', '--force'] - ) - assert result.output == ( - f'Resource allocation file generated: ' - f'{RESOURCE_ALLOCATION_FILEPATH}\n' - ) - assert result.exit_code == 0 diff --git a/tests/cli/schains_test.py b/tests/cli/schains_test.py index d12e7277..e450800a 100644 --- a/tests/cli/schains_test.py +++ b/tests/cli/schains_test.py @@ -24,8 +24,7 @@ from node_cli.configs import G_CONF_HOME from tests.helper import response_mock, run_command, run_command_mock -from node_cli.cli.schains import (get_schain_config, ls, dkg, show_rules, - repair, info_) +from node_cli.cli.schains import get_schain_config, ls, dkg, show_rules, repair, info_ def test_ls(): @@ -33,26 +32,39 @@ def test_ls(): time.tzset() payload = [ { - 'name': 'test_schain1', 'mainnet_owner': '0x123', - 'index_owner_list': 3, 'part_of_node': 0, - 'lifetime': 5, 'start_date': 1570115385, - 'deposit': 1000000000000000000, 'index': 3, 'generation': 1, 'originator': '0x465', 'options': {'allocation_type': 0} # noqa + 'name': 'test_schain1', + 'mainnet_owner': '0x123', + 'index_owner_list': 3, + 'part_of_node': 0, + 'lifetime': 5, + 'start_date': 1570115385, + 'deposit': 1000000000000000000, + 'index': 3, + 'generation': 1, + 'originator': '0x465', + 'options': {'allocation_type': 0}, # noqa }, { 
'name': 'crazy_cats1', 'mainnet_owner': '0x321', - 'index_owner_list': 8, 'part_of_node': 0, - 'lifetime': 5, 'start_date': 1570469410, - 'deposit': 1000000000000000000, 'index': 8, 'generation': 0, 'originator': '0x0', 'options': {'allocation_type': 0} # noqa - } + 'index_owner_list': 8, + 'part_of_node': 0, + 'lifetime': 5, + 'start_date': 1570469410, + 'deposit': 1000000000000000000, + 'index': 8, + 'generation': 0, + 'originator': '0x0', + 'options': {'allocation_type': 0}, # noqa + }, ] - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} - ) + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, ls) assert result.exit_code == 0 - assert result.output == ' Name Owner Size Lifetime Created At Deposit Generation Originator Type\n--------------------------------------------------------------------------------------------------------------------\ntest_schain1 0x123 0 5 Oct 03 2019 16:09:45 1000000000000000000 1 0x465 0 \ncrazy_cats1 0x321 0 5 Oct 07 2019 18:30:10 1000000000000000000 0 0x0 0 \n' # noqa + assert ( + result.output + == ' Name Owner Size Lifetime Created At Deposit Generation Originator Type\n--------------------------------------------------------------------------------------------------------------------\ntest_schain1 0x123 0 5 Oct 03 2019 16:09:45 1000000000000000000 1 0x465 0 \ncrazy_cats1 0x321 0 5 Oct 07 2019 18:30:10 1000000000000000000 0 0x0 0 \n' # noqa + ) def test_dkg(): @@ -64,68 +76,81 @@ def test_dkg(): 'added_at': 1578497212.645233, 'dkg_status': 2, 'dkg_status_name': 'IN_PROGRESS', - 'is_deleted': False + 'is_deleted': False, } ] - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} - ) - result = run_command_mock('node_cli.utils.helper.requests.get', - resp_mock, dkg) + resp_mock = response_mock(requests.codes.ok, 
json_data={'payload': payload, 'status': 'ok'}) + result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, dkg) assert result.exit_code == 0 - assert result.output == ' sChain Name DKG Status Added At sChain Status\n---------------------------------------------------------------------\nmelodic-aldhibah IN_PROGRESS Jan 08 2020 15:26:52 Exists \n' # noqa + assert ( + result.output + == ' sChain Name DKG Status Added At sChain Status\n---------------------------------------------------------------------\nmelodic-aldhibah IN_PROGRESS Jan 08 2020 15:26:52 Exists \n' # noqa + ) - result = run_command_mock('node_cli.utils.helper.requests.get', - resp_mock, dkg, ['--all']) + result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, dkg, ['--all']) assert result.exit_code == 0 - assert result.output == ' sChain Name DKG Status Added At sChain Status\n---------------------------------------------------------------------\nmelodic-aldhibah IN_PROGRESS Jan 08 2020 15:26:52 Exists \n' # noqa + assert ( + result.output + == ' sChain Name DKG Status Added At sChain Status\n---------------------------------------------------------------------\nmelodic-aldhibah IN_PROGRESS Jan 08 2020 15:26:52 Exists \n' # noqa + ) def test_get_schain_config(): payload = { 'nodeInfo': { - 'nodeID': 2, 'nodeName': 'testnet-1', - 'basePort': 10011, 'httpRpcPort': 10009, - 'httpsRpcPort': 11118, 'wsRpcPort': 10118, + 'nodeID': 2, + 'nodeName': 'testnet-1', + 'basePort': 10011, + 'httpRpcPort': 10009, + 'httpsRpcPort': 11118, + 'wsRpcPort': 10118, 'wssRpcPort': 13219, - 'bindIP': '123.123.123.123' + 'bindIP': '123.123.123.123', }, 'sChain': { - 'schainID': 1, 'schainName': 'test1', + 'schainID': 1, + 'schainName': 'test1', 'nodes': [ - {'nodeID': 2, - 'nodeName': 'testnet-1', - 'basePort': 10011, - 'httpRpcPort': 10013, - 'httpsRpcPort': 10018, - 'wsRpcPort': 10014, - 'wssRpcPort': 10019, - 'publicKey': 'public_key', - 'owner': '0xe3213', - 'schainIndex': 1, - 'ip': 
'213.13.123.13', - 'publicIP': '1.1.1.1' - }, - {'nodeID': 0, 'nodeName': 'testnet-2', - 'basePort': 10077, 'httpRpcPort': 10079, - 'httpsRpcPort': 10084, 'wsRpcPort': 10080, - 'wssRpcPort': 10085, - 'publicKey': 'public_key352', - 'owner': '0x323', - 'schainIndex': 2, 'ip': '2.2.2.2', - 'publicIP': '3.3.3.3' - }]} + { + 'nodeID': 2, + 'nodeName': 'testnet-1', + 'basePort': 10011, + 'httpRpcPort': 10013, + 'httpsRpcPort': 10018, + 'wsRpcPort': 10014, + 'wssRpcPort': 10019, + 'publicKey': 'public_key', + 'owner': '0xe3213', + 'schainIndex': 1, + 'ip': '213.13.123.13', + 'publicIP': '1.1.1.1', + }, + { + 'nodeID': 0, + 'nodeName': 'testnet-2', + 'basePort': 10077, + 'httpRpcPort': 10079, + 'httpsRpcPort': 10084, + 'wsRpcPort': 10080, + 'wssRpcPort': 10085, + 'publicKey': 'public_key352', + 'owner': '0x323', + 'schainIndex': 2, + 'ip': '2.2.2.2', + 'publicIP': '3.3.3.3', + }, + ], + }, } - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) + result = run_command_mock( + 'node_cli.utils.helper.requests.get', resp_mock, get_schain_config, ['test1'] ) - result = run_command_mock('node_cli.utils.helper.requests.get', - resp_mock, - get_schain_config, ['test1']) assert result.exit_code == 0 - assert result.output == "{'nodeInfo': {'basePort': 10011,\n 'bindIP': '123.123.123.123',\n 'httpRpcPort': 10009,\n 'httpsRpcPort': 11118,\n 'nodeID': 2,\n 'nodeName': 'testnet-1',\n 'wsRpcPort': 10118,\n 'wssRpcPort': 13219},\n 'sChain': {'nodes': [{'basePort': 10011,\n 'httpRpcPort': 10013,\n 'httpsRpcPort': 10018,\n 'ip': '213.13.123.13',\n 'nodeID': 2,\n 'nodeName': 'testnet-1',\n 'owner': '0xe3213',\n 'publicIP': '1.1.1.1',\n 'publicKey': 'public_key',\n 'schainIndex': 1,\n 'wsRpcPort': 10014,\n 'wssRpcPort': 10019},\n {'basePort': 10077,\n 'httpRpcPort': 10079,\n 'httpsRpcPort': 10084,\n 'ip': '2.2.2.2',\n 'nodeID': 0,\n 'nodeName': 
'testnet-2',\n 'owner': '0x323',\n 'publicIP': '3.3.3.3',\n 'publicKey': 'public_key352',\n 'schainIndex': 2,\n 'wsRpcPort': 10080,\n 'wssRpcPort': 10085}],\n 'schainID': 1,\n 'schainName': 'test1'}}\n" # noqa + assert ( + result.output + == "{'nodeInfo': {'basePort': 10011,\n 'bindIP': '123.123.123.123',\n 'httpRpcPort': 10009,\n 'httpsRpcPort': 11118,\n 'nodeID': 2,\n 'nodeName': 'testnet-1',\n 'wsRpcPort': 10118,\n 'wssRpcPort': 13219},\n 'sChain': {'nodes': [{'basePort': 10011,\n 'httpRpcPort': 10013,\n 'httpsRpcPort': 10018,\n 'ip': '213.13.123.13',\n 'nodeID': 2,\n 'nodeName': 'testnet-1',\n 'owner': '0xe3213',\n 'publicIP': '1.1.1.1',\n 'publicKey': 'public_key',\n 'schainIndex': 1,\n 'wsRpcPort': 10014,\n 'wssRpcPort': 10019},\n {'basePort': 10077,\n 'httpRpcPort': 10079,\n 'httpsRpcPort': 10084,\n 'ip': '2.2.2.2',\n 'nodeID': 0,\n 'nodeName': 'testnet-2',\n 'owner': '0x323',\n 'publicIP': '3.3.3.3',\n 'publicKey': 'public_key352',\n 'schainIndex': 2,\n 'wsRpcPort': 10080,\n 'wssRpcPort': 10085}],\n 'schainID': 1,\n 'schainName': 'test1'}}\n" # noqa + ) def test_schain_rules(): @@ -139,18 +164,19 @@ def test_schain_rules(): {'port': 10005, 'first_ip': '127.0.0.2', 'last_ip': '127.0.0.2'}, {'port': 10007, 'first_ip': None, 'last_ip': None}, {'port': 10008, 'first_ip': None, 'last_ip': None}, - {'port': 10009, 'first_ip': None, 'last_ip': None} + {'port': 10009, 'first_ip': None, 'last_ip': None}, ] } - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} - ) + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) result = run_command_mock( - 'node_cli.utils.helper.requests.get', resp_mock, show_rules, ['schain-test']) + 'node_cli.utils.helper.requests.get', resp_mock, show_rules, ['schain-test'] + ) assert result.exit_code == 0 print(repr(result.output)) - assert result.output == ' IP range Port \n-----------------------------\n127.0.0.2 - 127.0.0.2 10000\n127.0.0.2 - 
127.0.0.2 10001\nAll IPs 10002\nAll IPs 10003\n127.0.0.2 - 127.0.0.2 10004\n127.0.0.2 - 127.0.0.2 10005\nAll IPs 10007\nAll IPs 10008\nAll IPs 10009\n' # noqa + assert ( + result.output + == ' IP range Port \n-----------------------------\n127.0.0.2 - 127.0.0.2 10000\n127.0.0.2 - 127.0.0.2 10001\nAll IPs 10002\nAll IPs 10003\n127.0.0.2 - 127.0.0.2 10004\n127.0.0.2 - 127.0.0.2 10005\nAll IPs 10007\nAll IPs 10008\nAll IPs 10009\n' # noqa + ) def test_repair(tmp_schains_dir): @@ -167,24 +193,29 @@ def test_info(): 'name': 'attractive-ed-asich', 'id': '0xfb3b68013fa494407b691b4b603d84c66076c0a5ac96a7d6b162d7341d74fa61', 'owner': '0x1111111111111111111111111111111111111111', - 'part_of_node': 0, 'dkg_status': 3, 'is_deleted': False, - 'first_run': False, 'repair_mode': False + 'part_of_node': 0, + 'dkg_status': 3, + 'is_deleted': False, + 'first_run': False, + 'repair_mode': False, } - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'ok'} + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'}) + result = run_command_mock( + 'node_cli.utils.helper.requests.get', resp_mock, info_, ['attractive-ed-asich'] + ) + assert ( + result.output + == ' Name Id Owner Part_of_node Dkg_status Is_deleted First_run Repair_mode\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nattractive-ed-asich 0xfb3b68013fa494407b691b4b603d84c66076c0a5ac96a7d6b162d7341d74fa61 0x1111111111111111111111111111111111111111 0 3 False False False \n' # noqa ) - result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, info_, - ['attractive-ed-asich']) - assert result.output == ' Name Id Owner Part_of_node Dkg_status Is_deleted First_run 
Repair_mode\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nattractive-ed-asich 0xfb3b68013fa494407b691b4b603d84c66076c0a5ac96a7d6b162d7341d74fa61 0x1111111111111111111111111111111111111111 0 3 False False False \n' # noqa assert result.exit_code == 0 payload = ['error'] - resp_mock = response_mock( - requests.codes.ok, - json_data={'payload': payload, 'status': 'error'} + resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'error'}) + result = run_command_mock( + 'node_cli.utils.helper.requests.get', resp_mock, info_, ['schain not found'] + ) + assert ( + result.output + == f'Command failed with following errors:\n--------------------------------------------------\nerror\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa ) - result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, info_, - ['schain not found']) - assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nerror\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa assert result.exit_code == 3 diff --git a/tests/cli/validate_test.py b/tests/cli/validate_test.py deleted file mode 100644 index 7a595b87..00000000 --- a/tests/cli/validate_test.py +++ /dev/null @@ -1,67 +0,0 @@ -import json -import pathlib -import shutil - -import pytest - -from node_cli.configs import (CONTRACTS_PATH, G_CONF_HOME, - IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH) -from node_cli.cli.validate import abi -from tests.helper import run_command - - -@pytest.fixture -def contracts_info_dir(): - pathlib.Path(CONTRACTS_PATH).mkdir(parents=True, exist_ok=True) - yield CONTRACTS_PATH - 
shutil.rmtree(CONTRACTS_PATH) - - -@pytest.fixture -def contract_valid_abi_files(contracts_info_dir): - json_data = {'test': 'abi'} - with open(IMA_CONTRACTS_FILEPATH, 'w') as ima_abi_file: - json.dump(json_data, ima_abi_file) - with open(MANAGER_CONTRACTS_FILEPATH, 'w') as manager_abi_file: - json.dump(json_data, manager_abi_file) - yield IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH - - -@pytest.fixture -def contract_abi_file_invalid(contracts_info_dir): - json_data = {'test': 'abi'} - with open(IMA_CONTRACTS_FILEPATH, 'w') as ima_abi_file: - json.dump(json_data, ima_abi_file) - with open(MANAGER_CONTRACTS_FILEPATH, 'w') as manager_abi_file: - manager_abi_file.write('Invalid json') - yield IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH - - -@pytest.fixture -def contract_abi_file_empty(contracts_info_dir): - json_data = {'test': 'abi'} - with open(IMA_CONTRACTS_FILEPATH, 'w') as ima_abi_file: - json.dump(json_data, ima_abi_file) - yield IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH - - -def test_validate_abi(contract_valid_abi_files): - result = run_command(abi) - assert result.output == 'All abi files are correct json files!\n' - assert result.exit_code == 0 - - -def test_validate_abi_invalid_file(contract_abi_file_invalid): - result = run_command(abi) - assert 'Some files do not exist or are incorrect' in result.output - assert f'{G_CONF_HOME}.skale/contracts_info/manager.json error Failed to load abi file as json' in result.output # noqa - assert f'{G_CONF_HOME}.skale/contracts_info/ima.json ok' in result.output - assert result.exit_code == 0 - - -def test_validate_abi_empty_file(contract_abi_file_empty): - result = run_command(abi) - assert 'Some files do not exist or are incorrect' in result.output - assert f'{G_CONF_HOME}.skale/contracts_info/manager.json error No such file' in result.output # noqa - assert f'{G_CONF_HOME}.skale/contracts_info/ima.json ok' in result.output - assert result.exit_code == 0 diff --git a/tests/cli/wallet_test.py 
b/tests/cli/wallet_test.py index 489ec07b..234f4f1a 100644 --- a/tests/cli/wallet_test.py +++ b/tests/cli/wallet_test.py @@ -30,18 +30,12 @@ def test_wallet_info(): response_data = { 'status': 'ok', - 'payload': { - 'address': 'simple_address', - 'eth_balance': 13, - 'skale_balance': 123 - } + 'payload': {'address': 'simple_address', 'eth_balance': 13, 'skale_balance': 123}, } response_mock = MagicMock() response_mock.status_code = requests.codes.ok response_mock.json = Mock(return_value=response_data) - result = run_command_mock('node_cli.utils.helper.requests.get', - response_mock, - wallet_info) + result = run_command_mock('node_cli.utils.helper.requests.get', response_mock, wallet_info) assert result.exit_code == 0 expected = ( '--------------------------------------------------\n' @@ -52,28 +46,22 @@ def test_wallet_info(): ) assert result.output == expected - result = run_command_mock('node_cli.utils.helper.requests.get', - response_mock, - wallet_info, - ['--format', 'json']) - assert result.exit_code == 0 - expected = ( - "{\"address\": \"simple_address\", " - "\"eth_balance\": 13, \"skale_balance\": 123}\n" + result = run_command_mock( + 'node_cli.utils.helper.requests.get', response_mock, wallet_info, ['--format', 'json'] ) + assert result.exit_code == 0 + expected = '{"address": "simple_address", "eth_balance": 13, "skale_balance": 123}\n' assert result.output == expected def test_wallet_send(): - resp_mock = response_mock( - requests.codes.ok, - {'status': 'ok', 'payload': None} - ) + resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None}) result = run_command_mock( 'node_cli.utils.helper.requests.post', resp_mock, send, - ['0x00000000000000000000000000000000', '10', '--yes']) + ['0x00000000000000000000000000000000', '10', '--yes'], + ) assert result.exit_code == 0 assert result.output == 'Funds were successfully transferred\n' # noqa @@ -87,6 +75,10 @@ def test_wallet_send_with_error(): 'node_cli.utils.helper.requests.post', 
resp_mock, send, - ['0x00000000000000000000000000000000', '10', '--yes']) + ['0x00000000000000000000000000000000', '10', '--yes'], + ) assert result.exit_code == 3 - assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa + assert ( + result.output + == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa + ) diff --git a/tests/configs_env_test.py b/tests/configs_env_test.py deleted file mode 100644 index 1fe9ac4e..00000000 --- a/tests/configs_env_test.py +++ /dev/null @@ -1,15 +0,0 @@ -from node_cli.configs.env import NotValidEnvParamsError, validate_params - - -def test_validate_params(): - valid_config = {'ENV_TYPE': 'mainnet'} - validate_params(valid_config) - invalid_config = {'ENV_TYPE': ''} - error = None - try: - validate_params(invalid_config) - except NotValidEnvParamsError as e: - error = e - assert error is not None - earg = 'Allowed ENV_TYPE values are [\'mainnet\', \'testnet\', \'qanet\', \'devnet\']. Actual: ""' # noqa - assert error.args[0] == earg diff --git a/tests/conftest.py b/tests/conftest.py index 146c9a22..8b610d96 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -16,120 +16,71 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see . 
-"""SKALE config test""" import json import os import pathlib import shutil +import tempfile from contextlib import contextmanager import docker import mock import pytest -import yaml +import redis from node_cli.configs import ( CONTAINER_CONFIG_TMP_PATH, GLOBAL_SKALE_CONF_FILEPATH, GLOBAL_SKALE_DIR, META_FILEPATH, + NGINX_CONFIG_FILEPATH, NGINX_CONTAINER_NAME, + NODE_DATA_PATH, + REDIS_URI, REMOVED_CONTAINERS_FOLDER_PATH, - STATIC_PARAMS_FILEPATH, SCHAIN_NODE_DATA_PATH, ) from node_cli.configs.node_options import NODE_OPTIONS_FILEPATH -from node_cli.configs.ssl import SSL_FOLDER_PATH from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH +from node_cli.configs.ssl import SSL_FOLDER_PATH +from node_cli.core.node_options import NodeOptions from node_cli.utils.docker_utils import docker_client from node_cli.utils.global_config import generate_g_config_file +from node_cli.utils.node_type import NodeMode +from tests.fixtures.settings import ( # noqa: F401 + INTERNAL_FAIR_ACTIVE, + INTERNAL_FAIR_PASSIVE, + INTERNAL_SKALE_ACTIVE, + INTERNAL_SKALE_PASSIVE, + NODE_FAIR_ACTIVE, + NODE_FAIR_PASSIVE, + NODE_SKALE_ACTIVE, + NODE_SKALE_PASSIVE, + _cleanup_settings, + _write_settings, + fair_active_settings, + fair_passive_settings, + skale_active_settings, + skale_passive_settings, +) +from tests.helper import TEST_META_V1, TEST_META_V2, TEST_META_V3, TEST_SCHAINS_MNT_DIR_SINGLE_CHAIN -from tests.helper import TEST_META_V1, TEST_META_V2, TEST_META_V3, TEST_SCHAINS_MNT_DIR_SYNC - - -TEST_ENV_PARAMS = """ -mainnet: - server: - cpu_total: 4 - cpu_physical: 4 - memory: 32 - swap: 16 - disk: 2000000000000 - - packages: - docker: 1.1.3 - docker-compose: 1.1.3 - iptables-persistant: 1.1.3 - lvm2: 1.1.1 - -testnet: - server: - cpu_total: 4 - cpu_physical: 4 - memory: 32 - swap: 16 - disk: 200000000000 - - packages: - docker: 1.1.3 - docker-compose: 1.1.3 - iptables-persistant: 1.1.3 - lvm2: 1.1.1 - -testnet: - server: - cpu_total: 4 - cpu_physical: 4 - memory: 32 
- swap: 16 - disk: 200000000000 - - packages: - docker: 1.1.3 - docker-compose: 1.1.3 - iptables-persistant: 1.1.3 - lvm2: 1.1.1 - -qanet: - server: - cpu_total: 4 - cpu_physical: 4 - memory: 32 - swap: 16 - disk: 200000000000 - - packages: - docker: 1.1.3 - docker-compose: 1.1.3 - iptables-persistant: 1.1.3 - lvm2: 1.1.1 - -devnet: - server: - cpu_total: 4 - cpu_physical: 4 - memory: 32 - swap: 16 - disk: 80000000000 - - packages: - iptables-persistant: 1.1.3 - lvm2: 1.1.1 - docker-compose: 1.1.3 - - docker: - docker-api: 1.1.3 - docker-engine: 1.1.3 -""" +TIMEOUT_PATCHES = [ + 'node_cli.configs.TM_INIT_TIMEOUT', + 'node_cli.configs.RESTORE_SLEEP_TIMEOUT', + 'node_cli.configs.INIT_TIMEOUT', +] -@pytest.fixture -def net_params_file(): - with open(STATIC_PARAMS_FILEPATH, 'w') as f: - yaml.dump(yaml.load(TEST_ENV_PARAMS, Loader=yaml.Loader), stream=f, Dumper=yaml.Dumper) - yield STATIC_PARAMS_FILEPATH - os.remove(STATIC_PARAMS_FILEPATH) +@pytest.fixture(autouse=True, scope='session') +def _fast_timeouts(): + patchers = [mock.patch(target, 1) for target in TIMEOUT_PATCHES] + for p in patchers: + p.start() + yield + for p in patchers: + p.stop() @pytest.fixture() @@ -202,7 +153,24 @@ def resource_alloc(): with open(RESOURCE_ALLOCATION_FILEPATH, 'w') as alloc_file: json.dump({}, alloc_file) yield RESOURCE_ALLOCATION_FILEPATH - os.remove(RESOURCE_ALLOCATION_FILEPATH) + try: + os.remove(RESOURCE_ALLOCATION_FILEPATH) + except FileNotFoundError: + pass + + +@pytest.fixture +def inited_node(): + path = pathlib.Path(NGINX_CONFIG_FILEPATH) + path.parent.mkdir(parents=True, exist_ok=True) + path.touch() + try: + yield + finally: + try: + os.remove(NGINX_CONFIG_FILEPATH) + except FileNotFoundError: + pass @pytest.fixture @@ -217,6 +185,46 @@ def ssl_folder(): shutil.rmtree(SSL_FOLDER_PATH) +@pytest.fixture +def active_node_option(): + if os.path.isdir(NODE_DATA_PATH): + shutil.rmtree(NODE_DATA_PATH) + path = pathlib.Path(NODE_DATA_PATH) + path.mkdir(parents=True, exist_ok=True) 
+ node_options = NodeOptions() + node_options.node_mode = NodeMode.ACTIVE + try: + yield + finally: + try: + if os.path.isdir(NODE_OPTIONS_FILEPATH): + shutil.rmtree(NODE_OPTIONS_FILEPATH) + elif os.path.isfile(NODE_OPTIONS_FILEPATH): + os.remove(NODE_OPTIONS_FILEPATH) + except FileNotFoundError: + pass + + +@pytest.fixture +def passive_node_option(): + if os.path.isdir(NODE_DATA_PATH): + shutil.rmtree(NODE_DATA_PATH) + path = pathlib.Path(NODE_DATA_PATH) + path.mkdir(parents=True, exist_ok=True) + node_options = NodeOptions() + node_options.node_mode = NodeMode.PASSIVE + try: + yield + finally: + try: + if os.path.isdir(NODE_OPTIONS_FILEPATH): + shutil.rmtree(NODE_OPTIONS_FILEPATH) + elif os.path.isfile(NODE_OPTIONS_FILEPATH): + os.remove(NODE_OPTIONS_FILEPATH) + except FileNotFoundError: + pass + + @pytest.fixture def dutils(): return docker_client() @@ -268,7 +276,10 @@ def meta_file_v3(): try: yield META_FILEPATH finally: - os.remove(META_FILEPATH) + try: + os.remove(META_FILEPATH) + except FileNotFoundError: + pass @pytest.fixture @@ -299,12 +310,60 @@ def tmp_schains_dir(): @pytest.fixture -def tmp_sync_datadir(): - os.makedirs(TEST_SCHAINS_MNT_DIR_SYNC, exist_ok=True) +def tmp_passive_datadir(): + os.makedirs(TEST_SCHAINS_MNT_DIR_SINGLE_CHAIN, exist_ok=True) try: - yield TEST_SCHAINS_MNT_DIR_SYNC + yield TEST_SCHAINS_MNT_DIR_SINGLE_CHAIN finally: - shutil.rmtree(TEST_SCHAINS_MNT_DIR_SYNC) + shutil.rmtree(TEST_SCHAINS_MNT_DIR_SINGLE_CHAIN) + + +@pytest.fixture +def valid_env_params(): + return { + 'ENDPOINT': 'http://localhost:8545', + 'NODE_VERSION': 'master', + 'FILEBEAT_HOST': '127.0.0.1:3010', + 'SGX_URL': 'http://127.0.0.1', + 'BLOCK_DEVICE': '/dev/sss', + 'ENV_TYPE': 'devnet', + 'ENFORCE_BTRFS': 'False', + 'FAIR_CONTRACTS': 'test-fair', + } + + +@pytest.fixture +def valid_env_file(valid_env_params): + file_name = None + try: + with tempfile.NamedTemporaryFile(mode='w', delete=False) as f: + for key, value in valid_env_params.items(): + 
f.write(f'{key}={value}\n') + file_name = f.name + yield file_name + finally: + if file_name: + os.unlink(file_name) + _cleanup_settings() + + +@pytest.fixture +def mock_chain_response(): + return { + 'jsonrpc': '2.0', + 'id': 1, + 'result': '0x1', + } + + +@pytest.fixture +def mock_networks_metadata(): + return { + 'networks': [ + {'chainId': 1, 'name': 'Mainnet', 'path': 'mainnet'}, + {'chainId': 2, 'name': 'Testnet', 'path': 'testnet'}, + ] + } @contextmanager @@ -318,3 +377,124 @@ def set_env_var(name, value): del os.environ[name] else: os.environ[name] = old_value + + +@pytest.fixture +def regular_user_conf(tmp_path): + test_env_path = pathlib.Path(tmp_path / 'test-env') + _write_settings(INTERNAL_SKALE_ACTIVE, NODE_SKALE_ACTIVE) + try: + test_env = """ + ENDPOINT=http://localhost:8545 + NODE_VERSION='main' + FILEBEAT_HOST=127.0.0.1:3010 + SGX_URL=http://127.0.0.1 + BLOCK_DEVICE=/dev/sss + DOCKER_LVMPY_VERSION='master' + ENV_TYPE='devnet' + MANAGER_CONTRACTS='test-manager' + IMA_CONTRACTS='test-ima' + """ + with open(test_env_path, 'w') as env_file: + env_file.write(test_env) + yield test_env_path + finally: + test_env_path.unlink() + _cleanup_settings() + + +@pytest.fixture +def fair_user_conf(tmp_path): + test_env_path = pathlib.Path(tmp_path / 'test-env') + _write_settings(INTERNAL_FAIR_ACTIVE, NODE_FAIR_ACTIVE) + try: + test_env = """ + ENDPOINT=http://localhost:8545 + NODE_VERSION='main' + FILEBEAT_HOST=127.0.0.1:3010 + SGX_URL=http://127.0.0.1 + BLOCK_DEVICE=/dev/sss + ENV_TYPE='devnet' + ENFORCE_BTRFS=False + FAIR_CONTRACTS='test-fair' + """ + with open(test_env_path, 'w') as env_file: + env_file.write(test_env) + yield test_env_path + finally: + test_env_path.unlink() + _cleanup_settings() + + +@pytest.fixture +def fair_boot_user_conf(tmp_path): + test_env_path = pathlib.Path(tmp_path / 'test-env') + _write_settings(INTERNAL_FAIR_ACTIVE, NODE_FAIR_ACTIVE) + try: + test_env = """ + ENDPOINT=http://localhost:8545 + NODE_VERSION='main' + 
FILEBEAT_HOST=127.0.0.1:3010 + SGX_URL=http://127.0.0.1 + BLOCK_DEVICE=/dev/sss + ENV_TYPE='devnet' + FAIR_CONTRACTS='test-fair' + """ + with open(test_env_path, 'w') as env_file: + env_file.write(test_env) + yield test_env_path + finally: + test_env_path.unlink() + _cleanup_settings() + + +@pytest.fixture +def fair_passive_user_conf(tmp_path): + test_env_path = pathlib.Path(tmp_path / 'test-env') + _write_settings(INTERNAL_FAIR_PASSIVE, NODE_FAIR_PASSIVE) + try: + test_env = """ + ENDPOINT=http://localhost:8545 + NODE_VERSION='main' + FILEBEAT_HOST=127.0.0.1:3010 + BLOCK_DEVICE=/dev/sss + ENV_TYPE='devnet' + ENFORCE_BTRFS=False + FAIR_CONTRACTS='test-fair' + """ + with open(test_env_path, 'w') as env_file: + env_file.write(test_env) + yield test_env_path + finally: + test_env_path.unlink() + _cleanup_settings() + + +@pytest.fixture +def passive_user_conf(tmp_path): + test_env_path = pathlib.Path(tmp_path / 'test-env') + _write_settings(INTERNAL_SKALE_PASSIVE, NODE_SKALE_PASSIVE) + try: + test_env = """ + ENDPOINT=http://localhost:8545 + NODE_VERSION='main' + FILEBEAT_HOST=127.0.0.1:3010 + BLOCK_DEVICE=/dev/sss + ENV_TYPE='devnet' + SCHAIN_NAME='test-schain' + ENFORCE_BTRFS=False + MANAGER_CONTRACTS='test-manager' + IMA_CONTRACTS='test-ima' + """ + with open(test_env_path, 'w') as env_file: + env_file.write(test_env) + yield test_env_path + finally: + test_env_path.unlink() + _cleanup_settings() + + +@pytest.fixture +def redis_client(): + cpool = redis.ConnectionPool.from_url(REDIS_URI) + return redis.Redis(connection_pool=cpool) diff --git a/tests/core/core_checks_test.py b/tests/core/core_checks_test.py index 6e75bbdd..aa986cc7 100644 --- a/tests/core/core_checks_test.py +++ b/tests/core/core_checks_test.py @@ -1,7 +1,6 @@ import os import shutil -import time -from pip._internal import main as pipmain +import subprocess import mock import pytest @@ -19,9 +18,11 @@ MachineChecker, merge_reports, PackageChecker, - save_report + save_report, ) +from 
node_cli.utils.node_type import NodeMode, NodeType + @pytest.fixture def requirements_data(): @@ -31,21 +32,20 @@ def requirements_data(): 'cpu_physical': 1, 'memory': 100, 'swap': 100, - 'disk': 100000000 - }, - 'package': { - 'iptables_persistant': '0.0.0', - 'lvm2': '0.0.0', - 'test-package': '2.2.2' + 'disk': 100000000, }, - 'docker': { - 'docker-engine': '0.0.0', - 'docker-api': '0.0.0', - 'docker-compose': '1.27.4' - } + 'package': {'iptables_persistant': '0.0.0', 'lvm2': '0.0.0', 'test-package': '2.2.2'}, + 'docker': {'docker-engine': '0.0.0', 'docker-api': '0.0.0', 'docker-compose': '1.27.4'}, } +@pytest.fixture +def fair_requirements_data(requirements_data): + reqs = {k: v.copy() for k, v in requirements_data.items()} + reqs['package']['lvm2'] = 'disabled' + return reqs + + @pytest.fixture def server_req(requirements_data): return requirements_data['server'] @@ -154,7 +154,7 @@ def test_checks_machine_check(server_req): {'name': 'disk', 'status': 'ok'}, {'name': 'memory', 'status': 'ok'}, {'name': 'network', 'status': 'ok'}, - {'name': 'swap', 'status': 'ok'} + {'name': 'swap', 'status': 'ok'}, ] @@ -203,58 +203,37 @@ def test_checks_docker_api(docker_req): assert r.info['expected_version'] == '111.111.111' -@pytest.fixture -def docker_compose_pkg_1_27_4(): - pipmain(['install', 'docker-compose==1.27.4']) - time.sleep(10) - yield - pipmain(['uninstall', 'docker-compose', '-y']) - - -@pytest.fixture -def docker_compose_pkg_1_24_1(): - pipmain(['install', 'docker-compose==1.24.1']) - time.sleep(10) - yield - pipmain(['uninstall', 'docker-compose', '-y']) - +@mock.patch('node_cli.utils.helper.subprocess.run') +@mock.patch('node_cli.core.checks.shutil.which', return_value='/usr/bin/docker') +def test_checks_docker_compose_version_mocked(mock_shutil_which, mock_subprocess_run, docker_req): + checker = DockerChecker(docker_req) + expected_version = docker_req['docker-compose'] -def test_checks_docker_compose_good_pkg(docker_req, docker_compose_pkg_1_27_4): - 
checker = DockerChecker(package_req) - r = checker.docker_compose() - r.name == 'docker-compose' - r.status == 'ok' + mock_output = f'Docker Compose version v{expected_version}, build somehash'.encode('utf-8') + mock_result = mock.Mock(spec=subprocess.CompletedProcess) + mock_result.stdout = mock_output + mock_result.stderr = None + mock_result.returncode = 0 + mock_subprocess_run.return_value = mock_result -def test_checks_docker_compose_no_pkg(docker_req): - checker = DockerChecker(package_req) r = checker.docker_compose() - r.name == 'docker-compose' - r.status == 'ok' - -def test_checks_docker_compose_invalid_version( - docker_req, - docker_compose_pkg_1_24_1 -): - checker = DockerChecker(docker_req) - r = checker.docker_compose() - r.name == 'docker-compose' - r.status == 'ok' + assert r.name == 'docker' + assert r.status == 'ok', f'Check failed: {r}' + assert isinstance(r.info, str) + assert f'expected docker compose version {expected_version}' in r.info.lower() + assert f'actual v{expected_version}' in r.info.lower() def test_checks_docker_config(docker_req): checker = DockerChecker(docker_req) - valid_config = { - 'live-restore': True - } + valid_config = {'live-restore': True} r = checker._check_docker_alive_option(valid_config) assert r[0] is True assert r[1] == 'Docker daemon live-restore option is set as "true"' - invalid_config = { - 'live-restore': False - } + invalid_config = {'live-restore': False} r = checker._check_docker_alive_option(invalid_config) assert r[0] is False assert r[1] == 'Docker daemon live-restore option should be set as "true"' @@ -274,16 +253,22 @@ def test_checks_docker_hosts(docker_req): r = checker._check_docker_hosts_option(invalid_config) assert r == ( False, - "Docker daemon hosts is misconfigured. Missing hosts: ['fd://', 'unix:///var/run/skale/docker.sock']" # noqa + "Docker daemon hosts is misconfigured. 
Missing hosts: ['fd://', 'unix:///var/run/skale/docker.sock']", # noqa ) invalid_config = {'hosts': ['http://127.0.0.1:8080']} r = checker._check_docker_hosts_option(invalid_config) - assert r == (False, "Docker daemon hosts is misconfigured. Missing hosts: ['fd://', 'unix:///var/run/skale/docker.sock']") # noqa + assert r == ( + False, + "Docker daemon hosts is misconfigured. Missing hosts: ['fd://', 'unix:///var/run/skale/docker.sock']", + ) # noqa invalid_config = {'hosts': ['fd://']} r = checker._check_docker_hosts_option(invalid_config) - assert r == (False, "Docker daemon hosts is misconfigured. Missing hosts: ['unix:///var/run/skale/docker.sock']") # noqa + assert r == ( + False, + "Docker daemon hosts is misconfigured. Missing hosts: ['unix:///var/run/skale/docker.sock']", + ) # noqa def test_checks_docker_pre_post_install_checks(docker_req): @@ -333,18 +318,26 @@ def run_cmd_mock(*args, **kwargs): assert r.status == 'ok' -def test_get_all_checkers(requirements_data): +def test_get_all_checkers(requirements_data, active_node_option): disk = 'test-disk' - checkers = get_all_checkers(disk, requirements_data) + checkers = get_all_checkers(disk, requirements_data, node_mode=NodeMode.ACTIVE) assert len(checkers) == 3 - assert isinstance(checkers[0], MachineChecker) - assert isinstance(checkers[1], PackageChecker) - assert isinstance(checkers[2], DockerChecker) + assert isinstance(checkers[0], PackageChecker) + assert isinstance(checkers[1], DockerChecker) + assert isinstance(checkers[2], MachineChecker) + + +def test_get_all_checkers_passive(requirements_data, passive_node_option): + disk = 'test-disk' + checkers = get_all_checkers(disk, requirements_data, node_mode=NodeMode.PASSIVE) + assert len(checkers) == 2 + assert isinstance(checkers[0], PackageChecker) + assert isinstance(checkers[1], DockerChecker) -def test_get_checks(requirements_data): +def test_get_checks(requirements_data, active_node_option): disk = 'test-disk' - checkers = get_all_checkers(disk, 
requirements_data) + checkers = get_all_checkers(disk, requirements_data, node_mode=NodeMode.ACTIVE) checks = get_checks(checkers) assert len(checks) == 16 checks = get_checks(checkers, check_type=CheckType.PREINSTALL) @@ -353,6 +346,18 @@ def test_get_checks(requirements_data): assert len(checks) == 2 +def test_get_checks_fair(fair_requirements_data, active_node_option): + disk = 'test-disk' + fair_checkers = get_all_checkers(disk, fair_requirements_data, node_mode=NodeMode.ACTIVE) + + fair_all_checks = get_checks(fair_checkers, CheckType.ALL) + fair_all_names = {f.func.__name__ for f in fair_all_checks} + assert 'network' in fair_all_names + assert 'lvm2' not in fair_all_names + assert 'cpu_total' in fair_all_names + assert 'btrfs_progs' in fair_all_names + + def test_get_save_report(tmp_dir_path): path = os.path.join(tmp_dir_path, 'checks.json') report = get_report(path) @@ -367,23 +372,23 @@ def test_merge_report(): old_report = [ {'name': 'test1', 'status': 'ok', 'info': 'Test'}, {'name': 'test2', 'status': 'failed', 'info': 'Test1'}, - {'name': 'test3', 'status': 'failed', 'info': 'Test1'} + {'name': 'test3', 'status': 'failed', 'info': 'Test1'}, ] new_report = [ {'name': 'test1', 'status': 'ok', 'info': 'Test'}, - {'name': 'test2', 'status': 'ok', 'info': 'Test1'} + {'name': 'test2', 'status': 'ok', 'info': 'Test1'}, ] report = merge_reports(old_report, new_report) assert report == [ {'name': 'test1', 'status': 'ok', 'info': 'Test'}, {'name': 'test2', 'status': 'ok', 'info': 'Test1'}, - {'name': 'test3', 'status': 'failed', 'info': 'Test1'} + {'name': 'test3', 'status': 'failed', 'info': 'Test1'}, ] def test_get_static_params(tmp_config_dir): - params = get_static_params() + params = get_static_params(NodeType.SKALE) shutil.copy(STATIC_PARAMS_FILEPATH, tmp_config_dir) - tmp_params = get_static_params(config_path=tmp_config_dir) + tmp_params = get_static_params(NodeType.SKALE, config_path=tmp_config_dir) assert params['server']['cpu_total'] == 8 assert params 
== tmp_params diff --git a/tests/core/core_logs_test.py b/tests/core/core_logs_test.py index a82c3234..b3414797 100644 --- a/tests/core/core_logs_test.py +++ b/tests/core/core_logs_test.py @@ -2,7 +2,6 @@ import time import shlex import shutil -from datetime import datetime import pytest import freezegun @@ -11,10 +10,8 @@ from node_cli.configs import G_CONF_HOME, SKALE_TMP_DIR from node_cli.utils.docker_utils import docker_client from node_cli.utils.helper import run_cmd, safe_mkdir +from tests.helper import CURRENT_DATETIME - -CURRENT_TIMESTAMP = 1594903080 -CURRENT_DATETIME = datetime.utcfromtimestamp(CURRENT_TIMESTAMP) TEST_DUMP_DIR_PATH = os.path.join(SKALE_TMP_DIR, 'skale-logs-dump-2020-07-16--12-38-00') TEST_IMAGE = 'alpine' @@ -44,10 +41,7 @@ def backup_func(): def skale_container(): client = docker_client() container = client.containers.run( - image=TEST_IMAGE, - name=TEST_SKALE_NAME, - detach=True, - entrypoint=TEST_ENTRYPOINT + image=TEST_IMAGE, name=TEST_SKALE_NAME, detach=True, entrypoint=TEST_ENTRYPOINT ) time.sleep(10) try: @@ -77,8 +71,8 @@ def test_create_logs_dump(backup_func, skale_container, removed_containers_folde content = data_file.readlines() assert content == [ 'Hello, SKALE!\n', - '================================================================================\n', # noqa - 'Hello, SKALE!\n' + '================================================================================\n', # noqa + 'Hello, SKALE!\n', ] assert os.path.exists(os.path.join(TEST_ARCHIVE_FOLDER_PATH, 'removed_containers')) diff --git a/tests/core/core_node_test.py b/tests/core/core_node_test.py index a8b7968a..b28719d4 100644 --- a/tests/core/core_node_test.py +++ b/tests/core/core_node_test.py @@ -5,58 +5,202 @@ from pathlib import Path import docker +from docker import errors as docker_errors import mock import pytest import requests -from node_cli.configs import NODE_DATA_PATH +from node_cli.configs import ( + NODE_DATA_PATH, + SCHAINS_MNT_DIR_REGULAR, + 
SCHAINS_MNT_DIR_SINGLE_CHAIN, +) from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH -from node_cli.core.node import BASE_CONTAINERS_AMOUNT, is_base_containers_alive -from node_cli.core.node import init, pack_dir, update, is_update_safe -from node_cli.utils.meta import CliMeta +from node_cli.core.node import ( + cleanup, + compose_node_env, + get_expected_container_names, + init, + is_base_containers_alive, + is_update_safe, + pack_dir, + update, +) +from node_cli.utils.meta import CliMeta +from node_cli.utils.node_type import NodeType, NodeMode from tests.helper import response_mock, safe_update_api_response, subprocess_run_mock from tests.resources_test import BIG_DISK_SIZE dclient = docker.from_env() ALPINE_IMAGE_NAME = 'alpine:3.12' -HELLO_WORLD_IMAGE_NAME = 'hello-world' -CMD = 'sleep 10' +CMD = 'sleep 60' + +WRONG_CONTAINERS = [ + 'WRONG_CONTAINER_1', + 'skale_WRONG_CONTAINER_4', + 'fair_WRONG_CONTAINER_6', + 'passive_WRONG_CONTAINER_8', +] + +NODE_TYPE_MODE_BOOT_COMBINATIONS: list[tuple[NodeType, NodeMode, bool]] = [ + (NodeType.SKALE, NodeMode.ACTIVE, False), + (NodeType.SKALE, NodeMode.PASSIVE, False), + (NodeType.FAIR, NodeMode.ACTIVE, True), + (NodeType.FAIR, NodeMode.ACTIVE, False), +] + +alive_test_params = [ + pytest.param( + node_type, + node_mode, + is_boot, + get_expected_container_names(node_type, node_mode, is_boot), + id=f'{node_type.name}-{node_mode.name}-boot_{is_boot}-correct_containers', + ) + for node_type, node_mode, is_boot in NODE_TYPE_MODE_BOOT_COMBINATIONS +] + +wrong_test_params = [ + pytest.param( + node_type, + node_mode, + is_boot, + WRONG_CONTAINERS, + id=f'{node_type.name}-{node_mode.name}-boot_{is_boot}-wrong_containers', + ) + for node_type, node_mode, is_boot in NODE_TYPE_MODE_BOOT_COMBINATIONS +] + +missing_test_params = [] +for node_type, node_mode, is_boot in NODE_TYPE_MODE_BOOT_COMBINATIONS: + expected_names = get_expected_container_names(node_type, node_mode, is_boot) + containers_to_create = 
expected_names[1:] + missing_test_params.append( + pytest.param( + node_type, + node_mode, + is_boot, + containers_to_create, + id=f'{node_type.name}-{node_mode.name}-boot_{is_boot}-missing_containers', + ) + ) @pytest.fixture -def skale_base_containers(): - containers = [ - dclient.containers.run(ALPINE_IMAGE_NAME, detach=True, name=f'skale_test{i}', command=CMD) - for i in range(BASE_CONTAINERS_AMOUNT) - ] - yield containers - for c in containers: - c.remove(force=True) - - -@pytest.fixture -def skale_base_containers_without_one(): - containers = [ - dclient.containers.run(ALPINE_IMAGE_NAME, detach=True, name=f'skale_test{i}', command=CMD) - for i in range(BASE_CONTAINERS_AMOUNT - 1) - ] - yield containers - for c in containers: - c.remove(force=True) - +def manage_node_containers(request): + container_names_to_create = request.param + created_containers = [] + try: + for name in container_names_to_create: + try: + existing_container = dclient.containers.get(name) + existing_container.remove(force=True) + except docker_errors.NotFound: + pass + container = dclient.containers.run( + ALPINE_IMAGE_NAME, + detach=True, + name=name, + command=CMD, + ) + created_containers.append(container) + + if created_containers: + time.sleep(2) + + yield created_containers -@pytest.fixture -def skale_base_containers_exited(): - containers = [ - dclient.containers.run(HELLO_WORLD_IMAGE_NAME, detach=True, name=f'skale_test{i}') - for i in range(BASE_CONTAINERS_AMOUNT) - ] - time.sleep(10) - yield containers - for c in containers: - c.remove(force=True) + finally: + all_containers_now = dclient.containers.list(all=True) + cleaned_count = 0 + for container_obj in all_containers_now: + if container_obj.name in container_names_to_create: + try: + container_obj.remove(force=True) + cleaned_count += 1 + except docker_errors.NotFound: + pass + + +@pytest.mark.parametrize( + 'node_type, node_mode, is_boot, manage_node_containers', + alive_test_params, + indirect=['manage_node_containers'], 
+) +def test_is_base_containers_alive(manage_node_containers, node_type, node_mode, is_boot): + assert ( + is_base_containers_alive(node_type=node_type, node_mode=node_mode, is_fair_boot=is_boot) + is True + ) + + +@pytest.mark.parametrize( + 'node_type, node_mode, is_boot, manage_node_containers', + wrong_test_params, + indirect=['manage_node_containers'], +) +def test_is_base_containers_alive_wrong(manage_node_containers, node_type, node_mode, is_boot): + assert ( + is_base_containers_alive(node_type=node_type, node_mode=node_mode, is_fair_boot=is_boot) + is False + ) + + +@pytest.mark.parametrize( + 'node_type, node_mode, is_boot, manage_node_containers', + missing_test_params, + indirect=['manage_node_containers'], +) +def test_is_base_containers_alive_missing(manage_node_containers, node_type, node_mode, is_boot): + assert ( + is_base_containers_alive(node_type=node_type, node_mode=node_mode, is_fair_boot=is_boot) + is False + ) + + +@pytest.mark.parametrize('node_type, node_mode, is_boot', NODE_TYPE_MODE_BOOT_COMBINATIONS) +def test_is_base_containers_alive_empty(node_type, node_mode, is_boot): + assert ( + is_base_containers_alive(node_type=node_type, node_mode=node_mode, is_fair_boot=is_boot) + is False + ) + + +@pytest.mark.parametrize( + 'node_type, node_mode, expected_mnt_dir', + [ + ( + NodeType.SKALE, + NodeMode.ACTIVE, + SCHAINS_MNT_DIR_REGULAR, + ), + ( + NodeType.SKALE, + NodeMode.PASSIVE, + SCHAINS_MNT_DIR_SINGLE_CHAIN, + ), + ( + NodeType.FAIR, + NodeMode.ACTIVE, + SCHAINS_MNT_DIR_SINGLE_CHAIN, + ), + ], + ids=[ + 'regular', + 'passive', + 'fair', + ], +) +def test_compose_node_env(node_type, node_mode, expected_mnt_dir, regular_user_conf): + result_env = compose_node_env( + node_type=node_type, + node_mode=node_mode, + ) + + assert result_env['SCHAINS_MNT_DIR'] == expected_mnt_dir + assert 'BACKUP_RUN' not in result_env @pytest.fixture @@ -101,24 +245,6 @@ def test_pack_dir(tmp_dir): pack_dir(backup_dir, cleaned_archive_path, 
exclude=('trash_data',)) -def test_is_base_containers_alive(skale_base_containers): - cont = skale_base_containers - print([c.name for c in cont]) - assert is_base_containers_alive() - - -def test_is_base_containers_alive_one_failed(skale_base_containers_without_one): - assert not is_base_containers_alive() - - -def test_is_base_containers_alive_exited(skale_base_containers_exited): - assert not is_base_containers_alive() - - -def test_is_base_containers_alive_empty(): - assert not is_base_containers_alive() - - @pytest.fixture def no_resource_file(): try: @@ -138,10 +264,9 @@ def resource_file(): os.remove(RESOURCE_ALLOCATION_FILEPATH) -def test_init_node(no_resource_file): # todo: write new init node test +def test_init_node(regular_user_conf, no_resource_file): # todo: write new init node test resp_mock = response_mock(requests.codes.created) assert not os.path.isfile(RESOURCE_ALLOCATION_FILEPATH) - env_filepath = './tests/test-env' with ( mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), @@ -152,19 +277,16 @@ def test_init_node(no_resource_file): # todo: write new init node test mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch('node_cli.utils.helper.post_request', resp_mock), ): - init(env_filepath) + init(config_file=regular_user_conf.as_posix(), node_type=NodeType.SKALE) assert os.path.isfile(RESOURCE_ALLOCATION_FILEPATH) -def test_update_node(mocked_g_config, resource_file): - env_filepath = './tests/test-env' +def test_update_node(regular_user_conf, mocked_g_config, resource_file, inited_node): resp_mock = response_mock(requests.codes.created) os.makedirs(NODE_DATA_PATH, exist_ok=True) with ( mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch('node_cli.core.node.update_op'), - mock.patch('node_cli.core.node.get_flask_secret_key'), - mock.patch('node_cli.core.node.save_env_params'), 
mock.patch('node_cli.operations.base.configure_nftables'), mock.patch('node_cli.core.host.prepare_host'), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), @@ -172,41 +294,131 @@ def test_update_node(mocked_g_config, resource_file): mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch('node_cli.core.host.init_data_dir'), mock.patch( - 'node_cli.core.node.get_meta_info', + 'node_cli.core.node.CliMetaManager.get_meta_info', return_value=CliMeta(version='2.6.0', config_stream='3.0.2'), ), ): with mock.patch( 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response() ): # noqa - result = update(env_filepath, pull_config_for_schain=None) + result = update( + regular_user_conf.as_posix(), + pull_config_for_schain=None, + node_type=NodeType.SKALE, + node_mode=NodeMode.ACTIVE, + ) assert result is None -def test_is_update_safe(): - assert is_update_safe() - assert is_update_safe(sync_node=True) - - with mock.patch('node_cli.core.node.is_admin_running', return_value=True): - with mock.patch('node_cli.core.node.is_api_running', return_value=True): - assert not is_update_safe() - assert is_update_safe(sync_node=True) - - with mock.patch('node_cli.core.node.is_sync_admin_running', return_value=True): - assert is_update_safe() - assert not is_update_safe(sync_node=True) - - with mock.patch('node_cli.utils.docker_utils.is_container_running', return_value=True): - with mock.patch( - 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response() - ): - assert is_update_safe() - - with mock.patch('node_cli.utils.helper.requests.get', return_value=safe_update_api_response()): - assert is_update_safe() - - with mock.patch('node_cli.utils.docker_utils.is_container_running', return_value=True): - with mock.patch( - 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response(safe=False) - ): - assert not is_update_safe() +@pytest.mark.parametrize( + 'node_type,node_mode', 
+ [ + (NodeType.SKALE, NodeMode.ACTIVE), + (NodeType.SKALE, NodeMode.PASSIVE), + (NodeType.FAIR, NodeMode.ACTIVE), + ], +) +@mock.patch('node_cli.core.node.is_admin_running', return_value=False) +@mock.patch('node_cli.core.node.is_api_running', return_value=False) +@mock.patch('node_cli.utils.helper.requests.get') +def test_is_update_safe_when_admin_and_api_not_running( + mock_requests_get, mock_is_api_running, mock_is_admin_running, node_type, node_mode +): + assert is_update_safe(node_mode=node_mode) is True + mock_requests_get.assert_not_called() + + +@mock.patch('node_cli.core.node.is_admin_running', return_value=False) +@mock.patch('node_cli.core.node.is_api_running', return_value=True) +@mock.patch('node_cli.utils.helper.requests.get') +def test_is_update_safe_when_admin_not_running_for_passive( + mock_requests_get, mock_is_api_running, mock_is_admin_running +): + assert is_update_safe(node_mode=NodeMode.PASSIVE) is True + mock_requests_get.assert_not_called() + + +@pytest.mark.parametrize( + 'node_type,node_mode', + [ + (NodeType.SKALE, NodeMode.ACTIVE), + (NodeType.SKALE, NodeMode.PASSIVE), + (NodeType.FAIR, NodeMode.ACTIVE), + ], +) +@pytest.mark.parametrize( + 'api_is_safe, expected_result', + [(True, True), (False, False)], + ids=['api_safe', 'api_unsafe'], +) +@mock.patch('node_cli.core.node.is_admin_running', return_value=True) +@mock.patch('node_cli.utils.helper.requests.get') +def test_is_update_safe_when_admin_running( + mock_requests_get, mock_is_admin_running, api_is_safe, expected_result, node_type, node_mode +): + mock_requests_get.return_value = safe_update_api_response(safe=api_is_safe) + assert is_update_safe(node_mode=node_mode) is expected_result + mock_requests_get.assert_called_once() + + +@pytest.mark.parametrize('node_type', [NodeType.SKALE, NodeType.FAIR]) +@pytest.mark.parametrize( + 'api_is_safe, expected_result', + [(True, True), (False, False)], + ids=['api_safe', 'api_unsafe'], +) 
+@mock.patch('node_cli.core.node.is_admin_running', return_value=False) +@mock.patch('node_cli.core.node.is_api_running', return_value=True) +@mock.patch('node_cli.utils.helper.requests.get') +def test_is_update_safe_when_only_api_running_for_regular( + mock_requests_get, + mock_is_api_running, + mock_is_admin_running, + api_is_safe, + expected_result, + node_type, +): + mock_requests_get.return_value = safe_update_api_response(safe=api_is_safe) + assert is_update_safe(node_mode=NodeMode.ACTIVE) is expected_result + mock_requests_get.assert_called_once() + + +@pytest.mark.parametrize( + 'node_type,node_mode', + [ + (NodeType.SKALE, NodeMode.ACTIVE), + (NodeType.SKALE, NodeMode.PASSIVE), + (NodeType.FAIR, NodeMode.ACTIVE), + ], +) +@mock.patch('node_cli.core.node.is_admin_running', return_value=True) +@mock.patch('node_cli.utils.helper.requests.get') +def test_is_update_safe_when_api_call_fails( + mock_requests_get, mock_is_admin_running, node_type, node_mode +): + mock_requests_get.side_effect = requests.exceptions.ConnectionError('Test connection error') + assert is_update_safe(node_mode=node_mode) is False + mock_requests_get.assert_called_once() + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.core.node.cleanup_skale_op') +@mock.patch('node_cli.core.node.compose_node_env') +def test_cleanup_success( + mock_compose_env, + mock_cleanup_skale_op, + mock_is_user_valid, + inited_node, + resource_alloc, + meta_file_v3, + active_node_option, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + + cleanup(node_mode=NodeMode.ACTIVE) + + mock_compose_env.assert_called_once_with(NodeType.SKALE, NodeMode.ACTIVE) + mock_cleanup_skale_op.assert_called_once_with( + node_mode=NodeMode.ACTIVE, compose_env=mock_env, prune=False + ) diff --git a/tests/core/core_schains_test.py b/tests/core/core_schains_test.py index 1681ce20..7016a262 100644 --- a/tests/core/core_schains_test.py +++ 
b/tests/core/core_schains_test.py @@ -1,17 +1,12 @@ import os -import datetime -from unittest import mock from pathlib import Path - +from unittest import mock import freezegun -from node_cli.core.schains import cleanup_sync_datadir, toggle_schain_repair_mode +from node_cli.core.schains import cleanup_no_lvm_datadir, toggle_schain_repair_mode from node_cli.utils.helper import read_json - - -CURRENT_TIMESTAMP = 1594903080 -CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP) +from tests.helper import CURRENT_DATETIME, CURRENT_TIMESTAMP @freezegun.freeze_time(CURRENT_DATETIME) @@ -39,9 +34,9 @@ def test_toggle_repair_mode(tmp_schains_dir): @freezegun.freeze_time(CURRENT_DATETIME) -def test_cleanup_sync_datadir(tmp_sync_datadir): +def test_cleanup_passive_datadir(tmp_passive_datadir): schain_name = 'test_schain' - base_folder = Path(tmp_sync_datadir).joinpath(schain_name) + base_folder = Path(tmp_passive_datadir).joinpath(schain_name) base_folder.mkdir() folders = [ '28e07f34', @@ -85,6 +80,9 @@ def test_cleanup_sync_datadir(tmp_sync_datadir): hash_path = snapshot_folder.joinpath('snapshot_hash.txt') hash_path.touch() - with mock.patch('node_cli.core.schains.rm_btrfs_subvolume'): - cleanup_sync_datadir(schain_name, base_path=tmp_sync_datadir) + with ( + mock.patch('node_cli.core.schains.rm_btrfs_subvolume'), + mock.patch('node_cli.core.schains.run_cmd'), + ): + cleanup_no_lvm_datadir(schain_name, base_path=tmp_passive_datadir) assert not os.path.isdir(base_folder) diff --git a/tests/core/host/docker_config_test.py b/tests/core/host/docker_config_test.py index 5a93331a..9f53828b 100644 --- a/tests/core/host/docker_config_test.py +++ b/tests/core/host/docker_config_test.py @@ -62,7 +62,7 @@ def test_ensure_service_overriden_config(tmp_dir): '[Service]', 'ExecStart=', 'ExecStart=/usr/bin/dockerd', - 'ExecStartPre=/bin/mkdir -p /var/run/skale' + 'ExecStartPre=/bin/mkdir -p /var/run/skale', ] ) assert r == DockerConfigResult.CHANGED @@ -74,7 +74,7 @@ 
def test_ensure_service_overriden_config(tmp_dir): '[Service]', 'ExecStart=', 'ExecStart=/usr/bin/dockerd', - 'ExecStartPre=/bin/mkdir -p /var/run/skale' + 'ExecStartPre=/bin/mkdir -p /var/run/skale', ] ) assert r == DockerConfigResult.UNCHANGED @@ -93,10 +93,7 @@ def test_ensure_docker_daemon_config(tmp_dir): with open(daemon_config_path, 'r') as daemon_config_file: conf = json.load(daemon_config_file) assert conf['live-restore'] is True - assert conf['hosts'] == [ - 'fd://', - 'unix:///var/run/skale/docker.sock' - ] + assert conf['hosts'] == ['fd://', 'unix:///var/run/skale/docker.sock'] assert r == DockerConfigResult.CHANGED conf.pop('hosts') @@ -108,10 +105,7 @@ def test_ensure_docker_daemon_config(tmp_dir): with open(daemon_config_path, 'r') as daemon_config_file: conf = json.load(daemon_config_file) assert conf['live-restore'] is True - assert conf['hosts'] == [ - 'fd://', - 'unix:///var/run/skale/docker.sock' - ] + assert conf['hosts'] == ['fd://', 'unix:///var/run/skale/docker.sock'] assert conf['test'] == 'TEST' assert r == DockerConfigResult.CHANGED @@ -136,8 +130,8 @@ def container(dclient): c.remove(force=True) -def test_assert_no_contaners(): - assert_no_containers(ignore=('ganache',)) +def test_assert_no_containers(): + assert_no_containers(ignore=('redis',)) def test_assert_no_containers_failed(container): diff --git a/tests/core/host/kernel_config_test.py b/tests/core/host/kernel_config_test.py index 92e4e8e1..5ea64f93 100644 --- a/tests/core/host/kernel_config_test.py +++ b/tests/core/host/kernel_config_test.py @@ -2,10 +2,7 @@ import pytest -from node_cli.core.host import ( - is_btrfs_module_autoloaded, - ensure_btrfs_kernel_module_autoloaded -) +from node_cli.core.host import is_btrfs_module_autoloaded, ensure_btrfs_kernel_module_autoloaded @pytest.fixture diff --git a/tests/core/migration_test.py b/tests/core/migration_test.py index 7509bc14..585e6fb0 100644 --- a/tests/core/migration_test.py +++ b/tests/core/migration_test.py @@ -41,7 +41,10 @@ 
def test_migration(base_rules): migrate() res = run_cmd(['iptables', '-S']) output = res.stdout.decode('utf-8') - assert output == f'-P INPUT ACCEPT\n-P FORWARD ACCEPT\n-P OUTPUT ACCEPT\n-N {CUSTOM_CHAIN_NAME}\n-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT\n-A INPUT -p udp -m udp --dport 53 -j ACCEPT\n-A {CUSTOM_CHAIN_NAME} -p tcp -m tcp --dport 2222 -j ACCEPT\n' # noqa + assert ( + output + == f'-P INPUT ACCEPT\n-P FORWARD ACCEPT\n-P OUTPUT ACCEPT\n-N {CUSTOM_CHAIN_NAME}\n-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT\n-A INPUT -p udp -m udp --dport 53 -j ACCEPT\n-A {CUSTOM_CHAIN_NAME} -p tcp -m tcp --dport 2222 -j ACCEPT\n' # noqa + ) nft = NFTablesManager(family='ip', table='filter') assert nft.get_rules(chain='INPUT') == [ { diff --git a/tests/core/nftables_test.py b/tests/core/nftables_test.py index 74852585..b6490b08 100644 --- a/tests/core/nftables_test.py +++ b/tests/core/nftables_test.py @@ -9,7 +9,7 @@ @pytest.fixture(scope='module') def nft_manager(): - """Returns a NFTablesManager instance""" + """Returns a NFTablesManager instance.""" manager = NFTablesManager(family='inet', table='filter') try: yield manager @@ -19,7 +19,7 @@ def nft_manager(): @pytest.fixture def mock_nft_output(): - """Fixture for mock nftables output""" + """Fixture for mocking nftables output.""" return { 'nftables': [ {'chain': {'family': 'inet', 'table': 'filter', 'name': 'INPUT', 'handle': 1}}, @@ -46,7 +46,7 @@ def mock_nft_output(): def test_init(nft_manager): - """Test initialization""" + """Test initialization.""" assert nft_manager.family == 'inet' assert nft_manager.table == 'filter' assert isinstance(nft_manager.nft, nftables.Nftables) @@ -54,7 +54,7 @@ def test_init(nft_manager): @patch('nftables.Nftables.json_cmd') def test_execute_cmd_success(mock_json_cmd, nft_manager): - """Test successful command execution""" + """Test successful command execution.""" mock_json_cmd.return_value = (0, '', '') cmd = {'nftables': [{'add': {'table': {'family': 'inet', 'name': 'filter'}}}]} 
@@ -64,7 +64,7 @@ def test_execute_cmd_success(mock_json_cmd, nft_manager): @patch('nftables.Nftables.json_cmd') def test_execute_cmd_failure(mock_json_cmd, nft_manager): - """Test command execution failure""" + """Test command execution failure.""" mock_json_cmd.return_value = (1, '', 'Error message') cmd = {'nftables': [{'add': {'table': {'family': 'inet', 'name': 'filter'}}}]} @@ -75,7 +75,7 @@ def test_execute_cmd_failure(mock_json_cmd, nft_manager): @patch('nftables.Nftables.cmd') def test_get_chains(mock_cmd, nft_manager, mock_nft_output): - """Test getting chains""" + """Test getting chains.""" mock_cmd.return_value = (0, json.dumps(mock_nft_output), '') chains = nft_manager.get_chains() @@ -85,7 +85,7 @@ def test_get_chains(mock_cmd, nft_manager, mock_nft_output): @patch('nftables.Nftables.cmd') def test_chain_exists(mock_cmd, nft_manager, mock_nft_output): - """Test chain existence check""" + """Test chain existence check.""" mock_cmd.return_value = (0, json.dumps(mock_nft_output), '') assert nft_manager.chain_exists('INPUT') @@ -95,7 +95,7 @@ def test_chain_exists(mock_cmd, nft_manager, mock_nft_output): @patch.object(NFTablesManager, 'execute_cmd') @patch.object(NFTablesManager, 'chain_exists') def test_create_chain_if_not_exists(mock_exists, mock_execute, nft_manager): - """Test chain creation""" + """Test chain creation.""" mock_exists.return_value = False nft_manager.create_chain_if_not_exists('INPUT', 'input') @@ -113,7 +113,7 @@ def test_create_chain_if_not_exists(mock_exists, mock_execute, nft_manager): @patch.object(NFTablesManager, 'execute_cmd') @patch.object(NFTablesManager, 'rule_exists') def test_add_rule(mock_exists, mock_execute, nft_manager, rule_data): - """Test rule addition with different types""" + """Test rule addition with different types.""" mock_exists.return_value = False rule = Rule(**rule_data) @@ -123,7 +123,7 @@ def test_add_rule(mock_exists, mock_execute, nft_manager, rule_data): @patch.object(NFTablesManager, 'execute_cmd') 
def test_setup_firewall(mock_execute, nft_manager): - """Test complete firewall setup""" + """Test complete firewall setup.""" with patch.multiple( NFTablesManager, table_exists=Mock(return_value=False), @@ -135,7 +135,7 @@ def test_setup_firewall(mock_execute, nft_manager): def test_invalid_protocol(nft_manager): - """Test adding rule with invalid protocol""" + """Test adding rule with invalid protocol.""" rule = Rule(chain='INPUT', protocol='invalid', first_port=80) with pytest.raises(Exception): nft_manager.add_rule(rule) diff --git a/tests/core/nginx_test.py b/tests/core/nginx_test.py new file mode 100644 index 00000000..a867b0b0 --- /dev/null +++ b/tests/core/nginx_test.py @@ -0,0 +1,146 @@ +import os +from pathlib import Path + +import pytest +import mock + +from node_cli.core.nginx import ( + generate_nginx_config, + check_ssl_certs, + is_skale_node_nginx, + SSL_KEY_NAME, + SSL_CRT_NAME, +) +from node_cli.utils.node_type import NodeType, NodeMode +from node_cli.configs import NGINX_TEMPLATE_FILEPATH, NGINX_CONFIG_FILEPATH, NODE_CERTS_PATH + +TEST_NGINX_TEMPLATE = """ +server { + listen 3009; + {% if ssl %} + listen 311 ssl; + ssl_certificate /ssl/ssl_cert; + ssl_certificate_key /ssl/ssl_key; + {% endif %} +} + +{% if skale_node %} +server { + listen 80; + {% if ssl %} + listen 443 ssl; + ssl_certificate /ssl/ssl_cert; + ssl_certificate_key /ssl/ssl_key; + {% endif %} +} +{% endif %} +""" + +CORE_SSL_SNIPPET = 'listen 311 ssl;' +FILESTORAGE_SNIPPET = 'listen 80;' +FILESTORAGE_SSL_SNIPPET = 'listen 443 ssl;' + + +@pytest.fixture +def nginx_template(): + """Create a temporary nginx template file.""" + os.makedirs(os.path.dirname(NGINX_TEMPLATE_FILEPATH), exist_ok=True) + with open(NGINX_TEMPLATE_FILEPATH, 'w') as f: + f.write(TEST_NGINX_TEMPLATE) + try: + yield + finally: + if os.path.isfile(NGINX_TEMPLATE_FILEPATH): + os.remove(NGINX_TEMPLATE_FILEPATH) + if os.path.isfile(NGINX_CONFIG_FILEPATH): + os.remove(NGINX_CONFIG_FILEPATH) + + +@pytest.mark.parametrize( 
+ 'node_type, node_mode, ssl_exists, expected_regular_flag, expected_ssl_flag', + [ + (NodeType.SKALE, NodeMode.ACTIVE, True, True, True), + (NodeType.SKALE, NodeMode.ACTIVE, False, True, False), + (NodeType.SKALE, NodeMode.PASSIVE, True, True, True), + (NodeType.SKALE, NodeMode.PASSIVE, False, True, False), + (NodeType.FAIR, NodeMode.ACTIVE, True, False, True), + (NodeType.FAIR, NodeMode.ACTIVE, False, False, False), + ], + ids=[ + 'regular_ssl_on', + 'regular_ssl_off', + 'regular_ssl_on', + 'regular_ssl_off', + 'fair_ssl_on', + 'fair_ssl_off', + ], +) +@mock.patch('node_cli.core.nginx.check_ssl_certs') +@mock.patch('node_cli.core.nginx.TYPE') +def test_generate_nginx_config( + mock_type, + mock_check_ssl, + node_type, + node_mode, + ssl_exists, + expected_regular_flag, + expected_ssl_flag, + nginx_template, +): + mock_type.__eq__.side_effect = lambda other: node_type == other + mock_type.__ne__.side_effect = lambda other: node_type != other + mock_check_ssl.return_value = ssl_exists + + generate_nginx_config() + + assert os.path.exists(NGINX_CONFIG_FILEPATH) + with open(NGINX_CONFIG_FILEPATH) as f: + rendered_config = f.read() + + rendered_config = rendered_config.strip() + + if expected_regular_flag: + assert FILESTORAGE_SNIPPET in rendered_config + else: + assert FILESTORAGE_SNIPPET not in rendered_config + + if expected_ssl_flag: + assert CORE_SSL_SNIPPET in rendered_config + else: + assert CORE_SSL_SNIPPET not in rendered_config + + if expected_regular_flag and expected_ssl_flag: + assert FILESTORAGE_SSL_SNIPPET in rendered_config + else: + assert FILESTORAGE_SSL_SNIPPET not in rendered_config + + +def test_check_ssl_certs_exist(ssl_folder): + Path(os.path.join(NODE_CERTS_PATH, SSL_CRT_NAME)).touch() + Path(os.path.join(NODE_CERTS_PATH, SSL_KEY_NAME)).touch() + assert check_ssl_certs() + + +def test_check_ssl_certs_missing_one(ssl_folder): + Path(os.path.join(NODE_CERTS_PATH, SSL_CRT_NAME)).touch() + assert check_ssl_certs() is False + + +def 
test_check_ssl_certs_missing_both(ssl_folder): + assert check_ssl_certs() is False + + +@pytest.mark.parametrize( + 'node_type, node_mode, expected_result', + [ + (NodeType.SKALE, NodeMode.ACTIVE, True), + (NodeType.SKALE, NodeMode.PASSIVE, True), + (NodeType.FAIR, NodeMode.ACTIVE, False), + ], +) +@mock.patch('node_cli.core.nginx.TYPE') +def test_is_skale_node_nginx(mock_type, node_type, node_mode, expected_result): + mock_type.__eq__.side_effect = lambda other: node_type == other + mock_type.__ne__.side_effect = lambda other: node_type != other + + assert is_skale_node_nginx() is expected_result diff --git a/tests/docker_utils_test.py b/tests/docker_utils_test.py index 2d61a489..fd3e8d3f 100644 --- a/tests/docker_utils_test.py +++ b/tests/docker_utils_test.py @@ -5,11 +5,7 @@ import mock import pytest -from node_cli.utils.docker_utils import ( - docker_cleanup, - save_container_logs, - safe_rm -) +from node_cli.utils.docker_utils import docker_cleanup, save_container_logs, safe_rm from node_cli.configs import REMOVED_CONTAINERS_FOLDER_PATH @@ -19,10 +15,7 @@ def simple_container(dclient, simple_image, docker_hc): c = None try: info = dclient.api.create_container( - simple_image, - detach=True, - name=name, - host_config=docker_hc + simple_image, detach=True, name=name, host_config=docker_hc ) c = dclient.containers.get(info['Id']) c.restart() @@ -57,7 +50,7 @@ def test_save_container_logs(simple_container, tmp_dir_path): 'INFO:__main__:Test 7\n', 'INFO:__main__:Test 8\n', 'INFO:__main__:Test 9\n', - 'INFO:__main__:Waiting\n' + 'INFO:__main__:Waiting\n', ] save_container_logs(simple_container, log_path, head=10, tail=5) with open(log_path) as log_file: @@ -73,7 +66,7 @@ def test_save_container_logs(simple_container, tmp_dir_path): 'INFO:__main__:Test 7\n', 'INFO:__main__:Test 8\n', 'INFO:__main__:Test 9\n', - 'INFO:__main__:Waiting\n' + 'INFO:__main__:Waiting\n', ] diff --git a/tests/fair/__init__.py b/tests/fair/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/tests/fair/fair_node_test.py b/tests/fair/fair_node_test.py new file mode 100644 index 00000000..6554cd3f --- /dev/null +++ b/tests/fair/fair_node_test.py @@ -0,0 +1,321 @@ +from unittest import mock + +import pytest + +from node_cli.fair.boot import init as init_boot +from node_cli.fair.boot import update +from node_cli.fair.common import cleanup +from node_cli.fair.active import migrate_from_boot, restore +from node_cli.operations.fair import FairUpdateType +from node_cli.utils.node_type import NodeMode, NodeType + + +@mock.patch('node_cli.fair.active.time.sleep') +@mock.patch('node_cli.fair.active.restore_fair_op') +@mock.patch('node_cli.fair.active.compose_node_env') +def test_restore_fair( + mock_compose_env, + mock_restore_op, + mock_sleep, + valid_env_file, + ensure_meta_removed, + active_node_option, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + mock_restore_op.return_value = True + backup_path = '/fake/backup' + + restore(backup_path, valid_env_file) + + mock_compose_env.assert_called_once_with(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + mock_restore_op.assert_called_once_with( + node_mode=NodeMode.ACTIVE, + settings=mock.ANY, + compose_env=mock_env, + backup_path=backup_path, + config_only=False, + ) + mock_sleep.assert_called_once() + + +@mock.patch('node_cli.fair.boot.is_base_containers_alive', return_value=True) +@mock.patch('node_cli.fair.boot.time.sleep') +@mock.patch('node_cli.fair.boot.init_fair_boot_op') +@mock.patch('node_cli.fair.boot.compose_node_env') +def test_init_fair_boot( + mock_compose_env, + mock_init_op, + mock_sleep, + mock_is_alive, + valid_env_file, + ensure_meta_removed, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + + init_boot(valid_env_file) + + mock_compose_env.assert_called_once_with(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + mock_init_op.assert_called_once_with( + settings=mock.ANY, + 
compose_env=mock_env, + node_mode=NodeMode.ACTIVE, + ) + mock_sleep.assert_called_once() + mock_is_alive.assert_called_once_with( + node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE, is_fair_boot=True + ) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.boot.is_base_containers_alive', return_value=True) +@mock.patch('node_cli.fair.boot.time.sleep') +@mock.patch('node_cli.fair.boot.update_fair_boot_op') +@mock.patch('node_cli.fair.boot.compose_node_env') +def test_update_fair_boot( + mock_compose_env, + mock_update_op, + mock_sleep, + mock_is_alive, + mock_is_user_valid, + valid_env_file, + inited_node, + resource_alloc, + meta_file_v3, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + mock_update_op.return_value = True + pull_config_for_schain = 'fair' + + update(valid_env_file, pull_config_for_schain) + + mock_compose_env.assert_called_once_with(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + mock_update_op.assert_called_once_with( + settings=mock.ANY, + compose_env=mock_env, + node_mode=NodeMode.ACTIVE, + ) + mock_sleep.assert_called_once() + mock_is_alive.assert_called_once_with( + node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE, is_fair_boot=True + ) + + +@mock.patch('node_cli.fair.active.update_fair_op') +@mock.patch('node_cli.fair.active.compose_node_env') +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +def test_migrate_from_boot( + mock_is_user_valid, + mock_compose_env, + mock_migrate_op, + valid_env_file, + inited_node, + resource_alloc, + meta_file_v3, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + mock_migrate_op.return_value = True + + migrate_from_boot(valid_env_file) + + mock_compose_env.assert_called_once_with(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + mock_migrate_op.assert_called_once_with( + settings=mock.ANY, + compose_env=mock_env, + node_mode=NodeMode.ACTIVE, + 
update_type=FairUpdateType.FROM_BOOT, + force_skaled_start=False, + ) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.common.cleanup_fair_op') +@mock.patch('node_cli.fair.common.compose_node_env') +def test_cleanup_success( + mock_compose_env, + mock_cleanup_fair_op, + mock_is_user_valid, + resource_alloc, + meta_file_v3, + active_node_option, + inited_node, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + + cleanup(node_mode=NodeMode.ACTIVE) + + mock_compose_env.assert_called_once_with(node_type=NodeType.FAIR, node_mode=NodeMode.ACTIVE) + mock_cleanup_fair_op.assert_called_once_with( + node_mode=NodeMode.ACTIVE, compose_env=mock_env, prune=False + ) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.common.cleanup_fair_op') +@mock.patch('node_cli.fair.common.compose_node_env') +def test_cleanup_calls_operations_in_correct_order( + mock_compose_env, + mock_cleanup_fair_op, + mock_is_user_valid, + resource_alloc, + meta_file_v3, + active_node_option, + inited_node, +): + from node_cli.fair.common import cleanup + + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + + manager = mock.Mock() + manager.attach_mock(mock_compose_env, 'compose_env') + manager.attach_mock(mock_cleanup_fair_op, 'cleanup_fair_op') + + cleanup(node_mode=NodeMode.ACTIVE) + + expected_calls = [ + mock.call.compose_env( + node_type=NodeType.FAIR, + node_mode=NodeMode.ACTIVE, + ), + mock.call.cleanup_fair_op(node_mode=NodeMode.ACTIVE, compose_env=mock_env, prune=False), + ] + manager.assert_has_calls(expected_calls, any_order=False) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.common.cleanup_fair_op', side_effect=Exception('Cleanup failed')) +@mock.patch('node_cli.fair.common.compose_node_env') +def test_cleanup_continues_after_fair_op_error( + 
mock_compose_env, + mock_cleanup_fair_op, + mock_is_user_valid, + resource_alloc, + meta_file_v3, + active_node_option, + inited_node, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + + with pytest.raises(Exception, match='Cleanup failed'): + cleanup(node_mode=NodeMode.ACTIVE) + + mock_compose_env.assert_called_once() + mock_cleanup_fair_op.assert_called_once_with( + node_mode=NodeMode.ACTIVE, compose_env=mock_env, prune=False + ) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=False) +def test_cleanup_fails_when_user_invalid( + mock_is_user_valid, + resource_alloc, + meta_file_v3, + inited_node, +): + """Test that cleanup fails when user validation fails""" + import pytest + + from node_cli.fair.common import cleanup + + with pytest.raises(SystemExit): + cleanup(node_mode=NodeMode.ACTIVE) + + +def test_cleanup_fails_when_not_inited(ensure_meta_removed, active_node_option, fair_user_conf): + import pytest + + with mock.patch('node_cli.operations.cleanup_fair_op', return_value=None): + with pytest.raises(SystemExit): + cleanup(node_mode=NodeMode.ACTIVE) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.common.cleanup_fair_op') +@mock.patch('node_cli.fair.common.compose_node_env') +@mock.patch('node_cli.fair.common.logger') +def test_cleanup_logs_success_message( + mock_logger, + mock_compose_env, + mock_cleanup_fair_op, + mock_is_user_valid, + resource_alloc, + meta_file_v3, + active_node_option, + inited_node, +): + mock_env = {'ENV_TYPE': 'devnet'} + mock_compose_env.return_value = mock_env + + cleanup(node_mode=NodeMode.ACTIVE) + + mock_logger.info.assert_called_once_with( + 'Fair node was cleaned up, all containers and data removed' + ) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.active.post_request') +@mock.patch('node_cli.fair.active.is_node_inited', return_value=True) +def 
test_exit_success( + mock_is_inited, + mock_post_request, + mock_is_user_valid, + inited_node, + resource_alloc, + meta_file_v3, +): + from node_cli.fair.active import exit + + mock_post_request.return_value = ('ok', {}) + + exit() + + mock_post_request.assert_called_once_with(blueprint='fair-node', method='exit', json={}) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.active.error_exit') +@mock.patch('node_cli.fair.active.post_request') +@mock.patch('node_cli.fair.active.is_node_inited', return_value=True) +def test_exit_error( + mock_is_inited, + mock_post_request, + mock_error_exit, + mock_is_user_valid, + inited_node, + resource_alloc, + meta_file_v3, +): + from node_cli.fair.active import exit + + error_msg = 'Exit failed' + mock_post_request.return_value = ('error', error_msg) + + exit() + + mock_post_request.assert_called_once_with(blueprint='fair-node', method='exit', json={}) + mock_error_exit.assert_called_once_with(error_msg, exit_code=mock.ANY) + + +@mock.patch('node_cli.utils.decorators.is_user_valid', return_value=True) +@mock.patch('node_cli.fair.active.is_node_inited', return_value=False) +def test_exit_not_inited( + mock_is_inited, + mock_is_user_valid, + inited_node, + resource_alloc, + meta_file_v3, + capsys, +): + from node_cli.fair.active import exit + + exit() + + captured = capsys.readouterr() + assert 'Node should be initialized to proceed with operation' in captured.out diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/fixtures/settings.py b/tests/fixtures/settings.py new file mode 100644 index 00000000..55b080b2 --- /dev/null +++ b/tests/fixtures/settings.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# +# This file is part of node-cli +# +# Copyright (C) 2026-Present SKALE Labs +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero 
General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import pytest +import tomli_w + +from skale_core.settings import get_internal_settings + +from node_cli.configs import INTERNAL_SETTINGS_PATH, NODE_SETTINGS_PATH + +SKALE_DIR_HOST = './skale-data/' + +INTERNAL_SKALE_ACTIVE = { + 'node_type': 'skale', + 'node_mode': 'active', + 'skale_dir_host': SKALE_DIR_HOST, +} + +INTERNAL_SKALE_PASSIVE = { + 'node_type': 'skale', + 'node_mode': 'passive', + 'skale_dir_host': SKALE_DIR_HOST, +} + +INTERNAL_FAIR_ACTIVE = { + 'node_type': 'fair', + 'node_mode': 'active', + 'skale_dir_host': SKALE_DIR_HOST, +} + +INTERNAL_FAIR_PASSIVE = { + 'node_type': 'fair', + 'node_mode': 'passive', + 'skale_dir_host': SKALE_DIR_HOST, +} + +_BASE_NODE = { + 'env_type': 'devnet', + 'endpoint': 'http://127.0.0.1:8545', + 'container_stop_timeout': 1, + 'tg_api_key': '123', + 'tg_chat_id': '-1231232', + 'node_version': '0.0.0', + 'block_device': '/dev/sda', +} + +NODE_SKALE_ACTIVE = { + **_BASE_NODE, + 'sgx_url': 'https://localhost:1026', + 'docker_lvmpy_version': '0.0.0', + 'manager_contracts': 'test-manager', + 'ima_contracts': 'test-ima', +} + +NODE_SKALE_PASSIVE = { + **_BASE_NODE, + 'manager_contracts': 'test-manager', + 'ima_contracts': 'test-ima', + 'schain_name': 'test-schain', + 'enforce_btrfs': False, +} + +NODE_FAIR_ACTIVE = { + **_BASE_NODE, + 'sgx_url': 'https://localhost:1026', + 'fair_contracts': 'test-fair', + 'enforce_btrfs': False, +} + +NODE_FAIR_PASSIVE = { + **_BASE_NODE, + 'fair_contracts': 'test-fair', + 
'enforce_btrfs': False, +} + + +def _write_settings(internal: dict, node: dict) -> None: + INTERNAL_SETTINGS_PATH.parent.mkdir(parents=True, exist_ok=True) + INTERNAL_SETTINGS_PATH.write_bytes(tomli_w.dumps(internal).encode()) + NODE_SETTINGS_PATH.write_bytes(tomli_w.dumps(node).encode()) + get_internal_settings.cache_clear() + + +def _cleanup_settings() -> None: + INTERNAL_SETTINGS_PATH.unlink(missing_ok=True) + NODE_SETTINGS_PATH.unlink(missing_ok=True) + get_internal_settings.cache_clear() + + +@pytest.fixture +def skale_active_settings(): + _write_settings(INTERNAL_SKALE_ACTIVE, NODE_SKALE_ACTIVE) + yield + _cleanup_settings() + + +@pytest.fixture +def skale_passive_settings(): + _write_settings(INTERNAL_SKALE_PASSIVE, NODE_SKALE_PASSIVE) + yield + _cleanup_settings() + + +@pytest.fixture +def fair_active_settings(): + _write_settings(INTERNAL_FAIR_ACTIVE, NODE_FAIR_ACTIVE) + yield + _cleanup_settings() + + +@pytest.fixture +def fair_passive_settings(): + _write_settings(INTERNAL_FAIR_PASSIVE, NODE_FAIR_PASSIVE) + yield + _cleanup_settings() diff --git a/tests/helper.py b/tests/helper.py index 805fcf51..6af0af14 100644 --- a/tests/helper.py +++ b/tests/helper.py @@ -17,45 +17,35 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . 
-import mock +import datetime import os - +from unittest import mock +from unittest.mock import Mock, MagicMock import requests from click.testing import CliRunner -from mock import Mock, MagicMock -BLOCK_DEVICE = os.getenv('BLOCK_DEVICE') +CURRENT_TIMESTAMP = 1594903080 +CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP) -TEST_SCHAINS_MNT_DIR_SYNC = 'tests/tmp' +BLOCK_DEVICE = os.getenv('BLOCK_DEVICE') -TEST_META_V1 = { - 'version': '0.1.1', - 'config_stream': 'develop' -} +TEST_SCHAINS_MNT_DIR_SINGLE_CHAIN = 'tests/tmp' -TEST_META_V2 = { - 'version': '0.1.1', - 'config_stream': 'develop', - 'docker_lvmpy_stream': '1.1.2' +TEST_META_V1 = {'version': '0.1.1', 'config_stream': 'develop'} -} +TEST_META_V2 = {'version': '0.1.1', 'config_stream': 'develop', 'docker_lvmpy_version': '1.1.2'} TEST_META_V3 = { 'version': '0.1.1', 'config_stream': 'develop', - 'docker_lvmpy_stream': '1.1.2', + 'docker_lvmpy_version': '1.1.2', 'os_id': 'ubuntu', - 'os_version': '18.04' + 'os_version': '18.04', } -def response_mock( - status_code=0, - json_data=None, - headers=None, - raw=None -): +def response_mock(status_code=0, json_data=None, headers=None, raw=None): result = MagicMock() result.status_code = status_code @@ -75,10 +65,8 @@ def run_command(command, params=[], input=''): return runner.invoke(command, params, input=input) -def run_command_mock(mock_call_path, response_mock, - command, params=[], input=''): - with mock.patch(mock_call_path, - new=request_mock(response_mock)): +def run_command_mock(mock_call_path, response_mock, command, params=[], input=''): + with mock.patch(mock_call_path, new=request_mock(response_mock)): return run_command(command, params, input=input) diff --git a/tests/resources_test.py b/tests/resources_test.py index 19755e03..1f545080 100644 --- a/tests/resources_test.py +++ b/tests/resources_test.py @@ -9,12 +9,45 @@ from node_cli.core.resources import ( compose_resource_allocation_config, update_resource_allocation, - 
get_cpu_alloc, get_memory_alloc, verify_disk_size + get_cpu_alloc, + get_memory_alloc, + verify_disk_size, ) from node_cli.utils.helper import write_json, safe_load_yml -SCHAIN_VOLUME_PARTS = {'large': {'max_consensus_storage_bytes': 21311992627, 'max_file_storage_bytes': 21311992627, 'max_reserved_storage_bytes': 7103997542, 'max_skaled_leveldb_storage_bytes': 21311992627}, 'medium': {'max_consensus_storage_bytes': 2663999078, 'max_file_storage_bytes': 2663999078, 'max_reserved_storage_bytes': 887999692, 'max_skaled_leveldb_storage_bytes': 2663999078}, 'small': {'max_consensus_storage_bytes': 166499942, 'max_file_storage_bytes': 166499942, 'max_reserved_storage_bytes': 55499980, 'max_skaled_leveldb_storage_bytes': 166499942}, 'test': {'max_consensus_storage_bytes': 2663999078, 'max_file_storage_bytes': 2663999078, 'max_reserved_storage_bytes': 887999692, 'max_skaled_leveldb_storage_bytes': 2663999078}, 'test4': {'max_consensus_storage_bytes': 2663999078, 'max_file_storage_bytes': 2663999078, 'max_reserved_storage_bytes': 887999692, 'max_skaled_leveldb_storage_bytes': 2663999078}} # noqa +SCHAIN_VOLUME_PARTS = { + 'large': { + 'max_consensus_storage_bytes': 21311992627, + 'max_file_storage_bytes': 21311992627, + 'max_reserved_storage_bytes': 7103997542, + 'max_skaled_leveldb_storage_bytes': 21311992627, + }, + 'medium': { + 'max_consensus_storage_bytes': 2663999078, + 'max_file_storage_bytes': 2663999078, + 'max_reserved_storage_bytes': 887999692, + 'max_skaled_leveldb_storage_bytes': 2663999078, + }, + 'small': { + 'max_consensus_storage_bytes': 166499942, + 'max_file_storage_bytes': 166499942, + 'max_reserved_storage_bytes': 55499980, + 'max_skaled_leveldb_storage_bytes': 166499942, + }, + 'test': { + 'max_consensus_storage_bytes': 2663999078, + 'max_file_storage_bytes': 2663999078, + 'max_reserved_storage_bytes': 887999692, + 'max_skaled_leveldb_storage_bytes': 2663999078, + }, + 'test4': { + 'max_consensus_storage_bytes': 2663999078, + 'max_file_storage_bytes': 
2663999078, + 'max_reserved_storage_bytes': 887999692, + 'max_skaled_leveldb_storage_bytes': 2663999078, + }, +} # noqa DEFAULT_ENV_TYPE = 'devnet' @@ -48,29 +81,34 @@ def test_generate_resource_allocation_config(): with mock.patch('node_cli.core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE): resource_allocation_config = compose_resource_allocation_config(DEFAULT_ENV_TYPE) - assert resource_allocation_config['schain']['cpu_shares']['test4'] == 102 - assert resource_allocation_config['schain']['cpu_shares']['test'] == 102 - assert resource_allocation_config['schain']['cpu_shares']['small'] == 6 - assert resource_allocation_config['schain']['cpu_shares']['medium'] == 102 - assert resource_allocation_config['schain']['cpu_shares']['large'] == 819 + assert resource_allocation_config['skaled']['cpu_shares']['test4'] == 102 + assert resource_allocation_config['skaled']['cpu_shares']['test'] == 102 + assert resource_allocation_config['skaled']['cpu_shares']['small'] == 6 + assert resource_allocation_config['skaled']['cpu_shares']['medium'] == 102 + assert resource_allocation_config['skaled']['cpu_shares']['large'] == 819 - assert isinstance(resource_allocation_config['schain']['mem']['test4'], int) - assert isinstance(resource_allocation_config['schain']['mem']['test'], int) - assert isinstance(resource_allocation_config['schain']['mem']['small'], int) - assert isinstance(resource_allocation_config['schain']['mem']['medium'], int) - assert isinstance(resource_allocation_config['schain']['mem']['large'], int) + assert isinstance(resource_allocation_config['skaled']['mem']['test4'], int) + assert isinstance(resource_allocation_config['skaled']['mem']['test'], int) + assert isinstance(resource_allocation_config['skaled']['mem']['small'], int) + assert isinstance(resource_allocation_config['skaled']['mem']['medium'], int) + assert isinstance(resource_allocation_config['skaled']['mem']['large'], int) - assert resource_allocation_config['schain']['disk']['test4'] == 
8879996928 - assert resource_allocation_config['schain']['disk']['test'] == 8879996928 - assert resource_allocation_config['schain']['disk']['small'] == 554999808 - assert resource_allocation_config['schain']['disk']['medium'] == 8879996928 - assert resource_allocation_config['schain']['disk']['large'] == 71039975424 + assert resource_allocation_config['skaled']['disk']['test4'] == 8879996928 + assert resource_allocation_config['skaled']['disk']['test'] == 8879996928 + assert resource_allocation_config['skaled']['disk']['small'] == 554999808 + assert resource_allocation_config['skaled']['disk']['medium'] == 8879996928 + assert resource_allocation_config['skaled']['disk']['large'] == 71039975424 assert resource_allocation_config['ima']['cpu_shares'] == { - 'large': 204, 'medium': 25, 'small': 1, 'test': 25, 'test4': 25} + 'large': 204, + 'medium': 25, + 'small': 1, + 'test': 25, + 'test4': 25, + } assert isinstance(resource_allocation_config['ima']['mem'], dict) - assert resource_allocation_config['schain']['volume_limits'] == SCHAIN_VOLUME_PARTS + assert resource_allocation_config['skaled']['volume_limits'] == SCHAIN_VOLUME_PARTS def test_update_allocation_config(resource_alloc_config): @@ -80,10 +118,7 @@ def test_update_allocation_config(resource_alloc_config): assert json.load(jfile) != INITIAL_CONFIG -def test_get_static_disk_alloc_devnet( - params_by_env_type, - schain_allocation_data -): +def test_get_static_disk_alloc_devnet(params_by_env_type, schain_allocation_data): env_configs = params_by_env_type['envs']['devnet'] block_device = '/dev/test' with mock.patch('node_cli.core.resources.get_disk_size', return_value=SMALL_DISK_SIZE): @@ -101,7 +136,7 @@ def test_get_static_disk_alloc_devnet( 'medium': 8879996928, 'small': 554999808, 'test': 8879996928, - 'test4': 8879996928 + 'test4': 8879996928, } @@ -159,10 +194,10 @@ def test_get_memory_alloc(params_by_env_type): def test_leveldb_limits(): with mock.patch('node_cli.core.resources.get_disk_size', 
return_value=NORMAL_DISK_SIZE): resource_allocation_config = compose_resource_allocation_config(DEFAULT_ENV_TYPE) - assert resource_allocation_config['schain']['leveldb_limits'] == { + assert resource_allocation_config['skaled']['leveldb_limits'] == { 'large': {'contract_storage': 12787195576, 'db_storage': 4262398525}, 'medium': {'contract_storage': 1598399446, 'db_storage': 532799815}, 'small': {'contract_storage': 99899965, 'db_storage': 33299988}, 'test': {'contract_storage': 1598399446, 'db_storage': 532799815}, - 'test4': {'contract_storage': 1598399446, 'db_storage': 532799815} + 'test4': {'contract_storage': 1598399446, 'db_storage': 532799815}, } diff --git a/tests/routes_test.py b/tests/routes_test.py index 9c00b8f1..39490ab3 100644 --- a/tests/routes_test.py +++ b/tests/routes_test.py @@ -1,7 +1,11 @@ import pytest -from node_cli.configs.routes import (route_exists, get_route, get_all_available_routes, - RouteNotFoundException) +from node_cli.configs.routes import ( + RouteNotFoundException, + get_all_available_routes, + get_route, + route_exists, +) ALL_V1_ROUTES = [ '/api/v1/node/info', @@ -14,23 +18,35 @@ '/api/v1/node/exit/status', '/api/v1/node/set-domain-name', '/api/v1/node/update-safe', - '/api/v1/health/containers', '/api/v1/health/schains', '/api/v1/health/sgx', - '/api/v1/schains/config', '/api/v1/schains/list', '/api/v1/schains/dkg-statuses', '/api/v1/schains/firewall-rules', '/api/v1/schains/repair', '/api/v1/schains/get', - '/api/v1/ssl/status', '/api/v1/ssl/upload', - '/api/v1/wallet/info', - '/api/v1/wallet/send-eth' + '/api/v1/wallet/send-eth', + '/api/v1/fair-node/info', + '/api/v1/fair-node/register', + '/api/v1/fair-node/set-domain-name', + '/api/v1/fair-node/change-ip', + '/api/v1/fair-node/exit', + '/api/v1/fair-chain/record', + '/api/v1/fair-chain/checks', + '/api/v1/fair-node-passive/setup', + '/api/v1/fair-staking/add-receiver', + '/api/v1/fair-staking/remove-receiver', + '/api/v1/fair-staking/set-fee-rate', + 
'/api/v1/fair-staking/request-fees', + '/api/v1/fair-staking/request-send-fees', + '/api/v1/fair-staking/claim-request', + '/api/v1/fair-staking/get-earned-fee-amount', + '/api/v1/fair-staking/get-exit-requests', ] diff --git a/tests/simple_container/main.py b/tests/simple_container/main.py index 00d109cc..177f2c68 100644 --- a/tests/simple_container/main.py +++ b/tests/simple_container/main.py @@ -9,7 +9,7 @@ handlers=[ StreamHandler(), ], - level=logging.INFO + level=logging.INFO, ) logger = logging.getLogger(__name__) diff --git a/tests/test-env b/tests/test-env deleted file mode 100644 index eb598381..00000000 --- a/tests/test-env +++ /dev/null @@ -1,16 +0,0 @@ -ENDPOINT=127.0.0.1 -IMA_ENDPOINT=127.0.01 -DB_USER=user -DB_PASSWORD=pass -DB_PORT=3307 -CONTAINER_CONFIGS_STREAM='master' -MANAGER_CONTRACTS_ABI_URL=http://127.0.0.1 -IMA_CONTRACTS_ABI_URL=http:/127.0.0.1 -FILEBEAT_HOST=127.0.0.1:3010 -MANAGER_CONTRACTS_ABI_URL=http://127.0.0.1 -SGX_SERVER_URL=http://127.0.0.1 -DISK_MOUNTPOINT=/dev/sss -DOCKER_LVMPY_STREAM='master' -ENV_TYPE='devnet' -SCHAIN_NAME='test' -ENFORCE_BTRFS=False \ No newline at end of file diff --git a/tests/tools_meta_test.py b/tests/tools_meta_test.py index 431533db..c82e435c 100644 --- a/tests/tools_meta_test.py +++ b/tests/tools_meta_test.py @@ -1,103 +1,322 @@ import json +import os from node_cli.configs import META_FILEPATH from node_cli.utils.meta import ( - CliMeta, compose_default_meta, - DEFAULT_CONFIG_STREAM, DEFAULT_VERSION, - ensure_meta, get_meta_info, - save_meta, update_meta + DEFAULT_CONFIG_STREAM, + DEFAULT_VERSION, + CliMeta, + CliMetaManager, + FairCliMeta, + FairCliMetaManager, ) from tests.helper import TEST_META_V1, TEST_META_V2, TEST_META_V3 def test_get_meta_info_v1(meta_file_v1): - meta = get_meta_info() + meta = CliMetaManager().get_meta_info() assert meta.version == TEST_META_V1['version'] assert meta.config_stream == TEST_META_V1['config_stream'] - assert meta.docker_lvmpy_stream == '1.0.0' + assert 
meta.docker_lvmpy_version == '1.0.0' def test_get_meta_info_v2(meta_file_v2): - meta = get_meta_info() + meta = CliMetaManager().get_meta_info() assert meta.version == TEST_META_V2['version'] assert meta.config_stream == TEST_META_V2['config_stream'] - assert meta.docker_lvmpy_stream == TEST_META_V2['docker_lvmpy_stream'] + assert meta.docker_lvmpy_version == TEST_META_V2['docker_lvmpy_version'] def test_get_meta_info_v3(meta_file_v3): - meta = get_meta_info() + meta = CliMetaManager().get_meta_info() assert meta.version == TEST_META_V3['version'] assert meta.config_stream == TEST_META_V3['config_stream'] - assert meta.docker_lvmpy_stream == TEST_META_V3['docker_lvmpy_stream'] + assert meta.docker_lvmpy_version == TEST_META_V3['docker_lvmpy_version'] assert meta.os_id == TEST_META_V3['os_id'] assert meta.os_version == TEST_META_V3['os_version'] def test_get_meta_info_empty(): - meta = get_meta_info() + meta = CliMetaManager().get_meta_info() assert meta is None def test_compose_default_meta(): - meta = compose_default_meta() + meta = CliMetaManager().compose_default_meta() assert meta.version == '1.0.0' assert meta.config_stream == '1.1.0' - assert meta.docker_lvmpy_stream == '1.0.0' + assert meta.docker_lvmpy_version == '1.0.0' assert meta.os_id == 'ubuntu' assert meta.os_version == '18.04' def test_save_meta(meta_file_v2): meta = CliMeta(version='1.1.2', config_stream='2.2.2') - save_meta(meta) + CliMetaManager().save_meta(meta) with open(META_FILEPATH) as meta_f: saved_json = json.load(meta_f) assert saved_json == { 'version': '1.1.2', 'config_stream': '2.2.2', - 'docker_lvmpy_stream': '1.0.0', + 'docker_lvmpy_version': '1.0.0', 'os_id': 'ubuntu', 'os_version': '18.04', } def test_update_meta_from_v2_to_v3(meta_file_v2): - old_meta = get_meta_info() - update_meta(version='3.3.3', config_stream='1.1.1', - docker_lvmpy_stream='1.2.2', os_id='debian', os_version='11') - meta = get_meta_info() + old_meta = CliMetaManager().get_meta_info() + 
CliMetaManager().update_meta( + version='3.3.3', + config_stream='1.1.1', + docker_lvmpy_version='1.2.2', + os_id='debian', + os_version='11', + ) + meta = CliMetaManager().get_meta_info() assert meta.version == '3.3.3' assert meta.config_stream == '1.1.1' - assert meta.docker_lvmpy_stream == '1.2.2' + assert meta.docker_lvmpy_version == '1.2.2' assert meta.os_id == 'debian' assert meta.os_version == '11' assert meta != old_meta def test_update_meta_from_v1(meta_file_v1): - update_meta(version='4.4.4', config_stream='beta', - docker_lvmpy_stream='1.3.3', os_id='debian', os_version='11') - meta = get_meta_info() + CliMetaManager().update_meta( + version='4.4.4', + config_stream='beta', + docker_lvmpy_version='1.3.3', + os_id='debian', + os_version='11', + ) + meta = CliMetaManager().get_meta_info() assert meta.version == '4.4.4' assert meta.config_stream == 'beta' - assert meta.docker_lvmpy_stream == '1.3.3' + assert meta.docker_lvmpy_version == '1.3.3' assert meta.os_id == 'debian' assert meta.os_version == '11' def test_update_meta_from_v3(meta_file_v3): - update_meta(version='5.5.5', config_stream='stable', - docker_lvmpy_stream='1.2.3', os_id='ubuntu', os_version='20.04') - meta = get_meta_info() + CliMetaManager().update_meta( + version='5.5.5', + config_stream='stable', + docker_lvmpy_version='1.2.3', + os_id='ubuntu', + os_version='20.04', + ) + meta = CliMetaManager().get_meta_info() assert meta.version == '5.5.5' assert meta.config_stream == 'stable' - assert meta.docker_lvmpy_stream == '1.2.3' + assert meta.docker_lvmpy_version == '1.2.3' assert meta.os_id == 'ubuntu' assert meta.os_version == '20.04' def test_ensure_meta(ensure_meta_removed): - ensure_meta() - assert get_meta_info() == CliMeta(DEFAULT_VERSION, DEFAULT_CONFIG_STREAM) - ensure_meta(CliMeta(version='1.1.1', config_stream='1.1.1')) - assert get_meta_info() == CliMeta(DEFAULT_VERSION, DEFAULT_CONFIG_STREAM) + CliMetaManager().ensure_meta() + assert CliMetaManager().get_meta_info() == 
CliMeta(DEFAULT_VERSION, DEFAULT_CONFIG_STREAM) + CliMetaManager().ensure_meta(CliMeta(version='1.1.1', config_stream='1.1.1')) + assert CliMetaManager().get_meta_info() == CliMeta(DEFAULT_VERSION, DEFAULT_CONFIG_STREAM) + + +def test_fair_get_meta_info_v1(meta_file_v1): + meta = FairCliMetaManager().get_meta_info() + assert meta.version == TEST_META_V1['version'] + assert meta.config_stream == TEST_META_V1['config_stream'] + assert meta.os_id == 'ubuntu' + assert meta.os_version == '18.04' + + +def test_fair_get_meta_info_v2(meta_file_v2): + meta = FairCliMetaManager().get_meta_info() + assert meta.version == TEST_META_V2['version'] + assert meta.config_stream == TEST_META_V2['config_stream'] + assert meta.os_id == 'ubuntu' # default value + assert meta.os_version == '18.04' # default value + + +def test_fair_get_meta_info_v3(meta_file_v3): + meta = FairCliMetaManager().get_meta_info() + assert meta.version == TEST_META_V3['version'] + assert meta.config_stream == TEST_META_V3['config_stream'] + assert meta.os_id == TEST_META_V3['os_id'] + assert meta.os_version == TEST_META_V3['os_version'] + + +def test_fair_get_meta_info_empty(): + meta = FairCliMetaManager().get_meta_info() + assert meta is None + + +def test_fair_compose_default_meta(): + meta = FairCliMetaManager().compose_default_meta() + assert meta.version == '1.0.0' + assert meta.config_stream == '1.1.0' + assert meta.os_id == 'ubuntu' + assert meta.os_version == '18.04' + assert not hasattr(meta, 'docker_lvmpy_version') + + +def test_fair_save_meta(meta_file_v2): + meta = FairCliMeta( + version='2.2.2', config_stream='fair-stable', os_id='debian', os_version='11' + ) + FairCliMetaManager().save_meta(meta) + with open(META_FILEPATH) as meta_f: + saved_json = json.load(meta_f) + assert saved_json == { + 'version': '2.2.2', + 'config_stream': 'fair-stable', + 'os_id': 'debian', + 'os_version': '11', + } + assert 'docker_lvmpy_version' not in saved_json + + +def 
test_fair_update_meta_from_v2_to_v3(meta_file_v2): + old_meta = FairCliMetaManager().get_meta_info() + FairCliMetaManager().update_meta( + version='3.3.3', + config_stream='fair-beta', + os_id='debian', + os_version='11', + ) + meta = FairCliMetaManager().get_meta_info() + assert meta.version == '3.3.3' + assert meta.config_stream == 'fair-beta' + assert meta.os_id == 'debian' + assert meta.os_version == '11' + assert meta != old_meta + + +def test_fair_update_meta_from_v1(meta_file_v1): + FairCliMetaManager().update_meta( + version='4.4.4', + config_stream='fair-develop', + os_id='centos', + os_version='8', + ) + meta = FairCliMetaManager().get_meta_info() + assert meta.version == '4.4.4' + assert meta.config_stream == 'fair-develop' + assert meta.os_id == 'centos' + assert meta.os_version == '8' + + +def test_fair_update_meta_from_v3(meta_file_v3): + FairCliMetaManager().update_meta( + version='5.5.5', + config_stream='fair-stable', + os_id='ubuntu', + os_version='22.04', + ) + meta = FairCliMetaManager().get_meta_info() + assert meta.version == '5.5.5' + assert meta.config_stream == 'fair-stable' + assert meta.os_id == 'ubuntu' + assert meta.os_version == '22.04' + + +def test_fair_ensure_meta(ensure_meta_removed): + FairCliMetaManager().ensure_meta() + assert FairCliMetaManager().get_meta_info() == FairCliMeta( + DEFAULT_VERSION, DEFAULT_CONFIG_STREAM + ) + FairCliMetaManager().ensure_meta(FairCliMeta(version='1.1.1', config_stream='1.1.1')) + assert FairCliMetaManager().get_meta_info() == FairCliMeta( + DEFAULT_VERSION, DEFAULT_CONFIG_STREAM + ) + + +def test_fair_get_meta_info_raw(meta_file_v3): + raw_meta = FairCliMetaManager().get_meta_info(raw=True) + assert isinstance(raw_meta, dict) + assert raw_meta['version'] == TEST_META_V3['version'] + assert raw_meta['config_stream'] == TEST_META_V3['config_stream'] + assert raw_meta['os_id'] == TEST_META_V3['os_id'] + assert raw_meta['os_version'] == TEST_META_V3['os_version'] + assert 'docker_lvmpy_version' not in 
raw_meta + + +def test_fair_get_meta_info_raw_empty(): + raw_meta = FairCliMetaManager().get_meta_info(raw=True) + assert raw_meta == {} + + +def test_fair_asdict(): + meta = FairCliMeta( + version='1.2.3', config_stream='test-stream', os_id='fedora', os_version='35' + ) + meta_dict = meta.asdict() + expected = { + 'version': '1.2.3', + 'config_stream': 'test-stream', + 'os_id': 'fedora', + 'os_version': '35', + } + assert meta_dict == expected + assert 'docker_lvmpy_version' not in meta_dict + + +def test_fair_meta_compatibility_with_cli_meta_file(meta_file_v3): + meta = FairCliMetaManager().get_meta_info() + assert meta.version == TEST_META_V3['version'] + assert meta.config_stream == TEST_META_V3['config_stream'] + assert meta.os_id == TEST_META_V3['os_id'] + assert meta.os_version == TEST_META_V3['os_version'] + # Should not have docker_lvmpy_version even though it's in the file + assert not hasattr(meta, 'docker_lvmpy_version') + + +def test_fair_save_meta_overwrites_cli_meta(meta_file_v3): + with open(META_FILEPATH) as f: + original_data = json.load(f) + assert 'docker_lvmpy_version' in original_data + + fair_meta = FairCliMeta(version='2.0.0', config_stream='fair-new') + FairCliMetaManager().save_meta(fair_meta) + + with open(META_FILEPATH) as f: + saved_data = json.load(f) + assert 'docker_lvmpy_version' not in saved_data + assert saved_data['version'] == '2.0.0' + assert saved_data['config_stream'] == 'fair-new' + + +def test_fair_ensure_meta_with_existing_cli_meta(meta_file_v3): + FairCliMetaManager().ensure_meta() + meta = FairCliMetaManager().get_meta_info() + assert meta.version == TEST_META_V3['version'] + assert meta.config_stream == TEST_META_V3['config_stream'] + + +def test_fair_meta_defaults(): + meta = FairCliMeta() + assert meta.version == DEFAULT_VERSION + assert meta.config_stream == DEFAULT_CONFIG_STREAM + assert meta.os_id == 'ubuntu' + assert meta.os_version == '18.04' + + +def test_fair_meta_partial_initialization(): + meta = 
FairCliMeta(version='1.5.0', os_id='alpine') + assert meta.version == '1.5.0' + assert meta.config_stream == DEFAULT_CONFIG_STREAM + assert meta.os_id == 'alpine' + assert meta.os_version == '18.04' + + +def test_fair_update_meta_ensure_called(): + manager = FairCliMetaManager() + + manager.update_meta(version='1.0.0', config_stream='test', os_id='ubuntu', os_version='20.04') + + meta = manager.get_meta_info() + assert meta is not None + assert meta.version == '1.0.0' + assert meta.config_stream == 'test' + + if os.path.isfile(META_FILEPATH): + os.remove(META_FILEPATH) diff --git a/tests/utils/decorators_test.py b/tests/utils/decorators_test.py index e67e1f04..4c659984 100644 --- a/tests/utils/decorators_test.py +++ b/tests/utils/decorators_test.py @@ -11,6 +11,7 @@ def test_check_not_inited(): @check_not_inited def requires_not_inited_node(): pass + with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False): requires_not_inited_node() with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): @@ -22,6 +23,7 @@ def test_check_inited(): @check_inited def requires_inited_node(): pass + with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True): requires_inited_node() with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False): @@ -33,6 +35,7 @@ def test_check_user(mocked_g_config): @check_user def this_checks_user(): pass + generate_g_config_file(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH) this_checks_user() write_json(GLOBAL_SKALE_CONF_FILEPATH, {'user': 'skaletest'}) diff --git a/tests/utils/global_config_test.py b/tests/utils/global_config_test.py index e159d51f..cb4fb05a 100644 --- a/tests/utils/global_config_test.py +++ b/tests/utils/global_config_test.py @@ -1,4 +1,3 @@ - import os import mock from node_cli.utils.global_config import read_g_config, generate_g_config_file diff --git a/node_cli/cli/validate.py b/tests/utils/settings_test.py similarity index 62% rename from 
node_cli/cli/validate.py rename to tests/utils/settings_test.py index f8134df9..e7b3f4be 100644 --- a/node_cli/cli/validate.py +++ b/tests/utils/settings_test.py @@ -2,7 +2,7 @@ # # This file is part of node-cli # -# Copyright (C) 2019 SKALE Labs +# Copyright (C) 2026 SKALE Labs # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by @@ -17,27 +17,3 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -import click - -from node_cli.core.host import validate_abi_files - - -@click.group() -def validate_cli(): - pass - - -@validate_cli.group(help="Validation commands") -def validate(): - pass - - -@validate.command('abi', help="Validate contracts abi") -@click.option( - '--json', - 'json_format', - help='Show result in JSON format', - is_flag=True -) -def abi(json_format): - validate_abi_files(json_result=json_format) diff --git a/text.yml b/text.yml index 80461c76..6013f2d0 100644 --- a/text.yml +++ b/text.yml @@ -40,7 +40,7 @@ service: ssl: no_cert: |- - No SSL certificates on the node. + No SSL certificates on the node. Run < skale ssl upload > to add new certificates. uploaded: |- Certificates are successfully uploaded. 
@@ -60,13 +60,13 @@ exit: wait_for_rotations: "Node is waiting to finish rotations" completed: "Node exiting is completed" -sync_node: +passive_node: init: - help: Initialize sync SKALE node - indexer: Run sync node in indexer mode (disable block rotation) + help: Initialize passive SKALE node + indexer: Run passive node in indexer mode (disable block rotation) archive: Enable historic state and disable block rotation snapshot_from: IP of the node to take snapshot from - snapshot: Start sync node from snapshot + snapshot: Start passive node from snapshot lvmpy: help: Lvmpy commands @@ -76,3 +76,29 @@ lvmpy: heal: help: Run healing procedure for lvmpy server prompt: Are you sure you want run healing procedure? + +fair: + node: + repair: + help: Repair Fair chain node + warning: Are you sure you want to repair Fair chain node? In rare cases may cause data loss and require additional maintenance + snapshot: IP of the node to take snapshot from (put "any" to use any available node) + repair_requested: Repair mode is requested + not_inited: Node should be initialized to proceed with operation + + setup: + help: Setup passive Fair node + id: ID of the node in Fair manager + + registered: Node is registered in Fair manager. + setup_complete: Passive node setup complete. + register: + help: Register node in Fair manager + ip: IP address of the node in Fair manager + ip_changed: Node IP changed in Fair manager + change-ip: + help: Change the node IP in Fair manager + exited: Node removed from Fair manager + exit: + help: Remove node from Fair manager + prompt: Are you sure you want to remove the node from Fair manager?