diff --git a/impulse-automation/README.md b/impulse-automation/README.md
new file mode 100644
index 0000000..c2922a5
--- /dev/null
+++ b/impulse-automation/README.md
@@ -0,0 +1,11 @@
+# Ansible Automation for Lava Docker Services
+
+This repository contains Ansible playbooks and templates for setting up various Lava services using Docker and Docker Compose. They enable straightforward, scalable deployments across multiple environments while adapting to the specific service requirements of the Lava ecosystem.
+
+## Directory Structure
+
+- **`/ansible/cache/`**: Contains playbooks for deploying the Lava cache service.
+- **`/ansible/provider/`**: Focuses on the Lava RPC Provider service deployment.
+- **`/ansible/node/`**: Dedicated to setting up and managing Lava nodes with a focus on performance and reliability.
+
+> Each directory is equipped with its own README.md providing detailed documentation on the deployment and management processes for the respective services. Explore these directories to understand the specific configuration and operational procedures for each service.
diff --git a/impulse-automation/cache/README.md b/impulse-automation/cache/README.md
new file mode 100644
index 0000000..0316082
--- /dev/null
+++ b/impulse-automation/cache/README.md
@@ -0,0 +1,84 @@
+# Cache Service Deployment
+
+This `cache` directory contains the Ansible playbooks and templates used to deploy the `lava` cache service with Docker and Docker Compose.
+
+## Requirements
+
+- **Ansible 2.9** or higher.
+- **Docker** installed on the host machine.
+- **Docker Compose** installed on the host machine.
+- **Access** to the host machine as a user with sudo privileges.
+
+## Setup
+
+1. **Clone the repository**:
+   Ensure that you have cloned this repository to your local machine or to the server where you want to run the playbook.
+
+2. **Configure Hosts**:
+   Update the `hosts` file in the `inventory` directory to match the target deployment environment's IP address or hostname.
+
+3. **Set Variables**:
+   Customize variables in `group_vars/all.yml` and `host_vars/<hostname>.yml` (for example, `host_vars/lava-cache-eu.yml`) to match your deployment settings. These settings include the Docker image, network configuration, and resource limits.
+
+## Deployment
+
+The deployment process configures Docker networks, generates the necessary configuration files, and manages the Docker containers through Docker Compose.
+
+### Prepare VM on clean system
+
+If you plan to deploy on a clean Debian or Ubuntu system without an installed Docker engine, use the `prepare` tag to install additional software:
+
+```bash
+ansible-playbook main.yml --tags prepare
+```
+
+![lava_prepare_gifsicle.gif](..%2Fguides%2Flava_prepare_gifsicle.gif)
+
+### Deploying the Cache Service
+
+To deploy the cache service, run the following command:
+
+```bash
+ansible-playbook main.yml --tags deploy
+```
+
+This command executes the role that sets up directories, configures Docker Compose, and ensures that the network is ready for the service to run.
+
+> Note that by default the `ansible-playbook main.yml` command deploys and runs the service.
+
+![lava_cache_provider_gifsicle.gif](..%2Fguides%2Flava_cache_provider_gifsicle.gif)
+
+## Managing
+
+Management covers starting, stopping, and restarting the service. Each operation runs via its corresponding tag, as shown in the examples below.
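+
+For example, if your inventory keeps the default `lava-cache-eu` host name from this repository, you can combine a tag with Ansible's standard `--limit` option to target a single host (adjust the host name to your own inventory):
+
+```bash
+# Restart only the cache service on one inventory host
+ansible-playbook main.yml --tags restart --limit lava-cache-eu
+```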
+ +### Starting the Service + +To start the cache service if it is not already running, use the following command: + +```bash +ansible-playbook main.yml --tags start +``` + +### Stopping the Service + +To stop the cache service, execute: + +```bash +ansible-playbook main.yml --tags stop +``` + +This command will stop the Docker container without removing the configuration, allowing you to restart it later without reconfiguration. + +### Restarting the Service + +If you need to restart the cache service, you can use: + +```bash +ansible-playbook main.yml --tags restart +``` + +## Configuration Files + +- Docker Compose Configuration: Located at `{{ project_path }}/docker-compose.yml`, it defines the service setup, including image, ports, and environment variables. +- Environment Variables: Stored in `{{ project_path }}/cache.env`, this file includes environment-specific variables like log level and cache expiration. diff --git a/impulse-automation/cache/ansible.cfg b/impulse-automation/cache/ansible.cfg new file mode 100644 index 0000000..2e7ac1e --- /dev/null +++ b/impulse-automation/cache/ansible.cfg @@ -0,0 +1,30 @@ +[defaults] +# Defines the location of the inventory file that Ansible will use to find the host information. +inventory = ./inventory/hosts + +# Path where Ansible will look for the roles. +roles_path = roles + +# The number of parallel processes to use when Ansible executes tasks on multiple hosts. +forks = 20 + +# Disables SSH host key checking, making Ansible automatically accept unknown host keys. +# This is useful in automated environments to avoid manual intervention. +host_key_checking = False + +# Changes the default merging behavior of variables. With 'merge', hashes will be deeply merged. +hash_behaviour = merge + +# Enables pipelining, which reduces the number of SSH operations required to execute a module. +# This can result in a significant performance improvement but may not be compatible with all setups. +pipelining = True + +# Specifies the SSH private key file to use for SSH authentication. +private_key_file = ~/.ssh/id_rsa + +[ssh_connection] +# SSH arguments used when connecting to hosts. +# - ForwardAgent=yes: Allows SSH agent forwarding. +# - ControlMaster=auto: Enables the sharing of multiple sessions over a single network connection. +# - ControlPersist=60s: Makes the master connection stay open in the background for up to 60 seconds after the initial connection, improving subsequent connection times. +ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s diff --git a/impulse-automation/cache/inventory/group_vars/all.yml b/impulse-automation/cache/inventory/group_vars/all.yml new file mode 100644 index 0000000..ae0d14c --- /dev/null +++ b/impulse-automation/cache/inventory/group_vars/all.yml @@ -0,0 +1,9 @@ +# group_vars/all.yml +--- +ansible_user: root +ansible_port: 22 +project_name: lava +project_type: cache +project_unique_name: "{{ project_name }}-{{ project_type }}" +service_path: /opt/services +project_path: "{{ service_path }}/{{ project_unique_name }}" diff --git a/impulse-automation/cache/inventory/host_vars/lava-cache-eu.yml b/impulse-automation/cache/inventory/host_vars/lava-cache-eu.yml new file mode 100644 index 0000000..1d8d884 --- /dev/null +++ b/impulse-automation/cache/inventory/host_vars/lava-cache-eu.yml @@ -0,0 +1,25 @@ +# host_vars/lava-cache-eu.yml +--- +ansible_host: xxx.xxx.xxx.xxx # Specify your host +network: testnet + +container: + image: svetekllc/lava # The docker image as an example. 
Feel free to use your own if you want + tag: v2.0.1-cache + limits: + cpu: 1 + memory: 2gb + +cache_config: + expiration: 1h0m0s # Duration after which cached items expire and are removed + cache_port: 23100 # The port number on which the cache server will listen + log_level: info # The verbosity level for logs + max_items: 500000000 # The maximum number of items the cache can store + metrics_port: 23101 # The port number on which the metrics service will listen + +cache_ports: + listen: "{{ cache_config.cache_port }}" + metrics: "{{ cache_config.metrics_port }}" + +networks: + - lava \ No newline at end of file diff --git a/impulse-automation/cache/inventory/hosts b/impulse-automation/cache/inventory/hosts new file mode 100644 index 0000000..1042951 --- /dev/null +++ b/impulse-automation/cache/inventory/hosts @@ -0,0 +1,3 @@ +all: + hosts: + lava-cache-eu: diff --git a/impulse-automation/cache/main.yml b/impulse-automation/cache/main.yml new file mode 100644 index 0000000..5f1d411 --- /dev/null +++ b/impulse-automation/cache/main.yml @@ -0,0 +1,35 @@ +--- +# The main playbook for cache deployment +- name: Cache service deployment + hosts: all + become: true + gather_facts: true + roles: + - role: prepare + tags: + - never + - prepare + - role: deploy + tags: + - deploy + tasks: + - name: Run the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: present + tags: + - start + - name: Stop the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: stopped + tags: + - never + - stop + - name: Restart the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: restarted + tags: + - never + - restart diff --git a/impulse-automation/cache/roles/deploy/defaults/main.yml b/impulse-automation/cache/roles/deploy/defaults/main.yml new file mode 100644 index 0000000..74b75a1 --- /dev/null +++ b/impulse-automation/cache/roles/deploy/defaults/main.yml @@ -0,0 +1 @@ +# roles/deploy/defaults/main.yml diff --git a/impulse-automation/cache/roles/deploy/meta/main.yml b/impulse-automation/cache/roles/deploy/meta/main.yml new file mode 100644 index 0000000..516ec16 --- /dev/null +++ b/impulse-automation/cache/roles/deploy/meta/main.yml @@ -0,0 +1,19 @@ +# roles/deploy/meta/main.yml +--- +dependencies: [] +galaxy_info: + author: Michael + description: The role for deploying the lava cache service + company: Impulse Expert | https://impulse.expert + license: GPL + min_ansible_version: "2.9" + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + - focal + galaxy_tags: + - lava + - rpc + - docker diff --git a/impulse-automation/cache/roles/deploy/tasks/main.yml b/impulse-automation/cache/roles/deploy/tasks/main.yml new file mode 100644 index 0000000..147639c --- /dev/null +++ b/impulse-automation/cache/roles/deploy/tasks/main.yml @@ -0,0 +1,23 @@ +# roles/deploy/tasks/main.yml +--- +- name: "Create a directory for the docker-compose file" + ansible.builtin.file: + path: "{{ project_path }}" + state: directory + mode: "0755" + +- name: Generate the docker-compose file + ansible.builtin.template: + src: "docker-compose.yml.j2" + dest: "{{ project_path }}/docker-compose.yml" + mode: "0644" + +- name: Generate the cache.env file + ansible.builtin.template: + src: "cache.env.j2" + dest: "{{ project_path }}/cache.env" + mode: "0644" + +- name: "Create the network" + community.docker.docker_network: + name: "{{ project_name }}" diff --git a/impulse-automation/cache/roles/deploy/templates/cache.env.j2 
b/impulse-automation/cache/roles/deploy/templates/cache.env.j2 new file mode 100644 index 0000000..1037ed9 --- /dev/null +++ b/impulse-automation/cache/roles/deploy/templates/cache.env.j2 @@ -0,0 +1,17 @@ +### Cache variables ### + +# Duration after which cached items expire and are removed. Default is 1 hour. +# Format is [hours]h[minutes]m[seconds]s. +EXPIRATION="{{ cache_config.expiration }}" + +# The port number on which the cache server will listen. (default: 23100) +CACHE_PORT="{{ cache_config.cache_port }}" + +# The verbosity level for logs. Available levels are trace, debug, info, warn, error, fatal, and panic. +LOGLEVEL="{{ cache_config.log_level }}" + +# The maximum number of items the cache can store. Default is 2,147,483,648. +MAX_ITEMS="{{ cache_config.max_items }}" + +# The port number on which the metrics service will listen. +METRICS_PORT="{{ cache_config.metrics_port }}" diff --git a/impulse-automation/cache/roles/deploy/templates/docker-compose.yml.j2 b/impulse-automation/cache/roles/deploy/templates/docker-compose.yml.j2 new file mode 100644 index 0000000..39f5a77 --- /dev/null +++ b/impulse-automation/cache/roles/deploy/templates/docker-compose.yml.j2 @@ -0,0 +1,37 @@ +--- +name: {{ project_unique_name }} + +services: + cache: + image: {{ container.image }}:{{ container.tag }} + container_name: {{ project_unique_name }} + labels: + network: "{% if network %}{{ network }}{% else %}-no_network_set{% endif %}" + env_file: + - cache.env + ports: +{% for key, value in cache_ports.items() %} + - "{{ value }}:{{ value }}" +{% endfor %} + networks: +{% for value in networks %} + - {{ value }} +{% endfor %} + logging: + driver: "json-file" + options: + max-size: "100m" + max-file: "1" + deploy: + resources: + limits: + cpus: "{{ container.limits.cpu }}" + memory: "{{ container.limits.memory }}" + restart: unless-stopped + +networks: +{% for value in networks %} + {{ value }}: + name: {{ value }} + external: true +{% endfor %} diff --git a/impulse-automation/cache/roles/deploy/vars/main.yml b/impulse-automation/cache/roles/deploy/vars/main.yml new file mode 100644 index 0000000..fbdd3e4 --- /dev/null +++ b/impulse-automation/cache/roles/deploy/vars/main.yml @@ -0,0 +1,11 @@ +# roles/deploy/vars/main.yml + +# In Ansible, the priority of variable values is determined by +# the order in which they are defined, known as variable precedence. +# Variables defined in roles/deploy/vars/main.yml can override the values set in the inventory +# if they are specified later in the precedence order. 
+ +# Examples: +# network: testnet +# cache_config: +# cache_port: 23100 diff --git a/impulse-automation/cache/roles/prepare/defaults/main.yml b/impulse-automation/cache/roles/prepare/defaults/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/impulse-automation/cache/roles/prepare/meta/main.yml b/impulse-automation/cache/roles/prepare/meta/main.yml new file mode 100644 index 0000000..655edb8 --- /dev/null +++ b/impulse-automation/cache/roles/prepare/meta/main.yml @@ -0,0 +1,21 @@ +# roles/prepare/meta/main.yml +--- +dependencies: [] +galaxy_info: + author: Michael + description: The role for deploying the lava cache service + company: Impulse Expert | https://impulse.expert + license: GPL + min_ansible_version: "2.9" + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + - focal + - name: Debian + versions: + - buster + - bullseye + - bookworm + diff --git a/impulse-automation/cache/roles/prepare/tasks/main.yml b/impulse-automation/cache/roles/prepare/tasks/main.yml new file mode 100644 index 0000000..7a9afc6 --- /dev/null +++ b/impulse-automation/cache/roles/prepare/tasks/main.yml @@ -0,0 +1,64 @@ +# roles/prepare/tasks/main.yml +--- +- name: Update the apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + +# Turn off: need other role process for update runtime systems +#- name: Upgrade all apt packages +# ansible.builtin.apt: +# upgrade: dist + +- name: Install necessary packages + ansible.builtin.apt: + name: + - software-properties-common + - apt-transport-https + - ca-certificates + - sudo + - aria2 + - curl + - htop + - wget + - jq + - lz4 + - rsync + state: present + +- name: Add Docker Repository for Debian + when: ansible_facts['distribution'] == "Debian" + ansible.builtin.shell: | + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +- name: Add Docker Repository for Ubuntu + when: ansible_facts['distribution'] == "Ubuntu" + ansible.builtin.shell: | + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +- name: Update apt and install docker-ce + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + update_cache: true \ No newline at end of file diff --git a/impulse-automation/cache/roles/prepare/vars/main.yml b/impulse-automation/cache/roles/prepare/vars/main.yml new file mode 100644 index 0000000..b92e9e1 --- /dev/null +++ b/impulse-automation/cache/roles/prepare/vars/main.yml @@ -0,0 +1,7 @@ +# roles/prepare/vars/main.yml + +# In Ansible, the priority of variable values is determined by +# the order in which they are defined, known as variable precedence. 
+# Variables defined in roles/deploy/vars/main.yml can override the values set in the inventory
+# if they are specified later in the precedence order.
+
diff --git a/impulse-automation/guides/lava_cache_provider_gifsicle.gif b/impulse-automation/guides/lava_cache_provider_gifsicle.gif
new file mode 100644
index 0000000..09f2f1f
Binary files /dev/null and b/impulse-automation/guides/lava_cache_provider_gifsicle.gif differ
diff --git a/impulse-automation/guides/lava_node_deploy_gifsicle.gif b/impulse-automation/guides/lava_node_deploy_gifsicle.gif
new file mode 100644
index 0000000..5e1021d
Binary files /dev/null and b/impulse-automation/guides/lava_node_deploy_gifsicle.gif differ
diff --git a/impulse-automation/guides/lava_node_prepare_gifsicle.gif b/impulse-automation/guides/lava_node_prepare_gifsicle.gif
new file mode 100644
index 0000000..03762ca
Binary files /dev/null and b/impulse-automation/guides/lava_node_prepare_gifsicle.gif differ
diff --git a/impulse-automation/guides/lava_prepare_gifsicle.gif b/impulse-automation/guides/lava_prepare_gifsicle.gif
new file mode 100644
index 0000000..46dfe02
Binary files /dev/null and b/impulse-automation/guides/lava_prepare_gifsicle.gif differ
diff --git a/impulse-automation/nodes/README.md b/impulse-automation/nodes/README.md
new file mode 100644
index 0000000..cffead3
--- /dev/null
+++ b/impulse-automation/nodes/README.md
@@ -0,0 +1,129 @@
+# Node Service Deployment
+
+This repository includes Ansible playbooks and supporting files for deploying and managing the Node Service. The service uses Docker containers managed via Docker Compose to ensure easy and scalable deployments across multiple environments.
+
+## Prerequisites
+
+- **Ansible 2.9+**: Ensure Ansible is installed on your control machine.
+- **Docker**: Must be installed on the target hosts.
+- **Docker Compose**: Required for managing Dockerized applications.
+- **SSH Access**: Root or sudo access on the target hosts.
+
+## Repository Structure
+
+- **`group_vars`** and **`host_vars`**: Contain variables specific to groups and hosts. Customize these to fit the deployment context.
+- **`roles`**: Contains the tasks used for setting up the node.
+- **`templates`**: Jinja2 templates for generating Docker Compose and environment configuration files.
+- **`inventory`**: Hosts file defining the servers on which the node service will be deployed.
+
+## Installation and Setup
+
+### Clone the Repository
+
+Start by cloning this repository to your Ansible control machine:
+
+```bash
+git clone <repository_url>
+cd <repository_directory>
+```
+
+### Configure Inventory
+
+Edit the `inventory/hosts` file to add the IP addresses or hostnames of the machines where the service should be deployed.
+
+Example:
+
+```yaml
+all:
+  children:
+    lava_testnet_node_eu:
+      hosts:
+        192.168.1.100:
+          ansible_user: root
+          ansible_ssh_private_key_file: ~/.ssh/id_rsa
+```
+
+> You can declare certain parameters in the `ansible.cfg` configuration file or in `group_vars/all.yml`. These settings will be applied to all hosts on every run.
+
+ansible.cfg
+
+```ini
+[defaults]
+private_key_file = ~/.ssh/id_rsa
+```
+
+group_vars/all.yml
+
+```yml
+# group_vars/all.yml
+---
+ansible_user: root
+ansible_port: 22
+```
+
+### Set Role Variables
+
+Adjust role-specific variables in `group_vars/all.yml` and `host_vars/*.yml` to match your environment.
+
+## Deployment
+
+The deployment process configures Docker networks, generates the necessary configuration files, and manages the Docker containers through Docker Compose.
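+
+If you want to preview what the deploy role would change before applying it, you can combine the `deploy` tag with Ansible's standard `--check` and `--diff` options; note that some Docker modules support check mode only partially, so treat the output as an approximation rather than a guarantee:
+
+```bash
+# Dry run of the deploy tasks; nothing is changed on the host
+ansible-playbook main.yml --tags deploy --check --diff
+```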
+
+### Prepare VM on clean system
+
+If you plan to deploy on a clean Debian or Ubuntu system without an installed Docker engine, use the `prepare` tag to install additional software:
+
+```bash
+ansible-playbook main.yml --tags prepare
+```
+
+![lava_node_prepare_gifsicle.gif](https://github.com/svetek/lava-ansible-deployment/blob/main/guides/lava_node_prepare_gifsicle.gif?raw=true)
+
+### Deploy the Service
+
+To deploy the Lava Node Service:
+
+```bash
+ansible-playbook main.yml --tags deploy
+```
+
+To download a chain snapshot, run the following command:
+
+```bash
+ansible-playbook main.yml --tags snapshot
+```
+
+![lava_node_deploy_gifsicle.gif](https://github.com/svetek/lava-ansible-deployment/blob/main/guides/lava_node_deploy_gifsicle.gif?raw=true)
+
+## Managing
+
+Start the Service: Ensure the service is up and running:
+
+```bash
+ansible-playbook main.yml --tags start
+```
+
+Stop the Service: Safely stop the service when needed:
+
+```bash
+ansible-playbook main.yml --tags stop
+```
+
+Restart the Service: Restart the service to apply updates or changes:
+
+```bash
+ansible-playbook main.yml --tags restart
+```
+
+## Configuration Files
+
+- Docker Compose Configuration: Located at `{{ project_path }}/docker-compose.yml`, it defines the service setup, including image, ports, and environment variables.
+
+- Environment Variables: Stored in `{{ project_path }}/node.env`, this file includes environment-specific variables such as the chain ID, log level, and ports.
+
+- Chain configuration and database: Stored in `{{ volume_path }}`.
+
+> Note:
+>
+> - By default, the `ansible-playbook main.yml` command deploys and runs the service but does not download the snapshot.
+> - You can combine several tags, e.g. `ansible-playbook main.yml --tags "prepare,deploy,snapshot,start"`, for a fast end-to-end deployment.
diff --git a/impulse-automation/nodes/ansible.cfg b/impulse-automation/nodes/ansible.cfg
new file mode 100644
index 0000000..c3e0a99
--- /dev/null
+++ b/impulse-automation/nodes/ansible.cfg
@@ -0,0 +1,30 @@
+[defaults]
+# Defines the location of the inventory file that Ansible will use to find the host information.
+inventory = ./inventory/hosts
+
+# Path where Ansible will look for the roles.
+roles_path = roles
+
+# The number of parallel processes to use when Ansible executes tasks on multiple hosts.
+forks = 20
+
+# Disables SSH host key checking, making Ansible automatically accept unknown host keys.
+# This is useful in automated environments to avoid manual intervention.
+host_key_checking = False
+
+# Changes the default merging behavior of variables. With 'merge', hashes will be deeply merged.
+hash_behaviour = merge
+
+# Enables pipelining, which reduces the number of SSH operations required to execute a module.
+# This can result in a significant performance improvement but may not be compatible with all setups.
+pipelining = True
+
+# Specifies the SSH private key file to use for SSH authentication.
+private_key_file = ~/.ssh/svc_ansible
+
+[ssh_connection]
+# SSH arguments used when connecting to hosts.
+# - ForwardAgent=yes: Allows SSH agent forwarding.
+# - ControlMaster=auto: Enables the sharing of multiple sessions over a single network connection.
+# - ControlPersist=60s: Makes the master connection stay open in the background for up to 60 seconds after the initial connection, improving subsequent connection times.
+ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s diff --git a/impulse-automation/nodes/inventory/group_vars/all.yml b/impulse-automation/nodes/inventory/group_vars/all.yml new file mode 100644 index 0000000..71707ab --- /dev/null +++ b/impulse-automation/nodes/inventory/group_vars/all.yml @@ -0,0 +1,9 @@ +# group_vars/all.yml +--- +ansible_user: root +ansible_port: 22 +project_type: node +project_unique_name: "{{ project_name }}{% if network %}-{{ network }}{% endif %}-{{ project_type }}" +service_path: /opt/services +project_path: "{{ service_path }}/{{ project_unique_name }}" +volume_path: "{{ container.volume_path }}" \ No newline at end of file diff --git a/impulse-automation/nodes/inventory/host_vars/lava_testnet_node_eu.yml b/impulse-automation/nodes/inventory/host_vars/lava_testnet_node_eu.yml new file mode 100644 index 0000000..6b112c1 --- /dev/null +++ b/impulse-automation/nodes/inventory/host_vars/lava_testnet_node_eu.yml @@ -0,0 +1,55 @@ +# host_vars/lava_testnet_node_eu.yml +--- +ansible_host: xxx.xxx.xxx.xxx # IP address of the host +project_name: lava +environment_template: node.env.j2 +network: testnet + +# Container configuration +container: + image: svetekllc/lava # Docker image name + tag: v2.0.0-node # Docker image tag to specify version + limits: + cpu: 2 # Maximum number of CPU cores the container can use + memory: 4gb # Maximum amount of memory the container can use + volume_path: /opt/{{ project_unique_name }} # Path where volumes are mounted in the host + +# Node-specific configuration +node_config: + moniker: Lava # Custom name for the provider instance + chain_id: lava-testnet-2 # Blockchain network identifier + log_level: info # Logging level + config_path: /root/.lava # Path to provider configuration files + database_backend: goleveldb # AppDBBackend defines the database backend type to use for the application and snapshots DBs. + addrbook_url: "" + genesis_url: https://raw.githubusercontent.com/lavanet/lava-config/main/testnet-2/genesis_json/genesis.json + peers: "" + seeds: d1730b774b7c1d52dd9f6ae874a56de958aa147e@139.45.205.60:23656,51789af428293359348020d298143946c9a0492e@5.161.132.110:26656,802e15de52338029c6e2de2901c8cdd75f15ee9b@64.120.88.81:23656 + state_sync: false + diff_height: 1000 + public_rpc_url: https://public-rpc-testnet2.lavanet.xyz:443/rpc/ # Public URL for RPC connections + +# Network ports used by the node +node_ports: + grpc: 9090 # Port for gRPC service + api: 1317 # Port for REST API service + p2p: 26656 # Port for P2P service + rpc: 26657 # Port for RPC service + metrics: 26660 # Port for Prometheus Metrics + +networks: + - lava # Network label used for Docker networking + +# Wallet configuration +wallet: + name: lava # Wallet name + password: "" # Wallet password. 
Required if the keyring_backend parameter is equal: os, file + keyring_backend: test # Backend system for managing keys and secrets + +# Download Snapshot configuration +download_tmp_dir: "{{ volume_path }}" +download_files: + snapshot: + url: https://testnet-files.itrocket.net/lava/snap_lava.tar.lz4 + file_ext: tar.lz4 # File type zip / tar.lz4 / tar.zstd / none + unpack_dir: "{{ volume_path }}" diff --git a/impulse-automation/nodes/inventory/hosts b/impulse-automation/nodes/inventory/hosts new file mode 100644 index 0000000..f59e7c0 --- /dev/null +++ b/impulse-automation/nodes/inventory/hosts @@ -0,0 +1,3 @@ +all: + hosts: + lava_testnet_node_eu: diff --git a/impulse-automation/nodes/main.yml b/impulse-automation/nodes/main.yml new file mode 100644 index 0000000..ef83563 --- /dev/null +++ b/impulse-automation/nodes/main.yml @@ -0,0 +1,39 @@ +--- +# The main playbook for nodes deployment +- name: Deploying and managing the node Service + hosts: all + become: true + gather_facts: true + roles: + - role: prepare + tags: + - never + - prepare + - role: deploy + tags: + - deploy + - role: snapshot + tags: + - never + - snapshot + tasks: + - name: Run the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: present + tags: + - start + - name: Stop the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: stopped + tags: + - never + - stop + - name: Restart the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: restarted + tags: + - never + - restart diff --git a/impulse-automation/nodes/roles/deploy/defaults/main.yml b/impulse-automation/nodes/roles/deploy/defaults/main.yml new file mode 100644 index 0000000..74b75a1 --- /dev/null +++ b/impulse-automation/nodes/roles/deploy/defaults/main.yml @@ -0,0 +1 @@ +# roles/deploy/defaults/main.yml diff --git a/impulse-automation/nodes/roles/deploy/tasks/main.yml b/impulse-automation/nodes/roles/deploy/tasks/main.yml new file mode 100644 index 0000000..dfc82a6 --- /dev/null +++ b/impulse-automation/nodes/roles/deploy/tasks/main.yml @@ -0,0 +1,56 @@ +# roles/deploy/tasks/main.yml +--- +- name: Create a directory for the Docker Compose file + ansible.builtin.file: + path: "{{ project_path }}" + state: directory + mode: "0755" + +- name: Generate the Docker Compose file + ansible.builtin.template: + src: "docker-compose.yml.j2" + dest: "{{ project_path }}/docker-compose.yml" + mode: "0644" + +- name: Generate the node.env file + ansible.builtin.template: + src: "{{ environment_template }}" + dest: "{{ project_path }}/node.env" + mode: "0644" + +- name: Create the data volume directory + ansible.builtin.file: + path: "{{ volume_path }}" + state: directory + mode: "0755" + +- name: Get volume information + community.docker.docker_volume_info: + name: "{{ project_unique_name }}" + register: volume_info + +- name: Create the data volume + when: + - volume_info.exists + - volume_info.volume.Options.device != volume_path + block: + - name: Get container information + community.docker.docker_container_info: + name: "{{ project_unique_name }}" + register: container_info + + - name: Remove the container + community.docker.docker_container: + name: "{{ project_unique_name }}" + state: absent + when: container_info.exists + + - name: Remove the volume + community.docker.docker_volume: + name: "{{ project_unique_name }}" + state: absent + when: volume_info.exists + +- name: Create a network + community.docker.docker_network: + name: "{{ project_name }}" 
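+
+# Explanatory note (added documentation, not a task): the volume block above guards against a changed
+# bind path. If a Docker volume named after project_unique_name already exists but its bind device no
+# longer matches volume_path, the container and the stale volume are removed so that the next run of
+# the playbook's start or restart task recreates the volume against the new path via Docker Compose.
+# Removing the volume does not delete the data under the old host directory.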
diff --git a/impulse-automation/nodes/roles/deploy/templates/docker-compose.yml.j2 b/impulse-automation/nodes/roles/deploy/templates/docker-compose.yml.j2 new file mode 100644 index 0000000..352c807 --- /dev/null +++ b/impulse-automation/nodes/roles/deploy/templates/docker-compose.yml.j2 @@ -0,0 +1,48 @@ +--- +name: {{ project_unique_name }} + +services: + node: + image: {{ container.image }}:{{ container.tag }} + container_name: {{ project_unique_name }} + labels: + network: "{% if network %}{{ network }}{% else %}-no_network_set{% endif %}" + env_file: + - node.env + volumes: + - data:{{ node_config.config_path}} + ports: +{% for key, value in node_ports.items() %} + - "{{ value }}:{{ value }}" +{% endfor %} + networks: +{% for value in networks %} + - {{ value }} +{% endfor %} + logging: + driver: "json-file" + options: + max-size: "100m" + max-file: "1" + deploy: + resources: + limits: + cpus: "{{ container.limits.cpu }}" + memory: "{{ container.limits.memory }}" + restart: unless-stopped + +volumes: + data: + name: {{ project_unique_name }} + driver: local + driver_opts: + type: none + o: bind + device: {{ container.volume_path }} + +networks: +{% for value in networks %} + {{ value }}: + name: {{ value }} + external: true +{% endfor %} diff --git a/impulse-automation/nodes/roles/deploy/templates/node.env.j2 b/impulse-automation/nodes/roles/deploy/templates/node.env.j2 new file mode 100644 index 0000000..5b2ee41 --- /dev/null +++ b/impulse-automation/nodes/roles/deploy/templates/node.env.j2 @@ -0,0 +1,75 @@ +### Node variables ### + +# The URL to download the address book. +ADDRBOOK_URL="{{ node_config.addrbook_url }}" + +# The unique identifier for the blockchain network. +CHAIN_ID="{{ node_config.chain_id }}" + +# The directory path where the node's configuration files are stored. +CONFIG_PATH="{{ node_config.config_path }}" + +# The URL to download the genesis file of the network. +GENESIS_URL="{{ node_config.genesis_url }}" + +### Enable state sync +# Whether to enable state synchronization from other nodes. (true|false) +STATE_SYNC="{{ node_config.state_sync | lower }}" +# Blocks behind the highest height to start the state sync. (default: 1000) +DIFF_HEIGHT="{{ node_config.diff_height }}" + +# AppDBBackend defines the database backend type to use for the application and snapshots DBs. (default: goleveldb) +# Database backends: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +DB_BACKEND="{{ node_config.database_backend }}" + +### Set the wallet name and password. +# The local name for the user's wallet. +WALLET="{{ wallet.name }}" +# The password for the wallet, must be at least 8 characters. +WALLET_PASS="{{ wallet.password }}" +# The storage mechanism for keys. (default: os) +KEYRING_BACKEND="{{ wallet.keyring_backend }}" + +### Set the logging level. +# The verbosity of logs output [trace|debug|info|warn|error|fatal|panic] (default: info) +LOGLEVEL="{{ node_config.log_level }}" + +### Moniker name +# The custom name to identify your node in the network. +MONIKER="{{ node_config.moniker | default('node') }}" + +### Set the peers or seeds. +# Direct connections for network communication. +PEERS="{{ node_config.peers }}" +# Known nodes to help discover other peers in the network. +SEEDS="{{ node_config.seeds }}" + +### Metrics and node communication ports. +{% if node_ports.grpc is defined %} +# GRPC port for node communication. 
(default: 9090) +NODE_GRPC_PORT="{{ node_ports.grpc }}" +{% endif %} +{% if node_ports.jrpc is defined %} +# JSON-RPC port for node communication (default: 8545) +NODE_JRPC_PORT="{{ node_ports.jrpc }}" +{% endif %} +{% if node_ports.api is defined %} +# API port for node communication. (default: 1317) +NODE_API_PORT="{{ node_ports.api }}" +{% endif %} +{% if node_ports.p2p is defined %} +# The port for peer-to-peer network communication. (default: 26656) +NODE_P2P_PORT="{{ node_ports.p2p }}" +{% endif %} +{% if node_ports.rpc is defined %} +# The port for RPC server. (default: 26657) +NODE_RPC_PORT="{{ node_ports.rpc }}" +{% endif %} +{% if node_ports.metrics is defined %} +# The port for Prometheus metrics. (default: 26660) +METRICS_PORT="{{ node_ports.metrics }}" +{% endif %} + +### Public RPC URL +# The URL to access the public RPC server for the network. +PUBLIC_RPC="{{ node_config.public_rpc_url }}" diff --git a/impulse-automation/nodes/roles/deploy/vars/main.yml b/impulse-automation/nodes/roles/deploy/vars/main.yml new file mode 100644 index 0000000..c09bf91 --- /dev/null +++ b/impulse-automation/nodes/roles/deploy/vars/main.yml @@ -0,0 +1,7 @@ +# roles/deploy/vars/main.yml + +# In Ansible, the priority of variable values is determined by +# the order in which they are defined, known as variable precedence. +# Variables defined in roles/deploy/vars/main.yml can override the values set in the inventory +# if they are specified later in the precedence order. + diff --git a/impulse-automation/nodes/roles/prepare/defaults/main.yml b/impulse-automation/nodes/roles/prepare/defaults/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/impulse-automation/nodes/roles/prepare/meta/main.yml b/impulse-automation/nodes/roles/prepare/meta/main.yml new file mode 100644 index 0000000..676b42f --- /dev/null +++ b/impulse-automation/nodes/roles/prepare/meta/main.yml @@ -0,0 +1,21 @@ +# roles/prepare/meta/main.yml +--- +dependencies: [] +galaxy_info: + author: Michael + description: The role for deploying the lava node service + company: Impulse Expert | https://impulse.expert + license: GPL + min_ansible_version: "2.9" + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + - focal + - name: Debian + versions: + - buster + - bullseye + - bookworm + diff --git a/impulse-automation/nodes/roles/prepare/tasks/main.yml b/impulse-automation/nodes/roles/prepare/tasks/main.yml new file mode 100644 index 0000000..7a9afc6 --- /dev/null +++ b/impulse-automation/nodes/roles/prepare/tasks/main.yml @@ -0,0 +1,64 @@ +# roles/prepare/tasks/main.yml +--- +- name: Update the apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + +# Turn off: need other role process for update runtime systems +#- name: Upgrade all apt packages +# ansible.builtin.apt: +# upgrade: dist + +- name: Install necessary packages + ansible.builtin.apt: + name: + - software-properties-common + - apt-transport-https + - ca-certificates + - sudo + - aria2 + - curl + - htop + - wget + - jq + - lz4 + - rsync + state: present + +- name: Add Docker Repository for Debian + when: ansible_facts['distribution'] == "Debian" + ansible.builtin.shell: | + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +- name: Add Docker Repository for Ubuntu + when: ansible_facts['distribution'] == "Ubuntu" + ansible.builtin.shell: | + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +- name: Update apt and install docker-ce + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + update_cache: true \ No newline at end of file diff --git a/impulse-automation/nodes/roles/prepare/vars/main.yml b/impulse-automation/nodes/roles/prepare/vars/main.yml new file mode 100644 index 0000000..b92e9e1 --- /dev/null +++ b/impulse-automation/nodes/roles/prepare/vars/main.yml @@ -0,0 +1,7 @@ +# roles/prepare/vars/main.yml + +# In Ansible, the priority of variable values is determined by +# the order in which they are defined, known as variable precedence. +# Variables defined in roles/deploy/vars/main.yml can override the values set in the inventory +# if they are specified later in the precedence order. + diff --git a/impulse-automation/nodes/roles/snapshot/defaults/main.yml b/impulse-automation/nodes/roles/snapshot/defaults/main.yml new file mode 100644 index 0000000..46cad48 --- /dev/null +++ b/impulse-automation/nodes/roles/snapshot/defaults/main.yml @@ -0,0 +1 @@ +# roles/snapshot/defaults/main.yml diff --git a/impulse-automation/nodes/roles/snapshot/handlers/main.yml b/impulse-automation/nodes/roles/snapshot/handlers/main.yml new file mode 100644 index 0000000..f0f8a13 --- /dev/null +++ b/impulse-automation/nodes/roles/snapshot/handlers/main.yml @@ -0,0 +1,2 @@ +# roles/snapshot/handlers/main.yml + diff --git a/impulse-automation/nodes/roles/snapshot/meta/main.yml b/impulse-automation/nodes/roles/snapshot/meta/main.yml new file mode 100644 index 0000000..aa0dad9 --- /dev/null +++ b/impulse-automation/nodes/roles/snapshot/meta/main.yml @@ -0,0 +1,21 @@ +# roles/snapshot/meta/main.yml +--- +dependencies: [] +galaxy_info: + author: Michael + description: The role for managing the snapshot + company: Impulse Expert | https://impulse.expert + license: GPL + min_ansible_version: "2.9" + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + - focal + - name: Debian + versions: + - buster + - bullseye + - bookworm + diff --git a/impulse-automation/nodes/roles/snapshot/tasks/main.yml b/impulse-automation/nodes/roles/snapshot/tasks/main.yml new file mode 100644 index 0000000..b169b17 --- /dev/null +++ b/impulse-automation/nodes/roles/snapshot/tasks/main.yml @@ -0,0 +1,49 @@ +# roles/snapshot/tasks/main.yml +--- +- name: Create unpack directory + ansible.builtin.file: + path: "{{ item.value.unpack_dir }}" + state: directory + mode: '0755' + loop: "{{ download_files | dict2items }}" + loop_control: + label: "{{ item.key }} & directory={{ item.value.unpack_dir }}" + +- name: Download files to tmp folder use aria2c + ansible.builtin.shell: aria2c -x6 --auto-file-renaming=false --continue=true {{ item.value.url }} --dir={{ download_tmp_dir }} --out={{ item.key }}.{{ item.value.file_ext }} 2>&1 >{{ 
download_tmp_dir }}/{{ item.key }}.{{ item.value.file_ext }}.output + async: 21600 # Maximum allowed time in Seconds + poll: 10 # Polling Interval in Seconds + loop: "{{ download_files | dict2items }}" + loop_control: + label: "{{ item.key }}.{{ item.value.file_ext }}" + +- name: Extract tar.gz + ansible.builtin.shell: "tar -xzf {{ download_tmp_dir }}/{{ item.key }}.{{ item.value.file_ext }} -C {{ item.value.unpack_dir }}" + loop: "{{ download_files | dict2items }}" + when: item.value.file_ext == "tar.gz" + +- name: Extract tar.lz4 + ansible.builtin.shell: "lz4 -c -d {{ download_tmp_dir }}/{{ item.key }}.{{ item.value.file_ext }} | tar -x -C {{ item.value.unpack_dir }}" + loop: "{{ download_files | dict2items }}" + when: item.value.file_ext == "tar.lz4" + +- name: Extract zip + ansible.builtin.shell: "unzip -o {{ download_tmp_dir }}/{{ item.key }}.{{ item.value.file_ext }} -d {{ item.value.unpack_dir }}" + loop: "{{ download_files | dict2items }}" + when: item.value.file_ext == "zip" + +- name: Extract tar.zstd + ansible.builtin.shell: "zstd -cd {{ download_tmp_dir }}/{{ item.key }}.{{ item.value.file_ext }} | tar -x -C {{ item.value.unpack_dir }}" + loop: "{{ download_files | dict2items }}" + when: item.value.file_ext == "tar.zstd" + +- name: none + ansible.builtin.shell: "pwd" + loop: "{{ download_files | dict2items }}" + when: item.value.file_ext == "none" + +- name: Remove snapshot file (delete file) + ansible.builtin.file: + path: "{{ download_tmp_dir }}/{{ item.key }}.{{ item.value.file_ext }}" + state: absent + loop: "{{ download_files | dict2items }}" diff --git a/impulse-automation/nodes/roles/snapshot/vars/main.yml b/impulse-automation/nodes/roles/snapshot/vars/main.yml new file mode 100644 index 0000000..0665658 --- /dev/null +++ b/impulse-automation/nodes/roles/snapshot/vars/main.yml @@ -0,0 +1,6 @@ +# roles/snapshot/vars/main.yml + +# In Ansible, the priority of variable values is determined by +# the order in which they are defined, known as variable precedence. +# Variables defined in roles/deploy/vars/main.yml can override the values set in the inventory +# if they are specified later in the precedence order. diff --git a/impulse-automation/provider/README.md b/impulse-automation/provider/README.md new file mode 100644 index 0000000..2e81d2d --- /dev/null +++ b/impulse-automation/provider/README.md @@ -0,0 +1,118 @@ +# RPC Provider Service Deployment + +This repository includes Ansible playbooks and supporting files for deploying and managing the RPC Provider Service. The service uses Docker containers managed via Docker Compose to ensure easy and scalable deployments across multiple environments. + +## Prerequisites + +- **Ansible 2.9+**: Ensure Ansible is installed on your control machine. +- **Docker**: Must be installed on the target hosts. +- **Docker Compose**: Required for managing Dockerized applications. +- **SSH Access**: Root or sudo access on the target hosts. + +## Repository Structure + +- **`group_vars`** and **`host_vars`**: Contains variables specific to hosts and groups. Customize these to fit the deployment context. +- **`roles`**: Contains the tasks used for setting up the RPC provider. +- **`templates`**: Jinja2 templates for generating Docker Compose and environment configuration files. +- **`inventory`**: Hosts file defining the servers on which the RPC service will be deployed. 
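+
+The `host_vars` files referenced above follow roughly the shape below (a trimmed sketch based on `inventory/host_vars/lava_provider_eu.yml` in this repository; the values shown are illustrative placeholders):
+
+```yaml
+# host_vars/<hostname>.yml — trimmed illustration; see lava_provider_eu.yml for the full set
+ansible_host: 192.0.2.10        # IP address of the target host (placeholder)
+network: testnet
+container:
+  image: svetekllc/lava
+  tag: v2.0.1-provider
+  limits:
+    cpu: 2
+    memory: 4gb
+  volume_path: /opt/{{ project_unique_name }}
+provider_ports:
+  grpc: 22001
+  metrics: 23001
+networks:
+  - lava
+# chains: [...]                 # per-chain endpoint definitions, omitted here
+```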
+
+## Installation and Setup
+
+### Clone the Repository
+
+Start by cloning this repository to your Ansible control machine:
+
+```bash
+git clone <repository_url>
+cd <repository_directory>
+```
+
+### Configure Inventory
+
+Edit the `inventory/hosts` file to add the IP addresses or hostnames of the machines where the service should be deployed.
+
+Example:
+
+```yaml
+all:
+  children:
+    lava_provider_eu:
+      hosts:
+        192.168.1.100:
+          ansible_user: root
+          ansible_ssh_private_key_file: ~/.ssh/id_rsa
+```
+
+> You can declare certain parameters in the `ansible.cfg` configuration file or in `group_vars/all.yml`. These settings will be applied to all hosts on every run.
+
+ansible.cfg
+
+```ini
+[defaults]
+private_key_file = ~/.ssh/id_rsa
+```
+
+group_vars/all.yml
+
+```yml
+# group_vars/all.yml
+---
+ansible_user: root
+ansible_port: 22
+```
+
+### Set Role Variables
+
+Adjust role-specific variables in `group_vars/all.yml` and `host_vars/*.yml` to match your environment.
+
+## Deployment
+
+The deployment process configures Docker networks, generates the necessary configuration files, and manages the Docker containers through Docker Compose.
+
+### Prepare VM on clean system
+
+If you plan to deploy on a clean Debian or Ubuntu system without an installed Docker engine, use the `prepare` tag to install additional software:
+
+```bash
+ansible-playbook main.yml --tags prepare
+```
+
+![lava_prepare_gifsicle.gif](..%2Fguides%2Flava_prepare_gifsicle.gif)
+
+### Deploy the Service
+
+To deploy the RPC Provider Service:
+
+```bash
+ansible-playbook main.yml --tags deploy
+```
+
+> Note that by default the `ansible-playbook main.yml` command deploys and runs the service.
+
+![lava_cache_provider_gifsicle.gif](..%2Fguides%2Flava_cache_provider_gifsicle.gif)
+
+## Managing
+
+Start the Service: Ensure the service is up and running:
+
+```bash
+ansible-playbook main.yml --tags start
+```
+
+Stop the Service: Safely stop the service when needed:
+
+```bash
+ansible-playbook main.yml --tags stop
+```
+
+Restart the Service: Restart the service to apply updates or changes:
+
+```bash
+ansible-playbook main.yml --tags restart
+```
+
+## Configuration Files
+
+- Docker Compose Configuration: Located at `{{ project_path }}/docker-compose.yml`, it defines the service setup, including image, ports, and environment variables.
+- Environment Variables: Stored in `{{ project_path }}/provider.env`, this file includes environment-specific variables such as the log level and cache connection settings.
+- Chains configuration: Stored in `{{ volume_path }}`. This configuration contains the per-chain settings for the Lava RPC Provider.
diff --git a/impulse-automation/provider/ansible.cfg b/impulse-automation/provider/ansible.cfg
new file mode 100644
index 0000000..2e7ac1e
--- /dev/null
+++ b/impulse-automation/provider/ansible.cfg
@@ -0,0 +1,30 @@
+[defaults]
+# Defines the location of the inventory file that Ansible will use to find the host information.
+inventory = ./inventory/hosts
+
+# Path where Ansible will look for the roles.
+roles_path = roles
+
+# The number of parallel processes to use when Ansible executes tasks on multiple hosts.
+forks = 20
+
+# Disables SSH host key checking, making Ansible automatically accept unknown host keys.
+# This is useful in automated environments to avoid manual intervention.
+host_key_checking = False
+
+# Changes the default merging behavior of variables. With 'merge', hashes will be deeply merged.
+hash_behaviour = merge + +# Enables pipelining, which reduces the number of SSH operations required to execute a module. +# This can result in a significant performance improvement but may not be compatible with all setups. +pipelining = True + +# Specifies the SSH private key file to use for SSH authentication. +private_key_file = ~/.ssh/id_rsa + +[ssh_connection] +# SSH arguments used when connecting to hosts. +# - ForwardAgent=yes: Allows SSH agent forwarding. +# - ControlMaster=auto: Enables the sharing of multiple sessions over a single network connection. +# - ControlPersist=60s: Makes the master connection stay open in the background for up to 60 seconds after the initial connection, improving subsequent connection times. +ssh_args = -o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s diff --git a/impulse-automation/provider/inventory/group_vars/all.yml b/impulse-automation/provider/inventory/group_vars/all.yml new file mode 100644 index 0000000..47be87c --- /dev/null +++ b/impulse-automation/provider/inventory/group_vars/all.yml @@ -0,0 +1,10 @@ +# group_vars/all.yml +--- +ansible_user: root +ansible_port: 22 +project_name: lava +project_type: provider +project_unique_name: "{{ project_name }}-{{ project_type }}" +service_path: /opt/services # The docker-compose.yml and a variables file are located at this path. +project_path: "{{ service_path }}/{{ project_unique_name }}" # Configuration files and a wallet are located at this path. +volume_path: "{{ container.volume_path }}" diff --git a/impulse-automation/provider/inventory/host_vars/lava_provider_eu.yml b/impulse-automation/provider/inventory/host_vars/lava_provider_eu.yml new file mode 100644 index 0000000..ddb7859 --- /dev/null +++ b/impulse-automation/provider/inventory/host_vars/lava_provider_eu.yml @@ -0,0 +1,140 @@ +# host_vars/lava_provider_eu.yml +--- +ansible_host: xxx.xxx.xxx.xxx # IP address of the host +network: testnet + +# Container configuration +container: + image: svetekllc/lava # Docker image name + tag: v2.0.1-provider # Docker image tag to specify version + limits: + cpu: 2 # Maximum number of CPU cores the container can use + memory: 4gb # Maximum amount of memory the container can use + volume_path: /opt/{{ project_unique_name }} # Path where volumes are mounted in the host + +# Provider-specific configuration +provider_config: + cache: + enable: false # Whether caching is enabled (true | false) + address: lava-cache # Lava Cache service address (IP address | FQDN) + port: 23100 # Lava Cache service port + chain_id: lava-testnet-2 # Blockchain network identifier + config_path: /root/.lava # Path to provider configuration files + geolocation: 2 # Geolocation ID, used for regional distinctions + log_level: info # Logging level + moniker: Lava # Custom name for the provider instance + rewards_storage_dir: rewards-storage # Directory for storing rewards data + public_rpc_url: https://public-rpc-testnet2.lavanet.xyz:443/rpc/ # Public URL for RPC connections + total_connections: 25 # Max number of simultaneous network connections + +# Wallet configuration +wallet: + name: lava # Wallet name + password: "" # Wallet password. 
Required if the keyring_backend parameter is equal: os, file + keyring_backend: test # Backend system for managing keys and secrets + +# Network ports used by the provider +provider_ports: + grpc: 22001 # Port for gRPC service + metrics: 23001 # Port for exposing metrics + +networks: + - lava # Network label used for Docker networking + +# Blockchain chains configurations +chains: + - name: "Arbitrum mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "ARB1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://arbitrum-mainnet:8545"] } + + - name: "Avalanche mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "AVAX", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://avalanche-mainnet/ext/bc"] } + + - name: "Axelar testnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "AXELART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-testnet:26657", "ws://axelar-testnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "AXELART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["axelar-testnet:9090"] } + - { api_interface: "rest", chain_id: "AXELART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-testnet:26317"] } + + - name: "Axelar mainnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "AXELAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-mainnet:26657", "ws://axelar-mainnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "AXELAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["axelar-mainnet:16090"] } + - { api_interface: "rest", chain_id: "AXELAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://axelar-mainnet:26317"] } + + - name: "Binance mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "BSC", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://binance-mainnet"] } + + - name: "Canto mainnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://canto-mainnet:26657", "ws://canto-mainnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["canto-mainnet:9090"] } + - { api_interface: "jsonrpc", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://canto-mainnet:8545"] } + - { api_interface: "rest", chain_id: "CANTO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://canto-mainnet:1317"] } + + - name: "Celestia mainnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celestia-mainnet:26657", "ws://celestia-mainnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["celestia-mainnet:9090"] } + - { api_interface: "jsonrpc", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celestia-mainnet-light:26658"] } + - { api_interface: "rest", chain_id: "CELESTIA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celestia-mainnet:1317"] } + + - name: "Celo mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "CELO", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://celo-mainnet:8545"] } + + 
- name: "CosmosHub mainnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "COSMOSHUB", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://cosmos-mainnet:26657", "ws://cosmos-mainnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "COSMOSHUB", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["cosmos-mainnet:9090"] } + - { api_interface: "rest", chain_id: "COSMOSHUB", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://cosmos-mainnet:1317"] } + + - name: "Ethereum mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "ETH1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://ethereum-mainnet:8549"] } + + - name: "Evmos testnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-testnet:26657", "ws://evmos-testnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["evmos-testnet:9090"] } + - { api_interface: "jsonrpc", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-testnet:8545"] } + - { api_interface: "rest", chain_id: "EVMOST", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-testnet:1317"] } + + - name: "Evmos mainnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-mainnet:26657", "ws://evmos-mainnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["evmos-mainnet:9090"] } + - { api_interface: "jsonrpc", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-mainnet:8545"] } + - { api_interface: "rest", chain_id: "EVMOS", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://evmos-mainnet:1317"] } + + - name: "Fantom mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "FTM250", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://fantom-mainnet:8545"] } + + - name: "Lava testnet" + endpoints: + - { api_interface: "tendermintrpc", chain_id: "LAV1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://lava-testnet:26657", "ws://lava-testnet:26657/websocket"] } + - { api_interface: "grpc", chain_id: "LAV1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["lava-testnet:9090"] } + - { api_interface: "rest", chain_id: "LAV1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://lava-testnet:1317"] } + + - name: "Near testnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "NEART", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://near-testnet:3030"] } + + - name: "Near mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "NEAR", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://near-mainnet:3030"] } + + - name: "Polygon mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "POLYGON1", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://polygon-mainnet"] } + + - name: "Solana mainnet" + endpoints: + - { api_interface: "jsonrpc", chain_id: "SOLANA", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://solana-mainnet"] } + + - name: "Starknet mainnet" + endpoints: + - { 
api_interface: "jsonrpc", chain_id: "STRK", network_address: "0.0.0.0:22001", disable_tls: true, node_urls: ["http://starknet-mainnet:9545"] } diff --git a/impulse-automation/provider/inventory/hosts b/impulse-automation/provider/inventory/hosts new file mode 100644 index 0000000..5cab541 --- /dev/null +++ b/impulse-automation/provider/inventory/hosts @@ -0,0 +1,3 @@ +all: + hosts: + lava_provider_eu: diff --git a/impulse-automation/provider/main.yml b/impulse-automation/provider/main.yml new file mode 100644 index 0000000..ad58c1f --- /dev/null +++ b/impulse-automation/provider/main.yml @@ -0,0 +1,35 @@ +--- +# The main playbook for rpc provider deployment +- name: Deploying and managing the RPC Provider Service + hosts: all + become: true + gather_facts: true + roles: + - role: prepare + tags: + - never + - prepare + - role: deploy + tags: + - deploy + tasks: + - name: Run the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: present + tags: + - start + - name: Stop the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: stopped + tags: + - never + - stop + - name: Restart the service + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: restarted + tags: + - never + - restart diff --git a/impulse-automation/provider/roles/deploy/defaults/main.yml b/impulse-automation/provider/roles/deploy/defaults/main.yml new file mode 100644 index 0000000..74b75a1 --- /dev/null +++ b/impulse-automation/provider/roles/deploy/defaults/main.yml @@ -0,0 +1 @@ +# roles/deploy/defaults/main.yml diff --git a/impulse-automation/provider/roles/deploy/handlers/main.yml b/impulse-automation/provider/roles/deploy/handlers/main.yml new file mode 100644 index 0000000..73e7d35 --- /dev/null +++ b/impulse-automation/provider/roles/deploy/handlers/main.yml @@ -0,0 +1,16 @@ +# roles/deploy/handlers/main.yml +--- +- name: Check and possibly restart docker service + block: + - name: Get container info + community.docker.docker_container_info: + name: "{{ project_unique_name }}" + register: container_info + listen: Check and possibly restart docker service + + - name: Restart docker service if container exists + community.docker.docker_compose_v2: + project_src: "{{ project_path }}" + state: restarted + when: container_info.exists + listen: Check and possibly restart docker service diff --git a/impulse-automation/provider/roles/deploy/meta/main.yml b/impulse-automation/provider/roles/deploy/meta/main.yml new file mode 100644 index 0000000..6a70a8e --- /dev/null +++ b/impulse-automation/provider/roles/deploy/meta/main.yml @@ -0,0 +1,19 @@ +# roles/deploy/meta/main.yml +--- +dependencies: [] +galaxy_info: + author: Michael + description: The role for deploying the lava RPC provider + company: Impulse Expert | https://impulse.expert + license: GPL + min_ansible_version: "2.9" + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + - focal + galaxy_tags: + - lava + - rpc + - docker diff --git a/impulse-automation/provider/roles/deploy/tasks/main.yml b/impulse-automation/provider/roles/deploy/tasks/main.yml new file mode 100644 index 0000000..31f41da --- /dev/null +++ b/impulse-automation/provider/roles/deploy/tasks/main.yml @@ -0,0 +1,70 @@ +# roles/deploy/tasks/main.yml +--- +- name: Create a directory for the Docker Compose file + ansible.builtin.file: + path: "{{ project_path }}" + state: directory + mode: "0755" + +- name: Generate the Docker Compose file + ansible.builtin.template: + src: 
"docker-compose.yml.j2" + dest: "{{ project_path }}/docker-compose.yml" + mode: "0644" + +- name: Generate the provider.env file + ansible.builtin.template: + src: "provider.env.j2" + dest: "{{ project_path }}/provider.env" + mode: "0644" + +- name: Create the data volume directory + ansible.builtin.file: + path: "{{ volume_path }}" + state: directory + mode: "0755" + +- name: Get volume information + community.docker.docker_volume_info: + name: "{{ project_unique_name }}" + register: volume_info + +- name: Create the data volume + when: + - volume_info.exists + - volume_info.volume.Options.device != volume_path + block: + - name: Get container information + community.docker.docker_container_info: + name: "{{ project_unique_name }}" + register: container_info + + - name: Remove the container + community.docker.docker_container: + name: "{{ project_unique_name }}" + state: absent + when: container_info.exists + + - name: Remove the volume + community.docker.docker_volume: + name: "{{ project_unique_name }}" + state: absent + when: volume_info.exists + +- name: "Create the directory for the configuration file" + ansible.builtin.file: + path: "{{ volume_path }}/config" + state: directory + mode: "0755" + +- name: Generate the rpcprovider.yml file + ansible.builtin.template: + src: "rpcprovider.yml.j2" + dest: "{{ volume_path }}/config/rpcprovider.yml" + mode: "0644" + notify: + - Check and possibly restart docker service + +- name: "Create the network" + community.docker.docker_network: + name: "{{ project_name }}" diff --git a/impulse-automation/provider/roles/deploy/templates/docker-compose.yml.j2 b/impulse-automation/provider/roles/deploy/templates/docker-compose.yml.j2 new file mode 100644 index 0000000..3fc81ff --- /dev/null +++ b/impulse-automation/provider/roles/deploy/templates/docker-compose.yml.j2 @@ -0,0 +1,48 @@ +--- +name: {{ project_unique_name }} + +services: + provider: + image: {{ container.image }}:{{ container.tag }} + container_name: {{ project_unique_name }} + labels: + network: "{% if network %}{{ network }}{% else %}-no_network_set{% endif %}" + env_file: + - provider.env + volumes: + - data:{{ provider_config.config_path}} + ports: +{% for key, value in provider_ports.items() %} + - "{{ value }}:{{ value }}" +{% endfor %} + networks: +{% for value in networks %} + - {{ value }} +{% endfor %} + logging: + driver: "json-file" + options: + max-size: "100m" + max-file: "1" + deploy: + resources: + limits: + cpus: "{{ container.limits.cpu }}" + memory: "{{ container.limits.memory }}" + restart: unless-stopped + +volumes: + data: + name: {{ project_unique_name }} + driver: local + driver_opts: + type: none + o: bind + device: {{ container.volume_path }} + +networks: +{% for value in networks %} + {{ value }}: + name: {{ value }} + external: true +{% endfor %} diff --git a/impulse-automation/provider/roles/deploy/templates/provider.env.j2 b/impulse-automation/provider/roles/deploy/templates/provider.env.j2 new file mode 100644 index 0000000..f99536c --- /dev/null +++ b/impulse-automation/provider/roles/deploy/templates/provider.env.j2 @@ -0,0 +1,45 @@ +### Provider variables ### + +# Enable or disable cache based on requirements. +# Set to true to enable, false to disable. +CACHE_ENABLE="{{ provider_config.cache.enable | lower }}" + +# Specify the IP address or domain name of the cache server. +CACHE_ADDRESS="{{ provider_config.cache.address }}" + +# Define the port number on which the cache server is running. 
+CACHE_PORT="{{ provider_config.cache.port }}" + +# Unique identifier for the blockchain network. Default is 'lava-testnet-2'. +CHAIN_ID="{{ provider_config.chain_id }}" + +# Directory path for storing the provider's configuration files. +# Default path is '/root/.lava'. +CONFIG_PATH="{{ provider_config.config_path }}" + +# Geolocation of the node, used for network optimization and identification. +GEOLOCATION="{{ provider_config.geolocation }}" + +# Name of the wallet used for transactions. +WALLET="{{ wallet.name }}" + +# Storage mechanism for keys, such as 'os', 'file', or 'pass'. +KEYRING_BACKEND="{{ wallet.keyring_backend }}" + +# Verbosity level of log output. +LOGLEVEL="{{ provider_config.log_level }}" + +# Port number for exposing Prometheus metrics. +METRICS_PORT="{{ provider_ports.metrics }}" + +# Custom name to identify your provider node within the network. +MONIKER="{{ provider_config.moniker }}" + +# Directory to store rewards data. +REWARDS_STORAGE_DIR="{{ provider_config.rewards_storage_dir }}" + +# URL to access the public RPC server for the network. +PUBLIC_RPC="{{ provider_config.public_rpc_url }}" + +# Total number of simultaneous network connections the node will attempt to maintain. +TOTAL_CONNECTIONS="{{ provider_config.total_connections }}" diff --git a/impulse-automation/provider/roles/deploy/templates/rpcprovider.yml.j2 b/impulse-automation/provider/roles/deploy/templates/rpcprovider.yml.j2 new file mode 100644 index 0000000..0830511 --- /dev/null +++ b/impulse-automation/provider/roles/deploy/templates/rpcprovider.yml.j2 @@ -0,0 +1,17 @@ +endpoints: + +{% for chain in chains %} + #### {{ chain.name }} #### +{% for endpoint in chain.endpoints %} + - api-interface: {{ endpoint.api_interface }} + chain-id: {{ endpoint.chain_id }} + network-address: + address: {{ endpoint.network_address }} + disable-tls: {{ endpoint.disable_tls | lower }} + node-urls: +{% for url in endpoint.node_urls %} + - url: {{ url }} +{% endfor %} +{% endfor %} + +{% endfor %} diff --git a/impulse-automation/provider/roles/deploy/vars/main.yml b/impulse-automation/provider/roles/deploy/vars/main.yml new file mode 100644 index 0000000..c31b4b9 --- /dev/null +++ b/impulse-automation/provider/roles/deploy/vars/main.yml @@ -0,0 +1,11 @@ +# roles/deploy/vars/main.yml + +# In Ansible, the value a variable takes is determined by where it is defined, +# following Ansible's variable precedence rules. +# Variables defined in roles/deploy/vars/main.yml override values set in the inventory +# (group_vars/host_vars), because role vars rank higher in the precedence order.
+ +# Examples: +# network: testnet +# provider_config: +# chain_id: lava-testnet-2 diff --git a/impulse-automation/provider/roles/prepare/defaults/main.yml b/impulse-automation/provider/roles/prepare/defaults/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/impulse-automation/provider/roles/prepare/meta/main.yml b/impulse-automation/provider/roles/prepare/meta/main.yml new file mode 100644 index 0000000..655edb8 --- /dev/null +++ b/impulse-automation/provider/roles/prepare/meta/main.yml @@ -0,0 +1,21 @@ +# roles/prepare/meta/main.yml +--- +dependencies: [] +galaxy_info: + author: Michael + description: The role for preparing the host for the lava RPC provider + company: Impulse Expert | https://impulse.expert + license: GPL + min_ansible_version: "2.9" + platforms: + - name: Ubuntu + versions: + - xenial + - bionic + - focal + - name: Debian + versions: + - buster + - bullseye + - bookworm + diff --git a/impulse-automation/provider/roles/prepare/tasks/main.yml b/impulse-automation/provider/roles/prepare/tasks/main.yml new file mode 100644 index 0000000..7a9afc6 --- /dev/null +++ b/impulse-automation/provider/roles/prepare/tasks/main.yml @@ -0,0 +1,64 @@ +# roles/prepare/tasks/main.yml +--- +- name: Update the apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + +# Disabled: full system upgrades should be handled by a separate role or process +#- name: Upgrade all apt packages +# ansible.builtin.apt: +# upgrade: dist + +- name: Install necessary packages + ansible.builtin.apt: + name: + - software-properties-common + - apt-transport-https + - ca-certificates + - sudo + - aria2 + - curl + - htop + - wget + - jq + - lz4 + - rsync + state: present + +- name: Add Docker Repository for Debian + when: ansible_facts['distribution'] == "Debian" + ansible.builtin.shell: | + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +- name: Add Docker Repository for Ubuntu + when: ansible_facts['distribution'] == "Ubuntu" + ansible.builtin.shell: | + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Add the repository to Apt sources: + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +- name: Update apt and install docker-ce + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + update_cache: true \ No newline at end of file diff --git a/impulse-automation/provider/roles/prepare/vars/main.yml b/impulse-automation/provider/roles/prepare/vars/main.yml new file mode 100644 index 0000000..b92e9e1 --- /dev/null +++ b/impulse-automation/provider/roles/prepare/vars/main.yml @@ -0,0 +1,7 @@ +# roles/prepare/vars/main.yml + +# In Ansible, the value a variable takes is determined by where it is defined,
+# following Ansible's variable precedence rules. +# Variables defined in roles/prepare/vars/main.yml override values set in the inventory +# (group_vars/host_vars), because role vars rank higher in the precedence order. +
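To make the precedence note above concrete: when the same variable is defined both in the inventory and in a role's `vars/main.yml`, the role value wins while that role runs. Below is a minimal sketch assuming a hypothetical `network` override; the file names and values are illustrative only and are not part of this repository:

```yaml
---
# inventory/host_vars/<provider-host>.yml (inventory level; hypothetical file)
network: testnet
---
# roles/prepare/vars/main.yml (role vars level; shown purely as an illustration)
network: mainnet
# While the prepare role executes, "{{ network }}" resolves to "mainnet",
# because role vars rank above inventory host_vars in Ansible's precedence order.
# The role vars files in this repository are intentionally left as comments only,
# so inventory values remain the effective source of configuration.
```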