diff --git a/Cargo.lock b/Cargo.lock
index eaeb087be..68e6810e1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1626,6 +1626,10 @@ dependencies = [
  "globset",
  "proc-macro2",
  "quote",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "serde_yaml",
  "walkdir",
 ]
diff --git a/examples/node-fetch-network/.gitignore b/examples/node-fetch-network/.gitignore
new file mode 100644
index 000000000..f81d56eaa
--- /dev/null
+++ b/examples/node-fetch-network/.gitignore
@@ -0,0 +1,169 @@
+# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore
+
+# Logs
+
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+
+lib-cov
+
+# Coverage directory used by tools like istanbul
+
+coverage
+*.lcov
+
+# nyc test coverage
+
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+
+bower_components
+
+# node-waf configuration
+
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+
+build/Release
+
+# Dependency directories
+
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+
+web_modules/
+
+# TypeScript cache
+
+*.tsbuildinfo
+
+# Optional npm cache directory
+
+.npm
+
+# Optional eslint cache
+
+.eslintcache
+
+# Optional stylelint cache
+
+.stylelintcache
+
+# Microbundle cache
+
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+
+.node_repl_history
+
+# Output of 'npm pack'
+
+*.tgz
+
+# Yarn Integrity file
+
+.yarn-integrity
+
+# dotenv environment variable files
+
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# parcel-bundler cache (https://parceljs.org/)
+
+.cache
+.parcel-cache
+
+# Next.js build output
+
+.next
+out
+
+# Nuxt.js build / generate output
+
+.nuxt
+dist
+
+# Gatsby files
+
+.cache/
+
+# Comment in the public line in if your project uses Gatsby and not Next.js
+
+# https://nextjs.org/blog/next-9-1#public-directory-support
+
+# public
+
+# vuepress build output
+
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+
+.temp
+.cache
+
+# Docusaurus cache and generated files
+
+.docusaurus
+
+# Serverless directories
+
+.serverless/
+
+# FuseBox cache
+
+.fusebox/
+
+# DynamoDB Local files
+
+.dynamodb/
+
+# TernJS port file
+
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+
+.vscode-test
+
+# yarn v2
+
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
diff --git a/examples/node-fetch-network/fetchData.js b/examples/node-fetch-network/fetchData.js
new file mode 100644
index 000000000..389be2566
--- /dev/null
+++ b/examples/node-fetch-network/fetchData.js
@@ -0,0 +1,23 @@
+import fetch from 'node-fetch';
+
+const url = process.env.REMOTE_URL;
+
+if (!url) {
+  console.error('REMOTE_URL is not defined in the environment variables.');
+  process.exit(1);
+}
+
+fetch(url)
+  .then(response => {
+    if (!response.ok) {
+      throw new Error(`Network response was not ok: ${response.statusText}`);
+    }
+    return response.statusText;
+  })
+  .then(data => {
+    console.log('Fetched data:', data);
+  })
+  .catch(error => {
+    console.error('Fetching data failed:', error);
+    process.exit(1);
+  });
\ No newline at end of file
diff --git a/examples/node-fetch-network/package.json b/examples/node-fetch-network/package.json
new file mode 100644
index 000000000..534a314d7
--- /dev/null
+++ b/examples/node-fetch-network/package.json
@@ -0,0 +1,17 @@
+{
+  "type": "module",
+  "name": "your-project-name",
+  "version": "1.0.0",
+  "description": "A project to fetch data from a remote URL",
+  "main": "index.js",
+  "scripts": {
+    "build": "echo $REMOTE_URL && node fetchData.js",
+    "start": "node fetchData.js"
+  },
+  "dependencies": {
+    "dotenv": "^10.0.0",
+    "node-fetch": "^3.0.0"
+  },
+  "author": "Your Name",
+  "license": "MIT"
+}
diff --git a/examples/node-fetch-network/pnpm-lock.yaml b/examples/node-fetch-network/pnpm-lock.yaml
new file mode 100644
index 000000000..bde2d98e1
--- /dev/null
+++ b/examples/node-fetch-network/pnpm-lock.yaml
@@ -0,0 +1,59 @@
+lockfileVersion: '6.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+dependencies:
+  dotenv:
+    specifier: ^10.0.0
+    version: 10.0.0
+  node-fetch:
+    specifier: ^3.0.0
+    version: 3.3.2
+
+packages:
+
+  /data-uri-to-buffer@4.0.1:
+    resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==}
+    engines: {node: '>= 12'}
+    dev: false
+
+  /dotenv@10.0.0:
+    resolution: {integrity: sha512-rlBi9d8jpv9Sf1klPjNfFAuWDjKLwTIJJ/VxtoTwIR6hnZxcEOQCZg2oIL3MWBYw5GpUDKOEnND7LXTbIpQ03Q==}
+    engines: {node: '>=10'}
+    dev: false
+
+  /fetch-blob@3.2.0:
+    resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==}
+    engines: {node: ^12.20 || >= 14.13}
+    dependencies:
+      node-domexception: 1.0.0
+      web-streams-polyfill: 3.3.3
+    dev: false
+
+  /formdata-polyfill@4.0.10:
+    resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==}
+    engines: {node: '>=12.20.0'}
+    dependencies:
+      fetch-blob: 3.2.0
+    dev: false
+
+  /node-domexception@1.0.0:
+    resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==}
+    engines: {node: '>=10.5.0'}
+    dev: false
+
+  /node-fetch@3.3.2:
+    resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      data-uri-to-buffer: 4.0.1
+      fetch-blob: 3.2.0
+      formdata-polyfill: 4.0.10
+    dev: false
+
+  /web-streams-polyfill@3.3.3:
+    resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==}
+    engines: {node: '>= 8'}
+    dev: false
diff --git a/src/main.rs b/src/main.rs
index fce4c18c1..677802697 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -145,6 +145,10 @@ enum Commands {
         #[arg(long)]
         docker_host: Option<String>,
 
+        /// Adds hosts to the Docker build
+        #[arg(long, global = true)]
+        add_host: Vec<String>,
+
         /// Specify if Docker client should verify the TLS (Transport Layer Security) certificates
         #[arg(long)]
         docker_tls_verify: Option<String>,
@@ -255,6 +259,7 @@ async fn main() -> Result<()> {
             cache_from,
             docker_host,
             docker_tls_verify,
+            add_host,
             inline_cache,
             no_error_without_start,
             cpu_quota,
@@ -288,6 +293,7 @@ async fn main() -> Result<()> {
                 no_error_without_start,
                 incremental_cache_image,
                 cpu_quota,
+                add_host,
                 memory,
                 verbose,
             };
diff --git a/src/nixpacks/builder/docker/docker_helper.rs b/src/nixpacks/builder/docker/docker_helper.rs
new file mode 100644
index 000000000..938da47a2
--- /dev/null
+++ b/src/nixpacks/builder/docker/docker_helper.rs
@@ -0,0 +1,59 @@
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::error::Error;
+use std::process::Command;
+use std::str;
+
+#[derive(Serialize, Deserialize, Debug)]
+struct ContainerInfoFromDocker {
+    #[serde(rename = "Name")]
+    name: String,
+    #[serde(rename = "IPv4Address")]
+    ipv4_address: String,
+}
+
+pub struct ContainerInfo {
+    pub name: String,
+    pub ipv4_address: String,
+    pub ipv4_address_without_mask: String,
+}
+
+type Containers = HashMap<String, ContainerInfo>;
+
+pub struct DockerHelper {}
+
+impl DockerHelper {
+    pub fn containers_in_network(network: &str) -> Result<Containers, Box<dyn Error>> {
+        let output = Command::new("docker")
+            .arg("network")
+            .arg("inspect")
+            .arg(network)
+            .arg("-f")
+            .arg("{{json .Containers}}")
+            .output()?;
+
+        if output.status.success() {
+            let containers_string = str::from_utf8(&output.stdout)?;
+            let containers: HashMap<String, ContainerInfoFromDocker> =
+                serde_json::from_str(containers_string)?;
+
+            let mut vec = Vec::new();
+            for info in containers.values() {
+                let ipv4 = info.ipv4_address.split('/').next().unwrap();
+                let container_info = ContainerInfo {
+                    name: info.name.clone(),
+                    ipv4_address: info.ipv4_address.clone(),
+                    ipv4_address_without_mask: ipv4.to_string(),
+                };
+                vec.push(container_info);
+            }
+
+            return Ok(vec
+                .into_iter()
+                .map(|info| (info.name.clone(), info))
+                .collect());
+        }
+
+        Err("Docker command failed".into())
+    }
+}
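For context: `containers_in_network` shells out to `docker network inspect` with the `{{json .Containers}}` template, which prints a JSON object keyed by container ID whose values include `Name` and a CIDR-form `IPv4Address`. A minimal standalone sketch of that parsing, with a made-up container ID and address (assumes `serde` with the `derive` feature and `serde_json`):

```rust
use serde::Deserialize;
use std::collections::HashMap;

#[derive(Deserialize)]
struct ContainerInfoFromDocker {
    #[serde(rename = "Name")]
    name: String,
    #[serde(rename = "IPv4Address")]
    ipv4_address: String,
}

fn main() -> Result<(), serde_json::Error> {
    // Hypothetical `{{json .Containers}}` payload: a map keyed by container ID,
    // with the IPv4 address in CIDR notation.
    let payload = r#"{
        "0123abcd": { "Name": "nginx-test", "IPv4Address": "172.18.0.2/16" }
    }"#;

    let containers: HashMap<String, ContainerInfoFromDocker> = serde_json::from_str(payload)?;
    for info in containers.values() {
        // Dropping the "/16" mask yields the value stored in `ipv4_address_without_mask`.
        let ip = info.ipv4_address.split('/').next().unwrap();
        println!("--add-host {}:{}", info.name, ip);
    }
    Ok(())
}
```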
diff --git a/src/nixpacks/builder/docker/docker_image_builder.rs b/src/nixpacks/builder/docker/docker_image_builder.rs
index 9237b10b2..250656206 100644
--- a/src/nixpacks/builder/docker/docker_image_builder.rs
+++ b/src/nixpacks/builder/docker/docker_image_builder.rs
@@ -155,7 +155,7 @@ impl DockerImageBuilder {
             .arg("build")
             .arg(&output.root)
             .arg("-f")
-            .arg(&output.get_absolute_path("Dockerfile"))
+            .arg(output.get_absolute_path("Dockerfile"))
             .arg("-t")
             .arg(name);
 
@@ -163,6 +163,14 @@ impl DockerImageBuilder {
             docker_build_cmd.arg("--progress=plain");
         }
 
+        if !self.options.add_host.is_empty() {
+            for host in &self.options.add_host {
+                docker_build_cmd.arg("--add-host").arg(host);
+            }
+
+            docker_build_cmd.arg("--network").arg("host");
+        }
+
         if self.options.quiet {
             docker_build_cmd.arg("--quiet");
         }
diff --git a/src/nixpacks/builder/docker/incremental_cache.rs b/src/nixpacks/builder/docker/incremental_cache.rs
index b4edd6930..ddfe2f697 100644
--- a/src/nixpacks/builder/docker/incremental_cache.rs
+++ b/src/nixpacks/builder/docker/incremental_cache.rs
@@ -84,7 +84,7 @@ impl IncrementalCache {
         // #3 Use Docker import: Provide 3 seconds in a sample test
         for f in files {
             let mut docker_import_cmd = Command::new("docker");
-            docker_import_cmd.arg("import").arg(&f?.path()).arg(tag);
+            docker_import_cmd.arg("import").arg(f?.path()).arg(tag);
 
             let result = docker_import_cmd
                 .spawn()?
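The effect of the `docker_image_builder.rs` hunk above: whenever any `--add-host` entries are configured, each is appended to the `docker build` invocation, along with `--network host` — presumably so the bridge-network IPs being mapped are routable from build-time `RUN` steps. A minimal sketch of the command that gets assembled (image name, paths, and the host pair are hypothetical):

```rust
use std::process::Command;

fn main() {
    // Hypothetical name:ip pairs, in the shape DockerHelper produces.
    let add_hosts = ["nginx-test:172.18.0.2"];

    let mut docker_build_cmd = Command::new("docker");
    docker_build_cmd
        .arg("build")
        .arg(".")
        .arg("-f")
        .arg("Dockerfile")
        .arg("-t")
        .arg("my-image");

    for host in &add_hosts {
        docker_build_cmd.arg("--add-host").arg(host);
    }
    // With `--network host`, build-time RUN steps share the host's network
    // namespace, so the container IPs mapped above are reachable.
    docker_build_cmd.arg("--network").arg("host");

    println!("{docker_build_cmd:?}");
}
```

Note that `--network host` is applied unconditionally whenever add-hosts are given, which also disables build-time network isolation.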
diff --git a/src/nixpacks/builder/docker/mod.rs b/src/nixpacks/builder/docker/mod.rs
index 8267efe39..891c80c46 100644
--- a/src/nixpacks/builder/docker/mod.rs
+++ b/src/nixpacks/builder/docker/mod.rs
@@ -23,9 +23,11 @@ pub struct DockerBuilderOptions {
     pub verbose: bool,
     pub docker_host: Option<String>,
     pub docker_tls_verify: Option<String>,
+    pub add_host: Vec<String>,
 }
 
 mod cache;
+pub mod docker_helper;
 pub mod docker_image_builder;
 mod dockerfile_generation;
 pub mod file_server;
diff --git a/test-helper/Cargo.toml b/test-helper/Cargo.toml
index e2207cb96..b3123c162 100644
--- a/test-helper/Cargo.toml
+++ b/test-helper/Cargo.toml
@@ -9,7 +9,13 @@ proc-macro = true
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
+serde_json = "1.0.89"
+serde_yaml = "0.9.14"
+serde_with = { features = [
+    "macros",
+], default-features = false, version = "2.1.0" }
 globset = "0.4.9"
 proc-macro2 = "1.0.43"
 quote = "1.0.21"
 walkdir = "2.3.2"
+serde = { version = "1.0.151", features = ["derive"] }
diff --git a/tests/docker_run_tests.rs b/tests/docker_run_tests.rs
index 2a0a53b19..2dda44603 100644
--- a/tests/docker_run_tests.rs
+++ b/tests/docker_run_tests.rs
@@ -2,12 +2,13 @@ use anyhow::{Context, Result};
 use nixpacks::{
     create_docker_image,
     nixpacks::{
-        builder::docker::DockerBuilderOptions, environment::EnvironmentVariables,
-        plan::generator::GeneratePlanOptions,
+        builder::docker::docker_helper::DockerHelper, builder::docker::DockerBuilderOptions,
+        environment::EnvironmentVariables, plan::generator::GeneratePlanOptions,
     },
 };
 use std::io::{BufRead, BufReader};
 use std::process::{Command, Stdio};
+use std::str;
 use std::time::Duration;
 use uuid::Uuid;
 use wait_timeout::ChildExt;
@@ -123,6 +124,46 @@ async fn run_image(name: &str, cfg: Option<Config>) -> String {
         .join("\n")
 }
 
+async fn build_with_hosts(path: &str, add_hosts: &[String], nginx_host: String) -> String {
+    let name = Uuid::new_v4().to_string();
+    let mut env: Vec<&str> = Vec::new();
+    let env_var = format!("REMOTE_URL=http://{}", nginx_host);
+    env.push(&*env_var);
+
+    create_docker_image(
+        path,
+        env,
+        &GeneratePlanOptions::default(),
+        &DockerBuilderOptions {
+            name: Some(name.clone()),
+            quiet: true,
+            add_host: add_hosts.to_owned(),
+            ..Default::default()
+        },
+    )
+    .await
+    .unwrap();
+
+    name
+}
+
+async fn build_with_env(path: &str, env: Vec<&str>) -> anyhow::Result<()> {
+    let name = Uuid::new_v4().to_string();
+
+    create_docker_image(
+        path,
+        env,
+        &GeneratePlanOptions::default(),
+        &DockerBuilderOptions {
+            name: Some(name.clone()),
+            quiet: true,
+            ..Default::default()
+        },
+    )
+    .await
+}
+
 /// Builds a directory with default options
 /// Returns the randomly generated image name
 async fn simple_build(path: &str) -> Result<String> {
@@ -161,6 +202,7 @@ async fn build_with_build_time_env_vars(path: &str, env_vars: Vec<&str>) -> Resu
 
 const POSTGRES_IMAGE: &str = "postgres";
 const MYSQL_IMAGE: &str = "mysql";
+const NGINX_IMAGE: &str = "nginx";
 
 struct Network {
     name: String,
@@ -338,6 +380,41 @@ fn run_mysql() -> Container {
     }
 }
 
+fn run_nginx() -> Container {
+    let mut docker_cmd = Command::new("docker");
+
+    let hash = Uuid::new_v4().to_string();
+    let container_name = format!("nginx-{hash}");
+
+    // Run the container
+    docker_cmd.arg("run");
+
+    // Run detached
+    docker_cmd.arg("-d");
+
+    // Attach the generated name
+    docker_cmd.arg("--name").arg(container_name.clone());
+
+    // Use the nginx image
+    docker_cmd.arg(NGINX_IMAGE);
+
+    // Run the command
+    docker_cmd
+        .spawn()
+        .unwrap()
+        .wait()
+        .context("Building nginx")
+        .unwrap();
+
+    Container {
+        name: container_name.clone(),
+        config: Some(Config {
+            environment_variables: EnvironmentVariables::from([]),
+            network: None,
+        }),
+    }
+}
+
 #[tokio::test]
 async fn test_deno() {
     let name = simple_build("./examples/deno").await.unwrap();
@@ -504,6 +581,102 @@ async fn test_node_moon_custom_start() {
         .contains("ready - started server on 0.0.0.0:3000"));
 }
 
+#[tokio::test]
+async fn test_pnpm_network_call_working_with_add_hosts() {
+    // Create the network
+    let n = create_network();
+    let network_name = n.name.clone();
+
+    // Create the nginx instance
+    let c = run_nginx();
+    let container_name = c.name.clone();
+
+    // Attach the nginx instance to the network
+    attach_container_to_network(n.name, container_name.clone());
+
+    let containers = DockerHelper::containers_in_network(&network_name);
+
+    if containers.is_err() {
+        panic!("Failed to fetch containers in network");
+    }
+
+    let mut vec_hosts = Vec::new();
+
+    for (_, container_info) in containers.unwrap() {
+        let add_host = format!(
+            "{}:{}",
+            container_info.name, container_info.ipv4_address_without_mask
+        );
+        vec_hosts.push(add_host);
+    }
+
+    // Build the basic example, a script that fetches from the nginx container at build time
+    let name = build_with_hosts(
+        "./examples/node-fetch-network",
+        &vec_hosts,
+        container_name.clone(),
+    )
+    .await;
+
+    // Run the example on the attached network
+    let output = run_image(
+        &name,
+        Some(Config {
+            environment_variables: c.config.unwrap().environment_variables,
+            network: Some(network_name.clone()),
+        }),
+    )
+    .await;
+
+    // Cleanup containers and networks
+    stop_and_remove_container(container_name);
+    remove_network(network_name);
+
+    assert!(output.contains("Fetched data: OK"));
+}
+
+#[tokio::test]
+async fn test_pnpm_network_call_should_not_work_without_hosts() {
+    // Create the network
+    let n = create_network();
+    let network_name = n.name.clone();
+
+    // Create the nginx instance
+    let c = run_nginx();
+    let container_name = c.name.clone();
+
+    // Attach the nginx instance to the network
+    attach_container_to_network(n.name, container_name.clone());
+
+    let containers = DockerHelper::containers_in_network(&network_name);
+
+    if containers.is_err() {
+        panic!("Failed to fetch containers in network");
+    }
+
+    let mut vec_hosts = Vec::new();
+
+    for (_, container_info) in containers.unwrap() {
+        let add_host = format!(
+            "{}:{}",
+            container_info.name, container_info.ipv4_address_without_mask
+        );
+        vec_hosts.push(add_host);
+    }
+
+    let mut env: Vec<&str> = Vec::new();
+    let env_var = format!("REMOTE_URL=http://{}", container_name);
+    env.push(&*env_var);
+
+    // Build the basic example without --add-host entries; the build-time fetch should fail
+    let build_result = build_with_env("./examples/node-fetch-network", env).await;
+
+    assert!(build_result.is_err());
+
+    stop_and_remove_container(container_name);
+    remove_network(network_name);
+}
+
 #[tokio::test]
 async fn test_prisma_postgres() -> Result<()> {
     // Create the network
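Both tests collect the `name:ip` pairs with the same loop, and the second test builds `vec_hosts` without ever using it. A hypothetical shared helper — sketched here under the test file's existing imports, not part of the diff — would remove that duplication:

```rust
use nixpacks::nixpacks::builder::docker::docker_helper::DockerHelper;

// Hypothetical test helper: turn every container in a network into a
// `name:ip` pair suitable for `--add-host`. Panics on failure, as the
// tests currently do.
fn add_host_pairs(network: &str) -> Vec<String> {
    DockerHelper::containers_in_network(network)
        .expect("Failed to fetch containers in network")
        .into_values()
        .map(|info| format!("{}:{}", info.name, info.ipv4_address_without_mask))
        .collect()
}
```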
"dependsOn": [ + "install" + ], + "cmds": [ + "pnpm run build" + ], + "cacheDirectories": [ + "node_modules/.cache" + ] + }, + "install": { + "name": "install", + "dependsOn": [ + "setup" + ], + "cmds": [ + "pnpm i --frozen-lockfile" + ], + "cacheDirectories": [ + "/root/.local/share/pnpm/store/v3" + ], + "paths": [ + "/app/node_modules/.bin" + ] + }, + "setup": { + "name": "setup", + "nixPkgs": [ + "nodejs_18", + "pnpm-8_x" + ], + "nixOverlays": [ + "https://github.com/railwayapp/nix-npm-overlay/archive/main.tar.gz" + ], + "nixpkgsArchive": "[archive]" + } + }, + "start": { + "cmd": "pnpm run start" + } +}