The workflow JSON
Copy or download the full n8n workflow JSON below, paste it into a new n8n workflow, add your credentials, and activate it. Full import guide →
{
"id": "d3xtaER6gl4aqLZR",
"meta": {
"templateCredsSetupCompleted": true
},
"name": "PUQ Docker NextCloud deploy",
"tags": [],
"nodes": [
{
"id": "dc9d4284-0ff7-4068-af3d-2b7f38451118",
"name": "If",
"type": "n8n-nodes-base.if",
"position": [
540,
920
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "or",
"conditions": [
{
"id": "b702e607-888a-42c9-b9a7-f9d2a64dfccd",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.server_domain }}",
"rightValue": "d01-test.uuq.pl"
},
{
"id": "8a6662a4-4539-4ab1-bd5b-46b0a0d6e023",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.server_domain }}",
"rightValue": "d02-test.uuq.pl"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "b015bca6-fe71-4eb4-8e99-2904911c03b3",
"name": "Parametrs",
"type": "n8n-nodes-base.set",
"position": [
320,
920
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "370ddc4e-0fc0-48f6-9b30-ebdfba72c62f",
"name": "clients_dir",
"type": "string",
"value": "/opt/docker/clients"
},
{
"id": "92202bb8-6113-4bc5-9a29-79d238456df2",
"name": "mount_dir",
"type": "string",
"value": "/mnt"
},
{
"id": "baa52df2-9c10-42b2-939f-f05ea85ea2be",
"name": "screen_left",
"type": "string",
"value": "{{"
},
{
"id": "2b19ed99-2630-412a-98b6-4be44d35d2e7",
"name": "screen_right",
"type": "string",
"value": "}}"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "b0c5ccb8-0692-4bb0-99e1-769fde372e0f",
"name": "API",
"type": "n8n-nodes-base.webhook",
"position": [
0,
920
],
"parameters": {
"path": "docker-nextcloud",
"options": {},
"httpMethod": [
"POST"
],
"responseMode": "responseNode",
"authentication": "basicAuth",
"multipleMethods": true
},
"credentials": {
"httpBasicAuth": {
"name": "<your credential>"
}
},
"typeVersion": 2
},
{
"id": "bcaf7ce1-464a-492e-b7f5-50ba8e465171",
"name": "422-Invalid server domain",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
500,
1240
],
"parameters": {
"options": {
"responseCode": 422
},
"respondWith": "json",
"responseBody": "[{\n \"status\": \"error\",\n \"error\": \"Invalid server domain\"\n}]"
},
"typeVersion": 1.1,
"alwaysOutputData": false
},
{
"id": "3c642087-bd6b-4996-890b-4d50fbca8c55",
"name": "Container Actions",
"type": "n8n-nodes-base.switch",
"position": [
940,
1740
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "start",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_start"
}
]
},
"renameOutput": true
},
{
"outputKey": "stop",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_stop"
}
]
},
"renameOutput": true
},
{
"outputKey": "mount_disk",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "727971bf-4218-41c1-9b07-22df4b947852",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_mount_disk"
}
]
},
"renameOutput": true
},
{
"outputKey": "unmount_disk",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "0c80b1d9-e7ca-4cf3-b3ac-b40fdf4dd8f8",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_unmount_disk"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_get_acl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "72a60c6b-5dc5-48db-8d3a-e083ffad6ae2",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_get_acl"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_set_acl",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "74eb2334-6176-46ef-b444-d99b439fea17",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_set_acl"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_get_net",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "817ef082-a2d8-4b13-a8df-6e946878653b",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_get_net"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "396e6074-98ec-47df-956c-ce5c3b75e57e",
"name": "Container Stats",
"type": "n8n-nodes-base.switch",
"position": [
940,
1080
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "inspect",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_information_inspect"
}
]
},
"renameOutput": true
},
{
"outputKey": "stats",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_information_stats"
}
]
},
"renameOutput": true
},
{
"outputKey": "log",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "50ede522-af22-4b7a-b1fd-34b27fd3fadd",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_log"
}
]
},
"renameOutput": true
},
{
"outputKey": "dependent_containers_information_stats",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "d3070310-d3c2-4200-9765-495cf69fa835",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "dependent_containers_information_stats"
}
]
},
"renameOutput": true
},
{
"outputKey": "container_update_dns_record",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "dc17d6ad-4fa1-4006-8718-8188efa5f458",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "container_update_dns_record"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "d0084a58-b157-4635-955a-8638f348bf72",
"name": "Inspect",
"type": "n8n-nodes-base.set",
"position": [
1260,
760
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}\"\n\nINSPECT_JSON=\"{}\"\nif sudo docker ps -a --filter \"name=$CONTAINER_NAME\" | grep -q \"$CONTAINER_NAME\"; then\n INSPECT_JSON=$(sudo docker inspect \"$CONTAINER_NAME\")\nfi\n\necho \"{\\\"inspect\\\": $INSPECT_JSON}\"\n\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "cec87c49-d7ea-4407-bc4c-21ea75b25baa",
"name": "Stat",
"type": "n8n-nodes-base.set",
"position": [
1260,
920
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"\nSTATUS_FILE=\"$COMPOSE_DIR/status.json\"\nIMG_FILE=\"$COMPOSE_DIR/data.img\"\nMOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"\n\n# Initialize empty container data\nINSPECT_JSON=\"{}\"\nSTATS_JSON=\"{}\"\n\n# Check if container is running\nif sudo docker ps -a --filter \"name=$CONTAINER_NAME\" | grep -q \"$CONTAINER_NAME\"; then\n # Get Docker inspect info in JSON (as raw string)\n INSPECT_JSON=$(sudo docker inspect \"$CONTAINER_NAME\")\n\n # Get Docker stats info in JSON (as raw string)\n STATS_JSON=$(sudo docker stats --no-stream --format \"{{ $('Parametrs').item.json.screen_left }}json .{{ $('Parametrs').item.json.screen_right }}\" \"$CONTAINER_NAME\")\n STATS_JSON=${STATS_JSON:-'{}'}\nfi\n\n# Initialize disk info variables\nMOUNT_USED=\"N/A\"\nMOUNT_FREE=\"N/A\"\nMOUNT_TOTAL=\"N/A\"\nMOUNT_PERCENT=\"N/A\"\nIMG_SIZE=\"N/A\"\nIMG_PERCENT=\"N/A\"\nDISK_STATS_IMG=\"N/A\"\n\n# Check if mount directory exists and is accessible\nif [ -d \"$MOUNT_DIR\" ]; then\n if mount | grep -q \"$MOUNT_DIR\"; then\n # Get disk usage for mounted directory\n DISK_STATS_MOUNT=$(df -h \"$MOUNT_DIR\" | tail -n 1)\n MOUNT_USED=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $3}')\n MOUNT_FREE=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $4}')\n MOUNT_TOTAL=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $2}')\n MOUNT_PERCENT=$(echo \"$DISK_STATS_MOUNT\" | awk '{print $5}')\n fi\nfi\n\n# Check if image file exists\nif [ -f \"$IMG_FILE\" ]; then\n # Get disk usage for image file\n IMG_SIZE=$(du -sh \"$IMG_FILE\" | awk '{print $1}')\nfi\n\n# Manually create a combined JSON object\nFINAL_JSON=\"{\\\"inspect\\\": $INSPECT_JSON, \\\"stats\\\": $STATS_JSON, \\\"disk\\\": {\\\"mounted\\\": {\\\"used\\\": \\\"$MOUNT_USED\\\", \\\"free\\\": \\\"$MOUNT_FREE\\\", \\\"total\\\": \\\"$MOUNT_TOTAL\\\", \\\"percent\\\": \\\"$MOUNT_PERCENT\\\"}, \\\"img_file\\\": {\\\"size\\\": \\\"$IMG_SIZE\\\"}}}\"\n\n# Output the result\necho \"$FINAL_JSON\"\n\nexit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "80dcd9b2-f1f5-44c3-98e8-38dae5ad4edb",
"name": "Start",
"type": "n8n-nodes-base.set",
"position": [
1400,
1500
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"\nSTATUS_FILE=\"$COMPOSE_DIR/status.json\"\nIMG_FILE=\"$COMPOSE_DIR/data.img\"\nMOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"\n\n# Function to log an error, write to status file, and print to console\nhandle_error() {\n echo \"error: $1\"\n exit 1\n}\n\nif ! df -h | grep -q \"$MOUNT_DIR\"; then\n handle_error \"The file $IMG_FILE is not mounted to $MOUNT_DIR\"\nfi\n\nif sudo docker ps --filter \"name={{ $('API').item.json.body.domain }}\" --filter \"status=running\" -q | grep -q .; then\n handle_error \"{{ $('API').item.json.body.domain }} container is running\"\nfi\n\n# Change to the compose directory\ncd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"\n\n# Start the Docker containers\nif ! sudo docker-compose up -d > /dev/null 2>error.log; then\n ERROR_MSG=$(tail -n 10 error.log)\n handle_error \"Docker-compose failed: $ERROR_MSG\"\nfi\n\n# Success\necho \"success\"\n\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "9cde27ca-4749-4660-9d46-d3161946b627",
"name": "Stop",
"type": "n8n-nodes-base.set",
"position": [
1400,
1660
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"\nSTATUS_FILE=\"$COMPOSE_DIR/status.json\"\nIMG_FILE=\"$COMPOSE_DIR/data.img\"\nMOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"\n\n# Function to log an error, write to status file, and print to console\nhandle_error() {\n echo \"error: $1\"\n exit 1\n}\n\n# Check if Docker container is running\nif ! sudo docker ps --filter \"name={{ $('API').item.json.body.domain }}\" --filter \"status=running\" -q | grep -q .; then\n handle_error \"{{ $('API').item.json.body.domain }} container is not running\"\nfi\n\n# Stop and remove the Docker containers (also remove associated volumes)\nif ! sudo docker-compose -f \"$COMPOSE_DIR/docker-compose.yml\" down > /dev/null 2>&1; then\n handle_error \"Failed to stop and remove docker-compose containers\"\nfi\n\necho \"success\"\n\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "f957ffb7-ccb5-41b2-b89e-ef1a92942251",
"name": "Mount Disk",
"type": "n8n-nodes-base.set",
"position": [
1400,
1820
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"\nSTATUS_FILE=\"$COMPOSE_DIR/status.json\"\nIMG_FILE=\"$COMPOSE_DIR/data.img\"\nMOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"\n\n# Function to log an error, write to status file, and print to console\nhandle_error() {\n echo \"error: $1\"\n exit 1\n}\n\n# Create necessary directories with permissions\nsudo mkdir -p \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to create $MOUNT_DIR\"\nsudo chmod 777 \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to set permissions on $MOUNT_DIR\"\n\nif df -h | grep -q \"$MOUNT_DIR\"; then\n handle_error \"The file $IMG_FILE is mounted to $MOUNT_DIR\"\nfi\n\nif ! grep -q \"$IMG_FILE\" /etc/fstab; then\n echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to add entry to /etc/fstab\"\nfi\n\nsudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"\n\necho \"success\"\n\nexit 0\n "
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "00cb7b5b-429e-494f-b2a9-1c0c45ac8d66",
"name": "Unmount Disk",
"type": "n8n-nodes-base.set",
"position": [
1400,
1980
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/{{ $('API').item.json.body.domain }}\"\nSTATUS_FILE=\"$COMPOSE_DIR/status.json\"\nIMG_FILE=\"$COMPOSE_DIR/data.img\"\nMOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}\"\n\n# Function to log an error, write to status file, and print to console\nhandle_error() {\n echo \"error: $1\"\n exit 1\n}\n\nif ! df -h | grep -q \"$MOUNT_DIR\"; then\n handle_error \"The file $IMG_FILE is not mounted to $MOUNT_DIR\"\nfi\n\n# Remove the mount entry from /etc/fstab if it exists\nif grep -q \"$IMG_FILE\" /etc/fstab; then\n sudo sed -i \"\\|$(printf '%s\\n' \"$IMG_FILE\" | sed 's/[.[\\*^$]/\\\\&/g')|d\" /etc/fstab\nfi\n\n# Unmount the image if it is mounted (using fstab)\nif mount | grep -q \"$MOUNT_DIR\"; then\n sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"\nfi\n\n# Remove the mount directory (if needed)\nif ! sudo rm -rf \"$MOUNT_DIR\" > /dev/null 2>&1; then\n handle_error \"Failed to remove $MOUNT_DIR\"\nfi\n\necho \"success\"\n\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "49487b07-8b7f-48c4-b7d0-819336ce6691",
"name": "Log",
"type": "n8n-nodes-base.set",
"position": [
1420,
1040
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"\nLOGS_JSON=\"{}\"\n\n# Function to return error in JSON format\nhandle_error() {\n echo \"{\\\"status\\\": \\\"error\\\", \\\"message\\\": \\\"$1\\\"}\"\n exit 1\n}\n\n# Check if the container exists\nif ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then\n handle_error \"Container $CONTAINER_NAME not found\"\nfi\n\n# Get logs of the container\nLOGS=$(sudo docker logs --tail 1000 \"$CONTAINER_NAME\" 2>&1)\nif [ $? -ne 0 ]; then\n handle_error \"Failed to retrieve logs for $CONTAINER_NAME\"\nfi\n\n# Escape double quotes in logs for valid JSON\nLOGS_ESCAPED=$(echo \"$LOGS\" | sed 's/\"/\\\\\"/g' | sed ':a;N;$!ba;s/\\n/\\\\n/g')\n\n# Format logs as JSON\nLOGS_JSON=\"{\\\"logs\\\": \\\"$LOGS_ESCAPED\\\"}\"\n\necho \"$LOGS_JSON\"\nexit 0"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "f8dfb4a8-5887-4796-9d1e-f882947fe9e8",
"name": "Sticky Note",
"type": "n8n-nodes-base.stickyNote",
"position": [
0,
0
],
"parameters": {
"color": 6,
"width": 639,
"height": 909,
"content": "## \ud83d\udc4b Welcome to PUQ Docker NextCloud deploy!\n# Template for Docker NextCloud: API Backend for WHMCS/WISECP by PUQcloud\n\nThis is an Docker NextCloud template that creates an API backend for the WHMCS/WISECP module developed by PUQcloud.\n\n## Setup Instructions\n\n### 1. Configure API Webhook and SSH Access\n- Create a Credential (Basic Auth) for the **Webhook API Block** in n8n.\n- Create a Credential for **SSH access** to a server with Docker installed (**SSH Block**).\n\n### 2. Install Required Packages on the Docker Server\nRun the following command on your server:\n```\napt-get install sqlite3 apache2-utils -y\n```\n### 3. Modify Template Parameters\nIn the **Parameters** block of the template, update the following settings:\n\n- `server_domain` \u2013 must match the domain of the WHMCS/WISECP Docker server.\n- `clients_dir` \u2013 directory where user data related to Docker and disks will be stored.\n- `mount_dir` \u2013 default mount point for the container disk (recommended not to change).\n\n**Do not modify** the following technical parameters:\n\n- `screen_left`\n- `screen_right`\n\n## Additional Resources\n- Full documentation: [https://doc.puq.info/books/docker-nextcloud-whmcs-module](https://doc.puq.info/books/docker-nextcloud-whmcs-module)\n- WHMCS module: [https://puqcloud.com/whmcs-module-docker-nextcloud.php](https://puqcloud.com/whmcs-module-docker-nextcloud.php)\n\n"
},
"typeVersion": 1
},
{
"id": "29bd957b-a5be-4a6e-81e3-ba7d88462d93",
"name": "Deploy-docker-compose",
"type": "n8n-nodes-base.set",
"position": [
1340,
20
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "docker-compose",
"type": "string",
"value": "=version: \"3.8\"\n\nservices:\n {{ $('API').item.json.body.domain }}_nextcloud:\n image: nextcloud:latest\n container_name: {{ $('API').item.json.body.domain }}_nextcloud\n environment:\n NEXTCLOUD_ADMIN_USER: {{ $('API').item.json.body.nc_admin_user }}\n NEXTCLOUD_ADMIN_PASSWORD: {{ $('API').item.json.body.nc_admin_password }}\n NEXTCLOUD_TRUSTED_DOMAINS: {{ $('API').item.json.body.domain }}\n MYSQL_PASSWORD: {{ $('API').item.json.body.mysql_password }}\n MYSQL_DATABASE: {{ $('API').item.json.body.mysql_database }}\n MYSQL_USER: {{ $('API').item.json.body.mysql_user }}\n MYSQL_HOST: {{ $('API').item.json.body.domain }}_db\n REDIS_HOST: {{ $('API').item.json.body.domain }}_redis\n VIRTUAL_HOST: {{ $('API').item.json.body.domain }}\n LETSENCRYPT_HOST: {{ $('API').item.json.body.domain }}\n volumes:\n - \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/config:/var/www/html/config\"\n - \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/data:/var/www/html/data\"\n - \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/html:/var/www/html\"\n networks:\n - nginx-proxy_web\n depends_on:\n - {{ $('API').item.json.body.domain }}_db\n - {{ $('API').item.json.body.domain }}_redis\n - {{ $('API').item.json.body.domain }}_collabora\n mem_limit: \"{{ $('API').item.json.body.ram }}G\"\n cpus: \"{{ $('API').item.json.body.cpu }}\"\n\n {{ $('API').item.json.body.domain }}_db:\n image: mariadb:11.4\n container_name: {{ $('API').item.json.body.domain }}_db\n environment:\n MYSQL_ROOT_PASSWORD: {{ $('API').item.json.body.mysql_root_password }}\n MYSQL_PASSWORD: {{ $('API').item.json.body.mysql_password }}\n MYSQL_DATABASE: {{ $('API').item.json.body.mysql_database }}\n MYSQL_USER: {{ $('API').item.json.body.mysql_user }}\n volumes:\n - \"{{ $('Parametrs').item.json.mount_dir }}/{{ $('API').item.json.body.domain }}/db:/var/lib/mysql\"\n networks:\n - nginx-proxy_web\n mem_limit: \"{{ Number($('API').item.json.body.ram) / 2 }}G\"\n cpus: \"{{ Number($('API').item.json.body.cpu) / 2 }}\"\n\n {{ $('API').item.json.body.domain }}_redis:\n image: redis:alpine\n container_name: {{ $('API').item.json.body.domain }}_redis\n networks:\n - nginx-proxy_web\n mem_limit: \"{{ Number($('API').item.json.body.ram) / 4 }}G\"\n cpus: \"{{ Number($('API').item.json.body.cpu) / 4 }}\"\n\n {{ $('API').item.json.body.domain }}_collabora:\n image: collabora/code\n container_name: {{ $('API').item.json.body.domain }}_collabora\n environment:\n - domain={{ $('API').item.json.body.office_domain_escaped }}:443\n - server_name=office.{{ $('API').item.json.body.domain }}\n - username={{ $('API').item.json.body.mysql_user }}\n - password={{ $('API').item.json.body.mysql_password }}\n - \"dictionaries=ru_RU uk_UA pl_PL en\"\n - \"extra_params=--o:ssl.enable=true --o:ssl.termination=true --o:net.proto=https --o:ssl.le=true --o:storage.wopi.host=https://{{ $('API').item.json.body.domain }}\"\n - VIRTUAL_HOST=office.{{ $('API').item.json.body.domain }}\n - LETSENCRYPT_HOST=office.{{ $('API').item.json.body.domain }}\n - VIRTUAL_PROTO=https\n - VIRTUAL_PORT=9980\n cap_add:\n - MKNOD\n - SYS_ADMIN\n extra_hosts:\n - \"{{ $('API').item.json.body.domain }}:77.87.125.201\"\n dns:\n - 8.8.8.8\n - 8.8.4.4\n networks:\n - nginx-proxy_web\n mem_limit: \"{{ Number($('API').item.json.body.ram) }}G\"\n cpus: \"{{ Number($('API').item.json.body.cpu) / 2 }}\"\n\nnetworks:\n nginx-proxy_web:\n external: true\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "4243c90b-de8a-4931-972b-5f700edb09d4",
"name": "Version",
"type": "n8n-nodes-base.set",
"position": [
1380,
2640
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\n# Define the container name dynamically using an API call\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"\nVERSION_JSON=\"{}\"\n\n# Function to handle errors and return a JSON-formatted message\nhandle_error() {\n echo \"{\\\"status\\\": \\\"error\\\", \\\"message\\\": \\\"$1\\\"}\"\n exit 1\n}\n\n# Check if the container exists by searching for its name in the list of all Docker containers\nif ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then\n handle_error \"Container $CONTAINER_NAME not found\"\nfi\n\n# Retrieve the Nextcloud status as a JSON response from the container\n# The '-u 33' option ensures that the command is executed as the Nextcloud user (www-data)\nNEXTCLOUD_STATUS=$(sudo docker exec -u 33 \"$CONTAINER_NAME\" php occ status --output=json 2>/dev/null)\n\n# Validate if the command was executed successfully and if the output is not empty\nif [ $? -ne 0 ] || [ -z \"$NEXTCLOUD_STATUS\" ]; then\n handle_error \"Failed to retrieve Nextcloud status for $CONTAINER_NAME\"\nfi\n\n# Extract the Nextcloud version string from the JSON response\nVERSION=$(echo \"$NEXTCLOUD_STATUS\" | jq -r '.versionstring')\n\n# Ensure that a valid version string was extracted\nif [ -z \"$VERSION\" ]; then\n handle_error \"Failed to parse Nextcloud version from response\"\nfi\n\n# Construct a JSON-formatted output containing the Nextcloud version\nVERSION_JSON=\"{\\\"version\\\": \\\"$VERSION\\\"}\"\n\n# Print the JSON result\necho \"$VERSION_JSON\"\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "4f13c4f2-82dd-478f-915b-247a071db107",
"name": "Users",
"type": "n8n-nodes-base.set",
"position": [
1380,
2780
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\n# Define the container name dynamically using an API call\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"\nUSERS_JSON=\"{}\"\n\n# Function to handle errors and return a JSON-formatted message\nhandle_error() {\n echo \"{\\\"status\\\": \\\"error\\\", \\\"message\\\": \\\"$1\\\"}\"\n exit 1\n}\n\n# Check if the container exists by searching for its name in the list of all Docker containers\nif ! sudo docker ps -a | grep -q \"$CONTAINER_NAME\" > /dev/null 2>&1; then\n handle_error \"Container $CONTAINER_NAME not found\"\nfi\n\n# Retrieve the list of Nextcloud users and reformat it into a proper JSON array\nUSERS=$(sudo docker exec -u 33 \"$CONTAINER_NAME\" php occ user:list --output=json 2>/dev/null | jq -c 'to_entries | map({username: .key, displayname: .value})')\n\n# Validate if the command executed successfully and output is not empty\nif [ $? -ne 0 ] || [ -z \"$USERS\" ]; then\n handle_error \"Failed to retrieve users from Nextcloud\"\nfi\n\n# Construct a JSON-formatted output containing all retrieved users\nUSERS_JSON=\"{\\\"users\\\": $USERS}\"\n\n# Print the JSON result\necho \"$USERS_JSON\"\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "6d385bc7-01f1-4d42-b16e-a2e45927ef7f",
"name": "Change Password",
"type": "n8n-nodes-base.set",
"position": [
1380,
2960
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"\nNC_USER=\"{{ $('API').item.json.body.user_email }}\"\nNEW_PASSWORD=\"{{ $('API').item.json.body.password }}\"\n\n# Function to output error in JSON format and exit with code 1\nhandle_error() {\n echo \"{\\\"status\\\": \\\"error\\\", \\\"message\\\": \\\"$1\\\"}\"\n exit 1\n}\n\n# Check if container name is provided\nif [ -z \"$CONTAINER_NAME\" ]; then\n handle_error \"No container name provided\"\nfi\n\n# Check if Nextcloud username is provided\nif [ -z \"$NC_USER\" ]; then\n handle_error \"No Nextcloud user provided\"\nfi\n\n# Check if password is provided\nif [ -z \"$NEW_PASSWORD\" ]; then\n handle_error \"No password provided\"\nfi\n\n# Run command in container\n# -u 33 => as UID 33 (often www-data in Nextcloud)\n# -e OC_PASS=\"$NEW_PASSWORD\" => pass password through environment to container\n# php occ user:resetpassword --password-from-env \"$NC_USER\"\n# returns 0 if successful\n\nOUTPUT=$( sudo docker exec -u 33 \\\n -e OC_PASS=\"$NEW_PASSWORD\" \\\n \"$CONTAINER_NAME\" \\\n php occ user:resetpassword --password-from-env \"$NC_USER\" 2>&1 )\n\n# Check return code\nif [ $? -ne 0 ]; then\n handle_error \"Failed to reset password. Output: $OUTPUT\"\nfi\n\necho \"{\\\"status\\\": \\\"success\\\"}\"\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "dd283191-a5cd-4d29-8c2d-0ef42b63f69c",
"name": "NextCloud",
"type": "n8n-nodes-base.switch",
"position": [
920,
2620
],
"parameters": {
"rules": {
"values": [
{
"outputKey": "version",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "66ad264d-5393-410c-bfa3-011ab8eb234a",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "app_version"
}
]
},
"renameOutput": true
},
{
"outputKey": "users",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "b48957a0-22c0-4ac0-82ef-abd9e7ab0207",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "app_users"
}
]
},
"renameOutput": true
},
{
"outputKey": "change_password",
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "7c862a6f-5df1-499c-b9c6-9b266e2bebec",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $('API').item.json.body.command }}",
"rightValue": "change_password"
}
]
},
"renameOutput": true
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "9f5e3d3e-4f6d-4967-aefe-b953c5c3418b",
"name": "nginx",
"type": "n8n-nodes-base.set",
"position": [
1080,
140
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "main",
"type": "string",
"value": "=# Increase max body size for large file uploads\nclient_max_body_size 50000M;\n\n# Proxy headers\nproxy_set_header Host $http_host;\nproxy_set_header X-Real-IP $remote_addr;\nproxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\nproxy_set_header X-Forwarded-Proto $scheme;\n\n# WebSocket support\nproxy_http_version 1.1;\nproxy_set_header Upgrade $http_upgrade;\nproxy_set_header Connection \"upgrade\";\n\n# Timeouts\nproxy_read_timeout 600s;\nproxy_send_timeout 600s;\nsend_timeout 600s;\n\n# Additional optimizations\nproxy_buffering off;\nproxy_buffer_size 128k;\nproxy_buffers 4 256k;\nproxy_busy_buffers_size 256k;\nproxy_temp_file_write_size 256k;\nproxy_connect_timeout 600s;\n"
},
{
"id": "6507763a-21d4-4ff0-84d2-5dc9d21b7430",
"name": "main_location",
"type": "string",
"value": "="
},
{
"id": "d00aa07a-0641-43ef-8fd2-5fb9ef62e313",
"name": "office",
"type": "string",
"value": "=server_name office.{{ $('API').item.json.body.domain }};\n\n# static files\n location ^~ /browser {\n proxy_pass https://office.{{ $('API').item.json.body.domain }};\n proxy_set_header Host $host;\n }\n\n\n # WOPI discovery URL\n location ^~ /hosting/discovery {\n proxy_pass https://office.{{ $('API').item.json.body.domain }};\n proxy_set_header Host $host;\n }\n\n\n # Capabilities\n location ^~ /hosting/capabilities {\n proxy_pass https://office.{{ $('API').item.json.body.domain }};\n proxy_set_header Host $host;\n }\n\n\n # main websocket\n location ~ ^/cool/(.*)/ws$ {\n proxy_pass https://office.{{ $('API').item.json.body.domain }};\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"Upgrade\";\n proxy_set_header Host $host;\n proxy_read_timeout 36000s;\n }\n\n\n # download, presentation and image upload\n location ~ ^/(c|l)ool {\n proxy_pass https://office.{{ $('API').item.json.body.domain }};\n proxy_set_header Host $host;\n }\n\n # Admin Console websocket\n location ^~ /cool/adminws {\n proxy_pass https://office.{{ $('API').item.json.body.domain }};\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"Upgrade\";\n proxy_set_header Host $host;\n proxy_read_timeout 36000s;\n }\n"
},
{
"id": "c00fb803-8b9f-4aca-a1b1-2e3da42fc8d1",
"name": "office_location",
"type": "string",
"value": "="
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "fa40012b-0e58-4d6c-af19-b9dd6c72386d",
"name": "Test Connection",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
-40
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\n# Function to log an error, print to console\nhandle_error() {\n echo \"error: $1\"\n exit 1\n}\n\n# Check if Docker is installed\nif ! command -v docker &> /dev/null; then\n handle_error \"Docker is not installed\"\nfi\n\n# Check if Docker service is running\nif ! systemctl is-active --quiet docker; then\n handle_error \"Docker service is not running\"\nfi\n\n# Check if nginx-proxy container is running\nif ! sudo docker ps --filter \"name=nginx-proxy\" --filter \"status=running\" -q > /dev/null; then\n handle_error \"nginx-proxy container is not running\"\nfi\n\n# Check if letsencrypt-nginx-proxy-companion container is running\nif ! sudo docker ps --filter \"name=letsencrypt-nginx-proxy-companion\" --filter \"status=running\" -q > /dev/null; then\n handle_error \"letsencrypt-nginx-proxy-companion container is not running\"\nfi\n\n# If everything is successful\necho \"success\"\n\nexit 0\n"
}
]
}
},
"typeVersion": 3.4,
"alwaysOutputData": true
},
{
"id": "12240691-bcbe-407c-b53c-89cf84bc190f",
"name": "ChangePackage",
"type": "n8n-nodes-base.set",
"onError": "continueRegularOutput",
"position": [
1920,
840
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "21f4453e-c136-4388-be90-1411ae78e8a5",
"name": "sh",
"type": "string",
"value": "=#!/bin/bash\n\n# Get values for variables from templates\nDOMAIN=\"{{ $('API').item.json.body.domain }}\"\nCONTAINER_NAME=\"{{ $('API').item.json.body.domain }}_nextcloud\"\nCOMPOSE_DIR=\"{{ $('Parametrs').item.json.clients_dir }}/$DOMAIN\"\nCOMPOSE_FILE=\"$COMPOSE_DIR/docker-compose.yml\"\nSTATUS_FILE=\"$COMPOSE_DIR/status\"\nIMG_FILE=\"$COMPOSE_DIR/data.img\"\nNGINX_DIR=\"$COMPOSE_DIR/nginx\"\nVHOST_DIR=\"/opt/docker/nginx-proxy/nginx/vhost.d\"\nMOUNT_DIR=\"{{ $('Parametrs').item.json.mount_dir }}/$DOMAIN\"\nDOCKER_COMPOSE_TEXT='{{ JSON.stringify($('Deploy-docker-compose').item.json['docker-compose']).base64Encode() }}'\n\nNGINX_MAIN_TEXT='{{ JSON.stringify($('nginx').item.json['main']).base64Encode() }}'\nNGINX_MAIN_FILE=\"$NGINX_DIR/$DOMAIN\"\nVHOST_MAIN_FILE=\"$VHOST_DIR/$DOMAIN\"\n\nNGINX_MAIN_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['main_location']).base64Encode() }}'\nNGINX_MAIN_LOCATION_FILE=\"$NGINX_DIR/$DOMAIN\"_location\nVHOST_MAIN_LOCATION_FILE=\"$VHOST_DIR/$DOMAIN\"_location\n\nNGINX_OFFICE_TEXT='{{ JSON.stringify($('nginx').item.json['office']).base64Encode() }}'\nNGINX_OFFICE_FILE=\"$NGINX_DIR/office.$DOMAIN\"\nVHOST_OFFICE_FILE=\"$VHOST_DIR/office.$DOMAIN\"\n\nNGINX_OFFICE_LOCATION_TEXT='{{ JSON.stringify($('nginx').item.json['office_location']).base64Encode() }}'\nNGINX_OFFICE_LOCATION_FILE=\"$NGINX_DIR/office.$DOMAIN\"_location\nVHOST_OFFICE_LOCATION_FILE=\"$VHOST_DIR/office.$DOMAIN\"_location\n\nDISK_SIZE=\"{{ $('API').item.json.body.disk }}\"\n\n# Function to log an error, write to status file, and print to office\nhandle_error() {\n STATUS_JSON=\"{\\\"status\\\": \\\"error\\\", \\\"message\\\": \\\"$1\\\"}\"\n echo \"$STATUS_JSON\" | sudo tee \"$STATUS_FILE\" > /dev/null\n echo \"error: $1\"\n exit 1\n}\n\n# Get nginx-proxy IP address before installing Nextcloud Office\nget_proxy_ip() {\n local ip=\"\"\n local retries=10 # Try a few times\n local count=0\n while [[ -z \"$ip\" && $count -lt $retries ]]; do\n ip=$(sudo 
docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' nginx-proxy)\n if [[ -z \"$ip\" ]]; then\n echo \"[DEBUG] nginx-proxy IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"\n sleep 2 # Wait a bit before retrying\n fi\n ((count++))\n done\n\n if [[ -z \"$ip\" ]]; then\n echo \"[ERROR] Failed to retrieve nginx-proxy IP after $retries attempts!\" >> \"$STATUS_FILE\"\n handle_error \"Failed to retrieve nginx-proxy IP\"\n fi\n\n echo \"[DEBUG] Detected nginx-proxy IP: $ip\" >> \"$STATUS_FILE\"\n echo \"$ip\"\n}\n\n# Get the IP address of Nextcloud Office\nget_office_ip() {\n local ip=\"\"\n local retries=10 # Try a few times\n local count=0\n while [[ -z \"$ip\" && $count -lt $retries ]]; do\n ip=$(sudo docker inspect -f '{{ $('Parametrs').item.json.screen_left }}range .NetworkSettings.Networks{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}.IPAddress{{ $('Parametrs').item.json.screen_right }}{{ $('Parametrs').item.json.screen_left }}end{{ $('Parametrs').item.json.screen_right }}' \"$DOMAIN\"_collabora)\n if [[ -z \"$ip\" ]]; then\n echo \"[DEBUG] office IP not found, retrying ($count/$retries)...\" >> \"$STATUS_FILE\"\n sleep 2 # Wait a bit before retrying\n fi\n ((count++))\n done\n\n if [[ -z \"$ip\" ]]; then\n echo \"[ERROR] Failed to retrieve office IP after $retries attempts!\" >> \"$STATUS_FILE\"\n handle_error \"Failed to retrieve office IP\"\n fi\n\n # Convert IP to subnet by replacing the last octet with 0 and adding /24\n local subnet=$(echo \"$ip\" | sed 's/\\.[0-9]*$/.0\\/24/')\n echo \"[DEBUG] Detected office subnet: $subnet\" >> \"$STATUS_FILE\"\n echo \"$subnet\"\n}\n\n# Check if the compose file exists before stopping the container\nif [ 
-f \"$COMPOSE_FILE\" ]; then\n sudo docker-compose -f \"$COMPOSE_FILE\" down > /dev/null 2>&1 || handle_error \"Failed to stop containers\"\nelse\n handle_error \"docker-compose.yml not found\"\nfi\n\n# Unmount the image if it is currently mounted\nif mount | grep -q \"$MOUNT_DIR\"; then\n sudo umount \"$MOUNT_DIR\" > /dev/null 2>&1 || handle_error \"Failed to unmount $MOUNT_DIR\"\nfi\n\n# Create docker-compose.yml file\necho -e \"$DOCKER_COMPOSE_TEXT\" | base64 --decode | sed 's/\\\\n/\\n/g' | sed 's/\\\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$COMPOSE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $COMPOSE_FILE\"\n\n# Create NGINX configuration files\necho -e \"$NGINX_MAIN_TEXT\" | base64 --decode | sed 's/\\\\n/\\n/g' | sed 's/\\\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_FILE\"\necho -e \"$NGINX_MAIN_LOCATION_TEXT\" | base64 --decode | sed 's/\\\\n/\\n/g' | sed 's/\\\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_MAIN_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_MAIN_LOCATION_FILE\"\n\necho -e \"$NGINX_OFFICE_TEXT\" | base64 --decode | sed 's/\\\\n/\\n/g' | sed 's/\\\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_FILE\"\necho -e \"$NGINX_OFFICE_LOCATION_TEXT\" | base64 --decode | sed 's/\\\\n/\\n/g' | sed 's/\\\\\"/\"/g' | sed '1s/^\"//' | sed '$s/\"$//' | sudo tee \"$NGINX_OFFICE_LOCATION_FILE\" > /dev/null 2>&1 || handle_error \"Failed to create $NGINX_OFFICE_LOCATION_FILE\"\n\n# Resize or extend the disk image to match DISK_SIZE\nif [ -f \"$IMG_FILE\" ]; then\n DESIRED_SIZE_BYTES=$((DISK_SIZE * 1024 * 1024 * 1024))\n CURRENT_SIZE_BYTES=$(stat -c %s \"$IMG_FILE\")\n\n # Expand or shrink as needed\n if [ \"$CURRENT_SIZE_BYTES\" -lt \"$DESIRED_SIZE_BYTES\" ]; then\n # echo \"[INFO] Expanding image 
to $DISK_SIZE GB...\"\n sudo truncate -s \"$DESIRED_SIZE_BYTES\" \"$IMG_FILE\" || handle_error \"Failed to expand $IMG_FILE\" 2>/dev/null\n\n LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\" 2>/dev/null) || handle_error \"Failed to setup loop device\" \n sudo e2fsck -fy \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"Filesystem check failed\" ; } 2>/dev/null\n sudo resize2fs \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"resize2fs after expand failed\" ; } 2>/dev/null\n sudo losetup -d \"$LOOP_DEV\" 2>/dev/null\n\n elif [ \"$CURRENT_SIZE_BYTES\" -gt \"$DESIRED_SIZE_BYTES\" ]; then\n # echo \"[INFO] Shrinking image to $DISK_SIZE GB...\"\n LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\" 2>/dev/null) || handle_error \"Failed to setup loop device\" \n sudo e2fsck -fy \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"Filesystem check failed\" ; } 2>/dev/null\n sudo resize2fs -M \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"resize2fs -M failed\" ; } 2>/dev/null\n\n BLOCKS=$(sudo tune2fs -l \"$LOOP_DEV\" | grep '^Block count:' | awk '{print $3}')\n BLOCK_SIZE=$(sudo tune2fs -l \"$LOOP_DEV\" | grep '^Block size:' | awk '{print $3}')\n MIN_BYTES=$((BLOCKS * BLOCK_SIZE))\n sudo losetup -d \"$LOOP_DEV\" 2>/dev/null\n\n if [ \"$DESIRED_SIZE_BYTES\" -lt \"$MIN_BYTES\" ]; then\n handle_error \"DISK_SIZE too small. 
Minimum size is $((MIN_BYTES / 1024 / 1024 / 1024)) GB\"\n fi\n\n sudo truncate -s \"$DESIRED_SIZE_BYTES\" \"$IMG_FILE\" || handle_error \"Failed to truncate to desired size\"\n\n LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\" 2>/dev/null) || handle_error \"Failed to setup loop device (after shrink)\"\n sudo resize2fs \"$LOOP_DEV\" || { sudo losetup -d \"$LOOP_DEV\"; handle_error \"resize2fs after shrink failed\" ; } 2>/dev/null\n sudo losetup -d \"$LOOP_DEV\" 2>/dev/null\n fi\n\n # Remove the old line from /etc/fstab (if it exists) and add it again\n sudo sed -i \"\\|$IMG_FILE|d\" /etc/fstab\n echo \"$IMG_FILE $MOUNT_DIR ext4 loop 0 0\" | sudo tee -a /etc/fstab > /dev/null || handle_error \"Failed to update /etc/fstab\"\n\n # Create the folder if it doesn't exist\n sudo mkdir -p \"$MOUNT_DIR\"\n sudo chmod 777 \"$MOUNT_DIR\"\n\n # Try to mount manually\n if ! sudo mount \"$MOUNT_DIR\"; then\n echo \"[WARN] mount -a failed, trying manual mount with loop\"\n LOOP_DEV=$(sudo losetup --find --show \"$IMG_FILE\") || handle_error \"Failed to setup loop device (manual)\"\n sudo mount -t ext4 \"$LOOP_DEV\" \"$MOUNT_DIR\" || {\n sudo losetup -d \"$LOOP_DEV\"\n handle_error \"Manual mount failed\"\n }\n fi\nelse\n handle_error \"Disk image $IMG_FILE does not exist\"\nfi\n\n# Mount the disk only if it is not already mounted\nif ! 
mount | grep -q \"$MOUNT_DIR\"; then\n sudo mount -a || handle_error \"Failed to mount entries from /etc/fstab\"\nfi\n\n# Change to the compose directory\ncd \"$COMPOSE_DIR\" > /dev/null 2>&1 || handle_error \"Failed to change directory to $COMPOSE_DIR\"\n\n# Copy NGINX configuration files instead of creating symbolic links\nsudo cp -f \"$NGINX_MAIN_FILE\" \"$VHOST_MAIN_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_FILE to $VHOST_MAIN_FILE\"\nsudo chmod 777 \"$VHOST_MAIN_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_FILE\"\n\nsudo cp -f \"$NGINX_MAIN_LOCATION_FILE\" \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_MAIN_LOCATION_FILE to $VHOST_MAIN_LOCATION_FILE\"\nsudo chmod 777 \"$VHOST_MAIN_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_MAIN_LOCATION_FILE\"\n\nsudo cp -f \"$NGINX_OFFICE_FILE\" \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_FILE to $VHOST_OFFICE_FILE\"\nsudo chmod 777 \"$VHOST_OFFICE_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_FILE\"\n\nsudo cp -f \"$NGINX_OFFICE_LOCATION_FILE\" \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to copy $NGINX_OFFICE_LOCATION_FILE to $VHOST_OFFICE_LOCATION_FILE\"\nsudo chmod 777 \"$VHOST_OFFICE_LOCATION_FILE\" || handle_error \"Failed to set permissions on $VHOST_OFFICE_LOCATION_FILE\"\n\n# Start Docker containers using docker-compose\nif ! 
sudo docker compose up -d > /dev/null 2>error.log; then\n ERROR_MSG=$(tail -n 10 error.log) # Read the last 10 lines from error.log\n handle_error \"Docker-compose failed: $ERROR_MSG\"\nfi\n\n# --- Function that installs Nextcloud Office (Collabora) in the background ---\ninstall_nextcloud_office() {\n MAX_RETRIES=60\n COUNTER=0\n\n\n # 1) Wait until \"installed: true\" in occ status\n while true; do\n STATUS_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ status 2>&1)\"\n if echo \"$STATUS_OUTPUT\" | grep -q \"installed: true\"; then\n echo \"[OfficeSetup] Nextcloud reports installed: true. Proceeding...\" >> \"$STATUS_FILE\"\n break\n else\n echo \"[OfficeSetup] [$COUNTER/$MAX_RETRIES] Nextcloud not fully installed yet, waiting...\" >> \"$STATUS_FILE\"\n sleep 2\n ((COUNTER++))\n if [ $COUNTER -ge $MAX_RETRIES ]; then\n echo \"[OfficeSetup] Nextcloud did not report 'installed: true' within time limit. Skipping Office install.\" >> \"$STATUS_FILE\"\n return\n fi\n fi\n done\n\n # Get the nginx-proxy IP\n PROXY_IP=$(get_proxy_ip)\n\n echo \"[OfficeSetup] Detected nginx-proxy IP: $PROXY_IP\" >> \"$STATUS_FILE\"\n \n\n # Write the needed parameters to the Nextcloud config\n echo \"[OfficeSetup] Setting overwrite protocol/host/cli.url in Nextcloud config...\" >> \"$STATUS_FILE\"\n sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwriteprotocol --value=https 2>&1\n sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwritehost --value=\"$DOMAIN\" 2>&1\n sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set overwrite.cli.url --value=\"https://$DOMAIN\" 2>&1\n\n # Add the nginx-proxy IP to the trusted_proxies list\n echo \"[OfficeSetup] Adding nginx-proxy IP to trusted_proxies...\" >> \"$STATUS_FILE\"\n # *** NEW BLOCK *** - Get the IP address of the reverse proxy\n sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:system:set trusted_proxies 0 
--value=\"$PROXY_IP\" 2>&1\n\n echo \"[OfficeSetup] Installing Nextcloud Office (richdocuments)...\" >> \"$STATUS_FILE\"\n\n # 2) Install the richdocuments app\n INSTALL_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:install richdocuments 2>&1 || echo \"[OfficeSetup] App already installed\")\"\n echo \"[OfficeSetup] app:install richdocuments => $INSTALL_OUTPUT\" >> \"$STATUS_FILE\"\n\n # 3) Set the Collabora Online URL in Nextcloud\n WOPI_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ config:app:set richdocuments wopi_url --value=\"https://office.$DOMAIN/\" 2>&1)\"\n echo \"[OfficeSetup] wopi_url => $WOPI_OUTPUT\" >> \"$STATUS_FILE\"\n\n # 4) Enable the app\n ENABLE_OUTPUT=\"$(sudo docker exec -u www-data \"$CONTAINER_NAME\" php occ app:enable richdocuments 2>&1)\"\n echo \"[OfficeSetup] app:enable richdocuments => $ENABLE_OUTPUT\" >> \"$STATUS_FILE\"\n\n # 5) Allow local remote servers (Fix for Collabora access issues)\n ALLOW_LOCAL_OUTPUT=\"$(sudo docker exec -u www
Credentials you'll need
Each integration node will prompt for credentials when you import. We strip credential IDs before publishing — you'll add your own.
httpBasicAuth, sshPassword
About this workflow
PUQ Docker NextCloud deploy. Uses the respondToWebhook, stickyNote, httpRequest, and ssh nodes. Triggered by a webhook; 44 nodes in total.
Source: https://github.com/Zie619/n8n-workflows — credit to the original creator. Request a take-down →