#!/usr/bin/env bash
set -e
# Add any volume names that need to be backed up here
BACKUP_VOLUMES=(bitcart_datadir tor_servicesdir tor_datadir tor_relay_datadir)
function display_help() {
    cat <<-END
Usage:
------
Backup Bitcart files
This script must be run as root

-h, --help: Show help
--only-db: Back up the database only. Default: false
--restart: Restart Bitcart during the backup (to avoid data corruption if needed). Default: false

This script backs up the database as an SQL dump, together with the essential volumes, into a tar.zst archive.
It can optionally upload the backup to a remote server.

Environment variables:
BACKUP_PROVIDER: where to upload. Default: empty (local). See the list of supported providers below
BACKUP_ENCRYPTION: whether to encrypt the backup with OpenSSL (AES-256-CBC). Default: false
BACKUP_ENCRYPTION_KEY: passphrase used when BACKUP_ENCRYPTION is enabled (normally generated in the .deploy file)
SCP_TARGET: where to upload the backup via scp
S3_BUCKET: name of the S3 bucket to upload the backup to
S3_PATH: key prefix inside the bucket under which the backup is stored
S3_ACCESS_KEY_ID: AWS access key ID for S3 authentication
S3_SECRET_ACCESS_KEY: AWS secret access key for S3 authentication
S3_DEFAULT_REGION: AWS region for S3 (e.g., us-east-1)
S3_ENDPOINT_URL: (optional) custom S3 endpoint URL for S3-compatible services

Supported providers:
* local: keeps backups in the backup_datadir docker volume (default)
* scp: uploads the backup to a remote server via scp
* s3: uploads to s3://bucket/path
END
}
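
# Example invocations (illustrative; assumes the relevant variables are set in the
# environment or deployment config, and the scp target / s3 bucket below are hypothetical):
#   sudo ./backup.sh --restart
#   sudo BACKUP_PROVIDER=scp SCP_TARGET=user@backup-host:/backups/ ./backup.sh
#   sudo BACKUP_PROVIDER=s3 S3_BUCKET=my-bucket S3_PATH=bitcart ./backup.sh --only-db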
ONLY_DB=false
RESTART_SERVICES=false
# TODO: less duplication for args parsing
while (("$#")); do
    case "$1" in
        -h | --help)
            display_help
            exit 0
            ;;
        --only-db)
            ONLY_DB=true
            shift 1
            ;;
        --restart)
            RESTART_SERVICES=true
            shift 1
            ;;
        --) # end argument parsing
            shift
            break
            ;;
        --*= | -*) # unsupported flags
            echo "Error: Unsupported flag $1" >&2
            display_help
            exit 1
            ;;
        *)
            shift
            ;;
    esac
done
# shellcheck source=helpers.sh
. helpers.sh
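# helpers.sh is expected to provide load_env, volume_name, bitcart_dump_db,
# bitcart_stop and bitcart_start used below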
load_env true
cd "$BITCART_BASE_DIRECTORY"
deployment_name=$(volume_name)
volumes_dir=/var/lib/docker/volumes
backup_dir="$volumes_dir/backup_datadir"
timestamp=$(date "+%Y%m%d-%H%M%S")
filename="$timestamp-backup.tar.zst"
dumpname="$timestamp-database.sql"
backup_path="$backup_dir/_data/${filename}"
dbdump_path="$backup_dir/_data/${dumpname}"
echo "Dumping database …"
bitcart_dump_db "$dumpname"
if $ONLY_DB; then
    tar_path="${backup_path%.zst}"
    tar -cvf "$tar_path" "$dbdump_path"
    zstdmt -1 --rm "$tar_path" -o "$backup_path"
else
    if $RESTART_SERVICES; then
        echo "Stopping Bitcart…"
        bitcart_stop
    fi
    echo "Backing up files…"
    files=()
    for fname in "${BACKUP_VOLUMES[@]}"; do
        fname=$(volume_name "$fname")
        if [ -d "$volumes_dir/$fname" ]; then
            files+=("$fname")
        fi
    done
    # archive the volumes under a volumes/ prefix, skip host SSH key material,
    # and strip the timestamp from the database dump name
    tar_path="${backup_path%.zst}"
    tar -cvf "$tar_path" -C "$volumes_dir" \
        --exclude="$(volume_name bitcart_datadir)/_data/host_authorized_keys" \
        --exclude="$(volume_name bitcart_datadir)/_data/host_id_rsa" \
        --exclude="$(volume_name bitcart_datadir)/_data/host_id_rsa.pub" \
        --transform "s|^$deployment_name|volumes/$deployment_name|" "${files[@]}" \
        -C "$(dirname "$dbdump_path")" --transform "s|$timestamp-||" --transform "s|$timestamp||" "$dumpname" \
        -C "$BITCART_BASE_DIRECTORY/compose" plugins
    zstdmt -1 --rm "$tar_path" -o "$backup_path"
    if $RESTART_SERVICES; then
        echo "Restarting Bitcart…"
        bitcart_start
    fi
fi
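
# The resulting archive can be inspected without extracting it, for example
# (the filename below is illustrative):
#   zstd -dc 20240101-000000-backup.tar.zst | tar -tvf -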
if [ "$BACKUP_ENCRYPTION" = "true" ]; then
if [ -z "$BACKUP_ENCRYPTION_KEY" ]; then
echo "Error: BACKUP_ENCRYPTION is enabled but BACKUP_ENCRYPTION_KEY is not set"
echo "The encryption key should be automatically generated in .deploy file"
exit 1
fi
echo "Encrypting backup …"
if ! openssl enc -aes-256-cbc -salt -pbkdf2 -in "$backup_path" -out "${backup_path}.enc" -pass pass:"$BACKUP_ENCRYPTION_KEY"; then
echo "Error: Failed to encrypt backup file"
exit 1
fi
rm "$backup_path"
backup_path="${backup_path}.enc"
filename="${filename}.enc"
fi
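
# To decrypt a backup later, invert the openssl command above
# (the input filename is illustrative):
#   openssl enc -d -aes-256-cbc -pbkdf2 -in 20240101-000000-backup.tar.zst.enc \
#       -out 20240101-000000-backup.tar.zst -pass pass:"$BACKUP_ENCRYPTION_KEY"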
delete_backup() {
    echo "Deleting local backup…"
    rm "$backup_path"
}
case $BACKUP_PROVIDER in
    "s3")
        echo "Uploading to S3…"
        docker_args=(--rm
            -e AWS_ACCESS_KEY_ID="$S3_ACCESS_KEY_ID"
            -e AWS_SECRET_ACCESS_KEY="$S3_SECRET_ACCESS_KEY"
            -e AWS_DEFAULT_REGION="$S3_DEFAULT_REGION")
        if [ -n "$S3_ENDPOINT_URL" ]; then
            docker_args+=(-e AWS_ENDPOINT_URL="$S3_ENDPOINT_URL")
        fi
        docker_args+=(-v "$backup_path:/aws/$filename" amazon/aws-cli s3 cp "$filename" "s3://$S3_BUCKET/$S3_PATH/$filename")
        docker run "${docker_args[@]}"
        delete_backup
        ;;
    "scp")
        echo "Uploading via SCP…"
        scp "$backup_path" "$SCP_TARGET"
        delete_backup
        ;;
    *)
        echo "Backed up to $backup_path"
        ;;
esac
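
# A sketch of verifying an s3 upload afterwards, reusing the same amazon/aws-cli
# image and credentials as the upload above:
#   docker run --rm -e AWS_ACCESS_KEY_ID="$S3_ACCESS_KEY_ID" -e AWS_SECRET_ACCESS_KEY="$S3_SECRET_ACCESS_KEY" \
#       -e AWS_DEFAULT_REGION="$S3_DEFAULT_REGION" amazon/aws-cli s3 ls "s3://$S3_BUCKET/$S3_PATH/"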
# cleanup
rm "$dbdump_path"
echo "Backup done."
set +e