#!/usr/bin/env bash
set -eo pipefail
set -Eo functrace
shopt -s expand_aliases
# The following values can be overridden in the .env file or on the command line; defaults are set here
export NETWORK="mainnet"
export ACTION=""
export PROFILE="stacks-blockchain"
SIGNER=false
STACKS_CHAIN_ID="2147483648"
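# 2147483648 is 0x80000000, the Stacks mainnet chain ID (testnet uses 0x80000001)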
STACKS_SHUTDOWN_TIMEOUT=1200 # default to 20 minutes, during sync it can take a long time to stop the runloop
LOG_TAIL="100"
FLAGS="proxy"
LOG_OPTS="-f --tail ${LOG_TAIL}"
VERBOSE=false
REVERT_BNS=false
REVERT_EVENTS=false
# # Base colors
# COLBLACK=$'\033[30m' # Black
COLRED=$'\033[31m' # Red
COLGREEN=$'\033[32m' # Green
COLYELLOW=$'\033[33m' # Yellow
COLBLUE=$'\033[34m' # Blue
COLMAGENTA=$'\033[35m' # Magenta
COLCYAN=$'\033[36m' # Cyan
# COLWHITE=$'\033[37m' # White
# # Bright colors
COLBRRED=$'\033[91m' # Bright Red
# COLBRGREEN=$'\033[92m' # Bright Green
# COLBRYELLOW=$'\033[93m' # Bright Yellow
# COLBRBLUE=$'\033[94m' # Bright Blue
# COLBRMAGENTA=$'\033[95m' # Bright Magenta
# COLBRCYAN=$'\033[96m' # Bright Cyan
# COLBRWHITE=$'\033[97m' # Bright White
# # Text formatting
# COLITALIC=$'\033[3m' # Italic
# COLUNDERLINE=$'\033[4m' # underline
COLBOLD=$'\033[1m' # Bold Text
# # Text reset to default
COLRESET=$'\033[0m' # reset color
ERROR="${COLRED}[ Error ]${COLRESET} "
WARN="${COLYELLOW}[ Warn ]${COLRESET} "
INFO="${COLGREEN}[ Success ]${COLRESET} "
EXIT_MSG="${COLRED}[ Exit Error ]${COLRESET} "
DEBUG="[ DEBUG ] "
# Use .env in the local dir
# - This var is also used in the docker-compose yaml files
ABS_PATH="$( cd -- "$(dirname "${0}")" >/dev/null 2>&1 ; pwd -P )"
export SCRIPTPATH=${ABS_PATH}
ENV_FILE="${SCRIPTPATH}/.env"
ENV_FILE_TMP="${SCRIPTPATH}/.env.tmp"
# If no .env file exists, copy the sample env and export the vars
if [ ! -f "${ENV_FILE}" ];then
cp -a "${SCRIPTPATH}/sample.env" "${ENV_FILE}"
fi
source "${ENV_FILE}"
alias log="logger"
alias log_error='logger "${ERROR}"'
alias log_warn='logger "${WARN}"'
alias log_info='logger "${INFO}"'
alias log_exit='exit_error "${EXIT_MSG}"'
if ${VERBOSE}; then
alias log='logger "$(date "+%D %H:%M:%S")" "Func:${FUNCNAME:-main}" "Line:${LINENO:-null}"'
alias log_info='logger "$(date "+%D %H:%M:%S")" "Func:${FUNCNAME:-main}" "Line:${LINENO:-null}" "${INFO}"'
alias log_warn='logger "$(date "+%D %H:%M:%S")" "Func:${FUNCNAME:-main}" "Line:${LINENO:-null}" "${WARN}"'
alias log_error='logger "$(date "+%D %H:%M:%S")" "Func:${FUNCNAME:-main}" "Line:${LINENO:-null}" "${ERROR}"'
alias log_exit='exit_error "$(date "+%D %H:%M:%S")" "Func:${FUNCNAME:-main}" "Line:${LINENO:-null}" "${EXIT_MSG}"'
fi
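# Illustrative example of the two logging modes (sample output only, not executed):
#   VERBOSE=false: log_warn "low disk space"   prints roughly "[ Warn ]   low disk space"
#   VERBOSE=true:  the same call is prefixed with a timestamp plus "Func:<caller>" and "Line:<n>"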
logger() {
if ${VERBOSE}; then
printf "%s %-25s %-10s %-10s %-25s %s\\n" "${1}" "${2}" "${3}" "${DEBUG}" "${4}" "${5}"
else
printf "%-25s %s\\n" "${1}" "${2}"
fi
}
exit_error() {
if ${VERBOSE}; then
printf "%s %-25s %-10s %-10s %-25s %s\\n\\n" "${1}" "${2}" "${DEBUG}" "${3}" "${4}" "${5}"
else
printf "%-25s %s\\n\\n" "${1}" "${2}"
fi
exit 1
}
# Populate hardcoded list of default services for shutdown order and log files export
${VERBOSE} && log "Creating list of default services"
DEFAULT_SERVICES=(
stacks-blockchain
stacks-blockchain-api
postgres
)
${VERBOSE} && log "DEFAULT_SERVICES: ${DEFAULT_SERVICES[*]}"
# Populate list of supported flags based on files in ./compose-files/extra-services
OPTIONAL_FLAGS=""
SUPPORTED_FLAGS=()
${VERBOSE} && log "Creating list of supported flags"
for i in "${SCRIPTPATH}"/compose-files/extra-services/*.yaml; do
flag=$(basename "${i%.*}")
SUPPORTED_FLAGS+=("$flag")
done
${VERBOSE} && log "SUPPORTED_FLAGS: ${SUPPORTED_FLAGS[*]}"
# Populate list of supported networks based on files in ./compose-files/networks
${VERBOSE} && log "Creating list of supported networks"
SUPPORTED_NETWORKS=()
for i in "${SCRIPTPATH}"/compose-files/networks/*.yaml; do
network=$(basename "${i%.*}")
SUPPORTED_NETWORKS+=("${network}")
done
${VERBOSE} && log "SUPPORTED_NETWORKS: ${SUPPORTED_NETWORKS[*]}"
# Hardcoded list of supported actions this script accepts
${VERBOSE} && log "Defining hardcoded list of supported actions"
SUPPORTED_ACTIONS=(
up
start
down
stop
restart
log
logs
import
export
upgrade
pull
status
reset
bns
)
${VERBOSE} && log "SUPPORTED_ACTIONS: ${SUPPORTED_ACTIONS[*]}"
# Print usage with some examples
usage() {
echo
log "Usage:"
log " ${0} -n <network> -a <action> <optional args>"
log " -n|--network: [ mainnet | testnet | mocknet ]"
log " -a|--action: [ start | stop | logs | reset | upgrade | import | export | bns ]"
log " optional args:"
log " -f|--flags: [ signer,proxy ]"
log " export: combined with 'logs' action, exports logs to a text file"
log " ex: ${COLCYAN}${0} -n mainnet -a start -f proxy${COLRESET}"
log " ex: ${COLCYAN}${0} -n mainnet -a start -f signer,proxy${COLRESET}"
log " ex: ${COLCYAN}${0} --network mainnet --action start --flags proxy${COLRESET}"
log " ex: ${COLCYAN}${0} -n mainnet -a logs export${COLRESET}"
echo
exit 0
}
# Function to ask for confirmation. Loop until valid input is received
confirm() {
# y/n confirmation. loop until valid response is received
while true; do
read -r -n 1 -p "${1:-Continue?} [y/n]: " REPLY
case ${REPLY} in
[yY]) echo ; return 0 ;;
[nN]) echo ; return 1 ;;
*) printf "\\033[31m %s \\n\\033[0m" "invalid input"
esac
done
}
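# Illustrative usage of confirm: confirm "Delete data?" || log_exit "Delete cancelled"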
# Function to check whether a given element exists in a provided array
# - the array (as a string) and the element to look for are passed as args
check_flags() {
local array="${1}"
local element="${2}"
${VERBOSE} && log "array: ${1}"
${VERBOSE} && log "element: ${element}"
for i in ${array}; do
if [[ ${i} == "${element}" ]]; then
return 0
fi
done
return 1
}
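# Illustrative usage of check_flags: check_flags "${SUPPORTED_NETWORKS[*]}" "mainnet" && echo "mainnet is supported"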
# Check if we're on a Mac M1 - Docker IO is not ideal yet, and we're IO heavy
# - Confirm if user really wants to run this on an M1
check_device() {
# Check if we're on an M1 Mac - Disk IO is not ideal on this platform
if [[ $(uname -m) == "arm64" ]]; then
echo
log_warn "⚠️ ${COLYELLOW}WARNING${COLRESET}"
log_warn "⚠️ MacOS M1 CPU detected - NOT recommended for this repo"
log_warn "⚠️ see README for details"
log_warn "⚠️ https://github.com/stacks-network/stacks-blockchain-docker/blob/master/docs/requirements.md#macos-with-an-m1-processor-is-not-recommended-for-this-repo"
confirm "Continue Anyway?" || exit_error "${COLRED}Exiting${COLRESET}"
fi
}
# # Try to detect a breaking (major version change) in the API by comparing local version to .env definition
# # Return non-zero if a breaking change is detected (this logic is suspect, but should be ok)
# check_api(){
# # Try to detect if there is a breaking API change based on major version change
# ${VERBOSE} && log "Checking API version for potential breaking change"
# if [ "${PROFILE}" != "event-replay" ]; then
# CURRENT_API_VERSION=$(docker images --format "{{.Tag}}" blockstack/stacks-blockchain-api | cut -f 1 -d "." | head -1)
# CONFIGURED_API_VERSION=$( echo "${STACKS_BLOCKCHAIN_API_VERSION}" | cut -f 1 -d ".")
# ${VERBOSE} && log "CURRENT_API_VERSION: ${CURRENT_API_VERSION}"
# ${VERBOSE} && log "CONFIGURED_API_VERSION: ${CONFIGURED_API_VERSION}"
# if [ "${CURRENT_API_VERSION}" != "" ]; then
# if [ "${CURRENT_API_VERSION}" -lt "${CONFIGURED_API_VERSION}" ];then
# echo
# log_warn "${COLBOLD}stacks-blockchain-api contains a breaking schema change${COLRESET} ( Version: ${COLYELLOW}${STACKS_BLOCKCHAIN_API_VERSION}${COLRESET} )"
# return 0
# fi
# fi
# fi
# ${VERBOSE} && log "No schema-breaking change detected"
# return 1
# }
# Check if services are running
check_network() {
local profile="${1}"
${VERBOSE} && log "Checking if default services are running"
# Determine if the services are already running
if [[ $(docker-compose -f "${SCRIPTPATH}/compose-files/common.yaml" --profile ${profile} ps -q) ]]; then
${VERBOSE} && log "Docker services have a pid"
# Docker is running, return success
return 0
fi
${VERBOSE} && log "Docker services have no pid"
# Docker is not running, return fail
return 1
}
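# Illustrative usage of check_network: check_network "${PROFILE}" && log "services are already running"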
# Check if there is an event-replay operation in progress
check_event_replay(){
${VERBOSE} && log "Checking status of API event-replay"
##
## Check if import has started and save return code
if [[ "${ACTION}" == "export" || "${ACTION}" == "import" ]]; then
log "${ACTION} Checking for an active event-replay import"
fi
eval "docker logs stacks-blockchain-api 2>&1 | head -n20 | grep -q 'Importing raw event requests'" || test ${?} -eq 141
check_import_started="${?}"
${VERBOSE} && log "check_import_started: ${check_import_started}"
##
## Check if import has completed and save return code
if [[ "${ACTION}" == "export" || "${ACTION}" == "import" ]]; then
log "${ACTION} Checking for a completed event-replay import"
fi
eval "docker logs stacks-blockchain-api --tail 20 2>&1 | grep -q 'Event import and playback successful'" || test ${?} -eq 141
check_import_finished="${?}"
${VERBOSE} && log "check_import_finished: ${check_import_finished}"
##
## Check if export has started and save return code
if [[ "${ACTION}" == "export" || "${ACTION}" == "import" ]]; then
log "${ACTION} Checking for an active event-replay export"
fi
eval "docker logs stacks-blockchain-api 2>&1 | head -n20 | grep -q 'Export started'" || test ${?} -eq 141
check_export_started="${?}"
${VERBOSE} && log "check_export_started: ${check_export_started}"
##
## Check if export has completed and save return code
if [[ "${ACTION}" == "export" || "${ACTION}" == "import" ]]; then
log "${ACTION} Checking for a completed event-replay export"
fi
eval "docker logs stacks-blockchain-api --tail 20 2>&1 | grep -q 'Export successful'" || test ${?} -eq 141
check_export_finished="${?}"
${VERBOSE} && log "check_export_finished: ${check_export_finished}"
if [ "${check_import_started}" -eq "0" ]; then
# Import has started
${VERBOSE} && log "import has started"
if [ "${check_import_finished}" -eq "0" ]; then
# Import has finished
log "Event import and playback has finished"
${VERBOSE} && log "import has finished, return 0"
return 0
fi
# Import hasn't finished, return 1
log_warn "Event import and playback is in progress"
${VERBOSE} && log "import has not finished, return 1"
return 1
fi
if [ "${check_export_started}" -eq "0" ]; then
# Export has started
${VERBOSE} && log "export has started"
if [ "${check_export_finished}" -eq "0" ]; then
# Export has finished
log "Event export has finished"
${VERBOSE} && log "export has finished, return 0"
return 0
fi
# Export hasn't finished, return 1
log_warn "Event export is in progress"
${VERBOSE} && log "export has not finished, return 1"
return 1
fi
${VERBOSE} && log "No event-replay in progress"
# Default return success - event-replay is not running
return 0
}
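# Summary of check_event_replay return codes: 0 when no event-replay has been started or the
# last import/export completed; 1 while an import or export is still in progress.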
# Determine if a supplied container name is running
check_container() {
local container="${1}"
${VERBOSE} && log "Checking if container ${container} is running"
if [ "$(docker ps -f name="^${container}"$ -q)" ]; then
# Container is running, return success
${VERBOSE} && log "${container} is running, return 0"
return 0
fi
# Container is not running, return fail
${VERBOSE} && log "${container} is not running, return 1"
return 1
}
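# Illustrative usage of check_container: check_container "stacks-blockchain-api" && log "API container is up"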
# Check if BNS_IMPORT_DIR is defined, and if the directory exists and is not empty
check_bns() {
if [ "${BNS_IMPORT_DIR}" ]; then
${VERBOSE} && log "Defined BNS_IMPORT_DIR var"
local file_list=()
if [ -d "${SCRIPTPATH}/persistent-data${BNS_IMPORT_DIR}" ]; then
${VERBOSE} && log "Found existing BNS_IMPORT_DIR directory"
for file in "${SCRIPTPATH}"/persistent-data"${BNS_IMPORT_DIR}"/*; do
file_base=$(basename "${file%.*}")
file_list+=("$file_base")
done
for item in "${BNS_FILES[@]}"; do
if ! check_flags "${file_list[*]}" "$item"; then
return 1
fi
done
fi
fi
return 0
}
# adjust BNS_IMPORT_DIR for mocknet
bns_import_env() {
if "${REVERT_BNS}"; then
${VERBOSE} && log "Uncommenting BNS_IMPORT_DIR in ${ENV_FILE}"
${VERBOSE} && log "Running: sed -i.tmp \"s/^#BNS_IMPORT_DIR=/BNS_IMPORT_DIR=/;\" ${ENV_FILE}"
$(sed -i.tmp "
s/^#BNS_IMPORT_DIR=/BNS_IMPORT_DIR=/;
" "${ENV_FILE}" 2>&1) || {
log_exit "Unable to update BNS_IMPORT_DIR value in .env file: ${COLCYAN}${ENV_FILE}${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Deleting temp .env file: ${ENV_FILE}.tmp${COLRESET}"
${VERBOSE} && log "${COLYELLOW}Grepping for BNS_IMPORT_DIR"
cat ${ENV_FILE} | grep "BNS_IMPORT_DIR"
$(rm "${ENV_FILE}.tmp" 2>&1) || {
log_exit "Unable to delete tmp .env file: ${COLCYAN}${ENV_FILE}.tmp${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Set REVERT_BNS to false${COLRESET}"
REVERT_BNS=false
fi
if [ "${BNS_IMPORT_DIR}" ];then
${VERBOSE} && log "Commenting BNS_IMPORT_DIR in ${ENV_FILE}"
${VERBOSE} && log "Running: sed -i.tmp \"s/^BNS_IMPORT_DIR=/#BNS_IMPORT_DIR=/;\" ${ENV_FILE}"
$(sed -i.tmp "
s/^BNS_IMPORT_DIR=/#BNS_IMPORT_DIR=/;
" "${ENV_FILE}" 2>&1) || {
log_exit "Unable to update BNS_IMPORT_DIR value in .env file: ${COLCYAN}${ENV_FILE}${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Deleting temp .env file: ${ENV_FILE}.tmp${COLRESET}"
${VERBOSE} && log "${COLYELLOW}Grepping for BNS_IMPORT_DIR"
cat ${ENV_FILE} | grep "BNS_IMPORT_DIR"
$(rm "${ENV_FILE}.tmp" 2>&1) || {
log_exit "Unable to delete tmp .env file: ${COLCYAN}${ENV_FILE}.tmp${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Set REVERT_BNS to true${COLRESET}"
REVERT_BNS=true
${VERBOSE} && log "${COLYELLOW}Unset BNS_IMPORT_DIR var${COLRESET}"
unset BNS_IMPORT_DIR
fi
${VERBOSE} && log "Sourcing updated .env file: ${COLCYAN}${ENV_FILE}${COLRESET}"
source "${ENV_FILE}"
return 0
}
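# Illustrative effect of bns_import_env above (value is an example only): a .env line such as
#   BNS_IMPORT_DIR=/bns-data
# is commented out to
#   #BNS_IMPORT_DIR=/bns-data
# while mocknet services run, and restored on the next call once REVERT_BNS=true.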
# adjust STACKS_EXPORT_EVENTS_FILE for mocknet
events_file_env(){
if "${REVERT_EVENTS}"; then
${VERBOSE} && log "Uncommenting STACKS_EXPORT_EVENTS_FILE in ${ENV_FILE}"
${VERBOSE} && log "Running: sed -i.tmp \"s/^#STACKS_EXPORT_EVENTS_FILE=/STACKS_EXPORT_EVENTS_FILE=/;\" ${ENV_FILE}"
$(sed -i.tmp "
s/^#STACKS_EXPORT_EVENTS_FILE=/STACKS_EXPORT_EVENTS_FILE=/;
" "${ENV_FILE}" 2>&1) || {
log_exit "Unable to update STACKS_EXPORT_EVENTS_FILE value in .env file: ${COLCYAN}${ENV_FILE}${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Grepping for STACKS_EXPORT_EVENTS_FILE"
cat ${ENV_FILE} | grep "STACKS_EXPORT_EVENTS_FILE"
${VERBOSE} && log "${COLYELLOW}Deleting temp .env file: ${ENV_FILE}.tmp${COLRESET}"
$(rm "${ENV_FILE}.tmp" 2>&1) || {
log_exit "Unable to delete tmp .env file: ${COLCYAN}${ENV_FILE}.tmp${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Set REVERT_EVENTS to false${COLRESET}"
REVERT_EVENTS=false
fi
if [ "${STACKS_EXPORT_EVENTS_FILE}" ]; then
${VERBOSE} && log "Commenting STACKS_EXPORT_EVENTS_FILE in ${ENV_FILE_TMP}"
${VERBOSE} && log "Running: sed -i.tmp \"s/^STACKS_EXPORT_EVENTS_FILE=/#STACKS_EXPORT_EVENTS_FILE=/;\" ${ENV_FILE}"
$(sed -i.tmp "
s/^STACKS_EXPORT_EVENTS_FILE=/#STACKS_EXPORT_EVENTS_FILE=/;
" "${ENV_FILE}" 2>&1) || {
log_exit "Unable to update STACKS_EXPORT_EVENTS_FILE value in .env file: ${COLCYAN}${ENV_FILE}${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Grepping for STACKS_EXPORT_EVENTS_FILE"
cat ${ENV_FILE} | grep "STACKS_EXPORT_EVENTS_FILE"
${VERBOSE} && log "${COLYELLOW}Deleting temp .env file: ${ENV_FILE}.tmp${COLRESET}"
$(rm "${ENV_FILE}.tmp" 2>&1) || {
log_exit "Unable to delete tmp .env file: ${COLCYAN}${ENV_FILE}.tmp${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Set REVERT_EVENTS to true${COLRESET}"
REVERT_EVENTS=true
${VERBOSE} && log "${COLYELLOW}Unset STACKS_EXPORT_EVENTS_FILE${COLRESET}"
unset STACKS_EXPORT_EVENTS_FILE
fi
${VERBOSE} && log "Sourcing updated .env file: ${COLCYAN}${ENV_FILE}${COLRESET}"
source "${ENV_FILE}"
return 0
}
# Function that updates Config.toml
update_configs(){
if [ "${NETWORK}" == "testnet" ]; then
BTC_HOST=${TBTC_HOST}
BTC_RPC_USER=${TBTC_RPC_USER}
BTC_RPC_PASS=${TBTC_RPC_PASS}
BTC_RPC_PORT=${TBTC_RPC_PORT}
BTC_P2P_PORT=${TBTC_P2P_PORT}
SIGNER_PRIVATE_KEY=${TESTNET_SIGNER_PRIVATE_KEY}
fi
CONFIG_TOML="${SCRIPTPATH}/conf/${NETWORK}/Config.toml"
SIGNER_TOML="${SCRIPTPATH}/conf/${NETWORK}/Signer.toml"
## update Config.toml with signer options
if [ "${SIGNER}" != "true" ]; then
${VERBOSE} && log "${COLYELLOW}Disabling signer options in ${CONFIG_TOML}${COLRESET}"
sed -i.tmp "
/^\[\[events_observer\]\]/{
:a
N
/endpoint.*stacks-signer/!ba
s/^/#/mg
}
/^stacker = true/ s/^/#/
" "${CONFIG_TOML}" || {
log_exit "Unable to update values in Config.toml file: ${COLCYAN}${CONFIG_TOML}${COLRESET}"
}
else
[ -z "${SIGNER_PRIVATE_KEY}" ] && log_exit "Signer private key not set!"
${VERBOSE} && log "${COLYELLOW}Enabling signer options in ${CONFIG_TOML}${COLRESET}"
sed -i.tmp "
/^#\[\[events_observer\]\]/{
:a
N
/endpoint.*stacks-signer/!ba
s/^#//mg
}
/^#stacker = true/ s/^#//
" "${CONFIG_TOML}" || {
log_exit "Unable to update values in Config.toml file: ${COLCYAN}${CONFIG_TOML}${COLRESET}"
}
## update Signer.toml with env vars
[[ ! -f "${SIGNER_TOML}" ]] && cp "${SIGNER_TOML}.sample" "${SIGNER_TOML}"
${VERBOSE} && log "${COLYELLOW}Updating values in ${SIGNER_TOML} from .env${COLRESET}"
$(sed -i.tmp "
/^node_host/s/.*/node_host = \"${STACKS_CORE_RPC_HOST}:${STACKS_CORE_RPC_PORT}\"/;
/^endpoint/s/.*/endpoint = \"0.0.0.0:${STACKS_SIGNER_PORT}\"/;
/^metrics_endpoint/s/.*/metrics_endpoint = \"0.0.0.0:${SIGNER_METRICS_PORT}\"/;
/^auth_password/s/.*/auth_password = \"${AUTH_TOKEN}\"/;
/^stacks_private_key/s/.*/stacks_private_key = \"${SIGNER_PRIVATE_KEY}\"/;
" "${SIGNER_TOML}" 2>&1) || {
log_exit "Unable to update values in Signer.toml file: ${COLCYAN}${SIGNER_TOML}${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Deleting temp Signer.toml file: ${SIGNER_TOML}.tmp${COLRESET}"
$(rm "${SIGNER_TOML}.tmp" 2>&1) || {
log_exit "Unable to delete tmp Signer.toml file: ${COLCYAN}${SIGNER_TOML}.tmp${COLRESET}"
}
fi
## update Config.toml with btc vars
[[ ! -f "${CONFIG_TOML}" ]] && cp "${CONFIG_TOML}.sample" "${CONFIG_TOML}"
${VERBOSE} && log "${COLYELLOW}Updating values in ${CONFIG_TOML} from .env${COLRESET}"
$(sed -i.tmp "
/^peer_host/s/.*/peer_host = \"${BTC_HOST}\"/;
/^username/s/.*/username = \"${BTC_RPC_USER}\"/;
/^password/s/.*/password = \"${BTC_RPC_PASS}\"/;
/^rpc_port/s/.*/rpc_port = ${BTC_RPC_PORT}/;
/^peer_port/s/.*/peer_port = ${BTC_P2P_PORT}/;
/^auth_token/s/.*/auth_token = \"${AUTH_TOKEN}\"/;
/^endpoint = \"stacks-signer/s/.*/endpoint = \"stacks-signer:${STACKS_SIGNER_PORT}\"/;
/^prometheus_bind/s/.*/prometheus_bind = \"0.0.0.0:${NODE_METRICS_PORT}\"/;
" "${CONFIG_TOML}" 2>&1) || {
log_exit "Unable to update values in Config.toml file: ${COLCYAN}${CONFIG_TOML}${COLRESET}"
}
${VERBOSE} && log "${COLYELLOW}Deleting temp Config.toml file: ${CONFIG_TOML}.tmp${COLRESET}"
$(rm "${CONFIG_TOML}.tmp" 2>&1) || {
log_exit "Unable to delete tmp Config.toml file: ${COLCYAN}${CONFIG_TOML}.tmp${COLRESET}"
}
return 0
}
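# Illustrative result of the update_configs substitutions above (sample .env values, not real settings):
#   with BTC_HOST=bitcoin.example.com and BTC_RPC_PORT=8332, Config.toml ends up containing
#   peer_host = "bitcoin.example.com" and rpc_port = 8332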
# Loop through supplied flags and set FLAGS for the yaml files to load
# - Silently fail if a flag isn't supported or a yaml file doesn't exist
set_flags() {
local array="${*}"
local flags=""
local flag_path=""
${VERBOSE} && log "EXPOSE_POSTGRES: ${EXPOSE_POSTGRES}"
if [ "${EXPOSE_POSTGRES}" -a -f "${SCRIPTPATH}/compose-files/extra-services/postgres.yaml" ]; then
${EXPOSE_POSTGRES} && flags="-f ${SCRIPTPATH}/compose-files/extra-services/postgres.yaml"
fi
# Case to change the path of files based on profile
${VERBOSE} && log "Setting optional flags for cmd to eval"
case ${profile} in
event-replay)
flag_path="event-replay"
;;
*)
flag_path="extra-services"
;;
esac
${VERBOSE} && log "array: ${array}"
${VERBOSE} && log "flags: ${flags}"
${VERBOSE} && log "flag_path: ${flag_path}"
${VERBOSE} && log "profile: ${profile}"
for item in ${array}; do
${VERBOSE} && log "checking if ${item} is a supported flag"
if check_flags "${SUPPORTED_FLAGS[*]}" "${item}"; then
# Add to local flags if found in SUPPORTED_FLAGS array *and* the file exists in the expected path
# - If no yaml file exists, silently fail
${VERBOSE} && log "Checking for compose file: ${SCRIPTPATH}/compose-files/${flag_path}/${item}.yaml"
if [ -f "${SCRIPTPATH}/compose-files/${flag_path}/${item}.yaml" ]; then
${VERBOSE} && log "compose file for ${item} is found"
flags="${flags} -f ${SCRIPTPATH}/compose-files/${flag_path}/${item}.yaml"
else
if [ "${profile}" != "stacks-blockchain" ];then
log_error "Missing compose file: ${COLCYAN}${SCRIPTPATH}/compose-files/${flag_path}/${item}.yaml${COLRESET}"
${VERBOSE} && log "calling usage function"
usage
fi
fi
fi
done
OPTIONAL_FLAGS=${flags}
${VERBOSE} && log "OPTIONAL_FLAGS: ${OPTIONAL_FLAGS}"
true
}
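# Illustrative example of set_flags: for flags "signer,proxy" with the default profile it builds
#   OPTIONAL_FLAGS="-f ${SCRIPTPATH}/compose-files/extra-services/signer.yaml -f ${SCRIPTPATH}/compose-files/extra-services/proxy.yaml"
# (assuming both yaml files exist under compose-files/extra-services)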
# Stop the services in a specific order, individually
ordered_stop() {
${VERBOSE} && log "Starting the ordered stop of services"
if [[ -f "${SCRIPTPATH}/compose-files/common.yaml" && -f "${SCRIPTPATH}/compose-files/networks/${NETWORK}.yaml" ]]; then
if eval "docker-compose -f ${SCRIPTPATH}/compose-files/common.yaml -f ${SCRIPTPATH}/compose-files/networks/${NETWORK}.yaml ps -q stacks-blockchain > /dev/null 2>&1"; then
${VERBOSE} && log "Services are running. continuing to stop services"
for service in "${DEFAULT_SERVICES[@]}"; do
if check_container "${service}"; then
local timeout=""
log "${COLBOLD}Stopping ${service}${COLRESET}"
if [ "${service}" == "stacks-blockchain" ]; then
# Wait for the stacks blockchain runloop to end by waiting for STACKS_SHUTDOWN_TIMEOUT
timeout="-t ${STACKS_SHUTDOWN_TIMEOUT}"
log " Timeout is set for ${STACKS_SHUTDOWN_TIMEOUT} seconds"
fi
# Compose a command to run using provided vars
cmd="docker-compose --env-file ${ENV_FILE} -f ${SCRIPTPATH}/compose-files/common.yaml -f ${SCRIPTPATH}/compose-files/networks/${NETWORK}.yaml --profile ${PROFILE} stop ${timeout} ${service}"
${VERBOSE} && log "Running: ${cmd}"
eval "${cmd}"
fi
done
return 0
fi
# stacks-blockchain isn't running, so order of stop isn't important and we just run docker_down
log "${COLBOLD}Stacks Blockchain services are not running. Continuing${COLRESET}"
return 1
fi
}
# Configure options to bring services up
docker_up() {
if check_network "${PROFILE}"; then
echo
log_exit "Stacks Blockchain services are already running"
fi
if ! check_event_replay; then
log_exit "Event-replay in progress. Refusing to start services"
fi
# Set signer env based on flag
if [[ "${FLAGS_ARRAY[*]}" == *"signer"* ]]; then
SIGNER=true
fi
# Sanity checks before starting services
local param="-d"
if [ "${PROFILE}" == "bns" ]; then
param=""
fi
# Create required config files and directories
[[ ! -f "${SCRIPTPATH}/conf/${NETWORK}/Config.toml" ]] && cp "${SCRIPTPATH}/conf/${NETWORK}/Config.toml.sample" "${SCRIPTPATH}/conf/${NETWORK}/Config.toml"
if [[ "${NETWORK}" == "private-testnet" ]]; then
[[ ! -f "${SCRIPTPATH}/conf/${NETWORK}/puppet-chain.toml" ]] && cp "${SCRIPTPATH}/conf/${NETWORK}/puppet-chain.toml.sample" "${SCRIPTPATH}/conf/${NETWORK}/puppet-chain.toml"
fi
if [[ "${NETWORK}" == "mainnet" || "${NETWORK}" == "testnet" ]];then
if [[ ! -d "${SCRIPTPATH}/persistent-data/${NETWORK}" ]];then
log "Creating persistent-data for ${NETWORK}"
mkdir -p "${SCRIPTPATH}/persistent-data/${NETWORK}/event-replay" >/dev/null 2>&1 || {
log_exit "Unable to create required dir: ${COLCYAN}${SCRIPTPATH}/persistent-data/${NETWORK}/event-replay${COLRESET}"
}
${VERBOSE} && log "created (recursive) persistent-data dir ${SCRIPTPATH}/persistent-data/${NETWORK}/event-replay"
fi
${VERBOSE} && log "Using existing data dir: ${SCRIPTPATH}/persistent-data/${NETWORK}"
fi
update_configs
# # See if we can detect a Hiro API major version change requiring an event-replay import
# if check_api; then
# log_warn " Required to perform a stacks-blockchain-api event-replay:"
# log_warn " https://github.com/hirosystems/stacks-blockchain-api#event-replay "
# if confirm "Run event-replay now?"; then
# ## Bring running services down
# ${VERBOSE} && log "upgrade api: calling docker_down function"
# docker_down
# ## Pull new images if available
# ${VERBOSE} && log "upgrade api: docker_pull function"
# docker_pull
# ## Run the event-replay import
# ${VERBOSE} && log "upgrade api: event-replay import function"
# event_replay "import"
# fi
# log_exit "Event-replay is required"
# fi
${VERBOSE} && log "Copying ${COLCYAN}${ENV_FILE}${COLRESET} -> ${COLCYAN}${ENV_FILE}.save${COLRESET}"
$(cp -a "${ENV_FILE}" "${ENV_FILE}.save") >/dev/null 2>&1 || {
log_exit "Unable to copy ${COLCYAN}${ENV_FILE}${COLRESET} -> ${COLCYAN}${ENV_FILE}.save${COLRESET}"
}
log "Starting all services for ${COLYELLOW}${PROFILE}${COLRESET}"
${VERBOSE} && log "calling run_docker function: run_docker \"up\" \"${FLAGS_ARRAY[*]}\" \"${PROFILE}\" \"${param}\""
run_docker "up" "${FLAGS_ARRAY[*]}" "${PROFILE}" "${param}"
}
# Configure options to bring services down
docker_down() {
if ! check_network "${PROFILE}"; then
if [ "${ACTION}" != "restart" ];then
${VERBOSE} && log "calling status function"
status
fi
return
fi
# sanity checks before stopping services
if ! check_event_replay;then
log_exit "Event-replay in progress. Refusing to stop services"
fi
if [[ "${NETWORK}" == "mainnet" || "${NETWORK}" == "testnet" ]] && [ "${PROFILE}" != "bns" ]; then
# if this is mainnet/testnet and the profile is not bns, stop the blockchain service first
${VERBOSE} && log "calling ordered_stop function"
if ordered_stop; then
# stop the rest of the services after the blockchain has been stopped
log "Stopping all services for ${COLYELLOW}${PROFILE}${COLRESET}"
fi
fi
# Stop any remaining services
log "${COLBOLD}Stopping all services${COLRESET}"
${VERBOSE} && log "calling run_docker function: run_docker \"down\" \"${SUPPORTED_FLAGS[*]}\" \"${PROFILE}\""
run_docker "down" "${SUPPORTED_FLAGS[*]}" "${PROFILE}"
}
# Output the service logs
docker_logs(){
# Tail docker logs for the last x lines via LOG_TAIL as 'param'
local param="${1}"
${VERBOSE} && log "param: ${param}"
if ! check_network "${PROFILE}"; then
log_error "No ${COLYELLOW}${NETWORK}${COLRESET} services running"
usage
fi
${VERBOSE} && log "calling run_docker function: run_docker \"logs\" \"${SUPPORTED_FLAGS[*]}\" \"${PROFILE}\" \"${param}\""
run_docker "logs" "${SUPPORTED_FLAGS[*]}" "${PROFILE}" "${param}"
}
# Export docker logs for the main services to files in ./exported-logs
logs_export(){
if ! check_network "${PROFILE}"; then
log_error "No ${COLYELLOW}${NETWORK}${COLRESET} services running"
usage
fi
log "Exporting log data to text file"
# create exported-logs if it doesn't exist
if [[ ! -d "${SCRIPTPATH}/exported-logs" ]];then
log " - Creating log dir: ${COLCYAN}${SCRIPTPATH}/exported-logs${COLRESET}"
mkdir -p "${SCRIPTPATH}/exported-logs" >/dev/null 2>&1 || {
log_exit "Unable to create required dir: ${COLCYAN}${SCRIPTPATH}/exported-logs${COLRESET}"
}
${VERBOSE} && log "created logs dir: ${SCRIPTPATH}/exported-logs"
fi
${VERBOSE} && log "using existing logs dir: ${SCRIPTPATH}/exported-logs"
# loop through main services, storing the logs as a text file
for service in "${DEFAULT_SERVICES[@]}"; do
if check_container "${service}"; then
log " - Exporting logs for ${COLCYAN}${service}${COLRESET} -> ${COLCYAN}${SCRIPTPATH}/exported-logs/${service}.log${COLRESET}"
eval "docker logs ${service} > ${SCRIPTPATH}/exported-logs/${service}.log 2>&1"
else
log " - Skipping export for non-running service ${COLYELLOW}${service}${COLRESET}"
fi
done
log_info "Log export complete"
exit 0
}
# Pull any updated images that may have been published
docker_pull() {
${VERBOSE} && log "pulling new images for ${PROFILE}"
${VERBOSE} && log "calling run_docker function: run_docker \"pull\" \"${SUPPORTED_FLAGS[*]}\" \"${PROFILE}\""
run_docker "pull" "${SUPPORTED_FLAGS[*]}" "${PROFILE}"
}
# Check if the services are running
status() {
if check_network "${PROFILE}"; then
echo
log "${COLBOLD}Stacks Blockchain services are running${COLRESET}"
echo
${VERBOSE} && echo -e "$(docker-compose -f "${SCRIPTPATH}/compose-files/common.yaml" ps)"
exit 0
else
echo
log "${COLBOLD}Stacks Blockchain services are not running${COLRESET}"
echo
exit 1
fi
}
# Delete persistent data for NETWORK
reset_data() {
if [ -d "${SCRIPTPATH}/persistent-data/${NETWORK}" ]; then
${VERBOSE} && log "Found existing data: ${SCRIPTPATH}/persistent-data/${NETWORK}"
if ! check_network "${PROFILE}"; then
# Exit if operation isn't confirmed
confirm "Delete Persistent data for ${COLYELLOW}${NETWORK}${COLRESET}?" || log_exit "Delete Cancelled"
${VERBOSE} && log " Running: rm -rf ${SCRIPTPATH}/persistent-data/${NETWORK}"
rm -rf "${SCRIPTPATH}/persistent-data/${NETWORK}" >/dev/null 2>&1 || {
# Log error and exit if data wasn't deleted (permission denied etc)
log_error "Failed to remove ${COLCYAN}${SCRIPTPATH}/persistent-data/${NETWORK}${COLRESET}"
log_exit " Re-run the command with sudo: ${COLCYAN}sudo ${0} -n ${NETWORK} -a reset${COLRESET}"
}
log_info "Persistent data deleted"
echo
exit 0
else
# Log error and exit if services are already running
log_error "Can't reset while services are running"
log_exit " Try again after running: ${COLCYAN}${0} -n ${NETWORK} -a stop${COLRESET}"
fi
fi
# No data exists, log error and move on
log_error "No data exists for ${COLYELLOW}${NETWORK}${COLRESET}"
${VERBOSE} && log "calling usage function"
usage
}
# Download V1 BNS data to import via .env file BNS_IMPORT_DIR
download_bns_data() {
if [ "${BNS_IMPORT_DIR}" ]; then
${VERBOSE} && log "Using defined BNS_IMPORT_DIR: ${BNS_IMPORT_DIR}"
if ! check_network "${PROFILE}"; then
SUPPORTED_FLAGS+=("bns")
FLAGS_ARRAY=(bns)
PROFILE="bns"
${VERBOSE} && log "SUPPORTED_FLAGS: ${SUPPORTED_FLAGS[*]}"
${VERBOSE} && log "FLAGS_ARRAY: ${FLAGS_ARRAY[*]}"
${VERBOSE} && log "PROFILE: ${PROFILE}"
if [ ! -f "${SCRIPTPATH}/compose-files/extra-services/bns.yaml" ]; then
log_exit "Missing bns compose file: ${COLCYAN}${SCRIPTPATH}/compose-files/extra-services/bns.yaml${COLRESET}"
fi
log "Downloading and extracting V1 bns-data"
${VERBOSE} && log "calling docker_up function"
docker_up
${VERBOSE} && log "calling docker_down function"
docker_down
log_info "BNS Download Operation is complete"
log " Start the services with: ${COLCYAN}${0} -n ${NETWORK} -a start${COLRESET}"
exit 0
fi
echo
log_error "Refusing to download BNS data - ${COLBOLD}services need to be stopped first${COLRESET}"
log_exit " Stop the services with: ${COLCYAN}${0} -n ${NETWORK} -a stop${COLRESET}"
fi
echo
log_error "Undefined or commented ${COLYELLOW}BNS_IMPORT_DIR${COLRESET} variable in ${COLCYAN}${ENV_FILE}${COLRESET}"
exit 0
}
# Perform the Hiro API event-replay
event_replay(){
if [ "${STACKS_BLOCKCHAIN_API_VERSION}" == "5.0.1" ]; then
echo
log "${COLYELLOW}${COLBOLD}There is an open issue running event-replay with this version (${STACKS_BLOCKCHAIN_API_VERSION}) of the API${COLRESET}"
log " https://github.com/hirosystems/stacks-blockchain-api/issues/1336"
log "For now, use prior version of the API: ${COLBOLD}4.2.1${COLRESET}"
log "Or sync from genesis using API: ${COLBOLD}5.0.1${COLRESET}"
echo
log_exit "${1} not supported for this version of the API"
fi
if [ "${STACKS_EXPORT_EVENTS_FILE}" != "" ]; then
${VERBOSE} && log "Using defined STACKS_EXPORT_EVENTS_FILE: ${STACKS_EXPORT_EVENTS_FILE}"
# Check if the event-replay file exists first
local tsv_file
tsv_file="${SCRIPTPATH}/persistent-data/mainnet/event-replay"/$(basename "${STACKS_EXPORT_EVENTS_FILE}")
if [ ! -f "${tsv_file}" ]; then
log_error "Missing event-replay file: ${COLCYAN}${tsv_file}${COLRESET}"
fi
${VERBOSE} && log "Using local event-replay file: ${tsv_file}"
if check_network "${PROFILE}"; then
${VERBOSE} && log "calling docker_down function"
docker_down
fi
PROFILE="event-replay"
local action="${1}"
SUPPORTED_FLAGS+=("api-${action}-events")
FLAGS_ARRAY=("api-${action}-events")
${VERBOSE} && log "PROFILE: ${PROFILE}"
${VERBOSE} && log "SUPPPORTED_FLAGS: ${SUPPORTED_FLAGS[*]}"
${VERBOSE} && log "FLAGS_ARRAY: ${FLAGS_ARRAY[*]}"
if [ ! -f "${SCRIPTPATH}/compose-files/event-replay/api-${action}-events.yaml" ]; then
echo
log_exit "Missing events compose file: ${COLCYAN}${SCRIPTPATH}/compose-files/event-replay/api-${action}-events.yaml${COLRESET}"
fi
${VERBOSE} && log "calling docker_up function"
docker_up
echo
log "${COLBRRED}${COLBOLD}This operation can take a long while${COLRESET}"
log "Check logs for completion: ${COLCYAN}${0} -n ${NETWORK} -a logs${COLRESET}"
if [ "${action}" == "export" ]; then
log " - Look for a export log entry: ${COLYELLOW}\"Export successful.\"${COLRESET}"
fi
if [ "${action}" == "import" ]; then
log " - Look for a import log entry: ${COLYELLOW}\"Event import and playback successful.\"${COLRESET}"
fi
log "${COLBOLD}Once the operation is complete${COLRESET}, restart the service with: ${COLCYAN}${0} -n ${NETWORK} -a restart${COLRESET}"
echo
exit 0
fi
echo
log_error "Undefined or commented ${COLYELLOW}STACKS_EXPORT_EVENTS_FILE${COLRESET} variable in ${COLCYAN}${ENV_FILE}${COLRESET}"
exit 0
}
# Execute the docker-compose command using provided args
run_docker() {
local action="${1}"
local flags="${2}"
local profile="${3}"
local param="${4}"
# # set any optional flags
set_flags "${flags}"
cmd="docker-compose --env-file ${ENV_FILE} -f ${SCRIPTPATH}/compose-files/common.yaml -f ${SCRIPTPATH}/compose-files/networks/${NETWORK}.yaml ${OPTIONAL_FLAGS} --profile ${profile} ${action} ${param}"
# Log the command we'll be running for verbosity
${VERBOSE} && log "action: ${action}"
${VERBOSE} && log "profile: ${profile}"
${VERBOSE} && log "param: ${param}"
${VERBOSE} && log "OPTIONAL_FLAGS: ${OPTIONAL_FLAGS}"
${VERBOSE} && log "Running: eval ${cmd}"
if [[ "${NETWORK}" == "mocknet" && "${action}" == "up" ]]; then
${VERBOSE} && log "Disabling STACKS_EXPORT_EVENTS_FILE for ${NETWORK}"
events_file_env
${VERBOSE} && log "Disabling BNS_IMPORT_DIR for ${NETWORK}"
bns_import_env
fi
if [[ "${NETWORK}" == "mainnet" || "${NETWORK}" == "testnet" ]] && [ "${action}" == "up" ]; then
${VERBOSE} && log "Checking if BNS_IMPORT_DIR is defined and has data"
if ! check_bns; then
log "Missing some BNS files"
log " run: ${COLCYAN}${0} bns"
log " -or -"
log " comment BNS_IMPORT_DIR in ${COLYELLOW}${ENV_FILE}${COLRESET}"
exit_error "Exiting"
fi
fi
eval "${cmd}"
local ret="${?}"
if [[ "${NETWORK}" == "mocknet" && "${action}" == "up" ]]; then
${VERBOSE} && log "Re-enabling STACKS_EXPORT_EVENTS_FILE for ${NETWORK}"
events_file_env
${VERBOSE} && log "Re-enabling BNS_IMPORT_DIR for ${NETWORK}"
bns_import_env
fi
${VERBOSE} && log "cmd returned: ${ret}"
# If the return code is non-zero, the failure should be apparent. If it worked, print how to follow the logs
if [[ "$ret" -eq 0 && "${action}" == "up" && "${profile}" != "bns" ]]; then
log_info "Brought up ${COLYELLOW}${NETWORK}${COLRESET}"
log " Follow logs: ${COLCYAN}${0} -n ${NETWORK} -a logs${COLRESET}"
fi
if [[ "$ret" -eq 0 && "${action}" == "down" && "${profile}" != "bns" ]]; then
log_info "Brought down ${COLYELLOW}${NETWORK}${COLRESET}"
fi
}
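# Illustrative command composed by run_docker for `-n mainnet -a start -f proxy`
# (assuming the proxy yaml exists; paths shown with the SCRIPTPATH variable unexpanded):
#   docker-compose --env-file ${SCRIPTPATH}/.env \
#     -f ${SCRIPTPATH}/compose-files/common.yaml \
#     -f ${SCRIPTPATH}/compose-files/networks/mainnet.yaml \
#     -f ${SCRIPTPATH}/compose-files/extra-services/proxy.yaml \
#     --profile stacks-blockchain up -d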
# Check for required binaries, exit if missing
for cmd in docker-compose docker id; do
command -v "${cmd}" >/dev/null 2>&1 || log_exit "Missing command: ${cmd}"
done
# If no args are provided, print usage
if [[ ${#} -eq 0 ]]; then
${VERBOSE} && log "No args provided"
${VERBOSE} && log "calling usage function"
usage
fi
USER_ID=$(id -u "$(whoami)")
export USER_ID="${USER_ID}"
export DOCKER_NETWORK="${DOCKER_NETWORK}"
export STACKS_CHAIN_ID=${STACKS_CHAIN_ID}
${VERBOSE} && log "Exporting STACKS_CHAIN_ID: ${STACKS_CHAIN_ID}"
${VERBOSE} && log "Exporting USER_ID: ${USER_ID}"
${VERBOSE} && log "Exporting DOCKER_NETWORK: ${DOCKER_NETWORK}"
# loop through the args and try to determine what options we have
# - simple check for logs/status/upgrade/bns since these are not network dependent
while [ ${#} -gt 0 ]
do
case ${1} in
-n|--network)
# Retrieve the network arg, converted to lowercase
if [ "${2}" == "" ]; then
log_error "Missing required value for ${COLYELLOW}${1}${COLRESET}"
${VERBOSE} && log "calling usage function"
usage
fi
NETWORK=$(echo "${2}" | tr -d ' ' | awk '{print tolower($0)}')
${VERBOSE} && log "calling check_flags function with (SUPPORTED_NETWORKS: ${SUPPORTED_NETWORKS[*]}) (NETWORK: ${NETWORK})"
if ! check_flags "${SUPPORTED_NETWORKS[*]}" "${NETWORK}"; then
log_error "Network (${COLYELLOW}${NETWORK}${COLRESET}) not supported"
${VERBOSE} && log "calling usage function"
usage
fi
${VERBOSE} && log "Defining NETWORK: ${NETWORK}"
${VERBOSE} && log "SUPPORTED_NETWORKS: ${SUPPORTED_NETWORKS[*]}"
shift
;;
-a|--action)
# Retrieve the action arg, converted to lowercase
if [ "${2}" == "" ]; then
log_error "Missing required value for ${COLYELLOW}${1}${COLRESET}"
${VERBOSE} && log "calling usage function"
usage
fi
ACTION=$(echo "${2}" | tr -d ' ' | awk '{print tolower($0)}')
${VERBOSE} && log "calling check_flags function with (SUPPORTED_ACTIONS: ${SUPPORTED_ACTIONS[*]}) (ACTION: ${ACTION})"
if ! check_flags "${SUPPORTED_ACTIONS[*]}" "${ACTION}"; then
log_error "Action (${COLYELLOW}${ACTION}${COLRESET}) not supported"
${VERBOSE} && log "calling usage function"
usage
fi
${VERBOSE} && log "Defining ACTION: ${ACTION}"
# If the action is log/logs, we also accept a second option 'export' to save the log output to file
if [[ "${ACTION}" =~ ^(log|logs)$ && "${3}" == "export" ]]; then
${VERBOSE} && log "calling logs_export function"
logs_export
fi
shift
;;
-f|--flags)
# Retrieve the flags arg as a comma separated list, converted to lowercase
# Check against the dynamic list 'FLAGS_ARRAY' which validates against folder contents
if [ "${2}" == "" ]; then
log_error "Missing required value for ${COLYELLOW}${1}${COLRESET}"
${VERBOSE} && log "calling usage function"
usage
fi
FLAGS=$(echo "${2}" | tr -d ' ' | awk '{print tolower($0)}')
set -f; IFS=','
FLAGS_ARRAY=(${FLAGS})
${VERBOSE} && log "calling check_flags function with (FLAGS_ARRAY: ${FLAGS_ARRAY[*]}) (FLAGS: ${FLAGS[*]})"
if check_flags "${FLAGS_ARRAY[*]}" "bns" && [ "${ACTION}" != "bns" ]; then
log_error "${COLYELLOW}bns${COLRESET} is not a valid flag"
usage
fi
${VERBOSE} && log "Defining FLAGS: ${FLAGS[*]}"
shift
;;