diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 626a4a8..ce65dd8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,8 +23,8 @@ jobs: with: go-version: 1.17 - - name: Cache Golang modules - uses: actions/cache@v1 + - name: Load Golang modules from cache + uses: martijnhols/actions-cache/restore@v3 with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} @@ -36,8 +36,15 @@ jobs: if: success() && startsWith(github.ref, 'refs/tags/') with: distribution: goreleaser - version: latest + version: 1.4.1 args: release --rm-dist env: LDFLAGS: "-X github.com/lflare/mdathome-golang/internal/mdathome.ClientVersion=${{ github.ref_name }} -X mdathome.Build=${{ github.sha }}" GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Save Golang modules to cache + if: always() + uses: martijnhols/actions-cache/save@v3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} diff --git a/.gitignore b/.gitignore index 42f7229..f0e01c3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,7 @@ # Ignore build directory & single build artifact build/ mdathome-golang + +# Runtime configuration +config.toml +settings.json diff --git a/.goreleaser.yml b/.goreleaser.yml index 4ed81e9..4d485b1 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -5,9 +5,12 @@ before: hooks: - go mod download +release: + prerelease: auto + builds: - binary: mdathome - main: cmd/mdathome/main.go + main: main.go env: - CGO_ENABLED=0 - GO111MODULE=on @@ -19,44 +22,20 @@ builds: post: - /bin/sh -c "upx -q '{{.Path}}' || true" goos: - - aix - - android - darwin - - dragonfly - - freebsd - - illumos - - js - linux - - netbsd - - openbsd - - plan9 - - solaris - windows + - freebsd goarch: - - 386 - amd64 - arm - arm64 - - ppc64 - - ppc64le - - mips - - mipsle - - mips64 - - mips64le - - riscv64 - s390x - - wasm + - ppc64le goarm: - 5 - 6 - 7 - gomips: - - hardfloat - - softfloat - ignore: - - goos: android - - goos: js - - goos: plan9 archives: - format: binary diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100755 index 7a8c24c..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,357 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] -### Added - -### Changed - -### Deprecated - -### Removed - -### Fixed - -### Security - -## [v1.12.2] - 2022-02-21 -### Added -- [2022-02-20] Added fallback invalid hostname rejecter by [@lflare]. - -### Changed -- [2022-02-20] Disabled buggy TLS SNI whitelist by default by [@lflare]. - -### Fixed -- [2022-02-20] Attempt to mitigate SNI crashes by [@lflare] - -## [v1.12.1] - 2022-01-30 -### Added -- [2022-01-30] Added GitHub Actions build and publish mechanism by [@lflare]. -- [2022-01-30] Added configuration of visitor timeouts by [@lflare]. - -## [v1.12.0] - 2021-08-11 -### Added -- [2021-08-11] Added TLS certificate reloader by [@lflare]. - -## [v1.11.6] - 2021-07-03 -### Changed -- [2021-07-03] Increased timeout for Read by [@lflare]. -- [2021-07-03] Increase timeouts to 5 minutes by [@lflare]. - -### Fixed -- [2021-07-03] Ignore non-existant cache files when deleting by [@lflare]. - -## [v1.11.5] - 2021-06-15 -### Added -- [2021-06-14] Added experimental auto-recovery and gzip support by [@lflare]. 
- -### Changed -- [2021-06-14] Took advantage of new `bolt.Compact()` command for database shrinking by [@lflare]. - -### Fixed -- [2021-06-14] Fixed reverse proxy `X-Forwarded-For` IP handling by [@lflare]. -- [2021-06-14] Automatically serve robots.txt, fixes #19 by [@lflare]. - -## [v1.11.4] - 2021-06-10 -### Fixed -- [2021-06-10] Properly refixed TLS SNI random crashes again by [@lflare]. - -## [v1.11.3] - 2021-06-07 -### Changed -- [2021-06-02] Removed `upx` from macOS/Darwin builds by [@lflare]. - -### Fixed -- [2021-06-07] Fixed TLS SNI random crashes by [@lflare]. - -## [v1.11.2] - 2021-05-30 -### Added -- [2021-05-30] Very rudimentary SNI whitelist support by [@lflare]. - -### Fixed -- [2021-05-30] Fixed headers change in specification 31 by [@lflare]. -- [2021-05-30] Use clientSettings for HTTP/2 setting by [@lflare]. - -## [v1.11.1] - 2021-05-29 -### Fixed -- [2021-05-29] Fixed incorrect size report override by [@lflare]. - -## [v1.11.0] - 2021-05-29 -### Added -- [2021-05-28] Added `send_server_header` parameter to disable sending the `Server` header by [@Korbeil]. -- [2021-05-29] Added some form of reverse proxy IP middleware by [@lflare]. -- [2021-05-29] Added rudimentary settings version migrator by [@lflare]. - -### Changed -- [2021-05-29] Reworked configuration settings to be on a per-category basis by [@lflare]. -- [2021-05-29] Updated client specification to 31 by [@lflare]. - -## [v1.10.3] - 2021-05-13 -### Added -- [2021-05-13] Added configuration option in JSON for specifying logs folder by [@lflare]. - -## [v1.10.2] - 2021-05-13 -### Added -- [2021-05-13] Commandline flag to read configuration from specific path by [@lflare]. - -### Fixed -- [2021-05-13] Bind specifically to IPv4 ports by [@lflare]. - -## [v1.10.1] - 2021-04-16 -### Changed -- [2021-04-16] Made API backend configurable by [@lflare]. - -### Fixed -- [2021-04-16] Redid project linting and formatting guidelines with staticcheck by [@lflare]. - -## [v1.10.0] - 2021-03-18 -### Changed -- [2021-03-11] Added more logging fields in JSON structure by [@lflare]. -- [2021-03-12] Updated to client specification 30 by [@lflare]. - -### Removed -- [2021-03-12] Removed test chapter exemptions by [@lflare]. - -## [v1.9.3] - 2021-03-05 -### Changed -- [2021-02-28] Updated `go.mod` with Golang 1.16 by [@lflare]. - -### Fixed -- [2021-02-25] Fixed missing `f` thanks to LittleEndu by [@lflare]. -- [2021-03-05] Fixed IPv6 issue with backend communication by [@lflare]. - -## [v1.9.2] - 2021-02-25 -### Changed -- [2021-02-13] Updated README.md with more up-to-date instructions by [@lflare]. -- [2021-02-25] Recompiled with Golang v1.16 by [@lflare]. - -## [v1.9.1] - 2021-02-02 -### Added -- [2021-01-28] Added low-memory mode option to stream images straight from disk by [@lflare]. -- [2021-02-02] Added back ALL THE COMPILATIONS by [@lflare]. - -### Changed -- [2021-01-28] Reworked diskcache to stream files more efficiently by [@lflare]. -- [2021-02-02] Added `disable_tokens` handling from backend by [@lflare]. -- [2021-02-02] Updated to client specification 23 by [@lflare]. -- [2021-02-02] Lowered startup delay to 5 seconds on older versions of client by [@lflare]. - -## [v1.9.0] - 2021-01-24 -### Added -- [2021-01-21] Added Prometheus metrics of diskcache by [@lflare]. -- [2021-01-23] Added 15 seconds upstream timeout by [@lflare]. -- [2021-01-23] Added experimental geoip support to Prometheus metrics by [@lflare]. - -### Changed -- [2021-01-21] Adjusted logging of diskcache by [@lflare]. 
-- [2021-01-23] Made server read/write timeouts more aggresive with 30s and 1m respectively by [@lflare]. -- [2021-01-23] Properly pre-processed IP address to only log IP addresses without ports by [@lflare]. -- [2021-01-24] Reworked for auto-downloading of MaxMind databases for geolocation by [@lflare]. - -## [v1.8.1] - 2021-01-10 -### Added -- [2021-01-20] Allow overriding of reported address to backend by [@lflare]. - -### Changed -- [2021-01-10] Increased interval of refresh & backend ping to 30 seconds by [@lflare]. -- [2021-01-10] Comply with specification version 20 and default to verify tokens by [@lflare]. -- [2021-01-20] Decreased interval of refresh and server ping back to 10 seconds by [@lflare]. - -### Removed -- [2021-01-15] Removed intermediary and stream image direct from cache to visitor by [@lflare]. - -## [v1.8.0] - 2021-01-10 -### Added -- [2021-01-04] Added option for overriding port advertisement made to backend server by [@lflare]. -- [2021-01-10] Added token whitelist for client specification compliance by [@lflare]. -- [2021-01-10] Updated to client specification version 20 by [@lflare]. - -### Changed -- [2021-01-07] Heavily refactored Prometheus metric labels for clarity by [@lflare]. - -## [v1.7.6] - 2021-01-04 -### Fixed -- [2021-01-04] Fixed streamed images Content-Length header being inaccurate on `data-saver` images by [@lflare]. - -## [v1.7.5] - 2021-01-04 -### Added -- [2021-01-04] Adding Prometheus metric for invalid checksum images by [@lflare]. - -## [v1.7.4] - 2021-01-04 -### Fixed -- [2021-01-04] Disabled image verification for `data-saver` images by [@lflare]. - -## [v1.7.3] - 2021-01-03 -### Fixed -- [2021-01-03] Fixed Last-Modified header reporting by [@lflare]. - -## [v1.7.2] - 2021-01-03 -### Changed -- [2021-01-03] Improved goreleaser configuration by [@lflare]. - -### Fixed -- [2021-01-03] Get diskcache to work with logrus logger by [@lflare]. - -## [v1.7.1] - 2021-01-03 -### Changed -- [2021-01-03] Swapped to VictoriaMetrics for better Histogram by [@lflare]. - -## [v1.7.0] - 2021-01-03 -### Added -- [2021-01-03] Added Prometheus metrics endpoint by [@lflare]. - -### Changed -- [2021-01-03] Organised settings by type by [@lflare]. - -## [v1.6.2] - 2021-01-01 -### Changed -- [2021-01-01] Changed timestamp format to RFC3339 instead of RFC822 by [@lflare]. - -### Fixed -- [2021-01-01] Fixed file logging not updating log level by [@lflare]. - -## [v1.6.1] - 2020-12-29 -### Fixed -- [2020-12-29] Fixed invalid logging on invalid token but not rejected requests by [@lflare]. - -## [v1.6.0] - 2020-12-29 -### Added -- [2020-12-29] Added option to disable upstream connection pooling by [@lflare]. - -### Changed -- [2020-12-29] Revamped logging system with loglevels and more by [@lflare]. - -## [v1.5.5] - 2020-12-22 -### Changed -- [2020-12-22] Added configuration option for upstream override by [@lflare]. - -## [v1.5.4] - 2020-10-12 -### Changed -- [2020-10-12] Replaced boltdb implementation with etcd's by [@lflare]. - -## [v1.5.3] - 2020-09-29 -### Added -- [2020-09-29] Added client configuration of allowing visitor-forced image refresh by [@lflare]. - -## [v1.5.2] - 2020-09-08 -### Added -- [2020-09-08] Added client configuration of optional HTTP2 by [@lflare]. -- [2020-08-23] Added some form of image integrity check via use of SHA256 checksums provided by upstream by [@lflare]. - -### Changed -- [2020-09-08] Bumped client version up to 19 by [@lflare]. 
- -## [v1.5.1] - 2020-08-16 -### Fixed -- [2020-08-16] Fixed fatalistic logging for cache miss by [@lflare]. - -## [v1.5.0] - 2020-08-15 -### Added -- [2020-08-15] Added argument system by [@lflare]. -- [2020-08-15] Added `-shrink-database` argument flag to shrink overly huge cache.db files by [@lflare]. - -### Changed -- [2020-08-15] Massively refactored code and included diskcache-golang as an internal module by [@lflare]. - -## [v1.4.1] - 2020-08-15 -### Changed -- [2020-08-15] Updated to v0.5.1 of diskcache by [@lflare]. - -## [v1.4.0] - 2020-08-14 -### Added -- [2020-08-14] Added `cache_refresh_age_in_seconds` configuration option to reduce cache update speeds for large caches by [@lflare]. - -### Changed -- [2020-08-14] Updated to v0.5.0 of diskcache by [@lflare]. -- [2020-08-14] Massively refactored codebase by [@lflare]. - -## [v1.3.2] - 2020-08-09 -### Changed -- [2020-08-07] Swapped out retryablehttp for default vanilla http.Client for keep-alive reuse by [@lflare]. - -## [v1.3.1] - 2020-08-01 -### Fixed -- [2020-08-01] Updated filename regex for more flexibility in image filenames by [@lflare]. - -## [v1.3.0] - 2020-07-19 -### Added -- [2020-07-18] Added version checker by [@lflare]. - -### Fixed -- [2020-07-18] Fixed incorrect reported disk space to server for edge cases by [@lflare]. - -## [v1.2.4] - 2020-07-18 -### Added -- [2020-07-18] Added `make local` support for development builds by [@lflare]. - -### Fixed -- [2020-07-18] Dropped connections no longer save half-corrupted images to cache by [@lflare]. - -### Changed -- [2020-07-18] Properly refactored code to fit golangci-lint styles with advisory from @columna1 by [@lflare]. - -## [v1.2.3] - 2020-07-14 -### Added -- [2020-07-14] Added image verification code by [@lflare]. - -### Fixed -- [2020-07-10] Fixed invalid response code for invalid tokens due to typo by [@lflare]. - -## [v1.2.2] - 2020-07-09 -### Changed -- [2020-07-09] Add client spec version to Server header sent by client by [@lflare]. - -## [v1.2.1] - 2020-07-09 -### Changed -- [2020-07-09] Increased WriteTimeout to 5 minutes to match token expiration timing by [@lflare]. -- [2020-07-09] Bumped version number to 16 to match 1.1.5 official build by [@lflare]. - -## [v1.2.0] - 2020-07-05 -### Added -- [2020-07-05] Added rudimentary validation of request tokens by [@lflare]. -- [2020-07-05] Automatic update of client settings in the event of new fields by [@lflare]. -- [2020-07-05] Added version numbers to build artifacts by [@lflare]. - -### Changed -- [2020-07-03] Updated README.md with relevant up-to-date information by [@lflare]. -- [2020-07-03] Updated client defaults by [@lflare]. -- [2020-07-04] Changed graceful shutdown timer from 15 to 30 seconds by [@lflare]. -- [2020-07-04] Updated Makefile for single builds to produce static binaries by [@lflare]. -- [2020-07-05] Convert `sanitized_url` to `sanitizedUrl` for better cohesion by [@lflare]. - -### Fixed -- [2020-07-04] Reduced aborted requests due to faulty timer updating by [@lflare]. - -## [v1.1.0] - 2020-07-03 -### Added -- [2020-07-01] Added official CHANGELOG.md file to keep track of changes from v1.0.0 release by [@lflare]. -- [2020-07-01] Simple Makefile to batch build for multiple architectures by [@lflare]. -- [2020-07-01] Added badge for linking to latest release on GitHub by [@lflare]. -- [2020-07-03] Preliminary check for `Cache-Control` header to pull from upstream by [@lflare]. -- [2020-07-03] goreleaser for easier publishing of binaries by [@lflare]. 
- -### Changed -- [2020-07-01] Updated Makefile for proper Windows executable file extension by [@lflare]. -- [2020-07-03] Upgraded lflare/diskcache-golang to v0.2.3 by [@lflare]. - -## [v1.0.0] - 2020-07-01 -### Added -- [2020-07-01] First stable unofficial client public release by [@lflare] - -[Unreleased]: https://github.com/lflare/mdathome-golang/compare/v1.5.1...HEAD -[v1.5.1]: https://github.com/lflare/mdathome-golang/compare/v1.5.0...v1.5.1 -[v1.5.0]: https://github.com/lflare/mdathome-golang/compare/v1.4.1...v1.5.0 -[v1.4.1]: https://github.com/lflare/mdathome-golang/compare/v1.4.0...v1.4.1 -[v1.4.0]: https://github.com/lflare/mdathome-golang/compare/v1.3.2...v1.4.0 -[v1.3.2]: https://github.com/lflare/mdathome-golang/compare/v1.3.1...v1.3.2 -[v1.3.1]: https://github.com/lflare/mdathome-golang/compare/v1.3.0...v1.3.1 -[v1.3.0]: https://github.com/lflare/mdathome-golang/compare/v1.2.4...v1.3.0 -[v1.2.4]: https://github.com/lflare/mdathome-golang/compare/v1.2.3...v1.2.4 -[v1.2.3]: https://github.com/lflare/mdathome-golang/compare/v1.2.2...v1.2.3 -[v1.2.2]: https://github.com/lflare/mdathome-golang/compare/v1.2.1...v1.2.2 -[v1.2.1]: https://github.com/lflare/mdathome-golang/compare/v1.2.0...v1.2.1 -[v1.2.0]: https://github.com/lflare/mdathome-golang/compare/v1.1.0...v1.2.0 -[v1.1.0]: https://github.com/lflare/mdathome-golang/compare/v1.0.0...v1.1.0 -[v1.0.0]: https://github.com/lflare/mdathome-golang/releases/tag/v1.0.0 diff --git a/Makefile b/Makefile index c0697bf..9de1d9a 100755 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ ARCHITECTURES = 386 amd64 arm arm64 LDFLAGS = "-X github.com/lflare/mdathome-golang/internal/mdathome.ClientVersion=${VERSION} -X mdathome.Build=${BUILD}" default: - CGO_ENABLED=0 go build -o ./mdathome-golang -tags netgo -trimpath -ldflags=${LDFLAGS} ./cmd/mdathome + CGO_ENABLED=0 go build -o ./mdathome-golang -tags netgo -trimpath -ldflags=${LDFLAGS} . 
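Note on the build change above and the entrypoint deletion further down: both the Makefile and .goreleaser.yml now build the package at the repository root (`main.go` / `.`) instead of `./cmd/mdathome`, and `cmd/mdathome/main.go` is removed later in this diff. The replacement root-level `main.go` is not part of this excerpt; the sketch below is purely illustrative of what such an entrypoint could look like, carrying over the flags from the deleted file (the `-config` default shown here is an assumption).

```go
// Hypothetical root-level main.go mirroring the deleted cmd/mdathome/main.go.
// Not taken from this diff; the real replacement entrypoint is not shown here.
package main

import (
	"flag"
	"fmt"

	"github.com/lflare/mdathome-golang/internal/mdathome"
)

func main() {
	// Flags carried over from the old entrypoint; the config default is assumed
	// to point at the new config.toml rather than the old settings.json.
	configFile := flag.String("config", "config.toml", "Location of the configuration file")
	printVersion := flag.Bool("version", false, "Prints version of client")
	shrinkDatabase := flag.Bool("shrink-database", false, "Shrink cache.db (may take a long time)")
	flag.Parse()

	_ = configFile // how the path reaches the package is not shown in this excerpt

	switch {
	case *printVersion:
		fmt.Printf("MD@Home Client %s (%d)\n", mdathome.ClientVersion, mdathome.ClientSpecification)
	case *shrinkDatabase:
		mdathome.ShrinkDatabase()
	default:
		mdathome.StartServer()
	}
}
```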
upx -qq mdathome-golang snapshot: diff --git a/assets/config.example.toml b/assets/config.example.toml new file mode 100644 index 0000000..c33b4ea --- /dev/null +++ b/assets/config.example.toml @@ -0,0 +1,47 @@ +version = 2 + +[client] +control_server = "https://api.mangadex.network" +graceful_shutdown_seconds = 300 +max_speed_kbps = 10000 +port = 443 +secret = "" + +[override] +address = "" +port = 0 +size = 0 +upstream = "" + +[cache] +directory = "cache/" +max_scan_interval_seconds = 300 +max_scan_time_seconds = 60 +max_size_mebibytes = 10240 +refresh_age_seconds = 86400 + +[performance] +allow_http2 = true +client_timeout_seconds = 60 +low_memory_mode = true +upstream_connection_reuse = true + +[security] +allow_visitor_cache_refresh = false +reject_invalid_hostname = false +reject_invalid_sni = false +reject_invalid_tokens = true +send_server_header = false +use_forwarded_for_headers = false +verify_image_integrity = false + +[metrics] +enable_prometheus = false +maxmind_license_key = "" + +[log] +directory = "log/" +level = "info" +max_age_days = 7 +max_backups = 3 +max_size_mebibytes = 64 diff --git a/cmd/mdathome/main.go b/cmd/mdathome/main.go deleted file mode 100644 index 8ebac56..0000000 --- a/cmd/mdathome/main.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "flag" - - "github.com/lflare/mdathome-golang/internal/mdathome" - "github.com/sirupsen/logrus" -) - -var log = logrus.New() - -func main() { - // Define arguments - configFile := flag.String("config", "settings.json", "Location of config.json file") - printVersion := flag.Bool("version", false, "Prints version of client") - shrinkDatabase := flag.Bool("shrink-database", false, "Shrink cache.db (may take a long time)") - - // Parse arguments - flag.Parse() - - // Set configuration file path - mdathome.ConfigFilePath = *configFile - - // Shrink database if flag given, otherwise start server - if *printVersion { - log.Infof("MD@Home Client %s (%d) written in Golang by @lflare", mdathome.ClientVersion, mdathome.ClientSpecification) - } else if *shrinkDatabase { - mdathome.ShrinkDatabase() - } else { - mdathome.StartServer() - } -} diff --git a/go.mod b/go.mod index e2a905f..1c51b3b 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.17 require ( github.com/VictoriaMetrics/metrics v1.18.1 + github.com/fsnotify/fsnotify v1.5.1 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/mattn/go-colorable v0.1.12 @@ -11,21 +12,35 @@ require ( github.com/sirupsen/logrus v1.8.1 github.com/snowzach/rotatefilehook v0.0.0-20220211133110-53752135082d github.com/spacemonkeygo/tlshowdy v0.0.0-20160207005338-8fa2cec1d7cd + github.com/spf13/viper v1.10.1 github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e go.etcd.io/bbolt v1.3.6 golang.org/x/crypto v0.0.0-20220214200702-86341886e292 ) require ( + github.com/BurntSushi/toml v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/google/go-github v17.0.0+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/hashicorp/go-version v1.4.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/magiconair/properties v1.8.5 // indirect github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/oschwald/maxminddb-golang v1.8.0 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + 
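The new assets/config.example.toml above, together with the spf13/viper and fsnotify dependencies added to go.mod here, indicates that runtime configuration (the now-ignored config.toml) is read through Viper rather than the old hand-rolled settings.json loader. The exact wiring inside StartServer is not shown in this excerpt; the following is a minimal, illustrative sketch of loading and hot-reloading such a file with Viper.

```go
// Illustrative only: loading a config.toml shaped like assets/config.example.toml
// with spf13/viper. The client's actual setup code may differ.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigName("config") // resolves config.toml in the search paths below
	viper.SetConfigType("toml")
	viper.AddConfigPath(".")

	// Defaults keep keys usable when they are omitted from config.toml.
	viper.SetDefault("client.port", 443)
	viper.SetDefault("cache.max_size_mebibytes", 10240)

	if err := viper.ReadInConfig(); err != nil {
		log.Fatalf("failed to read configuration: %v", err)
	}

	// Keys map directly onto the TOML tables, e.g. [client] control_server.
	controlServer := viper.GetString("client.control_server")
	cacheLimitBytes := viper.GetInt("cache.max_size_mebibytes") * 1024 * 1024
	log.Printf("control server: %s, cache limit: %d bytes", controlServer, cacheLimitBytes)

	// fsnotify (also added to go.mod above) lets Viper watch the file for changes.
	viper.WatchConfig()
	viper.OnConfigChange(func(e fsnotify.Event) {
		log.Printf("configuration reloaded: %s", e.Name)
	})
}
```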
github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.2.0 // indirect github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect + golang.org/x/text v0.3.7 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index cf47c50..49db130 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,17 @@ -github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0= github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -19,35 +20,53 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod 
h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/oschwald/geoip2-golang v1.5.0 h1:igg2yQIrrcRccB1ytFXqBfOHCjXWIoMv85lVJ1ONZzw= -github.com/oschwald/geoip2-golang v1.5.0/go.mod h1:xdvYt5xQzB8ORWFqPnqMwZpCpgNagttWdoZLlJQzg7s= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/oschwald/geoip2-golang v1.6.1 h1:GKxT3yaWWNXSb7vj6D7eoJBns+lGYgx08QO0UcNm0YY= github.com/oschwald/geoip2-golang v1.6.1/go.mod h1:xdvYt5xQzB8ORWFqPnqMwZpCpgNagttWdoZLlJQzg7s= github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk= github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/snowzach/rotatefilehook v0.0.0-20180327172521-2f64f265f58c h1:iUEy7/LRto3JqR/GLXDTEFP+s+qIjWw4pM8yzMfXC9A= -github.com/snowzach/rotatefilehook v0.0.0-20180327172521-2f64f265f58c/go.mod h1:ZLVe3VfhAuMYLYWliGEydMBoRnfib8EFSqkBYu1ck9E= github.com/snowzach/rotatefilehook v0.0.0-20220211133110-53752135082d h1:4660u5vJtsyrn3QwJNfESwCws+TM1CMhRn123xjVyQ8= github.com/snowzach/rotatefilehook v0.0.0-20220211133110-53752135082d/go.mod h1:ZLVe3VfhAuMYLYWliGEydMBoRnfib8EFSqkBYu1ck9E= github.com/spacemonkeygo/tlshowdy v0.0.0-20160207005338-8fa2cec1d7cd h1:1DS6oRTNvEIlcFDVe4OU/LKlrkRB/wx85GHJthitXw0= github.com/spacemonkeygo/tlshowdy v0.0.0-20160207005338-8fa2cec1d7cd/go.mod h1:MF7JYJoS2y353JlawNbpcLA0HAh4FzC4G+XrSIRP78c= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= +github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2oeKxpIUmtiDV5sn71VgeQgg6vcE7k= github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM= github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= @@ -56,33 +75,36 @@ github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OL github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6 h1:foEbQz/B0Oz6YIqu/69kfXPYeFQAuuMYFkjaqXzl5Wo= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= diff --git a/internal/mdathome/backend.go b/internal/mdathome/backend.go index 67b004b..2a17401 100644 --- a/internal/mdathome/backend.go +++ b/internal/mdathome/backend.go @@ -5,93 +5,97 @@ import ( "context" "crypto/tls" "encoding/json" - "io/ioutil" + "io" "net" "net/http" "net/url" "strings" + + "github.com/sirupsen/logrus" + "github.com/spf13/viper" ) -// Server ping handler -func backendPing() *ServerResponse { +var controlClient *http.Client + +func init() { + // Prepare control server HTTP client + controlClient = &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + return net.Dial("tcp4", addr) + }, + }, + } +} + +func controlPing() *ServerResponse { + // Prepare logger + log := log.WithFields(logrus.Fields{"type": "control"}) + // Create settings JSON settings := ServerSettings{ - Secret: clientSettings.ClientSecret, - Port: clientSettings.ClientPort, - DiskSpace: clientSettings.MaxCacheSizeInMebibytes * 1024 * 1024, // 1GB - NetworkSpeed: clientSettings.MaxKilobitsPerSecond * 1000 / 8, // 100Mbps + Secret: viper.GetString("client.secret"), + Port: viper.GetInt("client.port"), + DiskSpace: viper.GetInt("cache.max_size_mebibytes") * 1024 * 1024, // 1GB + NetworkSpeed: viper.GetInt("client.max_speed_kbps") * 1000 / 8, // 100Mbps BuildVersion: ClientSpecification, TLSCreatedAt: nil, } - // Check if we are overriding reported port - if clientSettings.OverridePortReport != 0 { - 
settings.Port = clientSettings.OverridePortReport + // Override necessary settings + if viper.GetInt("override.port") != 0 { + settings.Port = viper.GetInt("override.port") } - - // Check if we are overriding reported address - if clientSettings.OverrideAddressReport != "" { - settings.IPAddress = clientSettings.OverrideAddressReport + if viper.GetString("override.address") != "" { + settings.IPAddress = viper.GetString("override.address") } - - // Check if we are overriding reported cache size - if clientSettings.OverrideSizeReport != 0 { - settings.DiskSpace = clientSettings.OverrideSizeReport * 1024 * 1024 + if viper.GetInt("override.size") != 0 { + settings.DiskSpace = viper.GetInt("override.size") * 1024 * 1024 } - // Marshal JSON + // Marshal server settings to JSON settingsJSON, _ := json.Marshal(&settings) - // Prepare backend client - client = &http.Client{ - Transport: &http.Transport{ - DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - return net.Dial("tcp4", addr) - }, - }, - } - - // Ping backend server - r, err := client.Post(clientSettings.APIBackend+"/ping", "application/json", bytes.NewBuffer(settingsJSON)) + // Ping control server + res, err := controlClient.Post(viper.GetString("client.control_server")+"/ping", "application/json", bytes.NewBuffer(settingsJSON)) if err != nil { - log.Printf("Failed to ping control server: %v", err) + log.Errorf("Failed to ping control server: %v", err) return nil } - defer r.Body.Close() + defer res.Body.Close() - // Read response fully - response, err := ioutil.ReadAll(r.Body) + // Read server response fully + controlResponse, err := io.ReadAll(res.Body) if err != nil { - log.Printf("Failed to ping control server: %v", err) + log.Errorf("Failed to ping control server: %v", err) return nil } - // Print server settings out - printableResponse := string(response) - tlsIndex := strings.Index(printableResponse, "\"tls\"") + // Verify TLS certificate exists in response before proceeding + tlsIndex := strings.Index(string(controlResponse), "\"tls\"") if tlsIndex == -1 { - log.Printf("Received invalid server response: %s", printableResponse) + log.Errorf("Received invalid server response: %s", controlResponse) + // If existing TLS certificate not already running in client, fail spectacularly if serverResponse.TLS.Certificate == "" { log.Fatalln("No valid TLS certificate found in memory, cannot continue!") } + + // Return early if unable to proceed return nil } - log.Printf("Server settings received! - %s...", string(response[:tlsIndex])) + log.Infof("Server settings received! 
- %s...", string(controlResponse[:tlsIndex])) // Decode & unmarshal server response - newServerResponse := ServerResponse{ - DisableTokens: false, // Default to not force disabling tokens - } - err = json.Unmarshal(response, &newServerResponse) - if err != nil { - log.Printf("Failed to ping control server: %v", err) + newServerResponse := ServerResponse{} + if err := json.Unmarshal(controlResponse, &newServerResponse); err != nil { + log.Errorf("Failed to ping control server: %v", err) return nil } // Check response for valid image server if newServerResponse.ImageServer == "" { - log.Printf("Failed to verify server response: %s", response) + log.Printf("Failed to verify server response: %s", controlResponse) return nil } @@ -103,22 +107,22 @@ func backendPing() *ServerResponse { return &newServerResponse } -func backendShutdown() { - // Sent stop request to backend +func controlShutdown() { + // Send stop request to control server request := ServerRequest{ - Secret: clientSettings.ClientSecret, + Secret: viper.GetString("client.secret"), } requestJSON, _ := json.Marshal(&request) - r, err := http.Post(clientSettings.APIBackend+"/stop", "application/json", bytes.NewBuffer(requestJSON)) - if err != nil { + if res, err := http.Post(viper.GetString("client.control_server")+"/stop", "application/json", bytes.NewBuffer(requestJSON)); err != nil { log.Fatalf("Failed to shutdown server gracefully: %v", err) + } else { + res.Body.Close() } - defer r.Body.Close() } -func backendGetCertificate() tls.Certificate { - // Make backend ping - serverResponse = *backendPing() +func controlGetCertificate() tls.Certificate { + // Make control ping + serverResponse = *controlPing() if serverResponse.TLS.Certificate == "" { log.Fatalln("Unable to contact API server!") } diff --git a/internal/mdathome/cache.go b/internal/mdathome/cache.go new file mode 100644 index 0000000..c9ba883 --- /dev/null +++ b/internal/mdathome/cache.go @@ -0,0 +1,504 @@ +package mdathome + +import ( + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "os" + "os/signal" + "sort" + "syscall" + "time" + + "github.com/VictoriaMetrics/metrics" + "github.com/spf13/viper" + bolt "go.etcd.io/bbolt" +) + +var ( + clientCacheSize = metrics.NewCounter("client_cache_size_bytes") + clientCacheLimit = metrics.NewCounter("client_cache_limit_bytes") + clientCacheEvicted = metrics.NewCounter("client_cache_evicted_bytes") +) + +type KeyPair struct { + Key string + Timestamp int64 + Size int +} + +func (a *KeyPair) UpdateTimestamp() { + a.Timestamp = time.Now().Unix() +} + +func getPathFromHash(hash string) (string, string) { + dir := hash[0:2] + "/" + hash[2:4] + "/" + hash[4:6] + parent := viper.GetString("cache.directory") + "/" + dir + path := parent + "/" + hash + return parent, path +} + +func hashRequestURI(requestURI string) string { + // Create MD5 hasher + h := md5.New() + + // Write key to MD5 hasher (should not ever fail) + _, _ = io.WriteString(h, requestURI) + + // Encode MD5 hash to hexadecimal + hash := hex.EncodeToString(h.Sum(nil)) + + // Return hash + return hash +} + +type Cache struct { + cacheLimitInBytes int + database *bolt.DB +} + +func (c *Cache) DeleteFileByKey(hash string) error { + _, path := getPathFromHash(hash) + + // Delete file off disk + if err := os.Remove(path); err != nil { + log.Errorf("File does not seem to exist on disk, ignoring: %v", err) + } + + // Delete key off database + if err := c.database.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("KEYS")).Delete([]byte(hash)); err != nil 
{ + return fmt.Errorf("could not delete entry: %v", err) + } + + // Return with no errors + return nil + }); err != nil { + return fmt.Errorf("entry does not exist on database: %v", err) + } + + // Return nil if no errors encountered + return nil +} + +// setEntry adds or modifies an entry in the database from a keyPair +func (c *Cache) setEntry(keyPair KeyPair) error { + // Marshal keyPair struct into bytes + keyPairBytes, err := json.Marshal(keyPair) + if err != nil { + return fmt.Errorf("unable to marshal keyPair: %v", err) + } + + // Update database with marshaled keyPair + err = c.database.Update(func(tx *bolt.Tx) error { + err = tx.Bucket([]byte("KEYS")).Put([]byte(keyPair.Key), keyPairBytes) + if err != nil { + return fmt.Errorf("could not set entry: %v", err) + } + return nil + }) + + // Return error if any + return err +} + +func (c *Cache) Get(requestURI string) (reader *os.File, size int64, mtime time.Time, err error) { + // Check for empty cache key + if len(requestURI) == 0 { + return nil, 0, time.Now(), fmt.Errorf("empty cache key") + } + + // Get cache key + hash := hashRequestURI(requestURI) + _, path := getPathFromHash(hash) + + // Read image from directory + file, err := os.Open(path) + if err != nil { + return nil, 0, time.Now(), fmt.Errorf("failed to read image from '%s': %v", path, err) + } + + // Get file information + fileInfo, err := os.Stat(path) + if err != nil { + return nil, 0, time.Now(), fmt.Errorf("failed to retrieve file information from '%s': %v", path, err) + } + + // Attempt to get keyPair + keyPair, err := c.getEntry(hash) + if err != nil { + return nil, 0, time.Now(), fmt.Errorf("failed to get entry for cache key %s: %v", path, err) + } + + // If keyPair is older than configured cacheRefreshAge, refresh + if keyPair.Timestamp < time.Now().Add(-1*time.Duration(viper.GetInt("cache.refresh_age_seconds"))*time.Second).Unix() { + log.Debugf("Updating timestamp: %+v", keyPair) + if err != nil { + size := fileInfo.Size() + timestamp := time.Now().Unix() + keyPair = KeyPair{hash, timestamp, int(size)} + } + + // Update timestamp + keyPair.UpdateTimestamp() + + // Set entry + err := c.setEntry(keyPair) + if err != nil { + return nil, 0, time.Now(), fmt.Errorf("failed to set entry for key %s: %v", requestURI, err) + } + } + + // Return file + return file, fileInfo.Size(), fileInfo.ModTime(), nil +} + +// Set takes a key, hashes it, and saves the `resp` bytearray into the corresponding file +func (c *Cache) Set(requestURI string, mtime time.Time, resp []byte) error { + // Check for empty cache key + if len(requestURI) == 0 { + return fmt.Errorf("empty cache key") + } + + // Get cache key + hash := hashRequestURI(requestURI) + parent, path := getPathFromHash(hash) + + // Create necessary cache subfolder + if err := os.MkdirAll(parent, os.ModePerm); err != nil { + return fmt.Errorf("failed to create parent folder for '%s' at '%s': %v", requestURI, parent, err) + } + + // Write image + if err := os.WriteFile(path, resp, 0644); err != nil { + return fmt.Errorf("failed to write image to disk for '%s' at '%s': %v", requestURI, path, err) + } + + // Update modification time + if err := os.Chtimes(path, mtime, mtime); err != nil { + return fmt.Errorf("failed to set modification time of image '%s': %v", path, err) + } + + // Update database + size := len(resp) + timestamp := time.Now().Unix() + keyPair := KeyPair{hash, timestamp, size} + + // Set database entry + if err := c.setEntry(keyPair); err != nil { + return fmt.Errorf("failed to write image to database of key '%s' 
at '%s' : %v", hash, path, err) + } + + // Update Prometheus metrics + clientCacheSize.Add(size) + + // Return no error + return nil +} + +// UpdateCacheLimit allows for updating of cache limit= +func (c *Cache) UpdateCacheLimit(cacheLimit int) { + c.cacheLimitInBytes = cacheLimit + clientCacheLimit.Set(uint64(cacheLimit)) +} + +func (c *Cache) loadCacheInfo() (int, []KeyPair, error) { + // Create running variables + totalSize := 0 + + // Pull keys from BoltDB + keyPairs, err := c.Scan() + if err != nil { + log.Fatal(err) + } + + // Count total size + for _, keyPair := range keyPairs { + totalSize += keyPair.Size + } + + // Sort cache by access time + sort.Sort(ByTimestamp(keyPairs)) + + // Return running variables + return totalSize, keyPairs, err +} + +func (c *Cache) StartCompanionThread(keys []KeyPair) { + for { + // Sleep for 15 seconds before continuing + time.Sleep(15 * time.Second) + + // Continue if clientCacheSize == 0 + if clientCacheSize.Get() == 0 { + continue + } + + // Calculate usage + usage := 100 * (float32(clientCacheSize.Get()) / float32(c.cacheLimitInBytes)) + log.Debugf("Current diskcache size: %s, limit: %s, usage: %0.3f%%", ByteCountIEC(int(clientCacheSize.Get())), ByteCountIEC(c.cacheLimitInBytes), usage) + + // Continue if clientCacheSize under limit + if int(clientCacheSize.Get()) < c.cacheLimitInBytes { + continue + } + + // Get ready to shrink cache + deletedSize := 0 + deletedItems := 0 + startTime := time.Now() + + // Loop over keys and delete till we are under threshold + for { + // Pop key + v := keys[0] + keys = keys[1:] + + // Delete file + err := c.DeleteFileByKey(v.Key) + if err != nil { + log.Warnf("Unable to delete file in key '%s': %v", v.Key, err) + } + + // Add to deletedSize + clientCacheSize.Add(-1 * v.Size) + clientCacheEvicted.Add(v.Size) + deletedSize += v.Size + deletedItems++ + + // Check if we are under threshold + if int(clientCacheSize.Get()) < c.cacheLimitInBytes { + break + } + + // Check time elapsed + if timeElapsed := time.Since(startTime).Seconds(); timeElapsed > float64(viper.GetInt("cache.max_scan_time_seconds")) { + break + } + } + } +} + +func (c *Cache) StartBackgroundThread() { + var keys []KeyPair + + // Rescan every scan interval for fresh keys + companionRunning := false + for { + // Retrieve cache information + var err error + var size int + if size, keys, err = c.loadCacheInfo(); err != nil { + log.Fatal(err) + } + + // Update Prometheus metrics + if size > 0 { + clientCacheSize.Set(uint64(size)) + } + + // If partner thread not running, run now + if !companionRunning { + go c.StartCompanionThread(keys) + companionRunning = true + } + + // Sleep till next execution + time.Sleep(viper.GetDuration("cache.max_scan_interval_seconds") * time.Second) + } + +} + +// Close closes the database +func (c *Cache) Close() { + c.database.Close() +} + +// getEntry retrieves an entry from the database from a key +func (c *Cache) getEntry(hash string) (KeyPair, error) { + // Prepare empty keyPair variable + var keyPair KeyPair + + // Retrieve entry from database + err := c.database.View(func(tx *bolt.Tx) error { + // Retrieve key value + keyPairBytes := tx.Bucket([]byte("KEYS")).Get([]byte(hash)) + if keyPairBytes == nil { + return fmt.Errorf("key does not exist") + } + + // Unmarshal keyPairBytes into previously declared keyPair + err := json.Unmarshal(keyPairBytes, &keyPair) + if err != nil { + return err + } + + return nil + }) + + // Return keyPair and error if any + return keyPair, err +} + +func (c *Cache) Scan() ([]KeyPair, error) 
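loadCacheInfo in this hunk sorts the scanned keys with sort.Sort(ByTimestamp(keyPairs)), but ByTimestamp (like ByteCountIEC, used for logging) is not defined in this excerpt. Presumably it is a sort.Interface over []KeyPair ordered by last-access timestamp, oldest first, roughly along these lines:

```go
// Assumed shape of ByTimestamp; the actual definition is not part of this diff.
type ByTimestamp []KeyPair

func (a ByTimestamp) Len() int           { return len(a) }
func (a ByTimestamp) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByTimestamp) Less(i, j int) bool { return a[i].Timestamp < a[j].Timestamp }
```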
{ + // Prepare empty keyPairs reference + var keyPairs []KeyPair + + // Retrieve all entries from database, unmarshaling and appending to []keyPair slice + err := c.database.View(func(tx *bolt.Tx) error { + // Get bucket + b := tx.Bucket([]byte("KEYS")) + + // Create slice of keypairs of size of bucket + keyPairs = make([]KeyPair, b.Stats().KeyN) + index := 0 + + // Prepare timer + startTime := time.Now() + + // Cursor + cur := b.Cursor() + for key, keyPairBytes := cur.First(); key != nil; key, keyPairBytes = cur.Next() { + // Prepare empty keyPair struct + var keyPair KeyPair + + // Unmarshal bytes + err := json.Unmarshal(keyPairBytes, &keyPair) + if err != nil { + return err + } + + // Append to keyPairs + keyPairs[index] = keyPair + index++ + + // Check time + if timeElapsed := time.Since(startTime).Seconds(); timeElapsed > float64(viper.GetInt("cache.max_scan_time_seconds")) { + break + } + } + + return nil + }) + + // Return keyPairs and errors if any + return keyPairs, err +} + +func (c *Cache) Shrink() error { + // Hook on to SIGTERM + sigtermChannel := make(chan os.Signal, 1) + signal.Notify(sigtermChannel, os.Interrupt, syscall.SIGTERM) + + // Start coroutine to wait for SIGTERM + handler := make(chan struct{}) + go func() { + for { + select { + case <-sigtermChannel: + // Prepare to shutdown server + log.Println("Aborted database shrinking!") + + // Delete half-shrunk database + os.Remove(viper.GetString("cache.directory") + "/cache.db.tmp") + + // Exit properly + close(handler) + os.Exit(0) + case <-handler: + close(sigtermChannel) + return + } + } + }() + + // Prepare new database location + newDB, err := bolt.Open(viper.GetString("cache.directory")+"/cache.db.tmp", 0600, nil) + if err != nil { + log.Errorf("failed to open new database location: %v", err) + os.Exit(1) + } + + // Attempt to compact database + if err = bolt.Compact(newDB, c.database, 0); err != nil { + log.Fatalf("failed to compact database: %v", err) + } + + // Close new database + if err = newDB.Close(); err != nil { + log.Errorf("failed to close new database: %v", err) + } + + // Close old database + if err = c.database.Close(); err != nil { + log.Errorf("failed to close old database: %v", err) + os.Exit(1) + } + + // Rename database files + if err := os.Rename(viper.GetString("cache.directory")+"/cache.db", viper.GetString("cache.directory")+"/cache.db.bak"); err != nil { + log.Fatalf("failed to backup database: %v", err) + } + if err := os.Rename(viper.GetString("cache.directory")+"/cache.db.tmp", viper.GetString("cache.directory")+"/cache.db"); err != nil { + log.Fatalf("failed to restore new database: %v", err) + } + log.Infof("Database backed up and renamed!") + + // Stop goroutine + handler <- struct{}{} + return nil +} + +func (c *Cache) Setup() (err error) { + // Create cache directory if not exists + if err = os.MkdirAll(viper.GetString("cache.directory"), os.ModePerm); err != nil { + return fmt.Errorf("could not create cache directory '%s': %v", viper.GetString("cache.directory"), err) + } + + // Open BoltDB database + options := c.getOptions() + if c.database, err = bolt.Open(viper.GetString("cache.directory")+"/cache.db", 0600, options); err != nil { + return fmt.Errorf("could not open database: %v", err) + } + + // Create bucket if not exists + if err := c.database.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists([]byte("KEYS")); err != nil { + return fmt.Errorf("could not create bucket: %v", err) + } + + // Return with no errors + return nil + }); err != nil { + return 
fmt.Errorf("failed to craete bucket: %v", err) + } + + // Database ready! + log.Infof("Database ready!") + return nil +} + +func OpenCache(directory string, cacheLimit int) *Cache { + cache := Cache{ + cacheLimitInBytes: cacheLimit, + } + + // Setup BoltDB + err := cache.Setup() + if err != nil { + log.Fatalf("failed to setup BoltDB: %v", err) + } + + // Prep metrics counter + clientCacheLimit.Set(uint64(cacheLimit)) + + // Start background clean-up thread + if viper.GetDuration("cache.max_scan_interval_seconds") > 0 { + go cache.StartBackgroundThread() + } + + // Return cache object + return &cache +} diff --git a/internal/mdathome/constants.go b/internal/mdathome/constants.go index 4e8b2ff..db50a3e 100644 --- a/internal/mdathome/constants.go +++ b/internal/mdathome/constants.go @@ -9,3 +9,8 @@ var ClientVersion string // ClientSpecification is the integer version of the official specification this client is supposed to work against const ClientSpecification int = 31 + +const ( + KeyCacheDirectory string = "cache.directory" + KeyCacheSize string = "cache.max_size_mebibytes" +) diff --git a/internal/mdathome/geoip.go b/internal/mdathome/geoip.go index 3288ddd..420fe89 100644 --- a/internal/mdathome/geoip.go +++ b/internal/mdathome/geoip.go @@ -10,68 +10,77 @@ import ( "strings" "github.com/oschwald/geoip2-golang" + "github.com/spf13/viper" ) var geodb *geoip2.Reader -func prepareGeoIPDatabase() { +func downloadGeoIPDatabase(path string) error { + // Log + log.Warnf("Downloading geolocation data in the background...") - // Set MaxMind database filename - maxMindDatabaseFilename := "GeoLite2-Country.mmdb" + // Prepare URL + databaseURL := fmt.Sprintf("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=%s&suffix=tar.gz", viper.GetString("metrics.maxmind_license_key")) - // Check if database already downloaded - if _, err := os.Stat(maxMindDatabaseFilename); os.IsNotExist(err) { - // Log - log.Warnf("Downloding geolocation data in the background...") + // Download database if not exists + resp, err := http.Get(databaseURL) + if err != nil { + return fmt.Errorf("failed to download MaxMind database: %v", err) + } + defer resp.Body.Close() + + // Uncompress archive + uncompressedArchive, err := gzip.NewReader(resp.Body) + if err != nil { + return fmt.Errorf("failed to uncompress MaxMind database: %v", err) + } + defer uncompressedArchive.Close() - // Prepare URL - databaseURL := fmt.Sprintf("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=%s&suffix=tar.gz", clientSettings.MaxMindLicenseKey) + // Loop through tar archive entries + tarReader := tar.NewReader(uncompressedArchive) + for { + // Get next tar archive entry + header, err := tarReader.Next() - // Download database if not exists - resp, err := http.Get(databaseURL) - if err != nil { - log.Fatalf("Failed to download MaxMind database: %v", err) + // If finished entire file and no database found + if err == io.EOF { + return fmt.Errorf("Unable to find MaxMind database file in archive") } - defer resp.Body.Close() - // Uncompress archive - uncompressedArchive, err := gzip.NewReader(resp.Body) + // If EOF if err != nil { - log.Fatalf("Failed to uncompress MaxMind database: %v", err) + return fmt.Errorf("Failed to extract MaxMind database file: %v", err) } - defer uncompressedArchive.Close() - - // Loop through tar archive entries - tarReader := tar.NewReader(uncompressedArchive) - for { - // Get next tar archive entry - header, err := tarReader.Next() - // If finished 
entire file and no database found - if err == io.EOF { - log.Fatalln("Unable to find MaxMind database file in archive") + // If tar archive entry matches our requirements, save to file + if header.Typeflag == tar.TypeReg && strings.HasSuffix(header.Name, path) { + outFile, err := os.Create(path) + if err != nil { + return fmt.Errorf("Failed to create MaxMind database file: %s", err.Error()) } + defer outFile.Close() - // If EOF - if err != nil { - log.Fatalf("Failed to extract MaxMind database file: %v", err) + if _, err := io.Copy(outFile, tarReader); err != nil { + return fmt.Errorf("Failed to write to MaxMind database file: %s", err.Error()) } - // If tar archive entry matches our requirements, save to file - if header.Typeflag == tar.TypeReg && strings.HasSuffix(header.Name, maxMindDatabaseFilename) { - outFile, err := os.Create(maxMindDatabaseFilename) - if err != nil { - log.Fatalf("Failed to create MaxMind database file: %s", err.Error()) - } - defer outFile.Close() + log.Warnf("Downloaded MaxMind database") + break + } + } - if _, err := io.Copy(outFile, tarReader); err != nil { - log.Fatalf("Failed to write to MaxMind database file: %s", err.Error()) - } + // Return with no errors + return nil +} - log.Warnf("Downloaded MaxMind database") - break - } +func prepareGeoIPDatabase() { + // Set MaxMind database filename + maxMindDatabaseFilename := "GeoLite2-Country.mmdb" + + // Check if database already downloaded + if _, err := os.Stat(maxMindDatabaseFilename); os.IsNotExist(err) { + if err := downloadGeoIPDatabase(maxMindDatabaseFilename); err != nil { + log.Fatalf("Failed to download GeoIP database: %v", err) } } diff --git a/internal/mdathome/log.go b/internal/mdathome/log.go index cab054c..268720d 100644 --- a/internal/mdathome/log.go +++ b/internal/mdathome/log.go @@ -6,16 +6,16 @@ import ( colorable "github.com/mattn/go-colorable" "github.com/sirupsen/logrus" "github.com/snowzach/rotatefilehook" + "github.com/spf13/viper" ) var log = logrus.New() -// InitLogger initialises global logger func initLogger(logLevelString string, maxLogSizeInMb int, maxLogBackups int, maxLogAgeInDays int) { logLevel, _ := logrus.ParseLevel(logLevelString) rotateFileHook, err := rotatefilehook.NewRotateFileHook(rotatefilehook.RotateFileConfig{ - Filename: clientSettings.LogDirectory + "/mdathome.log", + Filename: viper.GetString("log.directory") + "/mdathome.log", MaxSize: maxLogSizeInMb, MaxBackups: 3, MaxAge: 28, diff --git a/internal/mdathome/main.go b/internal/mdathome/main.go index af45f51..241d66f 100644 --- a/internal/mdathome/main.go +++ b/internal/mdathome/main.go @@ -15,65 +15,12 @@ import ( "github.com/VictoriaMetrics/metrics" "github.com/gorilla/handlers" "github.com/gorilla/mux" - "github.com/lflare/mdathome-golang/pkg/diskcache" "github.com/sirupsen/logrus" + "github.com/spf13/viper" ) -var clientSettings = ClientSettings{ - // Version - Version: 2, - - // Client - LogDirectory: "log/", // Default log directory - CacheDirectory: "cache/", // Default cache directory - GracefulShutdownInSeconds: 300, // Default 5m graceful shutdown - - // Overrides - OverridePortReport: 0, // Default to advertise for port 443 - OverrideAddressReport: "", // Default to not overriding address report - OverrideSizeReport: 10240, // Default 10GB - OverrideUpstream: "", // Default to nil to follow upstream by controller - - // Node - ClientPort: 443, // Default to listen for requests on port 443 - MaxKilobitsPerSecond: 10000, // Default 10Mbps - MaxCacheSizeInMebibytes: 10240, // Default 10GB - - // Cache - 
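The log.go hunk above switches the rotating-file hook to viper.GetString("log.directory"); presumably the remaining initLogger arguments are now sourced from the [log] table of config.toml as well. Below is a hedged sketch of such a call site, using only the keys from assets/config.example.toml and the initLogger signature shown above. Worth noting: the hook body still hardcodes MaxBackups: 3 and MaxAge: 28, so the last two parameters are effectively unused in this excerpt.

```go
// Hypothetical call site for initLogger; the actual invocation is not shown here.
initLogger(
	viper.GetString("log.level"),           // e.g. "info"
	viper.GetInt("log.max_size_mebibytes"), // e.g. 64
	viper.GetInt("log.max_backups"),        // e.g. 3
	viper.GetInt("log.max_age_days"),       // e.g. 7
)
```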
CacheScanIntervalInSeconds: 300, // Default 5m scan interval - CacheRefreshAgeInSeconds: 3600, // Default 1h cache refresh age - MaxCacheScanTimeInSeconds: 15, // Default 15s max scan period - - // Performance - LowMemoryMode: false, // Default to not doing low-memory mode - AllowHTTP2: true, // Allow HTTP2 by default - AllowUpstreamPooling: true, // Allow upstream pooling by default - ClientTimeout: 60, // Default to 1 minute timeout - - // Security - AllowVisitorRefresh: false, // Default to not allow visitors to force-refresh images through - RejectInvalidSNI: false, // Default to not rejecting valid SNIs - RejectInvalidHostname: false, // Default to rejecting invalid hostnames - RejectInvalidTokens: true, // Default to reject invalid tokens - SendServerHeader: false, // Default to not send server headers - UseReverseProxyHeaders: false, // Default to not using X-Forwarded-For header in proxy - VerifyImageIntegrity: false, // Default to not verify image integrity - - // Metrics - EnablePrometheusMetrics: false, // Default to not enable Prometheus metrics - MaxMindLicenseKey: "", // Default to not have any MaxMind Geolocation DB - - // Log - LogLevel: "trace", // Default to "trace" for all logs - MaxLogSizeInMebibytes: 64, // Default to maximum log size of 64MiB - MaxLogBackups: 3, // Default to maximum log backups of 3 - MaxLogAgeInDays: 7, // Default to maximum log age of 7 days - - // Development - APIBackend: "https://api.mangadex.network", // Default to "https://api.mangadex.network" -} var serverResponse ServerResponse -var cache *diskcache.Cache +var cache *Cache var timeLastRequest time.Time var running = true var client *http.Client @@ -81,8 +28,6 @@ var certHandler *certificateHandler var clientHostname string -var ConfigFilePath string - func requestHandler(w http.ResponseWriter, r *http.Request) { // Start timer startTime := time.Now() @@ -100,7 +45,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { } // Prepare logger for request - requestLogger := log.WithFields(logrus.Fields{"url_path": r.URL.Path, "remote_addr": remoteAddr, "referer": r.Header.Get("Referer")}) + requestLogger := log.WithFields(logrus.Fields{"type": "request", "url_path": r.URL.Path, "remote_addr": remoteAddr, "referer": r.Header.Get("Referer")}) // Parse GeoIP labels := "" @@ -133,7 +78,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { // Check if hostname is rejected requestHostname := strings.Split(r.Host, ":")[0] - if clientSettings.RejectInvalidHostname && requestHostname != clientHostname { + if viper.GetBool("security.reject_invalid_hostname") && requestHostname != clientHostname { requestLogger.WithFields(logrus.Fields{"event": "dropped", "reason": "invalid hostname"}).Warnf("Request from %s dropped due to invalid hostname: %s", remoteAddr, requestHostname) clientDroppedTotal.Inc() w.WriteHeader(http.StatusBadRequest) @@ -161,13 +106,12 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { } // If configured to reject invalid tokens - if clientSettings.RejectInvalidTokens && !serverResponse.DisableTokens { + if viper.GetBool("security.reject_invalid_tokens") && !serverResponse.DisableTokens { // Verify token if checking for invalid token and not a test chapter - code, err := verifyToken(tokens["token"], tokens["chapter_hash"]) - if err != nil { + if code, err := verifyToken(tokens["token"], tokens["chapter_hash"]); err != nil { requestLogger.WithFields(logrus.Fields{"event": "dropped", "reason": "invalid token"}).Warnf("Request from %s dropped due to 
invalid token", remoteAddr) - w.WriteHeader(code) clientDroppedTotal.Inc() + w.WriteHeader(code) return } } @@ -176,7 +120,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { sanitizedURL := "/" + tokens["image_type"] + "/" + tokens["chapter_hash"] + "/" + tokens["image_filename"] // Update requestLogger with new fields - requestLogger = log.WithFields(logrus.Fields{"url_path": r.URL.Path, "remote_addr": remoteAddr, "referer": r.Header.Get("Referer"), "token": tokens["token"], "image_type": tokens["image_type"], "chapter_hash": tokens["chapter_hash"], "filename": tokens["image_filename"]}) + requestLogger = requestLogger.WithFields(logrus.Fields{ + "url_path": r.URL.Path, + "remote_addr": remoteAddr, + "referer": r.Header.Get("Referer"), + "token": tokens["token"], + "image_type": tokens["image_type"], + "chapter_hash": tokens["chapter_hash"], + "filename": tokens["image_filename"], + }) // Update last request timeLastRequest = time.Now() @@ -195,7 +147,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Content-Type-Options", "nosniff") // Depending on client configuration, choose to hide Server header identifier - if clientSettings.SendServerHeader { + if viper.GetBool("security.send_server_header") { serverHeader := fmt.Sprintf("MD@Home Golang Client %s (%d) - github.com/lflare/mdathome-golang", ClientVersion, ClientSpecification) w.Header().Set("Server", serverHeader) } @@ -224,7 +176,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { defer imageFile.Close() // Check if client is running in low-memory mode - if !clientSettings.LowMemoryMode { + if !viper.GetBool("performance.low_memory_mode") { // Load image from disk to buffer if not low-memory mode imageBuffer.Grow(int(imageSize)) if _, err := io.Copy(&imageBuffer, imageFile); err != nil { @@ -232,7 +184,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { } // Check if verifying image integrity - if clientSettings.VerifyImageIntegrity && tokens["image_type"] == "data" { + if viper.GetBool("security.verify_image_integrity") && tokens["image_type"] == "data" { // Check and get hash from image filename subTokens := strings.Split(tokens["image_filename"], "-") if len(subTokens) == 2 { @@ -258,7 +210,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { } // Check if image refresh is enabled and Cache-Control header is set - if clientSettings.AllowVisitorRefresh && r.Header.Get("Cache-Control") == "no-cache" { + if viper.GetBool("security.allow_visitor_cache_refresh") && r.Header.Get("Cache-Control") == "no-cache" { // Log cache ignored requestLogger.WithFields(logrus.Fields{"event": "no-cache"}).Debugf("Request from %s ignored cache", remoteAddr) clientRefreshedTotal.Inc() @@ -386,49 +338,38 @@ func requestHandler(w http.ResponseWriter, r *http.Request) { // ShrinkDatabase initialises and shrinks the MD@Home database func ShrinkDatabase() { - // Load & prepare client settings - loadClientSettings() - saveClientSettings() - // Prepare diskcache log.Info("Preparing database...") - cache = diskcache.New(clientSettings.CacheDirectory, 0, 0, 0, 0, log, clientCacheSize, clientCacheLimit) + cache = OpenCache(viper.GetString(KeyCacheDirectory), 0) defer cache.Close() // Attempts to start cache shrinking log.Info("Shrinking database...") - if err := cache.ShrinkDatabase(); err != nil { + if err := cache.Shrink(); err != nil { log.Errorf("Failed to shrink database: %v", err) } } // StartServer starts the MD@Home client func StartServer() { + // Watch for 
configuration changes + configureConfigAutoReload() + // Check client version checkClientVersion() - // Load & prepare client settings - loadClientSettings() - saveClientSettings() - // Initialise logger - initLogger(clientSettings.LogLevel, clientSettings.MaxLogSizeInMebibytes, clientSettings.MaxLogBackups, clientSettings.MaxLogAgeInDays) + initLogger(viper.GetString("log.level"), viper.GetInt("log.max_size_mebibytes"), viper.GetInt("log.max_backups"), viper.GetInt("log.max_age_days")) // Prepare diskcache - cache = diskcache.New( - clientSettings.CacheDirectory, - clientSettings.MaxCacheSizeInMebibytes*1024*1024, - clientSettings.CacheScanIntervalInSeconds, - clientSettings.CacheRefreshAgeInSeconds, - clientSettings.MaxCacheScanTimeInSeconds, - log, - clientCacheSize, - clientCacheLimit, + cache = OpenCache( + viper.GetString(KeyCacheDirectory), + viper.GetInt(KeyCacheSize)*1024*1024, ) defer cache.Close() // Prepare MaxMind geolocation database - if clientSettings.MaxMindLicenseKey != "" { + if viper.GetString("metrics.maxmind_license_key") != "" { log.Warnf("Loading geolocation data in the background...") go prepareGeoIPDatabase() defer geodb.Close() @@ -439,7 +380,7 @@ func StartServer() { Transport: &http.Transport{ MaxIdleConns: 10, IdleConnTimeout: 60 * time.Second, - DisableKeepAlives: !clientSettings.AllowUpstreamPooling, + DisableKeepAlives: !viper.GetBool("performance.upstream_connection_reuse"), }, Timeout: 30 * time.Second, } @@ -448,14 +389,14 @@ func StartServer() { registerShutdownHandler() // Prepare TLS reloader - certHandler = NewCertificateReloader(backendGetCertificate()) + certHandler = NewCertificateReloader(controlGetCertificate()) go func() { for { time.Sleep(24 * time.Hour) // Update certificate log.Infof("Reloading certificates...") - if err := certHandler.updateCertificate(backendGetCertificate()); err != nil { + if err := certHandler.updateCertificate(controlGetCertificate()); err != nil { log.Errorf("Failed to reload certificate: %v", err) } } @@ -479,14 +420,14 @@ func StartServer() { }) // Handle Prometheus metrics - if clientSettings.EnablePrometheusMetrics { - r.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { + r.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) { + if viper.GetBool("metrics.enable_prometheus") { metrics.WritePrometheus(w, true) - }) - } + } + }) // If configured behind reverse proxies - if clientSettings.UseReverseProxyHeaders { + if viper.GetBool("metrics.use_forwarded_for_headers") { r.Use(handlers.ProxyHeaders) } @@ -494,7 +435,7 @@ func StartServer() { http.Handle("/", handlers.RecoveryHandler()(handlers.CompressHandler(r))) // Start server - err := listenAndServeTLSKeyPair(clientSettings, r) + err := listenAndServeTLSKeyPair(r) if err != nil { log.Fatalf("Cannot start server: %v", err) } diff --git a/internal/mdathome/metrics.go b/internal/mdathome/metrics.go deleted file mode 100644 index 6681296..0000000 --- a/internal/mdathome/metrics.go +++ /dev/null @@ -1,10 +0,0 @@ -package mdathome - -import ( - "github.com/VictoriaMetrics/metrics" -) - -var ( - clientCacheSize = metrics.NewCounter("client_cache_size") - clientCacheLimit = metrics.NewCounter("client_cache_limit") -) diff --git a/internal/mdathome/options.go b/internal/mdathome/options.go new file mode 100644 index 0000000..4b373f4 --- /dev/null +++ b/internal/mdathome/options.go @@ -0,0 +1,18 @@ +//go:build !linux +// +build !linux + +package mdathome + +import ( + bolt "go.etcd.io/bbolt" +) + +func (c *Cache) getOptions() 
*bolt.Options {
+	// Return no custom options; MAP_POPULATE is only supported on Linux
+	options := &bolt.Options{}
+	return options
+}
+
+func configureConfigAutoReload() {
+	// Do nothing; configuration auto-reload is only enabled on the Linux build
+}
diff --git a/internal/mdathome/options_linux.go b/internal/mdathome/options_linux.go
new file mode 100644
index 0000000..b9d1a91
--- /dev/null
+++ b/internal/mdathome/options_linux.go
@@ -0,0 +1,27 @@
+package mdathome
+
+import (
+	"syscall"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/spf13/viper"
+	bolt "go.etcd.io/bbolt"
+)
+
+func (c *Cache) getOptions() *bolt.Options {
+	options := &bolt.Options{
+		MmapFlags: syscall.MAP_POPULATE,
+	}
+	return options
+}
+
+func configureConfigAutoReload() {
+	// Watch for configuration changes
+	viper.OnConfigChange(func(e fsnotify.Event) {
+		log.Infof("Configuration updated: %v", viper.AllSettings())
+
+		// Run manual configuration updates
+		cache.UpdateCacheLimit(viper.GetInt(KeyCacheSize) * 1024 * 1024)
+	})
+	viper.WatchConfig()
+}
diff --git a/internal/mdathome/structs.go b/internal/mdathome/structs.go
index 43de892..d2b60e1 100644
--- a/internal/mdathome/structs.go
+++ b/internal/mdathome/structs.go
@@ -1,64 +1,5 @@
 package mdathome
 
-// ClientSettings stores client settings
-type ClientSettings struct {
-	// Settings Versioning
-	Version int `json:"version"`
-
-	// Client
-	LogDirectory              string `json:"log_directory"`
-	CacheDirectory            string `json:"cache_directory"`
-	GracefulShutdownInSeconds int    `json:"graceful_shutdown_in_seconds"`
-
-	// Overrides
-	OverridePortReport    int    `json:"override_port_report"`
-	OverrideAddressReport string `json:"override_address_report"`
-	OverrideSizeReport    int    `json:"override_size_report"`
-	OverrideUpstream      string `json:"override_upstream"`
-
-	// Node
-	ClientPort              int    `json:"client_port"`
-	ClientSecret            string `json:"client_secret"`
-	MaxKilobitsPerSecond    int    `json:"max_kilobits_per_second"`
-	MaxCacheSizeInMebibytes int    `json:"max_cache_size_in_mebibytes"`
-
-	// Cache
-	CacheScanIntervalInSeconds int `json:"cache_scan_interval_in_seconds"`
-	CacheRefreshAgeInSeconds   int `json:"cache_refresh_age_in_seconds"`
-	MaxCacheScanTimeInSeconds  int `json:"max_cache_scan_time_in_seconds"`
-
-	// Performance
-	AllowHTTP2           bool `json:"allow_http2"`
-	AllowUpstreamPooling bool `json:"allow_upstream_pooling"`
-	LowMemoryMode        bool `json:"low_memory_mode"`
-	ClientTimeout        int  `json:"client_timeout"`
-
-	// Security
-	AllowVisitorRefresh    bool `json:"allow_visitor_refresh"`
-	RejectInvalidHostname  bool `json:"reject_invalid_hostname"`
-	RejectInvalidSNI       bool `json:"reject_invalid_sni"`
-	RejectInvalidTokens    bool `json:"reject_invalid_tokens"`
-	SendServerHeader       bool `json:"send_server_header"`
-	UseReverseProxyHeaders bool `json:"use_reverse_proxy_ip"`
-	VerifyImageIntegrity   bool `json:"verify_image_integrity"`
-
-	// Metrics
-	EnablePrometheusMetrics bool   `json:"enable_prometheus_metrics"`
-	MaxMindLicenseKey       string `json:"maxmind_license_key"`
-
-	// Log
-	LogLevel              string `json:"log_level"`
-	MaxLogSizeInMebibytes int    `json:"max_log_size_in_mebibytes"`
-	MaxLogBackups         int    `json:"max_log_backups"`
-	MaxLogAgeInDays       int    `json:"max_log_age_in_days"`
-
-	// Development settings
-	APIBackend string `json:"api_backend"`
-
-	// Deprecated settings
-	MaxReportedSizeInMebibytes int `json:"max_reported_size_in_mebibytes,omitempty"`
-}
-
 // ServerRequest stores a single `secret` field for miscellaneous operations
 type ServerRequest struct {
 	Secret string `json:"secret"`
diff --git a/internal/mdathome/tls.go 
b/internal/mdathome/tls.go index b73b712..122d757 100644 --- a/internal/mdathome/tls.go +++ b/internal/mdathome/tls.go @@ -9,6 +9,7 @@ import ( "time" "github.com/spacemonkeygo/tlshowdy" + "github.com/spf13/viper" ) type tcpKeepAliveListener struct { @@ -34,7 +35,7 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { } // Check SNI if configured to do so - if clientSettings.RejectInvalidSNI { + if viper.GetBool("security.reject_invalid_sni") { // Set deadline to prevent connection leaks if err = tc.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { log.Warn(fmt.Sprintf("failed to SetDeadline(): %s", err)) @@ -76,16 +77,16 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { return tc, nil } -func listenAndServeTLSKeyPair(clientSettings ClientSettings, handler http.Handler) error { +func listenAndServeTLSKeyPair(handler http.Handler) error { // Build address - addr := ":" + strconv.Itoa(clientSettings.ClientPort) + addr := ":" + strconv.Itoa(viper.GetInt("client.port")) // Build HTTP server configuration server := &http.Server{ Addr: addr, Handler: handler, - ReadTimeout: time.Second * time.Duration(clientSettings.ClientTimeout), - WriteTimeout: time.Second * time.Duration(clientSettings.ClientTimeout), + ReadTimeout: time.Second * time.Duration(viper.GetDuration("performance.client_timeout_seconds")), + WriteTimeout: time.Second * time.Duration(viper.GetDuration("performance.client_timeout_seconds")), } config := &tls.Config{ PreferServerCipherSuites: true, @@ -99,7 +100,7 @@ func listenAndServeTLSKeyPair(clientSettings ClientSettings, handler http.Handle config.GetCertificate = certHandler.GetCertificate() // If allowing http2 - if clientSettings.AllowHTTP2 { + if viper.GetBool("performance.allow_http2") { config.NextProtos = []string{"h2", "http/1.1"} } else { config.NextProtos = []string{"http/1.1"} diff --git a/internal/mdathome/token.go b/internal/mdathome/token.go index 5a8307d..292bd45 100644 --- a/internal/mdathome/token.go +++ b/internal/mdathome/token.go @@ -12,17 +12,17 @@ import ( func verifyToken(tokenString string, chapterHash string) (int, error) { // Check if given token string is empty if tokenString == "" { - return 403, fmt.Errorf("Token is empty") + return 403, fmt.Errorf("token cannot be empty") } // Decode base64-encoded token & key tokenBytes, err := base64.RawURLEncoding.DecodeString(tokenString) if err != nil { - return 403, fmt.Errorf("cannot decode token - %v", err) + return 403, fmt.Errorf("token is not valid base64: %v", err) } keyBytes, err := base64.StdEncoding.DecodeString(serverResponse.TokenKey) if err != nil { - return 403, fmt.Errorf("cannot decode key - %v", err) + return 403, fmt.Errorf("key is not valid base64: %v", err) } // Copy over byte slices to fixed-length byte arrays for decryption @@ -40,13 +40,13 @@ func verifyToken(tokenString string, chapterHash string) (int, error) { // Unmarshal to struct token := Token{} if err := json.Unmarshal(data, &token); err != nil { - return 403, fmt.Errorf("failed to unmarshal token - %v", err) + return 403, fmt.Errorf("failed to unmarshal token from json: %v", err) } // Parse expiry time expires, err := time.Parse(time.RFC3339, token.Expires) if err != nil { - return 403, fmt.Errorf("failed to parse expiry from token - %v", err) + return 403, fmt.Errorf("failed to parse expiry time from token: %v", err) } // Check token expiry timing diff --git a/internal/mdathome/utils.go b/internal/mdathome/utils.go index fa4b2ce..265643a 100644 --- a/internal/mdathome/utils.go +++ 
b/internal/mdathome/utils.go @@ -1,75 +1,17 @@ package mdathome import ( - "encoding/json" "fmt" - "io/ioutil" "os" "os/signal" "syscall" "time" "github.com/sirupsen/logrus" + "github.com/spf13/viper" "github.com/tcnksm/go-latest" ) -func saveClientSettings() { - clientSettingsSampleBytes, err := json.MarshalIndent(clientSettings, "", " ") - if err != nil { - log.Fatalln("Failed to marshal sample settings.json") - } - - err = ioutil.WriteFile(ConfigFilePath, clientSettingsSampleBytes, 0600) - if err != nil { - log.Fatalf("Failed to create sample settings.json: %v", err) - } -} - -func loadClientSettings() { - // Read JSON from file - clientSettingsJSON, err := ioutil.ReadFile(ConfigFilePath) - if err != nil { - log.Printf("Failed to read client configuration file - %v", err) - saveClientSettings() - log.Fatalf("Created sample settings.json! Please edit it before running again!") - } - - // Unmarshal JSON to clientSettings struct - err = json.Unmarshal(clientSettingsJSON, &clientSettings) - if err != nil { - log.Fatalf("Unable to unmarshal JSON file: %v", err) - } - - // Migrate settings to the latest version - migrateClientSettings(&clientSettings) - - // Check client configuration - if clientSettings.ClientSecret == "" { - log.Fatalf("Empty secret! Cannot run!") - } - - if clientSettings.CacheDirectory == "" { - log.Fatalf("Empty cache directory! Cannot run!") - } - - // Print client configuration - log.Printf("Client configuration loaded: %+v", clientSettings) -} - -func migrateClientSettings(cs *ClientSettings) { - // Migrate from settings before version 1 - switch cs.Version { - case 0: - cs.OverrideSizeReport = cs.MaxReportedSizeInMebibytes - cs.MaxReportedSizeInMebibytes = 0 - cs.Version = 1 - fallthrough - case 1: - cs.RejectInvalidSNI = false - cs.Version = 2 - } -} - func checkClientVersion() { // Prepare version check githubTag := &latest.GithubTag{ @@ -94,39 +36,33 @@ func checkClientVersion() { } func startBackgroundWorker() { - // Wait 10 seconds + // Wait 15 seconds log.Println("Starting background jobs!") - time.Sleep(10 * time.Second) + time.Sleep(15 * time.Second) for running { // Reload client configuration log.Println("Reloading client configuration") - loadClientSettings() // Update log level if need be - newLogLevel, err := logrus.ParseLevel(clientSettings.LogLevel) + newLogLevel, err := logrus.ParseLevel(viper.GetString("log.level")) if err == nil { log.SetLevel(newLogLevel) } - // Update max cache size - cache.UpdateCacheLimit(clientSettings.MaxCacheSizeInMebibytes * 1024 * 1024) - cache.UpdateCacheScanInterval(clientSettings.CacheScanIntervalInSeconds) - cache.UpdateCacheRefreshAge(clientSettings.CacheRefreshAgeInSeconds) - // Update server response in a goroutine - newServerResponse := backendPing() + newServerResponse := controlPing() if newServerResponse != nil { // Check if overriding upstream - if clientSettings.OverrideUpstream != "" { - newServerResponse.ImageServer = clientSettings.OverrideUpstream + if viper.GetString("override.upstream") != "" { + newServerResponse.ImageServer = viper.GetString("override.upstream") } serverResponse = *newServerResponse } - // Wait 10 seconds - time.Sleep(10 * time.Second) + // Wait 15 seconds + time.Sleep(15 * time.Second) } } @@ -145,7 +81,7 @@ func registerShutdownHandler() { running = false // Send shutdown command to backend - backendShutdown() + controlShutdown() // Wait till last request is normalised timeShutdown := time.Now() @@ -154,7 +90,7 @@ func registerShutdownHandler() { log.Printf("%.2f seconds have elapsed 
since CTRL-C", secondsSinceLastRequest) // Give up after one minute - if time.Since(timeShutdown).Seconds() > float64(clientSettings.GracefulShutdownInSeconds) { + if time.Since(timeShutdown).Seconds() > float64(viper.GetFloat64("client.graceful_shutdown_seconds")) { log.Printf("Giving up, quitting now!") break } @@ -168,3 +104,25 @@ func registerShutdownHandler() { os.Exit(0) }() } + +// ByteCountIEC returns a human-readable string describing the size of bytes in int +func ByteCountIEC(b int) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %ciB", + float64(b)/float64(div), "KMGTPE"[exp]) +} + +// ByTimestamp is a sortable slice of KeyPair based off timestamp +type ByTimestamp []KeyPair + +func (a ByTimestamp) Len() int { return len(a) } +func (a ByTimestamp) Less(i, j int) bool { return a[i].Timestamp < a[j].Timestamp } +func (a ByTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/main.go b/main.go new file mode 100644 index 0000000..941b9a4 --- /dev/null +++ b/main.go @@ -0,0 +1,67 @@ +package main + +import ( + _ "embed" + "flag" + "os" + + "github.com/lflare/mdathome-golang/internal/mdathome" + "github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +//go:embed assets/config.example.toml +var defaultConfiguration string + +var log *logrus.Logger + +func loadConfiguration() { + log.Infof("%+v", viper.AllSettings()) +} + +func init() { + // Initialise logger + log = logrus.New() + + // Configure Viper + viper.AddConfigPath(".") + viper.SetConfigName("config.toml") + viper.SetConfigType("toml") + + // Load in configuration + if err := viper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + log.Info("Configuration not found, creating!") + if err := os.WriteFile("config.toml", []byte(defaultConfiguration), 0600); err != nil { + log.Fatalf("Failed to write default configuration to 'config.toml'!") + } else { + log.Fatalf("Default configuration written to 'config.toml', please modify before running client again!") + } + } else { + // Config file was found but another error was produced + log.Errorf("Failed to read configuration: %v", err) + } + } + + // Reload configuration + loadConfiguration() +} + +func main() { + // Define arguments + printVersion := flag.Bool("version", false, "Prints version of client") + shrinkDatabase := flag.Bool("shrink-database", false, "Shrink cache.db (may take a long time)") + + // Parse arguments + flag.Parse() + + // Shrink database if flag given, otherwise start server + if *printVersion { + log.Infof("MD@Home Client %s (%d) written in Golang by @lflare", mdathome.ClientVersion, mdathome.ClientSpecification) + } else if *shrinkDatabase { + mdathome.ShrinkDatabase() + } else { + mdathome.StartServer() + } +} diff --git a/pkg/diskcache/database.go b/pkg/diskcache/database.go deleted file mode 100644 index 86147e9..0000000 --- a/pkg/diskcache/database.go +++ /dev/null @@ -1,234 +0,0 @@ -package diskcache - -import ( - "encoding/json" - "fmt" - "os" - "os/signal" - "syscall" - "time" - - bolt "go.etcd.io/bbolt" -) - -// setEntry adds or modifies an entry in the database from a keyPair -func (c *Cache) setEntry(keyPair KeyPair) error { - // Marshal keyPair struct into bytes - keyPairBytes, err := json.Marshal(keyPair) - if err != nil { - return fmt.Errorf("Unable to marshal keyPair: %v", err) - } - - 
// Update database with marshaled keyPair - err = c.database.Update(func(tx *bolt.Tx) error { - err = tx.Bucket([]byte("KEYS")).Put([]byte(keyPair.Key), keyPairBytes) - if err != nil { - return fmt.Errorf("Could not set entry: %v", err) - } - return nil - }) - - // Return error if any - return err -} - -// deleteEntry deletes an entry from database from a key -func (c *Cache) deleteEntry(key string) error { - // Update database and delete entry - err := c.database.Update(func(tx *bolt.Tx) error { - err := tx.Bucket([]byte("KEYS")).Delete([]byte(key)) - if err != nil { - return fmt.Errorf("Could not delete entry: %v", err) - } - return nil - }) - - // Return error if any - return err -} - -// getEntry retrieves an entry from the database from a key -func (c *Cache) getEntry(key string) (KeyPair, error) { - // Prepare empty keyPair variable - var keyPair KeyPair - - // Retrieve entry from database - err := c.database.View(func(tx *bolt.Tx) error { - // Retrieve key value - keyPairBytes := tx.Bucket([]byte("KEYS")).Get([]byte(key)) - if keyPairBytes == nil { - return fmt.Errorf("Key does not exist") - } - - // Unmarshal keyPairBytes into previously declared keyPair - err := json.Unmarshal(keyPairBytes, &keyPair) - if err != nil { - return err - } - - return nil - }) - - // Return keyPair and error if any - return keyPair, err -} - -// getAllKeys returns a full slice of keyPairs from the database -func (c *Cache) getAllKeys() ([]KeyPair, error) { - // Prepare empty keyPairs reference - var keyPairs []KeyPair - - // Retrieve all entries from database, unmarshaling and appending to []keyPair slice - err := c.database.View(func(tx *bolt.Tx) error { - // Get bucket - b := tx.Bucket([]byte("KEYS")) - - // Create slice of keypairs of size of bucket - keyPairs = make([]KeyPair, b.Stats().KeyN) - index := 0 - - // Prepare timer - startTime := time.Now() - - // Cursor - cur := b.Cursor() - for key, keyPairBytes := cur.First(); key != nil; key, keyPairBytes = cur.Next() { - // Prepare empty keyPair struct - var keyPair KeyPair - - // Unmarshal bytes - err := json.Unmarshal(keyPairBytes, &keyPair) - if err != nil { - return err - } - - // Append to keyPairs - keyPairs[index] = keyPair - index++ - - // Check time - if timeElapsed := time.Since(startTime).Seconds(); timeElapsed > float64(c.maxCacheScanTime) { - break - } - } - - return nil - }) - - // Return keyPairs and errors if any - return keyPairs, err -} - -// ShrinkDatabase manually re-creates the cache.db file and effectively shrinks it -func (c *Cache) ShrinkDatabase() error { - // Hook on to SIGTERM - sigtermChannel := make(chan os.Signal, 1) - signal.Notify(sigtermChannel, os.Interrupt, syscall.SIGTERM) - - // Start coroutine to wait for SIGTERM - handler := make(chan struct{}) - go func() { - for { - select { - case <-sigtermChannel: - // Prepare to shutdown server - log.Println("Aborted database shrinking!") - - // Delete half-shrunk database - os.Remove(c.directory + "/cache.db.tmp") - - // Exit properly - close(handler) - os.Exit(0) - case <-handler: - close(sigtermChannel) - return - } - } - }() - - // Prepare new database location - newDB, err := bolt.Open(c.directory+"/cache.db.tmp", 0600, nil) - if err != nil { - log.Errorf("Failed to open new database location: %v", err) - os.Exit(1) - } - - // Attempt to compact database - err = bolt.Compact(newDB, c.database, 0) - if err != nil { - log.Errorf("Failed to compact database: %v", err) - os.Exit(1) - } - - // Close new database - err = newDB.Close() - if err != nil { - log.Errorf("Failed 
to close new database: %v", err) - os.Exit(1) - } - - // Close old database - err = c.database.Close() - if err != nil { - log.Errorf("Failed to close old database: %v", err) - os.Exit(1) - } - - // Rename database files - if err := os.Rename(c.directory+"/cache.db", c.directory+"/cache.db.bak"); err != nil { - log.Fatalf("Failed to backup database: %v", err) - } - if err := os.Rename(c.directory+"/cache.db.tmp", c.directory+"/cache.db"); err != nil { - log.Fatalf("Failed to restore new database: %v", err) - } - log.Infof("Database backed up and renamed!") - - // Stop goroutine - handler <- struct{}{} - return nil -} - -func (c *Cache) openDB() error { - // Open BoltDB database - options := c.getOptions() - database, err := bolt.Open(c.directory+"/cache.db", 0600, options) - if err != nil { - return fmt.Errorf("Could not open database: %v", err) - } - - // Set database to cache struct - c.database = database - return nil -} - -// setupDB initialises the BoltDB database -func (c *Cache) setupDB() error { - // Create cache directory if not exists - err := os.MkdirAll(c.directory, os.ModePerm) - if err != nil { - return fmt.Errorf("Could not create cache directory %s: %v", c.directory, err) - } - - // Open database - err = c.openDB() - if err != nil { - return fmt.Errorf("Failed to open database: %v", err) - } - - // Create bucket if not exists - err = c.database.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists([]byte("KEYS")) - if err != nil { - return fmt.Errorf("Could not create bucket: %v", err) - } - return nil - }) - if err != nil { - return fmt.Errorf("Failed to setup bucket, %v", err) - } - - // Database ready! - log.Println("Database ready!") - return nil -} diff --git a/pkg/diskcache/diskcache.go b/pkg/diskcache/diskcache.go deleted file mode 100644 index 49b209e..0000000 --- a/pkg/diskcache/diskcache.go +++ /dev/null @@ -1,289 +0,0 @@ -package diskcache - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "sort" - "time" - - "github.com/VictoriaMetrics/metrics" - "github.com/sirupsen/logrus" -) - -var log *logrus.Logger -var clientCacheSize *metrics.Counter -var clientCacheLimit *metrics.Counter - -// DeleteFile takes an absolute path to a file and deletes it -func (c *Cache) DeleteFile(file string) error { - dir, file := file[0:2]+"/"+file[2:4]+"/"+file[4:6], file - - // Delete file off disk - err := os.Remove(c.directory + "/" + dir + "/" + file) - if err != nil { - log.Errorf("File does not seem to exist on disk, ignoring: %v", err) - } - - // Delete key off database - err = c.deleteEntry(file) - if err != nil { - err = fmt.Errorf("Entry does not seem to exist on database: %v", err) - return err - } - - // Return nil if no errors encountered - return nil -} - -// Get takes a key, hashes it, and returns the corresponding file in the directory -func (c *Cache) Get(key string) (reader *os.File, size int64, mtime time.Time, err error) { - // Check for empty cache key - if len(key) == 0 { - return nil, 0, time.Now(), fmt.Errorf("Empty cache key") - } - - // Get cache key - dir, key := getCacheKey(key) - - // Read image from directory - file, err := os.Open(c.directory + "/" + dir + "/" + key) - if err != nil { - err = fmt.Errorf("Failed to read image from key %s: %v", key, err) - return nil, 0, time.Now(), err - } - - // Get file information - fileInfo, err := os.Stat(c.directory + "/" + dir + "/" + key) - if err != nil { - err = fmt.Errorf("Failed to retrieve file information from key %s: %v", key, err) - return nil, 0, time.Now(), 
err - } - - // Attempt to get keyPair - keyPair, err := c.getEntry(key) - if err != nil { - err = fmt.Errorf("Failed to get entry for cache key %s: %v", key, err) - return nil, 0, time.Now(), err - } - - // If keyPair is older than configured cacheRefreshAge, refresh - if keyPair.Timestamp < time.Now().Add(-1*time.Duration(c.cacheRefreshAge)*time.Second).Unix() { - log.Debugf("Updating timestamp: %+v", keyPair) - if err != nil { - size := fileInfo.Size() - timestamp := time.Now().Unix() - keyPair = KeyPair{key, timestamp, int(size)} - } - - // Update timestamp - keyPair.UpdateTimestamp() - - // Set entry - err := c.setEntry(keyPair) - if err != nil { - err = fmt.Errorf("Failed to set entry for key %s: %v", key, err) - return nil, 0, time.Now(), err - } - } - - // Return file - return file, fileInfo.Size(), fileInfo.ModTime(), nil -} - -// Set takes a key, hashes it, and saves the `resp` bytearray into the corresponding file -func (c *Cache) Set(key string, mtime time.Time, resp []byte) error { - // Check for empty cache key - if len(key) == 0 { - return fmt.Errorf("Empty cache key") - } - - // Get cache key - dir, key := getCacheKey(key) - - // Create necessary cache subfolder - err := os.MkdirAll(c.directory+"/"+dir, os.ModePerm) - if err != nil { - err = fmt.Errorf("Failed to create cache folder for key %s: %v", key, err) - return err - } - - // Write image - err = ioutil.WriteFile(c.directory+"/"+dir+"/"+key, resp, 0644) - if err != nil { - err = fmt.Errorf("Failed to write image to disk for key %s: %v", key, err) - return err - } - - // Update modification time - if err := os.Chtimes(c.directory+"/"+dir+"/"+key, mtime, mtime); err != nil { - err = fmt.Errorf("Failed to set modification time of image '%s': %v", c.directory+"/"+dir+"/"+key, err) - return err - } - - // Update database - size := len(resp) - timestamp := time.Now().Unix() - keyPair := KeyPair{key, timestamp, size} - - // Set database entry - err = c.setEntry(keyPair) - if err != nil { - err = fmt.Errorf("Failed to set entry for key %s: %v", key, err) - return err - } - - // Update Prometheus metrics - clientCacheSize.Add(size) - - // Return no error - return nil -} - -// UpdateCacheLimit allows for updating of cache limit= -func (c *Cache) UpdateCacheLimit(cacheLimit int) { - c.cacheLimit = cacheLimit -} - -// UpdateCacheScanInterval allows for updating of cache scanning interval -func (c *Cache) UpdateCacheScanInterval(cacheScanInterval int) { - c.cacheScanInterval = cacheScanInterval -} - -// UpdateCacheRefreshAge allows for updating of cache refresh age -func (c *Cache) UpdateCacheRefreshAge(cacheRefreshAge int) { - c.cacheRefreshAge = cacheRefreshAge -} - -// StartBackgroundThread starts a background thread that automatically scans the directory and removes older files -// when cache exceeds size limits -func (c *Cache) StartBackgroundThread() { - for { - // Retrieve cache information - size, keys, err := c.loadCacheInfo() - if err != nil { - log.Fatal(err) - } - - // Log - usage := 100 * (float32(size) / float32(c.cacheLimit)) - log.Warnf("Current diskcache size: %s, limit: %s, usage: %0.3f%%", ByteCountIEC(size), ByteCountIEC(c.cacheLimit), usage) - - // If size is bigger than configured byte limit, keep deleting last recently used files - if size > c.cacheLimit { - // Get ready to shrink cache - log.Warnf("Shrinking diskcache size: %s, limit: %s", ByteCountIEC(size), ByteCountIEC(c.cacheLimit)) - deletedSize := 0 - deletedItems := 0 - - // Prepare timer - startTime := time.Now() - - // Loop over keys and delete till 
we are under threshold - for _, v := range keys { - // Delete file - err := c.DeleteFile(v.Key) - if err != nil { - log.Warnf("Unable to delete file in key %s: %v", v.Key, err) - } - - // Add to deletedSize - deletedSize += v.Size - deletedItems++ - - // Check if we are under threshold - if size-deletedSize < c.cacheLimit { - break - } - - // Check time elapsed - if timeElapsed := time.Since(startTime).Seconds(); timeElapsed > float64(c.maxCacheScanTime) { - break - } - } - - // Log success - log.Infof("Successfully shrunk diskcache by: %s, %d items", ByteCountIEC(deletedSize), deletedItems) - } - - // Update Prometheus metrics - clientCacheSize.Set(uint64(size)) - clientCacheLimit.Set(uint64(c.cacheLimit)) - - // Sleep till next execution - time.Sleep(time.Duration(c.cacheScanInterval) * time.Second) - } -} - -// loadCacheInfo -func (c *Cache) loadCacheInfo() (int, []KeyPair, error) { - // Create running variables - totalSize := 0 - - // Pull keys from BoltDB - keyPairs, err := c.getAllKeys() - if err != nil { - log.Fatal(err) - } - - // Count total size - for _, keyPair := range keyPairs { - totalSize += keyPair.Size - } - - // Sort cache by access time - sort.Sort(ByTimestamp(keyPairs)) - - // Return running variables - return totalSize, keyPairs, err -} - -// Close closes the database -func (c *Cache) Close() { - c.database.Close() -} - -func getCacheKey(key string) (string, string) { - // Create MD5 hasher - h := md5.New() - - // Write key to MD5 hasher (should not ever fail) - _, _ = io.WriteString(h, key) - - // Encode MD5 hash to hexadecimal - hash := hex.EncodeToString(h.Sum(nil)) - - // Return cache key - return hash[0:2] + "/" + hash[2:4] + "/" + hash[4:6], hash -} - -// New returns a new Cache that will store files in basePath -func New(directory string, cacheLimit int, cacheScanInterval int, cacheRefreshAge int, maxCacheScanTime int, logger *logrus.Logger, clientCacheSizeCounter *metrics.Counter, clientCacheLimitCounter *metrics.Counter) *Cache { - cache := Cache{ - directory: directory, - cacheLimit: cacheLimit, - cacheScanInterval: cacheScanInterval, - cacheRefreshAge: cacheRefreshAge, - maxCacheScanTime: maxCacheScanTime, - } - log = logger - clientCacheSize = clientCacheSizeCounter - clientCacheLimit = clientCacheLimitCounter - - // Setup BoltDB - err := cache.setupDB() - if err != nil { - log.Fatalf("Failed to setup BoltDB: %v", err) - } - - // Start background clean-up thread - if cacheScanInterval != 0 { - go cache.StartBackgroundThread() - } - - // Return cache object - return &cache -} diff --git a/pkg/diskcache/options.go b/pkg/diskcache/options.go deleted file mode 100644 index 6241c85..0000000 --- a/pkg/diskcache/options.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package diskcache - -import ( - bolt "go.etcd.io/bbolt" -) - -func (c *Cache) getOptions() *bolt.Options { - options := &bolt.Options{} - return options -} diff --git a/pkg/diskcache/options_linux.go b/pkg/diskcache/options_linux.go deleted file mode 100644 index 922896a..0000000 --- a/pkg/diskcache/options_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -package diskcache - -import ( - "syscall" - - bolt "go.etcd.io/bbolt" -) - -func (c *Cache) getOptions() *bolt.Options { - options := &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, - } - return options -} diff --git a/pkg/diskcache/structs.go b/pkg/diskcache/structs.go deleted file mode 100644 index 76bec14..0000000 --- a/pkg/diskcache/structs.go +++ /dev/null @@ -1,34 +0,0 @@ -package diskcache - -import ( - "time" - - bolt "go.etcd.io/bbolt" -) - 
-// Cache is a struct that represents a cache object -type Cache struct { - directory string - cacheLimit int - cacheScanInterval int - cacheRefreshAge int - maxCacheScanTime int - database *bolt.DB -} - -// KeyPair is a struct that represents a cache key in database -type KeyPair struct { - Key string - Timestamp int64 - Size int -} - -// UpdateTimestamp allows for updating of a KeyPair timestamp field -func (a *KeyPair) UpdateTimestamp() { a.Timestamp = time.Now().Unix() } - -// ByTimestamp is a sortable slice of KeyPair based off timestamp -type ByTimestamp []KeyPair - -func (a ByTimestamp) Len() int { return len(a) } -func (a ByTimestamp) Less(i, j int) bool { return a[i].Timestamp < a[j].Timestamp } -func (a ByTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/pkg/diskcache/utils.go b/pkg/diskcache/utils.go deleted file mode 100644 index 4cdda82..0000000 --- a/pkg/diskcache/utils.go +++ /dev/null @@ -1,20 +0,0 @@ -package diskcache - -import ( - "fmt" -) - -// ByteCountIEC returns a human-readable string describing the size of bytes in int -func ByteCountIEC(b int) string { - const unit = 1024 - if b < unit { - return fmt.Sprintf("%d B", b) - } - div, exp := int64(unit), 0 - for n := b / unit; n >= unit; n /= unit { - div *= unit - exp++ - } - return fmt.Sprintf("%.1f %ciB", - float64(b)/float64(div), "KMGTPE"[exp]) -}
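
The new main.go above embeds assets/config.example.toml, which this diff does not include. As a rough, hypothetical sketch only (not the shipped example file), the Viper keys referenced in this change map onto a TOML layout along these lines; the key names are exactly those passed to viper.Get* in the diff, the values mirror the old defaults from the removed ClientSettings block in main.go, and anything this diff never reads (for example the client secret) is left out.

# Hypothetical config.toml sketch; keys taken from the viper.Get* calls in this diff,
# values taken from the old hard-coded defaults. The real config.example.toml may differ.
[client]
port = 443
graceful_shutdown_seconds = 300

[cache]
directory = "cache/"
max_size_mebibytes = 10240
max_scan_interval_seconds = 300

[log]
directory = "log/"
level = "trace"
max_size_mebibytes = 64
max_backups = 3
max_age_days = 7

[performance]
low_memory_mode = false
allow_http2 = true
upstream_connection_reuse = true
client_timeout_seconds = 60

[security]
reject_invalid_tokens = true
reject_invalid_hostname = false
reject_invalid_sni = false
send_server_header = false
verify_image_integrity = false
allow_visitor_cache_refresh = false

[metrics]
enable_prometheus = false
use_forwarded_for_headers = false
maxmind_license_key = ""

[override]
upstream = ""

Viper nests keys on the dot, so a lookup such as viper.GetBool("security.reject_invalid_tokens") in the diff resolves to the reject_invalid_tokens entry inside the [security] table of a layout like the one sketched above.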