diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8dac321 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +*~ +bin +*.db +work +go.work* \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100755 index 0000000..10a54ff --- /dev/null +++ b/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2024, Aaron Straup Cope +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..a5b736d --- /dev/null +++ b/Makefile @@ -0,0 +1,5 @@ +GOMOD=$(shell test -f "go.work" && echo "readonly" || echo "vendor") +LDFLAGS=-s -w + +cli: + go build -mod $(GOMOD) -ldflags="$(LDFLAGS)" -o bin/wof-sql-index cmd/wof-sql-index/main.go diff --git a/README.md b/README.md new file mode 100644 index 0000000..3bb6333 --- /dev/null +++ b/README.md @@ -0,0 +1,11 @@ +# go-whosonfirst-database + +Go package implementing common properties and methods for working with Who's On First databases. + +## Documentation + +Documentation is incomplete at this time. + +## Motivation + +The goal is for this package to replace the `whosonfirst/go-whosonfirst-sqlite*` packages. Everything works but there are still details to be finalized. diff --git a/app/sql/index/app.go b/app/sql/index/app.go new file mode 100755 index 0000000..77cc789 --- /dev/null +++ b/app/sql/index/app.go @@ -0,0 +1,378 @@ +package index + +import ( + "context" + "database/sql" + "flag" + "fmt" + "log/slog" + "net/url" + "runtime" + "slices" + + "github.com/sfomuseum/go-database" + "github.com/sfomuseum/go-flags/flagset" + "github.com/whosonfirst/go-reader" + "github.com/whosonfirst/go-whosonfirst-database/sql/indexer" + "github.com/whosonfirst/go-whosonfirst-database/sql/tables" +) + +const index_alt_all string = "*" + +func Run(ctx context.Context) error { + fs := DefaultFlagSet() + return RunWithFlagSet(ctx, fs) +} + +// To do: Add RunWithOptions... 
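+
+// A minimal sketch of what a RunWithOptions entrypoint might look like
+// (hypothetical; RunOptions and RunWithOptions are assumptions and not yet
+// part of this package):
+//
+//	type RunOptions struct {
++//		FlagSet *flag.FlagSet
+//	}
+//
+//	func RunWithOptions(ctx context.Context, opts *RunOptions) error {
+//		return RunWithFlagSet(ctx, opts.FlagSet)
+//	}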
+ +func RunWithFlagSet(ctx context.Context, fs *flag.FlagSet) error { + + flagset.Parse(fs) + + if verbose { + slog.SetLogLoggerLevel(slog.LevelDebug) + slog.Debug("Verbose logging enabled") + } + + runtime.GOMAXPROCS(procs) + + if spatial_tables { + rtree = true + geojson = true + properties = true + spr = true + } + + if spelunker_tables { + // rtree = true + spr = true + spelunker = true + geojson = true + concordances = true + ancestors = true + search = true + + to_index_alt := []string{ + tables.GEOJSON_TABLE_NAME, + } + + for _, table_name := range to_index_alt { + + if !slices.Contains(index_alt, table_name) { + index_alt = append(index_alt, table_name) + } + } + + } + + logger := slog.Default() + + // START OF put me in sfomuseum/go-database/sql ... + + u, err := url.Parse(db_uri) + + if err != nil { + return err + } + + q := u.Query() + + engine := u.Host + dsn := q.Get("dsn") + + db, err := sql.Open(engine, dsn) + + if err != nil { + return fmt.Errorf("Unable to create database (%s) because %v", db_uri, err) + } + + defer func() { + + err := db.Close() + + if err != nil { + logger.Error("Failed to close database connection", "error", err) + } + }() + + switch database.Driver(db) { + case "sqlite": + + pragma := database.DefaultSQLitePragma() + err := database.ConfigureSQLitePragma(ctx, db, pragma) + + if err != nil { + return fmt.Errorf("Failed to configure SQLite pragma, %w", err) + } + + // END OF put me in sfomuseum/go-database/sql ... + + // optimize query performance + // https://www.sqlite.org/pragma.html#pragma_optimize + if optimize { + + defer func() { + + _, err = db.Exec("PRAGMA optimize") + + if err != nil { + logger.Error("Failed to optimize", "error", err) + return + } + }() + + } + + } + + to_index := make([]database.Table, 0) + + if geojson || all { + + geojson_opts, err := tables.DefaultGeoJSONTableOptions() + + if err != nil { + return fmt.Errorf("failed to create '%s' table options because %s", tables.GEOJSON_TABLE_NAME, err) + } + + // alt_files is deprecated (20240229/straup) + + if alt_files || slices.Contains(index_alt, tables.GEOJSON_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) { + geojson_opts.IndexAltFiles = true + } + + gt, err := tables.NewGeoJSONTableWithDatabaseAndOptions(ctx, db, geojson_opts) + + if err != nil { + return fmt.Errorf("failed to create '%s' table because %s", tables.GEOJSON_TABLE_NAME, err) + } + + to_index = append(to_index, gt) + } + + if supersedes || all { + + t, err := tables.NewSupersedesTableWithDatabase(ctx, db) + + if err != nil { + return fmt.Errorf("failed to create '%s' table because %s", tables.SUPERSEDES_TABLE_NAME, err) + } + + to_index = append(to_index, t) + } + + if rtree || all { + + rtree_opts, err := tables.DefaultRTreeTableOptions() + + if err != nil { + return fmt.Errorf("failed to create 'rtree' table options because %s", err) + } + + // alt_files is deprecated (20240229/straup) + + if alt_files || slices.Contains(index_alt, tables.RTREE_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) { + rtree_opts.IndexAltFiles = true + } + + gt, err := tables.NewRTreeTableWithDatabaseAndOptions(ctx, db, rtree_opts) + + if err != nil { + return fmt.Errorf("failed to create 'rtree' table because %s", err) + } + + to_index = append(to_index, gt) + } + + if properties || all { + + properties_opts, err := tables.DefaultPropertiesTableOptions() + + if err != nil { + return fmt.Errorf("failed to create 'properties' table options because %s", err) + } + + // alt_files is deprecated (20240229/straup) + + if 
alt_files || slices.Contains(index_alt, tables.PROPERTIES_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) {
+			properties_opts.IndexAltFiles = true
+		}
+
+		gt, err := tables.NewPropertiesTableWithDatabaseAndOptions(ctx, db, properties_opts)
+
+		if err != nil {
+			return fmt.Errorf("failed to create 'properties' table because %s", err)
+		}
+
+		to_index = append(to_index, gt)
+	}
+
+	if spr || all {
+
+		spr_opts, err := tables.DefaultSPRTableOptions()
+
+		if err != nil {
+			return fmt.Errorf("Failed to create '%s' table options because %v", tables.SPR_TABLE_NAME, err)
+		}
+
+		// alt_files is deprecated (20240229/straup)
+
+		if alt_files || slices.Contains(index_alt, tables.SPR_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) {
+			spr_opts.IndexAltFiles = true
+		}
+
+		st, err := tables.NewSPRTableWithDatabaseAndOptions(ctx, db, spr_opts)
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table because %s", tables.SPR_TABLE_NAME, err)
+		}
+
+		to_index = append(to_index, st)
+	}
+
+	if spelunker || all {
+
+		spelunker_opts, err := tables.DefaultSpelunkerTableOptions()
+
+		if err != nil {
+			return fmt.Errorf("Failed to create '%s' table options because %v", tables.SPELUNKER_TABLE_NAME, err)
+		}
+
+		// alt_files is deprecated (20240229/straup)
+
+		if alt_files || slices.Contains(index_alt, tables.SPELUNKER_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) {
+			spelunker_opts.IndexAltFiles = true
+		}
+
+		st, err := tables.NewSpelunkerTableWithDatabaseAndOptions(ctx, db, spelunker_opts)
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table because %s", tables.SPELUNKER_TABLE_NAME, err)
+		}
+
+		to_index = append(to_index, st)
+	}
+
+	if names || all {
+
+		nm, err := tables.NewNamesTableWithDatabase(ctx, db)
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table because %s", tables.NAMES_TABLE_NAME, err)
+		}
+
+		to_index = append(to_index, nm)
+	}
+
+	if ancestors || all {
+
+		an, err := tables.NewAncestorsTableWithDatabase(ctx, db)
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table because %s", tables.ANCESTORS_TABLE_NAME, err)
+		}
+
+		to_index = append(to_index, an)
+	}
+
+	if concordances || all {
+
+		cn, err := tables.NewConcordancesTableWithDatabase(ctx, db)
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table because %s", tables.CONCORDANCES_TABLE_NAME, err)
+		}
+
+		to_index = append(to_index, cn)
+	}
+
+	// see the way we don't check all here - that's so people who don't have
+	// spatialite installed can still use all (20180122/thisisaaronland)
+
+	if geometries {
+
+		geometries_opts, err := tables.DefaultGeometriesTableOptions()
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table options because %v", tables.GEOMETRIES_TABLE_NAME, err)
+		}
+
+		// alt_files is deprecated (20240229/straup)
+
+		if alt_files || slices.Contains(index_alt, tables.GEOMETRIES_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) {
+			geometries_opts.IndexAltFiles = true
+		}
+
+		gm, err := tables.NewGeometriesTableWithDatabaseAndOptions(ctx, db, geometries_opts)
+
+		if err != nil {
+			return fmt.Errorf("failed to create '%s' table because %v", tables.GEOMETRIES_TABLE_NAME, err)
+		}
+
+		to_index = append(to_index, gm)
+	}
+
+	// see the way we don't check all here either - that's because this table can be
+	// brutally slow to index and should probably really just be a separate database
+	// anyway... (20180214/thisisaaronland)
+
+	if search {
+
+		// ALT FILES...
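+
+		// Unlike the geojson, rtree, properties and spr tables above, the
+		// search table does not yet expose an IndexAltFiles option. If it
+		// grew one, the wiring would presumably mirror those blocks, for
+		// example (a hypothetical sketch; DefaultSearchTableOptions and
+		// SEARCH_TABLE_NAME are assumed names, not confirmed by this change):
+		//
+		//	search_opts, _ := tables.DefaultSearchTableOptions()
+		//
+		//	if slices.Contains(index_alt, tables.SEARCH_TABLE_NAME) || slices.Contains(index_alt, index_alt_all) {
+		//		search_opts.IndexAltFiles = true
+		//	}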
+ + st, err := tables.NewSearchTableWithDatabase(ctx, db) + + if err != nil { + return fmt.Errorf("failed to create 'search' table because %v", err) + } + + to_index = append(to_index, st) + } + + if len(to_index) == 0 { + return fmt.Errorf("You forgot to specify which (any) tables to index") + } + + record_opts := &indexer.LoadRecordFuncOptions{ + StrictAltFiles: strict_alt_files, + } + + record_func := indexer.LoadRecordFunc(record_opts) + + idx_opts := &indexer.IndexerOptions{ + DB: db, + Tables: to_index, + LoadRecordFunc: record_func, + } + + if index_relations { + + r, err := reader.NewReader(ctx, relations_uri) + + if err != nil { + return fmt.Errorf("Failed to load reader (%s), %v", relations_uri, err) + } + + belongsto_func := indexer.IndexRelationsFunc(r) + idx_opts.PostIndexFunc = belongsto_func + } + + idx, err := indexer.NewIndexer(idx_opts) + + if err != nil { + return fmt.Errorf("failed to create sqlite indexer because %v", err) + } + + idx.Timings = timings + + uris := fs.Args() + + err = idx.IndexURIs(ctx, iterator_uri, uris...) + + if err != nil { + return fmt.Errorf("Failed to index paths in %s mode because: %s", iterator_uri, err) + } + + return nil +} diff --git a/app/sql/index/flags.go b/app/sql/index/flags.go new file mode 100755 index 0000000..a897510 --- /dev/null +++ b/app/sql/index/flags.go @@ -0,0 +1,90 @@ +package index + +import ( + "flag" + "fmt" + "runtime" + "strings" + + "github.com/sfomuseum/go-flags/flagset" + "github.com/sfomuseum/go-flags/multi" + "github.com/whosonfirst/go-whosonfirst-iterate/v2/emitter" +) + +var iterator_uri string + +var db_uri string + +var all bool +var ancestors bool +var concordances bool +var geojson bool +var spelunker bool +var geometries bool +var names bool +var rtree bool +var properties bool +var search bool +var spr bool +var supersedes bool + +var spatial_tables bool +var spelunker_tables bool + +var timings bool +var optimize bool + +var alt_files bool +var strict_alt_files bool + +var index_alt multi.MultiString + +var index_relations bool +var relations_uri string + +var procs int +var verbose bool + +func DefaultFlagSet() *flag.FlagSet { + + fs := flagset.NewFlagSet("index") + + valid_schemes := strings.Join(emitter.Schemes(), ",") + iterator_desc := fmt.Sprintf("A valid whosonfirst/go-whosonfirst-iterate/v2 URI. Supported emitter URI schemes are: %s", valid_schemes) + + fs.StringVar(&iterator_uri, "iterator-uri", "repo://", iterator_desc) + + fs.StringVar(&db_uri, "database-uri", "", "A URI in the form of 'sql://{DATABASE_SQL_ENGINE}?dsn={DATABASE_SQL_DSN}'. 
For example: sql://sqlite3?dsn=test.db")
+
+	fs.BoolVar(&all, "all", false, "Index all tables (except the 'search' and 'geometries' tables which you need to specify explicitly)")
+	fs.BoolVar(&ancestors, "ancestors", false, "Index the 'ancestors' table")
+	fs.BoolVar(&concordances, "concordances", false, "Index the 'concordances' table")
+	fs.BoolVar(&geojson, "geojson", false, "Index the 'geojson' table")
+	fs.BoolVar(&spelunker, "spelunker", false, "Index the 'spelunker' table")
+	fs.BoolVar(&geometries, "geometries", false, "Index the 'geometries' table (requires that libspatialite already be installed)")
+	fs.BoolVar(&names, "names", false, "Index the 'names' table")
+	fs.BoolVar(&rtree, "rtree", false, "Index the 'rtree' table")
+	fs.BoolVar(&properties, "properties", false, "Index the 'properties' table")
+	fs.BoolVar(&search, "search", false, "Index the 'search' table (using SQLite FTS4 full-text indexer)")
+	fs.BoolVar(&spr, "spr", false, "Index the 'spr' table")
+	fs.BoolVar(&supersedes, "supersedes", false, "Index the 'supersedes' table")
+
+	fs.BoolVar(&spatial_tables, "spatial-tables", false, "If true then index the necessary tables for use with the whosonfirst/go-whosonfirst-spatial-sqlite package.")
+	fs.BoolVar(&spelunker_tables, "spelunker-tables", false, "If true then index the necessary tables for use with the whosonfirst/go-whosonfirst-spelunker packages.")
+
+	fs.BoolVar(&timings, "timings", false, "Display timings during and after indexing")
+	fs.BoolVar(&optimize, "optimize", true, "Attempt to optimize the database before closing the connection")
+
+	fs.BoolVar(&alt_files, "index-alt-files", false, "Index alt geometries. This flag is deprecated, please use -index-alt=TABLE,TABLE,etc. instead. To index alt geometries in all the applicable tables use -index-alt=*")
+	fs.Var(&index_alt, "index-alt", "Zero or more table names where alt geometry files should be indexed.")
+
+	fs.BoolVar(&strict_alt_files, "strict-alt-files", true, "Be strict when indexing alt geometries")
+
+	fs.BoolVar(&index_relations, "index-relations", false, "Index the records related to a feature, specifically wof:belongsto, wof:depicts and wof:involves.
Alt files for relations are not indexed at this time.") + fs.StringVar(&relations_uri, "index-relations-reader-uri", "", "A valid go-reader.Reader URI from which to read data for a relations candidate.") + + fs.IntVar(&procs, "processes", (runtime.NumCPU() * 2), "The number of concurrent processes to index data with") + + fs.BoolVar(&verbose, "verbose", false, "Enable verbose (debug) logging") + return fs +} diff --git a/cmd/wof-sql-index/main.go b/cmd/wof-sql-index/main.go new file mode 100755 index 0000000..4c8b754 --- /dev/null +++ b/cmd/wof-sql-index/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "context" + "log" + + "github.com/whosonfirst/go-whosonfirst-database/app/sql/index" +) + +func main() { + + ctx := context.Background() + err := index.Run(ctx) + + if err != nil { + log.Fatalf("Failed to index, %v", err) + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..3d02145 --- /dev/null +++ b/go.mod @@ -0,0 +1,37 @@ +module github.com/whosonfirst/go-whosonfirst-database + +go 1.23.3 + +require ( + github.com/paulmach/orb v0.11.1 + github.com/sfomuseum/go-database v0.0.6 + github.com/sfomuseum/go-flags v0.10.0 + github.com/tidwall/gjson v1.18.0 + github.com/whosonfirst/go-reader v1.0.2 + github.com/whosonfirst/go-whosonfirst-feature v0.0.28 + github.com/whosonfirst/go-whosonfirst-iterate/v2 v2.3.4 + github.com/whosonfirst/go-whosonfirst-names v0.1.0 + github.com/whosonfirst/go-whosonfirst-spelunker v0.0.5 + github.com/whosonfirst/go-whosonfirst-spr/v2 v2.3.7 + github.com/whosonfirst/go-whosonfirst-uri v1.3.0 +) + +require ( + github.com/aaronland/go-json-query v0.1.4 // indirect + github.com/aaronland/go-roster v1.0.0 // indirect + github.com/dominikbraun/graph v0.23.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/sfomuseum/go-edtf v1.1.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/whosonfirst/go-ioutil v1.0.2 // indirect + github.com/whosonfirst/go-rfc-5646 v0.1.0 // indirect + github.com/whosonfirst/go-whosonfirst-crawl v0.2.2 // indirect + github.com/whosonfirst/go-whosonfirst-flags v0.5.1 // indirect + github.com/whosonfirst/go-whosonfirst-placetypes v0.7.2 // indirect + github.com/whosonfirst/go-whosonfirst-sources v0.1.0 // indirect + github.com/whosonfirst/walk v0.0.2 // indirect + go.mongodb.org/mongo-driver v1.11.4 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..b499f1a --- /dev/null +++ b/go.sum @@ -0,0 +1,128 @@ +github.com/aaronland/go-json-query v0.1.4 h1:iM5GkF0VDsOeVgp0/WrDaFUB64ubJvmm+TZ0H4OQxxM= +github.com/aaronland/go-json-query v0.1.4/go.mod h1:S7V5eQko+XDPq+dfdSYub5mZI0VapVgUH2NLG0buZr4= +github.com/aaronland/go-roster v1.0.0 h1:FRDGrTqsYySKjWnAhbBGXyeGlI/o5/t9FZYCbUmyQtI= +github.com/aaronland/go-roster v1.0.0/go.mod h1:KIsYZgrJlAsyb9LsXSCvlqvbcCBVjCSqcQiZx42i9ro= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dominikbraun/graph v0.23.0 h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo= +github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sfomuseum/go-database v0.0.6 h1:TNugRfRZkjq4fdQAA66H2gxX+EQO729oYzQTByR+SVY= +github.com/sfomuseum/go-database v0.0.6/go.mod h1:c1oZwb0M0aYSI48SgE8b5iIrGrLFA8eJFk46E5g4JOM= +github.com/sfomuseum/go-edtf v1.1.1 h1:R5gElndHGDaK/rGSh2X+ulaLtlcHCdQA1cTzB8e9wv8= +github.com/sfomuseum/go-edtf v1.1.1/go.mod h1:1rP0EJZ/84j3HO80vGcnG2T9MFBDAFyTNtjrr8cv3T4= +github.com/sfomuseum/go-flags v0.10.0 h1:1OC1ACxpWMsl3XQ9OeNVMQj7Zi2CzufP3Rym3mPI8HU= +github.com/sfomuseum/go-flags v0.10.0/go.mod h1:VXOnnX1/yxQpX2yiwHaBV6aCmhtszQOL5bL1/nNo3co= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= 
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/whosonfirst/go-ioutil v1.0.2 h1:+GJPfa42OFn5A+5yJSc5jQTQIkNV3/MhYyg4pavdrC8= +github.com/whosonfirst/go-ioutil v1.0.2/go.mod h1:2dS1vWdAIkiHDvDF8fYyjv6k2NISmwaIjJJeEDBEdvg= +github.com/whosonfirst/go-reader v1.0.2 h1:eccnKKSMGR+X1SJyHUZN0/7qE7VbFQULqSVQU0Su3xs= +github.com/whosonfirst/go-reader v1.0.2/go.mod h1:2w9l/QusYZSiGuEof3RwCHUFnM492SSOF2H7UxS4YIE= +github.com/whosonfirst/go-rfc-5646 v0.1.0 h1:HNFPAem6v5De61PXLgbGzx9tfNOP83AAkVvm9WAddJY= +github.com/whosonfirst/go-rfc-5646 v0.1.0/go.mod h1:JZj//FV9YeV3fkyOY/82V53EMLQXwRwNPuQIGs8BUmo= +github.com/whosonfirst/go-whosonfirst-crawl v0.2.2 h1:7nwpNV/BFoPR0R7KMMy1iiYAer7wlHJBUOiL+NLzIFs= +github.com/whosonfirst/go-whosonfirst-crawl v0.2.2/go.mod h1:2GZkaK9jaOisWRnBQGWzmb7H55TUFl9y9F30lrk2hwk= +github.com/whosonfirst/go-whosonfirst-feature v0.0.28 h1:XCNNzjnk1i50UnqrNtrN9xNvJDwZGjsPHbb3HRPixXA= +github.com/whosonfirst/go-whosonfirst-feature v0.0.28/go.mod h1:8kc2Ei7mCankyWQcfzLHdKAc1//B/ptY7uEFsT5I8m4= +github.com/whosonfirst/go-whosonfirst-flags v0.5.1 h1:kRzXK7WZlEK1hNw+CECEdnWNtEDbWbjWdEg0imu1mGE= +github.com/whosonfirst/go-whosonfirst-flags v0.5.1/go.mod h1:VgXcWNtsCZGy/Xnt9bpSUTKJ3nYeqXqvLD3NrE6kzZg= +github.com/whosonfirst/go-whosonfirst-iterate/v2 v2.3.4 h1:fEhr0jwqxXvG7Nb7id4Q26ucdBZD51TcFNIiU+VNopM= +github.com/whosonfirst/go-whosonfirst-iterate/v2 v2.3.4/go.mod h1:kDIvHxJTo6XgFKmLHliGLxlqeeHhxuYtdeVZBAcP68g= +github.com/whosonfirst/go-whosonfirst-names v0.1.0 h1:uXop/DwQqH60uDBZvHCPg1yRSQLScbm6VZyqcaED2KE= +github.com/whosonfirst/go-whosonfirst-names v0.1.0/go.mod h1:0z86/nedM9T/5C8cAdbCMfRuBrkc33oEQ6vdJ6WybSg= +github.com/whosonfirst/go-whosonfirst-placetypes v0.7.2 h1:o781pAQUuSg1irAATgvG0uUSLtvJl9tOY1HW85aePtM= +github.com/whosonfirst/go-whosonfirst-placetypes v0.7.2/go.mod h1:Z9Rz7A2aTccdldVzVuuC2eG+R1dvz+sabtG9fA2i44Q= +github.com/whosonfirst/go-whosonfirst-sources v0.1.0 h1:JuKLa6KWke22jBfJ1pM9WQHoz1/3pbDv2C+aR+THPPQ= +github.com/whosonfirst/go-whosonfirst-sources v0.1.0/go.mod h1:EUMHyGzUmqPPxlMmOp+28BFeoBdxxE0HCKRd67lkqGM= +github.com/whosonfirst/go-whosonfirst-spelunker v0.0.5 h1:cZZNft+n3eJ1UFcJIPfHudobUOTvJsx6FDismS4OiUA= +github.com/whosonfirst/go-whosonfirst-spelunker v0.0.5/go.mod h1:/piw3kZ2BSx320SShlHTPHQkHPJJUUizBBzKmOPaiog= +github.com/whosonfirst/go-whosonfirst-spr/v2 v2.3.7 h1:1j4IMuVua4/NhqKm2ke16h2V5Z2XiLoOdXbjDrfZb0E= +github.com/whosonfirst/go-whosonfirst-spr/v2 v2.3.7/go.mod h1:I2IqsIutlDrvYp37Xzwk4WGq+m/gLh+gEifiLmH0bac= +github.com/whosonfirst/go-whosonfirst-uri v1.3.0 h1:LYOVLqP9rWQxauYVkdw65j5LZxEi8OK0GHh/qCEpX4g= +github.com/whosonfirst/go-whosonfirst-uri v1.3.0/go.mod h1:CuVygTCUpMG945MMvqHyqxvc/L5YkDaMrrVpRFr7ZxY= +github.com/whosonfirst/walk v0.0.2 h1:fA0xskpnorC8OTvA1IKVOwQ12joCW6R5k2+/qd9lS7k= +github.com/whosonfirst/walk v0.0.2/go.mod h1:HvRa/XX8jGtggvwAFMWMMuNy1M3+ZDqDX2hxiGR9b2o= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.mongodb.org/mongo-driver v1.11.4 
h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/sql/indexer/indexer.go b/sql/indexer/indexer.go
new file mode 100644
index 0000000..6b85e51
--- /dev/null
+++ b/sql/indexer/indexer.go
@@ -0,0 +1,196 @@
+package indexer
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"io"
+	"log/slog"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/sfomuseum/go-database"
+	"github.com/whosonfirst/go-whosonfirst-iterate/v2/emitter"
+	"github.com/whosonfirst/go-whosonfirst-iterate/v2/iterator"
+)
+
+// IndexerPostIndexFunc is a custom function to invoke after a record has been indexed.
+type IndexerPostIndexFunc func(context.Context, *sql.DB, []database.Table, interface{}) error
+
+// IndexerLoadRecordFunc is a custom `whosonfirst/go-whosonfirst-iterate/v2` callback function to be invoked
+// for each record processed by the `IndexURIs` method.
+type IndexerLoadRecordFunc func(context.Context, string, io.ReadSeeker, ...interface{}) (interface{}, error)
+
+// Indexer is a struct that provides methods for indexing records in one or more database tables.
+type Indexer struct {
+	// iterator_callback is the `whosonfirst/go-whosonfirst-iterate/v2` callback function used by the `IndexURIs` method
+	iterator_callback emitter.EmitterCallbackFunc
+	table_timings     map[string]time.Duration
+	mu                *sync.RWMutex
+	// Timings is a boolean flag indicating whether timings (time to index records) should be recorded.
+	Timings bool
+}
+
+// IndexerOptions is a struct containing configuration options for the `NewIndexer` method.
+type IndexerOptions struct {
+	// DB is the `database/sql.DB` instance that records will be indexed in.
+	DB *sql.DB
+	// Tables is the list of `sfomuseum/go-database.Table` instances that records will be indexed in.
+	Tables []database.Table
+	// LoadRecordFunc is a custom `whosonfirst/go-whosonfirst-iterate/v2` callback function to be invoked
+	// for each record processed by the `IndexURIs` method.
+	LoadRecordFunc IndexerLoadRecordFunc
+	// PostIndexFunc is an optional custom function to invoke after a record has been indexed.
+	PostIndexFunc IndexerPostIndexFunc
+}
+
+// NewIndexer returns an `Indexer` configured with 'opts'.
+func NewIndexer(opts *IndexerOptions) (*Indexer, error) {
+
+	db := opts.DB
+	tables := opts.Tables
+	record_func := opts.LoadRecordFunc
+
+	table_timings := make(map[string]time.Duration)
+	mu := new(sync.RWMutex)
+
+	iterator_cb := func(ctx context.Context, path string, r io.ReadSeeker, args ...interface{}) error {
+
+		logger := slog.Default()
+		logger = logger.With("path", path)
+
+		/*
+			t1 := time.Now()
+
+			defer func() {
+				logger.Debug("Time to index record", "time", time.Since(t1))
+			}()
+		*/
+
+		record, err := record_func(ctx, path, r, args...)
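+
+		// A load-record callback may return (nil, nil) to signal that a
+		// record should be skipped without raising an error; the
+		// LoadRecordFunc in this package does exactly that when its
+		// context has been cancelled. Both outcomes are handled below.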
+
+		if err != nil {
+			logger.Error("Failed to load record", "error", err)
+			return err
+		}
+
+		if record == nil {
+			logger.Debug("Record func returned nil")
+			return nil
+		}
+
+		mu.Lock()
+		defer mu.Unlock()
+
+		for _, t := range tables {
+
+			logger := slog.Default()
+			logger = logger.With("path", path)
+			logger = logger.With("table", t.Name())
+
+			t1 := time.Now()
+
+			err = t.IndexRecord(ctx, db, record)
+
+			if err != nil {
+				logger.Error("Failed to index feature", "error", err)
+				return err
+			}
+
+			t2 := time.Since(t1)
+
+			n := t.Name()
+
+			_, ok := table_timings[n]
+
+			if ok {
+				table_timings[n] += t2
+			} else {
+				table_timings[n] = t2
+			}
+		}
+
+		if opts.PostIndexFunc != nil {
+
+			err := opts.PostIndexFunc(ctx, db, tables, record)
+
+			if err != nil {
+				logger.Error("Post-index function failed", "error", err)
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	i := Indexer{
+		iterator_callback: iterator_cb,
+		table_timings:     table_timings,
+		mu:                mu,
+		Timings:           false,
+	}
+
+	return &i, nil
+}
+
+// IndexURIs will index records returned by the `whosonfirst/go-whosonfirst-iterate` instance for 'uris'.
+func (idx *Indexer) IndexURIs(ctx context.Context, iterator_uri string, uris ...string) error {
+
+	iter, err := iterator.NewIterator(ctx, iterator_uri, idx.iterator_callback)
+
+	if err != nil {
+		return fmt.Errorf("Failed to create new iterator, %w", err)
+	}
+
+	done_ch := make(chan bool)
+	t1 := time.Now()
+
+	// ideally this could be a proper stand-alone package method but then
+	// we have to set up a whole bunch of scaffolding just to pass 'indexer'
+	// around so... we're not doing that (20180205/thisisaaronland)
+
+	show_timings := func() {
+
+		t2 := time.Since(t1)
+
+		i := atomic.LoadInt64(&iter.Seen)
+
+		idx.mu.RLock()
+		defer idx.mu.RUnlock()
+
+		for t, d := range idx.table_timings {
+			slog.Info("Time to index table", "table", t, "count", i, "time", d)
+		}
+
+		slog.Info("Time to index all", "count", i, "time", t2)
+	}
+
+	if idx.Timings {
+
+		go func() {
+
+			for {
+
+				select {
+				case <-done_ch:
+					return
+				case <-time.After(1 * time.Minute):
+					show_timings()
+				}
+			}
+		}()
+
+		defer func() {
+			done_ch <- true
+		}()
+	}
+
+	err = iter.IterateURIs(ctx, uris...)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/sql/indexer/relations.go b/sql/indexer/relations.go
new file mode 100755
index 0000000..1ffdb57
--- /dev/null
+++ b/sql/indexer/relations.go
@@ -0,0 +1,197 @@
+package indexer
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"io"
+	"log/slog"
+	"sync"
+
+	"github.com/sfomuseum/go-database"
+	"github.com/tidwall/gjson"
+	"github.com/whosonfirst/go-reader"
+	wof_tables "github.com/whosonfirst/go-whosonfirst-database/sql/tables"
+	"github.com/whosonfirst/go-whosonfirst-feature/geometry"
+	"github.com/whosonfirst/go-whosonfirst-feature/properties"
+	"github.com/whosonfirst/go-whosonfirst-uri"
+)
+
+// LoadRecordFuncOptions is a struct to define options when loading Who's On First feature records.
+type LoadRecordFuncOptions struct {
+	// StrictAltFiles is a boolean flag indicating whether the failure to load or parse an alternate geometry file should trigger a critical error.
+	StrictAltFiles bool
+}
+
+// IndexRelationsFuncOptions is a struct to define options when indexing relations for Who's On First feature records.
+type IndexRelationsFuncOptions struct {
+	// Reader is a valid `whosonfirst/go-reader` instance used to load Who's On First feature data
+	Reader reader.Reader
+	// Strict is a boolean flag indicating whether the failure to load or parse a feature record should trigger a critical error.
Strict bool
+}
+
+// LoadRecordFunc returns a `go-whosonfirst-database/sql/indexer/IndexerLoadRecordFunc` callback
+// function that will ensure that the record being processed is a valid Who's On First GeoJSON Feature record.
+func LoadRecordFunc(opts *LoadRecordFuncOptions) IndexerLoadRecordFunc {
+
+	cb := func(ctx context.Context, path string, r io.ReadSeeker, args ...interface{}) (interface{}, error) {
+
+		select {
+
+		case <-ctx.Done():
+			return nil, nil
+		default:
+			// pass
+		}
+
+		body, err := io.ReadAll(r)
+
+		if err != nil {
+			return nil, fmt.Errorf("Failed to read %s, %w", path, err)
+		}
+
+		_, err = properties.Id(body)
+
+		if err != nil {
+			return nil, fmt.Errorf("Failed to derive wof:id for %s, %w", path, err)
+		}
+
+		_, err = geometry.Geometry(body)
+
+		if err != nil {
+			return nil, fmt.Errorf("Failed to derive geometry for %s, %w", path, err)
+		}
+
+		return body, nil
+	}
+
+	return cb
+}
+
+// IndexRelationsFunc returns a `go-whosonfirst-database/sql/indexer/IndexerPostIndexFunc` callback
+// function used to index relations for a WOF record after that record has been successfully indexed.
+func IndexRelationsFunc(r reader.Reader) IndexerPostIndexFunc {
+
+	opts := &IndexRelationsFuncOptions{}
+	opts.Reader = r
+
+	return IndexRelationsFuncWithOptions(opts)
+}
+
+// IndexRelationsFuncWithOptions returns a `go-whosonfirst-database/sql/indexer/IndexerPostIndexFunc` callback
+// function used to index relations for a WOF record after that record has been successfully indexed, but with custom
+// `IndexRelationsFuncOptions` options defined in 'opts'.
+func IndexRelationsFuncWithOptions(opts *IndexRelationsFuncOptions) IndexerPostIndexFunc {
+
+	seen := new(sync.Map)
+
+	cb := func(ctx context.Context, db *sql.DB, tables []database.Table, record interface{}) error {
+
+		geojson_t, err := wof_tables.NewGeoJSONTable(ctx)
+
+		if err != nil {
+			return fmt.Errorf("Failed to create new GeoJSON table, %w", err)
+		}
+
+		body := record.([]byte)
+
+		relations := make(map[int64]bool)
+
+		candidates := []string{
+			"properties.wof:belongsto",
+			"properties.wof:involves",
+			"properties.wof:depicts",
+		}
+
+		for _, path := range candidates {
+
+			// log.Println("RELATIONS", path)
+
+			rsp := gjson.GetBytes(body, path)
+
+			if !rsp.Exists() {
+				// log.Println("MISSING", path)
+				continue
+			}
+
+			for _, r := range rsp.Array() {
+
+				id := r.Int()
+
+				// skip -1, -4, etc.
+				// (20201224/thisisaaronland)
+
+				if id <= 0 {
+					continue
+				}
+
+				relations[id] = true
+			}
+		}
+
+		for id := range relations {
+
+			_, ok := seen.Load(id)
+
+			if ok {
+				continue
+			}
+
+			seen.Store(id, true)
+
+			sql := fmt.Sprintf("SELECT COUNT(id) FROM %s WHERE id=?", geojson_t.Name())
+			row := db.QueryRow(sql, id)
+
+			var count int
+			err = row.Scan(&count)
+
+			if err != nil {
+				return fmt.Errorf("Failed to count records for ID %d, %v", id, err)
+			}
+
+			if count != 0 {
+				continue
+			}
+
+			rel_path, err := uri.Id2RelPath(id)
+
+			if err != nil {
+				return fmt.Errorf("Failed to determine relative path for %d, %v", id, err)
+			}
+
+			fh, err := opts.Reader.Read(ctx, rel_path)
+
+			if err != nil {
+
+				if opts.Strict {
+					return fmt.Errorf("Failed to open %s, %v", rel_path, err)
+				}
+
+				slog.Debug("Failed to read record. Strict mode is disabled so skipping.", "path", rel_path, "error", err)
+				continue
+			}
+
+			ancestor, err := io.ReadAll(fh)
+
+			fh.Close()
+
+			if err != nil {
+				return fmt.Errorf("Failed to read data for %s, %v", rel_path, err)
+			}
+
+			for _, t := range tables {
+
+				err = t.IndexRecord(ctx, db, ancestor)
+
+				if err != nil {
+					return fmt.Errorf("Failed to index ancestor (%s), %v", rel_path, err)
+				}
+			}
+		}
+
+		return nil
+	}
+
+	return cb
+}
diff --git a/sql/tables/ancestors.go b/sql/tables/ancestors.go
new file mode 100644
index 0000000..f61c722
--- /dev/null
+++ b/sql/tables/ancestors.go
@@ -0,0 +1,146 @@
+package tables
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strings"
+
+	"github.com/sfomuseum/go-database"
+	"github.com/whosonfirst/go-whosonfirst-feature/alt"
+	"github.com/whosonfirst/go-whosonfirst-feature/properties"
+)
+
+const ANCESTORS_TABLE_NAME string = "ancestors"
+
+type AncestorsTable struct {
+	database.Table
+	FeatureTable
+	name string
+}
+
+type AncestorsRow struct {
+	Id                int64
+	AncestorID        int64
+	AncestorPlacetype string
+	LastModified      int64
+}
+
+func NewAncestorsTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) {
+
+	t, err := NewAncestorsTable(ctx)
+
+	if err != nil {
+		return nil, fmt.Errorf("Failed to create '%s' table, %w", ANCESTORS_TABLE_NAME, err)
+	}
+
+	err = t.InitializeTable(ctx, db)
+
+	if err != nil {
+		return nil, database.InitializeTableError(t, err)
+	}
+
+	return t, nil
+}
+
+func NewAncestorsTable(ctx context.Context) (database.Table, error) {
+
+	t := AncestorsTable{
+		name: ANCESTORS_TABLE_NAME,
+	}
+
+	return &t, nil
+}
+
+func (t *AncestorsTable) Name() string {
+	return t.name
+}
+
+func (t *AncestorsTable) Schema(db *sql.DB) (string, error) {
+	return LoadSchema(db, ANCESTORS_TABLE_NAME)
+}
+
+func (t *AncestorsTable) InitializeTable(ctx context.Context, db *sql.DB) error {
+	return database.CreateTableIfNecessary(ctx, db, t)
+}
+
+func (t *AncestorsTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error {
+	return t.IndexFeature(ctx, db, i.([]byte))
+}
+
+func (t *AncestorsTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error {
+
+	if alt.IsAlt(f) {
+		return nil
+	}
+
+	id, err := properties.Id(f)
+
+	if err != nil {
+		return MissingPropertyError(t, "id", err)
+	}
+
+	tx, err := db.Begin()
+
+	if err != nil {
+		return database.BeginTransactionError(t, err)
+	}
+
+	sql := fmt.Sprintf(`DELETE FROM %s WHERE id = ?`, t.Name())
+
+	stmt, err := tx.Prepare(sql)
+
+	if err != nil {
+		return database.PrepareStatementError(t, err)
+	}
+
+	defer stmt.Close()
+
+	_, err = stmt.Exec(id)
+
+	if err != nil {
+		return database.ExecuteStatementError(t, err)
+	}
+
+	hierarchies := properties.Hierarchies(f)
+	lastmod := properties.LastModified(f)
+
+	for _, h := range hierarchies {
+
+		for pt_key, ancestor_id := range h {
+
+			ancestor_placetype := strings.Replace(pt_key, "_id", "", -1)
+
+			sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s (
+				id, ancestor_id, ancestor_placetype, lastmodified
+			) VALUES (
+				?, ?, ?, ?
+ )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + _, err = stmt.Exec(id, ancestor_id, ancestor_placetype, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + } + + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/ancestors.sqlite.schema b/sql/tables/ancestors.sqlite.schema new file mode 100644 index 0000000..e77d697 --- /dev/null +++ b/sql/tables/ancestors.sqlite.schema @@ -0,0 +1,8 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL, + ancestor_id INTEGER NOT NULL, + ancestor_placetype TEXT, + lastmodified INTEGER +); + +CREATE UNIQUE INDEX `{{ .Name }}_by_ancestor` ON {{ .Name }} (`id`, `ancestor_id`); \ No newline at end of file diff --git a/sql/tables/concordances.go b/sql/tables/concordances.go new file mode 100644 index 0000000..10e3eab --- /dev/null +++ b/sql/tables/concordances.go @@ -0,0 +1,137 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" +) + +const CONCORDANCES_TABLE_NAME string = "concordances" + +type ConcordancesTable struct { + database.Table + FeatureTable + name string +} + +type ConcordancesRow struct { + Id int64 + OtherID string + OtherSource string + LastModified int64 +} + +func NewConcordancesTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + t, err := NewConcordancesTable(ctx) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, database.InitializeTableError(t, err) + } + + return t, nil +} + +func NewConcordancesTable(ctx context.Context) (database.Table, error) { + + t := ConcordancesTable{ + name: CONCORDANCES_TABLE_NAME, + } + + return &t, nil +} + +func (t *ConcordancesTable) Name() string { + return t.name +} + +func (t *ConcordancesTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, CONCORDANCES_TABLE_NAME) +} + +func (t *ConcordancesTable) InitializeTable(ctx context.Context, db *sql.DB) error { + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *ConcordancesTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *ConcordancesTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + if alt.IsAlt(f) { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + sql := fmt.Sprintf(`DELETE FROM %s WHERE id = ?`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + _, err = stmt.Exec(id) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + concordances := properties.Concordances(f) + lastmod := properties.LastModified(f) + + for other_source, other_id := range concordances { + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, other_id, other_source, lastmodified + ) VALUES ( + ?, ?, ?, ? 
+ )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + _, err = stmt.Exec(id, other_id, other_source, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/concordances.sqlite.schema b/sql/tables/concordances.sqlite.schema new file mode 100644 index 0000000..1b1fc6c --- /dev/null +++ b/sql/tables/concordances.sqlite.schema @@ -0,0 +1,9 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL, + other_id INTEGER NOT NULL, + other_source TEXT, + lastmodified INTEGER +); + +CREATE UNIQUE INDEX `{{ .Name }}_by_other` ON {{ .Name }} (`id`, `other_source`); +CREATE INDEX `{{ .Name }}_other_id` ON {{ .Name }} (`other_source`, `other_id`); diff --git a/sql/tables/errors.go b/sql/tables/errors.go new file mode 100644 index 0000000..837b2be --- /dev/null +++ b/sql/tables/errors.go @@ -0,0 +1,12 @@ +package tables + +import ( + "fmt" + + "github.com/sfomuseum/go-database" +) + +// MissingPropertyError returns a new error with a default message for problems deriving a given property ('prop') from a record, wrapping 'err' and prepending with the value of 't's Name() method. +func MissingPropertyError(t database.Table, prop string, err error) error { + return database.WrapError(t, fmt.Errorf("Failed to determine value for '%s' property, %w", prop, err)) +} diff --git a/sql/tables/feature.go b/sql/tables/feature.go new file mode 100644 index 0000000..7b08cc3 --- /dev/null +++ b/sql/tables/feature.go @@ -0,0 +1,13 @@ +package tables + +import ( + "context" + "database/sql" + + "github.com/sfomuseum/go-database" +) + +type FeatureTable interface { + database.Table + IndexFeature(context.Context, *sql.DB, []byte) error +} diff --git a/sql/tables/geojson.go b/sql/tables/geojson.go new file mode 100644 index 0000000..6d3de34 --- /dev/null +++ b/sql/tables/geojson.go @@ -0,0 +1,177 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" +) + +const GEOJSON_TABLE_NAME string = "geojson" + +type GeoJSONTableOptions struct { + IndexAltFiles bool + AllowMissingSourceGeom bool +} + +func DefaultGeoJSONTableOptions() (*GeoJSONTableOptions, error) { + + opts := GeoJSONTableOptions{ + IndexAltFiles: false, + AllowMissingSourceGeom: true, + } + + return &opts, nil +} + +type GeoJSONTable struct { + database.Table + FeatureTable + name string + options *GeoJSONTableOptions +} + +type GeoJSONRow struct { + Id int64 + Body string + LastModified int64 +} + +func NewGeoJSONTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + opts, err := DefaultGeoJSONTableOptions() + + if err != nil { + return nil, err + } + + return NewGeoJSONTableWithDatabaseAndOptions(ctx, db, opts) +} + +func NewGeoJSONTableWithDatabaseAndOptions(ctx context.Context, db *sql.DB, opts *GeoJSONTableOptions) (database.Table, error) { + + t, err := NewGeoJSONTableWithOptions(ctx, opts) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, err + } + + return t, nil +} + +func NewGeoJSONTable(ctx context.Context) (database.Table, error) { + + opts, err := DefaultGeoJSONTableOptions() + + if err != nil { + return nil, err + } + + return 
NewGeoJSONTableWithOptions(ctx, opts) +} + +func NewGeoJSONTableWithOptions(ctx context.Context, opts *GeoJSONTableOptions) (database.Table, error) { + + t := GeoJSONTable{ + name: GEOJSON_TABLE_NAME, + options: opts, + } + + return &t, nil +} + +func (t *GeoJSONTable) Name() string { + return t.name +} + +func (t *GeoJSONTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, GEOJSON_TABLE_NAME) +} + +func (t *GeoJSONTable) InitializeTable(ctx context.Context, db *sql.DB) error { + + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *GeoJSONTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *GeoJSONTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + is_alt := alt.IsAlt(f) + + if is_alt && !t.options.IndexAltFiles { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + source, err := properties.Source(f) + + if err != nil { + + if !t.options.AllowMissingSourceGeom { + return MissingPropertyError(t, "source", err) + } + + source = "unknown" + } + + alt_label, err := properties.AltLabel(f) + + if err != nil { + return MissingPropertyError(t, "alt label", err) + } + + lastmod := properties.LastModified(f) + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, body, source, is_alt, alt_label, lastmodified + ) VALUES ( + ?, ?, ?, ?, ?, ? + )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + str_body := string(f) + + _, err = stmt.Exec(id, str_body, source, is_alt, alt_label, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/geojson.mysql.schema b/sql/tables/geojson.mysql.schema new file mode 100644 index 0000000..c5f2234 --- /dev/null +++ b/sql/tables/geojson.mysql.schema @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS {{ .Name }} ( + id BIGINT UNSIGNED, + alt VARCHAR(255) NOT NULL, + body LONGBLOB NOT NULL, + lastmodified INT NOT NULL, + UNIQUE KEY {{ .Name }}_id_alt (id, alt), + KEY {{ .Name }}_lastmodified (lastmodified) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; \ No newline at end of file diff --git a/sql/tables/geojson.sqlite.schema b/sql/tables/geojson.sqlite.schema new file mode 100644 index 0000000..b7be81a --- /dev/null +++ b/sql/tables/geojson.sqlite.schema @@ -0,0 +1,11 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL, + body TEXT, + source TEXT, + is_alt BOOLEAN, + alt_label TEXT, + lastmodified INTEGER +); + +CREATE UNIQUE INDEX `{{ .Name }}_id_alt` ON {{ .Name }} (`id`, `alt_label`); +CREATE INDEX `{{ .Name }}_by_source` ON {{ .Name }} (`source`); diff --git a/sql/tables/geometries.go b/sql/tables/geometries.go new file mode 100644 index 0000000..1414720 --- /dev/null +++ b/sql/tables/geometries.go @@ -0,0 +1,187 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/paulmach/orb/encoding/wkt" + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/geometry" + "github.com/whosonfirst/go-whosonfirst-feature/properties" +) + +const GEOMETRIES_TABLE_NAME string = "geometries" + +type GeometriesTableOptions struct { + 
IndexAltFiles bool +} + +func DefaultGeometriesTableOptions() (*GeometriesTableOptions, error) { + + opts := GeometriesTableOptions{ + IndexAltFiles: false, + } + + return &opts, nil +} + +type GeometriesTable struct { + database.Table + FeatureTable + name string + options *GeometriesTableOptions +} + +type GeometriesRow struct { + Id int64 + Body string + LastModified int64 +} + +func NewGeometriesTable(ctx context.Context) (database.Table, error) { + + opts, err := DefaultGeometriesTableOptions() + + if err != nil { + return nil, err + } + + return NewGeometriesTableWithOptions(ctx, opts) +} + +func NewGeometriesTableWithOptions(ctx context.Context, opts *GeometriesTableOptions) (database.Table, error) { + + t := GeometriesTable{ + name: GEOMETRIES_TABLE_NAME, + options: opts, + } + + return &t, nil +} + +func NewGeometriesTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + opts, err := DefaultGeometriesTableOptions() + + if err != nil { + return nil, err + } + + return NewGeometriesTableWithDatabaseAndOptions(ctx, db, opts) +} + +func NewGeometriesTableWithDatabaseAndOptions(ctx context.Context, db *sql.DB, opts *GeometriesTableOptions) (database.Table, error) { + + t, err := NewGeometriesTableWithOptions(ctx, opts) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, err + } + + return t, nil +} + +func (t *GeometriesTable) Name() string { + return t.name +} + +func (t *GeometriesTable) Schema(db *sql.DB) (string, error) { + + // really this should probably be the SPR table + geom but + // let's just get this working first and then make it fancy + // (20180109/thisisaaronland) + + // https://www.gaia-gis.it/spatialite-1.0a/SpatiaLite-tutorial.html + // http://www.gaia-gis.it/gaia-sins/spatialite-sql-4.3.0.html + + // Note the InitSpatialMetaData() command because this: + // https://stackoverflow.com/questions/17761089/cannot-create-column-with-spatialite-unexpected-metadata-layout + + return LoadSchema(db, GEOMETRIES_TABLE_NAME) +} + +func (t *GeometriesTable) InitializeTable(ctx context.Context, db *sql.DB) error { + + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *GeometriesTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *GeometriesTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + is_alt := alt.IsAlt(f) + + if is_alt && !t.options.IndexAltFiles { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + alt_label, err := properties.AltLabel(f) + + if err != nil { + return MissingPropertyError(t, "alt label", err) + } + + lastmod := properties.LastModified(f) + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + geojson_geom, err := geometry.Geometry(f) + + if err != nil { + return MissingPropertyError(t, "geometry", err) + } + + orb_geom := geojson_geom.Geometry() + + str_wkt := wkt.MarshalString(orb_geom) + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, is_alt, alt_label, type, geom, lastmodified + ) VALUES ( + ?, ?, ?, ?, GeomFromText('%s', 4326), ? 
+ )`, t.Name(), str_wkt) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + geom_type := "common" + + _, err = stmt.Exec(id, is_alt, alt_label, geom_type, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/geometries.sqlite.schema b/sql/tables/geometries.sqlite.schema new file mode 100644 index 0000000..2d6b54b --- /dev/null +++ b/sql/tables/geometries.sqlite.schema @@ -0,0 +1,14 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL, + type TEXT, + is_alt TINYINT, + alt_label TEXT, + lastmodified INTEGER +); + +SELECT InitSpatialMetaData(); +SELECT AddGeometryColumn('{{ .Name }}', 'geom', 4326, 'GEOMETRY', 'XY'); +SELECT CreateSpatialIndex('{{ .Name }}', 'geom'); + +CREATE UNIQUE INDEX `{{ .Name }}_by_id` ON {{ .Name }} (id, alt_label); +CREATE INDEX `{{ .Name }}_geometries_by_lastmod` ON {{ .Name }} (lastmodified);` diff --git a/sql/tables/names.go b/sql/tables/names.go new file mode 100644 index 0000000..c19501d --- /dev/null +++ b/sql/tables/names.go @@ -0,0 +1,175 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" + "github.com/whosonfirst/go-whosonfirst-names/tags" +) + +const NAMES_TABLE_NAME string = "names" + +type NamesTable struct { + database.Table + FeatureTable + name string +} + +type NamesRow struct { + Id int64 + Placetype string + Country string + Language string + ExtLang string + Script string + Region string + Variant string + Extension string + PrivateUse string + Name string + LastModified int64 +} + +func NewNamesTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + t, err := NewNamesTable(ctx) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, database.InitializeTableError(t, err) + } + + return t, nil +} + +func NewNamesTable(ctx context.Context) (database.Table, error) { + + t := NamesTable{ + name: NAMES_TABLE_NAME, + } + + return &t, nil +} + +func (t *NamesTable) Name() string { + return t.name +} + +func (t *NamesTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, NAMES_TABLE_NAME) +} + +func (t *NamesTable) InitializeTable(ctx context.Context, db *sql.DB) error { + + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *NamesTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *NamesTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + if alt.IsAlt(f) { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + pt, err := properties.Placetype(f) + + if err != nil { + return MissingPropertyError(t, "placetype", err) + } + + co := properties.Country(f) + + lastmod := properties.LastModified(f) + names := properties.Names(f) + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + sql := fmt.Sprintf(`DELETE FROM %s WHERE id = ?`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + _, err = stmt.Exec(id) + + if err != nil { + return 
database.ExecuteStatementError(t, err)
+ }
+
+ for tag, names := range names {
+
+ lt, err := tags.NewLangTag(tag)
+
+ if err != nil {
+ return database.WrapError(t, fmt.Errorf("Failed to create new language tag for '%s', %w", tag, err))
+ }
+
+ for _, n := range names {
+
+ // Note: the column order here must match the order of the arguments
+ // passed to stmt.Exec below (language, extlang, script, region, ...)
+ sql := fmt.Sprintf(`INSERT INTO %s (
+ id, placetype, country,
+ language, extlang,
+ script, region, variant,
+ extension, privateuse,
+ name,
+ lastmodified
+ ) VALUES (
+ ?, ?, ?,
+ ?, ?,
+ ?, ?, ?,
+ ?, ?,
+ ?,
+ ?
+ )`, t.Name())
+
+ stmt, err := tx.Prepare(sql)
+
+ if err != nil {
+ return database.PrepareStatementError(t, err)
+ }
+
+ defer stmt.Close()
+
+ _, err = stmt.Exec(id, pt, co, lt.Language(), lt.ExtLang(), lt.Script(), lt.Region(), lt.Variant(), lt.Extension(), lt.PrivateUse(), n, lastmod)
+
+ if err != nil {
+ return database.ExecuteStatementError(t, err)
+ }
+
+ }
+ }
+
+ err = tx.Commit()
+
+ if err != nil {
+ return database.CommitTransactionError(t, err)
+ }
+
+ return nil
+}
diff --git a/sql/tables/names.sqlite.schema b/sql/tables/names.sqlite.schema
new file mode 100644
index 0000000..886261f
--- /dev/null
+++ b/sql/tables/names.sqlite.schema
@@ -0,0 +1,22 @@
+CREATE TABLE {{ .Name }} (
+ id INTEGER NOT NULL,
+ placetype TEXT,
+ country TEXT,
+ language TEXT,
+ extlang TEXT,
+ script TEXT,
+ region TEXT,
+ variant TEXT,
+ extension TEXT,
+ privateuse TEXT,
+ name TEXT,
+ lastmodified INTEGER
+);
+
+CREATE INDEX {{ .Name }}_by_lastmod ON {{ .Name }} (lastmodified);
+CREATE INDEX {{ .Name }}_by_country ON {{ .Name }} (country,privateuse,placetype);
+CREATE INDEX {{ .Name }}_by_language ON {{ .Name }} (language,privateuse,placetype);
+CREATE INDEX {{ .Name }}_by_placetype ON {{ .Name }} (placetype,country,privateuse);
+CREATE INDEX {{ .Name }}_by_name ON {{ .Name }} (name, placetype, country);
+CREATE INDEX {{ .Name }}_by_name_private ON {{ .Name }} (name, privateuse, placetype, country);
+CREATE INDEX {{ .Name }}_by_wofid ON {{ .Name }} (id);
diff --git a/sql/tables/properties.go b/sql/tables/properties.go
new file mode 100644
index 0000000..a812da1
--- /dev/null
+++ b/sql/tables/properties.go
@@ -0,0 +1,166 @@
+package tables
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/sfomuseum/go-database"
+ "github.com/tidwall/gjson"
+ "github.com/whosonfirst/go-whosonfirst-feature/alt"
+ "github.com/whosonfirst/go-whosonfirst-feature/properties"
+)
+
+const PROPERTIES_TABLE_NAME string = "properties"
+
+type PropertiesTableOptions struct {
+ IndexAltFiles bool
+}
+
+func DefaultPropertiesTableOptions() (*PropertiesTableOptions, error) {
+
+ opts := PropertiesTableOptions{
+ IndexAltFiles: false,
+ }
+
+ return &opts, nil
+}
+
+type PropertiesTable struct {
+ database.Table
+ FeatureTable
+ name string
+ options *PropertiesTableOptions
+}
+
+type PropertiesRow struct {
+ Id int64
+ Body string
+ LastModified int64
+}
+
+func NewPropertiesTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) {
+
+ opts, err := DefaultPropertiesTableOptions()
+
+ if err != nil {
+ return nil, err
+ }
+
+ return NewPropertiesTableWithDatabaseAndOptions(ctx, db, opts)
+}
+
+func NewPropertiesTableWithDatabaseAndOptions(ctx context.Context, db *sql.DB, opts *PropertiesTableOptions) (database.Table, error) {
+
+ t, err := NewPropertiesTableWithOptions(ctx, opts)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = t.InitializeTable(ctx, db)
+
+ if err != nil {
+ return nil, database.InitializeTableError(t, err)
+ }
+
+ return t, nil
+}
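A note on the names table above: `NamesTable.IndexFeature` writes one row per name, decomposing each BCP 47 style name tag into the language, extlang, script, region, variant, extension and privateuse columns. The following is a minimal sketch of that decomposition; the sample tag and the commented results are assumptions based on how the accessors are used in the `Exec` call above.

```
package main

import (
	"fmt"

	"github.com/whosonfirst/go-whosonfirst-names/tags"
)

func main() {

	// A hypothetical Who's On First name tag of the form {LANGUAGE}_x_{PRIVATEUSE}
	lt, err := tags.NewLangTag("fra_x_preferred")

	if err != nil {
		panic(err)
	}

	fmt.Println(lt.Language())   // language column; assumed to be "fra"
	fmt.Println(lt.Script())     // script column; assumed to be empty for this tag
	fmt.Println(lt.Region())     // region column; assumed to be empty for this tag
	fmt.Println(lt.PrivateUse()) // privateuse column; assumed to be "x_preferred"
}
```

+
+func NewPropertiesTable(ctx context.Context) 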
(database.Table, error) { + + opts, err := DefaultPropertiesTableOptions() + + if err != nil { + return nil, err + } + + return NewPropertiesTableWithOptions(ctx, opts) +} + +func NewPropertiesTableWithOptions(ctx context.Context, opts *PropertiesTableOptions) (database.Table, error) { + + t := PropertiesTable{ + name: PROPERTIES_TABLE_NAME, + options: opts, + } + + return &t, nil +} + +func (t *PropertiesTable) Name() string { + return t.name +} + +func (t *PropertiesTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, PROPERTIES_TABLE_NAME) +} + +func (t *PropertiesTable) InitializeTable(ctx context.Context, db *sql.DB) error { + + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *PropertiesTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *PropertiesTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + is_alt := alt.IsAlt(f) + + if is_alt && !t.options.IndexAltFiles { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + alt_label, err := properties.AltLabel(f) + + if err != nil { + return MissingPropertyError(t, "alt label", err) + } + + lastmod := properties.LastModified(f) + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, body, is_alt, alt_label, lastmodified + ) VALUES ( + ?, ?, ?, ?, ? + )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + rsp_props := gjson.GetBytes(f, "properties") + str_props := rsp_props.String() + + _, err = stmt.Exec(id, str_props, is_alt, alt_label, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/properties.sqlite.schema b/sql/tables/properties.sqlite.schema new file mode 100644 index 0000000..cfdd8cd --- /dev/null +++ b/sql/tables/properties.sqlite.schema @@ -0,0 +1,11 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL, + body TEXT, + is_alt BOOLEAN, + alt_label TEXT, + lastmodified INTEGER +); + +CREATE UNIQUE INDEX {{ .Name }}_by_id ON {{ .Name }} (id, alt_label); +CREATE INDEX {{ .Name }}_by_alt ON {{ .Name }} (id, is_alt, alt_label); +CREATE INDEX {{ .Name }}_by_lastmod ON {{ .Name }} (lastmodified); diff --git a/sql/tables/rtree.go b/sql/tables/rtree.go new file mode 100644 index 0000000..38ff2a8 --- /dev/null +++ b/sql/tables/rtree.go @@ -0,0 +1,231 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/paulmach/orb" + "github.com/paulmach/orb/encoding/wkt" + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/geometry" + "github.com/whosonfirst/go-whosonfirst-feature/properties" +) + +const RTREE_TABLE_NAME string = "rtree" + +type RTreeTableOptions struct { + IndexAltFiles bool +} + +func DefaultRTreeTableOptions() (*RTreeTableOptions, error) { + + opts := RTreeTableOptions{ + IndexAltFiles: false, + } + + return &opts, nil +} + +type RTreeTable struct { + database.Table + FeatureTable + name string + options *RTreeTableOptions +} + +func NewRTreeTable(ctx context.Context) (database.Table, error) { + + opts, err := DefaultRTreeTableOptions() + + if err != nil { + return 
nil, err
+ }
+
+ return NewRTreeTableWithOptions(ctx, opts)
+}
+
+func NewRTreeTableWithOptions(ctx context.Context, opts *RTreeTableOptions) (database.Table, error) {
+
+ t := RTreeTable{
+ name: RTREE_TABLE_NAME,
+ options: opts,
+ }
+
+ return &t, nil
+}
+
+func NewRTreeTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) {
+
+ opts, err := DefaultRTreeTableOptions()
+
+ if err != nil {
+ return nil, err
+ }
+
+ return NewRTreeTableWithDatabaseAndOptions(ctx, db, opts)
+}
+
+func NewRTreeTableWithDatabaseAndOptions(ctx context.Context, db *sql.DB, opts *RTreeTableOptions) (database.Table, error) {
+
+ t, err := NewRTreeTableWithOptions(ctx, opts)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = t.InitializeTable(ctx, db)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return t, nil
+}
+
+func (t *RTreeTable) Name() string {
+ return t.name
+}
+
+func (t *RTreeTable) Schema(db *sql.DB) (string, error) {
+
+ /*
+
+ 3.1.1. Column naming details
+
+ In the arguments to "rtree" in the CREATE VIRTUAL TABLE statement, the names of the columns are taken from the first token of each argument. All subsequent tokens within each argument are silently ignored. This means, for example, that if you try to give a column a type affinity or add a constraint such as UNIQUE or NOT NULL or DEFAULT to a column, those extra tokens are accepted as valid, but they do not change the behavior of the rtree. In an RTREE virtual table, the first column always has a type affinity of INTEGER and all other data columns have a type affinity of NUMERIC.
+
+ Recommended practice is to omit any extra tokens in the rtree specification. Let each argument to "rtree" be a single ordinary label that is the name of the corresponding column, and omit all other tokens from the argument list.
+
+ 4.1. Auxiliary Columns
+
+ Beginning with SQLite version 3.24.0 (2018-06-04), r-tree tables can have auxiliary columns that store arbitrary data. Auxiliary columns can be used in place of secondary tables such as "demo_data".
+
+ Auxiliary columns are marked with a "+" symbol before the column name. Auxiliary columns must come after all of the coordinate boundary columns. There is a limit of no more than 100 auxiliary columns. 
The following example shows an r-tree table with auxiliary columns that is equivalent to the two tables "demo_index" and "demo_data" above: + + Note: Auxiliary columns must come at the end of a table definition + */ + + return LoadSchema(db, RTREE_TABLE_NAME) +} + +func (t *RTreeTable) InitializeTable(ctx context.Context, db *sql.DB) error { + + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *RTreeTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *RTreeTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + is_alt := alt.IsAlt(f) // this returns a boolean which is interpreted as a float by SQLite + + if is_alt && !t.options.IndexAltFiles { + return nil + } + + geom_type, err := geometry.Type(f) + + if err != nil { + return MissingPropertyError(t, "geometry type", err) + } + + switch geom_type { + case "Polygon", "MultiPolygon": + // pass + default: + return nil + } + + wof_id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + alt_label := "" + + if is_alt { + + label, err := properties.AltLabel(f) + + if err != nil { + return MissingPropertyError(t, "alt label", err) + } + + alt_label = label + } + + lastmod := properties.LastModified(f) + + geojson_geom, err := geometry.Geometry(f) + + if err != nil { + return MissingPropertyError(t, "geometry", err) + } + + orb_geom := geojson_geom.Geometry() + + tx, err := db.Begin() + + if err != nil { + return err + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, min_x, max_x, min_y, max_y, wof_id, is_alt, alt_label, geometry, lastmodified + ) VALUES ( + NULL, ?, ?, ?, ?, ?, ?, ?, ?, ? + )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + var mp orb.MultiPolygon + + switch geom_type { + case "MultiPolygon": + mp = orb_geom.(orb.MultiPolygon) + case "Polygon": + mp = []orb.Polygon{orb_geom.(orb.Polygon)} + default: + // This should never happen (we check above) but just in case... + return database.WrapError(t, fmt.Errorf("Invalid or unsupported geometry type, %s", geom_type)) + } + + for _, poly := range mp { + + // Store the geometry for each bounding box so we can use it to do + // raycasting and filter points in any interior rings. 
For example in + // whosonfirst/go-whosonfirst-spatial-sqlite + + bbox := poly.Bound() + + sw := bbox.Min + ne := bbox.Max + + enc_geom := wkt.MarshalString(poly) + + _, err = stmt.Exec(sw.X(), ne.X(), sw.Y(), ne.Y(), wof_id, is_alt, alt_label, enc_geom, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/rtree.sqlite.schema b/sql/tables/rtree.sqlite.schema new file mode 100644 index 0000000..5fedb85 --- /dev/null +++ b/sql/tables/rtree.sqlite.schema @@ -0,0 +1,12 @@ +CREATE VIRTUAL TABLE {{ .Name }} USING rtree ( + id, + min_x, + max_x, + min_y, + max_y, + +wof_id INTEGER, + +is_alt TINYINT, + +alt_label TEXT, + +geometry BLOB, + +lastmodified INTEGER +); diff --git a/sql/tables/search.go b/sql/tables/search.go new file mode 100644 index 0000000..88b34ed --- /dev/null +++ b/sql/tables/search.go @@ -0,0 +1,225 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" + "github.com/whosonfirst/go-whosonfirst-names/tags" +) + +const SEARCH_TABLE_NAME string = "search" + +type SearchTable struct { + database.Table + FeatureTable + name string +} + +func NewSearchTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + t, err := NewSearchTable(ctx) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, err + } + + return t, nil +} + +func NewSearchTable(ctx context.Context) (database.Table, error) { + + t := SearchTable{ + name: SEARCH_TABLE_NAME, + } + + return &t, nil +} + +func (t *SearchTable) InitializeTable(ctx context.Context, db *sql.DB) error { + + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *SearchTable) Name() string { + return t.name +} + +func (t *SearchTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, SEARCH_TABLE_NAME) +} + +func (t *SearchTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *SearchTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + if alt.IsAlt(f) { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + placetype, err := properties.Placetype(f) + + if err != nil { + return MissingPropertyError(t, "placetype", err) + } + + is_current, err := properties.IsCurrent(f) + + if err != nil { + return MissingPropertyError(t, "is current", err) + } + + is_ceased, err := properties.IsCeased(f) + + if err != nil { + return MissingPropertyError(t, "is ceased", err) + } + + is_deprecated, err := properties.IsDeprecated(f) + + if err != nil { + return MissingPropertyError(t, "is deprecated", err) + } + + is_superseded, err := properties.IsSuperseded(f) + + if err != nil { + return MissingPropertyError(t, "is superseded", err) + } + + names_all := make([]string, 0) + names_preferred := make([]string, 0) + names_variant := make([]string, 0) + names_colloquial := make([]string, 0) + + name, err := properties.Name(f) + + if err != nil { + return MissingPropertyError(t, "name", err) + } + + names_all = append(names_all, name) + names_preferred = append(names_preferred, name) + + for tag, names := range properties.Names(f) { + + lt, err := 
tags.NewLangTag(tag) + + if err != nil { + return database.WrapError(t, fmt.Errorf("Failed to create new lang tag for '%s', %w", tag, err)) + } + + possible := make([]string, 0) + possible_map := make(map[string]bool) + + for _, n := range names { + + _, ok := possible_map[n] + + if !ok { + possible_map[n] = true + } + } + + for n, _ := range possible_map { + possible = append(possible, n) + } + + for _, n := range possible { + names_all = append(names_all, n) + } + + switch lt.PrivateUse() { + case "x_preferred": + for _, n := range possible { + names_preferred = append(names_preferred, n) + } + case "x_variant": + for _, n := range possible { + names_variant = append(names_variant, n) + } + case "x_colloquial": + for _, n := range possible { + names_colloquial = append(names_colloquial, n) + } + default: + continue + } + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, placetype, + name, names_all, names_preferred, names_variant, names_colloquial, + is_current, is_ceased, is_deprecated, is_superseded + ) VALUES ( + ?, ?, + ?, ?, ?, ?, ?, + ?, ?, ?, ? + )`, t.Name()) // ON CONFLICT DO BLAH BLAH BLAH + + args := []interface{}{ + id, placetype, + name, strings.Join(names_all, " "), strings.Join(names_preferred, " "), strings.Join(names_variant, " "), strings.Join(names_colloquial, " "), + is_current.Flag(), is_ceased.Flag(), is_deprecated.Flag(), is_superseded.Flag(), + } + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + s, err := tx.Prepare(fmt.Sprintf("DELETE FROM %s WHERE id = ?", t.Name())) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer s.Close() + + _, err = s.Exec(id) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + _, err = stmt.Exec(args...) 
+ + if err != nil { + return database.ExecuteStatementError(t, err) + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/search.sqlite.schema b/sql/tables/search.sqlite.schema new file mode 100644 index 0000000..e585da8 --- /dev/null +++ b/sql/tables/search.sqlite.schema @@ -0,0 +1,5 @@ +CREATE VIRTUAL TABLE {{ .Name }} USING fts5( + id, placetype, + name, names_all, names_preferred, names_variant, names_colloquial, + is_current, is_ceased, is_deprecated, is_superseded +); \ No newline at end of file diff --git a/sql/tables/spelunker.go b/sql/tables/spelunker.go new file mode 100644 index 0000000..91d3290 --- /dev/null +++ b/sql/tables/spelunker.go @@ -0,0 +1,177 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" + "github.com/whosonfirst/go-whosonfirst-spelunker/document" +) + +const SPELUNKER_TABLE_NAME string = "spelunker" + +type SpelunkerTableOptions struct { + IndexAltFiles bool + AllowMissingSourceGeom bool +} + +func DefaultSpelunkerTableOptions() (*SpelunkerTableOptions, error) { + + opts := SpelunkerTableOptions{ + IndexAltFiles: false, + AllowMissingSourceGeom: true, + } + + return &opts, nil +} + +type SpelunkerTable struct { + database.Table + FeatureTable + name string + options *SpelunkerTableOptions +} + +func NewSpelunkerTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + opts, err := DefaultSpelunkerTableOptions() + + if err != nil { + return nil, err + } + + return NewSpelunkerTableWithDatabaseAndOptions(ctx, db, opts) +} + +func NewSpelunkerTableWithDatabaseAndOptions(ctx context.Context, db *sql.DB, opts *SpelunkerTableOptions) (database.Table, error) { + + t, err := NewSpelunkerTableWithOptions(ctx, opts) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, err + } + + return t, nil +} + +func NewSpelunkerTable(ctx context.Context) (database.Table, error) { + + opts, err := DefaultSpelunkerTableOptions() + + if err != nil { + return nil, err + } + + return NewSpelunkerTableWithOptions(ctx, opts) +} + +func NewSpelunkerTableWithOptions(ctx context.Context, opts *SpelunkerTableOptions) (database.Table, error) { + + t := SpelunkerTable{ + name: SPELUNKER_TABLE_NAME, + options: opts, + } + + return &t, nil +} + +func (t *SpelunkerTable) Name() string { + return t.name +} + +func (t *SpelunkerTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, SPELUNKER_TABLE_NAME) +} + +func (t *SpelunkerTable) InitializeTable(ctx context.Context, db *sql.DB) error { + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *SpelunkerTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *SpelunkerTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + is_alt := alt.IsAlt(f) + + if is_alt && !t.options.IndexAltFiles { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + source, err := properties.Source(f) + + if err != nil { + + if !t.options.AllowMissingSourceGeom { + return MissingPropertyError(t, "source", err) + } + + source = "unknown" + } + + alt_label, err := properties.AltLabel(f) + + if err != nil { + return MissingPropertyError(t, "alt label", err) + 
} + + lastmod := properties.LastModified(f) + + doc, err := document.PrepareSpelunkerV2Document(ctx, f) + + if err != nil { + return fmt.Errorf("Failed to prepare spelunker document, %w", err) + } + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, body, source, is_alt, alt_label, lastmodified + ) VALUES ( + ?, ?, ?, ?, ?, ? + )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + str_doc := string(doc) + + _, err = stmt.Exec(id, str_doc, source, is_alt, alt_label, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/spelunker.mysql.schema b/sql/tables/spelunker.mysql.schema new file mode 100644 index 0000000..628ee52 --- /dev/null +++ b/sql/tables/spelunker.mysql.schema @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS {{ .Name }} ( + id BIGINT UNSIGNED, + alt VARCHAR(255) NOT NULL, + body JSON NULL, + lastmodified INT NOT NULL, + UNIQUE KEY {{ .Name }}_id_alt (id, alt), + KEY {{ .Name }}_lastmodified (lastmodified) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; \ No newline at end of file diff --git a/sql/tables/spelunker.sqlite.schema b/sql/tables/spelunker.sqlite.schema new file mode 100644 index 0000000..a366224 --- /dev/null +++ b/sql/tables/spelunker.sqlite.schema @@ -0,0 +1,11 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL, + body JSON, + source TEXT, + is_alt BOOLEAN, + alt_label TEXT, + lastmodified INTEGER +); + +CREATE UNIQUE INDEX `{{ .Name }}_id_alt` ON {{ .Name }} (`id`, `alt_label`); +CREATE INDEX `{{ .Name }}_by_source` ON {{ .Name }} (`source`); diff --git a/sql/tables/spr.go b/sql/tables/spr.go new file mode 100644 index 0000000..896944c --- /dev/null +++ b/sql/tables/spr.go @@ -0,0 +1,240 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + "strconv" + "strings" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" + "github.com/whosonfirst/go-whosonfirst-spr/v2" +) + +const SPR_TABLE_NAME string = "spr" + +type SPRTableOptions struct { + IndexAltFiles bool +} + +func DefaultSPRTableOptions() (*SPRTableOptions, error) { + + opts := SPRTableOptions{ + IndexAltFiles: false, + } + + return &opts, nil +} + +type SPRTable struct { + database.Table + FeatureTable + name string + options *SPRTableOptions +} + +func NewSPRTable(ctx context.Context) (database.Table, error) { + + opts, err := DefaultSPRTableOptions() + + if err != nil { + return nil, err + } + + return NewSPRTableWithOptions(ctx, opts) +} + +func NewSPRTableWithOptions(ctx context.Context, opts *SPRTableOptions) (database.Table, error) { + + t := SPRTable{ + name: SPR_TABLE_NAME, + options: opts, + } + + return &t, nil +} + +func NewSPRTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + opts, err := DefaultSPRTableOptions() + + if err != nil { + return nil, err + } + + return NewSPRTableWithDatabaseAndOptions(ctx, db, opts) +} + +func NewSPRTableWithDatabaseAndOptions(ctx context.Context, db *sql.DB, opts *SPRTableOptions) (database.Table, error) { + + t, err := NewSPRTableWithOptions(ctx, opts) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, err + } + + 
return t, nil +} + +func (t *SPRTable) InitializeTable(ctx context.Context, db *sql.DB) error { + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *SPRTable) Name() string { + return t.name +} + +func (t *SPRTable) Schema(db *sql.DB) (string, error) { + return LoadSchema(db, SPR_TABLE_NAME) +} + +func (t *SPRTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *SPRTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + is_alt := alt.IsAlt(f) + + if is_alt { + + if !t.options.IndexAltFiles { + return nil + } + } + + alt_label, err := properties.AltLabel(f) + + if err != nil { + return MissingPropertyError(t, "alt label", err) + } + + var s spr.StandardPlacesResult + + if is_alt { + + _s, err := spr.WhosOnFirstAltSPR(f) + + if err != nil { + return database.WrapError(t, fmt.Errorf("Failed to generate SPR for alt geom, %w", err)) + } + + s = _s + + } else { + + _s, err := spr.WhosOnFirstSPR(f) + + if err != nil { + return database.WrapError(t, fmt.Errorf("Failed to SPR, %w", err)) + } + + s = _s + + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, parent_id, name, placetype, + inception, cessation, + country, repo, + latitude, longitude, + min_latitude, min_longitude, + max_latitude, max_longitude, + is_current, is_deprecated, is_ceased, + is_superseded, is_superseding, + superseded_by, supersedes, belongsto, + is_alt, alt_label, + lastmodified + ) VALUES ( + ?, ?, ?, ?, + ?, ?, + ?, ?, + ?, ?, + ?, ?, + ?, ?, + ?, ?, ?, + ?, ?, ?, + ?, ?, + ?, ?, + ? + )`, t.Name()) // ON CONFLICT DO BLAH BLAH BLAH + + superseded_by := int64ToString(s.SupersededBy()) + supersedes := int64ToString(s.Supersedes()) + belongs_to := int64ToString(s.BelongsTo()) + + str_inception := "" + str_cessation := "" + + inception := s.Inception() + cessation := s.Cessation() + + if inception != nil { + str_inception = inception.String() + } + + if cessation != nil { + str_cessation = cessation.String() + } + + args := []interface{}{ + s.Id(), s.ParentId(), s.Name(), s.Placetype(), + str_inception, str_cessation, + s.Country(), s.Repo(), + s.Latitude(), s.Longitude(), + s.MinLatitude(), s.MinLongitude(), + s.MaxLatitude(), s.MaxLongitude(), + s.IsCurrent().Flag(), s.IsDeprecated().Flag(), s.IsCeased().Flag(), + s.IsSuperseded().Flag(), s.IsSuperseding().Flag(), + superseded_by, supersedes, belongs_to, + is_alt, alt_label, + s.LastModified(), + } + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + _, err = stmt.Exec(args...) 
+ + if err != nil { + return database.ExecuteStatementError(t, err) + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} + +func int64ToString(ints []int64) string { + + str_ints := make([]string, len(ints)) + + for idx, i := range ints { + str_ints[idx] = strconv.FormatInt(i, 10) + } + + return strings.Join(str_ints, ",") +} diff --git a/sql/tables/spr.sqlite.schema b/sql/tables/spr.sqlite.schema new file mode 100644 index 0000000..04f1393 --- /dev/null +++ b/sql/tables/spr.sqlite.schema @@ -0,0 +1,43 @@ +CREATE TABLE {{ .Name }} ( + id TEXT NOT NULL, + parent_id INTEGER, + name TEXT, + placetype TEXT, + inception TEXT, + cessation TEXT, + country TEXT, + repo TEXT, + latitude REAL, + longitude REAL, + min_latitude REAL, + min_longitude REAL, + max_latitude REAL, + max_longitude REAL, + is_current INTEGER, + is_deprecated INTEGER, + is_ceased INTEGER, + is_superseded INTEGER, + is_superseding INTEGER, + superseded_by TEXT, + supersedes TEXT, + belongsto TEXT, + is_alt TINYINT, + alt_label TEXT, + lastmodified INTEGER +); + +CREATE UNIQUE INDEX {{ .Name }}_by_id ON {{ .Name }} (id, alt_label); +CREATE INDEX {{ .Name }}_by_lastmod ON {{ .Name }} (lastmodified); +CREATE INDEX {{ .Name }}_by_parent ON {{ .Name }} (parent_id, is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_placetype ON {{ .Name }} (placetype, is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_country ON {{ .Name }} (country, placetype, is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_name ON {{ .Name }} (name, placetype, is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_centroid ON {{ .Name }} (latitude, longitude, is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_bbox ON {{ .Name }} (min_latitude, min_longitude, max_latitude, max_longitude, placetype, is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_repo ON {{ .Name }} (repo, lastmodified); +CREATE INDEX {{ .Name }}_by_current ON {{ .Name }} (is_current, lastmodified); +CREATE INDEX {{ .Name }}_by_deprecated ON {{ .Name }} (is_deprecated, lastmodified); +CREATE INDEX {{ .Name }}_by_ceased ON {{ .Name }} (is_ceased, lastmodified); +CREATE INDEX {{ .Name }}_by_superseded ON {{ .Name }} (is_superseded, lastmodified); +CREATE INDEX {{ .Name }}_by_superseding ON {{ .Name }} (is_superseding, lastmodified); +CREATE INDEX {{ .Name }}_obsolete ON {{ .Name }} (is_deprecated, is_superseded); diff --git a/sql/tables/supersedes.go b/sql/tables/supersedes.go new file mode 100644 index 0000000..e0200f3 --- /dev/null +++ b/sql/tables/supersedes.go @@ -0,0 +1,128 @@ +package tables + +import ( + "context" + "database/sql" + "fmt" + + "github.com/sfomuseum/go-database" + "github.com/whosonfirst/go-whosonfirst-feature/alt" + "github.com/whosonfirst/go-whosonfirst-feature/properties" +) + +const SUPERSEDES_TABLE_NAME string = "supersedes" + +type SupersedesTable struct { + database.Table + FeatureTable + name string +} + +func NewSupersedesTableWithDatabase(ctx context.Context, db *sql.DB) (database.Table, error) { + + t, err := NewSupersedesTable(ctx) + + if err != nil { + return nil, err + } + + err = t.InitializeTable(ctx, db) + + if err != nil { + return nil, err + } + + return t, nil +} + +func NewSupersedesTable(ctx context.Context) (database.Table, error) { + + t := SupersedesTable{ + name: SUPERSEDES_TABLE_NAME, + } + + return &t, nil +} + +func (t *SupersedesTable) Name() string { + return t.name +} + +func (t *SupersedesTable) Schema(db *sql.DB) (string, error) { + return 
LoadSchema(db, SUPERSEDES_TABLE_NAME) +} + +func (t *SupersedesTable) InitializeTable(ctx context.Context, db *sql.DB) error { + return database.CreateTableIfNecessary(ctx, db, t) +} + +func (t *SupersedesTable) IndexRecord(ctx context.Context, db *sql.DB, i interface{}) error { + return t.IndexFeature(ctx, db, i.([]byte)) +} + +func (t *SupersedesTable) IndexFeature(ctx context.Context, db *sql.DB, f []byte) error { + + if alt.IsAlt(f) { + return nil + } + + id, err := properties.Id(f) + + if err != nil { + return MissingPropertyError(t, "id", err) + } + + lastmod := properties.LastModified(f) + + tx, err := db.Begin() + + if err != nil { + return database.BeginTransactionError(t, err) + } + + sql := fmt.Sprintf(`INSERT OR REPLACE INTO %s ( + id, superseded_id, superseded_by_id, lastmodified + ) VALUES ( + ?, ?, ?, ? + )`, t.Name()) + + stmt, err := tx.Prepare(sql) + + if err != nil { + return database.PrepareStatementError(t, err) + } + + defer stmt.Close() + + superseded_by := properties.SupersededBy(f) + + for _, other_id := range superseded_by { + + _, err = stmt.Exec(id, id, other_id, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + } + + supersedes := properties.Supersedes(f) + + for _, other_id := range supersedes { + + _, err = stmt.Exec(id, other_id, id, lastmod) + + if err != nil { + return database.ExecuteStatementError(t, err) + } + + } + + err = tx.Commit() + + if err != nil { + return database.CommitTransactionError(t, err) + } + + return nil +} diff --git a/sql/tables/supersedes.sqlite.schema b/sql/tables/supersedes.sqlite.schema new file mode 100644 index 0000000..ce9c023 --- /dev/null +++ b/sql/tables/supersedes.sqlite.schema @@ -0,0 +1,6 @@ +CREATE TABLE {{ .Name }} ( + id INTEGER NOT NULL PRIMARY KEY, + superseded_id INTEGER NOT NULL, + superseded_by_id INTEGER NOT NULL, + lastmodified INTEGER +); \ No newline at end of file diff --git a/sql/tables/tables.go b/sql/tables/tables.go new file mode 100644 index 0000000..7789204 --- /dev/null +++ b/sql/tables/tables.go @@ -0,0 +1,53 @@ +package tables + +import ( + "bufio" + "bytes" + "database/sql" + "embed" + "fmt" + "text/template" + + "github.com/sfomuseum/go-database" +) + +//go:embed *.schema +var fs embed.FS + +func LoadSchema(db *sql.DB, table_name string) (string, error) { + + driver := database.Driver(db) + + fname := fmt.Sprintf("%s.%s.schema", table_name, driver) + + data, err := fs.ReadFile(fname) + + if err != nil { + return "", fmt.Errorf("Failed to read %s, %w", fname, err) + } + + t, err := template.New(table_name).Parse(string(data)) + + if err != nil { + return "", fmt.Errorf("Failed to parse %s template, %w", fname, err) + } + + vars := struct { + Name string + }{ + Name: table_name, + } + + var buf bytes.Buffer + wr := bufio.NewWriter(&buf) + + err = t.Execute(wr, vars) + + if err != nil { + return "", fmt.Errorf("Failed to process %s template, %w", fname, err) + } + + wr.Flush() + + return buf.String(), nil +} diff --git a/sql/tables/tables_test.go b/sql/tables/tables_test.go new file mode 100644 index 0000000..578b8d6 --- /dev/null +++ b/sql/tables/tables_test.go @@ -0,0 +1,37 @@ +package tables + +import ( + "testing" +) + +func TestLoadSchema(t *testing.T) { + + engines := []string{ + "sqlite", + } + + table_names := []string{ + ANCESTORS_TABLE_NAME, + CONCORDANCES_TABLE_NAME, + GEOJSON_TABLE_NAME, + GEOMETRIES_TABLE_NAME, + NAMES_TABLE_NAME, + PROPERTIES_TABLE_NAME, + RTREE_TABLE_NAME, + SEARCH_TABLE_NAME, + SPR_TABLE_NAME, + SUPERSEDES_TABLE_NAME, + } + + for _, 
e := range engines {
+
+ for _, n := range table_names {
+
+ _, err := LoadSchema(e, n)
+
+ if err != nil {
+ t.Fatalf("Failed to load %s table for %s database engine, %v", n, e, err)
+ }
+ }
+ }
+}
diff --git a/sql/tables/whosonfirst.go b/sql/tables/whosonfirst.go
new file mode 100644
index 0000000..be77c63
--- /dev/null
+++ b/sql/tables/whosonfirst.go
@@ -0,0 +1,3 @@
+package tables
+
+const WHOSONFIRST_TABLE_NAME string = "whosonfirst"
diff --git a/sql/tables/whosonfirst.mysql.schema b/sql/tables/whosonfirst.mysql.schema
new file mode 100644
index 0000000..08f8904
--- /dev/null
+++ b/sql/tables/whosonfirst.mysql.schema
@@ -0,0 +1,30 @@
+CREATE TABLE IF NOT EXISTS whosonfirst (
+ id BIGINT UNSIGNED PRIMARY KEY,
+ properties JSON NOT NULL,
+ geometry GEOMETRY NOT NULL,
+ centroid POINT NOT NULL COMMENT 'This is not necessarily a math centroid',
+ lastmodified INT NOT NULL,
+ parent_id BIGINT GENERATED ALWAYS AS (JSON_UNQUOTE(JSON_EXTRACT(properties,'$."wof:parent_id"'))) VIRTUAL,
+ placetype VARCHAR(64) GENERATED ALWAYS AS (JSON_UNQUOTE(JSON_EXTRACT(properties,'$."wof:placetype"'))) VIRTUAL,
+ is_current TINYINT GENERATED ALWAYS AS (JSON_CONTAINS_PATH(properties, 'one', '$."mz:is_current"') AND JSON_UNQUOTE(JSON_EXTRACT(properties,'$."mz:is_current"'))) VIRTUAL,
+ is_nullisland TINYINT GENERATED ALWAYS AS (JSON_CONTAINS_PATH(properties, 'one', '$."mz:is_nullisland"') AND JSON_LENGTH(JSON_EXTRACT(properties, '$."mz:is_nullisland"'))) VIRTUAL,
+ is_approximate TINYINT GENERATED ALWAYS AS (JSON_CONTAINS_PATH(properties, 'one', '$."mz:is_approximate"') AND JSON_LENGTH(JSON_EXTRACT(properties, '$."mz:is_approximate"'))) VIRTUAL,
+ is_ceased TINYINT GENERATED ALWAYS AS (JSON_CONTAINS_PATH(properties, 'one', '$."edtf:cessation"') AND JSON_UNQUOTE(JSON_EXTRACT(properties,'$."edtf:cessation"')) != "" AND JSON_UNQUOTE(JSON_EXTRACT(properties,'$."edtf:cessation"')) != "open" AND json_unquote(json_extract(properties,'$."edtf:cessation"')) != "uuuu") VIRTUAL,
+ is_deprecated TINYINT GENERATED ALWAYS AS (JSON_CONTAINS_PATH(properties, 'one', '$."edtf:deprecated"') AND JSON_UNQUOTE(JSON_EXTRACT(properties,'$."edtf:deprecated"')) != "" AND json_unquote(json_extract(properties,'$."edtf:deprecated"')) != "uuuu") VIRTUAL,
+ is_superseded TINYINT GENERATED ALWAYS AS (JSON_LENGTH(JSON_EXTRACT(properties, '$."wof:superseded_by"')) > 0) VIRTUAL,
+ is_superseding TINYINT GENERATED ALWAYS AS (JSON_LENGTH(JSON_EXTRACT(properties, '$."wof:supersedes"')) > 0) VIRTUAL,
+ date_upper DATE GENERATED ALWAYS AS (JSON_UNQUOTE(JSON_EXTRACT(properties, '$."date:cessation_upper"'))) VIRTUAL,
+ date_lower DATE GENERATED ALWAYS AS (JSON_UNQUOTE(JSON_EXTRACT(properties, '$."date:inception_lower"'))) VIRTUAL,
+ KEY parent_id (parent_id),
+ KEY placetype (placetype),
+ KEY is_current (is_current),
+ KEY is_nullisland (is_nullisland),
+ KEY is_approximate (is_approximate),
+ KEY is_deprecated (is_deprecated),
+ KEY is_superseded (is_superseded),
+ KEY is_superseding (is_superseding),
+ KEY date_upper (date_upper),
+ KEY date_lower (date_lower),
+ SPATIAL KEY idx_geometry (geometry),
+ SPATIAL KEY idx_centroid (centroid)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
\ No newline at end of file
diff --git a/vendor/github.com/aaronland/go-json-query/.gitignore b/vendor/github.com/aaronland/go-json-query/.gitignore
new file mode 100644
index 0000000..6e7e38b
--- /dev/null
+++ b/vendor/github.com/aaronland/go-json-query/.gitignore
@@ -0,0 +1,2 @@
+*~
+bin
\ No newline at end of file
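Because the `whosonfirst` MySQL schema above derives columns like `placetype` and `is_current` from the JSON `properties` blob as generated (VIRTUAL) columns, those flags can be queried without parsing any JSON at read time. The following is a minimal sketch; the DSN and credentials are hypothetical and the `go-sql-driver/mysql` driver is an assumption.

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {

	// Hypothetical DSN; adjust credentials, host and database name as required
	db, err := sql.Open("mysql", "user:pass@tcp(localhost:3306)/whosonfirst")

	if err != nil {
		log.Fatal(err)
	}

	defer db.Close()

	// placetype and is_current are VIRTUAL columns generated from the
	// properties blob, so this query never touches the JSON directly
	rows, err := db.Query(`SELECT id, parent_id FROM whosonfirst WHERE placetype = ? AND is_current = 1 LIMIT 10`, "locality")

	if err != nil {
		log.Fatal(err)
	}

	defer rows.Close()

	for rows.Next() {

		var id int64
		var parent_id sql.NullInt64 // parent_id is derived from JSON and may be NULL

		if err := rows.Scan(&id, &parent_id); err != nil {
			log.Fatal(err)
		}

		fmt.Println(id, parent_id.Int64)
	}
}
```

diff --git 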
a/vendor/github.com/aaronland/go-json-query/LICENSE b/vendor/github.com/aaronland/go-json-query/LICENSE
new file mode 100644
index 0000000..bcd322a
--- /dev/null
+++ b/vendor/github.com/aaronland/go-json-query/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2020, Aaron Straup Cope
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the {organization} nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/aaronland/go-json-query/Makefile b/vendor/github.com/aaronland/go-json-query/Makefile
new file mode 100644
index 0000000..8c53999
--- /dev/null
+++ b/vendor/github.com/aaronland/go-json-query/Makefile
@@ -0,0 +1,2 @@
+cli:
+ go build -mod vendor -o bin/matches cmd/matches/main.go
diff --git a/vendor/github.com/aaronland/go-json-query/README.md b/vendor/github.com/aaronland/go-json-query/README.md
new file mode 100644
index 0000000..1475389
--- /dev/null
+++ b/vendor/github.com/aaronland/go-json-query/README.md
@@ -0,0 +1,63 @@
+# go-json-query
+
+Go package for querying and filter JSON documents using tidwall/gjson-style paths and regular expressions for testing values.
+
+## Documentation
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/aaronland/go-json-query.svg)](https://pkg.go.dev/github.com/aaronland/go-json-query)
+
+## Important
+
+Documentation is incomplete.
+
+## Example
+
+```
+import (
+ "context"
+ "flag"
+ "fmt"
+ "github.com/aaronland/go-json-query"
+ "io"
+ "os"
+ "strings"
+)
+
+func main() {
+
+ var queries query.QueryFlags
+ flag.Var(&queries, "query", "One or more {PATH}={REGEXP} parameters for filtering records.")
+
+ valid_modes := strings.Join([]string{query.QUERYSET_MODE_ALL, query.QUERYSET_MODE_ANY}, ", ")
+ desc_modes := fmt.Sprintf("Specify how query filtering should be evaluated. 
Valid modes are: %s", valid_modes) + + query_mode := flag.String("query-mode", query.QUERYSET_MODE_ALL, desc_modes) + + flag.Parse() + + paths := flag.Args() + + qs := &query.QuerySet{ + Queries: queries, + Mode: *query_mode, + } + + ctx := context.Background() + + for _, path := range paths { + + fh, _ := os.Open(path) + defer fh.Close() + + body, _ := io.ReadAll(fh) + + matches, _ := query.Matches(ctx, qs, body) + + fmt.Printf("%s\t%t\n", path, matches) + } +} +``` + +## See also + +* https://github.com/tidwall/gjson \ No newline at end of file diff --git a/vendor/github.com/aaronland/go-json-query/doc.go b/vendor/github.com/aaronland/go-json-query/doc.go new file mode 100644 index 0000000..423f0ae --- /dev/null +++ b/vendor/github.com/aaronland/go-json-query/doc.go @@ -0,0 +1,48 @@ +// package provides a lightweight interface for querying and filter JSON documents using tidwall/gjson-style paths and regular expressions for testing values. +// +// Example +// +// import ( +// "context" +// "flag" +// "fmt" +// "github.com/aaronland/go-json-query" +// "io" +// "os" +// "strings" +// ) +// +// func main() { +// +// var queries query.QueryFlags +// flag.Var(&queries, "query", "One or more {PATH}={REGEXP} parameters for filtering records.") +// +// valid_modes := strings.Join([]string{query.QUERYSET_MODE_ALL, query.QUERYSET_MODE_ANY}, ", ") +// desc_modes := fmt.Sprintf("Specify how query filtering should be evaluated. Valid modes are: %s", valid_modes) +// +// query_mode := flag.String("query-mode", query.QUERYSET_MODE_ALL, desc_modes) +// +// flag.Parse() +// +// paths := flag.Args() +// +// qs := &query.QuerySet{ +// Queries: queries, +// Mode: *query_mode, +// } +// +// ctx := context.Background() +// +// for _, path := range paths { +// +// fh, _ := os.Open(path) +// defer fh.Close() +// +// body, _ := io.ReadAll(fh) +// +// matches, _ := query.Matches(ctx, qs, body) +// +// fmt.Printf("%s\t%t\n", path, matches) +// } +// } +package query diff --git a/vendor/github.com/aaronland/go-json-query/flags.go b/vendor/github.com/aaronland/go-json-query/flags.go new file mode 100644 index 0000000..8e997f3 --- /dev/null +++ b/vendor/github.com/aaronland/go-json-query/flags.go @@ -0,0 +1,45 @@ +package query + +import ( + "errors" + "regexp" + "strings" +) + +// The separator string used to distinguish {PATH}={REGULAR_EXPRESSION} strings. +const SEP string = "=" + +// QueryFlags holds one or more Query instances that are created using {PATH}={REGULAR_EXPRESSION} strings. +type QueryFlags []*Query + +// Return the string value of the set of Query instances. Currently returns "". +func (m *QueryFlags) String() string { + return "" +} + +// Parse a {PATH}={REGULAR_EXPRESSION} string and store it as one of a set of Query instances. 
+func (m *QueryFlags) Set(value string) error { + + parts := strings.Split(value, SEP) + + if len(parts) != 2 { + return errors.New("Invalid query flag") + } + + path := parts[0] + str_match := parts[1] + + re, err := regexp.Compile(str_match) + + if err != nil { + return err + } + + q := &Query{ + Path: path, + Match: re, + } + + *m = append(*m, q) + return nil +} diff --git a/vendor/github.com/aaronland/go-json-query/query.go b/vendor/github.com/aaronland/go-json-query/query.go new file mode 100644 index 0000000..4c920e7 --- /dev/null +++ b/vendor/github.com/aaronland/go-json-query/query.go @@ -0,0 +1,89 @@ +package query + +import ( + "context" + "github.com/tidwall/gjson" + _ "log" + "regexp" +) + +// QUERYSET_MODE_ANY is a flag to signal that only one match in a QuerySet needs to be successful. +const QUERYSET_MODE_ANY string = "ANY" + +// QUERYSET_MODE_ALL is a flag to signal that only all matches in a QuerySet needs to be successful. +const QUERYSET_MODE_ALL string = "ALL" + +// QuerySet is a struct containing one or more Query instances and flags for how the results of those queries should be interpreted. +type QuerySet struct { + // A set of Query instances + Queries []*Query + // A string flag representing how query results should be interpreted. + Mode string +} + +// Query is an atomic query to perform against a JSON document. +type Query struct { + // A valid tidwall/gjson query path. + Path string + // A valid regular expression. + Match *regexp.Regexp +} + +// Matches compares the set of queries in 'qs' against a JSON record ('body') and returns true or false depending on whether or not some or all of those queries are matched successfully. +func Matches(ctx context.Context, qs *QuerySet, body []byte) (bool, error) { + + select { + case <-ctx.Done(): + return false, nil + default: + // pass + } + + queries := qs.Queries + mode := qs.Mode + + tests := len(queries) + matches := 0 + + for _, q := range queries { + + rsp := gjson.GetBytes(body, q.Path) + + if !rsp.Exists() { + + if mode == QUERYSET_MODE_ALL { + break + } + } + + for _, r := range rsp.Array() { + + if q.Match.MatchString(r.String()) { + + matches += 1 + + if mode == QUERYSET_MODE_ANY { + break + } + } + } + + if mode == QUERYSET_MODE_ANY && matches > 0 { + break + } + + } + + if mode == QUERYSET_MODE_ALL { + + if matches < tests { + return false, nil + } + } + + if matches == 0 { + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/aaronland/go-roster/.gitignore b/vendor/github.com/aaronland/go-roster/.gitignore new file mode 100644 index 0000000..e4e5f6c --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/.gitignore @@ -0,0 +1 @@ +*~ \ No newline at end of file diff --git a/vendor/github.com/aaronland/go-roster/LICENSE b/vendor/github.com/aaronland/go-roster/LICENSE new file mode 100644 index 0000000..29b6a83 --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019, Aaron Straup Cope +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aaronland/go-roster/README.md b/vendor/github.com/aaronland/go-roster/README.md new file mode 100644 index 0000000..e630b02 --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/README.md @@ -0,0 +1,129 @@ +# go-roster + +Go package provides interfaces and methods for defining internal lookup tables (or "rosters") for registering and instantiatinge custom interfaces with multiple implementations. + +## Documentation + +[![Go Reference](https://pkg.go.dev/badge/github.com/aaronland/go-roster.svg)](https://pkg.go.dev/github.com/aaronland/go-roster) + +## Example + +The following example is the body of the [roster_test.go](roster_test.go) file: + +``` +package roster + +import ( + "context" + "fmt" + "net/url" + "testing" +) + +// Create a toy interface that might have multiple implementations including a common +// method signature for creating instantiations of that interface. 
+ +type Example interface { + String() string +} + +type ExampleInitializationFunc func(context.Context, string) (Example, error) + +func RegisterExample(ctx context.Context, scheme string, init_func ExampleInitializationFunc) error { + return example_roster.Register(ctx, scheme, init_func) +} + +func NewExample(ctx context.Context, uri string) (Example, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, fmt.Errorf("Failed to parse URI, %w", err) + } + + scheme := u.Scheme + + i, err := example_roster.Driver(ctx, scheme) + + if err != nil { + return nil, fmt.Errorf("Failed to find registeration for %s, %w", scheme, err) + } + + init_func := i.(ExampleInitializationFunc) + return init_func(ctx, uri) +} + +// Something that implements the Example interface + +type StringExample struct { + Example + value string +} + +func NewStringExample(ctx context.Context, uri string) (Example, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, fmt.Errorf("Failed to parse URL, %w", err) + } + + s := &StringExample{ + value: u.Path, + } + + return s, nil +} + +func (e *StringExample) String() string { + return e.value +} + +// Create a global "roster" of implementations of the Example interface + +var example_roster Roster + +// Ensure that there is a valid roster (for use by the code handling the Example interface) +// and register the StringExample implementation + +func init() { + + ctx := context.Background() + + r, err := NewDefaultRoster() + + if err != nil { + panic(err) + } + + example_roster = r + + err = RegisterExample(ctx, "string", NewStringExample) + + if err != nil { + panic(err) + } +} + +func TestRoster(t *testing.T) { + + ctx := context.Background() + + e, err := NewExample(ctx, "string:///helloworld") + + if err != nil { + t.Fatalf("Failed to create new example, %v", err) + } + + v := e.String() + + if v != "/helloworld" { + t.Fatalf("Unexpected result: '%s'", v) + } +} +``` + +## Concrete examples + +* https://github.com/whosonfirst/go-reader +* https://github.com/whosonfirst/go-writer \ No newline at end of file diff --git a/vendor/github.com/aaronland/go-roster/default.go b/vendor/github.com/aaronland/go-roster/default.go new file mode 100644 index 0000000..2e3e337 --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/default.go @@ -0,0 +1,101 @@ +package roster + +import ( + "context" + "errors" + "fmt" + "sort" + "strings" + "sync" +) + +// DefaultRoster implements the the `Roster` interface mapping scheme names to arbitrary interface values. +type DefaultRoster struct { + Roster + mu *sync.RWMutex + drivers map[string]interface{} +} + +// NewDefaultRoster returns a new `DefaultRoster` instance. +func NewDefaultRoster() (Roster, error) { + + mu := new(sync.RWMutex) + drivers := make(map[string]interface{}) + + dr := &DefaultRoster{ + mu: mu, + drivers: drivers, + } + + return dr, nil +} + +// Driver returns the value associated with the key for the normalized value of 'name' in the list of registered +// drivers available to 'dr'. +func (dr *DefaultRoster) Driver(ctx context.Context, name string) (interface{}, error) { + + nrml_name := dr.NormalizeName(ctx, name) + + dr.mu.Lock() + defer dr.mu.Unlock() + + i, ok := dr.drivers[nrml_name] + + if !ok { + return nil, fmt.Errorf("Unknown driver: %s (%s)", name, nrml_name) + } + + return i, nil +} + +// Registers creates a new entry in the list of drivers available to 'dr' mapping the normalized version of 'name' to 'i'. 
+func (dr *DefaultRoster) Register(ctx context.Context, name string, i interface{}) error { + + dr.mu.Lock() + defer dr.mu.Unlock() + + if i == nil { + return errors.New("Nothing to register") + } + + nrml_name := dr.NormalizeName(ctx, name) + + _, dup := dr.drivers[nrml_name] + + if dup { + return fmt.Errorf("Register called twice for reader '%s'", name) + } + + dr.drivers[nrml_name] = i + return nil +} + +// UnregisterAll removes all the registers drivers from 'dr'. +func (dr *DefaultRoster) UnregisterAll(ctx context.Context) error { + dr.mu.Lock() + defer dr.mu.Unlock() + + dr.drivers = make(map[string]interface{}) + return nil +} + +// NormalizeName returns a normalized (upper-cased) version of 'name'. +func (dr *DefaultRoster) NormalizeName(ctx context.Context, name string) string { + return strings.ToUpper(name) +} + +// Drivers returns the list of registered schemes for all the drivers available to 'dr'. +func (dr *DefaultRoster) Drivers(ctx context.Context) []string { + + dr.mu.RLock() + defer dr.mu.RUnlock() + + var list []string + + for name := range dr.drivers { + list = append(list, name) + } + + sort.Strings(list) + return list +} diff --git a/vendor/github.com/aaronland/go-roster/doc.go b/vendor/github.com/aaronland/go-roster/doc.go new file mode 100644 index 0000000..ca273f6 --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/doc.go @@ -0,0 +1,4 @@ +// package roster provides interfaces and methods for defining internal lookup tables for registering +// and instantiatinge custom interfaces with multiple implementations. The expectation is that these +// (roster) interfaces will never be seen by user-facing code. +package roster diff --git a/vendor/github.com/aaronland/go-roster/roster.go b/vendor/github.com/aaronland/go-roster/roster.go new file mode 100644 index 0000000..44b9333 --- /dev/null +++ b/vendor/github.com/aaronland/go-roster/roster.go @@ -0,0 +1,19 @@ +package roster + +import ( + "context" +) + +// type Roster is an interface for defining internal lookup tables (or "rosters") for registering and instantiatinge custom interfaces with multiple implementations. +type Roster interface { + // Driver returns the value associated with a name or scheme from the list of drivers that have been registered. + Driver(context.Context, string) (interface{}, error) + // Drivers returns the list of names or schemes for the list of drivers that have been registered. + Drivers(context.Context) []string + // UnregisterAll removes all the registered drivers from an instance implementing the Roster interfave. + UnregisterAll(context.Context) error + // NormalizeName returns a normalized version of a string. + NormalizeName(context.Context, string) string + // Register associated a name or scheme with an arbitrary interface. 
+ Register(context.Context, string, interface{}) error +} diff --git a/vendor/github.com/dominikbraun/graph/.gitignore b/vendor/github.com/dominikbraun/graph/.gitignore new file mode 100644 index 0000000..ee770a6 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.idea/ diff --git a/vendor/github.com/dominikbraun/graph/.golangci.yml b/vendor/github.com/dominikbraun/graph/.golangci.yml new file mode 100644 index 0000000..e1f1ab3 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/.golangci.yml @@ -0,0 +1,20 @@ +run: + timeout: 5m + +linters: + disable-all: true + enable: + - govet + - errcheck + - gosimple + - ineffassign + - staticcheck + - typecheck + - unused + +linters-settings: + govet: + enable-all: true + disable: + - stdmethods + - fieldalignment diff --git a/vendor/github.com/dominikbraun/graph/CHANGELOG.md b/vendor/github.com/dominikbraun/graph/CHANGELOG.md new file mode 100644 index 0000000..99022c9 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/CHANGELOG.md @@ -0,0 +1,297 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project +adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.23.0] - 2023-07-05 + +**Are you using graph? [Check out the graph user survey](https://forms.gle/MLKUZKMeCRxTfj4v9)** + +### Added +* Added the `AllPathsBetween` function for computing all paths between two vertices. + +## [0.22.3] - 2023-06-14 + +### Changed +* Changed `StableTopologicalSort` to invoke the `less` function as few as possible, reducing comparisons. +* Changed `CreatesCycle` to use an optimized path if the default in-memory store is being used. +* Changed map allocations to use pre-defined memory sizes. + +## [0.22.2] - 2023-06-06 + +### Fixed +* Fixed the major performance issues of `StableTopologicalSort`. + +## [0.22.1] - 2023-06-05 + +### Fixed +* Fixed `TopologicalSort` to retain its original performance. + +## [0.22.0] - 2023-05-24 + +### Added +* Added the `StableTopologicalSort` function for deterministic topological orderings. +* Added the `VertexAttributes` functional option for setting an entire vertex attributes map. + +## [0.21.0] - 2023-05-18 + +### Added +* Added the `BFSWithDepth` function for performing a BFS with depth information. + +### Fixed +* Fixed false positives of `ErrVertexHasEdges` when removing a vertex. + +## [0.20.0] - 2023-05-01 + +**Release post: [graph Version 0.20 Is Out](https://dominikbraun.io/blog/graph-version-0.20-is-out/)** + +### Added +* Added the `Graph.AddVerticesFrom` method for adding all vertices from another graph. +* Added the `Graph.AddEdgesFrom` method for adding all edges from another graph. +* Added the `Graph.Edges` method for obtaining all edges as a slice. +* Added the `Graph.UpdateEdge` method for updating the properties of an edge. +* Added the `Store.UpdateEdge` method for updating the properties of an edge. +* Added the `NewLike` function for creating a new graph that is "like" the given graph. +* Added the `EdgeAttributes` functional option for setting an entire edge attributes map. 
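A minimal sketch of the 0.20.0 additions in use (the `graph.IntHash` hash function and the `graph.EdgeWeight` functional option are assumptions based on the library's documented API):

```
package main

import (
	"fmt"

	"github.com/dominikbraun/graph"
)

func main() {

	g := graph.New(graph.IntHash, graph.Directed())

	_ = g.AddVertex(1)
	_ = g.AddVertex(2)

	// Create an edge with an initial weight...
	_ = g.AddEdge(1, 2, graph.EdgeWeight(10))

	// ...and update its properties in place with UpdateEdge (added in 0.20.0)
	_ = g.UpdateEdge(1, 2, graph.EdgeWeight(20))

	// Edges (added in 0.20.0) returns all edges, with their properties, as a slice
	edges, err := g.Edges()

	if err != nil {
		panic(err)
	}

	fmt.Println(len(edges)) // 1
}
```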
+ +### Changed +* Changed `Graph.Clone` to use the built-in in-memory store for storing vertices and edges for cloned graphs. + +## [0.19.0] - 2023-04-23 + +### Added +* Added the `MinimumSpanningTree` function for finding a minimum spanning tree. +* Added the `MaximumSpanningTree` function for finding a maximum spanning tree. + +## [0.18.0] - 2023-04-16 + +### Added +* Added the `Graph.RemoveVertex` method for removing a vertex. +* Added the `Store.RemoveVertex` method for removing a vertex. +* Added the `ErrVertexHasEdges` error instance. +* Added the `Union` function for combining two graphs into one. + +## [0.17.0] - 2023-04-12 + +### Added +* Added the `draw.GraphAttributes` functional option for `draw.DOT` for rendering graph attributes. + +### Changed +* Changed the library's GoDoc documentation. + +## [0.16.2] - 2023-03-27 + +### Fixed +* Fixed `ShortestPath` for an edge case. + +## [0.16.1] - 2023-03-06 + +### Fixed +* Fixed `TransitiveReduction` not to incorrectly report cycles. + +## [0.16.0] - 2023-03-01 + +**This release contains breaking changes of the public API (see "Changed").** + +### Added +* Added the `Store` interface, introducing support for custom storage implementations. +* Added the `NewWithStore` function for explicitly initializing a graph with a `Store` instance. +* Added the `EdgeData` functional option that can be used with `AddEdge`, introducing support for arbitrary data. +* Added the `Data` field to `EdgeProperties` for retrieving data added using `EdgeData`. + +### Changed +* Changed `Order` to additionally return an error instance (breaking change). +* Changed `Size` to additionally return an error instance (breaking change). + +## [0.15.1] - 2023-01-18 + +### Changed +* Changed `ShortestPath` to return `ErrTargetNotReachable` if the target vertex is not reachable. + +### Fixed +* Fixed `ShortestPath` to return correct results for large unweighted graphs. + +## [0.15.0] - 2022-11-25 + +### Added +* Added the `ErrVertexAlreadyExists` error instance. Use `errors.Is` to check for this instance. +* Added the `ErrEdgeAlreadyExists` error instance. Use `errors.Is` to check for this instance. +* Added the `ErrEdgeCreatesCycle` error instance. Use `errors.Is` to check for this instance. + +### Changed +* Changed `AddVertex` to return `ErrVertexAlreadyExists` if the vertex already exists. +* Changed `VertexWithProperties` to return `ErrVertexNotFound` if the vertex doesn't exist. +* Changed `AddEdge` to return `ErrVertexNotFound` if either vertex doesn't exist. +* Changed `AddEdge` to return `ErrEdgeAlreadyExists` if the edge already exists. +* Changed `AddEdge` to return `ErrEdgeCreatesCycle` if cycle prevention is active and the edge would create a cycle. +* Changed `Edge` to return `ErrEdgeNotFound` if the edge doesn't exist. +* Changed `RemoveEdge` to return the error instances returned by `Edge`. + +## [0.14.0] - 2022-11-01 + +### Added +* Added the `ErrVertexNotFound` error instance. + +### Changed +* Changed `TopologicalSort` to fail at runtime when a cycle is detected. +* Changed `TransitiveReduction` to return the transitive reduction as a new graph and fail at runtime when a cycle is detected. +* Changed `Vertex` to return `ErrVertexNotFound` if the desired vertex couldn't be found. + +## [0.13.0] - 2022-10-15 + +### Added +* Added the `VertexProperties` type for storing vertex-related properties. +* Added the `VertexWithProperties` method for retrieving a vertex and its properties. 
+* Added the `VertexWeight` functional option that can be used for `AddVertex`. +* Added the `VertexAttribute` functional option that can be used for `AddVertex`. +* Added support for rendering vertices with attributes using `draw.DOT`. + +### Changed +* Changed `AddVertex` to accept functional options. +* Renamed `PermitCycles` to `PreventCycles`. This seems to be the price to pay if English isn't a library author's native language. + +### Fixed +* Fixed the behavior of `ShortestPath` when the target vertex is not reachable from one of the visited vertices. + +## [0.12.0] - 2022-09-19 + +### Added +* Added the `PermitCycles` option to explicitly prevent the creation of cycles. + +### Changed +* Changed the `Acyclic` option to not implicitly impose cycle checks for operations like `AddEdge`. To prevent the creation of cycles, use `PermitCycles`. +* Changed `TopologicalSort` to only work for graphs created with `PermitCycles`. This is temporary. +* Changed `TransitiveReduction` to only work for graphs created with `PermitCycles`. This is temporary. + +## [0.11.0] - 2022-09-15 + +### Added +* Added the `Order` method for retrieving the number of vertices in the graph. +* Added the `Size` method for retrieving the number of edges in the graph. + +### Changed +* Changed the `graph` logo. +* Changed an internal operation of `ShortestPath` from O(n) to O(log(n)) by implementing the priority queue as a binary heap. Note that the actual complexity might still be defined by `ShortestPath` itself. + +### Fixed +* Fixed `draw.DOT` to work correctly with vertices that contain special characters and whitespaces. + +## [0.10.0] - 2022-09-09 + +### Added +* Added the `PredecessorMap` method for obtaining a map with all predecessors of each vertex. +* Added the `RemoveEdge` method for removing the edge between two vertices. +* Added the `Clone` method for retrieving a deep copy of the graph. +* Added the `TopologicalSort` function for obtaining the topological order of the vertices in the graph. +* Added the `TransitiveReduction` function for transforming the graph into its transitive reduction. + +### Changed +* Changed the `visit` function of `DFS` to accept a vertex hash instead of the vertex value (i.e. `K` instead of `T`). +* Changed the `visit` function of `BFS` to accept a vertex hash instead of the vertex value (i.e. `K` instead of `T`). + +### Removed +* Removed the `Predecessors` function. Use `PredecessorMap` instead and look up the respective vertex. + +## [0.9.0] - 2022-08-17 + +### Added +* Added the `Graph.AddVertex` method for adding a vertex. This replaces `Graph.Vertex`. +* Added the `Graph.AddEdge` method for creating an edge. This replaces `Graph.Edge`. +* Added the `Graph.Vertex` method for retrieving a vertex by its hash. This is not to be confused with the old `Graph.Vertex` function for adding vertices that got replaced with `Graph.AddVertex`. +* Added the `Graph.Edge` method for retrieving an edge. This is not to be confused with the old `Graph.Edge` function for creating an edge that got replaced with `Graph.AddEdge`. +* Added the `Graph.Predecessors` function for retrieving a vertex' predecessors. +* Added the `DFS` function. +* Added the `BFS` function. +* Added the `CreatesCycle` function. +* Added the `StronglyConnectedComponents` function. +* Added the `ShortestPath` function. +* Added the `ErrEdgeNotFound` error indicating that a desired edge could not be found. + +### Removed +* Removed the `Graph.EdgeByHashes` method. Use `Graph.AddEdge` instead. 
+* Removed the `Graph.GetEdgeByHashes` method. Use `Graph.Edge` instead. +* Removed the `Graph.DegreeByHash` method. Use `Graph.Degree` instead. +* Removed the `Graph.Degree` method. +* Removed the `Graph.DFS` and `Graph.DFSByHash` methods. Use `DFS` instead. +* Removed the `Graph.BFS` and `Graph.BFSByHash` methods. Use `BFS` instead. +* Removed the `Graph.CreatesCycle` and `Graph.CreatesCycleByHashes` methods. Use `CreatesCycle` instead. +* Removed the `Graph.StronglyConnectedComponents` method. Use `StronglyConnectedComponents` instead. +* Removed the `Graph.ShortestPath` and `Graph.ShortestPathByHash` methods. Use `ShortestPath` instead. + +## [0.8.0] - 2022-08-01 + +### Added +* Added the `EdgeWeight` and `EdgeAttribute` functional options. +* Added the `Properties` field to `Edge`. + +### Changed +* Changed `Edge` to accept a variadic `options` parameter. +* Changed `EdgeByHashes` to accept a variadic `options` parameter. +* Renamed `draw.Graph` to `draw.DOT` for more clarity regarding the rendering format. + +### Removed +* Removed the `WeightedEdge` function. Use `Edge` with the `EdgeWeight` functional option instead. +* Removed the `WeightedEdgeByHashes` function. Use `EdgeByHashes` with the `EdgeWeight` functional option instead. + +### Fixed +* Fixed missing edge attributes when drawing a graph using `draw.DOT`. + +## [0.7.0] - 2022-07-26 + +### Added +* Added `draw` package for graph visualization using DOT-compatible renderers. +* Added `Traits` function for retrieving the graph's traits. + +## [0.6.0] - 2022-07-22 + +### Added +* Added `AdjacencyMap` function for retrieving an adjancency map for all vertices. + +### Removed +* Removed the `AdjacencyList` function. + +## [0.5.0] - 2022-07-21 + +### Added +* Added `AdjacencyList` function for retrieving an adjacency list for all vertices. + +### Changed +* Updated the examples in the documentation. + +## [0.4.0] - 2022-07-01 + +### Added +* Added `ShortestPath` function for computing shortest paths. + +### Changed +* Changed the term "properties" to "traits" in the code and documentation. +* Don't traverse all vertices in disconnected graphs by design. + +## [0.3.0] - 2022-06-27 + +### Added +* Added `StronglyConnectedComponents` function for detecting SCCs. +* Added various images to usage examples. + +## [0.2.0] - 2022-06-20 + +### Added +* Added `Degree` and `DegreeByHash` functions for determining vertex degrees. +* Added cycle checks when adding an edge using the `Edge` functions. + +## [0.1.0] - 2022-06-19 + +### Added +* Added `CreatesCycle` and `CreatesCycleByHashes` functions for predicting cycles. + +## [0.1.0-beta] - 2022-06-17 + +### Changed +* Introduced dedicated types for directed and undirected graphs, making `Graph[K, T]` an interface. + +## [0.1.0-alpha] - 2022-06-13 + +### Added +* Introduced core types and methods. diff --git a/vendor/github.com/dominikbraun/graph/LICENSE b/vendor/github.com/dominikbraun/graph/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/dominikbraun/graph/README.md b/vendor/github.com/dominikbraun/graph/README.md new file mode 100644 index 0000000..de85472 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/README.md @@ -0,0 +1,404 @@ +[中文版](README_CN.md) | [English Version](README.md) + +# + +A library for creating generic graph data structures and modifying, analyzing, +and visualizing them. + +**Are you using graph? [Check out the graph user survey.](https://forms.gle/MLKUZKMeCRxTfj4v9)** + +# Features + +* Generic vertices of any type, such as `int` or `City`. +* Graph traits with corresponding validations, such as cycle checks in acyclic graphs. +* Algorithms for finding paths or components, such as shortest paths or strongly connected components. +* Algorithms for transformations and representations, such as transitive reduction or topological order. +* Algorithms for non-recursive graph traversal, such as DFS or BFS. +* Vertices and edges with optional metadata, such as weights or custom attributes. +* Visualization of graphs using the DOT language and Graphviz. +* Integrate any storage backend by using your own `Store` implementation. +* Extensive tests with ~90% coverage, and zero dependencies. + +> Status: Because `graph` is in version 0, the public API shouldn't be considered stable. + +> This README may contain unreleased changes. Check out the [latest documentation](https://pkg.go.dev/github.com/dominikbraun/graph). + +# Getting started + +``` +go get github.com/dominikbraun/graph +``` + +# Quick examples + +## Create a graph of integers + +![graph of integers](img/simple.svg) + +```go +g := graph.New(graph.IntHash) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) +_ = g.AddVertex(4) +_ = g.AddVertex(5) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 4) +_ = g.AddEdge(2, 3) +_ = g.AddEdge(2, 4) +_ = g.AddEdge(2, 5) +_ = g.AddEdge(3, 5) +``` + +## Create a directed acyclic graph of integers + +![directed acyclic graph](img/dag.svg) + +```go +g := graph.New(graph.IntHash, graph.Directed(), graph.Acyclic()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) +_ = g.AddVertex(4) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) +_ = g.AddEdge(2, 3) +_ = g.AddEdge(2, 4) +_ = g.AddEdge(3, 4) +``` + +## Create a graph of a custom type + +To understand this example in detail, see the [concept of hashes](https://pkg.go.dev/github.com/dominikbraun/graph#hdr-Hashes). + +```go +type City struct { + Name string +} + +cityHash := func(c City) string { + return c.Name +} + +g := graph.New(cityHash) + +_ = g.AddVertex(london) +``` + +## Create a weighted graph + +![weighted graph](img/cities.svg) + +```go +g := graph.New(cityHash, graph.Weighted()) + +_ = g.AddVertex(london) +_ = g.AddVertex(munich) +_ = g.AddVertex(paris) +_ = g.AddVertex(madrid) + +_ = g.AddEdge("london", "munich", graph.EdgeWeight(3)) +_ = g.AddEdge("london", "paris", graph.EdgeWeight(2)) +_ = g.AddEdge("london", "madrid", graph.EdgeWeight(5)) +_ = g.AddEdge("munich", "madrid", graph.EdgeWeight(6)) +_ = g.AddEdge("munich", "paris", graph.EdgeWeight(2)) +_ = g.AddEdge("paris", "madrid", graph.EdgeWeight(4)) +``` + +## Perform a Depth-First Search + +This example traverses and prints all vertices in the graph in DFS order. 
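+The `visit` callback receives the hash of each visited vertex; returning `true` from it stops the traversal early.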
+ +![depth-first search](img/dfs.svg) + +```go +g := graph.New(graph.IntHash, graph.Directed()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) +_ = g.AddVertex(4) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) +_ = g.AddEdge(3, 4) + +_ = graph.DFS(g, 1, func(value int) bool { + fmt.Println(value) + return false +}) +``` + +``` +1 3 4 2 +``` + +## Find strongly connected components + +![strongly connected components](img/scc.svg) + +```go +g := graph.New(graph.IntHash) + +// Add vertices and edges ... + +scc, _ := graph.StronglyConnectedComponents(g) + +fmt.Println(scc) +``` + +``` +[[1 2 5] [3 4 8] [6 7]] +``` + +## Find the shortest path + +![shortest path algorithm](img/dijkstra.svg) + +```go +g := graph.New(graph.StringHash, graph.Weighted()) + +// Add vertices and weighted edges ... + +path, _ := graph.ShortestPath(g, "A", "B") + +fmt.Println(path) +``` + +``` +[A C E B] +``` + +## Find spanning trees + +![minimum spanning tree](img/mst.svg) + +```go +g := graph.New(graph.StringHash, graph.Weighted()) + +// Add vertices and edges ... + +mst, _ := graph.MinimumSpanningTree(g) +``` + +## Perform a topological sort + +![topological sort](img/topological-sort.svg) + +```go +g := graph.New(graph.IntHash, graph.Directed(), graph.PreventCycles()) + +// Add vertices and edges ... + +// For a deterministic topological ordering, use StableTopologicalSort. +order, _ := graph.TopologicalSort(g) + +fmt.Println(order) +``` + +``` +[1 2 3 4 5] +``` + +## Perform a transitive reduction + +![transitive reduction](img/transitive-reduction-before.svg) + +```go +g := graph.New(graph.StringHash, graph.Directed(), graph.PreventCycles()) + +// Add vertices and edges ... + +transitiveReduction, _ := graph.TransitiveReduction(g) +``` + +![transitive reduction](img/transitive-reduction-after.svg) + +## Prevent the creation of cycles + +![cycle checks](img/cycles.svg) + +```go +g := graph.New(graph.IntHash, graph.PreventCycles()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) + +if err := g.AddEdge(2, 3); err != nil { + panic(err) +} +``` + +``` +panic: an edge between 2 and 3 would introduce a cycle +``` + +## Visualize a graph using Graphviz + +The following example will generate a DOT description for `g` and write it into the given file. 
+ +```go +g := graph.New(graph.IntHash, graph.Directed()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) + +file, _ := os.Create("./mygraph.gv") +_ = draw.DOT(g, file) +``` + +To generate an SVG from the created file using Graphviz, use a command such as the following: + +``` +dot -Tsvg -O mygraph.gv +``` + +The `DOT` function also supports rendering graph attributes: + +```go +_ = draw.DOT(g, file, draw.GraphAttribute("label", "my-graph")) +``` + +### Draw a graph as in this documentation + +![simple graph](img/simple.svg) + +This graph has been rendered using the following program: + +```go +package main + +import ( + "os" + + "github.com/dominikbraun/graph" + "github.com/dominikbraun/graph/draw" +) + +func main() { + g := graph.New(graph.IntHash) + + _ = g.AddVertex(1, graph.VertexAttribute("colorscheme", "blues3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(2, graph.VertexAttribute("colorscheme", "greens3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(3, graph.VertexAttribute("colorscheme", "purples3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(4, graph.VertexAttribute("colorscheme", "ylorbr3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(5, graph.VertexAttribute("colorscheme", "reds3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + + _ = g.AddEdge(1, 2) + _ = g.AddEdge(1, 4) + _ = g.AddEdge(2, 3) + _ = g.AddEdge(2, 4) + _ = g.AddEdge(2, 5) + _ = g.AddEdge(3, 5) + + file, _ := os.Create("./simple.gv") + _ = draw.DOT(g, file) +} +``` + +It has been rendered using the `neato` engine: + +``` +dot -Tsvg -Kneato -O simple.gv +``` + +The example uses the [Brewer color scheme](https://graphviz.org/doc/info/colors.html#brewer) supported by Graphviz. + +## Storing edge attributes + +Edges may have one or more attributes which can be used to store metadata. Attributes will be taken +into account when [visualizing a graph](#visualize-a-graph-using-graphviz). For example, this edge +will be rendered in red color: + +```go +_ = g.AddEdge(1, 2, graph.EdgeAttribute("color", "red")) +``` + +To get an overview of all supported attributes, take a look at the +[DOT documentation](https://graphviz.org/doc/info/attrs.html). + +The stored attributes can be retrieved by getting the edge and accessing the `Properties.Attributes` +field. + +```go +edge, _ := g.Edge(1, 2) +color := edge.Properties.Attributes["color"] +``` + +## Storing edge data + +It is also possible to store arbitrary data inside edges, not just key-value string pairs. This data +is of type `any`. + +```go +_ = g.AddEdge(1, 2, graph.EdgeData(myData)) +``` + +The stored data can be retrieved by getting the edge and accessing the `Properties.Data` field. + +```go +edge, _ := g.Edge(1, 2) +myData := edge.Properties.Data +``` + +### Updating edge data + +Edge properties can be updated using `Graph.UpdateEdge`. The following example adds a new `color` +attribute to the edge (A,B) and sets the edge weight to 10. 
+ +```go +_ = g.UpdateEdge("A", "B", graph.EdgeAttribute("color", "red"), graph.EdgeWeight(10)) +``` + +The method signature and the accepted functional options are exactly the same as for `Graph.AddEdge`. + +## Storing vertex attributes + +Vertices may have one or more attributes which can be used to store metadata. Attributes will be +taken into account when [visualizing a graph](#visualize-a-graph-using-graphviz). For example, this +vertex will be rendered in red color: + +```go +_ = g.AddVertex(1, graph.VertexAttribute("style", "filled")) +``` + +The stored data can be retrieved by getting the vertex using `VertexWithProperties` and accessing +the `Attributes` field. + +```go +vertex, properties, _ := g.VertexWithProperties(1) +style := properties.Attributes["style"] +``` + +To get an overview of all supported attributes, take a look at the +[DOT documentation](https://graphviz.org/doc/info/attrs.html). + +## Store the graph in a custom storage + +You can integrate any storage backend by implementing the `Store` interface and initializing a new +graph with it: + +```go +g := graph.NewWithStore(graph.IntHash, myStore) +``` + +To implement the `Store` interface appropriately, take a look at the [documentation](https://pkg.go.dev/github.com/dominikbraun/graph#Store). +[`graph-sql`](https://github.com/dominikbraun/graph-sql) is a ready-to-use SQL store implementation. + +# Documentation + +The full documentation is available at [pkg.go.dev](https://pkg.go.dev/github.com/dominikbraun/graph). + +**Are you using graph? [Check out the graph user survey.](https://forms.gle/MLKUZKMeCRxTfj4v9)** diff --git a/vendor/github.com/dominikbraun/graph/README_CN.md b/vendor/github.com/dominikbraun/graph/README_CN.md new file mode 100644 index 0000000..aee1dc0 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/README_CN.md @@ -0,0 +1,393 @@ +[中文版](README_CN.md) | [English Version](README.md) + +# + +这是一款用于创建通用图数据结构、对其进行修改、分析和可视化的库。 + +# 特性 + +* 支持任意类型的通用顶点,例如 `int` 或 `City`。 +* 图的特征和相应的验证,例如在无环图中进行循环检查。 +* 寻找路径或连通图的算法,例如最短路径或强连通图。 +* 转换和表示的算法,例如传递闭包或拓扑排序。 +* 非递归图遍历的算法,例如 DFS 或 BFS。 +* 顶点和边可以包含可选的元数据,例如权重或自定义属性。 +* 使用 DOT 语言和 Graphviz 进行图形可视化。 +* 通过使用自己的 `Store` 实现,可以集成任何存储后端。 +* 包含广泛的测试,覆盖率约为 90%,且没有任何依赖项。 + +> 状态:由于 graph 版本处于 0.x 阶段,公共 API 不应被视为稳定的。 + +> README 可能包含未发布的更改。请查看 [latest documentation](https://pkg.go.dev/github.com/dominikbraun/graph). + +# 入门指南 + +``` +go get github.com/dominikbraun/graph +``` + +# 快速示例 + +## 创建整数类型节点ID图 + +![graph of integers](img/simple.svg) + +```go +g := graph.New(graph.IntHash) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) +_ = g.AddVertex(4) +_ = g.AddVertex(5) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 4) +_ = g.AddEdge(2, 3) +_ = g.AddEdge(2, 4) +_ = g.AddEdge(2, 5) +_ = g.AddEdge(3, 5) +``` + +## 创建整数类型节点ID有向无环图 + +![directed acyclic graph](img/dag.svg) + +```go +g := graph.New(graph.IntHash, graph.Directed(), graph.Acyclic()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) +_ = g.AddVertex(4) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) +_ = g.AddEdge(2, 3) +_ = g.AddEdge(2, 4) +_ = g.AddEdge(3, 4) +``` + +## 创建自定义类型节点ID图 + +要详细了解此示例,请参见 [concept of hashes](https://pkg.go.dev/github.com/dominikbraun/graph@v0.17.0-rc4#hdr-Hashes). 
+ +```go +type City struct { + Name string +} + +cityHash := func(c City) string { + return c.Name +} + +g := graph.New(cityHash) + +_ = g.AddVertex(london) +``` + +## 创建边带权重的图 + +![weighted graph](img/cities.svg) + +```go +g := graph.New(cityHash, graph.Weighted()) + +_ = g.AddVertex(london) +_ = g.AddVertex(munich) +_ = g.AddVertex(paris) +_ = g.AddVertex(madrid) + +_ = g.AddEdge("london", "munich", graph.EdgeWeight(3)) +_ = g.AddEdge("london", "paris", graph.EdgeWeight(2)) +_ = g.AddEdge("london", "madrid", graph.EdgeWeight(5)) +_ = g.AddEdge("munich", "madrid", graph.EdgeWeight(6)) +_ = g.AddEdge("munich", "paris", graph.EdgeWeight(2)) +_ = g.AddEdge("paris", "madrid", graph.EdgeWeight(4)) +``` + +## 执行深度优先搜索 + +这个示例按 DFS 顺序遍历并打印图中的所有顶点。 + +![depth-first search](img/dfs.svg) + +```go +g := graph.New(graph.IntHash, graph.Directed()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) +_ = g.AddVertex(4) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) +_ = g.AddEdge(3, 4) + +_ = graph.DFS(g, 1, func(value int) bool { + fmt.Println(value) + return false +}) +``` + +``` +1 3 4 2 +``` + +## 查找强联通分量 + +![strongly connected components](img/scc.svg) + +```go +g := graph.New(graph.IntHash) + +// Add vertices and edges ... + +scc, _ := graph.StronglyConnectedComponents(g) + +fmt.Println(scc) +``` + +``` +[[1 2 5] [3 4 8] [6 7]] +``` + +## 查找最短路径 + +![shortest path algorithm](img/dijkstra.svg) + +```go +g := graph.New(graph.StringHash, graph.Weighted()) + +// Add vertices and weighted edges ... + +path, _ := graph.ShortestPath(g, "A", "B") + +fmt.Println(path) +``` + +``` +[A C E B] +``` + +## 查找生成树 + +![minimum spanning tree](img/mst.svg) + +```go +g := graph.New(graph.StringHash, graph.Weighted()) + +// Add vertices and edges ... + +mst, _ := graph.MinimumSpanningTree(g) +``` + +## 执行拓扑排序 + +![topological sort](img/topological-sort.svg) + +```go +g := graph.New(graph.IntHash, graph.Directed(), graph.PreventCycles()) + +// Add vertices and edges ... + +// For a deterministic topological ordering, use StableTopologicalSort. +order, _ := graph.TopologicalSort(g) + +fmt.Println(order) +``` + +``` +[1 2 3 4 5] +``` + +## 执行传递闭包削减 + +![transitive reduction](img/transitive-reduction-before.svg) + +```go +g := graph.New(graph.StringHash, graph.Directed(), graph.PreventCycles()) + +// Add vertices and edges ... 
+ +transitiveReduction, _ := graph.TransitiveReduction(g) +``` + +![transitive reduction](img/transitive-reduction-after.svg) + +## 禁止创建环路 + +![cycle checks](img/cycles.svg) + +```go +g := graph.New(graph.IntHash, graph.PreventCycles()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) + +if err := g.AddEdge(2, 3); err != nil { + panic(err) +} +``` + +``` +panic: 在 2 和 3 之间创建的边将会引入一个环 +``` + +## 使用 Graphviz 图可视化 + +以下示例将为 `g` 生成一个 DOT 描述,并将其写入给定的文件中。 + +```go +g := graph.New(graph.IntHash, graph.Directed()) + +_ = g.AddVertex(1) +_ = g.AddVertex(2) +_ = g.AddVertex(3) + +_ = g.AddEdge(1, 2) +_ = g.AddEdge(1, 3) + +file, _ := os.Create("./mygraph.gv") +_ = draw.DOT(g, file) +``` + +要使用 Graphviz 从创建的文件生成 SVG,请使用如下命令: + +``` +dot -Tsvg -O mygraph.gv +``` + +`DOT` 函数还支持渲染图属性: + +```go +_ = draw.DOT(g, file, draw.GraphAttribute("label", "my-graph")) +``` + +### 按照此文档绘制图 + +![simple graph](img/simple.svg) + +图使用以下程序进行渲染: + +```go +package main + +import ( + "os" + + "github.com/dominikbraun/graph" + "github.com/dominikbraun/graph/draw" +) + +func main() { + g := graph.New(graph.IntHash) + + _ = g.AddVertex(1, graph.VertexAttribute("colorscheme", "blues3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(2, graph.VertexAttribute("colorscheme", "greens3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(3, graph.VertexAttribute("colorscheme", "purples3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(4, graph.VertexAttribute("colorscheme", "ylorbr3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + _ = g.AddVertex(5, graph.VertexAttribute("colorscheme", "reds3"), graph.VertexAttribute("style", "filled"), graph.VertexAttribute("color", "2"), graph.VertexAttribute("fillcolor", "1")) + + _ = g.AddEdge(1, 2) + _ = g.AddEdge(1, 4) + _ = g.AddEdge(2, 3) + _ = g.AddEdge(2, 4) + _ = g.AddEdge(2, 5) + _ = g.AddEdge(3, 5) + + file, _ := os.Create("./simple.gv") + _ = draw.DOT(g, file) +} +``` + +使用 neato 引擎进行可视化: + +``` +dot -Tsvg -Kneato -O simple.gv +``` + +这个例子使用Graphviz支持的 [Brewer color scheme](https://graphviz.org/doc/info/colors.html#brewer)。 + +## 存储边属性 + +边可以具有一个或多个属性,用于存储元数据。在[visualizing a graph](#visualize-a-graph-using-graphviz) 时将考虑这些属性。 +例如,此边将呈现为红色: + +```go +_ = g.AddEdge(1, 2, graph.EdgeAttribute("color", "red")) +``` + +要获取所有支持的属性的概述,请查看 +[DOT documentation](https://graphviz.org/doc/info/attrs.html). + +The stored attributes can be retrieved by getting the edge and accessing the `Properties.Attributes` +field. 
+可以通过获取边并访问 `Properties.Attributes` 字段来检索存储的属性。 + +```go +edge, _ := g.Edge(1, 2) +color := edge.Properties.Attributes["color"] +``` + +## 存储边数据 + +还可以在边上存储任意类型属性数据,而不仅仅是键值字符串对。此数据类型为 `any`。 + +```go +_ = g.AddEdge(1, 2, graph.EdgeData(myData)) +``` + +可以通过获取边并访问 `Properties.Data` 字段来检索存储的数据。 + +```go +edge, _ := g.Edge(1, 2) +myData := edge.Properties.Data +``` + +### 更新边数据 + +可以使用 `Graph.UpdateEdge` 更新边属性。以下示例向边 (A,B) 添加了一个新的 `color` 属性,并将边权重设置为 10。 + +```go +_ = g.UpdateEdge("A", "B", graph.EdgeAttribute("color", "red"), graph.EdgeWeight(10)) +``` + +`Graph.UpdateEdge` 的方法签名和接受的函数选项与 `Graph.AddEdge` 完全相同。 + +## 存储点属性 + +顶点可能具有一个或多个属性,可用于存储元数据。在 [visualizing a graph](#visualize-a-graph-using-graphviz) 时将考虑这些属性。 +例如,此顶点将以红色渲染: + +```go +_ = g.AddVertex(1, graph.VertexAttribute("style", "filled")) +``` + +存储在顶点中的数据可以通过使用 `VertexWithProperties` 获取顶点,并访问 `Attributes` 字段来检索。 + +```go +vertex, properties, _ := g.VertexWithProperties(1) +style := properties.Attributes["style"] +``` + +要获取所有支持的属性的概述,请查看 +[DOT documentation](https://graphviz.org/doc/info/attrs.html). + +## 将图存储在自定义存储中 + +可以通过实现 `Store` 接口并使用它初始化一个新的图,来集成任何存储后端: + +```go +g := graph.NewWithStore(graph.IntHash, myStore) +``` + +恰当实现 `Store` 接口,参考 [documentation](https://pkg.go.dev/github.com/dominikbraun/graph#Store)。 +[`graph-sql`](https://github.com/dominikbraun/graph-sql) 是一个可直接使用的 SQL 存储实现。 +# 文档 + +完整文档可在以下位置找到: [pkg.go.dev](https://pkg.go.dev/github.com/dominikbraun/graph). diff --git a/vendor/github.com/dominikbraun/graph/collection.go b/vendor/github.com/dominikbraun/graph/collection.go new file mode 100644 index 0000000..8340cb6 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/collection.go @@ -0,0 +1,161 @@ +package graph + +import ( + "container/heap" + "errors" +) + +// priorityQueue implements a minimum priority queue using a minimum binary heap +// that prioritizes smaller values over larger values. +type priorityQueue[T comparable] struct { + items *minHeap[T] + cache map[T]*priorityItem[T] +} + +// priorityItem is an item on the binary heap consisting of a priority value and +// an actual payload value. +type priorityItem[T comparable] struct { + value T + priority float64 + index int +} + +func newPriorityQueue[T comparable]() *priorityQueue[T] { + return &priorityQueue[T]{ + items: &minHeap[T]{}, + cache: map[T]*priorityItem[T]{}, + } +} + +// Len returns the total number of items in the priority queue. +func (p *priorityQueue[T]) Len() int { + return p.items.Len() +} + +// Push pushes a new item with the given priority into the queue. This operation +// may cause a re-balance of the heap and thus scales with O(log n). +func (p *priorityQueue[T]) Push(item T, priority float64) { + if _, ok := p.cache[item]; ok { + return + } + + newItem := &priorityItem[T]{ + value: item, + priority: priority, + index: 0, + } + + heap.Push(p.items, newItem) + p.cache[item] = newItem +} + +// Pop returns and removes the item with the lowest priority. This operation may +// cause a re-balance of the heap and thus scales with O(log n). +func (p *priorityQueue[T]) Pop() (T, error) { + if len(*p.items) == 0 { + var empty T + return empty, errors.New("priority queue is empty") + } + + item := heap.Pop(p.items).(*priorityItem[T]) + delete(p.cache, item.value) + + return item.value, nil +} + +// UpdatePriority updates the priority of a given item and sets it to the given +// priority. If the item doesn't exist, nothing happens. This operation may +// cause a re-balance of the heap and this scales with O(log n). 
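+//
+// An illustrative, package-internal sketch of the queue in use (not part of
+// the upstream file):
+//
+//	pq := newPriorityQueue[string]()
+//	pq.Push("a", 3)
+//	pq.Push("b", 1)
+//	pq.UpdatePriority("a", 0)
+//	next, _ := pq.Pop() // "a", now the item with the lowest priority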
+func (p *priorityQueue[T]) UpdatePriority(item T, priority float64) { + targetItem, ok := p.cache[item] + if !ok { + return + } + + targetItem.priority = priority + heap.Fix(p.items, targetItem.index) +} + +// minHeap is a minimum binary heap that implements heap.Interface. +type minHeap[T comparable] []*priorityItem[T] + +func (m *minHeap[T]) Len() int { + return len(*m) +} + +func (m *minHeap[T]) Less(i, j int) bool { + return (*m)[i].priority < (*m)[j].priority +} + +func (m *minHeap[T]) Swap(i, j int) { + (*m)[i], (*m)[j] = (*m)[j], (*m)[i] + (*m)[i].index = i + (*m)[j].index = j +} + +func (m *minHeap[T]) Push(item interface{}) { + i := item.(*priorityItem[T]) + i.index = len(*m) + *m = append(*m, i) +} + +func (m *minHeap[T]) Pop() interface{} { + old := *m + item := old[len(old)-1] + *m = old[:len(old)-1] + + return item +} + +type stack[T any] interface { + push(T) + pop() (T, error) + top() (T, error) + isEmpty() bool + // forEach iterate the stack from bottom to top + forEach(func(T)) +} + +func newStack[T any]() stack[T] { + return &stackImpl[T]{ + elements: make([]T, 0), + } +} + +type stackImpl[T any] struct { + elements []T +} + +func (s *stackImpl[T]) push(t T) { + s.elements = append(s.elements, t) +} + +func (s *stackImpl[T]) pop() (T, error) { + e, err := s.top() + if err != nil { + var defaultValue T + return defaultValue, err + } + + s.elements = s.elements[:len(s.elements)-1] + return e, nil +} + +func (s *stackImpl[T]) top() (T, error) { + if s.isEmpty() { + var defaultValue T + return defaultValue, errors.New("no element in stack") + } + + return s.elements[len(s.elements)-1], nil +} + +func (s *stackImpl[T]) isEmpty() bool { + return len(s.elements) == 0 +} + +func (s *stackImpl[T]) forEach(f func(T)) { + for _, e := range s.elements { + f(e) + } +} diff --git a/vendor/github.com/dominikbraun/graph/dag.go b/vendor/github.com/dominikbraun/graph/dag.go new file mode 100644 index 0000000..83fad22 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/dag.go @@ -0,0 +1,225 @@ +package graph + +import ( + "errors" + "fmt" + "sort" +) + +// TopologicalSort runs a topological sort on a given directed graph and returns +// the vertex hashes in topological order. The topological order is a non-unique +// order of vertices in a directed graph where an edge from vertex A to vertex B +// implies that vertex A appears before vertex B. +// +// Note that TopologicalSort doesn't make any guarantees about the order. If there +// are multiple valid topological orderings, an arbitrary one will be returned. +// To make the output deterministic, use [StableTopologicalSort]. +// +// TopologicalSort only works for directed acyclic graphs. This implementation +// works non-recursively and utilizes Kahn's algorithm. 
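+//
+// A minimal, illustrative sketch of sorting a three-vertex DAG (not part of
+// the upstream file):
+//
+//	g := graph.New(graph.IntHash, graph.Directed(), graph.Acyclic())
+//	_ = g.AddVertex(1)
+//	_ = g.AddVertex(2)
+//	_ = g.AddVertex(3)
+//	_ = g.AddEdge(1, 2)
+//	_ = g.AddEdge(2, 3)
+//	order, _ := graph.TopologicalSort(g) // [1 2 3]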
+func TopologicalSort[K comparable, T any](g Graph[K, T]) ([]K, error) { + if !g.Traits().IsDirected { + return nil, fmt.Errorf("topological sort cannot be computed on undirected graph") + } + + gOrder, err := g.Order() + if err != nil { + return nil, fmt.Errorf("failed to get graph order: %w", err) + } + + predecessorMap, err := g.PredecessorMap() + if err != nil { + return nil, fmt.Errorf("failed to get predecessor map: %w", err) + } + + queue := make([]K, 0) + + for vertex, predecessors := range predecessorMap { + if len(predecessors) == 0 { + queue = append(queue, vertex) + } + } + + order := make([]K, 0, gOrder) + visited := make(map[K]struct{}, gOrder) + + for len(queue) > 0 { + currentVertex := queue[0] + queue = queue[1:] + + if _, ok := visited[currentVertex]; ok { + continue + } + + order = append(order, currentVertex) + visited[currentVertex] = struct{}{} + + for vertex, predecessors := range predecessorMap { + delete(predecessors, currentVertex) + + if len(predecessors) == 0 { + queue = append(queue, vertex) + } + } + } + + if len(order) != gOrder { + return nil, errors.New("topological sort cannot be computed on graph with cycles") + } + + return order, nil +} + +// StableTopologicalSort does the same as [TopologicalSort], but takes a function +// for comparing (and then ordering) two given vertices. This allows for a stable +// and deterministic output even for graphs with multiple topological orderings. +func StableTopologicalSort[K comparable, T any](g Graph[K, T], less func(K, K) bool) ([]K, error) { + if !g.Traits().IsDirected { + return nil, fmt.Errorf("topological sort cannot be computed on undirected graph") + } + + predecessorMap, err := g.PredecessorMap() + if err != nil { + return nil, fmt.Errorf("failed to get predecessor map: %w", err) + } + + queue := make([]K, 0) + queued := make(map[K]struct{}) + + for vertex, predecessors := range predecessorMap { + if len(predecessors) == 0 { + queue = append(queue, vertex) + queued[vertex] = struct{}{} + } + } + + order := make([]K, 0, len(predecessorMap)) + visited := make(map[K]struct{}) + + sort.Slice(queue, func(i, j int) bool { + return less(queue[i], queue[j]) + }) + + for len(queue) > 0 { + currentVertex := queue[0] + queue = queue[1:] + + if _, ok := visited[currentVertex]; ok { + continue + } + + order = append(order, currentVertex) + visited[currentVertex] = struct{}{} + + frontier := make([]K, 0) + + for vertex, predecessors := range predecessorMap { + delete(predecessors, currentVertex) + + if len(predecessors) != 0 { + continue + } + + if _, ok := queued[vertex]; ok { + continue + } + + frontier = append(frontier, vertex) + queued[vertex] = struct{}{} + } + + sort.Slice(frontier, func(i, j int) bool { + return less(frontier[i], frontier[j]) + }) + + queue = append(queue, frontier...) + } + + gOrder, err := g.Order() + if err != nil { + return nil, fmt.Errorf("failed to get graph order: %w", err) + } + + if len(order) != gOrder { + return nil, errors.New("topological sort cannot be computed on graph with cycles") + } + + return order, nil +} + +// TransitiveReduction returns a new graph with the same vertices and the same +// reachability as the given graph, but with as few edges as possible. The graph +// must be a directed acyclic graph. +// +// TransitiveReduction is a very expensive operation scaling with O(V(V+E)). 
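+//
+// Illustrative sketch (not part of the upstream file): with edges (A,B),
+// (B,C), and the shortcut (A,C), the reduction removes (A,C) because C
+// remains reachable through B:
+//
+//	g := graph.New(graph.StringHash, graph.Directed(), graph.Acyclic())
+//	// ... add vertices "A", "B", "C" and the three edges ...
+//	reduced, _ := graph.TransitiveReduction(g)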
+func TransitiveReduction[K comparable, T any](g Graph[K, T]) (Graph[K, T], error) {
+	if !g.Traits().IsDirected {
+		return nil, fmt.Errorf("transitive reduction cannot be performed on undirected graph")
+	}
+
+	transitiveReduction, err := g.Clone()
+	if err != nil {
+		return nil, fmt.Errorf("failed to clone the graph: %w", err)
+	}
+
+	adjacencyMap, err := transitiveReduction.AdjacencyMap()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get adjacency map: %w", err)
+	}
+
+	// For each vertex in the graph, run a depth-first search from each direct
+	// successor of that vertex. Then, for each vertex visited within the DFS,
+	// check whether the top-level vertex also has a direct edge to it, and if
+	// so, remove that edge. Such an edge is redundant because its target is
+	// reachable not only directly from the top-level vertex, but also
+	// transitively through one of its successors.
+	for vertex, successors := range adjacencyMap {
+		tOrder, err := transitiveReduction.Order()
+		if err != nil {
+			return nil, fmt.Errorf("failed to get graph order: %w", err)
+		}
+		for successor := range successors {
+			stack := make([]K, 0, tOrder)
+			visited := make(map[K]struct{}, tOrder)
+			onStack := make(map[K]bool, tOrder)
+
+			stack = append(stack, successor)
+
+			for len(stack) > 0 {
+				current := stack[len(stack)-1]
+				stack = stack[:len(stack)-1]
+
+				if _, ok := visited[current]; ok {
+					onStack[current] = false
+					continue
+				}
+
+				visited[current] = struct{}{}
+				onStack[current] = true
+				stack = append(stack, current)
+
+				if len(adjacencyMap[current]) == 0 {
+					onStack[current] = false
+				}
+
+				for adjacency := range adjacencyMap[current] {
+					if _, ok := visited[adjacency]; ok {
+						if onStack[adjacency] {
+							// If the current adjacency is both on the stack and
+							// has already been visited, there is a cycle.
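+							// (A classic DFS back edge; the reduction is only
+							// defined for acyclic graphs, so fail here.)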
+ return nil, fmt.Errorf("transitive reduction cannot be performed on graph with cycle") + } + continue + } + + if _, ok := adjacencyMap[vertex][adjacency]; ok { + _ = transitiveReduction.RemoveEdge(vertex, adjacency) + } + stack = append(stack, adjacency) + } + } + } + } + + return transitiveReduction, nil +} diff --git a/vendor/github.com/dominikbraun/graph/directed.go b/vendor/github.com/dominikbraun/graph/directed.go new file mode 100644 index 0000000..95d67f5 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/directed.go @@ -0,0 +1,325 @@ +package graph + +import ( + "errors" + "fmt" +) + +type directed[K comparable, T any] struct { + hash Hash[K, T] + traits *Traits + store Store[K, T] +} + +func newDirected[K comparable, T any](hash Hash[K, T], traits *Traits, store Store[K, T]) *directed[K, T] { + return &directed[K, T]{ + hash: hash, + traits: traits, + store: store, + } +} + +func (d *directed[K, T]) Traits() *Traits { + return d.traits +} + +func (d *directed[K, T]) AddVertex(value T, options ...func(*VertexProperties)) error { + hash := d.hash(value) + properties := VertexProperties{ + Weight: 0, + Attributes: make(map[string]string), + } + + for _, option := range options { + option(&properties) + } + + return d.store.AddVertex(hash, value, properties) +} + +func (d *directed[K, T]) AddVerticesFrom(g Graph[K, T]) error { + adjacencyMap, err := g.AdjacencyMap() + if err != nil { + return fmt.Errorf("failed to get adjacency map: %w", err) + } + + for hash := range adjacencyMap { + vertex, properties, err := g.VertexWithProperties(hash) + if err != nil { + return fmt.Errorf("failed to get vertex %v: %w", hash, err) + } + + if err = d.AddVertex(vertex, copyVertexProperties(properties)); err != nil { + return fmt.Errorf("failed to add vertex %v: %w", hash, err) + } + } + + return nil +} + +func (d *directed[K, T]) Vertex(hash K) (T, error) { + vertex, _, err := d.store.Vertex(hash) + return vertex, err +} + +func (d *directed[K, T]) VertexWithProperties(hash K) (T, VertexProperties, error) { + vertex, properties, err := d.store.Vertex(hash) + if err != nil { + return vertex, VertexProperties{}, err + } + + return vertex, properties, nil +} + +func (d *directed[K, T]) RemoveVertex(hash K) error { + return d.store.RemoveVertex(hash) +} + +func (d *directed[K, T]) AddEdge(sourceHash, targetHash K, options ...func(*EdgeProperties)) error { + _, _, err := d.store.Vertex(sourceHash) + if err != nil { + return fmt.Errorf("source vertex %v: %w", sourceHash, err) + } + + _, _, err = d.store.Vertex(targetHash) + if err != nil { + return fmt.Errorf("target vertex %v: %w", targetHash, err) + } + + if _, err := d.Edge(sourceHash, targetHash); !errors.Is(err, ErrEdgeNotFound) { + return ErrEdgeAlreadyExists + } + + // If the user opted in to preventing cycles, run a cycle check. 
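+	// (PreventCycles is enabled via the PreventCycles functional option; when
+	// the check trips, AddEdge returns ErrEdgeCreatesCycle.)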
+ if d.traits.PreventCycles { + createsCycle, err := d.createsCycle(sourceHash, targetHash) + if err != nil { + return fmt.Errorf("check for cycles: %w", err) + } + if createsCycle { + return ErrEdgeCreatesCycle + } + } + + edge := Edge[K]{ + Source: sourceHash, + Target: targetHash, + Properties: EdgeProperties{ + Attributes: make(map[string]string), + }, + } + + for _, option := range options { + option(&edge.Properties) + } + + return d.addEdge(sourceHash, targetHash, edge) +} + +func (d *directed[K, T]) AddEdgesFrom(g Graph[K, T]) error { + edges, err := g.Edges() + if err != nil { + return fmt.Errorf("failed to get edges: %w", err) + } + + for _, edge := range edges { + if err := d.AddEdge(copyEdge(edge)); err != nil { + return fmt.Errorf("failed to add (%v, %v): %w", edge.Source, edge.Target, err) + } + } + + return nil +} + +func (d *directed[K, T]) Edge(sourceHash, targetHash K) (Edge[T], error) { + edge, err := d.store.Edge(sourceHash, targetHash) + if err != nil { + return Edge[T]{}, err + } + + sourceVertex, _, err := d.store.Vertex(sourceHash) + if err != nil { + return Edge[T]{}, err + } + + targetVertex, _, err := d.store.Vertex(targetHash) + if err != nil { + return Edge[T]{}, err + } + + return Edge[T]{ + Source: sourceVertex, + Target: targetVertex, + Properties: EdgeProperties{ + Weight: edge.Properties.Weight, + Attributes: edge.Properties.Attributes, + Data: edge.Properties.Data, + }, + }, nil +} + +func (d *directed[K, T]) Edges() ([]Edge[K], error) { + return d.store.ListEdges() +} + +func (d *directed[K, T]) UpdateEdge(source, target K, options ...func(properties *EdgeProperties)) error { + existingEdge, err := d.store.Edge(source, target) + if err != nil { + return err + } + + for _, option := range options { + option(&existingEdge.Properties) + } + + return d.store.UpdateEdge(source, target, existingEdge) +} + +func (d *directed[K, T]) RemoveEdge(source, target K) error { + if _, err := d.Edge(source, target); err != nil { + return err + } + + if err := d.store.RemoveEdge(source, target); err != nil { + return fmt.Errorf("failed to remove edge from %v to %v: %w", source, target, err) + } + + return nil +} + +func (d *directed[K, T]) AdjacencyMap() (map[K]map[K]Edge[K], error) { + vertices, err := d.store.ListVertices() + if err != nil { + return nil, fmt.Errorf("failed to list vertices: %w", err) + } + + edges, err := d.store.ListEdges() + if err != nil { + return nil, fmt.Errorf("failed to list edges: %w", err) + } + + m := make(map[K]map[K]Edge[K], len(vertices)) + + for _, vertex := range vertices { + m[vertex] = make(map[K]Edge[K]) + } + + for _, edge := range edges { + m[edge.Source][edge.Target] = edge + } + + return m, nil +} + +func (d *directed[K, T]) PredecessorMap() (map[K]map[K]Edge[K], error) { + vertices, err := d.store.ListVertices() + if err != nil { + return nil, fmt.Errorf("failed to list vertices: %w", err) + } + + edges, err := d.store.ListEdges() + if err != nil { + return nil, fmt.Errorf("failed to list edges: %w", err) + } + + m := make(map[K]map[K]Edge[K], len(vertices)) + + for _, vertex := range vertices { + m[vertex] = make(map[K]Edge[K]) + } + + for _, edge := range edges { + if _, ok := m[edge.Target]; !ok { + m[edge.Target] = make(map[K]Edge[K]) + } + m[edge.Target][edge.Source] = edge + } + + return m, nil +} + +func (d *directed[K, T]) addEdge(sourceHash, targetHash K, edge Edge[K]) error { + return d.store.AddEdge(sourceHash, targetHash, edge) +} + +func (d *directed[K, T]) Clone() (Graph[K, T], error) { + traits := &Traits{ + 
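+		// Copy each trait field explicitly so the clone preserves
+		// directedness, weighting, rootedness, and cycle prevention.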
IsDirected: d.traits.IsDirected, + IsAcyclic: d.traits.IsAcyclic, + IsWeighted: d.traits.IsWeighted, + IsRooted: d.traits.IsRooted, + PreventCycles: d.traits.PreventCycles, + } + + clone := &directed[K, T]{ + hash: d.hash, + traits: traits, + store: newMemoryStore[K, T](), + } + + if err := clone.AddVerticesFrom(d); err != nil { + return nil, fmt.Errorf("failed to add vertices: %w", err) + } + + if err := clone.AddEdgesFrom(d); err != nil { + return nil, fmt.Errorf("failed to add edges: %w", err) + } + + return clone, nil +} + +func (d *directed[K, T]) Order() (int, error) { + return d.store.VertexCount() +} + +func (d *directed[K, T]) Size() (int, error) { + size := 0 + outEdges, err := d.AdjacencyMap() + if err != nil { + return 0, fmt.Errorf("failed to get adjacency map: %w", err) + } + + for _, outEdges := range outEdges { + size += len(outEdges) + } + + return size, nil +} + +func (d *directed[K, T]) edgesAreEqual(a, b Edge[T]) bool { + aSourceHash := d.hash(a.Source) + aTargetHash := d.hash(a.Target) + bSourceHash := d.hash(b.Source) + bTargetHash := d.hash(b.Target) + + return aSourceHash == bSourceHash && aTargetHash == bTargetHash +} + +func (d *directed[K, T]) createsCycle(source, target K) (bool, error) { + // If the underlying store implements CreatesCycle, use that fast path. + if cc, ok := d.store.(interface { + CreatesCycle(source, target K) (bool, error) + }); ok { + return cc.CreatesCycle(source, target) + } + + // Slow path. + return CreatesCycle(Graph[K, T](d), source, target) +} + +// copyEdge returns an argument list suitable for the Graph.AddEdge method. This +// argument list is derived from the given edge, hence the name copyEdge. +// +// The last argument is a custom functional option that sets the edge properties +// to the properties of the original edge. +func copyEdge[K comparable](edge Edge[K]) (K, K, func(properties *EdgeProperties)) { + copyProperties := func(p *EdgeProperties) { + for k, v := range edge.Properties.Attributes { + p.Attributes[k] = v + } + p.Weight = edge.Properties.Weight + p.Data = edge.Properties.Data + } + + return edge.Source, edge.Target, copyProperties +} diff --git a/vendor/github.com/dominikbraun/graph/draw/draw.go b/vendor/github.com/dominikbraun/graph/draw/draw.go new file mode 100644 index 0000000..5f52249 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/draw/draw.go @@ -0,0 +1,145 @@ +// Package draw provides functions for visualizing graph structures. At this +// time, draw supports the DOT language which can be interpreted by Graphviz, +// Grappa, and others. +package draw + +import ( + "fmt" + "io" + "text/template" + + "github.com/dominikbraun/graph" +) + +// ToDo: This template should be simplified and split into multiple templates. 
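+//
+// For reference, a two-vertex directed graph renders roughly as follows
+// (illustrative output; exact spacing may differ):
+//
+//	strict digraph {
+//		"1" [ weight=0 ];
+//		"2" [ weight=0 ];
+//		"1" -> "2" [ weight=0 ];
+//	}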
+const dotTemplate = `strict {{.GraphType}} { +{{range $k, $v := .Attributes}} + {{$k}}="{{$v}}"; +{{end}} +{{range $s := .Statements}} + "{{.Source}}" {{if .Target}}{{$.EdgeOperator}} "{{.Target}}" [ {{range $k, $v := .EdgeAttributes}}{{$k}}="{{$v}}", {{end}} weight={{.EdgeWeight}} ]{{else}}[ {{range $k, $v := .SourceAttributes}}{{$k}}="{{$v}}", {{end}} weight={{.SourceWeight}} ]{{end}}; +{{end}} +} +` + +type description struct { + GraphType string + Attributes map[string]string + EdgeOperator string + Statements []statement +} + +type statement struct { + Source interface{} + Target interface{} + SourceWeight int + SourceAttributes map[string]string + EdgeWeight int + EdgeAttributes map[string]string +} + +// DOT renders the given graph structure in DOT language into an io.Writer, for +// example a file. The generated output can be passed to Graphviz or other +// visualization tools supporting DOT. +// +// The following example renders a directed graph into a file my-graph.gv: +// +// g := graph.New(graph.IntHash, graph.Directed()) +// +// _ = g.AddVertex(1) +// _ = g.AddVertex(2) +// _ = g.AddVertex(3, graph.VertexAttribute("style", "filled"), graph.VertexAttribute("fillcolor", "red")) +// +// _ = g.AddEdge(1, 2, graph.EdgeWeight(10), graph.EdgeAttribute("color", "red")) +// _ = g.AddEdge(1, 3) +// +// file, _ := os.Create("./my-graph.gv") +// _ = draw.DOT(g, file) +// +// To generate an SVG from the created file using Graphviz, use a command such +// as the following: +// +// dot -Tsvg -O my-graph.gv +// +// Another possibility is to use os.Stdout as an io.Writer, print the DOT output +// to stdout, and pipe it as follows: +// +// go run main.go | dot -Tsvg > output.svg +// +// DOT also accepts the [GraphAttribute] functional option, which can be used to +// add global attributes when rendering the graph: +// +// _ = draw.DOT(g, file, draw.GraphAttribute("label", "my-graph")) +func DOT[K comparable, T any](g graph.Graph[K, T], w io.Writer, options ...func(*description)) error { + desc, err := generateDOT(g, options...) + if err != nil { + return fmt.Errorf("failed to generate DOT description: %w", err) + } + + return renderDOT(w, desc) +} + +// GraphAttribute is a functional option for the [DOT] method. 
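+//
+// For example, a global layout attribute can be set as follows (rankdir is a
+// standard Graphviz attribute; the value "LR" requests a left-to-right layout):
+//
+//	_ = draw.DOT(g, file, draw.GraphAttribute("rankdir", "LR"))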
+func GraphAttribute(key, value string) func(*description) {
+	return func(d *description) {
+		d.Attributes[key] = value
+	}
+}
+
+func generateDOT[K comparable, T any](g graph.Graph[K, T], options ...func(*description)) (description, error) {
+	desc := description{
+		GraphType:    "graph",
+		Attributes:   make(map[string]string),
+		EdgeOperator: "--",
+		Statements:   make([]statement, 0),
+	}
+
+	for _, option := range options {
+		option(&desc)
+	}
+
+	if g.Traits().IsDirected {
+		desc.GraphType = "digraph"
+		desc.EdgeOperator = "->"
+	}
+
+	adjacencyMap, err := g.AdjacencyMap()
+	if err != nil {
+		return desc, err
+	}
+
+	for vertex, adjacencies := range adjacencyMap {
+		_, sourceProperties, err := g.VertexWithProperties(vertex)
+		if err != nil {
+			return desc, err
+		}
+
+		stmt := statement{
+			Source:           vertex,
+			SourceWeight:     sourceProperties.Weight,
+			SourceAttributes: sourceProperties.Attributes,
+		}
+		desc.Statements = append(desc.Statements, stmt)
+
+		for adjacency, edge := range adjacencies {
+			stmt := statement{
+				Source:         vertex,
+				Target:         adjacency,
+				EdgeWeight:     edge.Properties.Weight,
+				EdgeAttributes: edge.Properties.Attributes,
+			}
+			desc.Statements = append(desc.Statements, stmt)
+		}
+	}
+
+	return desc, nil
+}
+
+func renderDOT(w io.Writer, d description) error {
+	tpl, err := template.New("dotTemplate").Parse(dotTemplate)
+	if err != nil {
+		return fmt.Errorf("failed to parse template: %w", err)
+	}
+
+	return tpl.Execute(w, d)
+}
diff --git a/vendor/github.com/dominikbraun/graph/graph.go b/vendor/github.com/dominikbraun/graph/graph.go
new file mode 100644
index 0000000..9376eb5
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/graph.go
@@ -0,0 +1,387 @@
+// Package graph is a library for creating generic graph data structures and
+// modifying, analyzing, and visualizing them.
+//
+// # Hashes
+//
+// A graph consists of vertices of type T, which are identified by a hash value
+// of type K. The hash value for a given vertex is obtained using the hashing
+// function passed to [New]. A hashing function takes a T and returns a K.
+//
+// For primitive types like integers, you may use a predefined hashing function
+// such as [IntHash] – a function that takes an integer and uses that integer as
+// the hash value at the same time:
+//
+//	g := graph.New(graph.IntHash)
+//
+// For storing custom data types, you need to provide your own hashing function.
+// This example takes a City instance and returns its name as the hash value:
+//
+//	cityHash := func(c City) string {
+//		return c.Name
+//	}
+//
+// Creating a graph using this hashing function will yield a graph of vertices
+// of type City identified by hash values of type string.
+//
+//	g := graph.New(cityHash)
+//
+// # Operations
+//
+// Adding vertices to a graph of integers is simple. [graph.Graph.AddVertex]
+// takes a vertex and adds it to the graph.
+//
+//	g := graph.New(graph.IntHash)
+//
+//	_ = g.AddVertex(1)
+//	_ = g.AddVertex(2)
+//
+// Most functions accept and return only hash values instead of entire instances
+// of the vertex type T. For example, [graph.Graph.AddEdge] creates an edge
+// between two vertices and accepts the hash values of those vertices. Because
+// this graph uses the [IntHash] hashing function, the vertex values and hash
+// values are the same.
+//
+//	_ = g.AddEdge(1, 2)
+//
+// All operations that modify the graph itself are methods of [Graph]. All other
+// operations are top-level functions of this library.
+//
+// For detailed usage examples, take a look at the README.
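+//
+// As a compact sketch that ties the operations above together (the City type
+// and its values are assumed for illustration; they are not defined by this
+// package):
+//
+//	g := graph.New(cityHash, graph.Directed())
+//	_ = g.AddVertex(City{Name: "London"})
+//	_ = g.AddVertex(City{Name: "Paris"})
+//	_ = g.AddEdge("London", "Paris", graph.EdgeWeight(343))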
+package graph
+
+import "errors"
+
+var (
+	ErrVertexNotFound      = errors.New("vertex not found")
+	ErrVertexAlreadyExists = errors.New("vertex already exists")
+	ErrEdgeNotFound        = errors.New("edge not found")
+	ErrEdgeAlreadyExists   = errors.New("edge already exists")
+	ErrEdgeCreatesCycle    = errors.New("edge would create a cycle")
+	ErrVertexHasEdges      = errors.New("vertex has edges")
+)
+
+// Graph represents a generic graph data structure consisting of vertices of
+// type T identified by a hash of type K.
+type Graph[K comparable, T any] interface {
+	// Traits returns the graph's traits. Those traits must be set when creating
+	// a graph using New.
+	Traits() *Traits
+
+	// AddVertex creates a new vertex in the graph. If the vertex already exists
+	// in the graph, ErrVertexAlreadyExists will be returned.
+	//
+	// AddVertex accepts a variety of functional options to set further vertex
+	// details such as the weight or an attribute:
+	//
+	//	_ = g.AddVertex("A", graph.VertexWeight(4), graph.VertexAttribute("label", "my-label"))
+	//
+	AddVertex(value T, options ...func(*VertexProperties)) error
+
+	// AddVerticesFrom adds all vertices along with their properties from the
+	// given graph to the receiving graph.
+	//
+	// All vertices will be added until an error occurs. If one of the vertices
+	// already exists, ErrVertexAlreadyExists will be returned.
+	AddVerticesFrom(g Graph[K, T]) error
+
+	// Vertex returns the vertex with the given hash or ErrVertexNotFound if it
+	// doesn't exist.
+	Vertex(hash K) (T, error)
+
+	// VertexWithProperties returns the vertex with the given hash along with
+	// its properties or ErrVertexNotFound if it doesn't exist.
+	VertexWithProperties(hash K) (T, VertexProperties, error)
+
+	// RemoveVertex removes the vertex with the given hash value from the graph.
+	//
+	// The vertex is not allowed to have edges and thus must be disconnected.
+	// Potential edges must be removed first. Otherwise, ErrVertexHasEdges will
+	// be returned. If the vertex doesn't exist, ErrVertexNotFound is returned.
+	RemoveVertex(hash K) error
+
+	// AddEdge creates an edge between the source and the target vertex.
+	//
+	// If either vertex cannot be found, ErrVertexNotFound will be returned. If
+	// the edge already exists, ErrEdgeAlreadyExists will be returned. If cycle
+	// prevention has been activated using PreventCycles and if adding the edge
+	// would create a cycle, ErrEdgeCreatesCycle will be returned.
+	//
+	// AddEdge accepts functional options to set further edge properties such as
+	// the weight or an attribute:
+	//
+	//	_ = g.AddEdge("A", "B", graph.EdgeWeight(4), graph.EdgeAttribute("label", "my-label"))
+	//
+	AddEdge(sourceHash, targetHash K, options ...func(*EdgeProperties)) error
+
+	// AddEdgesFrom adds all edges along with their properties from the given
+	// graph to the receiving graph.
+	//
+	// All vertices that the edges are joining have to exist already. If needed,
+	// these vertices can be added using AddVerticesFrom first. Depending on the
+	// situation, it also might make sense to clone the entire original graph.
+	AddEdgesFrom(g Graph[K, T]) error
+
+	// Edge returns the edge joining two given vertices or ErrEdgeNotFound if
+	// the edge doesn't exist. In an undirected graph, an edge with swapped
+	// source and target vertices does match.
+	Edge(sourceHash, targetHash K) (Edge[T], error)
+
+	// Edges returns a slice of all edges in the graph.
These edges are of type + // Edge[K] and hence will contain the vertex hashes, not the vertex values. + Edges() ([]Edge[K], error) + + // UpdateEdge updates the edge joining the two given vertices with the data + // provided in the given functional options. Valid functional options are: + // - EdgeWeight: Sets a new weight for the edge properties. + // - EdgeAttribute: Adds a new attribute to the edge properties. + // - EdgeAttributes: Sets a new attributes map for the edge properties. + // - EdgeData: Sets a new Data field for the edge properties. + // + // UpdateEdge accepts the same functional options as AddEdge. For example, + // setting the weight of an edge (A,B) to 10 would look as follows: + // + // _ = g.UpdateEdge("A", "B", graph.EdgeWeight(10)) + // + // Removing a particular edge attribute is not possible at the moment. A + // workaround is to create a new map without the respective element and + // overwrite the existing attributes using the EdgeAttributes option. + UpdateEdge(source, target K, options ...func(properties *EdgeProperties)) error + + // RemoveEdge removes the edge between the given source and target vertices. + // If the edge cannot be found, ErrEdgeNotFound will be returned. + RemoveEdge(source, target K) error + + // AdjacencyMap computes an adjacency map with all vertices in the graph. + // + // There is an entry for each vertex. Each of those entries is another map + // whose keys are the hash values of the adjacent vertices. The value is an + // Edge instance that stores the source and target hash values along with + // the edge metadata. + // + // For a directed graph with two edges AB and AC, AdjacencyMap would return + // the following map: + // + // map[string]map[string]Edge[string]{ + // "A": map[string]Edge[string]{ + // "B": {Source: "A", Target: "B"}, + // "C": {Source: "A", Target: "C"}, + // }, + // "B": map[string]Edge[string]{}, + // "C": map[string]Edge[string]{}, + // } + // + // This design makes AdjacencyMap suitable for a wide variety of algorithms. + AdjacencyMap() (map[K]map[K]Edge[K], error) + + // PredecessorMap computes a predecessor map with all vertices in the graph. + // + // It has the same map layout and does the same thing as AdjacencyMap, but + // for ingoing instead of outgoing edges of each vertex. + // + // For a directed graph with two edges AB and AC, PredecessorMap would + // return the following map: + // + // map[string]map[string]Edge[string]{ + // "A": map[string]Edge[string]{}, + // "B": map[string]Edge[string]{ + // "A": {Source: "A", Target: "B"}, + // }, + // "C": map[string]Edge[string]{ + // "A": {Source: "A", Target: "C"}, + // }, + // } + // + // For an undirected graph, PredecessorMap is the same as AdjacencyMap. This + // is because there is no distinction between "outgoing" and "ingoing" edges + // in an undirected graph. + PredecessorMap() (map[K]map[K]Edge[K], error) + + // Clone creates a deep copy of the graph and returns that cloned graph. + // + // The cloned graph will use the default in-memory store for storing the + // vertices and edges. If you want to utilize a custom store instead, create + // a new graph using NewWithStore and use AddVerticesFrom and AddEdgesFrom. + Clone() (Graph[K, T], error) + + // Order returns the number of vertices in the graph. + Order() (int, error) + + // Size returns the number of edges in the graph. + Size() (int, error) +} + +// Edge represents an edge that joins two vertices. 
Even though these edges are +// always referred to as source and target, whether the graph is directed or not +// is determined by its traits. +type Edge[T any] struct { + Source T + Target T + Properties EdgeProperties +} + +// EdgeProperties represents a set of properties that each edge possesses. They +// can be set when adding a new edge using the corresponding functional options: +// +// g.AddEdge("A", "B", graph.EdgeWeight(2), graph.EdgeAttribute("color", "red")) +// +// The example above will create an edge with a weight of 2 and an attribute +// "color" with value "red". +type EdgeProperties struct { + Attributes map[string]string + Weight int + Data any +} + +// Hash is a hashing function that takes a vertex of type T and returns a hash +// value of type K. +// +// Every graph has a hashing function and uses that function to retrieve the +// hash values of its vertices. You can either use one of the predefined hashing +// functions or provide your own one for custom data types: +// +// cityHash := func(c City) string { +// return c.Name +// } +// +// The cityHash function returns the city name as a hash value. The types of T +// and K, in this case City and string, also define the types of the graph. +type Hash[K comparable, T any] func(T) K + +// New creates a new graph with vertices of type T, identified by hash values of +// type K. These hash values will be obtained using the provided hash function. +// +// The graph will use the default in-memory store for persisting vertices and +// edges. To use a different [Store], use [NewWithStore]. +func New[K comparable, T any](hash Hash[K, T], options ...func(*Traits)) Graph[K, T] { + return NewWithStore(hash, newMemoryStore[K, T](), options...) +} + +// NewWithStore creates a new graph same as [New] but uses the provided store +// instead of the default memory store. +func NewWithStore[K comparable, T any](hash Hash[K, T], store Store[K, T], options ...func(*Traits)) Graph[K, T] { + var p Traits + + for _, option := range options { + option(&p) + } + + if p.IsDirected { + return newDirected(hash, &p, store) + } + + return newUndirected(hash, &p, store) +} + +// NewLike creates a graph that is "like" the given graph: It has the same type, +// the same hashing function, and the same traits. The new graph is independent +// of the original graph and uses the default in-memory storage. +// +// g := graph.New(graph.IntHash, graph.Directed()) +// h := graph.NewLike(g) +// +// In the example above, h is a new directed graph of integers derived from g. +func NewLike[K comparable, T any](g Graph[K, T]) Graph[K, T] { + copyTraits := func(t *Traits) { + t.IsDirected = g.Traits().IsDirected + t.IsAcyclic = g.Traits().IsAcyclic + t.IsWeighted = g.Traits().IsWeighted + t.IsRooted = g.Traits().IsRooted + t.PreventCycles = g.Traits().PreventCycles + } + + var hash Hash[K, T] + + if g.Traits().IsDirected { + hash = g.(*directed[K, T]).hash + } else { + hash = g.(*undirected[K, T]).hash + } + + return New(hash, copyTraits) +} + +// StringHash is a hashing function that accepts a string and uses that exact +// string as a hash value. Using it as Hash will yield a Graph[string, string]. +func StringHash(v string) string { + return v +} + +// IntHash is a hashing function that accepts an integer and uses that exact +// integer as a hash value. Using it as Hash will yield a Graph[int, int]. +func IntHash(v int) int { + return v +} + +// EdgeWeight returns a function that sets the weight of an edge to the given +// weight. 
+// This is a functional option for the [graph.Graph.Edge] and
+// [graph.Graph.AddEdge] methods.
+func EdgeWeight(weight int) func(*EdgeProperties) {
+	return func(e *EdgeProperties) {
+		e.Weight = weight
+	}
+}
+
+// EdgeAttribute returns a function that adds the given key-value pair to the
+// attributes of an edge. This is a functional option for the [graph.Graph.Edge]
+// and [graph.Graph.AddEdge] methods.
+func EdgeAttribute(key, value string) func(*EdgeProperties) {
+	return func(e *EdgeProperties) {
+		e.Attributes[key] = value
+	}
+}
+
+// EdgeAttributes returns a function that sets the given map as the attributes
+// of an edge. This is a functional option for the [graph.Graph.AddEdge] and
+// [graph.Graph.UpdateEdge] methods.
+func EdgeAttributes(attributes map[string]string) func(*EdgeProperties) {
+	return func(e *EdgeProperties) {
+		e.Attributes = attributes
+	}
+}
+
+// EdgeData returns a function that sets the data of an edge to the given value.
+// This is a functional option for the [graph.Graph.Edge] and
+// [graph.Graph.AddEdge] methods.
+func EdgeData(data any) func(*EdgeProperties) {
+	return func(e *EdgeProperties) {
+		e.Data = data
+	}
+}
+
+// VertexProperties represents a set of properties that each vertex has. They
+// can be set when adding a vertex using the corresponding functional options:
+//
+//	_ = g.AddVertex("A", graph.VertexWeight(2), graph.VertexAttribute("color", "red"))
+//
+// The example above will create a vertex with a weight of 2 and an attribute
+// "color" with value "red".
+type VertexProperties struct {
+	Attributes map[string]string
+	Weight     int
+}
+
+// VertexWeight returns a function that sets the weight of a vertex to the given
+// weight. This is a functional option for the [graph.Graph.Vertex] and
+// [graph.Graph.AddVertex] methods.
+func VertexWeight(weight int) func(*VertexProperties) {
+	return func(e *VertexProperties) {
+		e.Weight = weight
+	}
+}
+
+// VertexAttribute returns a function that adds the given key-value pair to the
+// vertex attributes. This is a functional option for the [graph.Graph.Vertex]
+// and [graph.Graph.AddVertex] methods.
+func VertexAttribute(key, value string) func(*VertexProperties) {
+	return func(e *VertexProperties) {
+		e.Attributes[key] = value
+	}
+}
+
+// VertexAttributes returns a function that sets the given map as the attributes
+// of a vertex. This is a functional option for the [graph.Graph.AddVertex] method.
+func VertexAttributes(attributes map[string]string) func(*VertexProperties) {
+	return func(e *VertexProperties) {
+		e.Attributes = attributes
+	}
+}
diff --git a/vendor/github.com/dominikbraun/graph/paths.go b/vendor/github.com/dominikbraun/graph/paths.go
new file mode 100644
index 0000000..0bb3c68
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/paths.go
@@ -0,0 +1,339 @@
+package graph
+
+import (
+	"errors"
+	"fmt"
+	"math"
+)
+
+var ErrTargetNotReachable = errors.New("target vertex not reachable from source")
+
+// CreatesCycle determines whether adding an edge between the two given vertices
+// would introduce a cycle in the graph. CreatesCycle will not create an edge.
+//
+// A potential edge would create a cycle if the target vertex is also a parent
+// of the source vertex. In order to determine this, CreatesCycle runs a DFS.
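+//
+// For example, in a directed graph with the edges (1,2) and (2,3), the edge
+// (3,1) would close a cycle:
+//
+//	createsCycle, _ := graph.CreatesCycle(g, 3, 1)
+//	// createsCycle is true, because 1 -> 2 -> 3 -> 1 would form a cycle.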
+func CreatesCycle[K comparable, T any](g Graph[K, T], source, target K) (bool, error) {
+	if _, err := g.Vertex(source); err != nil {
+		return false, fmt.Errorf("could not get vertex with hash %v: %w", source, err)
+	}
+
+	if _, err := g.Vertex(target); err != nil {
+		return false, fmt.Errorf("could not get vertex with hash %v: %w", target, err)
+	}
+
+	if source == target {
+		return true, nil
+	}
+
+	predecessorMap, err := g.PredecessorMap()
+	if err != nil {
+		return false, fmt.Errorf("failed to get predecessor map: %w", err)
+	}
+
+	stack := make([]K, 0)
+	visited := make(map[K]bool)
+
+	stack = append(stack, source)
+
+	for len(stack) > 0 {
+		currentHash := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+
+		if _, ok := visited[currentHash]; !ok {
+			// If the adjacent vertex also is the target vertex, the target is a
+			// parent of the source vertex. An edge would introduce a cycle.
+			if currentHash == target {
+				return true, nil
+			}
+
+			visited[currentHash] = true
+
+			for adjacency := range predecessorMap[currentHash] {
+				stack = append(stack, adjacency)
+			}
+		}
+	}
+
+	return false, nil
+}
+
+// ShortestPath computes the shortest path between a source and a target vertex
+// under consideration of the edge weights. It returns a slice of hash values of
+// the vertices forming that path.
+//
+// The returned path includes the source and target vertices. If the target is
+// not reachable from the source, ErrTargetNotReachable will be returned. Should
+// there be multiple shortest paths, an arbitrary one will be returned.
+//
+// ShortestPath has a time complexity of O(|V|+|E|log(|V|)).
+func ShortestPath[K comparable, T any](g Graph[K, T], source, target K) ([]K, error) {
+	weights := make(map[K]float64)
+	visited := make(map[K]bool)
+
+	weights[source] = 0
+	visited[target] = true
+
+	queue := newPriorityQueue[K]()
+	adjacencyMap, err := g.AdjacencyMap()
+	if err != nil {
+		return nil, fmt.Errorf("could not get adjacency map: %w", err)
+	}
+
+	for hash := range adjacencyMap {
+		if hash != source {
+			weights[hash] = math.Inf(1)
+			visited[hash] = false
+		}
+
+		queue.Push(hash, weights[hash])
+	}
+
+	// bestPredecessors stores the cheapest or least-weighted predecessor for
+	// each vertex. Given an edge AC with weight=4 and an edge BC with weight=2,
+	// the cheapest predecessor for C is B.
+	bestPredecessors := make(map[K]K)
+
+	for queue.Len() > 0 {
+		vertex, _ := queue.Pop()
+		hasInfiniteWeight := math.IsInf(weights[vertex], 1)
+
+		for adjacency, edge := range adjacencyMap[vertex] {
+			edgeWeight := edge.Properties.Weight
+
+			// Setting the weight to 1 is required for unweighted graphs whose
+			// edge weights are 0. Otherwise, all paths would have a sum of 0
+			// and a random path would be returned.
+			if !g.Traits().IsWeighted {
+				edgeWeight = 1
+			}
+
+			weight := weights[vertex] + float64(edgeWeight)
+
+			if weight < weights[adjacency] && !hasInfiniteWeight {
+				weights[adjacency] = weight
+				bestPredecessors[adjacency] = vertex
+				queue.UpdatePriority(adjacency, weight)
+			}
+		}
+	}
+
+	path := []K{target}
+	current := target
+
+	for current != source {
+		// If the current vertex is not present in bestPredecessors, current is
+		// set to the zero value of K. Without this check, this would lead to an
+		// endless prepending of zero values to the path. Also, the target would
+		// not be reachable from one of the preceding vertices.
+		if _, ok := bestPredecessors[current]; !ok {
+			return nil, ErrTargetNotReachable
+		}
+		current = bestPredecessors[current]
+		path = append([]K{current}, path...)
+	}
+
+	return path, nil
+}
+
+type sccState[K comparable] struct {
+	adjacencyMap map[K]map[K]Edge[K]
+	components   [][]K
+	stack        []K
+	onStack      map[K]bool
+	visited      map[K]struct{}
+	lowlink      map[K]int
+	index        map[K]int
+	time         int
+}
+
+// StronglyConnectedComponents detects all strongly connected components within
+// the graph and returns the hashes of the vertices shaping these components, so
+// each component is represented by a []K.
+//
+// StronglyConnectedComponents can only run on directed graphs.
+func StronglyConnectedComponents[K comparable, T any](g Graph[K, T]) ([][]K, error) {
+	if !g.Traits().IsDirected {
+		return nil, errors.New("SCCs can only be detected in directed graphs")
+	}
+
+	adjacencyMap, err := g.AdjacencyMap()
+	if err != nil {
+		return nil, fmt.Errorf("could not get adjacency map: %w", err)
+	}
+
+	state := &sccState[K]{
+		adjacencyMap: adjacencyMap,
+		components:   make([][]K, 0),
+		stack:        make([]K, 0),
+		onStack:      make(map[K]bool),
+		visited:      make(map[K]struct{}),
+		lowlink:      make(map[K]int),
+		index:        make(map[K]int),
+	}
+
+	for hash := range state.adjacencyMap {
+		if _, ok := state.visited[hash]; !ok {
+			findSCC(hash, state)
+		}
+	}
+
+	return state.components, nil
+}
+
+func findSCC[K comparable](vertexHash K, state *sccState[K]) {
+	state.stack = append(state.stack, vertexHash)
+	state.onStack[vertexHash] = true
+	state.visited[vertexHash] = struct{}{}
+	state.index[vertexHash] = state.time
+	state.lowlink[vertexHash] = state.time
+
+	state.time++
+
+	for adjacency := range state.adjacencyMap[vertexHash] {
+		if _, ok := state.visited[adjacency]; !ok {
+			findSCC(adjacency, state)
+
+			smallestLowlink := math.Min(
+				float64(state.lowlink[vertexHash]),
+				float64(state.lowlink[adjacency]),
+			)
+			state.lowlink[vertexHash] = int(smallestLowlink)
+		} else {
+			// If the adjacent vertex already is on the stack, the edge joining
+			// the current and the adjacent vertex is a back edge. Therefore, the
+			// lowlink value of the vertex has to be updated to the index of the
+			// adjacent vertex if it is smaller than the current lowlink value.
+			if state.onStack[adjacency] {
+				smallestLowlink := math.Min(
+					float64(state.lowlink[vertexHash]),
+					float64(state.index[adjacency]),
+				)
+				state.lowlink[vertexHash] = int(smallestLowlink)
+			}
+		}
+	}
+
+	// If the lowlink value of the vertex is equal to its DFS value, this is the
+	// head vertex of a strongly connected component that's shaped by the vertex
+	// and all vertices on the stack.
+	if state.lowlink[vertexHash] == state.index[vertexHash] {
+		var hash K
+		var component []K
+
+		for hash != vertexHash {
+			hash = state.stack[len(state.stack)-1]
+			state.stack = state.stack[:len(state.stack)-1]
+			state.onStack[hash] = false
+
+			component = append(component, hash)
+		}
+
+		state.components = append(state.components, component)
+	}
+}
+
+// AllPathsBetween computes and returns all paths between two given vertices. A
+// path is represented as a slice of vertex hashes. The returned slice contains
+// these paths.
+//
+// AllPathsBetween utilizes a non-recursive, stack-based implementation. It has
+// an estimated runtime complexity of O(n^2) where n is the number of vertices.
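+//
+// For example, in a directed diamond graph with the edges (1,2), (1,3), (2,4),
+// and (3,4), there are exactly two paths from 1 to 4:
+//
+//	paths, _ := graph.AllPathsBetween(g, 1, 4)
+//	// paths contains [1 2 4] and [1 3 4], in no particular order.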
+func AllPathsBetween[K comparable, T any](g Graph[K, T], start, end K) ([][]K, error) { + adjacencyMap, err := g.AdjacencyMap() + if err != nil { + return nil, err + } + + // The algorithm used relies on stacks instead of recursion. It is described + // here: https://boycgit.github.io/all-paths-between-two-vertex/ + mainStack := newStack[K]() + viceStack := newStack[stack[K]]() + + checkEmpty := func() error { + if mainStack.isEmpty() || viceStack.isEmpty() { + return errors.New("empty stack") + } + return nil + } + + buildLayer := func(element K) { + mainStack.push(element) + + newElements := newStack[K]() + for e := range adjacencyMap[element] { + var contains bool + mainStack.forEach(func(k K) { + if e == k { + contains = true + } + }) + if contains { + continue + } + newElements.push(e) + } + viceStack.push(newElements) + } + + buildStack := func() error { + if err = checkEmpty(); err != nil { + return fmt.Errorf("unable to build stack: %w", err) + } + + elements, _ := viceStack.top() + + for !elements.isEmpty() { + element, _ := elements.pop() + buildLayer(element) + elements, _ = viceStack.top() + } + + return nil + } + + removeLayer := func() error { + if err = checkEmpty(); err != nil { + return fmt.Errorf("unable to remove layer: %w", err) + } + + if e, _ := viceStack.top(); !e.isEmpty() { + return errors.New("the top element of vice-stack is not empty") + } + + _, _ = mainStack.pop() + _, _ = viceStack.pop() + + return nil + } + + buildLayer(start) + + allPaths := make([][]K, 0) + + for !mainStack.isEmpty() { + v, _ := mainStack.top() + adjs, _ := viceStack.top() + + if adjs.isEmpty() { + if v == end { + path := make([]K, 0) + mainStack.forEach(func(k K) { + path = append(path, k) + }) + allPaths = append(allPaths, path) + } + + err = removeLayer() + if err != nil { + return nil, err + } + } else { + if err = buildStack(); err != nil { + return nil, err + } + } + } + + return allPaths, nil +} diff --git a/vendor/github.com/dominikbraun/graph/sets.go b/vendor/github.com/dominikbraun/graph/sets.go new file mode 100644 index 0000000..a4aedc2 --- /dev/null +++ b/vendor/github.com/dominikbraun/graph/sets.go @@ -0,0 +1,124 @@ +package graph + +import ( + "fmt" +) + +// Union combines two given graphs into a new graph. The vertex hashes in both +// graphs are expected to be unique. The two input graphs will remain unchanged. +// +// Both graphs should be either directed or undirected. All traits for the new +// graph will be derived from g. 
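+//
+// A minimal sketch, assuming g and h are directed graphs of integers with
+// disjoint vertex sets:
+//
+//	union, err := graph.Union(g, h)
+//	if err != nil {
+//		// Either a vertex or an edge could not be added to the union.
+//	}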
+func Union[K comparable, T any](g, h Graph[K, T]) (Graph[K, T], error) {
+	union, err := g.Clone()
+	if err != nil {
+		return union, fmt.Errorf("failed to clone g: %w", err)
+	}
+
+	adjacencyMap, err := h.AdjacencyMap()
+	if err != nil {
+		return union, fmt.Errorf("failed to get adjacency map: %w", err)
+	}
+
+	addedEdges := make(map[K]map[K]struct{})
+
+	for currentHash := range adjacencyMap {
+		vertex, properties, err := h.VertexWithProperties(currentHash) //nolint:govet
+		if err != nil {
+			return union, fmt.Errorf("failed to get vertex %v: %w", currentHash, err)
+		}
+
+		err = union.AddVertex(vertex, copyVertexProperties(properties))
+		if err != nil {
+			return union, fmt.Errorf("failed to add vertex %v: %w", currentHash, err)
+		}
+	}
+
+	for _, adjacencies := range adjacencyMap {
+		for _, edge := range adjacencies {
+			if _, sourceOK := addedEdges[edge.Source]; sourceOK {
+				if _, targetOK := addedEdges[edge.Source][edge.Target]; targetOK {
+					// If the edge addedEdges[source][target] exists, the edge
+					// has already been created and thus can be skipped here.
+					continue
+				}
+			}
+
+			err = union.AddEdge(copyEdge(edge))
+			if err != nil {
+				return union, fmt.Errorf("failed to add edge (%v, %v): %w", edge.Source, edge.Target, err)
+			}
+
+			if _, ok := addedEdges[edge.Source]; !ok {
+				addedEdges[edge.Source] = make(map[K]struct{})
+			}
+			addedEdges[edge.Source][edge.Target] = struct{}{}
+		}
+	}
+
+	return union, nil
+}
+
+// unionFind implements a union-find or disjoint set data structure that works
+// with vertex hashes as vertices. It's an internal helper type at the moment,
+// but could perhaps be exposed publicly in the future.
+//
+// unionFind is not related to the Union function.
+type unionFind[K comparable] struct {
+	parents map[K]K
+}
+
+func newUnionFind[K comparable](vertices ...K) *unionFind[K] {
+	u := &unionFind[K]{
+		parents: make(map[K]K, len(vertices)),
+	}
+
+	for _, vertex := range vertices {
+		u.parents[vertex] = vertex
+	}
+
+	return u
+}
+
+func (u *unionFind[K]) add(vertex K) {
+	u.parents[vertex] = vertex
+}
+
+func (u *unionFind[K]) union(vertex1, vertex2 K) {
+	root1 := u.find(vertex1)
+	root2 := u.find(vertex2)
+
+	if root1 == root2 {
+		return
+	}
+
+	u.parents[root2] = root1
+}
+
+func (u *unionFind[K]) find(vertex K) K {
+	root := vertex
+
+	for u.parents[root] != root {
+		root = u.parents[root]
+	}
+
+	// Perform a path compression in order to optimize future find calls.
+	current := vertex
+
+	for u.parents[current] != root {
+		parent := u.parents[current]
+		u.parents[current] = root
+		current = parent
+	}
+
+	return root
+}
+
+func copyVertexProperties(source VertexProperties) func(*VertexProperties) {
+	return func(p *VertexProperties) {
+		for k, v := range source.Attributes {
+			p.Attributes[k] = v
+		}
+		p.Weight = source.Weight
+	}
+}
diff --git a/vendor/github.com/dominikbraun/graph/store.go b/vendor/github.com/dominikbraun/graph/store.go
new file mode 100644
index 0000000..34af914
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/store.go
@@ -0,0 +1,276 @@
+package graph
+
+import (
+	"fmt"
+	"sync"
+)
+
+// Store represents a storage for vertices and edges. The graph library provides an in-memory store
+// by default and accepts any Store implementation to work with - for example, an SQL store.
+//
+// When implementing your own Store, make sure the individual methods and their behavior adhere to
+// this documentation. Otherwise, the graphs aren't guaranteed to behave as expected.
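+//
+// A custom Store implementation is plugged in using [NewWithStore]; for
+// example, with a hypothetical SQL-backed store:
+//
+//	store := newSQLStore() // hypothetical Store[string, string] implementation
+//	g := graph.NewWithStore(graph.StringHash, store)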
+type Store[K comparable, T any] interface {
+	// AddVertex should add the given vertex with the given hash value and vertex properties to the
+	// graph. If the vertex already exists, it is up to you whether ErrVertexAlreadyExists or no
+	// error should be returned.
+	AddVertex(hash K, value T, properties VertexProperties) error
+
+	// Vertex should return the vertex and vertex properties with the given hash value. If the
+	// vertex doesn't exist, ErrVertexNotFound should be returned.
+	Vertex(hash K) (T, VertexProperties, error)
+
+	// RemoveVertex should remove the vertex with the given hash value. If the vertex doesn't
+	// exist, ErrVertexNotFound should be returned. If the vertex has edges to other vertices,
+	// ErrVertexHasEdges should be returned.
+	RemoveVertex(hash K) error
+
+	// ListVertices should return all vertices in the graph in a slice.
+	ListVertices() ([]K, error)
+
+	// VertexCount should return the number of vertices in the graph. This should be equal to the
+	// length of the slice returned by ListVertices.
+	VertexCount() (int, error)
+
+	// AddEdge should add an edge between the vertices with the given source and target hashes.
+	//
+	// If either vertex doesn't exist, ErrVertexNotFound should be returned for the respective
+	// vertex. If the edge already exists, ErrEdgeAlreadyExists should be returned.
+	AddEdge(sourceHash, targetHash K, edge Edge[K]) error
+
+	// UpdateEdge should update the edge between the given vertices with the data of the given
+	// Edge instance. If the edge doesn't exist, ErrEdgeNotFound should be returned.
+	UpdateEdge(sourceHash, targetHash K, edge Edge[K]) error
+
+	// RemoveEdge should remove the edge between the vertices with the given source and target
+	// hashes.
+	//
+	// If either vertex doesn't exist, it is up to you whether ErrVertexNotFound or no error should
+	// be returned. If the edge doesn't exist, it is up to you whether ErrEdgeNotFound or no error
+	// should be returned.
+	RemoveEdge(sourceHash, targetHash K) error
+
+	// Edge should return the edge joining the vertices with the given hash values. It should
+	// exclusively look for an edge between the source and the target vertex, not vice versa. The
+	// graph implementation does this for undirected graphs itself.
+	//
+	// Note that unlike Graph.Edge, this function is supposed to return an Edge[K], i.e. an edge
+	// that only contains the vertex hashes instead of the vertices themselves.
+	//
+	// If the edge doesn't exist, ErrEdgeNotFound should be returned.
+	Edge(sourceHash, targetHash K) (Edge[K], error)
+
+	// ListEdges should return all edges in the graph in a slice.
+	ListEdges() ([]Edge[K], error)
+}
+
+type memoryStore[K comparable, T any] struct {
+	lock             sync.RWMutex
+	vertices         map[K]T
+	vertexProperties map[K]VertexProperties
+
+	// outEdges and inEdges store all outgoing and ingoing edges for all vertices. For O(1) access,
+	// these edges themselves are stored in maps whose keys are the hashes of the target vertices.
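+	// For example, an edge (A, B) added via AddEdge is stored both as
+	// outEdges["A"]["B"] and, mirrored for reverse lookups, as inEdges["B"]["A"].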
+	outEdges map[K]map[K]Edge[K] // source -> target
+	inEdges  map[K]map[K]Edge[K] // target -> source
+}
+
+func newMemoryStore[K comparable, T any]() Store[K, T] {
+	return &memoryStore[K, T]{
+		vertices:         make(map[K]T),
+		vertexProperties: make(map[K]VertexProperties),
+		outEdges:         make(map[K]map[K]Edge[K]),
+		inEdges:          make(map[K]map[K]Edge[K]),
+	}
+}
+
+func (s *memoryStore[K, T]) AddVertex(k K, t T, p VertexProperties) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if _, ok := s.vertices[k]; ok {
+		return ErrVertexAlreadyExists
+	}
+
+	s.vertices[k] = t
+	s.vertexProperties[k] = p
+
+	return nil
+}
+
+func (s *memoryStore[K, T]) ListVertices() ([]K, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	hashes := make([]K, 0, len(s.vertices))
+	for k := range s.vertices {
+		hashes = append(hashes, k)
+	}
+
+	return hashes, nil
+}
+
+func (s *memoryStore[K, T]) VertexCount() (int, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	return len(s.vertices), nil
+}
+
+func (s *memoryStore[K, T]) Vertex(k K) (T, VertexProperties, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	v, ok := s.vertices[k]
+	if !ok {
+		return v, VertexProperties{}, ErrVertexNotFound
+	}
+
+	p := s.vertexProperties[k]
+
+	return v, p, nil
+}
+
+func (s *memoryStore[K, T]) RemoveVertex(k K) error {
+	// RemoveVertex mutates the maps below, so it must take the write lock.
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if _, ok := s.vertices[k]; !ok {
+		return ErrVertexNotFound
+	}
+
+	if edges, ok := s.inEdges[k]; ok {
+		if len(edges) > 0 {
+			return ErrVertexHasEdges
+		}
+		delete(s.inEdges, k)
+	}
+
+	if edges, ok := s.outEdges[k]; ok {
+		if len(edges) > 0 {
+			return ErrVertexHasEdges
+		}
+		delete(s.outEdges, k)
+	}
+
+	delete(s.vertices, k)
+	delete(s.vertexProperties, k)
+
+	return nil
+}
+
+func (s *memoryStore[K, T]) AddEdge(sourceHash, targetHash K, edge Edge[K]) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if _, ok := s.outEdges[sourceHash]; !ok {
+		s.outEdges[sourceHash] = make(map[K]Edge[K])
+	}
+
+	s.outEdges[sourceHash][targetHash] = edge
+
+	if _, ok := s.inEdges[targetHash]; !ok {
+		s.inEdges[targetHash] = make(map[K]Edge[K])
+	}
+
+	s.inEdges[targetHash][sourceHash] = edge
+
+	return nil
+}
+
+func (s *memoryStore[K, T]) UpdateEdge(sourceHash, targetHash K, edge Edge[K]) error {
+	if _, err := s.Edge(sourceHash, targetHash); err != nil {
+		return err
+	}
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.outEdges[sourceHash][targetHash] = edge
+	s.inEdges[targetHash][sourceHash] = edge
+
+	return nil
+}
+
+func (s *memoryStore[K, T]) RemoveEdge(sourceHash, targetHash K) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	delete(s.inEdges[targetHash], sourceHash)
+	delete(s.outEdges[sourceHash], targetHash)
+	return nil
+}
+
+func (s *memoryStore[K, T]) Edge(sourceHash, targetHash K) (Edge[K], error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	sourceEdges, ok := s.outEdges[sourceHash]
+	if !ok {
+		return Edge[K]{}, ErrEdgeNotFound
+	}
+
+	edge, ok := sourceEdges[targetHash]
+	if !ok {
+		return Edge[K]{}, ErrEdgeNotFound
+	}
+
+	return edge, nil
+}
+
+func (s *memoryStore[K, T]) ListEdges() ([]Edge[K], error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	res := make([]Edge[K], 0)
+	for _, edges := range s.outEdges {
+		for _, edge := range edges {
+			res = append(res, edge)
+		}
+	}
+	return res, nil
+}
+
+// CreatesCycle is a fastpath version of [CreatesCycle] that avoids calling
+// [PredecessorMap], which generates large amounts of garbage to collect.
+//
+// Because CreatesCycle doesn't need to modify the PredecessorMap, we can use
+// inEdges instead to compute the same thing without creating any copies.
+func (s *memoryStore[K, T]) CreatesCycle(source, target K) (bool, error) {
+	if _, _, err := s.Vertex(source); err != nil {
+		return false, fmt.Errorf("could not get vertex with hash %v: %w", source, err)
+	}
+
+	if _, _, err := s.Vertex(target); err != nil {
+		return false, fmt.Errorf("could not get vertex with hash %v: %w", target, err)
+	}
+
+	if source == target {
+		return true, nil
+	}
+
+	stack := make([]K, 0)
+	visited := make(map[K]struct{})
+
+	stack = append(stack, source)
+	for len(stack) > 0 {
+		currentHash := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+
+		if _, ok := visited[currentHash]; !ok {
+			// If the adjacent vertex also is the target vertex, the target is a
+			// parent of the source vertex. An edge would introduce a cycle.
+			if currentHash == target {
+				return true, nil
+			}
+
+			visited[currentHash] = struct{}{}
+
+			for adjacency := range s.inEdges[currentHash] {
+				stack = append(stack, adjacency)
+			}
+		}
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/dominikbraun/graph/traits.go b/vendor/github.com/dominikbraun/graph/traits.go
new file mode 100644
index 0000000..11b7357
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/traits.go
@@ -0,0 +1,63 @@
+package graph
+
+// Traits represents a set of graph traits and types, such as directedness or acyclicity. These
+// traits can be set when creating a graph by passing the corresponding functional options, for
+// example:
+//
+//	g := graph.New(graph.IntHash, graph.Directed())
+//
+// This will set the IsDirected field to true.
+type Traits struct {
+	IsDirected    bool
+	IsAcyclic     bool
+	IsWeighted    bool
+	IsRooted      bool
+	PreventCycles bool
+}
+
+// Directed creates a directed graph. This has implications on graph traversal and the order of
+// arguments of the Edge and AddEdge functions.
+func Directed() func(*Traits) {
+	return func(t *Traits) {
+		t.IsDirected = true
+	}
+}
+
+// Acyclic creates an acyclic graph. Note that creating edges that form a cycle will still be
+// possible. To prevent this explicitly, use PreventCycles.
+func Acyclic() func(*Traits) {
+	return func(t *Traits) {
+		t.IsAcyclic = true
+	}
+}
+
+// Weighted creates a weighted graph. To set weights, use the Edge and AddEdge functions.
+func Weighted() func(*Traits) {
+	return func(t *Traits) {
+		t.IsWeighted = true
+	}
+}
+
+// Rooted creates a rooted graph. This is particularly common for building tree data structures.
+func Rooted() func(*Traits) {
+	return func(t *Traits) {
+		t.IsRooted = true
+	}
+}
+
+// Tree is an alias for Acyclic and Rooted, since most trees in Computer Science are rooted trees.
+func Tree() func(*Traits) {
+	return func(t *Traits) {
+		Acyclic()(t)
+		Rooted()(t)
+	}
+}
+
+// PreventCycles creates an acyclic graph and proactively prevents the creation of cycles. These
+// cycle checks affect the performance and complexity of operations such as AddEdge.
func PreventCycles() func(*Traits) {
+	return func(t *Traits) {
+		Acyclic()(t)
+		t.PreventCycles = true
+	}
+}
diff --git a/vendor/github.com/dominikbraun/graph/traversal.go b/vendor/github.com/dominikbraun/graph/traversal.go
new file mode 100644
index 0000000..73f704b
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/traversal.go
@@ -0,0 +1,157 @@
+package graph
+
+import "fmt"
+
+// DFS performs a depth-first search on the graph, starting from the given vertex.
+// The visit function will be invoked with the hash of the vertex currently visited. If it returns
+// false, DFS will continue traversing the graph, and if it returns true, the traversal will be
+// stopped. In case the graph is disconnected, only the vertices joined with the starting vertex
+// are visited.
+//
+// This example prints all vertices of the graph in DFS-order:
+//
+//	g := graph.New(graph.IntHash)
+//
+//	_ = g.AddVertex(1)
+//	_ = g.AddVertex(2)
+//	_ = g.AddVertex(3)
+//
+//	_ = g.AddEdge(1, 2)
+//	_ = g.AddEdge(2, 3)
+//	_ = g.AddEdge(3, 1)
+//
+//	_ = graph.DFS(g, 1, func(value int) bool {
+//		fmt.Println(value)
+//		return false
+//	})
+//
+// Similarly, if you have a graph of City vertices and the traversal should stop at London, the
+// visit function would look as follows:
+//
+//	func(c City) bool {
+//		return c.Name == "London"
+//	}
+//
+// DFS is non-recursive and maintains a stack instead.
+func DFS[K comparable, T any](g Graph[K, T], start K, visit func(K) bool) error {
+	adjacencyMap, err := g.AdjacencyMap()
+	if err != nil {
+		return fmt.Errorf("could not get adjacency map: %w", err)
+	}
+
+	if _, ok := adjacencyMap[start]; !ok {
+		return fmt.Errorf("could not find start vertex with hash %v", start)
+	}
+
+	stack := make([]K, 0)
+	visited := make(map[K]bool)
+
+	stack = append(stack, start)
+
+	for len(stack) > 0 {
+		currentHash := stack[len(stack)-1]
+
+		stack = stack[:len(stack)-1]
+
+		if _, ok := visited[currentHash]; !ok {
+			// Stop traversing the graph if the visit function returns true.
+			if stop := visit(currentHash); stop {
+				break
+			}
+			visited[currentHash] = true
+
+			for adjacency := range adjacencyMap[currentHash] {
+				stack = append(stack, adjacency)
+			}
+		}
+	}
+
+	return nil
+}
+
+// BFS performs a breadth-first search on the graph, starting from the given vertex. The visit
+// function will be invoked with the hash of the vertex currently visited. If it returns false, BFS
+// will continue traversing the graph, and if it returns true, the traversal will be stopped. In
+// case the graph is disconnected, only the vertices joined with the starting vertex are visited.
+//
+// This example prints all vertices of the graph in BFS-order:
+//
+//	g := graph.New(graph.IntHash)
+//
+//	_ = g.AddVertex(1)
+//	_ = g.AddVertex(2)
+//	_ = g.AddVertex(3)
+//
+//	_ = g.AddEdge(1, 2)
+//	_ = g.AddEdge(2, 3)
+//	_ = g.AddEdge(3, 1)
+//
+//	_ = graph.BFS(g, 1, func(value int) bool {
+//		fmt.Println(value)
+//		return false
+//	})
+//
+// Similarly, if you have a graph of City vertices and the traversal should stop at London, the
+// visit function would look as follows:
+//
+//	func(c City) bool {
+//		return c.Name == "London"
+//	}
+//
+// BFS is non-recursive and maintains a queue instead.
+func BFS[K comparable, T any](g Graph[K, T], start K, visit func(K) bool) error {
+	ignoreDepth := func(vertex K, _ int) bool {
+		return visit(vertex)
+	}
+	return BFSWithDepth(g, start, ignoreDepth)
+}
+
+// BFSWithDepth works just as BFS and performs a breadth-first search on the graph, but its
+// visit function is passed the current depth level as a second argument. Consequently, the
+// current depth can be used for deciding whether or not to proceed past a certain depth.
+//
+//	_ = graph.BFSWithDepth(g, 1, func(value int, depth int) bool {
+//		fmt.Println(value)
+//		return depth > 3
+//	})
+//
+// With the visit function from the example, the BFS traversal will stop once a depth greater
+// than 3 is reached.
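+//
+// Note that, as implemented below, the depth counter increases once per visited
+// vertex rather than once per BFS level, so for branching graphs it is an upper
+// bound on the true level of the current vertex rather than its exact depth.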
+func BFSWithDepth[K comparable, T any](g Graph[K, T], start K, visit func(K, int) bool) error {
+	adjacencyMap, err := g.AdjacencyMap()
+	if err != nil {
+		return fmt.Errorf("could not get adjacency map: %w", err)
+	}
+
+	if _, ok := adjacencyMap[start]; !ok {
+		return fmt.Errorf("could not find start vertex with hash %v", start)
+	}
+
+	queue := make([]K, 0)
+	visited := make(map[K]bool)
+
+	visited[start] = true
+	queue = append(queue, start)
+	depth := 0
+
+	for len(queue) > 0 {
+		currentHash := queue[0]
+
+		queue = queue[1:]
+		depth++
+
+		// Stop traversing the graph if the visit function returns true.
+		if stop := visit(currentHash, depth); stop {
+			break
+		}
+
+		for adjacency := range adjacencyMap[currentHash] {
+			if _, ok := visited[adjacency]; !ok {
+				visited[adjacency] = true
+				queue = append(queue, adjacency)
+			}
+		}
+
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/dominikbraun/graph/trees.go b/vendor/github.com/dominikbraun/graph/trees.go
new file mode 100644
index 0000000..7201a41
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/trees.go
@@ -0,0 +1,82 @@
+package graph
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+)
+
+// MinimumSpanningTree returns a minimum spanning tree within the given graph.
+//
+// The MST contains all vertices from the given graph as well as the required
+// edges for building the MST. The original graph remains unchanged.
+func MinimumSpanningTree[K comparable, T any](g Graph[K, T]) (Graph[K, T], error) {
+	return spanningTree(g, false)
+}
+
+// MaximumSpanningTree returns a maximum spanning tree within the given graph.
+//
+// The tree contains all vertices from the given graph as well as the required
+// edges for building the tree. The original graph remains unchanged.
+func MaximumSpanningTree[K comparable, T any](g Graph[K, T]) (Graph[K, T], error) {
+	return spanningTree(g, true)
+}
+
+func spanningTree[K comparable, T any](g Graph[K, T], maximum bool) (Graph[K, T], error) {
+	if g.Traits().IsDirected {
+		return nil, errors.New("spanning trees can only be determined for undirected graphs")
+	}
+
+	adjacencyMap, err := g.AdjacencyMap()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get adjacency map: %w", err)
+	}
+
+	edges := make([]Edge[K], 0)
+	subtrees := newUnionFind[K]()
+
+	mst := NewLike(g)
+
+	for v, adjacencies := range adjacencyMap {
+		vertex, properties, err := g.VertexWithProperties(v) //nolint:govet
+		if err != nil {
+			return nil, fmt.Errorf("failed to get vertex %v: %w", v, err)
+		}
+
+		err = mst.AddVertex(vertex, copyVertexProperties(properties))
+		if err != nil {
+			return nil, fmt.Errorf("failed to add vertex %v: %w", v, err)
+		}
+
+		subtrees.add(v)
+
+		for _, edge := range adjacencies {
+			edges = append(edges, edge)
+		}
+	}
+
+	if maximum {
+		sort.Slice(edges, func(i, j int) bool {
+			return edges[i].Properties.Weight > edges[j].Properties.Weight
+		})
+	} else {
+		sort.Slice(edges, func(i, j int) bool {
+			return edges[i].Properties.Weight < edges[j].Properties.Weight
+		})
+	}
+
+	for _, edge := range edges {
+		sourceRoot := subtrees.find(edge.Source)
+		targetRoot := subtrees.find(edge.Target)
+
+		if sourceRoot != targetRoot {
+			subtrees.union(sourceRoot, targetRoot)
+
+			if err = mst.AddEdge(copyEdge(edge)); err != nil {
+				return nil, fmt.Errorf("failed to add edge (%v, %v): %w", edge.Source, edge.Target, err)
+			}
+		}
+	}
+
+	return mst, nil
+}
diff --git a/vendor/github.com/dominikbraun/graph/undirected.go b/vendor/github.com/dominikbraun/graph/undirected.go
new file mode 100644
index 0000000..37d320c
--- /dev/null
+++ b/vendor/github.com/dominikbraun/graph/undirected.go @@ -0,0 +1,369 @@ +package graph + +import ( + "errors" + "fmt" +) + +type undirected[K comparable, T any] struct { + hash Hash[K, T] + traits *Traits + store Store[K, T] +} + +func newUndirected[K comparable, T any](hash Hash[K, T], traits *Traits, store Store[K, T]) *undirected[K, T] { + return &undirected[K, T]{ + hash: hash, + traits: traits, + store: store, + } +} + +func (u *undirected[K, T]) Traits() *Traits { + return u.traits +} + +func (u *undirected[K, T]) AddVertex(value T, options ...func(*VertexProperties)) error { + hash := u.hash(value) + + prop := VertexProperties{ + Weight: 0, + Attributes: make(map[string]string), + } + + for _, option := range options { + option(&prop) + } + + return u.store.AddVertex(hash, value, prop) +} + +func (u *undirected[K, T]) Vertex(hash K) (T, error) { + vertex, _, err := u.store.Vertex(hash) + return vertex, err +} + +func (u *undirected[K, T]) VertexWithProperties(hash K) (T, VertexProperties, error) { + vertex, prop, err := u.store.Vertex(hash) + if err != nil { + return vertex, VertexProperties{}, err + } + + return vertex, prop, nil +} + +func (u *undirected[K, T]) RemoveVertex(hash K) error { + return u.store.RemoveVertex(hash) +} + +func (u *undirected[K, T]) AddEdge(sourceHash, targetHash K, options ...func(*EdgeProperties)) error { + if _, _, err := u.store.Vertex(sourceHash); err != nil { + return fmt.Errorf("could not find source vertex with hash %v: %w", sourceHash, err) + } + + if _, _, err := u.store.Vertex(targetHash); err != nil { + return fmt.Errorf("could not find target vertex with hash %v: %w", targetHash, err) + } + + //nolint:govet // False positive. + if _, err := u.Edge(sourceHash, targetHash); !errors.Is(err, ErrEdgeNotFound) { + return ErrEdgeAlreadyExists + } + + // If the user opted in to preventing cycles, run a cycle check. + if u.traits.PreventCycles { + createsCycle, err := CreatesCycle[K, T](u, sourceHash, targetHash) + if err != nil { + return fmt.Errorf("check for cycles: %w", err) + } + if createsCycle { + return ErrEdgeCreatesCycle + } + } + + edge := Edge[K]{ + Source: sourceHash, + Target: targetHash, + Properties: EdgeProperties{ + Attributes: make(map[string]string), + }, + } + + for _, option := range options { + option(&edge.Properties) + } + + if err := u.addEdge(sourceHash, targetHash, edge); err != nil { + return fmt.Errorf("failed to add edge: %w", err) + } + + return nil +} + +func (u *undirected[K, T]) AddEdgesFrom(g Graph[K, T]) error { + edges, err := g.Edges() + if err != nil { + return fmt.Errorf("failed to get edges: %w", err) + } + + for _, edge := range edges { + if err := u.AddEdge(copyEdge(edge)); err != nil { + return fmt.Errorf("failed to add (%v, %v): %w", edge.Source, edge.Target, err) + } + } + + return nil +} + +func (u *undirected[K, T]) AddVerticesFrom(g Graph[K, T]) error { + adjacencyMap, err := g.AdjacencyMap() + if err != nil { + return fmt.Errorf("failed to get adjacency map: %w", err) + } + + for hash := range adjacencyMap { + vertex, properties, err := g.VertexWithProperties(hash) + if err != nil { + return fmt.Errorf("failed to get vertex %v: %w", hash, err) + } + + if err = u.AddVertex(vertex, copyVertexProperties(properties)); err != nil { + return fmt.Errorf("failed to add vertex %v: %w", hash, err) + } + } + + return nil +} + +func (u *undirected[K, T]) Edge(sourceHash, targetHash K) (Edge[T], error) { + // In an undirected graph, since multigraphs aren't supported, the edge AB + // is the same as BA. 
Therefore, if source[target] cannot be found, this + // function also looks for target[source]. + edge, err := u.store.Edge(sourceHash, targetHash) + if errors.Is(err, ErrEdgeNotFound) { + edge, err = u.store.Edge(targetHash, sourceHash) + } + + if err != nil { + return Edge[T]{}, err + } + + sourceVertex, _, err := u.store.Vertex(sourceHash) + if err != nil { + return Edge[T]{}, err + } + + targetVertex, _, err := u.store.Vertex(targetHash) + if err != nil { + return Edge[T]{}, err + } + + return Edge[T]{ + Source: sourceVertex, + Target: targetVertex, + Properties: EdgeProperties{ + Weight: edge.Properties.Weight, + Attributes: edge.Properties.Attributes, + Data: edge.Properties.Data, + }, + }, nil +} + +type tuple[K comparable] struct { + source, target K +} + +func (u *undirected[K, T]) Edges() ([]Edge[K], error) { + storedEdges, err := u.store.ListEdges() + if err != nil { + return nil, fmt.Errorf("failed to get edges: %w", err) + } + + // An undirected graph creates each edge twice internally: The edge (A,B) is + // stored both as (A,B) and (B,A). The Edges method is supposed to return + // one of these two edges, because from an outside perspective, it only is + // a single edge. + // + // To achieve this, Edges keeps track of already-added edges. For each edge, + // it also checks if the reversed edge has already been added - e.g., for + // an edge (A,B), Edges checks if the edge has been added as (B,A). + // + // These reversed edges are built as a custom tuple type, which is then used + // as a map key for access in O(1) time. It looks scarier than it is. + edges := make([]Edge[K], 0, len(storedEdges)/2) + + added := make(map[tuple[K]]struct{}) + + for _, storedEdge := range storedEdges { + reversedEdge := tuple[K]{ + source: storedEdge.Target, + target: storedEdge.Source, + } + if _, ok := added[reversedEdge]; ok { + continue + } + + edges = append(edges, storedEdge) + + addedEdge := tuple[K]{ + source: storedEdge.Source, + target: storedEdge.Target, + } + + added[addedEdge] = struct{}{} + } + + return edges, nil +} + +func (u *undirected[K, T]) UpdateEdge(source, target K, options ...func(properties *EdgeProperties)) error { + existingEdge, err := u.store.Edge(source, target) + if err != nil { + return err + } + + for _, option := range options { + option(&existingEdge.Properties) + } + + if err := u.store.UpdateEdge(source, target, existingEdge); err != nil { + return err + } + + reversedEdge := existingEdge + reversedEdge.Source = existingEdge.Target + reversedEdge.Target = existingEdge.Source + + return u.store.UpdateEdge(target, source, reversedEdge) +} + +func (u *undirected[K, T]) RemoveEdge(source, target K) error { + if _, err := u.Edge(source, target); err != nil { + return err + } + + if err := u.store.RemoveEdge(source, target); err != nil { + return fmt.Errorf("failed to remove edge from %v to %v: %w", source, target, err) + } + + if err := u.store.RemoveEdge(target, source); err != nil { + return fmt.Errorf("failed to remove edge from %v to %v: %w", target, source, err) + } + + return nil +} + +func (u *undirected[K, T]) AdjacencyMap() (map[K]map[K]Edge[K], error) { + vertices, err := u.store.ListVertices() + if err != nil { + return nil, fmt.Errorf("failed to list vertices: %w", err) + } + + edges, err := u.store.ListEdges() + if err != nil { + return nil, fmt.Errorf("failed to list edges: %w", err) + } + + m := make(map[K]map[K]Edge[K], len(vertices)) + + for _, vertex := range vertices { + m[vertex] = make(map[K]Edge[K]) + } + + for _, edge := range edges { + 
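+		// The store lists each undirected edge in both directions, i.e. as
+		// (A,B) and as (B,A), so this single assignment eventually fills both
+		// m[A][B] and m[B][A].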
m[edge.Source][edge.Target] = edge + } + + return m, nil +} + +func (u *undirected[K, T]) PredecessorMap() (map[K]map[K]Edge[K], error) { + return u.AdjacencyMap() +} + +func (u *undirected[K, T]) Clone() (Graph[K, T], error) { + traits := &Traits{ + IsDirected: u.traits.IsDirected, + IsAcyclic: u.traits.IsAcyclic, + IsWeighted: u.traits.IsWeighted, + IsRooted: u.traits.IsRooted, + } + + clone := &undirected[K, T]{ + hash: u.hash, + traits: traits, + store: newMemoryStore[K, T](), + } + + if err := clone.AddVerticesFrom(u); err != nil { + return nil, fmt.Errorf("failed to add vertices: %w", err) + } + + if err := clone.AddEdgesFrom(u); err != nil { + return nil, fmt.Errorf("failed to add edges: %w", err) + } + + return clone, nil +} + +func (u *undirected[K, T]) Order() (int, error) { + return u.store.VertexCount() +} + +func (u *undirected[K, T]) Size() (int, error) { + size := 0 + + outEdges, err := u.AdjacencyMap() + if err != nil { + return 0, fmt.Errorf("failed to get adjacency map: %w", err) + } + + for _, outEdges := range outEdges { + size += len(outEdges) + } + + // Divide by 2 since every add edge operation on undirected graph is counted + // twice. + return size / 2, nil +} + +func (u *undirected[K, T]) edgesAreEqual(a, b Edge[T]) bool { + aSourceHash := u.hash(a.Source) + aTargetHash := u.hash(a.Target) + bSourceHash := u.hash(b.Source) + bTargetHash := u.hash(b.Target) + + if aSourceHash == bSourceHash && aTargetHash == bTargetHash { + return true + } + + if !u.traits.IsDirected { + return aSourceHash == bTargetHash && aTargetHash == bSourceHash + } + + return false +} + +func (u *undirected[K, T]) addEdge(sourceHash, targetHash K, edge Edge[K]) error { + err := u.store.AddEdge(sourceHash, targetHash, edge) + if err != nil { + return err + } + + rEdge := Edge[K]{ + Source: edge.Target, + Target: edge.Source, + Properties: EdgeProperties{ + Weight: edge.Properties.Weight, + Attributes: edge.Properties.Attributes, + Data: edge.Properties.Data, + }, + } + + err = u.store.AddEdge(targetHash, sourceHash, rEdge) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 0000000..444df08
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but you have to know the entire chain of possible
+rewrapping that happens, when you might just care about one.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+    _, err := os.Open("/i/dont/exist")
+    if err != nil {
+        return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+    }
+
+    return nil
+}
+
+func main() {
+    err := tryOpen()
+
+    // We can use the Contains helpers to check if an error contains
+    // another error. It is safe to do this with a nil error, or with
+    // an error that doesn't even use the errwrap package.
+    if errwrap.Contains(err, "does not exist") {
+        // Do something
+    }
+    if errwrap.ContainsType(err, new(os.PathError)) {
+        // Do something
+    }
+
+    // Or we can use the associated `Get` functions to just extract
+    // a specific error. This would return nil if that specific error doesn't
+    // exist.
+    perr := errwrap.GetType(err, new(os.PathError))
+    _ = perr // use the extracted *os.PathError here
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+    Code ErrorCode
+    Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+    return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+    // This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 0000000..a733bef
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors.
This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. 
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 0000000..82b4de9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
“Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. 
You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 0000000..b97cd6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... 
\ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 0000000..71dd308 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,150 @@ +# go-multierror + +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) + +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. + +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. 
But if you're aware a multierror might
+be returned, you can use type switches to access the list of errors:
+
+```go
+if err := something(); err != nil {
+    if merr, ok := err.(*multierror.Error); ok {
+        // Use merr.Errors
+    }
+}
+```
+
+You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
+function. This will continue to unwrap into subsequent errors until none exist.
+
+**Extracting an error**
+
+The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
+function can be used directly with a multierror to extract a specific error:
+
+```go
+// Assume err is a multierror value
+err := somefunc()
+
+// We want to know if "err" has a "RichErrorType" in it and extract it.
+var errRich RichErrorType
+if errors.As(err, &errRich) {
+    // It has it, and now errRich is populated.
+}
+```
+
+**Checking for an exact error value**
+
+Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
+error in the `os` package. You can check if this error is present by using
+the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
+
+```go
+// Assume err is a multierror value
+err := somefunc()
+if errors.Is(err, os.ErrNotExist) {
+    // err contains os.ErrNotExist
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` function
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 0000000..3e2589b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,43 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+// Any nil errors within errs will be ignored. If err is nil, a new
+// *Error will be returned.
+func Append(err error, errs ...error) *Error {
+    switch err := err.(type) {
+    case *Error:
+        // Typed nils can reach here, so initialize if we are nil
+        if err == nil {
+            err = new(Error)
+        }
+
+        // Go through each error and flatten
+        for _, e := range errs {
+            switch e := e.(type) {
+            case *Error:
+                if e != nil {
+                    err.Errors = append(err.Errors, e.Errors...)
+                }
+            default:
+                if e != nil {
+                    err.Errors = append(err.Errors, e)
+                }
+            }
+        }
+
+        return err
+    default:
+        newErrs := make([]error, 0, len(errs)+1)
+        if err != nil {
+            newErrs = append(newErrs, err)
+        }
+        newErrs = append(newErrs, errs...)
+
+        return Append(&Error{}, newErrs...)
+    }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 0000000..aab8e9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
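+//
+// For example, a *Error whose Errors slice itself contains another *Error is
+// merged so that all of the leaf errors end up at the top level of the result.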
+func Flatten(err error) error {
+    // If it isn't an *Error, just return the error as-is
+    if _, ok := err.(*Error); !ok {
+        return err
+    }
+
+    // Otherwise, make the result and flatten away!
+    flatErr := new(Error)
+    flatten(err, flatErr)
+    return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+    switch err := err.(type) {
+    case *Error:
+        for _, e := range err.Errors {
+            flatten(e, flatErr)
+        }
+    default:
+        flatErr.Errors = append(flatErr.Errors, err)
+    }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 0000000..47f13c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+    "fmt"
+    "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+    if len(es) == 1 {
+        return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+    }
+
+    points := make([]string, len(es))
+    for i, err := range es {
+        points[i] = fmt.Sprintf("* %s", err)
+    }
+
+    return fmt.Sprintf(
+        "%d errors occurred:\n\t%s\n\n",
+        len(es), strings.Join(points, "\n\t"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go
new file mode 100644
index 0000000..9c29efb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/group.go
@@ -0,0 +1,38 @@
+package multierror
+
+import "sync"
+
+// Group is a collection of goroutines which return errors that need to be
+// coalesced.
+type Group struct {
+    mutex sync.Mutex
+    err   *Error
+    wg    sync.WaitGroup
+}
+
+// Go calls the given function in a new goroutine.
+//
+// If the function returns an error it is added to the group multierror which
+// is returned by Wait.
+func (g *Group) Go(f func() error) {
+    g.wg.Add(1)
+
+    go func() {
+        defer g.wg.Done()
+
+        if err := f(); err != nil {
+            g.mutex.Lock()
+            g.err = Append(g.err, err)
+            g.mutex.Unlock()
+        }
+    }()
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the multierror.
+func (g *Group) Wait() *Error {
+    g.wg.Wait()
+    g.mutex.Lock()
+    defer g.mutex.Unlock()
+    return g.err
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 0000000..f545743
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,121 @@
+package multierror
+
+import (
+    "errors"
+    "fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors in cases where you want to return them as a single
+// "error".
+type Error struct {
+    Errors      []error
+    ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+    fn := e.ErrorFormat
+    if fn == nil {
+        fn = ListFormatFunc
+    }
+
+    return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
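+//
+// A minimal usage sketch (doWork stands in for any fallible call):
+//
+//	var result *Error
+//	result = Append(result, doWork())
+//	return result.ErrorOrNil() // nil if doWork never failed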
+func (e *Error) ErrorOrNil() error {
+    if e == nil {
+        return nil
+    }
+    if len(e.Errors) == 0 {
+        return nil
+    }
+
+    return e
+}
+
+func (e *Error) GoString() string {
+    return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping. It is
+// an implementation of the errwrap.Wrapper interface so that multierror.Error
+// can be used with that library.
+//
+// This method is not safe to be called concurrently. Unlike accessing the
+// Errors field directly, this function also checks if the multierror is nil to
+// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+    if e == nil {
+        return nil
+    }
+    return e.Errors
+}
+
+// Unwrap returns an error from Error (or nil if there are no errors).
+// The returned error will further support Unwrap to get the next error,
+// etc. The order will match the order of Errors in the multierror.Error
+// at the time of calling.
+//
+// The resulting error supports errors.As/Is/Unwrap so you can continue
+// to use the stdlib errors package to introspect further.
+//
+// This will perform a shallow copy of the errors slice. Any errors appended
+// to this error after calling Unwrap will not be available until a new
+// Unwrap is called on the multierror.Error.
+func (e *Error) Unwrap() error {
+    // If we have no errors then we do nothing
+    if e == nil || len(e.Errors) == 0 {
+        return nil
+    }
+
+    // If we have exactly one error, we can just return that directly.
+    if len(e.Errors) == 1 {
+        return e.Errors[0]
+    }
+
+    // Shallow copy the slice
+    errs := make([]error, len(e.Errors))
+    copy(errs, e.Errors)
+    return chain(errs)
+}
+
+// chain implements the interfaces necessary for errors.Is/As/Unwrap to
+// work in a deterministic way with multierror. A chain tracks a list of
+// errors while accounting for the current represented error. This lets
+// Is/As be meaningful.
+//
+// Unwrap returns the next error. In the cleanest form, Unwrap would return
+// the wrapped error here but we can't do that if we want to properly
+// get access to all the errors. Instead, users are recommended to use
+// Is/As to get the correct error type out.
+//
+// Precondition: []error is non-empty (len > 0)
+type chain []error
+
+// Error implements the error interface
+func (e chain) Error() string {
+    return e[0].Error()
+}
+
+// Unwrap implements errors.Unwrap by returning the next error in the
+// chain or nil if there are no more errors.
+func (e chain) Unwrap() error {
+    if len(e) == 1 {
+        return nil
+    }
+
+    return e[1:]
+}
+
+// As implements errors.As by attempting to map to the current value.
+func (e chain) As(target interface{}) bool {
+    return errors.As(e[0], target)
+}
+
+// Is implements errors.Is by comparing the current value directly.
+func (e chain) Is(target error) bool {
+    return errors.Is(e[0], target)
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 0000000..5c477ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful when appending multiple multierrors
+// together in order to give better scoping.
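+//
+// For example, Prefix(err, "step 1:") rewrites the message of each wrapped
+// error to "step 1: <original message>".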
+func Prefix(err error, prefix string) error {
+    if err == nil {
+        return nil
+    }
+
+    format := fmt.Sprintf("%s {{err}}", prefix)
+    switch err := err.(type) {
+    case *Error:
+        // Typed nils can reach here, so initialize if we are nil
+        if err == nil {
+            err = new(Error)
+        }
+
+        // Wrap each of the errors
+        for i, e := range err.Errors {
+            err.Errors[i] = errwrap.Wrapf(format, e)
+        }
+
+        return err
+    default:
+        return errwrap.Wrapf(format, err)
+    }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go
new file mode 100644
index 0000000..fecb14e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/sort.go
@@ -0,0 +1,16 @@
+package multierror
+
+// Len implements sort.Interface function for length
+func (err Error) Len() int {
+    return len(err.Errors)
+}
+
+// Swap implements sort.Interface function for swapping elements
+func (err Error) Swap(i, j int) {
+    err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
+}
+
+// Less implements sort.Interface function for determining order
+func (err Error) Less(i, j int) bool {
+    return err.Errors[i].Error() < err.Errors[j].Error()
+}
diff --git a/vendor/github.com/paulmach/orb/CHANGELOG.md b/vendor/github.com/paulmach/orb/CHANGELOG.md
new file mode 100644
index 0000000..1783940
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/CHANGELOG.md
@@ -0,0 +1,176 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+## [v0.11.1](https://github.com/paulmach/orb/compare/v0.11.0...v0.11.1) - 2024-01-29
+
+### Fixed
+
+- geojson: `null` json into non-pointer Feature/FeatureCollection will set them to empty by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/145
+
+## [v0.11.0](https://github.com/paulmach/orb/compare/v0.10.0...v0.11.0) - 2024-01-11
+
+### Fixed
+
+- quadtree: InBoundMatching does not properly accept passed-in buffer by [@nirmal-vuppuluri](https://github.com/nirmal-vuppuluri) in https://github.com/paulmach/orb/pull/139
+- mvt: Do not swallow error cause by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/137
+
+### Changed
+
+- simplify: Visvalingam, by default, keeps 3 points for "areas" by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/140
+- encoding/mvt: skip encoding of features with nil geometry by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/141
+- encoding/wkt: improve unmarshalling performance by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/142
+
+## [v0.10.0](https://github.com/paulmach/orb/compare/v0.9.2...v0.10.0) - 2023-07-16
+
+### Added
+
+- add ChildrenInZoomRange method to maptile.Tile by [@peitili](https://github.com/peitili) in https://github.com/paulmach/orb/pull/133
+
+## [v0.9.2](https://github.com/paulmach/orb/compare/v0.9.1...v0.9.2) - 2023-05-04
+
+### Fixed
+
+- encoding/wkt: better handling/validation of missing parens by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/131
+
+## [v0.9.1](https://github.com/paulmach/orb/compare/v0.9.0...v0.9.1) - 2023-04-26
+
+### Fixed
+
+- Bump up mongo driver to 1.11.4 by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/129
+- encoding/wkt: split strings with regexp by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/128
+
+## [v0.9.0](https://github.com/paulmach/orb/compare/v0.8.0...v0.9.0) - 2023-02-19
+
+### Added
+
+- geojson: marshal/unmarshal BSON by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/123
+
+## [v0.8.0](https://github.com/paulmach/orb/compare/v0.7.1...v0.8.0) - 2023-01-05
+
+### Fixed
+
+- quadtree: fix bad sort due to pointer allocation issue by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/115
+- geojson: ensure geometry unmarshal errors get returned by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/117
+- encoding/mvt: remove use of crypto/md5 to compare marshalling in tests by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/118
+- encoding/wkt: fix panic for some invalid wkt data by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/119
+
+### Other
+
+- fix typo by [@rubenpoppe](https://github.com/rubenpoppe) in https://github.com/paulmach/orb/pull/107
+- Fixed a small twister in README.md by [@Timahawk](https://github.com/Timahawk) in https://github.com/paulmach/orb/pull/108
+- update github ci to use go 1.19 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/116
+
+## [v0.7.1](https://github.com/paulmach/orb/compare/v0.7.0...v0.7.1) - 2022-05-16
+
+No changes
+
+The v0.7.0 tag was updated since it initially pointed to the wrong commit. This was causing caching issues.
+
+## [v0.7.0](https://github.com/paulmach/orb/compare/v0.6.0...v0.7.0) - 2022-05-10
+
+This tag is broken; please use v0.7.1 instead.
+
+### Breaking Changes
+
+- tilecover now returns an error (vs. panicking) on non-closed 2d geometry by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/87
+
+  This changes the signature of many of the methods in the [maptile/tilecover](https://github.com/paulmach/orb/tree/master/maptile/tilecover) package.
+  To emulate the old behavior replace:
+
+      tiles := tilecover.Geometry(poly, zoom)
+
+  with
+
+      tiles, err := tilecover.Geometry(poly, zoom)
+      if err != nil {
+          panic(err)
+      }
+
+## [v0.6.0](https://github.com/paulmach/orb/compare/v0.5.0...v0.6.0) - 2022-05-04
+
+### Added
+
+- geo: add correctly spelled LengthHaversine by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/97
+- geojson: add support for "external" json encoders/decoders by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/98
+- Add ewkb encoding/decoding support by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/88
+
+## [v0.5.0](https://github.com/paulmach/orb/compare/v0.4.0...v0.5.0) - 2022-04-06
+
+### Added
+
+- encoding/mvt: stable marshalling by [@travisgrigsby](https://github.com/travisgrigsby) in https://github.com/paulmach/orb/pull/93
+- encoding/mvt: support mvt marshal for GeometryCollection by [@dadadamarine](https://github.com/dadadamarine) in https://github.com/paulmach/orb/pull/89
+
+### Fixed
+
+- quadtree: fix cleanup of nodes during removal by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/94
+
+### Other
+
+- encoding/wkt: various code improvements by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/95
+- update protoscan to 0.2.1 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/83
+
+## [v0.4.0](https://github.com/paulmach/orb/compare/v0.3.0...v0.4.0) - 2021-11-11
+
+### Added
+
+- geo: Add functions to calculate points based on distance and bearing by [@thzinc](https://github.com/thzinc) in https://github.com/paulmach/orb/pull/76
+
+### Fixed
+
+- encoding/mvt: avoid reflect nil value by [@nicklasaven](https://github.com/nicklasaven) in https://github.com/paulmach/orb/pull/78
+
+## [v0.3.0](https://github.com/paulmach/orb/compare/v0.2.2...v0.3.0) - 2021-10-16
+
+### Changed
+
+- quadtree: sort KNearest results closest first by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/75
+- ring: require >=4 points to return true when calling Closed() by [@missinglink](https://github.com/missinglink) in https://github.com/paulmach/orb/pull/70
+
+### Fixed
+
+- encoding/mvt: verify tile coord does not overflow for z > 20 by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/74
+- quadtree: Address panicking quadtree.Matching(…) method when finding no closest node by [@willsalz](https://github.com/willsalz) in https://github.com/paulmach/orb/pull/73
+
+## [v0.2.2](https://github.com/paulmach/orb/compare/v0.2.1...v0.2.2) - 2021-06-05
+
+### Fixed
+
+- Dependency resolution problems in some cases, issue https://github.com/paulmach/orb/issues/65, pr https://github.com/paulmach/orb/pull/66
+
+## [v0.2.1](https://github.com/paulmach/orb/compare/v0.2.0...v0.2.1) - 2021-01-16
+
+### Changed
+
+- encoding/mvt: upgrade protoscan v0.1 -> v0.2 [`ad31566`](https://github.com/paulmach/orb/commit/ad31566942027c1cd30dd341f35123fb54676599)
+- encoding/mvt: remove github.com/pkg/errors as a dependency [`d2e235`](https://github.com/paulmach/orb/commit/d2e23529a295a0d973cc787ad2742cb6ccbd5306)
+
+## v0.2.0 - 2021-01-16
+
+### Breaking Changes
+
+- Foreign Members in Feature Collections
+
+  Extra attributes in a feature collection object will now be put into `featureCollection.ExtraMembers`.
+  Similarly, stuff in `ExtraMembers` will be marshalled into the feature collection base.
+  The break happens if you were decoding these foreign members using something like
+
+  ```go
+  type MyFeatureCollection struct {
+      geojson.FeatureCollection
+      Title string `json:"title"`
+  }
+  ```
+
+  **The above will no longer work** in this release and it never supported marshalling. See https://github.com/paulmach/orb/pull/56 for more details.
+
+- Features with nil/missing geometry will no longer return an error
+
+  Previously, missing or invalid geometry in a feature collection would return an `ErrInvalidGeometry` error.
+  However, missing geometry is compliant with [section 3.2](https://tools.ietf.org/html/rfc7946#section-3.2) of the spec.
+  See https://github.com/paulmach/orb/issues/38 and https://github.com/paulmach/orb/pull/58 for more details.
+
+### Changed
+
+- encoding/mvt: faster unmarshalling for Mapbox Vector Tiles (MVT) see https://github.com/paulmach/orb/pull/57
diff --git a/vendor/github.com/paulmach/orb/LICENSE.md b/vendor/github.com/paulmach/orb/LICENSE.md
new file mode 100644
index 0000000..526962a
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Paul Mach
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/paulmach/orb/README.md b/vendor/github.com/paulmach/orb/README.md
new file mode 100644
index 0000000..03fe463
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/README.md
@@ -0,0 +1,181 @@
+# orb [![CI](https://github.com/paulmach/orb/workflows/CI/badge.svg)](https://github.com/paulmach/orb/actions?query=workflow%3ACI+event%3Apush) [![codecov](https://codecov.io/gh/paulmach/orb/branch/master/graph/badge.svg?token=NuuTjLVpKW)](https://codecov.io/gh/paulmach/orb) [![Go Report Card](https://goreportcard.com/badge/github.com/paulmach/orb)](https://goreportcard.com/report/github.com/paulmach/orb) [![Go Reference](https://pkg.go.dev/badge/github.com/paulmach/orb.svg)](https://pkg.go.dev/github.com/paulmach/orb)
+
+Package `orb` defines a set of types for working with 2d geo and planar/projected geometric data in Golang.
+There is a set of sub-packages that use these types to do interesting things.
+They each provide their own README with extra info.
+
+## Interesting features
+
+- **Simple types** - allow for natural operations using the `make`, `append`, `len`, `[s:e]` builtins.
+- **GeoJSON** - support as part of the [`geojson`](geojson) sub-package.
+- **Mapbox Vector Tile** - encoding and decoding as part of the [`encoding/mvt`](encoding/mvt) sub-package.
+- **Direct to type from DB query results** - by scanning WKB data directly into types.
+- **Rich set of sub-packages** - including [`clip`](clip), [`simplify`](simplify), [`quadtree`](quadtree) and more.
+
+## Type definitions
+
+```go
+type Point [2]float64
+type MultiPoint []Point
+
+type LineString []Point
+type MultiLineString []LineString
+
+type Ring LineString
+type Polygon []Ring
+type MultiPolygon []Polygon
+
+type Collection []Geometry
+
+type Bound struct { Min, Max Point }
+```
+
+Defining the types as slices allows them to be accessed in an idiomatic way
+using Go's built-in functions such as `make`, `append`, `len`
+and with slice notation like `[s:e]`. For example:
+
+```go
+ls := make(orb.LineString, 0, 100)
+ls = append(ls, orb.Point{1, 1})
+point := ls[0]
+```
+
+### Shared `Geometry` interface
+
+All of the base types implement the `orb.Geometry` interface defined as:
+
+```go
+type Geometry interface {
+	GeoJSONType() string
+	Dimensions() int // e.g. 0d, 1d, 2d
+	Bound() Bound
+}
+```
+
+This interface is accepted by functions in the sub-packages which then act on the
+base types correctly. For example:
+
+```go
+l := clip.Geometry(bound, geom)
+```
+
+will use the appropriate clipping algorithm depending on whether the input is 1d or 2d,
+e.g. an `orb.LineString` or an `orb.Polygon`.
+
+Only a few methods are defined directly on these types, for example `Clone`, `Equal`, `GeoJSONType`.
+Other operations that depend on geo vs. planar contexts are defined in the respective sub-packages.
+For example:
+
+- Computing the geo distance between two points:
+
+  ```go
+  p1 := orb.Point{-72.796408, -45.407131}
+  p2 := orb.Point{-72.688541, -45.384987}
+
+  geo.Distance(p1, p2)
+  ```
+
+- Computing the planar area and centroid of a polygon:
+
+  ```go
+  poly := orb.Polygon{...}
+  centroid, area := planar.CentroidArea(poly)
+  ```
+
+## GeoJSON
+
+The [geojson](geojson) sub-package implements Marshalling and Unmarshalling of GeoJSON data.
+Features are defined as:
+
+```go
+type Feature struct {
+	ID         interface{}  `json:"id,omitempty"`
+	Type       string       `json:"type"`
+	Geometry   orb.Geometry `json:"geometry"`
+	Properties Properties   `json:"properties"`
+}
+```
+
+Defining the geometry as an `orb.Geometry` interface along with sub-package functions
+accepting geometries allows them to work together to create easy-to-follow code.
+For example, clipping all the geometries in a collection:
+
+```go
+fc, err := geojson.UnmarshalFeatureCollection(data)
+for _, f := range fc.Features {
+	f.Geometry = clip.Geometry(bound, f.Geometry)
+}
+```
+
+The library supports third party "encoding/json" replacements
+such as [github.com/json-iterator/go](https://github.com/json-iterator/go).
+See the [geojson](geojson) readme for more details.
+
+The types also support BSON so they can be used directly when working with MongoDB.
+
+## Mapbox Vector Tiles
+
+The [encoding/mvt](encoding/mvt) sub-package implements Marshalling and
+Unmarshalling [MVT](https://www.mapbox.com/vector-tiles/) data.
+This package uses sets of `geojson.FeatureCollection` to define the layers,
+keyed by the layer name. For example:
+
+```go
+collections := map[string]*geojson.FeatureCollection{}
+
+// Convert to a layers object and project to tile coordinates.
+layers := mvt.NewLayers(collections)
+layers.ProjectToTile(maptile.New(x, y, z))
+
+// In order to be used as a source for MapboxGL, geometries need to be clipped
+// to the max allowed extent. (uncomment next line)
+// layers.Clip(mvt.MapboxGLDefaultExtentBound)
+
+// Simplify the geometry now that it's in tile coordinate space.
+layers.Simplify(simplify.DouglasPeucker(1.0))
+
+// Depending on use-case remove empty geometry, those too small to be
+// represented in this tile space.
+// In this case lines shorter than 1, and areas smaller than 2.
+layers.RemoveEmpty(1.0, 2.0)
+
+// Encode using the Mapbox Vector Tile protobuf encoding.
+data, err := mvt.Marshal(layers) // this data is NOT gzipped.
+
+// Sometimes MVT data is stored and transferred gzip compressed. In that case:
+data, err := mvt.MarshalGzipped(layers)
+```
+
+## Decoding WKB/EWKB from a database query
+
+Geometries are usually returned from databases in WKB or EWKB format. The [encoding/ewkb](encoding/ewkb)
+sub-package offers helpers to "scan" the data into the base types directly.
+For example:
+
+```go
+db.Exec(
+	"INSERT INTO postgis_table (point_column) VALUES (ST_GeomFromEWKB(?))",
+	ewkb.Value(orb.Point{1, 2}, 4326),
+)
+
+row := db.QueryRow("SELECT ST_AsBinary(point_column) FROM postgis_table")
+
+var p orb.Point
+err := row.Scan(ewkb.Scanner(&p))
+```
+
+For more information see the readme in the [encoding/ewkb](encoding/ewkb) package.
+
+## List of sub-package utilities
+
+- [`clip`](clip) - clipping geometry to a bounding box
+- [`encoding/mvt`](encoding/mvt) - encoding and decoding from [Mapbox Vector Tiles](https://www.mapbox.com/vector-tiles/)
+- [`encoding/wkb`](encoding/wkb) - well-known binary as well as helpers to decode from database queries
+- [`encoding/ewkb`](encoding/ewkb) - extended well-known binary format that includes the SRID
+- [`encoding/wkt`](encoding/wkt) - well-known text encoding
+- [`geojson`](geojson) - working with geojson and the types in this package
+- [`maptile`](maptile) - working with mercator map tiles and quadkeys
+- [`project`](project) - project geometries between geo and planar contexts
+- [`quadtree`](quadtree) - quadtree implementation using the types in this package
+- [`resample`](resample) - resample points in a line string geometry
+- [`simplify`](simplify) - linear geometry simplifications like Douglas-Peucker
diff --git a/vendor/github.com/paulmach/orb/bound.go b/vendor/github.com/paulmach/orb/bound.go
new file mode 100644
index 0000000..a0726d6
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/bound.go
@@ -0,0 +1,172 @@
+package orb
+
+import (
+	"math"
+)
+
+var emptyBound = Bound{Min: Point{1, 1}, Max: Point{-1, -1}}
+
+// A Bound represents a closed box or rectangle.
+// To create a bound with two points you can do something like:
+// orb.MultiPoint{p1, p2}.Bound()
+type Bound struct {
+	Min, Max Point
+}
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (b Bound) GeoJSONType() string {
+	return "Polygon"
+}
+
+// Dimensions returns 2 because a Bound is a 2d object.
+func (b Bound) Dimensions() int {
+	return 2
+}
+
+// ToPolygon converts the bound into a Polygon object.
+func (b Bound) ToPolygon() Polygon {
+	return Polygon{b.ToRing()}
+}
+
+// ToRing converts the bound into a loop defined
+// by the boundary of the box.
+func (b Bound) ToRing() Ring {
+	return Ring{
+		b.Min,
+		Point{b.Max[0], b.Min[1]},
+		b.Max,
+		Point{b.Min[0], b.Max[1]},
+		b.Min,
+	}
+}
+
+// Extend grows the bound to include the new point.
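+//
+// A small illustrative sketch (values are made up):
+//
+//	b := Bound{Min: Point{0, 0}, Max: Point{1, 1}}
+//	b = b.Extend(Point{2, 3}) // b.Max is now Point{2, 3}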
+func (b Bound) Extend(point Point) Bound {
+	// already included, no big deal
+	if b.Contains(point) {
+		return b
+	}
+
+	return Bound{
+		Min: Point{
+			math.Min(b.Min[0], point[0]),
+			math.Min(b.Min[1], point[1]),
+		},
+		Max: Point{
+			math.Max(b.Max[0], point[0]),
+			math.Max(b.Max[1], point[1]),
+		},
+	}
+}
+
+// Union extends this bound to contain the union of this and the given bound.
+func (b Bound) Union(other Bound) Bound {
+	if other.IsEmpty() {
+		return b
+	}
+
+	b = b.Extend(other.Min)
+	b = b.Extend(other.Max)
+	b = b.Extend(other.LeftTop())
+	b = b.Extend(other.RightBottom())
+
+	return b
+}
+
+// Contains determines if the point is within the bound.
+// Points on the boundary are considered within.
+func (b Bound) Contains(point Point) bool {
+	if point[1] < b.Min[1] || b.Max[1] < point[1] {
+		return false
+	}
+
+	if point[0] < b.Min[0] || b.Max[0] < point[0] {
+		return false
+	}
+
+	return true
+}
+
+// Intersects determines if two bounds intersect.
+// Returns true if they are touching.
+func (b Bound) Intersects(bound Bound) bool {
+	if (b.Max[0] < bound.Min[0]) ||
+		(b.Min[0] > bound.Max[0]) ||
+		(b.Max[1] < bound.Min[1]) ||
+		(b.Min[1] > bound.Max[1]) {
+		return false
+	}
+
+	return true
+}
+
+// Pad extends the bound in all directions by the given value.
+func (b Bound) Pad(d float64) Bound {
+	b.Min[0] -= d
+	b.Min[1] -= d
+
+	b.Max[0] += d
+	b.Max[1] += d
+
+	return b
+}
+
+// Center returns the center of the bounds by "averaging" the x and y coords.
+func (b Bound) Center() Point {
+	return Point{
+		(b.Min[0] + b.Max[0]) / 2.0,
+		(b.Min[1] + b.Max[1]) / 2.0,
+	}
+}
+
+// Top returns the top of the bound.
+func (b Bound) Top() float64 {
+	return b.Max[1]
+}
+
+// Bottom returns the bottom of the bound.
+func (b Bound) Bottom() float64 {
+	return b.Min[1]
+}
+
+// Right returns the right of the bound.
+func (b Bound) Right() float64 {
+	return b.Max[0]
+}
+
+// Left returns the left of the bound.
+func (b Bound) Left() float64 {
+	return b.Min[0]
+}
+
+// LeftTop returns the upper left point of the bound.
+func (b Bound) LeftTop() Point {
+	return Point{b.Left(), b.Top()}
+}
+
+// RightBottom returns the lower right point of the bound.
+func (b Bound) RightBottom() Point {
+	return Point{b.Right(), b.Bottom()}
+}
+
+// IsEmpty returns true if it contains zero area or if
+// it's in some malformed negative state where the left point is larger than the right.
+// This can be caused by padding with too large a negative value.
+func (b Bound) IsEmpty() bool {
+	return b.Min[0] > b.Max[0] || b.Min[1] > b.Max[1]
+}
+
+// IsZero returns true if the bound includes just null island.
+func (b Bound) IsZero() bool {
+	return b.Max == Point{} && b.Min == Point{}
+}
+
+// Bound returns the same bound.
+func (b Bound) Bound() Bound {
+	return b
+}
+
+// Equal returns true if the two bounds are equal.
+func (b Bound) Equal(c Bound) bool {
+	return b.Min == c.Min && b.Max == c.Max
+}
diff --git a/vendor/github.com/paulmach/orb/clone.go b/vendor/github.com/paulmach/orb/clone.go
new file mode 100644
index 0000000..ac757b7
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/clone.go
@@ -0,0 +1,56 @@
+package orb
+
+import (
+	"fmt"
+)
+
+// Clone will make a deep copy of the geometry.
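+//
+// A small illustrative sketch:
+//
+//	ls := LineString{{0, 0}, {1, 1}}
+//	dup := Clone(ls).(LineString) // mutating dup will not affect ls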
+func Clone(g Geometry) Geometry {
+	if g == nil {
+		return nil
+	}
+
+	switch g := g.(type) {
+	case Point:
+		return g
+	case MultiPoint:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case LineString:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case MultiLineString:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case Ring:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case Polygon:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case MultiPolygon:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case Collection:
+		if g == nil {
+			return nil
+		}
+		return g.Clone()
+	case Bound:
+		return g
+	}
+
+	panic(fmt.Sprintf("geometry type not supported: %T", g))
+}
diff --git a/vendor/github.com/paulmach/orb/codecov.yml b/vendor/github.com/paulmach/orb/codecov.yml
new file mode 100644
index 0000000..8bf5754
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/codecov.yml
@@ -0,0 +1,10 @@
+coverage:
+  status:
+    project: off
+    patch: off
+
+  precision: 2
+  round: down
+  range: "70...90"
+
+comment: false
diff --git a/vendor/github.com/paulmach/orb/define.go b/vendor/github.com/paulmach/orb/define.go
new file mode 100644
index 0000000..a0b08f3
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/define.go
@@ -0,0 +1,44 @@
+package orb
+
+// EarthRadius is the radius of the earth in meters. It is used in geo distance calculations.
+// To keep things consistent, this value matches WGS84 Web Mercator (EPSG:3857).
+const EarthRadius = 6378137.0 // meters
+
+// DefaultRoundingFactor is the default rounding factor used by the Round func.
+var DefaultRoundingFactor = 1e6 // 6 decimal places
+
+// Orientation defines the order of the points in a polygon
+// or closed ring.
+type Orientation int8
+
+// Constants to define orientation.
+// They follow the right hand rule for orientation.
+const (
+	// CCW stands for Counter Clock Wise
+	CCW Orientation = 1
+
+	// CW stands for Clock Wise
+	CW Orientation = -1
+)
+
+// A DistanceFunc is a function that computes the distance between two points.
+type DistanceFunc func(Point, Point) float64
+
+// A Projection is a function that moves a point from one space to another.
+type Projection func(Point) Point
+
+// Pointer is something that can be represented by a point.
+type Pointer interface {
+	Point() Point
+}
+
+// A Simplifier is something that can simplify geometry.
+type Simplifier interface {
+	Simplify(g Geometry) Geometry
+	LineString(ls LineString) LineString
+	MultiLineString(mls MultiLineString) MultiLineString
+	Ring(r Ring) Ring
+	Polygon(p Polygon) Polygon
+	MultiPolygon(mp MultiPolygon) MultiPolygon
+	Collection(c Collection) Collection
+}
diff --git a/vendor/github.com/paulmach/orb/encoding/wkt/README.md b/vendor/github.com/paulmach/orb/encoding/wkt/README.md
new file mode 100644
index 0000000..a8f7dcb
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/encoding/wkt/README.md
@@ -0,0 +1,17 @@
+# encoding/wkt [![Godoc Reference](https://pkg.go.dev/badge/github.com/paulmach/orb)](https://pkg.go.dev/github.com/paulmach/orb/encoding/wkt)
+
+This package provides encoding and decoding of [WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry)
+data. The interface is defined as:
+
+```go
+func MarshalString(orb.Geometry) string
+
+func Unmarshal(string) (orb.Geometry, error)
+func UnmarshalPoint(string) (orb.Point, error)
+func UnmarshalMultiPoint(string) (orb.MultiPoint, error)
+func UnmarshalLineString(string) (orb.LineString, error)
+func UnmarshalMultiLineString(string) (orb.MultiLineString, error)
+func UnmarshalPolygon(string) (orb.Polygon, error)
+func UnmarshalMultiPolygon(string) (orb.MultiPolygon, error)
+func UnmarshalCollection(string) (orb.Collection, error)
+```
diff --git a/vendor/github.com/paulmach/orb/encoding/wkt/unmarshal.go b/vendor/github.com/paulmach/orb/encoding/wkt/unmarshal.go
new file mode 100644
index 0000000..d9659d4
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/encoding/wkt/unmarshal.go
@@ -0,0 +1,610 @@
+package wkt
+
+import (
+	"bytes"
+	"errors"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/paulmach/orb"
+)
+
+var (
+	// ErrNotWKT is returned when unmarshalling WKT and the data is not valid.
+	ErrNotWKT = errors.New("wkt: invalid data")
+
+	// ErrIncorrectGeometry is returned when unmarshalling WKT data into the wrong type.
+	// For example, unmarshalling linestring data into a point.
+	ErrIncorrectGeometry = errors.New("wkt: incorrect geometry")
+
+	// ErrUnsupportedGeometry is returned when the geometry type is not supported by this lib.
+	ErrUnsupportedGeometry = errors.New("wkt: unsupported geometry")
+
+	doubleParen = regexp.MustCompile(`\)[\s|\t]*\)([\s|\t]*,[\s|\t]*)\([\s|\t]*\(`)
+	singleParen = regexp.MustCompile(`\)([\s|\t]*,[\s|\t]*)\(`)
+)
+
+// UnmarshalPoint returns the point represented by the wkt string.
+// Will return ErrIncorrectGeometry if the wkt is not a point.
+func UnmarshalPoint(s string) (orb.Point, error) {
+	s = trimSpace(s)
+	prefix := upperPrefix(s)
+	if !bytes.HasPrefix(prefix, []byte("POINT")) {
+		return orb.Point{}, ErrIncorrectGeometry
+	}
+
+	return unmarshalPoint(s)
+}
+
+func unmarshalPoint(s string) (orb.Point, error) {
+	s, err := trimSpaceBrackets(s[5:])
+	if err != nil {
+		return orb.Point{}, err
+	}
+
+	tp, err := parsePoint(s)
+	if err != nil {
+		return orb.Point{}, err
+	}
+
+	return tp, nil
+}
+
+// parsePoint parses a point in the form "x y".
+func parsePoint(s string) (p orb.Point, err error) {
+	one, two, ok := cut(s, " ")
+	if !ok {
+		return orb.Point{}, ErrNotWKT
+	}
+
+	x, err := strconv.ParseFloat(one, 64)
+	if err != nil {
+		return orb.Point{}, ErrNotWKT
+	}
+
+	y, err := strconv.ParseFloat(two, 64)
+	if err != nil {
+		return orb.Point{}, ErrNotWKT
+	}
+
+	return orb.Point{x, y}, nil
+}
+
+// UnmarshalMultiPoint returns the multi-point represented by the wkt string.
+// Will return ErrIncorrectGeometry if the wkt is not a multi-point.
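+//
+// A small illustrative sketch:
+//
+//	mp, err := UnmarshalMultiPoint("MULTIPOINT((1 2),(3 4))")
+//	// mp is orb.MultiPoint{{1, 2}, {3, 4}} when err is nil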
+func UnmarshalMultiPoint(s string) (orb.MultiPoint, error) { + s = trimSpace(s) + prefix := upperPrefix(s) + if !bytes.HasPrefix(prefix, []byte("MULTIPOINT")) { + return nil, ErrIncorrectGeometry + } + + return unmarshalMultiPoint(s) +} + +func unmarshalMultiPoint(s string) (orb.MultiPoint, error) { + if strings.EqualFold(s, "MULTIPOINT EMPTY") { + return orb.MultiPoint{}, nil + } + + s, err := trimSpaceBrackets(s[10:]) + if err != nil { + return nil, err + } + + count := strings.Count(s, ",") + mp := make(orb.MultiPoint, 0, count+1) + + err = splitOnComma(s, func(p string) error { + p, err := trimSpaceBrackets(p) + if err != nil { + return err + } + + tp, err := parsePoint(p) + if err != nil { + return err + } + + mp = append(mp, tp) + return nil + }) + if err != nil { + return nil, err + } + + return mp, nil +} + +// UnmarshalLineString returns the linestring represented by the wkt string. +// Will return ErrIncorrectGeometry if the wkt is not a linestring. +func UnmarshalLineString(s string) (orb.LineString, error) { + s = trimSpace(s) + prefix := upperPrefix(s) + if !bytes.HasPrefix(prefix, []byte("LINESTRING")) { + return nil, ErrIncorrectGeometry + } + + return unmarshalLineString(s) +} + +func unmarshalLineString(s string) (orb.LineString, error) { + if strings.EqualFold(s, "LINESTRING EMPTY") { + return orb.LineString{}, nil + } + + s, err := trimSpaceBrackets(s[10:]) + if err != nil { + return nil, err + } + + count := strings.Count(s, ",") + ls := make(orb.LineString, 0, count+1) + + err = splitOnComma(s, func(p string) error { + tp, err := parsePoint(p) + if err != nil { + return err + } + + ls = append(ls, tp) + return nil + }) + if err != nil { + return nil, err + } + + return ls, nil +} + +// UnmarshalMultiLineString returns the multi-linestring represented by the wkt string. +// Will return ErrIncorrectGeometry if the wkt is not a multi-linestring. +func UnmarshalMultiLineString(s string) (orb.MultiLineString, error) { + s = trimSpace(s) + prefix := upperPrefix(s) + if !bytes.HasPrefix(prefix, []byte("MULTILINESTRING")) { + return nil, ErrIncorrectGeometry + } + + return unmarshalMultiLineString(s) +} + +func unmarshalMultiLineString(s string) (orb.MultiLineString, error) { + if strings.EqualFold(s, "MULTILINESTRING EMPTY") { + return orb.MultiLineString{}, nil + } + + s, err := trimSpaceBrackets(s[15:]) + if err != nil { + return nil, err + } + + var tmls orb.MultiLineString + err = splitByRegexpYield( + s, + singleParen, + func(i int) { + tmls = make(orb.MultiLineString, 0, i) + }, + func(ls string) error { + ls, err := trimSpaceBrackets(ls) + if err != nil { + return err + } + + count := strings.Count(ls, ",") + tls := make(orb.LineString, 0, count+1) + + err = splitOnComma(ls, func(p string) error { + tp, err := parsePoint(p) + if err != nil { + return err + } + + tls = append(tls, tp) + return nil + }) + if err != nil { + return err + } + + tmls = append(tmls, tls) + return nil + }, + ) + if err != nil { + return nil, err + } + + return tmls, nil +} + +// UnmarshalPolygon returns the polygon represented by the wkt string. +// Will return ErrIncorrectGeometry if the wkt is not a polygon. 
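+//
+// A small illustrative sketch:
+//
+//	poly, err := UnmarshalPolygon("POLYGON((0 0,4 0,4 4,0 0))")
+//	// poly holds a single ring of four points when err is nil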
+func UnmarshalPolygon(s string) (orb.Polygon, error) { + s = trimSpace(s) + prefix := upperPrefix(s) + if !bytes.HasPrefix(prefix, []byte("POLYGON")) { + return nil, ErrIncorrectGeometry + } + + return unmarshalPolygon(s) +} + +func unmarshalPolygon(s string) (orb.Polygon, error) { + if strings.EqualFold(s, "POLYGON EMPTY") { + return orb.Polygon{}, nil + } + + s, err := trimSpaceBrackets(s[7:]) + if err != nil { + return nil, err + } + + var poly orb.Polygon + err = splitByRegexpYield( + s, + singleParen, + func(i int) { + poly = make(orb.Polygon, 0, i) + }, + func(r string) error { + r, err := trimSpaceBrackets(r) + if err != nil { + return err + } + + count := strings.Count(r, ",") + ring := make(orb.Ring, 0, count+1) + + err = splitOnComma(r, func(p string) error { + tp, err := parsePoint(p) + if err != nil { + return err + } + ring = append(ring, tp) + return nil + }) + if err != nil { + return err + } + + poly = append(poly, ring) + return nil + }, + ) + if err != nil { + return nil, err + } + + return poly, nil +} + +// UnmarshalMultiPolygon returns the multi-polygon represented by the wkt string. +// Will return ErrIncorrectGeometry if the wkt is not a multi-polygon. +func UnmarshalMultiPolygon(s string) (orb.MultiPolygon, error) { + s = trimSpace(s) + prefix := upperPrefix(s) + if !bytes.HasPrefix(prefix, []byte("MULTIPOLYGON")) { + return nil, ErrIncorrectGeometry + } + + return unmarshalMultiPolygon(s) +} + +func unmarshalMultiPolygon(s string) (orb.MultiPolygon, error) { + if strings.EqualFold(s, "MULTIPOLYGON EMPTY") { + return orb.MultiPolygon{}, nil + } + + s, err := trimSpaceBrackets(s[12:]) + if err != nil { + return nil, err + } + + var mpoly orb.MultiPolygon + err = splitByRegexpYield( + s, + doubleParen, + func(i int) { + mpoly = make(orb.MultiPolygon, 0, i) + }, + func(poly string) error { + poly, err := trimSpaceBrackets(poly) + if err != nil { + return err + } + + var tpoly orb.Polygon + err = splitByRegexpYield( + poly, + singleParen, + func(i int) { + tpoly = make(orb.Polygon, 0, i) + }, + func(r string) error { + r, err := trimSpaceBrackets(r) + if err != nil { + return err + } + + count := strings.Count(r, ",") + tr := make(orb.Ring, 0, count+1) + + err = splitOnComma(r, func(s string) error { + tp, err := parsePoint(s) + if err != nil { + return err + } + + tr = append(tr, tp) + return nil + }) + if err != nil { + return err + } + + tpoly = append(tpoly, tr) + return nil + }, + ) + if err != nil { + return err + } + + mpoly = append(mpoly, tpoly) + return nil + }, + ) + if err != nil { + return nil, err + } + + return mpoly, nil +} + +// UnmarshalCollection returns the geometry collection represented by the wkt string. +// Will return ErrIncorrectGeometry if the wkt is not a geometry collection. 
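+//
+// A small illustrative sketch:
+//
+//	c, err := UnmarshalCollection("GEOMETRYCOLLECTION(POINT(1 2),LINESTRING(0 0,1 1))")
+//	// c is an orb.Collection holding a point and a linestring when err is nil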
+func UnmarshalCollection(s string) (orb.Collection, error) {
+	s = trimSpace(s)
+	prefix := upperPrefix(s)
+	if !bytes.HasPrefix(prefix, []byte("GEOMETRYCOLLECTION")) {
+		return nil, ErrIncorrectGeometry
+	}
+
+	return unmarshalCollection(s)
+}
+
+func unmarshalCollection(s string) (orb.Collection, error) {
+	if strings.EqualFold(s, "GEOMETRYCOLLECTION EMPTY") {
+		return orb.Collection{}, nil
+	}
+
+	if len(s) == 18 { // just GEOMETRYCOLLECTION
+		return nil, ErrNotWKT
+	}
+
+	geometries := splitGeometryCollection(s[18:])
+	if len(geometries) == 0 {
+		return orb.Collection{}, nil
+	}
+
+	c := make(orb.Collection, 0, len(geometries))
+	for _, g := range geometries {
+		if len(g) == 0 {
+			continue
+		}
+
+		tg, err := Unmarshal(g)
+		if err != nil {
+			return nil, err
+		}
+
+		c = append(c, tg)
+	}
+
+	return c, nil
+}
+
+// splitGeometryCollection splits a GEOMETRYCOLLECTION into its component geometries.
+func splitGeometryCollection(s string) (r []string) {
+	r = make([]string, 0)
+	stack := make([]rune, 0)
+	l := len(s)
+	for i, v := range s {
+		if !strings.Contains(string(stack), "(") {
+			stack = append(stack, v)
+			continue
+		}
+		if ('A' <= v && v < 'Z') || ('a' <= v && v < 'z') {
+			t := string(stack)
+			r = append(r, t[:len(t)-1])
+			stack = make([]rune, 0)
+			stack = append(stack, v)
+			continue
+		}
+		if i == l-1 {
+			r = append(r, string(stack))
+			continue
+		}
+		stack = append(stack, v)
+	}
+	return
+}
+
+// Unmarshal returns a geometry by parsing the WKT string.
+func Unmarshal(s string) (orb.Geometry, error) {
+	var (
+		g   orb.Geometry
+		err error
+	)
+
+	s = trimSpace(s)
+	prefix := upperPrefix(s)
+
+	if bytes.HasPrefix(prefix, []byte("POINT")) {
+		g, err = unmarshalPoint(s)
+	} else if bytes.HasPrefix(prefix, []byte("LINESTRING")) {
+		g, err = unmarshalLineString(s)
+	} else if bytes.HasPrefix(prefix, []byte("POLYGON")) {
+		g, err = unmarshalPolygon(s)
+	} else if bytes.HasPrefix(prefix, []byte("MULTIPOINT")) {
+		g, err = unmarshalMultiPoint(s)
+	} else if bytes.HasPrefix(prefix, []byte("MULTILINESTRING")) {
+		g, err = unmarshalMultiLineString(s)
+	} else if bytes.HasPrefix(prefix, []byte("MULTIPOLYGON")) {
+		g, err = unmarshalMultiPolygon(s)
+	} else if bytes.HasPrefix(prefix, []byte("GEOMETRYCOLLECTION")) {
+		g, err = unmarshalCollection(s)
+	} else {
+		return nil, ErrUnsupportedGeometry
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return g, nil
+}
+
+// splitByRegexpYield splits the input by the regexp. The first callback can
+// be used to initialize an array with the size of the result, the second
+// is the callback with the matches.
+// We use a yield function because it was faster/used less memory than
+// allocating an array of the results.
+func splitByRegexpYield(s string, re *regexp.Regexp, set func(int), yield func(string) error) error {
+	indexes := re.FindAllStringSubmatchIndex(s, -1)
+	set(len(indexes) + 1)
+	start := 0
+	for _, element := range indexes {
+		err := yield(s[start:element[2]])
+		if err != nil {
+			return err
+		}
+		start = element[3]
+	}
+
+	return yield(s[start:])
+}
+
+// splitOnComma is optimized to split on the regex [\s|\t|\n]*,[\s|\t|\n]*
+// i.e. comma with possible spaces on each side. e.g. ' , '
+// We use a yield function because it was faster/used less memory than
+// allocating an array of the results.
+func splitOnComma(s string, yield func(s string) error) error {
+	// in WKT points are separated by commas, coordinates in points are separated by spaces,
+	// e.g. 1 2,3 4,5 6,7 8
+	// we want to split this and find each point.
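+	// For example, "1 2,3 4 , 5 6" yields "1 2", "3 4" and "5 6".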
+
+	// at is right after the previous space-comma-space match.
+	// once a space-comma-space match is found, we go from 'at' to the start
+	// of the match, that's the split that needs to be returned.
+	var at int
+
+	var start int // the start of a space-comma-space section
+
+	// a space starts a section, we need to see a comma for it to be a valid section
+	var sawSpace, sawComma bool
+	for i := 0; i < len(s); i++ {
+		if s[i] == ',' {
+			if !sawSpace {
+				sawSpace = true
+				start = i
+			}
+			sawComma = true
+			continue
+		}
+
+		if v := s[i]; v == ' ' || v == '\t' || v == '\n' {
+			if !sawSpace {
+				sawSpace = true
+				start = i
+			}
+			continue
+		}
+
+		if sawComma {
+			err := yield(s[at:start])
+			if err != nil {
+				return err
+			}
+			at = i
+		}
+		sawSpace = false
+		sawComma = false
+	}
+
+	return yield(s[at:])
+}
+
+// trimSpaceBrackets trims space and brackets.
+func trimSpaceBrackets(s string) (string, error) {
+	s = trimSpace(s)
+	if len(s) == 0 {
+		return s, nil
+	}
+
+	if s[0] == '(' {
+		s = s[1:]
+	} else {
+		return "", ErrNotWKT
+	}
+
+	if s[len(s)-1] == ')' {
+		s = s[:len(s)-1]
+	} else {
+		return "", ErrNotWKT
+	}
+
+	return trimSpace(s), nil
+}
+
+func trimSpace(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+
+	var start, end int
+
+	for start = 0; start < len(s); start++ {
+		if v := s[start]; v != ' ' && v != '\t' && v != '\n' {
+			break
+		}
+	}
+
+	for end = len(s) - 1; end >= 0; end-- {
+		if v := s[end]; v != ' ' && v != '\t' && v != '\n' {
+			break
+		}
+	}
+
+	if start >= end {
+		return ""
+	}
+
+	return s[start : end+1]
+}
+
+// upperPrefix gets the ToUpper case of the first 20 chars.
+// This is to determine the type without doing a full strings.ToUpper.
+func upperPrefix(s string) []byte {
+	prefix := make([]byte, 20)
+	for i := 0; i < 20 && i < len(s); i++ {
+		if 'a' <= s[i] && s[i] <= 'z' {
+			prefix[i] = s[i] - ('a' - 'A')
+		} else {
+			prefix[i] = s[i]
+		}
+	}
+
+	return prefix
+}
+
+// cut is copied here from strings.Cut so we don't require go1.18.
+func cut(s, sep string) (before, after string, found bool) {
+	if i := strings.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, "", false
+}
diff --git a/vendor/github.com/paulmach/orb/encoding/wkt/wkt.go b/vendor/github.com/paulmach/orb/encoding/wkt/wkt.go
new file mode 100644
index 0000000..758d3be
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/encoding/wkt/wkt.go
@@ -0,0 +1,134 @@
+package wkt
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/paulmach/orb"
+)
+
+// Marshal returns a WKT representation of the geometry.
+func Marshal(g orb.Geometry) []byte {
+	buf := bytes.NewBuffer(nil)
+
+	wkt(buf, g)
+	return buf.Bytes()
+}
+
+// MarshalString returns a WKT representation of the geometry as a string.
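+//
+// A small illustrative sketch:
+//
+//	s := MarshalString(orb.Point{1, 2}) // s == "POINT(1 2)"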
+func MarshalString(g orb.Geometry) string {
+	buf := bytes.NewBuffer(nil)
+
+	wkt(buf, g)
+	return buf.String()
+}
+
+func wkt(buf *bytes.Buffer, geom orb.Geometry) {
+	switch g := geom.(type) {
+	case orb.Point:
+		fmt.Fprintf(buf, "POINT(%g %g)", g[0], g[1])
+	case orb.MultiPoint:
+		if len(g) == 0 {
+			buf.Write([]byte(`MULTIPOINT EMPTY`))
+			return
+		}
+
+		buf.Write([]byte(`MULTIPOINT(`))
+		for i, p := range g {
+			if i != 0 {
+				buf.WriteByte(',')
+			}
+
+			fmt.Fprintf(buf, "(%g %g)", p[0], p[1])
+		}
+		buf.WriteByte(')')
+	case orb.LineString:
+		if len(g) == 0 {
+			buf.Write([]byte(`LINESTRING EMPTY`))
+			return
+		}
+
+		buf.Write([]byte(`LINESTRING`))
+		writeLineString(buf, g)
+	case orb.MultiLineString:
+		if len(g) == 0 {
+			buf.Write([]byte(`MULTILINESTRING EMPTY`))
+			return
+		}
+
+		buf.Write([]byte(`MULTILINESTRING(`))
+		for i, ls := range g {
+			if i != 0 {
+				buf.WriteByte(',')
+			}
+			writeLineString(buf, ls)
+		}
+		buf.WriteByte(')')
+	case orb.Ring:
+		wkt(buf, orb.Polygon{g})
+	case orb.Polygon:
+		if len(g) == 0 {
+			buf.Write([]byte(`POLYGON EMPTY`))
+			return
+		}
+
+		buf.Write([]byte(`POLYGON(`))
+		for i, r := range g {
+			if i != 0 {
+				buf.WriteByte(',')
+			}
+			writeLineString(buf, orb.LineString(r))
+		}
+		buf.WriteByte(')')
+	case orb.MultiPolygon:
+		if len(g) == 0 {
+			buf.Write([]byte(`MULTIPOLYGON EMPTY`))
+			return
+		}
+
+		buf.Write([]byte(`MULTIPOLYGON(`))
+		for i, p := range g {
+			if i != 0 {
+				buf.WriteByte(',')
+			}
+			buf.WriteByte('(')
+			for j, r := range p {
+				if j != 0 {
+					buf.WriteByte(',')
+				}
+				writeLineString(buf, orb.LineString(r))
+			}
+			buf.WriteByte(')')
+		}
+		buf.WriteByte(')')
+	case orb.Collection:
+		if len(g) == 0 {
+			buf.Write([]byte(`GEOMETRYCOLLECTION EMPTY`))
+			return
+		}
+		buf.Write([]byte(`GEOMETRYCOLLECTION(`))
+		for i, c := range g {
+			if i != 0 {
+				buf.WriteByte(',')
+			}
+			wkt(buf, c)
+		}
+		buf.WriteByte(')')
+	case orb.Bound:
+		wkt(buf, g.ToPolygon())
+	default:
+		panic("unsupported type")
+	}
+}
+
+func writeLineString(buf *bytes.Buffer, ls orb.LineString) {
+	buf.WriteByte('(')
+	for i, p := range ls {
+		if i != 0 {
+			buf.WriteByte(',')
+		}
+
+		fmt.Fprintf(buf, "%g %g", p[0], p[1])
+	}
+	buf.WriteByte(')')
+}
diff --git a/vendor/github.com/paulmach/orb/equal.go b/vendor/github.com/paulmach/orb/equal.go
new file mode 100644
index 0000000..7cc87dd
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/equal.go
@@ -0,0 +1,51 @@
+package orb
+
+import (
+	"fmt"
+)
+
+// Equal returns if the two geometries are equal.
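+//
+// A small illustrative sketch:
+//
+//	Equal(Point{1, 2}, Point{1, 2})        // true
+//	Equal(Point{1, 2}, LineString{{1, 2}}) // false, different types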
+func Equal(g1, g2 Geometry) bool {
+	if g1 == nil || g2 == nil {
+		return g1 == g2
+	}
+
+	if g1.GeoJSONType() != g2.GeoJSONType() {
+		return false
+	}
+
+	switch g1 := g1.(type) {
+	case Point:
+		return g1.Equal(g2.(Point))
+	case MultiPoint:
+		return g1.Equal(g2.(MultiPoint))
+	case LineString:
+		return g1.Equal(g2.(LineString))
+	case MultiLineString:
+		return g1.Equal(g2.(MultiLineString))
+	case Ring:
+		g2, ok := g2.(Ring)
+		if !ok {
+			return false
+		}
+		return g1.Equal(g2)
+	case Polygon:
+		g2, ok := g2.(Polygon)
+		if !ok {
+			return false
+		}
+		return g1.Equal(g2)
+	case MultiPolygon:
+		return g1.Equal(g2.(MultiPolygon))
+	case Collection:
+		return g1.Equal(g2.(Collection))
+	case Bound:
+		g2, ok := g2.(Bound)
+		if !ok {
+			return false
+		}
+		return g1.Equal(g2)
+	}
+
+	panic(fmt.Sprintf("geometry type not supported: %T", g1))
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/README.md b/vendor/github.com/paulmach/orb/geojson/README.md
new file mode 100644
index 0000000..07ca932
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/README.md
@@ -0,0 +1,132 @@
+# orb/geojson [![Godoc Reference](https://pkg.go.dev/badge/github.com/paulmach/orb)](https://pkg.go.dev/github.com/paulmach/orb/geojson)
+
+This package **encodes and decodes** [GeoJSON](http://geojson.org/) into Go structs
+using the geometries in the [orb](https://github.com/paulmach/orb) package.
+
+Supports both the [json.Marshaler](https://pkg.go.dev/encoding/json#Marshaler) and
+[json.Unmarshaler](https://pkg.go.dev/encoding/json#Unmarshaler) interfaces.
+The package also provides helper functions such as `UnmarshalFeatureCollection` and `UnmarshalFeature`.
+
+The types also support BSON via the [bson.Marshaler](https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#Marshaler) and
+[bson.Unmarshaler](https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#Unmarshaler) interfaces.
+These types can be used directly when working with MongoDB.
+
+## Unmarshalling (JSON -> Go)
+
+```go
+rawJSON := []byte(`
+  { "type": "FeatureCollection",
+    "features": [
+      { "type": "Feature",
+        "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
+        "properties": {"prop0": "value0"}
+      }
+    ]
+  }`)
+
+fc, _ := geojson.UnmarshalFeatureCollection(rawJSON)
+
+// or
+
+fc := geojson.NewFeatureCollection()
+err := json.Unmarshal(rawJSON, &fc)
+
+// Geometry will be unmarshalled into the correct orb.Geometry type.
+point := fc.Features[0].Geometry.(orb.Point)
+```
+
+## Marshalling (Go -> JSON)
+
+```go
+fc := geojson.NewFeatureCollection()
+fc.Append(geojson.NewFeature(orb.Point{1, 2}))
+
+rawJSON, _ := fc.MarshalJSON()
+
+// or
+blob, _ := json.Marshal(fc)
+```
+
+## Foreign/extra members in a feature collection
+
+```go
+rawJSON := []byte(`
+  { "type": "FeatureCollection",
+    "generator": "myapp",
+    "timestamp": "2020-06-15T01:02:03Z",
+    "features": [
+      { "type": "Feature",
+        "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
+        "properties": {"prop0": "value0"}
+      }
+    ]
+  }`)
+
+fc, _ := geojson.UnmarshalFeatureCollection(rawJSON)
+
+fc.ExtraMembers["generator"] // == "myapp"
+fc.ExtraMembers["timestamp"] // == "2020-06-15T01:02:03Z"
+
+// Marshalling will include values in `ExtraMembers` in the
+// base featureCollection object.
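+
+// A hedged sketch: re-marshalling puts the extra members back in the
+// top-level feature collection object (assumes "encoding/json" is imported).
+data, _ := json.Marshal(fc)
+// data again contains "generator" and "timestamp" at the top level.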
+```
+
+## Performance
+
+For performance critical applications, consider a
+third party replacement of "encoding/json" like [github.com/json-iterator/go](https://github.com/json-iterator/go).
+
+This can be enabled with something like this:
+
+```go
+import (
+	jsoniter "github.com/json-iterator/go"
+	"github.com/paulmach/orb"
+)
+
+var c = jsoniter.Config{
+	EscapeHTML:              true,
+	SortMapKeys:             false,
+	MarshalFloatWith6Digits: true,
+}.Froze()
+
+CustomJSONMarshaler = c
+CustomJSONUnmarshaler = c
+```
+
+The above change can have dramatic performance implications, see the benchmarks below
+on a 100k feature collection file:
+
+```
+benchmark                            old ns/op     new ns/op     delta
+BenchmarkFeatureMarshalJSON-12       2694543       733480        -72.78%
+BenchmarkFeatureUnmarshalJSON-12     5383825       2738183       -49.14%
+BenchmarkGeometryMarshalJSON-12      210107        62789         -70.12%
+BenchmarkGeometryUnmarshalJSON-12    691472        144689        -79.08%
+
+benchmark                            old allocs    new allocs    delta
+BenchmarkFeatureMarshalJSON-12       7818          2316          -70.38%
+BenchmarkFeatureUnmarshalJSON-12     23047         31946         +38.61%
+BenchmarkGeometryMarshalJSON-12      2             3             +50.00%
+BenchmarkGeometryUnmarshalJSON-12    2042          18            -99.12%
+
+benchmark                            old bytes     new bytes     delta
+BenchmarkFeatureMarshalJSON-12       794088        490251        -38.26%
+BenchmarkFeatureUnmarshalJSON-12     766354        1068497       +39.43%
+BenchmarkGeometryMarshalJSON-12      24787         18650         -24.76%
+BenchmarkGeometryUnmarshalJSON-12    79784         51374         -35.61%
+```
+
+## Feature Properties
+
+GeoJSON features can have properties of any type. This can cause issues in a statically typed
+language such as Go. Included is a `Properties` type with some helper methods that will try to
+force convert a property. An optional default will be used if the property is missing or of the
+wrong type.
+
+```go
+f.Properties.MustBool(key string, def ...bool) bool
+f.Properties.MustFloat64(key string, def ...float64) float64
+f.Properties.MustInt(key string, def ...int) int
+f.Properties.MustString(key string, def ...string) string
+```
diff --git a/vendor/github.com/paulmach/orb/geojson/bbox.go b/vendor/github.com/paulmach/orb/geojson/bbox.go
new file mode 100644
index 0000000..cd2c45e
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/bbox.go
@@ -0,0 +1,38 @@
+package geojson
+
+import "github.com/paulmach/orb"
+
+// BBox is for the geojson bbox attribute which is an array with all axes
+// of the most southwesterly point followed by all axes of the most northeasterly point.
+type BBox []float64
+
+// NewBBox creates a bbox from a bound.
+func NewBBox(b orb.Bound) BBox {
+	return []float64{
+		b.Min[0], b.Min[1],
+		b.Max[0], b.Max[1],
+	}
+}
+
+// Valid checks if the bbox is present and has at least 4 elements (an even count).
+func (bb BBox) Valid() bool {
+	if bb == nil {
+		return false
+	}
+
+	return len(bb) >= 4 && len(bb)%2 == 0
+}
+
+// Bound returns the orb.Bound for the BBox.
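+//
+// A small illustrative sketch:
+//
+//	bb := BBox{-1, -1, 1, 1}
+//	b := bb.Bound() // orb.Bound{Min: orb.Point{-1, -1}, Max: orb.Point{1, 1}}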
+func (bb BBox) Bound() orb.Bound {
+	if !bb.Valid() {
+		return orb.Bound{}
+	}
+
+	mid := len(bb) / 2
+
+	return orb.Bound{
+		Min: orb.Point{bb[0], bb[1]},
+		Max: orb.Point{bb[mid], bb[mid+1]},
+	}
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/feature.go b/vendor/github.com/paulmach/orb/geojson/feature.go
new file mode 100644
index 0000000..0dd246d
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/feature.go
@@ -0,0 +1,138 @@
+package geojson
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/paulmach/orb"
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+// A Feature corresponds to a GeoJSON feature object.
+type Feature struct {
+	ID         interface{}  `json:"id,omitempty"`
+	Type       string       `json:"type"`
+	BBox       BBox         `json:"bbox,omitempty"`
+	Geometry   orb.Geometry `json:"geometry"`
+	Properties Properties   `json:"properties"`
+}
+
+// NewFeature creates and initializes a GeoJSON feature given the required attributes.
+func NewFeature(geometry orb.Geometry) *Feature {
+	return &Feature{
+		Type:       "Feature",
+		Geometry:   geometry,
+		Properties: make(map[string]interface{}),
+	}
+}
+
+// Point implements the orb.Pointer interface so that Features can be used
+// with quadtrees. The point returned is the center of the Bound of the geometry.
+// To represent the geometry with another point you must create a wrapper type.
+func (f *Feature) Point() orb.Point {
+	return f.Geometry.Bound().Center()
+}
+
+var _ orb.Pointer = &Feature{}
+
+// MarshalJSON converts the feature object into the proper JSON.
+// It will handle the encoding of all the child geometries.
+// Alternately one can call json.Marshal(f) directly for the same result.
+func (f Feature) MarshalJSON() ([]byte, error) {
+	return marshalJSON(newFeatureDoc(&f))
+}
+
+// MarshalBSON converts the feature object into the proper BSON.
+// It will handle the encoding of all the child geometries.
+// Alternately one can call bson.Marshal(f) directly for the same result.
+func (f Feature) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(newFeatureDoc(&f))
+}
+
+func newFeatureDoc(f *Feature) *featureDoc {
+	doc := &featureDoc{
+		ID:         f.ID,
+		Type:       "Feature",
+		Properties: f.Properties,
+		BBox:       f.BBox,
+		Geometry:   NewGeometry(f.Geometry),
+	}
+
+	if len(doc.Properties) == 0 {
+		doc.Properties = nil
+	}
+
+	return doc
+}
+
+// UnmarshalFeature decodes the data into a GeoJSON feature.
+// Alternately one can call json.Unmarshal(f) directly for the same result.
+func UnmarshalFeature(data []byte) (*Feature, error) {
+	f := &Feature{}
+	err := f.UnmarshalJSON(data)
+	if err != nil {
+		return nil, err
+	}
+
+	return f, nil
+}
+
+// UnmarshalJSON handles the correct unmarshalling of the data
+// into the orb.Geometry types.
+func (f *Feature) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, []byte(`null`)) {
+		*f = Feature{}
+		return nil
+	}
+
+	doc := &featureDoc{}
+	err := unmarshalJSON(data, &doc)
+	if err != nil {
+		return err
+	}
+
+	return featureUnmarshalFinish(doc, f)
+}
+
+// UnmarshalBSON will unmarshal a BSON document created with bson.Marshal.
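+//
+// A small illustrative round trip:
+//
+//	data, _ := bson.Marshal(f)
+//	var f2 Feature
+//	err := f2.UnmarshalBSON(data)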
+func (f *Feature) UnmarshalBSON(data []byte) error {
+	doc := &featureDoc{}
+	err := bson.Unmarshal(data, &doc)
+	if err != nil {
+		return err
+	}
+
+	return featureUnmarshalFinish(doc, f)
+}
+
+func featureUnmarshalFinish(doc *featureDoc, f *Feature) error {
+	if doc.Type != "Feature" {
+		return fmt.Errorf("geojson: not a feature: type=%s", doc.Type)
+	}
+
+	var g orb.Geometry
+	if doc.Geometry != nil {
+		if doc.Geometry.Coordinates == nil && doc.Geometry.Geometries == nil {
+			return ErrInvalidGeometry
+		}
+		g = doc.Geometry.Geometry()
+	}
+
+	*f = Feature{
+		ID:         doc.ID,
+		Type:       doc.Type,
+		Properties: doc.Properties,
+		BBox:       doc.BBox,
+		Geometry:   g,
+	}
+
+	return nil
+}
+
+type featureDoc struct {
+	ID         interface{} `json:"id,omitempty" bson:"id"`
+	Type       string      `json:"type" bson:"type"`
+	BBox       BBox        `json:"bbox,omitempty" bson:"bbox,omitempty"`
+	Geometry   *Geometry   `json:"geometry" bson:"geometry"`
+	Properties Properties  `json:"properties" bson:"properties"`
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/feature_collection.go b/vendor/github.com/paulmach/orb/geojson/feature_collection.go
new file mode 100644
index 0000000..0235bc5
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/feature_collection.go
@@ -0,0 +1,197 @@
+/*
+Package geojson is a library for encoding and decoding GeoJSON into Go structs
+using the geometries in the orb package. Supports both the json.Marshaler and
+json.Unmarshaler interfaces as well as helper functions such as
+`UnmarshalFeatureCollection` and `UnmarshalFeature`.
+*/
+package geojson
+
+import (
+	"bytes"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+const featureCollection = "FeatureCollection"
+
+// A FeatureCollection correlates to a GeoJSON feature collection.
+type FeatureCollection struct {
+	Type     string     `json:"type"`
+	BBox     BBox       `json:"bbox,omitempty"`
+	Features []*Feature `json:"features"`
+
+	// ExtraMembers can be used to encode/decode extra key/value members in
+	// the base of the feature collection. Note that keys of "type", "bbox"
+	// and "features" will not work as those are reserved by the GeoJSON spec.
+	ExtraMembers Properties `json:"-"`
+}
+
+// NewFeatureCollection creates and initializes a new feature collection.
+func NewFeatureCollection() *FeatureCollection {
+	return &FeatureCollection{
+		Type:     featureCollection,
+		Features: []*Feature{},
+	}
+}
+
+// Append appends a feature to the collection.
+func (fc *FeatureCollection) Append(feature *Feature) *FeatureCollection {
+	fc.Features = append(fc.Features, feature)
+	return fc
+}
+
+// MarshalJSON converts the feature collection object into the proper JSON.
+// It will handle the encoding of all the child features and geometries.
+// Alternately one can call json.Marshal(fc) directly for the same result.
+// Items in the ExtraMembers map will be included in the base of the
+// feature collection object.
+func (fc FeatureCollection) MarshalJSON() ([]byte, error) {
+	m := newFeatureCollectionDoc(fc)
+	return marshalJSON(m)
+}
+
+// MarshalBSON converts the feature collection object into a BSON document
+// represented by bytes. It will handle the encoding of all the child features
+// and geometries.
+// Items in the ExtraMembers map will be included in the base of the
+// feature collection object.
+func (fc FeatureCollection) MarshalBSON() ([]byte, error) {
+	m := newFeatureCollectionDoc(fc)
+	return bson.Marshal(m)
+}
+
+func newFeatureCollectionDoc(fc FeatureCollection) map[string]interface{} {
+	var tmp map[string]interface{}
+	if fc.ExtraMembers != nil {
+		tmp = fc.ExtraMembers.Clone()
+	} else {
+		tmp = make(map[string]interface{}, 3)
+	}
+
+	tmp["type"] = featureCollection
+	delete(tmp, "bbox")
+	if fc.BBox != nil {
+		tmp["bbox"] = fc.BBox
+	}
+	if fc.Features == nil {
+		tmp["features"] = []*Feature{}
+	} else {
+		tmp["features"] = fc.Features
+	}
+
+	return tmp
+}
+
+// UnmarshalJSON decodes the data into a GeoJSON feature collection.
+// Extra/foreign members will be put into the `ExtraMembers` attribute.
+func (fc *FeatureCollection) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, []byte(`null`)) {
+		*fc = FeatureCollection{}
+		return nil
+	}
+
+	tmp := make(map[string]nocopyRawMessage, 4)
+
+	err := unmarshalJSON(data, &tmp)
+	if err != nil {
+		return err
+	}
+
+	*fc = FeatureCollection{}
+	for key, value := range tmp {
+		switch key {
+		case "type":
+			err := unmarshalJSON(value, &fc.Type)
+			if err != nil {
+				return err
+			}
+		case "bbox":
+			err := unmarshalJSON(value, &fc.BBox)
+			if err != nil {
+				return err
+			}
+		case "features":
+			err := unmarshalJSON(value, &fc.Features)
+			if err != nil {
+				return err
+			}
+		default:
+			if fc.ExtraMembers == nil {
+				fc.ExtraMembers = Properties{}
+			}
+
+			var val interface{}
+			err := unmarshalJSON(value, &val)
+			if err != nil {
+				return err
+			}
+			fc.ExtraMembers[key] = val
+		}
+	}
+
+	if fc.Type != featureCollection {
+		return fmt.Errorf("geojson: not a feature collection: type=%s", fc.Type)
+	}
+
+	return nil
+}
+
+// UnmarshalBSON will unmarshal a BSON document created with bson.Marshal.
+// Extra/foreign members will be put into the `ExtraMembers` attribute.
+func (fc *FeatureCollection) UnmarshalBSON(data []byte) error {
+	tmp := make(map[string]bson.RawValue, 4)
+
+	err := bson.Unmarshal(data, &tmp)
+	if err != nil {
+		return err
+	}
+
+	*fc = FeatureCollection{}
+	for key, value := range tmp {
+		switch key {
+		case "type":
+			fc.Type, _ = bson.RawValue(value).StringValueOK()
+		case "bbox":
+			err := value.Unmarshal(&fc.BBox)
+			if err != nil {
+				return err
+			}
+		case "features":
+			err := value.Unmarshal(&fc.Features)
+			if err != nil {
+				return err
+			}
+		default:
+			if fc.ExtraMembers == nil {
+				fc.ExtraMembers = Properties{}
+			}
+
+			var val interface{}
+			err := value.Unmarshal(&val)
+			if err != nil {
+				return err
+			}
+			fc.ExtraMembers[key] = val
+		}
+	}
+
+	if fc.Type != featureCollection {
+		return fmt.Errorf("geojson: not a feature collection: type=%s", fc.Type)
+	}
+
+	return nil
+}
+
+// UnmarshalFeatureCollection decodes the data into a GeoJSON feature collection.
+// Alternately one can call json.Unmarshal(fc) directly for the same result.
+func UnmarshalFeatureCollection(data []byte) (*FeatureCollection, error) {
+	fc := &FeatureCollection{}
+
+	err := fc.UnmarshalJSON(data)
+	if err != nil {
+		return nil, err
+	}
+
+	return fc, nil
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/geometry.go b/vendor/github.com/paulmach/orb/geojson/geometry.go
new file mode 100644
index 0000000..1524521
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/geometry.go
@@ -0,0 +1,586 @@
+package geojson
+
+import (
+	"errors"
+
+	"github.com/paulmach/orb"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ErrInvalidGeometry will be returned if the JSON of the geometry is invalid.
+var ErrInvalidGeometry = errors.New("geojson: invalid geometry")
+
+// A Geometry matches the structure of a GeoJSON Geometry.
+type Geometry struct {
+	Type        string       `json:"type"`
+	Coordinates orb.Geometry `json:"coordinates,omitempty"`
+	Geometries  []*Geometry  `json:"geometries,omitempty"`
+}
+
+// NewGeometry will create a Geometry object but will convert
+// the input into a GeoJSON geometry. For example, it will convert
+// Rings and Bounds into Polygons.
+func NewGeometry(g orb.Geometry) *Geometry {
+	jg := &Geometry{}
+	switch g := g.(type) {
+	case orb.Ring:
+		jg.Coordinates = orb.Polygon{g}
+	case orb.Bound:
+		jg.Coordinates = g.ToPolygon()
+	case orb.Collection:
+		for _, c := range g {
+			jg.Geometries = append(jg.Geometries, NewGeometry(c))
+		}
+		jg.Type = g.GeoJSONType()
+	default:
+		jg.Coordinates = g
+	}
+
+	if jg.Coordinates != nil {
+		jg.Type = jg.Coordinates.GeoJSONType()
+	}
+	return jg
+}
+
+// Geometry returns the orb.Geometry for the geojson Geometry.
+// This will convert the "Geometries" into an orb.Collection if applicable.
+func (g *Geometry) Geometry() orb.Geometry {
+	if g.Coordinates != nil {
+		return g.Coordinates
+	}
+
+	c := make(orb.Collection, 0, len(g.Geometries))
+	for _, geom := range g.Geometries {
+		c = append(c, geom.Geometry())
+	}
+	return c
+}
+
+// MarshalJSON will marshal the geometry into the correct JSON structure.
+func (g *Geometry) MarshalJSON() ([]byte, error) {
+	if g.Coordinates == nil && len(g.Geometries) == 0 {
+		return []byte(`null`), nil
+	}
+
+	ng := newGeometryMarshallDoc(g)
+	return marshalJSON(ng)
+}
+
+// MarshalBSON will convert the geometry into a BSON document with the structure
+// of a GeoJSON Geometry. This function is used when the geometry is the top level
+// document to be marshalled.
+func (g *Geometry) MarshalBSON() ([]byte, error) {
+	ng := newGeometryMarshallDoc(g)
+	return bson.Marshal(ng)
+}
+
+// MarshalBSONValue will marshal the geometry into a BSON value
+// with the structure of a GeoJSON Geometry.
+func (g *Geometry) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	// implementing MarshalBSONValue allows us to marshal into a null value
+	// needed to match behavior with the JSON marshalling.
+
+	if g.Coordinates == nil && len(g.Geometries) == 0 {
+		return bsontype.Null, nil, nil
+	}
+
+	ng := newGeometryMarshallDoc(g)
+	return bson.MarshalValue(ng)
+}
+
+func newGeometryMarshallDoc(g *Geometry) *geometryMarshallDoc {
+	ng := &geometryMarshallDoc{}
+	switch g := g.Coordinates.(type) {
+	case orb.Ring:
+		ng.Coordinates = orb.Polygon{g}
+	case orb.Bound:
+		ng.Coordinates = g.ToPolygon()
+	case orb.Collection:
+		ng.Geometries = make([]*Geometry, 0, len(g))
+		for _, c := range g {
+			ng.Geometries = append(ng.Geometries, NewGeometry(c))
+		}
+		ng.Type = g.GeoJSONType()
+	default:
+		ng.Coordinates = g
+	}
+
+	if ng.Coordinates != nil {
+		ng.Type = ng.Coordinates.GeoJSONType()
+	}
+
+	if len(g.Geometries) > 0 {
+		ng.Geometries = g.Geometries
+		ng.Type = orb.Collection{}.GeoJSONType()
+	}
+
+	return ng
+}
+
+// UnmarshalGeometry decodes the JSON data into a GeoJSON geometry.
+// Alternately one can call json.Unmarshal(g) directly for the same result.
+func UnmarshalGeometry(data []byte) (*Geometry, error) {
+	g := &Geometry{}
+	err := unmarshalJSON(data, g)
+	if err != nil {
+		return nil, err
+	}
+
+	return g, nil
+}
+
+// UnmarshalJSON will unmarshal the correct geometry from the JSON structure.
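+//
+// A small illustrative sketch:
+//
+//	g := &Geometry{}
+//	err := g.UnmarshalJSON([]byte(`{"type":"Point","coordinates":[1,2]}`))
+//	// g.Coordinates is orb.Point{1, 2} when err is nil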
+func (g *Geometry) UnmarshalJSON(data []byte) error { + jg := &jsonGeometry{} + err := unmarshalJSON(data, jg) + if err != nil { + return err + } + + switch jg.Type { + case "Point": + p := orb.Point{} + err = unmarshalJSON(jg.Coordinates, &p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPoint": + mp := orb.MultiPoint{} + err = unmarshalJSON(jg.Coordinates, &mp) + if err != nil { + return err + } + g.Coordinates = mp + case "LineString": + ls := orb.LineString{} + err = unmarshalJSON(jg.Coordinates, &ls) + if err != nil { + return err + } + g.Coordinates = ls + case "MultiLineString": + mls := orb.MultiLineString{} + err = unmarshalJSON(jg.Coordinates, &mls) + if err != nil { + return err + } + g.Coordinates = mls + case "Polygon": + p := orb.Polygon{} + err = unmarshalJSON(jg.Coordinates, &p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPolygon": + mp := orb.MultiPolygon{} + err = unmarshalJSON(jg.Coordinates, &mp) + if err != nil { + return err + } + g.Coordinates = mp + case "GeometryCollection": + g.Geometries = jg.Geometries + default: + return ErrInvalidGeometry + } + + g.Type = g.Geometry().GeoJSONType() + + return nil +} + +// UnmarshalBSON will unmarshal a BSON document created with bson.Marshal. +func (g *Geometry) UnmarshalBSON(data []byte) error { + bg := &bsonGeometry{} + err := bson.Unmarshal(data, bg) + if err != nil { + return err + } + + switch bg.Type { + case "Point": + p := orb.Point{} + err = bg.Coordinates.Unmarshal(&p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPoint": + mp := orb.MultiPoint{} + err = bg.Coordinates.Unmarshal(&mp) + if err != nil { + return err + } + g.Coordinates = mp + case "LineString": + ls := orb.LineString{} + + err = bg.Coordinates.Unmarshal(&ls) + if err != nil { + return err + } + g.Coordinates = ls + case "MultiLineString": + mls := orb.MultiLineString{} + err = bg.Coordinates.Unmarshal(&mls) + if err != nil { + return err + } + g.Coordinates = mls + case "Polygon": + p := orb.Polygon{} + err = bg.Coordinates.Unmarshal(&p) + if err != nil { + return err + } + g.Coordinates = p + case "MultiPolygon": + mp := orb.MultiPolygon{} + err = bg.Coordinates.Unmarshal(&mp) + if err != nil { + return err + } + g.Coordinates = mp + case "GeometryCollection": + g.Geometries = bg.Geometries + default: + return ErrInvalidGeometry + } + + g.Type = g.Geometry().GeoJSONType() + + return nil +} + +// A Point is a helper type that will marshal to/from a GeoJSON Point geometry. +type Point orb.Point + +// Geometry will return the orb.Geometry version of the data. +func (p Point) Geometry() orb.Geometry { + return orb.Point(p) +} + +// MarshalJSON will convert the Point into a GeoJSON Point geometry. +func (p Point) MarshalJSON() ([]byte, error) { + return marshalJSON(&Geometry{Coordinates: orb.Point(p)}) +} + +// MarshalBSON will convert the Point into a BSON value following the GeoJSON Point structure. +func (p Point) MarshalBSON() ([]byte, error) { + return bson.Marshal(&Geometry{Coordinates: orb.Point(p)}) +} + +// UnmarshalJSON will unmarshal the GeoJSON Point geometry. +func (p *Point) UnmarshalJSON(data []byte) error { + g := &Geometry{} + err := unmarshalJSON(data, &g) + if err != nil { + return err + } + + point, ok := g.Coordinates.(orb.Point) + if !ok { + return errors.New("geojson: not a Point type") + } + + *p = Point(point) + return nil +} + +// UnmarshalBSON will unmarshal GeoJSON Point geometry. 
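+//
+// A minimal round-trip sketch (the coordinate values are illustrative):
+//
+//	data, _ := bson.Marshal(Point{-122.39, 37.62})
+//	var p Point
+//	_ = p.UnmarshalBSON(data)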
+func (p *Point) UnmarshalBSON(data []byte) error {
+	g := &Geometry{}
+	err := bson.Unmarshal(data, &g)
+	if err != nil {
+		return err
+	}
+
+	point, ok := g.Coordinates.(orb.Point)
+	if !ok {
+		return errors.New("geojson: not a Point type")
+	}
+
+	*p = Point(point)
+	return nil
+}
+
+// A MultiPoint is a helper type that will marshal to/from a GeoJSON MultiPoint geometry.
+type MultiPoint orb.MultiPoint
+
+// Geometry will return the orb.Geometry version of the data.
+func (mp MultiPoint) Geometry() orb.Geometry {
+	return orb.MultiPoint(mp)
+}
+
+// MarshalJSON will convert the MultiPoint into a GeoJSON MultiPoint geometry.
+func (mp MultiPoint) MarshalJSON() ([]byte, error) {
+	return marshalJSON(&Geometry{Coordinates: orb.MultiPoint(mp)})
+}
+
+// MarshalBSON will convert the MultiPoint into a GeoJSON MultiPoint geometry BSON document.
+func (mp MultiPoint) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(&Geometry{Coordinates: orb.MultiPoint(mp)})
+}
+
+// UnmarshalJSON will unmarshal the GeoJSON MultiPoint geometry.
+func (mp *MultiPoint) UnmarshalJSON(data []byte) error {
+	g := &Geometry{}
+	err := unmarshalJSON(data, &g)
+	if err != nil {
+		return err
+	}
+
+	multiPoint, ok := g.Coordinates.(orb.MultiPoint)
+	if !ok {
+		return errors.New("geojson: not a MultiPoint type")
+	}
+
+	*mp = MultiPoint(multiPoint)
+	return nil
+}
+
+// UnmarshalBSON will unmarshal the GeoJSON MultiPoint geometry.
+func (mp *MultiPoint) UnmarshalBSON(data []byte) error {
+	g := &Geometry{}
+	err := bson.Unmarshal(data, &g)
+	if err != nil {
+		return err
+	}
+
+	multiPoint, ok := g.Coordinates.(orb.MultiPoint)
+	if !ok {
+		return errors.New("geojson: not a MultiPoint type")
+	}
+
+	*mp = MultiPoint(multiPoint)
+	return nil
+}
+
+// A LineString is a helper type that will marshal to/from a GeoJSON LineString geometry.
+type LineString orb.LineString
+
+// Geometry will return the orb.Geometry version of the data.
+func (ls LineString) Geometry() orb.Geometry {
+	return orb.LineString(ls)
+}
+
+// MarshalJSON will convert the LineString into a GeoJSON LineString geometry.
+func (ls LineString) MarshalJSON() ([]byte, error) {
+	return marshalJSON(&Geometry{Coordinates: orb.LineString(ls)})
+}
+
+// MarshalBSON will convert the LineString into a GeoJSON LineString geometry.
+func (ls LineString) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(&Geometry{Coordinates: orb.LineString(ls)})
+}
+
+// UnmarshalJSON will unmarshal the GeoJSON LineString geometry.
+func (ls *LineString) UnmarshalJSON(data []byte) error {
+	g := &Geometry{}
+	err := unmarshalJSON(data, &g)
+	if err != nil {
+		return err
+	}
+
+	lineString, ok := g.Coordinates.(orb.LineString)
+	if !ok {
+		return errors.New("geojson: not a LineString type")
+	}
+
+	*ls = LineString(lineString)
+	return nil
+}
+
+// UnmarshalBSON will unmarshal the GeoJSON LineString geometry.
+func (ls *LineString) UnmarshalBSON(data []byte) error {
+	g := &Geometry{}
+	err := bson.Unmarshal(data, &g)
+	if err != nil {
+		return err
+	}
+
+	lineString, ok := g.Coordinates.(orb.LineString)
+	if !ok {
+		return errors.New("geojson: not a LineString type")
+	}
+
+	*ls = LineString(lineString)
+	return nil
+}
+
+// A MultiLineString is a helper type that will marshal to/from a GeoJSON MultiLineString geometry.
+type MultiLineString orb.MultiLineString
+
+// Geometry will return the orb.Geometry version of the data.
+func (mls MultiLineString) Geometry() orb.Geometry {
+	return orb.MultiLineString(mls)
+}
+
+// MarshalJSON will convert the MultiLineString into a GeoJSON MultiLineString geometry.
+func (mls MultiLineString) MarshalJSON() ([]byte, error) {
+	return marshalJSON(&Geometry{Coordinates: orb.MultiLineString(mls)})
+}
+
+// MarshalBSON will convert the MultiLineString into a GeoJSON MultiLineString geometry.
+func (mls MultiLineString) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(&Geometry{Coordinates: orb.MultiLineString(mls)})
+}
+
+// UnmarshalJSON will unmarshal the GeoJSON MultiLineString geometry.
+func (mls *MultiLineString) UnmarshalJSON(data []byte) error {
+	g := &Geometry{}
+	err := unmarshalJSON(data, &g)
+	if err != nil {
+		return err
+	}
+
+	multilineString, ok := g.Coordinates.(orb.MultiLineString)
+	if !ok {
+		return errors.New("geojson: not a MultiLineString type")
+	}
+
+	*mls = MultiLineString(multilineString)
+	return nil
+}
+
+// UnmarshalBSON will unmarshal the GeoJSON MultiLineString geometry.
+func (mls *MultiLineString) UnmarshalBSON(data []byte) error {
+	g := &Geometry{}
+	err := bson.Unmarshal(data, &g)
+	if err != nil {
+		return err
+	}
+
+	multilineString, ok := g.Coordinates.(orb.MultiLineString)
+	if !ok {
+		return errors.New("geojson: not a MultiLineString type")
+	}
+
+	*mls = MultiLineString(multilineString)
+	return nil
+}
+
+// A Polygon is a helper type that will marshal to/from a GeoJSON Polygon geometry.
+type Polygon orb.Polygon
+
+// Geometry will return the orb.Geometry version of the data.
+func (p Polygon) Geometry() orb.Geometry {
+	return orb.Polygon(p)
+}
+
+// MarshalJSON will convert the Polygon into a GeoJSON Polygon geometry.
+func (p Polygon) MarshalJSON() ([]byte, error) {
+	return marshalJSON(&Geometry{Coordinates: orb.Polygon(p)})
+}
+
+// MarshalBSON will convert the Polygon into a GeoJSON Polygon geometry.
+func (p Polygon) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(&Geometry{Coordinates: orb.Polygon(p)})
+}
+
+// UnmarshalJSON will unmarshal the GeoJSON Polygon geometry.
+func (p *Polygon) UnmarshalJSON(data []byte) error {
+	g := &Geometry{}
+	err := unmarshalJSON(data, &g)
+	if err != nil {
+		return err
+	}
+
+	polygon, ok := g.Coordinates.(orb.Polygon)
+	if !ok {
+		return errors.New("geojson: not a Polygon type")
+	}
+
+	*p = Polygon(polygon)
+	return nil
+}
+
+// UnmarshalBSON will unmarshal the GeoJSON Polygon geometry.
+func (p *Polygon) UnmarshalBSON(data []byte) error {
+	g := &Geometry{}
+	err := bson.Unmarshal(data, &g)
+	if err != nil {
+		return err
+	}
+
+	polygon, ok := g.Coordinates.(orb.Polygon)
+	if !ok {
+		return errors.New("geojson: not a Polygon type")
+	}
+
+	*p = Polygon(polygon)
+	return nil
+}
+
+// A MultiPolygon is a helper type that will marshal to/from a GeoJSON MultiPolygon geometry.
+type MultiPolygon orb.MultiPolygon
+
+// Geometry will return the orb.Geometry version of the data.
+func (mp MultiPolygon) Geometry() orb.Geometry {
+	return orb.MultiPolygon(mp)
+}
+
+// MarshalJSON will convert the MultiPolygon into a GeoJSON MultiPolygon geometry.
+func (mp MultiPolygon) MarshalJSON() ([]byte, error) {
+	return marshalJSON(&Geometry{Coordinates: orb.MultiPolygon(mp)})
+}
+
+// MarshalBSON will convert the MultiPolygon into a GeoJSON MultiPolygon geometry.
+func (mp MultiPolygon) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(&Geometry{Coordinates: orb.MultiPolygon(mp)})
+}
+
+// UnmarshalJSON will unmarshal the GeoJSON MultiPolygon geometry.
+func (mp *MultiPolygon) UnmarshalJSON(data []byte) error {
+	g := &Geometry{}
+	err := unmarshalJSON(data, &g)
+	if err != nil {
+		return err
+	}
+
+	multiPolygon, ok := g.Coordinates.(orb.MultiPolygon)
+	if !ok {
+		return errors.New("geojson: not a MultiPolygon type")
+	}
+
+	*mp = MultiPolygon(multiPolygon)
+	return nil
+}
+
+// UnmarshalBSON will unmarshal the GeoJSON MultiPolygon geometry.
+func (mp *MultiPolygon) UnmarshalBSON(data []byte) error {
+	g := &Geometry{}
+	err := bson.Unmarshal(data, &g)
+	if err != nil {
+		return err
+	}
+
+	multiPolygon, ok := g.Coordinates.(orb.MultiPolygon)
+	if !ok {
+		return errors.New("geojson: not a MultiPolygon type")
+	}
+
+	*mp = MultiPolygon(multiPolygon)
+	return nil
+}
+
+type bsonGeometry struct {
+	Type        string        `json:"type" bson:"type"`
+	Coordinates bson.RawValue `json:"coordinates" bson:"coordinates"`
+	Geometries  []*Geometry   `json:"geometries,omitempty" bson:"geometries"`
+}
+
+type jsonGeometry struct {
+	Type        string           `json:"type"`
+	Coordinates nocopyRawMessage `json:"coordinates"`
+	Geometries  []*Geometry      `json:"geometries,omitempty"`
+}
+
+type geometryMarshallDoc struct {
+	Type        string       `json:"type" bson:"type"`
+	Coordinates orb.Geometry `json:"coordinates,omitempty" bson:"coordinates,omitempty"`
+	Geometries  []*Geometry  `json:"geometries,omitempty" bson:"geometries,omitempty"`
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/json.go b/vendor/github.com/paulmach/orb/geojson/json.go
new file mode 100644
index 0000000..7b8d654
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/json.go
@@ -0,0 +1,74 @@
+package geojson
+
+import "encoding/json"
+
+// CustomJSONMarshaler can be set to have the code use a different
+// json marshaler than the default in the standard library.
+// One use case is enabling `github.com/json-iterator/go`
+// with something like this:
+//
+//	import (
+//		jsoniter "github.com/json-iterator/go"
+//		"github.com/paulmach/orb/geojson"
+//	)
+//
+//	var c = jsoniter.Config{
+//		EscapeHTML:              true,
+//		SortMapKeys:             false,
+//		MarshalFloatWith6Digits: true,
+//	}.Froze()
+//
+//	geojson.CustomJSONMarshaler = c
+//	geojson.CustomJSONUnmarshaler = c
+//
+// Note that any errors encountered during marshaling will be different.
+var CustomJSONMarshaler interface {
+	Marshal(v interface{}) ([]byte, error)
+} = nil
+
+// CustomJSONUnmarshaler can be set to have the code use a different
+// json unmarshaler than the default in the standard library.
+// One use case is enabling `github.com/json-iterator/go`
+// with something like this:
+//
+//	import (
+//		jsoniter "github.com/json-iterator/go"
+//		"github.com/paulmach/orb/geojson"
+//	)
+//
+//	var c = jsoniter.Config{
+//		EscapeHTML:              true,
+//		SortMapKeys:             false,
+//		MarshalFloatWith6Digits: true,
+//	}.Froze()
+//
+//	geojson.CustomJSONMarshaler = c
+//	geojson.CustomJSONUnmarshaler = c
+//
+// Note that any errors encountered during unmarshaling will be different.
+var CustomJSONUnmarshaler interface {
+	Unmarshal(data []byte, v interface{}) error
+} = nil
+
+func marshalJSON(v interface{}) ([]byte, error) {
+	if CustomJSONMarshaler == nil {
+		return json.Marshal(v)
+	}
+
+	return CustomJSONMarshaler.Marshal(v)
+}
+
+func unmarshalJSON(data []byte, v interface{}) error {
+	if CustomJSONUnmarshaler == nil {
+		return json.Unmarshal(data, v)
+	}
+
+	return CustomJSONUnmarshaler.Unmarshal(data, v)
+}
+
+type nocopyRawMessage []byte
+
+func (m *nocopyRawMessage) UnmarshalJSON(data []byte) error {
+	*m = data
+	return nil
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/properties.go b/vendor/github.com/paulmach/orb/geojson/properties.go
new file mode 100644
index 0000000..0e0eca9
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/properties.go
@@ -0,0 +1,112 @@
+package geojson
+
+import "fmt"
+
+// Properties defines the feature properties with some helper methods.
+type Properties map[string]interface{}
+
+// MustBool guarantees the return of a `bool` (with optional default).
+// This function is useful when you explicitly want a `bool` in a single
+// value return context, for example:
+//
+//	myFunc(f.Properties.MustBool("param1"), f.Properties.MustBool("optional_param", true))
+//
+// This function will panic if the value is present but not a bool.
+func (p Properties) MustBool(key string, def ...bool) bool {
+	v := p[key]
+	if b, ok := v.(bool); ok {
+		return b
+	}
+
+	if v != nil {
+		panic(fmt.Sprintf("not a bool, but a %T: %v", v, v))
+	}
+
+	if len(def) > 0 {
+		return def[0]
+	}
+
+	panic("property not found")
+}
+
+// MustInt guarantees the return of an `int` (with optional default).
+// This function is useful when you explicitly want an `int` in a single
+// value return context, for example:
+//
+//	myFunc(f.Properties.MustInt("param1"), f.Properties.MustInt("optional_param", 123))
+//
+// This function will panic if the value is present but not a number.
+func (p Properties) MustInt(key string, def ...int) int {
+	v := p[key]
+	if i, ok := v.(int); ok {
+		return i
+	}
+
+	if f, ok := v.(float64); ok {
+		return int(f)
+	}
+
+	if v != nil {
+		panic(fmt.Sprintf("not a number, but a %T: %v", v, v))
+	}
+
+	if len(def) > 0 {
+		return def[0]
+	}
+
+	panic("property not found")
+}
+
+// MustFloat64 guarantees the return of a `float64` (with optional default).
+// This function is useful when you explicitly want a `float64` in a single
+// value return context, for example:
+//
+//	myFunc(f.Properties.MustFloat64("param1"), f.Properties.MustFloat64("optional_param", 10.1))
+//
+// This function will panic if the value is present but not a number.
+func (p Properties) MustFloat64(key string, def ...float64) float64 {
+	v := p[key]
+	if f, ok := v.(float64); ok {
+		return f
+	}
+
+	if i, ok := v.(int); ok {
+		return float64(i)
+	}
+
+	if v != nil {
+		panic(fmt.Sprintf("not a number, but a %T: %v", v, v))
+	}
+
+	if len(def) > 0 {
+		return def[0]
+	}
+
+	panic("property not found")
+}
+
+// MustString guarantees the return of a `string` (with optional default).
+// This function is useful when you explicitly want a `string` in a single
+// value return context, for example:
+//
+//	myFunc(f.Properties.MustString("param1"), f.Properties.MustString("optional_param", "default"))
+//
+// This function will panic if the value is present but not a string.
+func (p Properties) MustString(key string, def ...string) string {
+	v := p[key]
+	if s, ok := v.(string); ok {
+		return s
+	}
+
+	if v != nil {
+		panic(fmt.Sprintf("not a string, but a %T: %v", v, v))
+	}
+
+	if len(def) > 0 {
+		return def[0]
+	}
+
+	panic("property not found")
+}
+
+// Clone returns a shallow copy of the properties.
+func (p Properties) Clone() Properties {
+	n := make(Properties, len(p)+3)
+	for k, v := range p {
+		n[k] = v
+	}
+
+	return n
+}
diff --git a/vendor/github.com/paulmach/orb/geojson/types.go b/vendor/github.com/paulmach/orb/geojson/types.go
new file mode 100644
index 0000000..7a6a7e7
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geojson/types.go
@@ -0,0 +1,11 @@
+package geojson
+
+// A list of the geojson types that are currently supported.
+const (
+	TypePoint           = "Point"
+	TypeMultiPoint      = "MultiPoint"
+	TypeLineString      = "LineString"
+	TypeMultiLineString = "MultiLineString"
+	TypePolygon         = "Polygon"
+	TypeMultiPolygon    = "MultiPolygon"
+)
diff --git a/vendor/github.com/paulmach/orb/geometry.go b/vendor/github.com/paulmach/orb/geometry.go
new file mode 100644
index 0000000..a15e36b
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/geometry.go
@@ -0,0 +1,146 @@
+package orb
+
+// Geometry is an interface that represents the shared attributes
+// of a geometry.
+type Geometry interface {
+	GeoJSONType() string
+	Dimensions() int // e.g. 0d, 1d, 2d
+	Bound() Bound
+
+	// required because sub packages type switch over all possible types.
+	private()
+}
+
+// compile time checks
+var (
+	_ Geometry = Point{}
+	_ Geometry = MultiPoint{}
+	_ Geometry = LineString{}
+	_ Geometry = MultiLineString{}
+	_ Geometry = Ring{}
+	_ Geometry = Polygon{}
+	_ Geometry = MultiPolygon{}
+	_ Geometry = Bound{}
+
+	_ Geometry = Collection{}
+)
+
+func (p Point) private()             {}
+func (mp MultiPoint) private()       {}
+func (ls LineString) private()       {}
+func (mls MultiLineString) private() {}
+func (r Ring) private()              {}
+func (p Polygon) private()           {}
+func (mp MultiPolygon) private()     {}
+func (b Bound) private()             {}
+func (c Collection) private()        {}
+
+// AllGeometries lists all possible types and values that a geometry
+// interface can be. It should be used only for testing to verify
+// functions that accept a Geometry will work in all cases.
+var AllGeometries = []Geometry{
+	nil,
+	Point{},
+	MultiPoint{},
+	LineString{},
+	MultiLineString{},
+	Ring{},
+	Polygon{},
+	MultiPolygon{},
+	Bound{},
+	Collection{},
+
+	// nil values
+	MultiPoint(nil),
+	LineString(nil),
+	MultiLineString(nil),
+	Ring(nil),
+	Polygon(nil),
+	MultiPolygon(nil),
+	Collection(nil),
+
+	// Collection of Collection
+	Collection{Collection{Point{}}},
+}
+
+// A Collection is a collection of geometries that is also a Geometry.
+type Collection []Geometry
+
+// GeoJSONType returns the geometry collection type.
+func (c Collection) GeoJSONType() string {
+	return "GeometryCollection"
+}
+
+// Dimensions returns the max of the dimensions of the collection.
+func (c Collection) Dimensions() int {
+	max := -1
+	for _, g := range c {
+		if d := g.Dimensions(); d > max {
+			max = d
+		}
+	}
+
+	return max
+}
+
+// Bound returns the bounding box of all the Geometries combined.
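+// Nil entries in the collection are skipped.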
+func (c Collection) Bound() Bound {
+	if len(c) == 0 {
+		return emptyBound
+	}
+
+	var b Bound
+	start := -1
+
+	for i, g := range c {
+		if g != nil {
+			start = i
+			b = g.Bound()
+			break
+		}
+	}
+
+	if start == -1 {
+		return emptyBound
+	}
+
+	for i := start + 1; i < len(c); i++ {
+		if c[i] == nil {
+			continue
+		}
+
+		b = b.Union(c[i].Bound())
+	}
+
+	return b
+}
+
+// Equal compares two collections. Returns true if lengths are the same
+// and all the sub geometries are the same and in the same order.
+func (c Collection) Equal(collection Collection) bool {
+	if len(c) != len(collection) {
+		return false
+	}
+
+	for i, g := range c {
+		if !Equal(g, collection[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Clone returns a deep copy of the collection.
+func (c Collection) Clone() Collection {
+	if c == nil {
+		return nil
+	}
+
+	nc := make(Collection, len(c))
+	for i, g := range c {
+		nc[i] = Clone(g)
+	}
+
+	return nc
+}
diff --git a/vendor/github.com/paulmach/orb/line_string.go b/vendor/github.com/paulmach/orb/line_string.go
new file mode 100644
index 0000000..6f48939
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/line_string.go
@@ -0,0 +1,40 @@
+package orb
+
+// LineString represents a set of points to be thought of as a polyline.
+type LineString []Point
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (ls LineString) GeoJSONType() string {
+	return "LineString"
+}
+
+// Dimensions returns 1 because a LineString is a 1d object.
+func (ls LineString) Dimensions() int {
+	return 1
+}
+
+// Reverse will reverse the line string.
+// This is done in place, i.e. it modifies the original data.
+func (ls LineString) Reverse() {
+	l := len(ls) - 1
+	for i := 0; i <= l/2; i++ {
+		ls[i], ls[l-i] = ls[l-i], ls[i]
+	}
+}
+
+// Bound returns a rect around the line string. Uses rectangular coordinates.
+func (ls LineString) Bound() Bound {
+	return MultiPoint(ls).Bound()
+}
+
+// Equal compares two line strings. Returns true if lengths are the same
+// and all points are Equal.
+func (ls LineString) Equal(lineString LineString) bool {
+	return MultiPoint(ls).Equal(MultiPoint(lineString))
+}
+
+// Clone returns a new copy of the line string.
+func (ls LineString) Clone() LineString {
+	ps := MultiPoint(ls)
+	return LineString(ps.Clone())
+}
diff --git a/vendor/github.com/paulmach/orb/multi_line_string.go b/vendor/github.com/paulmach/orb/multi_line_string.go
new file mode 100644
index 0000000..2b67f20
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/multi_line_string.go
@@ -0,0 +1,58 @@
+package orb
+
+// MultiLineString is a set of polylines.
+type MultiLineString []LineString
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (mls MultiLineString) GeoJSONType() string {
+	return "MultiLineString"
+}
+
+// Dimensions returns 1 because a MultiLineString is a 1d object.
+func (mls MultiLineString) Dimensions() int {
+	return 1
+}
+
+// Bound returns a bound around all the line strings.
+func (mls MultiLineString) Bound() Bound {
+	if len(mls) == 0 {
+		return emptyBound
+	}
+
+	bound := mls[0].Bound()
+	for i := 1; i < len(mls); i++ {
+		bound = bound.Union(mls[i].Bound())
+	}
+
+	return bound
+}
+
+// Equal compares two multi line strings. Returns true if lengths are the same
+// and all points are Equal.
+func (mls MultiLineString) Equal(multiLineString MultiLineString) bool {
+	if len(mls) != len(multiLineString) {
+		return false
+	}
+
+	for i, ls := range mls {
+		if !ls.Equal(multiLineString[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Clone returns a new deep copy of the multi line string.
+func (mls MultiLineString) Clone() MultiLineString {
+	if mls == nil {
+		return nil
+	}
+
+	nmls := make(MultiLineString, 0, len(mls))
+	for _, ls := range mls {
+		nmls = append(nmls, ls.Clone())
+	}
+
+	return nmls
+}
diff --git a/vendor/github.com/paulmach/orb/multi_point.go b/vendor/github.com/paulmach/orb/multi_point.go
new file mode 100644
index 0000000..cee955c
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/multi_point.go
@@ -0,0 +1,56 @@
+package orb
+
+// A MultiPoint represents a set of points in the 2D Euclidean or Cartesian plane.
+type MultiPoint []Point
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (mp MultiPoint) GeoJSONType() string {
+	return "MultiPoint"
+}
+
+// Dimensions returns 0 because a MultiPoint is a 0d object.
+func (mp MultiPoint) Dimensions() int {
+	return 0
+}
+
+// Clone returns a new copy of the points.
+func (mp MultiPoint) Clone() MultiPoint {
+	if mp == nil {
+		return nil
+	}
+
+	points := make([]Point, len(mp))
+	copy(points, mp)
+
+	return MultiPoint(points)
+}
+
+// Bound returns a bound around the points. Uses rectangular coordinates.
+func (mp MultiPoint) Bound() Bound {
+	if len(mp) == 0 {
+		return emptyBound
+	}
+
+	b := Bound{mp[0], mp[0]}
+	for _, p := range mp {
+		b = b.Extend(p)
+	}
+
+	return b
+}
+
+// Equal compares two MultiPoint objects. Returns true if lengths are the same
+// and all points are Equal, and in the same order.
+func (mp MultiPoint) Equal(multiPoint MultiPoint) bool {
+	if len(mp) != len(multiPoint) {
+		return false
+	}
+
+	for i := range mp {
+		if !mp[i].Equal(multiPoint[i]) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/paulmach/orb/multi_polygon.go b/vendor/github.com/paulmach/orb/multi_polygon.go
new file mode 100644
index 0000000..938a9a9
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/multi_polygon.go
@@ -0,0 +1,56 @@
+package orb
+
+// MultiPolygon is a set of polygons.
+type MultiPolygon []Polygon
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (mp MultiPolygon) GeoJSONType() string {
+	return "MultiPolygon"
+}
+
+// Dimensions returns 2 because a MultiPolygon is a 2d object.
+func (mp MultiPolygon) Dimensions() int {
+	return 2
+}
+
+// Bound returns a bound around the multi-polygon.
+func (mp MultiPolygon) Bound() Bound {
+	if len(mp) == 0 {
+		return emptyBound
+	}
+	bound := mp[0].Bound()
+	for i := 1; i < len(mp); i++ {
+		bound = bound.Union(mp[i].Bound())
+	}
+
+	return bound
+}
+
+// Equal compares two multi-polygons.
+func (mp MultiPolygon) Equal(multiPolygon MultiPolygon) bool {
+	if len(mp) != len(multiPolygon) {
+		return false
+	}
+
+	for i, p := range mp {
+		if !p.Equal(multiPolygon[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Clone returns a new deep copy of the multi-polygon.
+func (mp MultiPolygon) Clone() MultiPolygon {
+	if mp == nil {
+		return nil
+	}
+
+	nmp := make(MultiPolygon, 0, len(mp))
+	for _, p := range mp {
+		nmp = append(nmp, p.Clone())
+	}
+
+	return nmp
+}
diff --git a/vendor/github.com/paulmach/orb/point.go b/vendor/github.com/paulmach/orb/point.go
new file mode 100644
index 0000000..c459c35
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/point.go
@@ -0,0 +1,51 @@
+package orb
+
+// A Point is a Lon/Lat 2d point.
+type Point [2]float64
+
+var _ Pointer = Point{}
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (p Point) GeoJSONType() string {
+	return "Point"
+}
+
+// Dimensions returns 0 because a point is a 0d object.
+func (p Point) Dimensions() int {
+	return 0
+}
+
+// Bound returns a single point bound of the point.
+func (p Point) Bound() Bound {
+	return Bound{p, p}
+}
+
+// Point returns itself so it implements the Pointer interface.
+func (p Point) Point() Point {
+	return p
+}
+
+// Y returns the vertical coordinate of the point.
+func (p Point) Y() float64 {
+	return p[1]
+}
+
+// X returns the horizontal coordinate of the point.
+func (p Point) X() float64 {
+	return p[0]
+}
+
+// Lat returns the vertical, latitude coordinate of the point.
+func (p Point) Lat() float64 {
+	return p[1]
+}
+
+// Lon returns the horizontal, longitude coordinate of the point.
+func (p Point) Lon() float64 {
+	return p[0]
+}
+
+// Equal checks if the point represents the same point or vector.
+func (p Point) Equal(point Point) bool {
+	return p[0] == point[0] && p[1] == point[1]
+}
diff --git a/vendor/github.com/paulmach/orb/polygon.go b/vendor/github.com/paulmach/orb/polygon.go
new file mode 100644
index 0000000..b3e7d29
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/polygon.go
@@ -0,0 +1,55 @@
+package orb
+
+// Polygon is a closed area. The first LineString is the outer ring.
+// The others are the holes. Each LineString is expected to be closed,
+// i.e. the first point matches the last.
+type Polygon []Ring
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (p Polygon) GeoJSONType() string {
+	return "Polygon"
+}
+
+// Dimensions returns 2 because a Polygon is a 2d object.
+func (p Polygon) Dimensions() int {
+	return 2
+}
+
+// Bound returns a bound around the polygon.
+func (p Polygon) Bound() Bound {
+	if len(p) == 0 {
+		return emptyBound
+	}
+	return p[0].Bound()
+}
+
+// Equal compares two polygons. Returns true if lengths are the same
+// and all points are Equal.
+func (p Polygon) Equal(polygon Polygon) bool {
+	if len(p) != len(polygon) {
+		return false
+	}
+
+	for i := range p {
+		if !p[i].Equal(polygon[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Clone returns a new deep copy of the polygon.
+// All of the rings are also cloned.
+func (p Polygon) Clone() Polygon {
+	if p == nil {
+		return p
+	}
+
+	np := make(Polygon, 0, len(p))
+	for _, r := range p {
+		np = append(np, r.Clone())
+	}
+
+	return np
+}
diff --git a/vendor/github.com/paulmach/orb/ring.go b/vendor/github.com/paulmach/orb/ring.go
new file mode 100644
index 0000000..5fe88ac
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/ring.go
@@ -0,0 +1,75 @@
+package orb
+
+// Ring represents a closed loop of points on the earth, i.e. a line string
+// whose first and last points match.
+type Ring LineString
+
+// GeoJSONType returns the GeoJSON type for the object.
+func (r Ring) GeoJSONType() string {
+	return "Polygon"
+}
+
+// Dimensions returns 2 because a Ring is a 2d object.
+func (r Ring) Dimensions() int {
+	return 2
+}
+
+// Closed will return true if the ring is a real ring.
+// i.e. 4+ points and the first and last points match.
+// NOTE: this will not check for self-intersection.
+func (r Ring) Closed() bool {
+	return (len(r) >= 4) && (r[0] == r[len(r)-1])
+}
+
+// Reverse changes the direction of the ring.
+// This is done in place, i.e. it modifies the original data.
+func (r Ring) Reverse() {
+	LineString(r).Reverse()
+}
+
+// Bound returns a rect around the ring. Uses rectangular coordinates.
+func (r Ring) Bound() Bound {
+	return MultiPoint(r).Bound()
+}
+
+// Orientation returns 1 if the ring is in counter-clockwise order,
+// -1 if the ring is in clockwise order and 0 if the ring is
+// degenerate and has no area.
+func (r Ring) Orientation() Orientation {
+	area := 0.0
+
+	// This is a fast planar area computation, which is okay for this use.
+	// implicitly move everything to near the origin to help with roundoff
+	offsetX := r[0][0]
+	offsetY := r[0][1]
+	for i := 1; i < len(r)-1; i++ {
+		area += (r[i][0]-offsetX)*(r[i+1][1]-offsetY) -
+			(r[i+1][0]-offsetX)*(r[i][1]-offsetY)
+	}
+
+	if area > 0 {
+		return CCW
+	}
+
+	if area < 0 {
+		return CW
+	}
+
+	// degenerate case, no area
+	return 0
+}
+
+// Equal compares two rings. Returns true if lengths are the same
+// and all points are Equal.
+func (r Ring) Equal(ring Ring) bool {
+	return MultiPoint(r).Equal(MultiPoint(ring))
+}
+
+// Clone returns a new copy of the ring.
+func (r Ring) Clone() Ring {
+	if r == nil {
+		return nil
+	}
+
+	ps := MultiPoint(r)
+	return Ring(ps.Clone())
+}
diff --git a/vendor/github.com/paulmach/orb/round.go b/vendor/github.com/paulmach/orb/round.go
new file mode 100644
index 0000000..b6c7799
--- /dev/null
+++ b/vendor/github.com/paulmach/orb/round.go
@@ -0,0 +1,100 @@
+package orb
+
+import (
+	"fmt"
+	"math"
+)
+
+// Round will round all the coordinates of the geometry to the given factor.
+// The default is 6 decimal places.
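+//
+// Note that the factor is a multiplier, not a digit count. A sketch, assuming
+// the package default of 1e6 (i.e. 6 decimal places):
+//
+//	Round(Point{1.2345678, 2.3456789})       // Point{1.234568, 2.345679}
+//	Round(Point{1.2345678, 2.3456789}, 1000) // Point{1.235, 2.346}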
+func Round(g Geometry, factor ...int) Geometry { + if g == nil { + return nil + } + + f := float64(DefaultRoundingFactor) + if len(factor) > 0 { + f = float64(factor[0]) + } + + switch g := g.(type) { + case Point: + return Point{ + math.Round(g[0]*f) / f, + math.Round(g[1]*f) / f, + } + case MultiPoint: + if g == nil { + return nil + } + roundPoints([]Point(g), f) + return g + case LineString: + if g == nil { + return nil + } + roundPoints([]Point(g), f) + return g + case MultiLineString: + if g == nil { + return nil + } + for _, ls := range g { + roundPoints([]Point(ls), f) + } + return g + case Ring: + if g == nil { + return nil + } + roundPoints([]Point(g), f) + return g + case Polygon: + if g == nil { + return nil + } + for _, r := range g { + roundPoints([]Point(r), f) + } + return g + case MultiPolygon: + if g == nil { + return nil + } + for _, p := range g { + for _, r := range p { + roundPoints([]Point(r), f) + } + } + return g + case Collection: + if g == nil { + return nil + } + + for i := range g { + g[i] = Round(g[i], int(f)) + } + return g + case Bound: + return Bound{ + Min: Point{ + math.Round(g.Min[0]*f) / f, + math.Round(g.Min[1]*f) / f, + }, + Max: Point{ + math.Round(g.Max[0]*f) / f, + math.Round(g.Max[1]*f) / f, + }, + } + } + + panic(fmt.Sprintf("geometry type not supported: %T", g)) +} + +func roundPoints(ps []Point, f float64) { + for i := range ps { + ps[i][0] = math.Round(ps[i][0]*f) / f + ps[i][1] = math.Round(ps[i][1]*f) / f + } +} diff --git a/vendor/github.com/sfomuseum/go-database/.gitignore b/vendor/github.com/sfomuseum/go-database/.gitignore new file mode 100644 index 0000000..e112032 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-database/.gitignore @@ -0,0 +1,4 @@ +*~ +bin +work +go.work* \ No newline at end of file diff --git a/vendor/github.com/sfomuseum/go-database/LICENSE b/vendor/github.com/sfomuseum/go-database/LICENSE new file mode 100644 index 0000000..e2fb977 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-database/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2024, City and County of San Francisco, acting by and through its +Airport Commission ("City"). All rights reserved. + +The City and County of San Francisco, acting by and through its Airport +Commission, created and operates the SFO Museum. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of the City nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/github.com/sfomuseum/go-database/README.md b/vendor/github.com/sfomuseum/go-database/README.md
new file mode 100644
index 0000000..edfddc2
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-database/README.md
@@ -0,0 +1,7 @@
+# go-database
+
+Go package providing opinionated helper methods for working with databases.
+
+## Documentation
+
+Documentation is incomplete.
\ No newline at end of file
diff --git a/vendor/github.com/sfomuseum/go-database/database.go b/vendor/github.com/sfomuseum/go-database/database.go
new file mode 100644
index 0000000..43efda6
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-database/database.go
@@ -0,0 +1,28 @@
+package database
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+)
+
+type ConfigureSQLDatabaseOptions struct {
+	CreateTablesIfNecessary bool
+	Tables                  []Table
+	Pragma                  []string
+}
+
+func DefaultConfigureSQLDatabaseOptions() *ConfigureSQLDatabaseOptions {
+	opts := &ConfigureSQLDatabaseOptions{}
+	return opts
+}
+
+func ConfigureSQLDatabase(ctx context.Context, db *sql.DB, opts *ConfigureSQLDatabaseOptions) error {
+
+	switch Driver(db) {
+	case SQLITE_DRIVER:
+		return configureSQLiteDatabase(ctx, db, opts)
+	default:
+		return fmt.Errorf("Unhandled or unsupported database driver %s", DriverTypeOf(db))
+	}
+}
diff --git a/vendor/github.com/sfomuseum/go-database/database_sqlite.go b/vendor/github.com/sfomuseum/go-database/database_sqlite.go
new file mode 100644
index 0000000..8647214
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-database/database_sqlite.go
@@ -0,0 +1,59 @@
+package database
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"slices"
+)
+
+func configureSQLiteDatabase(ctx context.Context, db *sql.DB, opts *ConfigureSQLDatabaseOptions) error {
+
+	if opts.CreateTablesIfNecessary {
+
+		table_names := make([]string, 0)
+
+		sql := "SELECT name FROM sqlite_master WHERE type='table'"
+
+		rows, err := db.QueryContext(ctx, sql)
+
+		if err != nil {
+			return fmt.Errorf("Failed to query sqlite_master, %w", err)
+		}
+
+		defer rows.Close()
+
+		for rows.Next() {
+
+			var name string
+			err := rows.Scan(&name)
+
+			if err != nil {
+				return fmt.Errorf("Failed to scan table name, %w", err)
+			}
+
+			table_names = append(table_names, name)
+		}
+
+		for _, t := range opts.Tables {
+
+			if slices.Contains(table_names, t.Name()) {
+				continue
+			}
+
+			schema, err := t.Schema(db)
+
+			if err != nil {
+				return fmt.Errorf("Failed to derive schema for table %s, %w", t.Name(), err)
+			}
+
+			_, err = db.ExecContext(ctx, schema)
+
+			if err != nil {
+				return fmt.Errorf("Failed to create %s table, %w", t.Name(), err)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sfomuseum/go-database/driver.go b/vendor/github.com/sfomuseum/go-database/driver.go
new file mode 100644
index 0000000..2ad60b2
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-database/driver.go
@@ -0,0 +1,32 @@
+package database
+
+import (
+	"database/sql"
+	"fmt"
+	"log/slog"
+	"reflect"
+)
+
+const SQLITE_DRIVER string = "sqlite"
+const MYSQL_DRIVER string = "mysql" +const POSTGRES_DRIVER string = "postgres" + +// https://github.com/golang/go/issues/12600 +// https://stackoverflow.com/questions/38811056/how-to-determine-name-of-database-driver-im-using + +func DriverTypeOf(db *sql.DB) string { + return fmt.Sprintf("%s", reflect.TypeOf(db.Driver())) +} + +func Driver(db *sql.DB) string { + + driver_type := DriverTypeOf(db) + + switch driver_type { + case "*sqlite3.SQLiteDriver", "*sqlite.Driver": + return SQLITE_DRIVER + default: + slog.Warn("Unhandled driver type", "type", driver_type) + return "" + } +} diff --git a/vendor/github.com/sfomuseum/go-database/errors.go b/vendor/github.com/sfomuseum/go-database/errors.go new file mode 100644 index 0000000..9fe0096 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-database/errors.go @@ -0,0 +1,40 @@ +package database + +import ( + "fmt" +) + +// WrapError returns a new error wrapping 'err' and prepending with the value of 't's Name() method. +func WrapError(t Table, err error) error { + return fmt.Errorf("[%s] %w", t.Name(), err) +} + +// InitializeTableError returns a new error with a default message for database initialization problems wrapping 'err' and prepending with the value of 't's Name() method. +func InitializeTableError(t Table, err error) error { + return WrapError(t, fmt.Errorf("Failed to initialize database table, %w", err)) +} + +// DatabaseConnectionError returns a new error with a default message for database connection problems wrapping 'err' and prepending with the value of 't's Name() method. +func DatabaseConnectionError(t Table, err error) error { + return WrapError(t, fmt.Errorf("Failed to establish database connection, %w", err)) +} + +// BeginTransactionError returns a new error with a default message for database transaction initialization problems wrapping 'err' and prepending with the value of 't's Name() method. +func BeginTransactionError(t Table, err error) error { + return WrapError(t, fmt.Errorf("Failed to begin database transaction, %w", err)) +} + +// CommitTransactionError returns a new error with a default message for problems committing database transactions wrapping 'err' and prepending with the value of 't's Name() method. +func CommitTransactionError(t Table, err error) error { + return WrapError(t, fmt.Errorf("Failed to commit database transaction, %w", err)) +} + +// PrepareStatementError returns a new error with a default message for problems preparing database (SQL) statements wrapping 'err' and prepending with the value of 't's Name() method. +func PrepareStatementError(t Table, err error) error { + return WrapError(t, fmt.Errorf("Failed to prepare SQL statement, %w", err)) +} + +// ExecuteStatementError returns a new error with a default message for problems executing database (SQL) statements wrapping 'err' and prepending with the value of 't's Name() method. 
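+//
+// A typical call site looks something like this (a sketch; 'stmt' and 'args'
+// are illustrative, not part of this package):
+//
+//	if _, err := stmt.ExecContext(ctx, args...); err != nil {
+//		return ExecuteStatementError(t, err)
+//	}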
+func ExecuteStatementError(t Table, err error) error { + return WrapError(t, fmt.Errorf("Failed to execute SQL statement, %w", err)) +} diff --git a/vendor/github.com/sfomuseum/go-database/sqlite.go b/vendor/github.com/sfomuseum/go-database/sqlite.go new file mode 100644 index 0000000..ae0c7a4 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-database/sqlite.go @@ -0,0 +1,34 @@ +package database + +import ( + "context" + "database/sql" + "fmt" +) + +func DefaultSQLitePragma() []string { + + pragma := []string{ + "PRAGMA JOURNAL_MODE=OFF", + "PRAGMA SYNCHRONOUS=OFF", + // https://www.gaia-gis.it/gaia-sins/spatialite-cookbook/html/system.html + "PRAGMA PAGE_SIZE=4096", + "PRAGMA CACHE_SIZE=1000000", + } + + return pragma +} + +func ConfigureSQLitePragma(ctx context.Context, db *sql.DB, pragma []string) error { + + for _, p := range pragma { + + _, err := db.ExecContext(ctx, p) + + if err != nil { + return fmt.Errorf("Failed to set pragma '%s', %w", p, err) + } + } + + return nil +} diff --git a/vendor/github.com/sfomuseum/go-database/table.go b/vendor/github.com/sfomuseum/go-database/table.go new file mode 100644 index 0000000..ffb2054 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-database/table.go @@ -0,0 +1,69 @@ +package database + +import ( + "context" + "database/sql" + "fmt" + "regexp" +) + +var re_mem *regexp.Regexp +var re_vfs *regexp.Regexp +var re_file *regexp.Regexp + +func init() { + re_mem = regexp.MustCompile(`^(file\:)?\:memory\:.*`) + re_vfs = regexp.MustCompile(`^vfs:\.*`) + re_file = regexp.MustCompile(`^file\:([^\?]+)(?:\?.*)?$`) +} + +type Table interface { + Name() string + Schema(*sql.DB) (string, error) + InitializeTable(context.Context, *sql.DB) error + IndexRecord(context.Context, *sql.DB, interface{}) error +} + +func HasTable(ctx context.Context, db *sql.DB, table_name string) (bool, error) { + + switch Driver(db) { + case SQLITE_DRIVER: + return hasSQLiteTable(ctx, db, table_name) + default: + return false, fmt.Errorf("Unhandled or unsupported database driver %s", DriverTypeOf(db)) + } + +} + +func CreateTableIfNecessary(ctx context.Context, db *sql.DB, t Table) error { + + create := false + + has_table, err := HasTable(ctx, db, t.Name()) + + if err != nil { + return err + } + + if !has_table { + create = true + } + + if create { + + sql, err := t.Schema(db) + + if err != nil { + return err + } + + _, err = db.ExecContext(ctx, sql) + + if err != nil { + return err + } + + } + + return nil +} diff --git a/vendor/github.com/sfomuseum/go-database/table_sqlite.go b/vendor/github.com/sfomuseum/go-database/table_sqlite.go new file mode 100644 index 0000000..5f51934 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-database/table_sqlite.go @@ -0,0 +1,43 @@ +package database + +// Something something something maybe build tags I am not sure yet... + +import ( + "context" + "database/sql" + "fmt" +) + +func hasSQLiteTable(ctx context.Context, db *sql.DB, table_name string) (bool, error) { + + has_table := false + + // TBD... how to derive database engine... 
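+	// sqlite_master keeps one row per object in the database, so scanning
+	// the names of everything with type='table' is a simple way to test
+	// whether a given table exists.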
+ + sql := "SELECT name FROM sqlite_master WHERE type='table'" + + rows, err := db.QueryContext(ctx, sql) + + if err != nil { + return false, fmt.Errorf("Failed to query sqlite_master, %w", err) + } + + defer rows.Close() + + for rows.Next() { + + var name string + err := rows.Scan(&name) + + if err != nil { + return false, fmt.Errorf("Failed scan table name, %w", err) + } + + if name == table_name { + has_table = true + break + } + } + + return has_table, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/.gitignore b/vendor/github.com/sfomuseum/go-edtf/.gitignore new file mode 100644 index 0000000..afa44cd --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/.gitignore @@ -0,0 +1,11 @@ +*~ +pkg +src +!vendor/src +bin +!bin/.gitignore +*.log +*.json +.travis.yml +*.db +testdata/*.txt \ No newline at end of file diff --git a/vendor/github.com/sfomuseum/go-edtf/LICENSE b/vendor/github.com/sfomuseum/go-edtf/LICENSE new file mode 100644 index 0000000..44e2c16 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2021, City and County of San Francisco, acting by and through its +Airport Commission ("City"). All rights reserved. + +The City and County of San Francisco, acting by and through its Airport +Commission, created and operates the SFO Museum. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of the City nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/sfomuseum/go-edtf/Makefile b/vendor/github.com/sfomuseum/go-edtf/Makefile new file mode 100644 index 0000000..57a6e61 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/Makefile @@ -0,0 +1,3 @@ +cli: + go build -mod vendor -o bin/parse cmd/parse/main.go + go build -mod vendor -o bin/matches cmd/matches/main.go diff --git a/vendor/github.com/sfomuseum/go-edtf/README.md b/vendor/github.com/sfomuseum/go-edtf/README.md new file mode 100644 index 0000000..4244899 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/README.md @@ -0,0 +1,381 @@ +# go-edtf + +A Go package for parsing Extended DateTime Format (EDTF) date strings. It is compliant with Level 0, Level 1 and Level 2 of the EDTF specification (2019). 
+ +* [Background](#background) +* [Features](#features) +* [Nomenclature and Type Definitions](#nomenclature-and-type-definitions) +* [Documentation](#documentation) +* [Example](#example) +* [Tools](#tools) +* [Tests](#tests) +* [Reporting Bugs and Issues](#reporting-bugs-and-issues) + +[![Go Reference](https://pkg.go.dev/badge/github.com/sfomuseum/go-edtf.svg)](https://pkg.go.dev/github.com/sfomuseum/go-edtf) + +_At this time Go reference documentation is incomplete._ + +## Background + +The following is taken from the [EDTF website](https://www.loc.gov/standards/datetime/background.html): + +> EDTF defines features to be supported in a date/time string, features considered useful for a wide variety of applications + +> Date and time formats are specified in ISO 8601, the International Standard for the representation of dates and times. ISO 8601-2004 provided basic date and time formats; these were not sufficiently expressive to support various semantic qualifiers and concepts than many applications find useful. For example, although it could express the concept "the year 1984", it could not express "approximately the year 1984", or "we think the year is 1984 but we're not certain". These as well as various other concepts had therefore often been represented using ad hoc conventions; EDTF provides a standard syntax for their representation. + +> Further, 8601 is a complex specification describing a large number of date/time formats, in many cases providing multiple options for a given format. Thus a second aim of EDTF is to restrict the supported formats to a smaller set. + +> EDTF functionality has now been integrated into ISO 8601-2019, the latest revision of ISO 8601, published in March 2019. + +> EDTF was developed over the course of several years by a community of interested parties, and a draft specification was published in 2012. The draft specification is no longer publicly, readily available, because its availability has caused confusion with the official version. + +## Features + +### Level 0 + +| Name | Implementation | Tests | Notes | +| --- | --- | --- | --- | +| [Date](https://www.loc.gov/standards/datetime/) | [yes](level0/date.go) | [yes](level0/date_test.go) | | +| [Date and Time](https://www.loc.gov/standards/datetime/) | [yes](level0/date_and_time.go) | [yes](level0/date_and_time_test.go) | | +| [Time Interval](https://www.loc.gov/standards/datetime/) | [yes](level0/time_interval.go) | [yes](level0/time_interval_test.go) | | + +### Level 1 + +| Name | Implementation | Tests | Notes | +| --- | --- | --- | --- | +| [Letter-prefixed calendar year](https://www.loc.gov/standards/datetime/) | [yes](level1/letter_prefixed_calendar_year.go) | [yes](level1/letter_prefixed_calendar_year_test.go) | Calendar years greater (or less) than 9999 are not supported yet. 
| +| [Season](https://www.loc.gov/standards/datetime/) | [yes](level1/season.go) | [yes](level1/season_test.go) | | +| [Qualification of a date (complete)](https://www.loc.gov/standards/datetime/) | [yes](level1/qualified_date.go) | [yes](level1/qualified_date_test.go) | | +| [Unspecified digit(s) from the right](https://www.loc.gov/standards/datetime/) | [yes](level1/unspecified_digits.go) | [yes](level1/unspecified_digits_test.go) | | +| [Extended Interval (L1)](https://www.loc.gov/standards/datetime/) | [yes](level1/extended_interval.go) | [yes](level1/extended_interval_test.go) | | +| [Negative calendar year](https://www.loc.gov/standards/datetime/) | [yes](level1/negative_calendar_year.go) | [yes](level1/negative_calendar_year_test.go) | | + +### Level 2 + +| Name | Implementation | Tests | Notes | +| --- | --- | --- | --- | +| [Exponential year](https://www.loc.gov/standards/datetime/) | [yes](level2/exponential_year.go) | [yes](level2/exponential_year_test.go) | Calendar years greater (or less) than 9999 are not supported yet. | +| [Significant digits](https://www.loc.gov/standards/datetime/) | [yes](level2/significant_digits.go) | [yes](level2/significant_digits_test.go) | | +| [Sub-year groupings](https://www.loc.gov/standards/datetime/) | [yes](level2/sub_year_grouping.go) | [yes](level2/sub_year_grouping_test.go) | Compound phrases, like "second quarter of 2001" are not supported yet. | +| [Set representation](https://www.loc.gov/standards/datetime/) | [yes](level2/set_representation.go) | [yes](level2/set_representation_test.go) | | +| [Qualification](https://www.loc.gov/standards/datetime/) | [yes](level2/qualification.go) | [yes](level2/qualification_test.go) | | +| [Unspecified Digit](https://www.loc.gov/standards/datetime/) | [yes](level2/unspecified_digit.go) | [yes](level2/unspecified_digit_test.go) | Years with a leading unspecified digit, for example "X999", are not supported yet | +| [Interval](https://www.loc.gov/standards/datetime/) | [yes](level2/interval.go) | [yes](level2/interval.go) | | + +## Nomenclature and Type Definitions + +### Date Spans (or `edtf.EDTFDate`) + +The word `span` is defined as: + +``` +The full extent of something from end to end; the amount of space that something covers: +``` + +An `edtf.EDTFDate` instance is a struct that represents a date span in the form of `Start` and `End` properties which are themselves `edtf.DateRange` instances. It also contains properties denoting the EDTF feature and level associated with EDTF string used to create the instance. + +``` +type EDTFDate struct { + Start *DateRange `json:"start"` + End *DateRange `json:"end"` + EDTF string `json:"edtf"` + Level int `json:"level"` + Feature string `json:"feature"` +} +``` + +### Date ranges (or `edtf.DateRange`) + +The word `range` is defined as: + +``` +The area of variation between upper and lower limits on a particular scale +``` + +A `edtf.DateRange` instance encompasses upper and lower dates (for an EDTF string). It is a struct with `Lower` and `Upper` properties which are themselves `edtf.Date` instances. + +``` +type DateRange struct { + EDTF string `json:"edtf"` + Lower *Date `json:"lower"` + Upper *Date `json:"upper"` +} +``` + +### Date (or `edtf.Date`) + +A `edtf.Date` instance is the upper or lower end of a date range. It is a struct that contains atomic date and time information as well as a number of flags denoting precision and other granularities defined in the EDTF specification. 
+
+```
+type Date struct {
+	DateTime    string     `json:"datetime,omitempty"`
+	Timestamp   *Timestamp `json:"timestamp,omitempty"`
+	YMD         *YMD       `json:"ymd"`
+	Uncertain   Precision  `json:"uncertain,omitempty"`
+	Approximate Precision  `json:"approximate,omitempty"`
+	Unspecified Precision  `json:"unspecified,omitempty"`
+	Precision   Precision  `json:"precision,omitempty"`
+	Open        bool       `json:"open,omitempty"`
+	Unknown     bool       `json:"unknown,omitempty"`
+	Inclusivity Precision  `json:"inclusivity,omitempty"`
+}
+```
+
+#### Notes
+
+* `DateTime` strings are encoded as `RFC3339` strings, in UTC.
+
+### Timestamp (or `edtf.Timestamp`)
+
+A `edtf.Timestamp` instance represents the Unix timestamp for the date. When serialized (or "marshal-ed") as JSON a `edtf.Timestamp` instance is encoded as a signed 64-bit integer. When a JSON-encoded `edtf.EDTFDate` is unserialized (or "unmarshal"-ed) the integer is converted to a `edtf.Timestamp` instance.
+
+The `Timestamp` is defined as a custom struct rather than an integer so that when it is serialized there is no confusion about whether a value of "0" means "January 1, 1970" or "this property is missing". It means the former. If the property is `nil` then it should be excluded from the JSON representation entirely.
+
+Because the Go language imposes limits on the minimum and maximum date it can represent (-9999 and 9999 respectively) this element _may_ be `nil`.
+
+### YMD (or `edtf.YMD`)
+
+A `edtf.YMD` instance is a struct containing numeric year, month and day properties. It is designed to supplement "time" elements or, in cases where a "time" element is not possible, to replace it.
+
+```
+type YMD struct {
+	Year  int `json:"year"`
+	Month int `json:"month"`
+	Day   int `json:"day"`
+}
+```
+
+### Precision (or `edtf.Precision`)
+
+"Precision" is a 32-bit integer (as well as a Go language `Precision` instance with its own methods) that uses bitsets to represent granularity.
+
+The following named granularities are defined as constants:
+
+| Name | Value | Notes |
+| --- | --- | --- |
+| NONE | 0 | |
+| ALL | 2 | |
+| ANY | 4 | |
+| DAY | 8 | |
+| WEEK | 16 | |
+| MONTH | 32 | |
+| YEAR | 64 | |
+| DECADE | 128 | |
+| CENTURY | 256 | |
+| MILLENIUM | 512 | |
+
+## Documentation
+
+Proper Go language documentation [is incomplete](https://github.com/sfomuseum/go-edtf/issues/12) but can be viewed in its current state at: https://godoc.org/github.com/sfomuseum/go-edtf
+
+## Example
+
+```
+package main
+
+import (
+	"flag"
+	"github.com/sfomuseum/go-edtf/parser"
+	"log"
+)
+
+func main() {
+
+	flag.Parse()
+
+	for _, raw := range flag.Args() {
+
+		if !parser.IsValid(raw) {
+			continue
+		}
+
+		level, feature, _ := parser.Matches(raw)
+
+		log.Printf("%s is a Level %d (%s) string\n", raw, level, feature)
+
+		d, _ := parser.ParseString(raw)
+
+		log.Printf("%s spans %v to %v\n", raw, d.Lower(), d.Upper())
+	}
+}
+```
+
+_Error handling removed for the sake of brevity._
+
+## Tools
+
+To build binary versions of these tools run the `cli` Makefile target. For example:
+
+```
+$> make cli
+go build -mod vendor -o bin/parse cmd/parse/main.go
+go build -mod vendor -o bin/matches cmd/matches/main.go
+```
+
+### matches
+
+Parse one or more EDTF strings and emit the EDTF level and feature name they match.
+
+```
+> ./bin/matches -h
+Parse one or more EDTF strings and emit the EDTF level and feature name they match.
+Usage: + ./bin/matches edtf_string(N) edtf_string(N) +``` + +For example: + +``` +> ./bin/matches 193X 2021-10-10T00:24:00Z +193X level 1 (Unspecified digit(s) from the right) +2021-10-10T00:24:00Z level 0 (Date and Time) +``` + +### parse + +Parse one or more EDTF strings and return a list of JSON-encoded `edtf.EDTFDate` objects. + +``` +$> ./bin/parse -h +Parse one or more EDTF strings and return a list of JSON-encoded edtf.EDTFDate objects. +Usage: + ./bin/parse edtf_string(N) edtf_string(N) +``` + +For example: + +``` +$> ./bin/parse 2004-06-XX/2004-07-03 '{1667,1668,1670..1672}' | jq + +[ + { + "start": { + "edtf": "2004-06-XX", + "lower": { + "datetime": "2004-06-01T00:00:00Z", + "timestamp": 1086048000, + "ymd": { + "year": 2004, + "month": 6, + "day": 1 + }, + "precision": 32 + }, + "upper": { + "datetime": "2004-06-30T23:59:59Z", + "timestamp": 1088639999, + "ymd": { + "year": 2004, + "month": 6, + "day": 30 + }, + "precision": 32 + } + }, + "end": { + "edtf": "2004-07-03", + "lower": { + "datetime": "2004-07-03T00:00:00Z", + "timestamp": 1088812800, + "ymd": { + "year": 2004, + "month": 7, + "day": 3 + }, + "precision": 64 + }, + "upper": { + "datetime": "2004-07-03T23:59:59Z", + "timestamp": 1088899199, + "ymd": { + "year": 2004, + "month": 7, + "day": 3 + }, + "precision": 64 + } + }, + "edtf": "2004-06-XX/2004-07-03", + "level": 2, + "feature": "Interval" + }, + { + "start": { + "edtf": "1667", + "lower": { + "datetime": "1667-01-01T00:00:00Z", + "timestamp": -9561715200, + "ymd": { + "year": 1667, + "month": 1, + "day": 1 + }, + "precision": 64, + "inclusivity": 2 + }, + "upper": { + "datetime": "1667-12-31T23:59:59Z", + "timestamp": -9530179201, + "ymd": { + "year": 1667, + "month": 12, + "day": 31 + }, + "precision": 64, + "inclusivity": 2 + } + }, + "end": { + "edtf": "1672", + "lower": { + "datetime": "1672-01-01T00:00:00Z", + "timestamp": -9403948800, + "ymd": { + "year": 1672, + "month": 1, + "day": 1 + }, + "precision": 64, + "inclusivity": 2 + }, + "upper": { + "datetime": "1672-12-31T23:59:59Z", + "timestamp": -9372326401, + "ymd": { + "year": 1672, + "month": 12, + "day": 31 + }, + "precision": 64, + "inclusivity": 2 + } + }, + "edtf": "{1667,1668,1670..1672}", + "level": 2, + "feature": "Set representation" + } +] +``` + +## Tests + +Tests are defined and handled in (3) places: + +* In every `level(N)` package there are individual `_test.go` files for each feature. +* In every `level(N)` package there is a `tests.go` file that defines input values and expected response values defined as `tests.TestResult` instances. +* The `tests.TestResult` instance, its options and its methods are defined in the `tests` package. It implements a `TestDate` method that most of the individual `_test.go` files invoke. + +## Reporting Bugs and Issues + +There might still be bugs, implementation gotchas or other issues. If you encounter any of these [please report them here](https://github.com/sfomuseum/go-edtf/issues). 
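+
+When filing a report it can help to note which class of error the parser returned; the package defines a predicate helper for each error type in `errors.go`. A minimal sketch (the input strings here are illustrative):
+
+```
+package main
+
+import (
+	"log"
+
+	"github.com/sfomuseum/go-edtf"
+	"github.com/sfomuseum/go-edtf/parser"
+)
+
+func main() {
+
+	for _, raw := range []string{"1985-04-12", "not an EDTF string"} {
+
+		_, err := parser.ParseString(raw)
+
+		if err == nil {
+			continue
+		}
+
+		switch {
+		case edtf.IsInvalid(err):
+			log.Printf("%s is not a valid EDTF string, %v", raw, err)
+		case edtf.IsUnsupported(err):
+			log.Printf("%s is valid EDTF but unsupported, %v", raw, err)
+		case edtf.IsNotImplemented(err):
+			log.Printf("%s is valid EDTF but not implemented yet, %v", raw, err)
+		default:
+			log.Printf("Failed to parse %s, %v", raw, err)
+		}
+	}
+}
+```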
+
+## See also
+
+* http://www.loc.gov/standards/datetime/
+* https://www.iso.org/standard/70907.html (ISO 8601-1:2019)
+* https://www.iso.org/standard/70908.html (ISO 8601-2:2019)
+
+### Related
+
+* https://github.com/sjansen/edtf
+* https://github.com/unt-libraries/edtf-validate
\ No newline at end of file
diff --git a/vendor/github.com/sfomuseum/go-edtf/calendar/calendar.go b/vendor/github.com/sfomuseum/go-edtf/calendar/calendar.go
new file mode 100644
index 0000000..75d3866
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-edtf/calendar/calendar.go
@@ -0,0 +1,74 @@
+// package calendar provides common date and calendar methods.
+package calendar
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Calculate the number of days in a month for a 'YYYY-MM' formatted string.
+func DaysInMonthWithString(yyyymm string) (int, error) {
+
+	ym := strings.Split(yyyymm, "-")
+
+	var str_yyyy string
+	var str_mm string
+
+	switch len(ym) {
+	case 3:
+		str_yyyy = fmt.Sprintf("-%s", ym[1])
+		str_mm = ym[2]
+	case 2:
+		str_yyyy = ym[0]
+		str_mm = ym[1]
+	default:
+		return 0, errors.New("Invalid YYYY-MM string")
+	}
+
+	yyyy, err := strconv.Atoi(str_yyyy)
+
+	if err != nil {
+		return 0, err
+	}
+
+	mm, err := strconv.Atoi(str_mm)
+
+	if err != nil {
+		return 0, err
+	}
+
+	return DaysInMonth(yyyy, mm)
+}
+
+// Calculate the number of days in a month given a year and month in numeric form.
+func DaysInMonth(yyyy int, mm int) (int, error) {
+
+	// Because Go can't parse dates < 0...
+
+	if yyyy < 0 {
+		yyyy = yyyy - (yyyy * 2)
+	}
+
+	next_yyyy := yyyy
+	next_mm := mm + 1
+
+	if mm >= 12 {
+		next_yyyy = yyyy + 1
+		next_mm = 1
+	}
+
+	next_ymd := fmt.Sprintf("%04d-%02d-01", next_yyyy, next_mm)
+	next_t, err := time.Parse("2006-01-02", next_ymd)
+
+	if err != nil {
+		return 0, err
+	}
+
+	mm_t := next_t.AddDate(0, 0, -1)
+	dd := mm_t.Day()
+
+	return dd, nil
+}
diff --git a/vendor/github.com/sfomuseum/go-edtf/common/common.go b/vendor/github.com/sfomuseum/go-edtf/common/common.go
new file mode 100644
index 0000000..3b6fdbb
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-edtf/common/common.go
@@ -0,0 +1,2 @@
+// package common provides common methods across EDTF level definitions.
+package common
diff --git a/vendor/github.com/sfomuseum/go-edtf/common/exponential.go b/vendor/github.com/sfomuseum/go-edtf/common/exponential.go
new file mode 100644
index 0000000..0acaab7
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-edtf/common/exponential.go
@@ -0,0 +1,26 @@
+package common
+
+import (
+	"github.com/sfomuseum/go-edtf"
+	"math/big"
+)
+
+// Parse a string in exponential notation into a year value in numeric form.
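+// Values outside the range ±edtf.MAX_YEARS are rejected with an edtf.Unsupported error.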
+func ParseExponentialNotation(notation string) (int, error) { + + flt, _, err := big.ParseFloat(notation, 10, 0, big.ToNearestEven) + + if err != nil { + return 0, err + } + + var i = new(big.Int) + yyyy, _ := flt.Int(i) + + if yyyy.Int64() > int64(edtf.MAX_YEARS) || yyyy.Int64() < int64(0-edtf.MAX_YEARS) { + return 0, edtf.Unsupported("exponential notation", notation) + } + + yyyy_i := int(yyyy.Int64()) + return yyyy_i, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/common/range.go b/vendor/github.com/sfomuseum/go-edtf/common/range.go new file mode 100644 index 0000000..14de55f --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/common/range.go @@ -0,0 +1,538 @@ +package common + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/calendar" + "github.com/sfomuseum/go-edtf/re" + "strconv" + "strings" +) + +type Qualifier struct { + Value string + Type string +} + +func (q *Qualifier) String() string { + return fmt.Sprintf("[%T] Value: '%s' Type: '%s'", q, q.Value, q.Type) +} + +// StringWhatever is a bad naming convention - please make me better +// (20210105/thisisaaronland) + +type StringDate struct { + Year string + Month string + Day string +} + +func (d *StringDate) String() string { + return fmt.Sprintf("[[%T] Y: '%s' M: '%s' D: '%s']", d, d.Year, d.Month, d.Day) +} + +func (d *StringDate) Equals(other_d *StringDate) bool { + + if d.Year != other_d.Year { + return false + } + + if d.Month != other_d.Month { + return false + } + + if d.Day != other_d.Day { + return false + } + + return true +} + +type StringRange struct { + Start *StringDate + End *StringDate + Precision edtf.Precision + Uncertain edtf.Precision + Approximate edtf.Precision + EDTF string +} + +func (r *StringRange) String() string { + return fmt.Sprintf("[[%T] Start: '%s' End: '%s']", r, r.Start, r.End) +} + +func StringRangeFromYMD(edtf_str string) (*StringRange, error) { + + precision := edtf.NONE + uncertain := edtf.NONE + approximate := edtf.NONE + + parts := re.YMD.FindStringSubmatch(edtf_str) + count := len(parts) + + if count != 4 { + return nil, edtf.Invalid("date", edtf_str) + } + + yyyy := parts[1] + mm := parts[2] + dd := parts[3] + + // fmt.Printf("DATE Y: '%s' M: '%s' D: '%s'\n", yyyy, mm, dd) + + if yyyy != "" && mm != "" && dd != "" { + precision.AddFlag(edtf.DAY) + } else if yyyy != "" && mm != "" { + precision.AddFlag(edtf.MONTH) + } else if yyyy != "" { + precision.AddFlag(edtf.YEAR) + } + + // fmt.Println("PRECISION -", edtf_str, precision) + + var yyyy_q *Qualifier + var mm_q *Qualifier + var dd_q *Qualifier + + if yyyy != "" { + + y, q, err := parseYMDComponent(yyyy) + + if err != nil { + return nil, err + } + + yyyy = y + yyyy_q = q + } + + if mm != "" { + + m, q, err := parseYMDComponent(mm) + + if err != nil { + return nil, err + } + + mm = m + mm_q = q + } + + if dd != "" { + + d, q, err := parseYMDComponent(dd) + + if err != nil { + return nil, err + } + + dd = d + dd_q = q + } + + // fmt.Println("YYYY", yyyy_q) + // fmt.Println("MM", mm_q) + // fmt.Println("DD", dd_q) + + if dd_q != nil && dd_q.Type == "Group" { + + // precision.AddFlag(edtf.YEAR) + // precision.AddFlag(edtf.MONTH) + // precision.AddFlag(edtf.DAY) + + switch dd_q.Value { + case edtf.UNCERTAIN: + uncertain.AddFlag(edtf.YEAR) + uncertain.AddFlag(edtf.MONTH) + uncertain.AddFlag(edtf.DAY) + case edtf.APPROXIMATE: + approximate.AddFlag(edtf.YEAR) + approximate.AddFlag(edtf.MONTH) + approximate.AddFlag(edtf.DAY) + case edtf.UNCERTAIN_AND_APPROXIMATE: + uncertain.AddFlag(edtf.YEAR) + 
uncertain.AddFlag(edtf.MONTH) + uncertain.AddFlag(edtf.DAY) + approximate.AddFlag(edtf.YEAR) + approximate.AddFlag(edtf.MONTH) + approximate.AddFlag(edtf.DAY) + default: + // pass + } + + } + + if mm_q != nil && mm_q.Type == "Group" { + + // precision.AddFlag(edtf.YEAR) + // precision.AddFlag(edtf.MONTH) + + switch mm_q.Value { + case edtf.UNCERTAIN: + uncertain.AddFlag(edtf.YEAR) + uncertain.AddFlag(edtf.MONTH) + case edtf.APPROXIMATE: + approximate.AddFlag(edtf.YEAR) + approximate.AddFlag(edtf.MONTH) + case edtf.UNCERTAIN_AND_APPROXIMATE: + uncertain.AddFlag(edtf.YEAR) + uncertain.AddFlag(edtf.MONTH) + approximate.AddFlag(edtf.YEAR) + approximate.AddFlag(edtf.MONTH) + default: + // pass + } + + } + + if yyyy_q != nil && yyyy_q.Type == "Group" { + + // precision.AddFlag(edtf.YEAR) + + switch yyyy_q.Value { + case edtf.UNCERTAIN: + uncertain.AddFlag(edtf.YEAR) + case edtf.APPROXIMATE: + approximate.AddFlag(edtf.YEAR) + case edtf.UNCERTAIN_AND_APPROXIMATE: + uncertain.AddFlag(edtf.YEAR) + approximate.AddFlag(edtf.YEAR) + default: + // pass + } + + } + + if yyyy_q != nil && yyyy_q.Type == "Individual" { + + switch yyyy_q.Value { + case edtf.UNCERTAIN: + uncertain.AddFlag(edtf.YEAR) + case edtf.APPROXIMATE: + approximate.AddFlag(edtf.YEAR) + case edtf.UNCERTAIN_AND_APPROXIMATE: + uncertain.AddFlag(edtf.YEAR) + approximate.AddFlag(edtf.YEAR) + default: + // pass + } + } + + if mm_q != nil && mm_q.Type == "Individual" { + + switch mm_q.Value { + case edtf.UNCERTAIN: + uncertain.AddFlag(edtf.MONTH) + case edtf.APPROXIMATE: + approximate.AddFlag(edtf.MONTH) + case edtf.UNCERTAIN_AND_APPROXIMATE: + uncertain.AddFlag(edtf.MONTH) + approximate.AddFlag(edtf.MONTH) + default: + // pass + } + } + + if dd_q != nil && dd_q.Type == "Individual" { + + switch dd_q.Value { + case edtf.UNCERTAIN: + uncertain.AddFlag(edtf.DAY) + case edtf.APPROXIMATE: + approximate.AddFlag(edtf.DAY) + case edtf.UNCERTAIN_AND_APPROXIMATE: + uncertain.AddFlag(edtf.DAY) + approximate.AddFlag(edtf.DAY) + default: + // pass + } + } + + start_yyyy := yyyy + start_mm := mm + start_dd := dd + + end_yyyy := start_yyyy + end_mm := start_mm + end_dd := start_dd + + // fmt.Println("PRECISION 0", edtf_str, precision) + + if !strings.HasSuffix(yyyy, "X") { + + precision = edtf.NONE + precision.AddFlag(edtf.YEAR) + + } else { + + start_m := int64(0) + end_m := int64(0) + + start_c := int64(0) + end_c := int64(900) + + start_d := int64(0) + end_d := int64(90) + + start_y := int64(0) + end_y := int64(9) + + if string(yyyy[0]) == "X" { + return nil, edtf.NotImplemented("date", edtf_str) + } else { + + m, err := strconv.ParseInt(string(yyyy[0]), 10, 32) + + if err != nil { + return nil, err + } + + start_m = m * 1000 + end_m = start_m + + precision = edtf.NONE + precision.AddFlag(edtf.MILLENIUM) + } + + if string(yyyy[1]) != "X" { + + c, err := strconv.ParseInt(string(yyyy[1]), 10, 32) + + if err != nil { + return nil, err + } + + start_c = c * 100 + end_c = start_c + + precision = edtf.NONE + precision.AddFlag(edtf.CENTURY) + } + + if string(yyyy[2]) != "X" { + + d, err := strconv.ParseInt(string(yyyy[2]), 10, 32) + + if err != nil { + return nil, err + } + + start_d = d * 10 + end_d = start_d + + precision = edtf.NONE + precision.AddFlag(edtf.DECADE) + } + + if string(yyyy[3]) != "X" { + + y, err := strconv.ParseInt(string(yyyy[3]), 10, 32) + + if err != nil { + return nil, err + } + + start_y = y * 1 + end_y = start_y + + precision = edtf.NONE + precision.AddFlag(edtf.YEAR) + } + + start_ymd := start_m + start_c + start_d + start_y + end_ymd 
:= end_m + end_c + end_d + end_y + + // fmt.Printf("OMG '%s' '%d' '%d' '%d' '%d' '%d'\n", yyyy, start_m, start_c, start_d, start_y, start_ymd) + // fmt.Printf("WTF '%s' '%d' '%d' '%d' '%d' '%d'\n", yyyy, end_m, end_c, end_d, end_y, end_ymd) + + start_yyyy = strconv.FormatInt(start_ymd, 10) + end_yyyy = strconv.FormatInt(end_ymd, 10) + + } + + // fmt.Println("PRECISION 1", edtf_str, precision) + + if !strings.HasSuffix(mm, "X") { + + if mm != "" && precision == edtf.NONE { + precision = edtf.NONE + precision.AddFlag(edtf.MONTH) + } + + } else { + + // this does not account for 1985-24, etc. + + if strings.HasPrefix(mm, "X") { + start_mm = "01" + end_mm = "12" + + } else { + start_mm = "10" + end_mm = "12" + + precision = edtf.NONE + precision.AddFlag(edtf.MONTH) + } + } + + // fmt.Println("PRECISION 2", edtf_str, precision) + + if !strings.HasSuffix(dd, "X") { + + if dd != "" && precision == edtf.NONE { + precision = edtf.NONE + precision.AddFlag(edtf.DAY) + } + + } else { + + switch string(dd[0]) { + case "X": + start_dd = "01" + end_dd = "" + case "1": + start_dd = "10" + end_dd = "19" + case "2": + start_dd = "20" + end_dd = "29" + case "3": + start_dd = "30" + end_dd = "" + default: + return nil, edtf.Invalid("date", edtf_str) + } + } + + // the fact that I need to do this tells me that all of the precision + // logic around significant digits needs to be refactored but this will + // do for now... (20210106/thisisaaronland) + + if dd == "XX" && mm == "XX" { + precision = edtf.NONE + precision.AddFlag(edtf.YEAR) + } else if dd == "XX" { + precision = edtf.NONE + precision.AddFlag(edtf.MONTH) + } else { + } + + // fmt.Println("PRECISION 3", edtf_str, precision) + + if start_mm == "" { + start_mm = "01" + } + + if start_dd == "" { + start_dd = "01" + } + + if end_mm == "" { + end_mm = "12" + } + + if end_dd == "" { + + yyyymm := fmt.Sprintf("%s-%s", end_yyyy, end_mm) + + dd, err := calendar.DaysInMonthWithString(yyyymm) + + if err != nil { + return nil, err + } + + end_dd = strconv.Itoa(int(dd)) + } + + start := &StringDate{ + Year: start_yyyy, + Month: start_mm, + Day: start_dd, + } + + end := &StringDate{ + Year: end_yyyy, + Month: end_mm, + Day: end_dd, + } + + r := &StringRange{ + Start: start, + End: end, + Precision: precision, + Uncertain: uncertain, + Approximate: approximate, + EDTF: edtf_str, + } + + return r, nil +} + +func EmptyDateRange() *edtf.DateRange { + + lower_d := &edtf.Date{} + upper_d := &edtf.Date{} + + dt := &edtf.DateRange{ + Lower: lower_d, + Upper: upper_d, + } + + return dt +} + +func UnknownDateRange() *edtf.DateRange { + + dr := EmptyDateRange() + dr.Lower.Unknown = true + dr.Upper.Unknown = true + return dr +} + +func OpenDateRange() *edtf.DateRange { + + dr := EmptyDateRange() + dr.Lower.Open = true + dr.Upper.Open = true + return dr +} + +func parseYMDComponent(date string) (string, *Qualifier, error) { + + m := re.QualifiedIndividual.FindStringSubmatch(date) + + if len(m) == 3 { + + var q *Qualifier + + if m[1] != "" { + + q = &Qualifier{ + Type: "Individual", + Value: m[1], + } + } + + return m[2], q, nil + } + + m = re.QualifiedGroup.FindStringSubmatch(date) + + if len(m) == 3 { + + var q *Qualifier + + if m[2] != "" { + + q = &Qualifier{ + Type: "Group", + Value: m[2], + } + } + + return m[1], q, nil + } + + return "", nil, edtf.Invalid("date", date) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/common/span.go b/vendor/github.com/sfomuseum/go-edtf/common/span.go new file mode 100644 index 0000000..89b2957 --- /dev/null +++ 
b/vendor/github.com/sfomuseum/go-edtf/common/span.go @@ -0,0 +1,284 @@ +package common + +import ( + "github.com/sfomuseum/go-edtf" + "strings" + "time" +) + +func DateSpanFromEDTF(edtf_str string) (*edtf.DateSpan, error) { + + parts := strings.Split(edtf_str, "/") + count := len(parts) + + is_multi := false + + var left_edtf string + var right_edtf string + + switch count { + case 2: + left_edtf = parts[0] + right_edtf = parts[1] + is_multi = true + case 1: + left_edtf = parts[0] + default: + return nil, edtf.Invalid("date span", edtf_str) + } + + if !is_multi { + return dateSpanFromYMD(left_edtf) + } + + left_span, err := dateSpanFromEDTF(left_edtf) + + if err != nil { + return nil, err + } + + right_span, err := dateSpanFromEDTF(right_edtf) + + if err != nil { + return nil, err + } + + left_span.Start.Upper = left_span.End.Upper + + right_span.End.Lower = right_span.Start.Lower + + left_span.End = right_span.End + + return left_span, nil +} + +// specifically from one half of a FOO/BAR string + +func dateSpanFromEDTF(edtf_str string) (*edtf.DateSpan, error) { + + var span *edtf.DateSpan + + switch edtf_str { + case edtf.UNKNOWN: + + span = UnknownDateSpan() + + span.Start.EDTF = edtf_str + span.End.EDTF = edtf_str + + case edtf.OPEN: + + span = OpenDateSpan() + + span.Start.EDTF = edtf_str + span.End.EDTF = edtf_str + + default: + + ds, err := dateSpanFromYMD(edtf_str) + + if err != nil { + return nil, err + } + + span = ds + } + + return span, nil +} + +func dateSpanFromYMD(edtf_str string) (*edtf.DateSpan, error) { + + str_range, err := StringRangeFromYMD(edtf_str) + + if err != nil { + return nil, err + } + + start := str_range.Start + end := str_range.End + + start_ymd, err := YMDFromStringDate(start) + + if err != nil { + return nil, err + } + + end_ymd, err := YMDFromStringDate(end) + + if err != nil { + return nil, err + } + + var start_lower_t *time.Time + var start_upper_t *time.Time + + var end_lower_t *time.Time + var end_upper_t *time.Time + + // fmt.Println("START", start) + // fmt.Println("END", end) + + if end.Equals(start) { + + st, err := TimeWithYMD(start_ymd, edtf.HMS_LOWER) + + if err != nil { + return nil, err + } + + et, err := TimeWithYMD(end_ymd, edtf.HMS_UPPER) + + if err != nil { + return nil, err + } + + start_lower_t = st + start_upper_t = st + + end_lower_t = et + end_upper_t = et + + } else { + + sl, err := TimeWithYMD(start_ymd, edtf.HMS_LOWER) + + if err != nil { + return nil, err + } + + su, err := TimeWithYMD(start_ymd, edtf.HMS_UPPER) + + if err != nil { + return nil, err + } + + el, err := TimeWithYMD(end_ymd, edtf.HMS_LOWER) + + if err != nil { + return nil, err + } + + eu, err := TimeWithYMD(end_ymd, edtf.HMS_UPPER) + + if err != nil { + return nil, err + } + + start_lower_t = sl + start_upper_t = su + end_lower_t = el + end_upper_t = eu + + /* + fmt.Printf("START LOWER %v\n", sl) + fmt.Printf("START UPPER %v\n", su) + fmt.Printf("END LOWER %v\n", el) + fmt.Printf("END UPPER %v\n", eu) + */ + } + + // + + start_lower := &edtf.Date{ + YMD: start_ymd, + Uncertain: str_range.Uncertain, + Approximate: str_range.Approximate, + Precision: str_range.Precision, + } + + start_upper := &edtf.Date{ + YMD: start_ymd, + Uncertain: str_range.Uncertain, + Approximate: str_range.Approximate, + Precision: str_range.Precision, + } + + end_lower := &edtf.Date{ + YMD: end_ymd, + Uncertain: str_range.Uncertain, + Approximate: str_range.Approximate, + Precision: str_range.Precision, + } + + end_upper := &edtf.Date{ + YMD: end_ymd, + Uncertain: str_range.Uncertain, + 
Approximate: str_range.Approximate, + Precision: str_range.Precision, + } + + if start_lower_t != nil { + start_lower.SetTime(start_lower_t) + } + + if start_upper_t != nil { + start_upper.SetTime(start_upper_t) + } + + if end_lower_t != nil { + end_lower.SetTime(end_lower_t) + } + + if end_upper_t != nil { + end_upper.SetTime(end_upper_t) + } + + start_range := &edtf.DateRange{ + EDTF: edtf_str, + Lower: start_lower, + Upper: start_upper, + } + + end_range := &edtf.DateRange{ + EDTF: edtf_str, + Lower: end_lower, + Upper: end_upper, + } + + sp := &edtf.DateSpan{ + Start: start_range, + End: end_range, + } + + return sp, nil +} + +func EmptyDateSpan() *edtf.DateSpan { + + start := EmptyDateRange() + end := EmptyDateRange() + + sp := &edtf.DateSpan{ + Start: start, + End: end, + } + + return sp +} + +func UnknownDateSpan() *edtf.DateSpan { + + start := UnknownDateRange() + end := UnknownDateRange() + + sp := &edtf.DateSpan{ + Start: start, + End: end, + } + + return sp +} + +func OpenDateSpan() *edtf.DateSpan { + + start := OpenDateRange() + end := OpenDateRange() + + sp := &edtf.DateSpan{ + Start: start, + End: end, + } + + return sp +} diff --git a/vendor/github.com/sfomuseum/go-edtf/common/time.go b/vendor/github.com/sfomuseum/go-edtf/common/time.go new file mode 100644 index 0000000..6ea7c9c --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/common/time.go @@ -0,0 +1,56 @@ +package common + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "time" +) + +func TimeWithYMDString(str_yyyy string, str_mm string, str_dd string, hms string) (*time.Time, error) { + + ymd, err := YMDFromStrings(str_yyyy, str_mm, str_dd) + + if err != nil { + return nil, err + } + + return TimeWithYMD(ymd, hms) +} + +func TimeWithYMD(ymd *edtf.YMD, hms string) (*time.Time, error) { + + // See this? If yyyy < 0 then we are dealing with a BCE year + // which can't be parsed by the time.Parse() function so we're + // going to set a flag and convert yyyy to a positive number. + // After we've created time.Time instances below, we'll check to see + // whether the flag is set and if it is then we'll update the + // year to be BCE again. One possible gotcha in this approach is + // that the calendar.DaysInMonth method may return wonky results + // since it will calculating things on a CE year rather than a BCE + // year. 
(20201230/thisisaaronland) + + yyyy := ymd.Year + mm := ymd.Month + dd := ymd.Day + + is_bce := false + + if yyyy < 0 { + is_bce = true + yyyy = FlipYear(yyyy) + } + + t_str := fmt.Sprintf("%04d-%02d-%02dT%s", yyyy, mm, dd, hms) + + t, err := time.Parse("2006-01-02T15:04:05", t_str) + + if err != nil { + return nil, err + } + + if is_bce { + t = TimeToBCE(t) + } + + return &t, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/common/year.go b/vendor/github.com/sfomuseum/go-edtf/common/year.go new file mode 100644 index 0000000..16e8463 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/common/year.go @@ -0,0 +1,13 @@ +package common + +import ( + "time" +) + +func FlipYear(yyyy int) int { + return yyyy - (yyyy * 2) +} + +func TimeToBCE(t time.Time) time.Time { + return t.AddDate(-2*t.Year(), 0, 0) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/common/ymd.go b/vendor/github.com/sfomuseum/go-edtf/common/ymd.go new file mode 100644 index 0000000..69480f5 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/common/ymd.go @@ -0,0 +1,157 @@ +package common + +import ( + "errors" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/calendar" + "strconv" + "strings" +) + +func YMDFromStringDate(d *StringDate) (*edtf.YMD, error) { + return YMDFromStrings(d.Year, d.Month, d.Day) +} + +func YMDFromString(str_ymd string) (*edtf.YMD, error) { + + yyyy := "" + mm := "" + dd := "" + + parts := strings.Split(str_ymd, "-") + + switch len(parts) { + case 4: + yyyy = "-" + parts[1] + mm = parts[2] + dd = parts[3] + case 3: + yyyy = parts[0] + mm = parts[1] + dd = parts[2] + case 2: + yyyy = parts[0] + mm = parts[1] + case 1: + yyyy = parts[0] + default: + return nil, errors.New("Invalid YMD string") + } + + return YMDFromStrings(yyyy, mm, dd) +} + +func YMDFromStrings(str_yyyy string, str_mm string, str_dd string) (*edtf.YMD, error) { + + if str_yyyy == "" { + return nil, errors.New("Missing year") + } + + if str_mm == "" && str_dd != "" { + return nil, errors.New("Missing month") + } + + yyyy, err := strconv.Atoi(str_yyyy) + + if err != nil { + return nil, err + } + + // See this? If yyyy < 0 then we are dealing with a BCE year + // which can't be parsed by the time.Parse() function so we're + // going to set a flag and convert yyyy to a positive number. + // After we've created time.Time instances below, we'll check to see + // whether the flag is set and if it is then we'll update the + // year to be BCE again. One possible gotcha in this approach is + // that the calendar.DaysInMonth method may return wonky results + // since it will calculating things on a CE year rather than a BCE + // year. 
(20201230/thisisaaronland) + + is_bce := false + + if yyyy < 0 { + is_bce = true + yyyy = FlipYear(yyyy) + } + + mm := 0 + dd := 0 + + if str_mm != "" { + + m, err := strconv.Atoi(str_mm) + + if err != nil { + return nil, err + } + + mm = m + } + + if str_dd != "" { + + d, err := strconv.Atoi(str_dd) + + if err != nil { + return nil, err + } + + dd = d + } + + if yyyy == 0 { + return nil, errors.New("Missing year") + } + + if yyyy > edtf.MAX_YEARS { + return nil, edtf.Unsupported("year", strconv.Itoa(yyyy)) + } + + if mm == 0 && dd != 0 { + return nil, errors.New("Missing month") + } + + if mm == 0 { + mm = 1 + } else { + + if mm > 12 { + return nil, errors.New("Invalid month") + } + } + + if dd == 0 { + + days, err := calendar.DaysInMonth(yyyy, mm) + + if err != nil { + return nil, err + } + + dd = int(days) + + } else { + + days, err := calendar.DaysInMonth(yyyy, mm) + + if err != nil { + return nil, err + } + + if dd > days { + return nil, errors.New("Invalid days for month") + } + } + + if is_bce { + yyyy = FlipYear(yyyy) + } + + ymd := &edtf.YMD{ + Year: yyyy, + Month: mm, + Day: dd, + } + + return ymd, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/date.go b/vendor/github.com/sfomuseum/go-edtf/date.go new file mode 100644 index 0000000..1ba96d5 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/date.go @@ -0,0 +1,28 @@ +package edtf + +import ( + "fmt" + "time" +) + +type Date struct { + DateTime string `json:"datetime,omitempty"` + Timestamp *Timestamp `json:"timestamp,omitempty"` + YMD *YMD `json:"ymd"` + Uncertain Precision `json:"uncertain,omitempty"` + Approximate Precision `json:"approximate,omitempty"` + Unspecified Precision `json:"unspecified,omitempty"` + Precision Precision `json:"precision,omitempty"` + Open bool `json:"open,omitempty"` + Unknown bool `json:"unknown,omitempty"` + Inclusivity Precision `json:"inclusivity,omitempty"` +} + +func (d *Date) SetTime(t *time.Time) { + d.DateTime = t.Format(time.RFC3339) + d.Timestamp = NewTimestampWithTime(t) +} + +func (d *Date) String() string { + return fmt.Sprintf("[[%T] Time: '%v' YMD: '%v']", d, d.Timestamp, d.YMD) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/daterange.go b/vendor/github.com/sfomuseum/go-edtf/daterange.go new file mode 100644 index 0000000..4c6a9e4 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/daterange.go @@ -0,0 +1,15 @@ +package edtf + +import ( + "fmt" +) + +type DateRange struct { + EDTF string `json:"edtf"` + Lower *Date `json:"lower"` + Upper *Date `json:"upper"` +} + +func (r *DateRange) String() string { + return fmt.Sprintf("[[%T] Lower: '%v' Upper: '%v'[", r, r.Lower, r.Upper) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/datespan.go b/vendor/github.com/sfomuseum/go-edtf/datespan.go new file mode 100644 index 0000000..d44a7fc --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/datespan.go @@ -0,0 +1,14 @@ +package edtf + +import ( + "fmt" +) + +type DateSpan struct { + Start *DateRange `json:"start"` + End *DateRange `json:"end"` +} + +func (s *DateSpan) String() string { + return fmt.Sprintf("[[%T] Start: '%v' End: '%v']", s, s.Start, s.End) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/deprecated.go b/vendor/github.com/sfomuseum/go-edtf/deprecated.go new file mode 100644 index 0000000..80de99a --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/deprecated.go @@ -0,0 +1,42 @@ +package edtf + +import ( + "fmt" +) + +var deprecated map[string]string + +func init() { + + deprecated = map[string]string{ + OPEN_2012: OPEN, + UNSPECIFIED_2012: 
UNSPECIFIED, + } + +} + +// IsDeprecated returns a boolean flag indicating whether 'str' is considered a deprecated EDTF value. +func IsDeprecated(str string) bool { + + for test, _ := range deprecated { + + if str == test { + return true + } + } + + return false +} + +// ReplaceDeprecated returns the current value for 'old'. +func ReplaceDeprecated(old string) (string, error) { + + new, ok := deprecated[old] + + if !ok { + err := fmt.Errorf("Unknown or unsupported EDTF string '%s' : %v", old, deprecated) + return "", err + } + + return new, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/doc.go b/vendor/github.com/sfomuseum/go-edtf/doc.go new file mode 100644 index 0000000..fcfb915 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/doc.go @@ -0,0 +1,2 @@ +// package edtf provides method for parsing Extended DateTime Format (EDTF) date strings. It is compliant with Level 0, Level 1 and Level 2 of the EDTF specification (2019). +package edtf diff --git a/vendor/github.com/sfomuseum/go-edtf/edtf.go b/vendor/github.com/sfomuseum/go-edtf/edtf.go new file mode 100644 index 0000000..b1c7726 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/edtf.go @@ -0,0 +1,150 @@ +package edtf + +import ( + "fmt" + "time" +) + +const UNCERTAIN string = "?" +const APPROXIMATE string = "~" +const UNCERTAIN_AND_APPROXIMATE string = "%" +const OPEN string = ".." +const OPEN_2012 string = "open" +const UNSPECIFIED string = "" +const UNSPECIFIED_2012 string = "uuuu" +const UNKNOWN string = UNSPECIFIED // this code was incorrectly referring to "unspecified" as "unknown" +const UNKNOWN_2012 string = UNSPECIFIED_2012 + +const NEGATIVE string = "-" + +const HMS_LOWER string = "00:00:00" +const HMS_UPPER string = "23:59:59" + +const MAX_YEARS int = 9999 // This is a Golang thing + +// Return a boolean value indicating whether a string is considered to be an "open" EDTF date. +func IsOpen(s string) bool { + + switch s { + case OPEN, OPEN_2012: + return true + default: + return false + } +} + +// Return a boolean value indicating whether a string is considered to be an "unspecified" EDTF date. +func IsUnspecified(s string) bool { + + switch s { + case UNSPECIFIED, UNSPECIFIED_2012: + return true + default: + return false + } +} + +// Return a boolean value indicating whether a string is considered to be an "unknown" EDTF date. +func IsUnknown(s string) bool { + + switch s { + case UNKNOWN, UNKNOWN_2012: + return true + default: + return false + } +} + +type EDTFDate struct { + Start *DateRange `json:"start"` + End *DateRange `json:"end"` + EDTF string `json:"edtf"` + Level int `json:"level"` + Feature string `json:"feature"` +} + +func (d *EDTFDate) Lower() (*time.Time, error) { + + ts := d.Start.Lower.Timestamp + + if ts == nil { + return nil, NotSet() + } + + return ts.Time(), nil +} + +func (d *EDTFDate) Upper() (*time.Time, error) { + + ts := d.End.Upper.Timestamp + + if ts == nil { + return nil, NotSet() + } + + return ts.Time(), nil +} + +/* + +Eventually this should be generated from the components pieces +collected during parsing and compared against Raw but this will +do for now (20201223/thisisaaronland) + +*/ + +func (d *EDTFDate) String() string { + return d.EDTF +} + +// After reports whether the EDTFDate instance `d` is after `u`. 
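+// It compares d's lower bound against u's upper bound; "open" dates always report false.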
+func (d *EDTFDate) After(u *EDTFDate) (bool, error) {
+
+	if IsOpen(d.EDTF) {
+		return false, nil
+	}
+
+	u_t, err := u.Upper()
+
+	if err != nil {
+		return false, fmt.Errorf("Failed to derive upper time for inception date (%s), %w", u.EDTF, err)
+	}
+
+	t, err := d.Lower()
+
+	if err != nil {
+		return false, fmt.Errorf("Failed to derive lower time for cessation date (%s), %w", d.EDTF, err)
+	}
+
+	if u_t.After(*t) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// Before reports whether the EDTFDate instance `d` is before `u`.
+func (d *EDTFDate) Before(u *EDTFDate) (bool, error) {
+
+	if IsOpen(d.EDTF) {
+		return false, nil
+	}
+
+	u_t, err := u.Lower()
+
+	if err != nil {
+		return false, fmt.Errorf("Failed to derive lower time for inception date (%s), %w", u.EDTF, err)
+	}
+
+	t, err := d.Upper()
+
+	if err != nil {
+		return false, fmt.Errorf("Failed to derive upper time for cessation date (%s), %w", d.EDTF, err)
+	}
+
+	if u_t.Before(*t) {
+		return false, nil
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/sfomuseum/go-edtf/errors.go b/vendor/github.com/sfomuseum/go-edtf/errors.go
new file mode 100644
index 0000000..5debc12
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-edtf/errors.go
@@ -0,0 +1,130 @@
+package edtf
+
+import (
+	"fmt"
+)
+
+type NotSetError struct {
+}
+
+func (e *NotSetError) Error() string {
+	return "This property has not (or can not) been set"
+}
+
+func NotSet() error {
+	return &NotSetError{}
+}
+
+func IsNotSet(e error) bool {
+
+	switch e.(type) {
+	case *NotSetError:
+		return true
+	default:
+		return false
+	}
+}
+
+type NotImplementedError struct {
+	edtf_str string
+	label    string
+}
+
+func (e *NotImplementedError) Error() string {
+	return fmt.Sprintf("Not implemented '%s' (%s)", e.edtf_str, e.label)
+}
+
+func NotImplemented(label string, edtf_str string) error {
+	return &NotImplementedError{
+		edtf_str: edtf_str,
+		label:    label,
+	}
+}
+
+func IsNotImplemented(e error) bool {
+
+	switch e.(type) {
+	case *NotImplementedError:
+		return true
+	default:
+		return false
+	}
+}
+
+type InvalidError struct {
+	edtf_str string
+	label    string
+}
+
+func (e *InvalidError) Error() string {
+	return fmt.Sprintf("Invalid EDTF string '%s' (%s)", e.edtf_str, e.label)
+}
+
+func Invalid(label string, edtf_str string) error {
+	return &InvalidError{
+		edtf_str: edtf_str,
+		label:    label,
+	}
+}
+
+func IsInvalid(e error) bool {
+
+	switch e.(type) {
+	case *InvalidError:
+		return true
+	default:
+		return false
+	}
+}
+
+type UnsupportedError struct {
+	edtf_str string
+	label    string
+}
+
+func (e *UnsupportedError) Error() string {
+	return fmt.Sprintf("Unsupported EDTF string '%s' (%s)", e.edtf_str, e.label)
+}
+
+func Unsupported(label string, edtf_str string) error {
+	return &UnsupportedError{
+		edtf_str: edtf_str,
+		label:    label,
+	}
+}
+
+func IsUnsupported(e error) bool {
+
+	switch e.(type) {
+	case *UnsupportedError:
+		return true
+	default:
+		return false
+	}
+}
+
+type UnrecognizedError struct {
+	edtf_str string
+	label    string
+}
+
+func (e *UnrecognizedError) Error() string {
+	return fmt.Sprintf("Unrecognized EDTF string '%s' (%s)", e.edtf_str, e.label)
+}
+
+func Unrecognized(label string, edtf_str string) error {
+	return &UnrecognizedError{
+		edtf_str: edtf_str,
+		label:    label,
+	}
+}
+
+func IsUnrecognized(e error) bool {
+
+	switch e.(type) {
+	case *UnrecognizedError:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sfomuseum/go-edtf/level0/date.go
b/vendor/github.com/sfomuseum/go-edtf/level0/date.go new file mode 100644 index 0000000..a8c9493 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level0/date.go @@ -0,0 +1,47 @@ +package level0 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" +) + +/* + +Date + + complete representation: [year][“-”][month][“-”][day] + Example 1 ‘1985-04-12’ refers to the calendar date 1985 April 12th with day precision. + reduced precision for year and month: [year][“-”][month] + Example 2 ‘1985-04’ refers to the calendar month 1985 April with month precision. + reduced precision for year: [year] + Example 3 ‘1985’ refers to the calendar year 1985 with year precision. + +*/ + +func IsDate(edtf_str string) bool { + return re.Date.MatchString(edtf_str) +} + +func ParseDate(edtf_str string) (*edtf.EDTFDate, error) { + + if !re.Date.MatchString(edtf_str) { + return nil, edtf.Invalid(DATE, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: DATE, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level0/date_and_time.go b/vendor/github.com/sfomuseum/go-edtf/level0/date_and_time.go new file mode 100644 index 0000000..2fc5227 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level0/date_and_time.go @@ -0,0 +1,104 @@ +package level0 + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "strings" + "time" +) + +/* + +Date and Time + + [date][“T”][time] + Complete representations for calendar date and (local) time of day + Example 1 ‘1985-04-12T23:20:30’ refers to the date 1985 April 12th at 23:20:30 local time. + [dateI][“T”][time][“Z”] + Complete representations for calendar date and UTC time of day + Example 2 ‘1985-04-12T23:20:30Z’ refers to the date 1985 April 12th at 23:20:30 UTC time. + [dateI][“T”][time][shiftHour] + Date and time with timeshift in hours (only) + Example 3 ‘1985-04-12T23:20:30-04’ refers to the date 1985 April 12th time of day 23:20:30 with time shift of 4 hours behind UTC. + [dateI][“T”][time][shiftHourMinute] + Date and time with timeshift in hours and minutes + Example 4 ‘1985-04-12T23:20:30+04:30’ refers to the date 1985 April 12th, time of day 23:20:30 with time shift of 4 hours and 30 minutes ahead of UTC. 
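+
+	Note: whatever time shift appears in the input, the parsed value is normalized to UTC below.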
+ +*/ + +func IsDateAndTime(edtf_str string) bool { + return re.DateAndTime.MatchString(edtf_str) +} + +func ParseDateAndTime(edtf_str string) (*edtf.EDTFDate, error) { + + m := re.DateAndTime.FindStringSubmatch(edtf_str) + + if len(m) != 12 { + return nil, edtf.Invalid(DATE_AND_TIME, edtf_str) + } + + t_fmt := "2006-01-02T15:04:05" + + if m[7] == "Z" { + t_fmt = "2006-01-02T15:04:05Z" + } + + if m[8] == "-" || m[8] == "+" { + + if strings.HasPrefix(m[10], ":") { + t_fmt = "2006-01-02T15:04:05-07:00" + } else { + t_fmt = "2006-01-02T15:04:05-07" + } + } + + is_bce := false + + if strings.HasPrefix(edtf_str, "-") { + is_bce = true + + t_fmt = fmt.Sprintf("-%s", t_fmt) + } + + t, err := time.Parse(t_fmt, edtf_str) + + if err != nil { + return nil, err + } + + t = t.UTC() + + if is_bce { + t = common.TimeToBCE(t) + } + + upper_date := &edtf.Date{} + + lower_date := &edtf.Date{} + + upper_date.SetTime(&t) + lower_date.SetTime(&t) + + start := &edtf.DateRange{ + Lower: lower_date, + Upper: lower_date, + } + + end := &edtf.DateRange{ + Lower: upper_date, + Upper: upper_date, + } + + d := &edtf.EDTFDate{ + Start: start, + End: end, + EDTF: edtf_str, + Level: LEVEL, + Feature: DATE_AND_TIME, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level0/level0.go b/vendor/github.com/sfomuseum/go-edtf/level0/level0.go new file mode 100644 index 0000000..dc3f867 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level0/level0.go @@ -0,0 +1,50 @@ +package level0 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/re" +) + +const LEVEL int = 0 + +const DATE string = "Date" +const DATE_AND_TIME string = "Date and Time" +const TIME_INTERVAL string = "Time Interval" + +func IsLevel0(edtf_str string) bool { + return re.Level0.MatchString(edtf_str) +} + +func Matches(edtf_str string) (string, error) { + + if IsDate(edtf_str) { + return DATE, nil + } + + if IsDateAndTime(edtf_str) { + return DATE_AND_TIME, nil + } + + if IsTimeInterval(edtf_str) { + return TIME_INTERVAL, nil + } + + return "", edtf.Invalid("Invalid Level 0 string", edtf_str) +} + +func ParseString(edtf_str string) (*edtf.EDTFDate, error) { + + if IsDate(edtf_str) { + return ParseDate(edtf_str) + } + + if IsDateAndTime(edtf_str) { + return ParseDateAndTime(edtf_str) + } + + if IsTimeInterval(edtf_str) { + return ParseTimeInterval(edtf_str) + } + + return nil, edtf.Invalid("Invalid Level 0 string", edtf_str) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level0/tests.go b/vendor/github.com/sfomuseum/go-edtf/level0/tests.go new file mode 100644 index 0000000..9dc1faa --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level0/tests.go @@ -0,0 +1,140 @@ +package level0 + +import ( + "github.com/sfomuseum/go-edtf/tests" +) + +var Tests map[string]map[string]*tests.TestResult = map[string]map[string]*tests.TestResult{ + DATE: map[string]*tests.TestResult{ + "1985-04-12": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-12T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-12T00:00:00Z", + EndLowerTimeRFC3339: "1985-04-12T23:59:59Z", + EndUpperTimeRFC3339: "1985-04-12T23:59:59Z", + }), + "1985-04": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-01T23:59:59Z", + EndLowerTimeRFC3339: "1985-04-30T00:00:00Z", + EndUpperTimeRFC3339: "1985-04-30T23:59:59Z", + }), + "1985": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-01-01T00:00:00Z", + StartUpperTimeRFC3339: 
"1985-01-01T23:59:59Z", + EndLowerTimeRFC3339: "1985-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1985-12-31T23:59:59Z", + }), + "-0400": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-0400-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-0400-01-01T23:59:59Z", + EndLowerTimeRFC3339: "-0400-12-31T00:00:00Z", + EndUpperTimeRFC3339: "-0400-12-31T23:59:59Z", + }), + "-1200-06": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-1200-06-01T00:00:00Z", + StartUpperTimeRFC3339: "-1200-06-01T23:59:59Z", + EndLowerTimeRFC3339: "-1200-06-30T00:00:00Z", + EndUpperTimeRFC3339: "-1200-06-30T23:59:59Z", + }), + }, + DATE_AND_TIME: map[string]*tests.TestResult{ + "1985-04-12T23:20:30": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-12T23:20:30Z", + StartUpperTimeRFC3339: "1985-04-12T23:20:30Z", + EndLowerTimeRFC3339: "1985-04-12T23:20:30Z", + EndUpperTimeRFC3339: "1985-04-12T23:20:30Z", + }), + "2021-12-10T01:29:00Z": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2021-12-10T01:29:00Z", + StartUpperTimeRFC3339: "2021-12-10T01:29:00Z", + EndLowerTimeRFC3339: "2021-12-10T01:29:00Z", + EndUpperTimeRFC3339: "2021-12-10T01:29:00Z", + }), + "2021-10-10T00:24:00Z": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2021-10-10T00:24:00Z", + StartUpperTimeRFC3339: "2021-10-10T00:24:00Z", + EndLowerTimeRFC3339: "2021-10-10T00:24:00Z", + EndUpperTimeRFC3339: "2021-10-10T00:24:00Z", + }), + "2021-09-20T21:14:00Z": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2021-09-20T21:14:00Z", + StartUpperTimeRFC3339: "2021-09-20T21:14:00Z", + EndLowerTimeRFC3339: "2021-09-20T21:14:00Z", + EndUpperTimeRFC3339: "2021-09-20T21:14:00Z", + }), + "1985-04-12T23:20:30Z": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-12T23:20:30Z", + StartUpperTimeRFC3339: "1985-04-12T23:20:30Z", + EndLowerTimeRFC3339: "1985-04-12T23:20:30Z", + EndUpperTimeRFC3339: "1985-04-12T23:20:30Z", + }), + "1985-04-12T23:20:30-04": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-13T03:20:30Z", + StartUpperTimeRFC3339: "1985-04-13T03:20:30Z", + EndLowerTimeRFC3339: "1985-04-13T03:20:30Z", + EndUpperTimeRFC3339: "1985-04-13T03:20:30Z", + }), + "1985-04-12T23:20:30+04:30": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-12T18:50:30Z", + StartUpperTimeRFC3339: "1985-04-12T18:50:30Z", + EndLowerTimeRFC3339: "1985-04-12T18:50:30Z", + EndUpperTimeRFC3339: "1985-04-12T18:50:30Z", + }), + "-1972-04-12T23:20:28": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-1972-04-12T23:20:28Z", + StartUpperTimeRFC3339: "-1972-04-12T23:20:28Z", + EndLowerTimeRFC3339: "-1972-04-12T23:20:28Z", + EndUpperTimeRFC3339: "-1972-04-12T23:20:28Z", + }), + }, + TIME_INTERVAL: map[string]*tests.TestResult{ + "1964/2008": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1964-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1964-12-31T23:59:59Z", + EndLowerTimeRFC3339: "2008-01-01T00:00:00Z", + EndUpperTimeRFC3339: "2008-12-31T23:59:59Z", + }), + "2004-06/2006-08": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-30T23:59:59Z", + EndLowerTimeRFC3339: "2006-08-01T00:00:00Z", + EndUpperTimeRFC3339: "2006-08-31T23:59:59Z", + }), + "2004-02-01/2005-02-08": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-02-01T00:00:00Z", + 
StartUpperTimeRFC3339: "2004-02-01T23:59:59Z", + EndLowerTimeRFC3339: "2005-02-08T00:00:00Z", + EndUpperTimeRFC3339: "2005-02-08T23:59:59Z", + }), + "2004-02-01/2005-02": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-02-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-02-01T23:59:59Z", + EndLowerTimeRFC3339: "2005-02-01T00:00:00Z", + EndUpperTimeRFC3339: "2005-02-28T23:59:59Z", + }), + "2004-02-01/2005": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-02-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-02-01T23:59:59Z", + EndLowerTimeRFC3339: "2005-01-01T00:00:00Z", + EndUpperTimeRFC3339: "2005-12-31T23:59:59Z", + }), + "2005/2020-02": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2005-01-01T00:00:00Z", + StartUpperTimeRFC3339: "2005-12-31T23:59:59Z", + EndLowerTimeRFC3339: "2020-02-01T00:00:00Z", + EndUpperTimeRFC3339: "2020-02-29T23:59:59Z", // leap year + }), + "-0200/0200": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-0200-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-0200-12-31T23:59:59Z", + EndLowerTimeRFC3339: "0200-01-01T00:00:00Z", + EndUpperTimeRFC3339: "0200-12-31T23:59:59Z", + }), + "-1200-06/0200-05-02": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-1200-06-01T00:00:00Z", + StartUpperTimeRFC3339: "-1200-06-30T23:59:59Z", + EndLowerTimeRFC3339: "0200-05-02T00:00:00Z", + EndUpperTimeRFC3339: "0200-05-02T23:59:59Z", + }), + }, +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level0/time_interval.go b/vendor/github.com/sfomuseum/go-edtf/level0/time_interval.go new file mode 100644 index 0000000..b8d0a13 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level0/time_interval.go @@ -0,0 +1,49 @@ +package level0 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" +) + +/* + +Time Interval + +EDTF Level 0 adopts representations of a time interval where both the start and end are dates: start and end date only; that is, both start and duration, and duration and end, are excluded. Time of day is excluded. + + Example 1 ‘1964/2008’ is a time interval with calendar year precision, beginning sometime in 1964 and ending sometime in 2008. + Example 2 ‘2004-06/2006-08’ is a time interval with calendar month precision, beginning sometime in June 2004 and ending sometime in August of 2006. + Example 3 ‘2004-02-01/2005-02-08’ is a time interval with calendar day precision, beginning sometime on February 1, 2004 and ending sometime on February 8, 2005. + Example 4 ‘2004-02-01/2005-02’ is a time interval beginning sometime on February 1, 2004 and ending sometime in February 2005. Since the start endpoint precision (day) is different than that of the end endpoint (month) the precision of the time interval at large is undefined. + Example 5 ‘2004-02-01/2005’ is a time interval beginning sometime on February 1, 2004 and ending sometime in 2005. The start endpoint has calendar day precision and the end endpoint has calendar year precision. Similar to the previous example, the precision of the time interval at large is undefined. + Example 6 ‘2005/2006-02’ is a time interval beginning sometime in 2005 and ending sometime in February 2006. 
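+
+	Note: in the date spans derived below, the start of an interval is clamped to 00:00:00 and the end to 23:59:59 (edtf.HMS_LOWER and edtf.HMS_UPPER respectively).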
+
+*/
+
+func IsTimeInterval(edtf_str string) bool {
+	return re.TimeInterval.MatchString(edtf_str)
+}
+
+func ParseTimeInterval(edtf_str string) (*edtf.EDTFDate, error) {
+
+	if !re.TimeInterval.MatchString(edtf_str) {
+		return nil, edtf.Invalid(TIME_INTERVAL, edtf_str)
+	}
+
+	sp, err := common.DateSpanFromEDTF(edtf_str)
+
+	if err != nil {
+		return nil, err
+	}
+
+	d := &edtf.EDTFDate{
+		Start:   sp.Start,
+		End:     sp.End,
+		EDTF:    edtf_str,
+		Level:   LEVEL,
+		Feature: TIME_INTERVAL,
+	}
+
+	return d, nil
+}
diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/extended_interval.go b/vendor/github.com/sfomuseum/go-edtf/level1/extended_interval.go
new file mode 100644
index 0000000..a80e971
--- /dev/null
+++ b/vendor/github.com/sfomuseum/go-edtf/level1/extended_interval.go
@@ -0,0 +1,141 @@
+package level1
+
+import (
+	"github.com/sfomuseum/go-edtf"
+	"github.com/sfomuseum/go-edtf/common"
+	"github.com/sfomuseum/go-edtf/re"
+)
+
+/*
+
+Extended Interval (L1)
+
+	A null string may be used for the start or end date when it is unknown.
+	Double-dot (“..”) may be used when either the start or end date is not specified, either because there is none or for any other reason.
+	A modifier may appear at the end of the date to indicate "uncertain" and/or "approximate".
+
+Open end time interval
+
+	Example 1 ‘1985-04-12/..’
+	interval starting at 1985 April 12th with day precision; end open
+	Example 2 ‘1985-04/..’
+	interval starting at 1985 April with month precision; end open
+	Example 3 ‘1985/..’
+	interval starting at year 1985 with year precision; end open
+
+Open start time interval
+
+	Example 4 ‘../1985-04-12’
+	interval with open start; ending 1985 April 12th with day precision
+	Example 5 ‘../1985-04’
+	interval with open start; ending 1985 April with month precision
+	Example 6 ‘../1985’
+	interval with open start; ending at year 1985 with year precision
+
+Time interval with unknown end
+
+	Example 7 ‘1985-04-12/’
+	interval starting 1985 April 12th with day precision; end unknown
+	Example 8 ‘1985-04/’
+	interval starting 1985 April with month precision; end unknown
+	Example 9 ‘1985/’
+	interval starting year 1985 with year precision; end unknown
+
+Time interval with unknown start
+
+	Example 10 ‘/1985-04-12’
+	interval with unknown start; ending 1985 April 12th with day precision
+	Example 11 ‘/1985-04’
+	interval with unknown start; ending 1985 April with month precision
+	Example 12 ‘/1985’
+	interval with unknown start; ending year 1985 with year precision
+
+*/
+
+func IsExtendedInterval(edtf_str string) bool {
+
+	if re.IntervalEnd.MatchString(edtf_str) {
+		return true
+	}
+
+	if re.IntervalStart.MatchString(edtf_str) {
+		return true
+	}
+
+	return false
+}
+
+func ParseExtendedInterval(edtf_str string) (*edtf.EDTFDate, error) {
+
+	if re.IntervalStart.MatchString(edtf_str) {
+		return ParseExtendedIntervalStart(edtf_str)
+	}
+
+	if re.IntervalEnd.MatchString(edtf_str) {
+		return ParseExtendedIntervalEnd(edtf_str)
+	}
+
+	return nil, edtf.Invalid(EXTENDED_INTERVAL, edtf_str)
+}
+
+func ParseExtendedIntervalStart(edtf_str string) (*edtf.EDTFDate, error) {
+
+	/*
+
+		START 5 ../1985-04-12,..,1985,04,12
+		START 5 ../1985-04,..,1985,04,
+		START 5 ../1985,..,1985,,
+		START 5 /1985-04-12,,1985,04,12
+		START 5 /1985-04,,1985,04,
+		START 5 /1985,,1985,,
+
+	*/
+
+	if !re.IntervalStart.MatchString(edtf_str) {
+		return nil, edtf.Invalid(EXTENDED_INTERVAL_START, edtf_str)
+	}
+
+	sp, err := common.DateSpanFromEDTF(edtf_str)
+
+	if err != nil {
+		return nil, err
+	}
+
+	d := &edtf.EDTFDate{
Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: EXTENDED_INTERVAL_START, + } + + return d, nil +} + +func ParseExtendedIntervalEnd(edtf_str string) (*edtf.EDTFDate, error) { + + /* + END 5 1985/..,1985,,,.. + END 5 1985/,1985,,, + */ + + if !re.IntervalEnd.MatchString(edtf_str) { + return nil, edtf.Invalid(EXTENDED_INTERVAL_END, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: EXTENDED_INTERVAL_END, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/letter_prefixed_calendar_year.go b/vendor/github.com/sfomuseum/go-edtf/level1/letter_prefixed_calendar_year.go new file mode 100644 index 0000000..8677768 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/letter_prefixed_calendar_year.go @@ -0,0 +1,69 @@ +package level1 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "strings" +) + +/* + +'Y' may be used at the beginning of the date string to signify that the date is a year, when (and only when) the year exceeds four digits, i.e. for years later than 9999 or earlier than -9999. + + Example 1 'Y170000002' is the year 170000002 + Example 2 'Y-170000002' is the year -170000002 + +*/ + +func IsLetterPrefixedCalendarYear(edtf_str string) bool { + return re.LetterPrefixedCalendarYear.MatchString(edtf_str) +} + +func ParseLetterPrefixedCalendarYear(edtf_str string) (*edtf.EDTFDate, error) { + + m := re.LetterPrefixedCalendarYear.FindStringSubmatch(edtf_str) + + if len(m) != 2 { + return nil, edtf.Invalid(LETTER_PREFIXED_CALENDAR_YEAR, edtf_str) + } + + // Years must be in the range 0000..9999. + // https://golang.org/pkg/time/#Parse + + // sigh.... 
+ // fmt.Printf("DEBUG %v\n", start.Add(time.Hour * 8760 * 1000)) + // ./prog.go:21:54: constant 31536000000000000000 overflows time.Duration + + // common.DateSpanFromEDTF needs to be updated to simply assign a valid + // *edtf.YMD element and leave *time.Time blank when creating *edtf.Date + // instances (20210105/thisisaaronland) + + yyyy := m[1] + + max_length := 4 + + if strings.HasPrefix(yyyy, "-") { + max_length = 5 + } + + if len(yyyy) > max_length { + return nil, edtf.Unsupported(LETTER_PREFIXED_CALENDAR_YEAR, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(yyyy) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: LETTER_PREFIXED_CALENDAR_YEAR, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/level1.go b/vendor/github.com/sfomuseum/go-edtf/level1/level1.go new file mode 100644 index 0000000..d85c82e --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/level1.go @@ -0,0 +1,86 @@ +package level1 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/re" +) + +const LEVEL int = 1 + +const LETTER_PREFIXED_CALENDAR_YEAR string = "Letter-prefixed calendar year" +const SEASON string = "Seasons" +const QUALIFIED_DATE string = "Qualification of a date (complete)" +const UNSPECIFIED_DIGITS string = "Unspecified digit(s) from the right" +const EXTENDED_INTERVAL string = "Extended Interval" +const EXTENDED_INTERVAL_START string = "Extended Interval (Start)" +const EXTENDED_INTERVAL_END string = "Extended Interval (End)" +const NEGATIVE_CALENDAR_YEAR string = "Negative calendar year" + +func IsLevel1(edtf_str string) bool { + return re.Level1.MatchString(edtf_str) +} + +func Matches(edtf_str string) (string, error) { + + if IsLetterPrefixedCalendarYear(edtf_str) { + return LETTER_PREFIXED_CALENDAR_YEAR, nil + } + + if IsSeason(edtf_str) { + return SEASON, nil + } + + if IsQualifiedDate(edtf_str) { + return QUALIFIED_DATE, nil + } + + if IsUnspecifiedDigits(edtf_str) { + return UNSPECIFIED_DIGITS, nil + } + + if IsNegativeCalendarYear(edtf_str) { + return NEGATIVE_CALENDAR_YEAR, nil + } + + if IsExtendedInterval(edtf_str) { + + if re.IntervalStart.MatchString(edtf_str) { + return EXTENDED_INTERVAL_START, nil + } + + if re.IntervalEnd.MatchString(edtf_str) { + return EXTENDED_INTERVAL_END, nil + } + } + + return "", edtf.Invalid("Invalid Level 1 string", edtf_str) +} + +func ParseString(edtf_str string) (*edtf.EDTFDate, error) { + + if IsLetterPrefixedCalendarYear(edtf_str) { + return ParseLetterPrefixedCalendarYear(edtf_str) + } + + if IsSeason(edtf_str) { + return ParseSeason(edtf_str) + } + + if IsQualifiedDate(edtf_str) { + return ParseQualifiedDate(edtf_str) + } + + if IsUnspecifiedDigits(edtf_str) { + return ParseUnspecifiedDigits(edtf_str) + } + + if IsNegativeCalendarYear(edtf_str) { + return ParseNegativeCalendarYear(edtf_str) + } + + if IsExtendedInterval(edtf_str) { + return ParseExtendedInterval(edtf_str) + } + + return nil, edtf.Invalid("Invalid or unsupported Level 1 EDTF string", edtf_str) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/negative_calendar_year.go b/vendor/github.com/sfomuseum/go-edtf/level1/negative_calendar_year.go new file mode 100644 index 0000000..c3e9c4d --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/negative_calendar_year.go @@ -0,0 +1,44 @@ +package level1 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + 
"github.com/sfomuseum/go-edtf/re" +) + +/* + + Negative calendar year + + Example 1 ‘-1985’ + +Note: ISO 8601 Part 1 does not support negative year. + +*/ + +func IsNegativeCalendarYear(edtf_str string) bool { + return re.NegativeYear.MatchString(edtf_str) +} + +func ParseNegativeCalendarYear(edtf_str string) (*edtf.EDTFDate, error) { + + if !re.NegativeYear.MatchString(edtf_str) { + return nil, edtf.Invalid(NEGATIVE_CALENDAR_YEAR, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: NEGATIVE_CALENDAR_YEAR, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/qualified_date.go b/vendor/github.com/sfomuseum/go-edtf/level1/qualified_date.go new file mode 100644 index 0000000..d02ccf7 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/qualified_date.go @@ -0,0 +1,46 @@ +package level1 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" +) + +/* + +Qualification of a date (complete) + +The characters '?', '~' and '%' are used to mean "uncertain", "approximate", and "uncertain" as well as "approximate", respectively. These characters may occur only at the end of the date string and apply to the entire date. + + Example 1 '1984?' year uncertain (possibly the year 1984, but not definitely) + Example 2 '2004-06~'' year-month approximate + Example 3 '2004-06-11%' entire date (year-month-day) uncertain and approximate + +*/ + +func IsQualifiedDate(edtf_str string) bool { + return re.QualifiedDate.MatchString(edtf_str) +} + +func ParseQualifiedDate(edtf_str string) (*edtf.EDTFDate, error) { + + if !re.QualifiedDate.MatchString(edtf_str) { + return nil, edtf.Invalid(QUALIFIED_DATE, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: QUALIFIED_DATE, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/season.go b/vendor/github.com/sfomuseum/go-edtf/level1/season.go new file mode 100644 index 0000000..e8b3cf7 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/season.go @@ -0,0 +1,193 @@ +package level1 + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/calendar" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "strconv" + "strings" +) + +/* + +Seasons + +The values 21, 22, 23, 24 may be used used to signify ' Spring', 'Summer', 'Autumn', 'Winter', respectively, in place of a month value (01 through 12) for a year-and-month format string. 
+ + Example 2001-21 Spring, 2001 + +*/ + +func IsSeason(edtf_str string) bool { + return re.Season.MatchString(edtf_str) +} + +func ParseSeason(edtf_str string) (*edtf.EDTFDate, error) { + + /* + SEASON 5 [2001-01 2001 01 ] + SEASON 5 [2001-24 2001 24 ] + SEASON 5 [Spring, 2002 Spring 2002] + SEASON 5 [winter, 2002 winter 2002] + */ + + m := re.Season.FindStringSubmatch(edtf_str) + + if len(m) != 5 { + return nil, edtf.Invalid(SEASON, edtf_str) + } + + var start_yyyy int + var start_mm int + var start_dd int + + var end_yyyy int + var end_mm int + var end_dd int + + if m[1] == "" { + + season := m[3] + str_yyyy := m[4] + + yyyy, err := strconv.Atoi(str_yyyy) + + if err != nil { + return nil, err + } + + switch strings.ToUpper(season) { + case "WINTER": + + start_yyyy = yyyy + start_mm = 12 + start_dd = 1 + + end_yyyy = yyyy + 1 + end_mm = 2 + + case "SPRING": + + start_yyyy = yyyy + start_mm = 3 + start_dd = 1 + + end_yyyy = yyyy + end_mm = 5 + + case "SUMMER": + + start_yyyy = yyyy + start_mm = 6 + start_dd = 1 + + end_yyyy = yyyy + end_mm = 8 + + case "FALL": + + start_yyyy = yyyy + start_mm = 9 + start_dd = 1 + + end_yyyy = yyyy + end_mm = 11 + + default: + return nil, edtf.Invalid(SEASON, edtf_str) + } + + } else { + + str_yyyy := m[1] + str_mm := m[2] + + yyyy, err := strconv.Atoi(str_yyyy) + + if err != nil { + return nil, err + } + + mm, err := strconv.Atoi(str_mm) + + if err != nil { + return nil, err + } + + switch mm { + case 21: // spring + + start_yyyy = yyyy + start_mm = 3 + start_dd = 1 + + end_yyyy = yyyy + end_mm = 5 + + case 22: // summer + + start_yyyy = yyyy + start_mm = 6 + start_dd = 1 + + end_yyyy = yyyy + end_mm = 8 + + case 23: // autumn + + start_yyyy = yyyy + start_mm = 9 + start_dd = 1 + + end_yyyy = yyyy + end_mm = 11 + + case 24: // winter + + start_yyyy = yyyy + start_mm = 12 + start_dd = 1 + + end_yyyy = yyyy + 1 + end_mm = 2 + + default: + + start_yyyy = yyyy + start_mm = mm + start_dd = 1 + + end_yyyy = yyyy + end_mm = mm + } + + } + + dm, err := calendar.DaysInMonth(end_yyyy, end_mm) + + if err != nil { + return nil, err + } + + end_dd = dm + + _str := fmt.Sprintf("%04d-%02d-%02d/%04d-%02d-%02d", start_yyyy, start_mm, start_dd, end_yyyy, end_mm, end_dd) + + sp, err := common.DateSpanFromEDTF(_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: SEASON, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/tests.go b/vendor/github.com/sfomuseum/go-edtf/level1/tests.go new file mode 100644 index 0000000..23e8172 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/tests.go @@ -0,0 +1,205 @@ +package level1 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/tests" +) + +var Tests map[string]map[string]*tests.TestResult = map[string]map[string]*tests.TestResult{ + LETTER_PREFIXED_CALENDAR_YEAR: map[string]*tests.TestResult{ + "Y170000002": tests.NewTestResult(tests.TestResultOptions{}), // TO DO + "Y-17000002": tests.NewTestResult(tests.TestResultOptions{}), // TO DO + "Y1700": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1700-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1700-01-01T23:59:59Z", + EndLowerTimeRFC3339: "1700-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1700-12-31T23:59:59Z", + }), + "Y-1200": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-1200-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-1200-01-01T23:59:59Z", + EndLowerTimeRFC3339: 
"-1200-12-31T00:00:00Z", + EndUpperTimeRFC3339: "-1200-12-31T23:59:59Z", + }), + }, + SEASON: map[string]*tests.TestResult{ + "2001-01": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2001-01-01T00:00:00Z", + StartUpperTimeRFC3339: "2001-01-01T23:59:59Z", + EndLowerTimeRFC3339: "2001-01-31T00:00:00Z", + EndUpperTimeRFC3339: "2001-01-31T23:59:59Z", + }), + "2019-24": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2019-12-01T00:00:00Z", + StartUpperTimeRFC3339: "2019-12-01T23:59:59Z", + EndLowerTimeRFC3339: "2020-02-29T00:00:00Z", + EndUpperTimeRFC3339: "2020-02-29T23:59:59Z", // leap year + }), + "Spring, 2002": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2002-03-01T00:00:00Z", + StartUpperTimeRFC3339: "2002-03-01T23:59:59Z", + EndLowerTimeRFC3339: "2002-05-31T00:00:00Z", + EndUpperTimeRFC3339: "2002-05-31T23:59:59Z", + }), + "winter, 2002": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2002-12-01T00:00:00Z", + StartUpperTimeRFC3339: "2002-12-01T23:59:59Z", + EndLowerTimeRFC3339: "2003-02-28T00:00:00Z", + EndUpperTimeRFC3339: "2003-02-28T23:59:59Z", + }), + /* + "Summer, -1980": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-1980-06-01T00:00:00Z", + StartUpperTimeRFC3339: "-1980-06-01T23:59:59Z", + EndLowerTimeRFC3339: "-1980-08-31T00:00:00Z", + EndUpperTimeRFC3339: "-1980-08-31T23:59:59Z", + }), + */ + }, + QUALIFIED_DATE: map[string]*tests.TestResult{ + "1984?": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1984-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1984-01-01T23:59:59Z", + EndLowerTimeRFC3339: "1984-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1984-12-31T23:59:59Z", + StartUpperUncertain: edtf.YEAR, + }), + "2004-06~": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-01T23:59:59Z", + EndLowerTimeRFC3339: "2004-06-30T00:00:00Z", + EndUpperTimeRFC3339: "2004-06-30T23:59:59Z", + EndLowerApproximate: edtf.MONTH, + }), + "2004-06-11%": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-11T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-11T00:00:00Z", + EndLowerTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperTimeRFC3339: "2004-06-11T23:59:59Z", + EndLowerUncertain: edtf.DAY, + EndLowerApproximate: edtf.DAY, + }), + }, + UNSPECIFIED_DIGITS: map[string]*tests.TestResult{ + "201X": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2010-01-01T00:00:00Z", + StartUpperTimeRFC3339: "2010-01-01T23:59:59Z", + EndLowerTimeRFC3339: "2019-12-31T00:00:00Z", + EndUpperTimeRFC3339: "2019-12-31T23:59:59Z", + StartUpperPrecision: edtf.DECADE, + }), + "20XX": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2000-01-01T00:00:00Z", + StartUpperTimeRFC3339: "2000-01-01T23:59:59Z", + EndLowerTimeRFC3339: "2099-12-31T00:00:00Z", + EndUpperTimeRFC3339: "2099-12-31T23:59:59Z", + StartUpperPrecision: edtf.CENTURY, + }), + "2004-XX": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-01-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-01-01T23:59:59Z", + EndLowerTimeRFC3339: "2004-12-31T00:00:00Z", + EndUpperTimeRFC3339: "2004-12-31T23:59:59Z", + StartUpperPrecision: edtf.YEAR, + }), + "1985-04-XX": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-01T23:59:59Z", + EndLowerTimeRFC3339: "1985-04-30T00:00:00Z", + 
EndUpperTimeRFC3339: "1985-04-30T23:59:59Z", + StartUpperPrecision: edtf.MONTH, + }), + "1985-XX-XX": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-01-01T23:59:59Z", + EndLowerTimeRFC3339: "1985-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1985-12-31T23:59:59Z", + StartUpperPrecision: edtf.YEAR, + }), + }, + EXTENDED_INTERVAL_START: map[string]*tests.TestResult{ + "../1985-04-12": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndLowerTimeRFC3339: "1985-04-12T00:00:00Z", + EndUpperTimeRFC3339: "1985-04-12T23:59:59Z", + }), + + "../1985-04": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndLowerTimeRFC3339: "1985-04-01T00:00:00Z", + EndUpperTimeRFC3339: "1985-04-30T23:59:59Z", + }), + "../1985": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndLowerTimeRFC3339: "1985-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1985-12-31T23:59:59Z", + }), + "/1985-04-12": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsUnknown: true, + StartUpperIsUnknown: true, + EndLowerTimeRFC3339: "1985-04-12T00:00:00Z", + EndUpperTimeRFC3339: "1985-04-12T23:59:59Z", + }), + "/1985-04": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsUnknown: true, + StartUpperIsUnknown: true, + EndLowerTimeRFC3339: "1985-04-01T00:00:00Z", + EndUpperTimeRFC3339: "1985-04-30T23:59:59Z", + }), + "/1985": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsUnknown: true, + StartUpperIsUnknown: true, + EndLowerTimeRFC3339: "1985-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1985-12-31T23:59:59Z", + }), + }, + EXTENDED_INTERVAL_END: map[string]*tests.TestResult{ + "1985-04-12/..": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-12T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-12T23:59:59Z", + EndLowerIsOpen: true, + EndUpperIsOpen: true, + }), + "1985-04/..": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-30T23:59:59Z", + EndLowerIsOpen: true, + EndUpperIsOpen: true, + }), + "1985/..": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-12-31T23:59:59Z", + EndLowerIsOpen: true, + EndUpperIsOpen: true, + }), + "1985-04-12/": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-12T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-12T23:59:59Z", + EndLowerIsUnknown: true, + EndUpperIsUnknown: true, + }), + "1985-04/": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-04-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-04-30T23:59:59Z", + EndLowerIsUnknown: true, + EndUpperIsUnknown: true, + }), + "1985/": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1985-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1985-12-31T23:59:59Z", + EndLowerIsUnknown: true, + EndUpperIsUnknown: true, + }), + }, + NEGATIVE_CALENDAR_YEAR: map[string]*tests.TestResult{ + "-1985": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-1985-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-1985-01-01T23:59:59Z", + EndLowerTimeRFC3339: "-1985-12-31T00:00:00Z", + EndUpperTimeRFC3339: "-1985-12-31T23:59:59Z", + }), + }, +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level1/unspecified_digits.go b/vendor/github.com/sfomuseum/go-edtf/level1/unspecified_digits.go 
new file mode 100644 index 0000000..88c8c35 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level1/unspecified_digits.go @@ -0,0 +1,53 @@ +package level1 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" +) + +/* + +Unspecified digit(s) from the right + +The character 'X' may be used in place of one or more rightmost digits to indicate that the value of that digit is unspecified, for the following cases: + + A year with one or two (rightmost) unspecified digits in a year-only expression (year precision) + Example 1 ‘201X’ + Example 2 ‘20XX’ + Year specified, month unspecified in a year-month expression (month precision) + Example 3 ‘2004-XX’ + Year and month specified, day unspecified in a year-month-day expression (day precision) + Example 4 ‘1985-04-XX’ + Year specified, day and month unspecified in a year-month-day expression (day precision) + Example 5 ‘1985-XX-XX’ + + +*/ + +func IsUnspecifiedDigits(edtf_str string) bool { + return re.UnspecifiedDigits.MatchString(edtf_str) +} + +func ParseUnspecifiedDigits(edtf_str string) (*edtf.EDTFDate, error) { + + if !re.UnspecifiedDigits.MatchString(edtf_str) { + return nil, edtf.Invalid(UNSPECIFIED_DIGITS, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: UNSPECIFIED_DIGITS, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/exponential_year.go b/vendor/github.com/sfomuseum/go-edtf/level2/exponential_year.go new file mode 100644 index 0000000..ea90b09 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/exponential_year.go @@ -0,0 +1,67 @@ +package level2 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "strconv" +) + +/* + +Exponential year + +'Y' at the beginning of the string (which indicates "year", as in level 1) may be followed by an integer, followed by 'E' followed by a positive integer. This signifies "times 10 to the power of". Thus 17E8 means "17 times 10 to the eighth power". 
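+
+ A minimal usage sketch, assuming only the ParseExponentialYear function
+ defined below (the expansion itself is delegated to
+ common.ParseExponentialNotation):
+
+	d, err := ParseExponentialYear("Y20E2") // 20 * 10^2 = the year 2000
+
+	if err != nil {
+		// handle the error
+	}
+
+	// d.Start and d.End now describe the span 2000-01-01/2000-12-31,
+	// per the package tests in tests.go
+	_ = d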
+ + Example ‘Y-17E7’ + the calendar year -17*10 to the seventh power= -170000000 + +*/ + +func IsExponentialYear(edtf_str string) bool { + return re.ExponentialYear.MatchString(edtf_str) +} + +func ParseExponentialYear(edtf_str string) (*edtf.EDTFDate, error) { + + /* + EXP 5 Y-17E7,-17E7,-,17,7 + EXP 5 Y10E7,10E7,,10,7 + */ + + if !re.ExponentialYear.MatchString(edtf_str) { + return nil, edtf.Invalid(EXPONENTIAL_YEAR, edtf_str) + } + + m := re.ExponentialYear.FindStringSubmatch(edtf_str) + + if len(m) != 2 { + return nil, edtf.Invalid(EXPONENTIAL_YEAR, edtf_str) + } + + notation := m[1] + + yyyy, err := common.ParseExponentialNotation(notation) + + if err != nil { + return nil, err + } + + str_yyyy := strconv.Itoa(yyyy) + + sp, err := common.DateSpanFromEDTF(str_yyyy) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: EXPONENTIAL_YEAR, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/interval.go b/vendor/github.com/sfomuseum/go-edtf/level2/interval.go new file mode 100644 index 0000000..d5b94bf --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/interval.go @@ -0,0 +1,55 @@ +package level2 + +import ( + // "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + //"strings" +) + +/* + +For Level 2 portions of a date within an interval may be designated as approximate, uncertain, or unspecified. + + Example 1 ‘2004-06-~01/2004-06-~20’ + An interval in June 2004 beginning approximately the first and ending approximately the 20th + Example 2 ‘2004-06-XX/2004-07-03’ + An interval beginning on an unspecified day in June 2004 and ending July 3. + + +*/ + +func IsInterval(edtf_str string) bool { + return re.Interval.MatchString(edtf_str) +} + +func ParseInterval(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + INTERVAL 2004-06-~01/2004-06-~20 13 2004-06-~01/2004-06-~20,,2004,,06,~,01,,2004,,06,~,20 + INTERVAL 2004-06-XX/2004-07-03 13 2004-06-XX/2004-07-03,,2004,,06,,XX,,2004,,07,,03 + + */ + + if !re.Interval.MatchString(edtf_str) { + return nil, edtf.Invalid(INTERVAL, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: INTERVAL, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/level2.go b/vendor/github.com/sfomuseum/go-edtf/level2/level2.go new file mode 100644 index 0000000..e265bee --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/level2.go @@ -0,0 +1,95 @@ +package level2 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/re" +) + +const LEVEL int = 2 + +const EXPONENTIAL_YEAR string = "Exponential year" +const SIGNIFICANT_DIGITS string = "Significant digits" +const SUB_YEAR_GROUPINGS string = "Sub-year groupings" +const SET_REPRESENTATIONS string = "Set representation" +const GROUP_QUALIFICATION string = "Qualification (Group)" +const INDIVIDUAL_QUALIFICATION string = "Qualification (Individual)" +const UNSPECIFIED_DIGIT string = "Unspecified Digit" +const INTERVAL string = "Interval" + +func IsLevel2(edtf_str string) bool { + return re.Level2.MatchString(edtf_str) +} + +func Matches(edtf_str string) (string, error) { + + if IsExponentialYear(edtf_str) { + return EXPONENTIAL_YEAR, nil + } + + if IsSignificantDigits(edtf_str) { + return SIGNIFICANT_DIGITS, nil + } + + 
if IsSubYearGrouping(edtf_str) { + return SUB_YEAR_GROUPINGS, nil + } + + if IsSetRepresentation(edtf_str) { + return SET_REPRESENTATIONS, nil + } + + if IsGroupQualification(edtf_str) { + return GROUP_QUALIFICATION, nil + } + + if IsIndividualQualification(edtf_str) { + return INDIVIDUAL_QUALIFICATION, nil + } + + if IsUnspecifiedDigit(edtf_str) { + return UNSPECIFIED_DIGIT, nil + } + + if IsInterval(edtf_str) { + return INTERVAL, nil + } + + return "", edtf.Invalid("Invalid or unsupported Level 2 string", edtf_str) +} + +func ParseString(edtf_str string) (*edtf.EDTFDate, error) { + + if IsExponentialYear(edtf_str) { + return ParseExponentialYear(edtf_str) + } + + if IsSignificantDigits(edtf_str) { + return ParseSignificantDigits(edtf_str) + } + + if IsSubYearGrouping(edtf_str) { + return ParseSubYearGroupings(edtf_str) + } + + if IsSetRepresentation(edtf_str) { + return ParseSetRepresentations(edtf_str) + } + + if IsGroupQualification(edtf_str) { + return ParseGroupQualification(edtf_str) + } + + if IsIndividualQualification(edtf_str) { + return ParseIndividualQualification(edtf_str) + } + + if IsUnspecifiedDigit(edtf_str) { + return ParseUnspecifiedDigit(edtf_str) + } + + if IsInterval(edtf_str) { + return ParseInterval(edtf_str) + } + + return nil, edtf.Invalid("Invalid or unsupported Level 2 string", edtf_str) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/qualification.go b/vendor/github.com/sfomuseum/go-edtf/level2/qualification.go new file mode 100644 index 0000000..bfd31c2 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/qualification.go @@ -0,0 +1,103 @@ +package level2 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" +) + +/* + +Group Qualification + +A qualification character to the immediate right of a component applies to that component as well as to all components to the left. + + Example 1 ‘2004-06-11%’ + year, month, and day uncertain and approximate + Example 2 ‘2004-06~-11’ + year and month approximate + Example 3 ‘2004?-06-11’ + year uncertain +*/ + +func IsGroupQualification(edtf_str string) bool { + return re.GroupQualification.MatchString(edtf_str) +} + +func ParseGroupQualification(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + GROUP 2004-06-11% 7 2004-06-11%,2004,,06,,11,% + GROUP 2004-06~-11 7 2004-06~-11,2004,,06,~,11, + GROUP 2004?-06-11 7 2004?-06-11,2004,?,06,,11, + + */ + + if !re.GroupQualification.MatchString(edtf_str) { + return nil, edtf.Invalid(GROUP_QUALIFICATION, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: GROUP_QUALIFICATION, + } + + return d, nil +} + +/* + +Qualification of Individual Component + +A qualification character to the immediate left of a component applies to that component only. 
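+
+ (Contrast this with group qualification above, where the qualifier sits to
+ the immediate right of a component and also covers every component to its
+ left.) A minimal usage sketch, assuming only the
+ ParseIndividualQualification function defined below:
+
+	d, err := ParseIndividualQualification("?2004-06-~11") // year uncertain, day approximate
+
+	if err != nil {
+		// handle the error
+	}
+
+	// d.Feature is now "Qualification (Individual)"
+	_ = d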
+ + Example 4 ‘?2004-06-~11’ + year uncertain; month known; day approximate + Example 5 ‘2004-%06-11’ + month uncertain and approximate; year and day known + +*/ + +func IsIndividualQualification(edtf_str string) bool { + return re.IndividualQualification.MatchString(edtf_str) +} + +func ParseIndividualQualification(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + INDIVIDUAL ?2004-06-~11 7 ?2004-06-~11,?,2004,,06,~,11 + INDIVIDUAL 2004-%06-11 7 2004-%06-11,,2004,%,06,,11 + + */ + + if !re.IndividualQualification.MatchString(edtf_str) { + return nil, edtf.Invalid(INDIVIDUAL_QUALIFICATION, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: INDIVIDUAL_QUALIFICATION, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/set_representation.go b/vendor/github.com/sfomuseum/go-edtf/level2/set_representation.go new file mode 100644 index 0000000..9702342 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/set_representation.go @@ -0,0 +1,243 @@ +package level2 + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "sort" + "strings" +) + +/* + +Set representation + + Square brackets wrap a single-choice list (select one member). + Curly brackets wrap an inclusive list (all members included). + Members of the set are separated by commas. + No spaces are allowed, anywhere within the expression. + Double-dots indicate all the values between the two values they separate, inclusive. + Double-dot at the beginning or end of the list means "on or before" or "on or after" respectively. + Elements immediately preceding and/or following, as well as the elements represented by a double-dot, all have the same precision. Otherwise, different elements may have different precisions. + +One of a set + + Example 1 [1667,1668,1670..1672] + One of the years 1667, 1668, 1670, 1671, 1672 + Example 2 [..1760-12-03] + December 3, 1760; or some earlier date + Example 3 [1760-12..] + December 1760, or some later month + Example 4 [1760-01,1760-02,1760-12..] + January or February of 1760 or December 1760 or some later month + Example 5 [1667,1760-12] + Either the year 1667 or the month December of 1760. + Example 6 [..1984] + The year 1984 or an earlier year + +All Members + + Example 7 {1667,1668,1670..1672} + All of the years 1667, 1668, 1670, 1671, 1672 + Example 8 {1960,1961-12} + The year 1960 and the month December of 1961. + Example 9 {..1984} + The year 1984 and all earlier years + +*/ + +func IsSetRepresentation(edtf_str string) bool { + return re.SetRepresentations.MatchString(edtf_str) +} + +func ParseSetRepresentations(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + SET [1667,1668,1670..1672] 9 [1667,1668,1670..1672],[,,1672,,,..,,] + SET [..1760-12-03] 9 [..1760-12-03],[,..,1760,12,03,,,] + SET [1760-12..] 9 [1760-12..],[,,1760,12,,..,,] + SET [1760-01,1760-02,1760-12..
9 [1760-01,1760-02,1760-12..],[,,1760,12,,..,,] + SET [1667,1760-12] 9 [1667,1760-12],[,,1760,12,,,,,] + SET [..1984] 9 [..1984],[,..,1984,,,,,] + SET {1667,1668,1670..1672} 9 {1667,1668,1670..1672},{,,1672,,,..,,} + SET {1960,1961-12} 9 {1960,1961-12},{,,1961,12,,,,,} + SET {..1984} 9 {..1984},{,..,1984,,,,,} + + */ + + m := re.SetRepresentations.FindStringSubmatch(edtf_str) + + if len(m) != 6 { + return nil, edtf.Invalid(SET_REPRESENTATIONS, edtf_str) + } + + class := m[1] + candidates := m[2] + + start_ymd := "" + end_ymd := "" + + start_open := false + end_open := false + + inclusivity := edtf.NONE + + switch class { + case "[": + inclusivity = edtf.ANY + case "{": + inclusivity = edtf.ALL + default: + return nil, edtf.Invalid(SET_REPRESENTATIONS, edtf_str) + } + + // this should be moved in to a separate method for getting + // the list of all possible dates - we only care about the + // bookends right now (20201231/thisisaaronland) + + possible := make([]string, 0) + + for _, date := range strings.Split(candidates, ",") { + + parts := strings.Split(date, "..") + count := len(parts) + + switch count { + case 1: + possible = append(possible, date) + continue + case 2: + + if parts[0] != "" && parts[1] != "" { // YYYY..YYYY + + // get everything in between parts[0] and parts[1] + // need to determine what to get (days, months, years) + + possible = append(possible, parts[0]) + possible = append(possible, parts[1]) + + } else if parts[0] == "" { // ..YYYY + + // parts[1] is end (max) date + // start (min) date is "open" or "unknown" + + possible = append(possible, parts[1]) + start_open = true + + } else { // YYYY.. + + // parts[0] is start (min) date + // end (max) date is "open" or "unknown" + + possible = append(possible, parts[0]) + end_open = true + } + + default: + return nil, edtf.Invalid(SET_REPRESENTATIONS, edtf_str) + } + } + + sort.Strings(possible) + count := len(possible) + + switch count { + case 0: + return nil, edtf.Invalid(SET_REPRESENTATIONS, edtf_str) + case 1: + start_ymd = possible[0] + end_ymd = start_ymd + default: + start_ymd = possible[0] + end_ymd = possible[count-1] + } + + _str := start_ymd + + if start_open { + + _str = fmt.Sprintf("../%s", start_ymd) + + } else if end_open { + + _str = fmt.Sprintf("%s/..", start_ymd) + + } else if start_ymd != end_ymd { + + _str = fmt.Sprintf("%s/%s", start_ymd, end_ymd) + } + + /* + + Imagine we have a string like this: + '[1760-01,1760-02,1760-12..]' + + Which needs to be interpreted as: + + start lower: 1760-01 + start upper: 1760-12 + + end lower/upper: .. + + But since we can't parse '1760-01/1760-12/...' 
+ since that would be gibberish, we parse '1760-01/1760-12' + and set the 'open_post_facto' flag to update the results of + common.DateSpanFromEDTF after the fact + (20210106/thisisaaronland) + + */ + + open_post_facto := false + + if start_open || end_open { + + if len(possible) > 1 { + _str = fmt.Sprintf("%s/%s", start_ymd, end_ymd) + open_post_facto = true + } + } + + sp, err := common.DateSpanFromEDTF(_str) + + if err != nil { + return nil, err + } + + if open_post_facto { + + open_range := common.OpenDateRange() + + if start_open { + sp.End.Lower = sp.Start.Lower + sp.Start = open_range + } + + if end_open { + + sp.Start.Upper = sp.End.Upper + sp.End = open_range + } + } + + if !start_open { + sp.Start.Lower.Inclusivity = inclusivity + sp.Start.Upper.Inclusivity = inclusivity + } + + if !end_open { + sp.End.Lower.Inclusivity = inclusivity + sp.End.Upper.Inclusivity = inclusivity + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: SET_REPRESENTATIONS, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/significant_digits.go b/vendor/github.com/sfomuseum/go-edtf/level2/significant_digits.go new file mode 100644 index 0000000..925d1ea --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/significant_digits.go @@ -0,0 +1,138 @@ +package level2 + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "strconv" + "strings" +) + +/* + +Significant digits + +A year (expressed in any of the three allowable forms: four-digit, 'Y' prefix, or exponential) may be followed by 'S', followed by a positive integer indicating the number of significant digits. + + Example 1 ‘1950S2’ + some year between 1900 and 1999, estimated to be 1950 + Example 2 ‘Y171010000S3’ + some year between 171010000 and 171010999, estimated to be 171010000 + Example 3 ‘Y3388E2S3’ + some year between 338000 and 338999, estimated to be 338800.
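+
+ In this implementation (see ParseSignificantDigits below) the digits covered
+ by the S value are fanned out to all zeros and all nines, so the result is
+ an inclusive range of years. A minimal usage sketch, assuming only the
+ function defined below:
+
+	d, err := ParseSignificantDigits("1950S2") // "19" + "00".."99"
+
+	if err != nil {
+		// handle the error
+	}
+
+	// d.Start and d.End now describe the span 1900-01-01/1999-12-31,
+	// per the package tests in tests.go
+	_ = d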
+ +*/ + +func IsSignificantDigits(edtf_str string) bool { + return re.SignificantDigits.MatchString(edtf_str) +} + +func ParseSignificantDigits(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + SIGN 5 1950S2,1950,,,2 + SIGN 5 Y171010000S3,,171010000,,3 + SIGN 5 Y-20E2S3,,,-20E2,3 + SIGN 5 Y3388E2S3,,,3388E2,3 + SIGN 5 Y-20E2S3,,,-20E2,3 + + */ + + m := re.SignificantDigits.FindStringSubmatch(edtf_str) + + if len(m) != 5 { + return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str) + } + + str_yyyy := m[1] + str_year := m[2] + notation := m[3] + str_digits := m[4] + + var yyyy int + + if str_yyyy != "" { + + y, err := strconv.Atoi(str_yyyy) + + if err != nil { + return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str) + } + + yyyy = y + + } else if str_year != "" { + + if len(str_year) > 4 { + return nil, edtf.Unsupported(SIGNIFICANT_DIGITS, edtf_str) + } + + y, err := strconv.Atoi(str_year) + + if err != nil { + return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str) + } + + yyyy = y + + } else if notation != "" { + + y, err := common.ParseExponentialNotation(notation) + + if err != nil { + return nil, err + } + + yyyy = y + + } else { + return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str) + } + + if yyyy > edtf.MAX_YEARS { + return nil, edtf.Unsupported(SIGNIFICANT_DIGITS, edtf_str) + } + + digits, err := strconv.Atoi(str_digits) + + if err != nil { + return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str) + } + + if len(strconv.Itoa(digits)) > len(strconv.Itoa(yyyy)) { + return nil, edtf.Invalid(SIGNIFICANT_DIGITS, edtf_str) + } + + str_yyyy = strconv.Itoa(yyyy) + prefix_yyyy := str_yyyy[0 : len(str_yyyy)-digits] + + first := strings.Repeat("0", digits) + last := strings.Repeat("9", digits) + + start_yyyy := prefix_yyyy + first + end_yyyy := prefix_yyyy + last + + _str := fmt.Sprintf("%s/%s", start_yyyy, end_yyyy) + + if strings.HasPrefix(start_yyyy, "-") && strings.HasPrefix(end_yyyy, "-") { + _str = fmt.Sprintf("%s/%s", end_yyyy, start_yyyy) + } + + sp, err := common.DateSpanFromEDTF(_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: SIGNIFICANT_DIGITS, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/sub_year_grouping.go b/vendor/github.com/sfomuseum/go-edtf/level2/sub_year_grouping.go new file mode 100644 index 0000000..b96888f --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/sub_year_grouping.go @@ -0,0 +1,204 @@ +package level2 + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + "strconv" +) + +/* + +Level 2 extends the season feature of Level 1 to include the following sub-year groupings. 
+ +21 Spring (independent of location) +22 Summer (independent of location) +23 Autumn (independent of location) +24 Winter (independent of location) +25 Spring - Northern Hemisphere +26 Summer - Northern Hemisphere +27 Autumn - Northern Hemisphere +28 Winter - Northern Hemisphere +29 Spring - Southern Hemisphere +30 Summer - Southern Hemisphere +31 Autumn - Southern Hemisphere +32 Winter - Southern Hemisphere +33 Quarter 1 (3 months in duration) +34 Quarter 2 (3 months in duration) +35 Quarter 3 (3 months in duration) +36 Quarter 4 (3 months in duration) +37 Quadrimester 1 (4 months in duration) +38 Quadrimester 2 (4 months in duration) +39 Quadrimester 3 (4 months in duration) +40 Semestral 1 (6 months in duration) +41 Semestral 2 (6 months in duration) + + Example ‘2001-34’ + second quarter of 2001 + +*/ + +func IsSubYearGrouping(edtf_str string) bool { + return re.SubYearGrouping.MatchString(edtf_str) +} + +func ParseSubYearGroupings(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + SUB 3 2001-34,2001,34 + + */ + + m := re.SubYearGrouping.FindStringSubmatch(edtf_str) + + if len(m) != 3 { + return nil, edtf.Invalid(SUB_YEAR_GROUPINGS, edtf_str) + } + + year := m[1] + grouping := m[2] + + start_yyyy := year + start_mm := "" + start_dd := "" + + end_yyyy := year + end_mm := "" + end_dd := "" + + switch grouping { + case "21", "25": // Spring (independent of location, Northern Hemisphere) + start_mm = "03" + start_dd = "01" + end_mm = "05" + end_dd = "31" + case "22", "26": // Summer (independent of location, Northern Hemisphere) + start_mm = "06" + start_dd = "01" + end_mm = "08" + end_dd = "31" + case "23", "27": // Autumn (independent of location, Northern Hemisphere) + start_mm = "09" + start_dd = "01" + end_mm = "11" + end_dd = "30" + case "24", "28": // Winter (independent of location, Northern Hemisphere) + start_mm = "12" + start_dd = "01" + end_mm = "02" + end_dd = "" // leave blank to make the code look up daysforyear(year) + + y, err := strconv.Atoi(end_yyyy) + + if err != nil { + return nil, err + } + + end_yyyy = strconv.Itoa(y + 1) + + case "29": // Spring - Southern Hemisphere + start_mm = "09" + start_dd = "01" + end_mm = "11" + end_dd = "30" + case "30": // Summer - Southern Hemisphere + start_mm = "12" + start_dd = "01" + + end_mm = "02" + end_dd = "" // leave blank to make the code look up daysforyear(year) + + y, err := strconv.Atoi(end_yyyy) + + if err != nil { + return nil, err + } + + end_yyyy = strconv.Itoa(y + 1) + + case "31": // Autumn - Southern Hemisphere + start_mm = "03" + start_dd = "01" + end_mm = "05" + end_dd = "31" + case "32": // Winter - Southern Hemisphere + start_mm = "06" + start_dd = "01" + end_mm = "08" + end_dd = "31" + case "33": // Quarter 1 (3 months in duration) + start_mm = "01" + start_dd = "01" + end_mm = "03" + end_dd = "31" + case "34": // Quarter 2 (3 months in duration) + start_mm = "04" + start_dd = "01" + end_mm = "06" + end_dd = "30" + case "35": // Quarter 3 (3 months in duration) + start_mm = "07" + start_dd = "01" + end_mm = "09" + end_dd = "30" + case "36": // Quarter 4 (3 months in duration) + start_mm = "10" + start_dd = "01" + end_mm = "12" + end_dd = "31" + case "37": // Quadrimester 1 (4 months in duration) + start_mm = "01" + start_dd = "01" + end_mm = "04" + end_dd = "30" + case "38": // Quadrimester 2 (4 months in duration) + start_mm = "05" + start_dd = "01" + end_mm = "08" + end_dd = "31" + case "39": // Quadrimester 3 (4 months in duration) + start_mm = "09" + start_dd = "01" + end_mm = "12" + end_dd = "31" + case 
"40": // Semestral 1 (6 months in duration) + start_mm = "01" + start_dd = "01" + end_mm = "06" + end_dd = "30" + case "41": // Semestral 2 (6 months in duration) + start_mm = "07" + start_dd = "01" + end_mm = "12" + end_dd = "31" + default: + return nil, edtf.Invalid(SUB_YEAR_GROUPINGS, edtf_str) + } + + start := fmt.Sprintf("%s-%s-%s", start_yyyy, start_mm, start_dd) + end := fmt.Sprintf("%s-%s", end_yyyy, end_mm) + + if end_dd != "" { + end = fmt.Sprintf("%s-%s", end, end_dd) + } + + _str := fmt.Sprintf("%s/%s", start, end) + + sp, err := common.DateSpanFromEDTF(_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: SUB_YEAR_GROUPINGS, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/tests.go b/vendor/github.com/sfomuseum/go-edtf/level2/tests.go new file mode 100644 index 0000000..5d31340 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/tests.go @@ -0,0 +1,230 @@ +package level2 + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/tests" +) + +var Tests map[string]map[string]*tests.TestResult = map[string]map[string]*tests.TestResult{ + EXPONENTIAL_YEAR: map[string]*tests.TestResult{ + "Y-17E7": tests.NewTestResult(tests.TestResultOptions{}), // TO DO - https://github.com/sfomuseum/go-edtf/issues/5 + "Y10E7": tests.NewTestResult(tests.TestResultOptions{}), // TO DO + "Y20E2": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2000-01-01T00:00:00Z", + StartUpperTimeRFC3339: "2000-01-01T23:59:59Z", + EndLowerTimeRFC3339: "2000-12-31T00:00:00Z", + EndUpperTimeRFC3339: "2000-12-31T23:59:59Z", + }), + }, + SIGNIFICANT_DIGITS: map[string]*tests.TestResult{ + "1950S2": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1900-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1900-12-31T23:59:59Z", + EndLowerTimeRFC3339: "1999-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1999-12-31T23:59:59Z", + }), + "Y171010000S3": tests.NewTestResult(tests.TestResultOptions{}), + "Y-20E2S3": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-2999-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-2999-12-31T23:59:59Z", + EndLowerTimeRFC3339: "-2000-01-01T00:00:00Z", + EndUpperTimeRFC3339: "-2000-12-31T23:59:59Z", + }), + "Y3388E2S3": tests.NewTestResult(tests.TestResultOptions{}), + }, + SUB_YEAR_GROUPINGS: map[string]*tests.TestResult{ + "2001-34": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2001-04-01T00:00:00Z", + StartUpperTimeRFC3339: "2001-04-01T23:59:59Z", + EndLowerTimeRFC3339: "2001-06-30T00:00:00Z", + EndUpperTimeRFC3339: "2001-06-30T23:59:59Z", + }), + "2019-28": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2019-12-01T00:00:00Z", + StartUpperTimeRFC3339: "2019-12-01T23:59:59Z", + EndLowerTimeRFC3339: "2020-02-01T00:00:00Z", + EndUpperTimeRFC3339: "2020-02-29T23:59:59Z", + }), + // "second quarter of 2001": tests.NewTestResult(tests.TestResultOptions{}), // TO DO + }, + SET_REPRESENTATIONS: map[string]*tests.TestResult{ + "[1760-01,1760-02,1760-12..]": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1760-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1760-12-31T23:59:59Z", + EndLowerIsOpen: true, + EndUpperIsOpen: true, + StartLowerInclusivity: edtf.ANY, + }), + "[1667,1668,1670..1672]": tests.NewTestResult(tests.TestResultOptions{ + // THIS FEELS WRONG...LIKE IT'S BACKWARDS + StartLowerTimeRFC3339: 
"1667-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1667-12-31T23:59:59Z", + EndLowerTimeRFC3339: "1672-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1672-12-31T23:59:59Z", + StartLowerInclusivity: edtf.ANY, + EndUpperInclusivity: edtf.ANY, + }), + "[..1760-12-03]": tests.NewTestResult(tests.TestResultOptions{ + EndLowerTimeRFC3339: "1760-12-03T00:00:00Z", + EndUpperTimeRFC3339: "1760-12-03T23:59:59Z", + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndUpperInclusivity: edtf.ANY, + }), + "[1760-12..]": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1760-12-01T00:00:00Z", + StartUpperTimeRFC3339: "1760-12-31T23:59:59Z", + EndLowerIsOpen: true, + EndUpperIsOpen: true, + StartUpperInclusivity: edtf.ANY, + }), + "[1667,1760-12]": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1667-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1667-12-31T23:59:59Z", + EndLowerTimeRFC3339: "1760-12-01T00:00:00Z", + EndUpperTimeRFC3339: "1760-12-31T23:59:59Z", + StartUpperInclusivity: edtf.ANY, + EndLowerInclusivity: edtf.ANY, + }), + + "[..1984]": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndLowerTimeRFC3339: "1984-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1984-12-31T23:59:59Z", + EndLowerInclusivity: edtf.ANY, + }), + "{1667,1668,1670..1672}": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1667-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1667-12-31T23:59:59Z", + EndLowerTimeRFC3339: "1672-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1672-12-31T23:59:59Z", + StartUpperInclusivity: edtf.ALL, + EndLowerInclusivity: edtf.ALL, + }), + "{1960,1961-12}": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1960-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1960-12-31T23:59:59Z", + EndLowerTimeRFC3339: "1961-12-01T00:00:00Z", + EndUpperTimeRFC3339: "1961-12-31T23:59:59Z", + StartUpperInclusivity: edtf.ALL, + EndLowerInclusivity: edtf.ALL, + }), + "{..1984}": tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndLowerTimeRFC3339: "1984-01-01T00:00:00Z", + EndUpperTimeRFC3339: "1984-12-31T23:59:59Z", + EndLowerInclusivity: edtf.ALL, + }), + }, + GROUP_QUALIFICATION: map[string]*tests.TestResult{ + "2004-06-11%": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-11T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-11T00:00:00Z", + EndLowerTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperTimeRFC3339: "2004-06-11T23:59:59Z", + StartUpperUncertain: edtf.DAY, + StartUpperApproximate: edtf.DAY, + EndLowerApproximate: edtf.YEAR, + EndLowerUncertain: edtf.MONTH, + }), + "2004-06~-11": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-11T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-11T00:00:00Z", + EndLowerTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperApproximate: edtf.MONTH, + EndLowerApproximate: edtf.YEAR, + }), + "2004?-06-11": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-11T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-11T00:00:00Z", + EndLowerTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperTimeRFC3339: "2004-06-11T23:59:59Z", + EndLowerUncertain: edtf.YEAR, + }), + }, + INDIVIDUAL_QUALIFICATION: map[string]*tests.TestResult{ + "?2004-06-~11": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-11T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-11T00:00:00Z", + EndLowerTimeRFC3339: 
"2004-06-11T23:59:59Z", + EndUpperTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperApproximate: edtf.DAY, + }), + "2004-%06-11": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-11T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-11T00:00:00Z", + EndLowerTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperTimeRFC3339: "2004-06-11T23:59:59Z", + EndUpperApproximate: edtf.MONTH, + EndUpperUncertain: edtf.MONTH, + }), + }, + UNSPECIFIED_DIGIT: map[string]*tests.TestResult{ + "156X-12-25": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1560-12-25T00:00:00Z", + StartUpperTimeRFC3339: "1560-12-25T23:59:59Z", + EndLowerTimeRFC3339: "1569-12-25T00:00:00Z", + EndUpperTimeRFC3339: "1569-12-25T23:59:59Z", + StartUpperPrecision: edtf.DECADE, + }), + "15XX-12-25": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1500-12-25T00:00:00Z", + StartUpperTimeRFC3339: "1500-12-25T23:59:59Z", + EndLowerTimeRFC3339: "1599-12-25T00:00:00Z", + EndUpperTimeRFC3339: "1599-12-25T23:59:59Z", + StartUpperPrecision: edtf.CENTURY, + }), + // "XXXX-12-XX": tests.NewTestResult(tests.TestResultOptions{}), // TO DO + "1XXX-XX": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1000-01-01T00:00:00Z", + StartUpperTimeRFC3339: "1000-01-01T23:59:59Z", + EndLowerTimeRFC3339: "1999-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1999-12-31T23:59:59Z", + StartUpperPrecision: edtf.MILLENIUM, + }), + "1XXX-12": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1000-12-01T00:00:00Z", + StartUpperTimeRFC3339: "1000-12-01T23:59:59Z", + EndLowerTimeRFC3339: "1999-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1999-12-31T23:59:59Z", + StartUpperPrecision: edtf.MILLENIUM, + }), + "1984-1X": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "1984-10-01T00:00:00Z", + StartUpperTimeRFC3339: "1984-10-01T23:59:59Z", + EndLowerTimeRFC3339: "1984-12-31T00:00:00Z", + EndUpperTimeRFC3339: "1984-12-31T23:59:59Z", + StartUpperPrecision: edtf.MONTH, + }), + }, + INTERVAL: map[string]*tests.TestResult{ + "2004-06-~01/2004-06-~20": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-01T23:59:59Z", + EndLowerTimeRFC3339: "2004-06-20T00:00:00Z", + EndUpperTimeRFC3339: "2004-06-20T23:59:59Z", + EndUpperApproximate: edtf.DAY, + }), + "2004-06-XX/2004-07-03": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "2004-06-01T00:00:00Z", + StartUpperTimeRFC3339: "2004-06-30T23:59:59Z", + EndLowerTimeRFC3339: "2004-07-03T00:00:00Z", + EndUpperTimeRFC3339: "2004-07-03T23:59:59Z", + }), + "~-0100/~2020": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-0100-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-0100-12-31T23:59:59Z", + EndLowerTimeRFC3339: "2020-01-01T00:00:00Z", + EndUpperTimeRFC3339: "2020-12-31T23:59:59Z", + }), + "~-0100/~-0010": tests.NewTestResult(tests.TestResultOptions{ + StartLowerTimeRFC3339: "-0100-01-01T00:00:00Z", + StartUpperTimeRFC3339: "-0100-12-31T23:59:59Z", + EndLowerTimeRFC3339: "-0010-01-01T00:00:00Z", + EndUpperTimeRFC3339: "-0010-12-31T23:59:59Z", + }), + }, +} diff --git a/vendor/github.com/sfomuseum/go-edtf/level2/unspecified_digit.go b/vendor/github.com/sfomuseum/go-edtf/level2/unspecified_digit.go new file mode 100644 index 0000000..b8db019 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/level2/unspecified_digit.go @@ -0,0 +1,67 @@ +package level2 + +import ( + "github.com/sfomuseum/go-edtf" + 
"github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/re" + // "strconv" + // "strings" +) + +/* + +Unspecified Digit + +For level 2 the unspecified digit, 'X', may occur anywhere within a component. + + Example 1 ‘156X-12-25’ + December 25 sometime during the 1560s + Example 2 ‘15XX-12-25’ + December 25 sometime during the 1500s + Example 3 ‘XXXX-12-XX’ + Some day in December in some year + Example 4 '1XXX-XX’ + Some month during the 1000s + Example 5 ‘1XXX-12’ + Some December during the 1000s + Example 6 ‘1984-1X’ + October, November, or December 1984 + +*/ + +func IsUnspecifiedDigit(edtf_str string) bool { + return re.UnspecifiedDigit.MatchString(edtf_str) +} + +func ParseUnspecifiedDigit(edtf_str string) (*edtf.EDTFDate, error) { + + /* + + UNSPEC 156X-12-25 4 156X-12-25,156X,12,25 + UNSPEC 15XX-12-25 4 15XX-12-25,15XX,12,25 + UNSPEC 1XXX-XX 4 1XXX-XX,1XXX,XX, + UNSPEC 1XXX-12 4 1XXX-12,1XXX,12, + UNSPEC 1984-1X 4 1984-1X,1984,1X, + + */ + + if !re.UnspecifiedDigit.MatchString(edtf_str) { + return nil, edtf.Invalid(UNSPECIFIED_DIGIT, edtf_str) + } + + sp, err := common.DateSpanFromEDTF(edtf_str) + + if err != nil { + return nil, err + } + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: LEVEL, + Feature: UNSPECIFIED_DIGIT, + } + + return d, nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/parser/parser.go b/vendor/github.com/sfomuseum/go-edtf/parser/parser.go new file mode 100644 index 0000000..2c36cd5 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/parser/parser.go @@ -0,0 +1,120 @@ +// package parser provides methods for parsing and validating EDTF strings. +package parser + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/common" + "github.com/sfomuseum/go-edtf/level0" + "github.com/sfomuseum/go-edtf/level1" + "github.com/sfomuseum/go-edtf/level2" + _ "log" +) + +// Return a boolean value indicating whether a string is a valid EDTF date. +func IsValid(edtf_str string) bool { + + if level0.IsLevel0(edtf_str) { + return true + } + + if level1.IsLevel1(edtf_str) { + return true + } + + if level2.IsLevel2(edtf_str) { + return true + } + + switch edtf_str { + case edtf.OPEN, edtf.UNKNOWN: + return true + default: + return false + } +} + +// Parse a string in to an edtf.EDTFDate instance. +func ParseString(edtf_str string) (*edtf.EDTFDate, error) { + + if level0.IsLevel0(edtf_str) { + return level0.ParseString(edtf_str) + } + + if level1.IsLevel1(edtf_str) { + return level1.ParseString(edtf_str) + } + + if level2.IsLevel2(edtf_str) { + return level2.ParseString(edtf_str) + } + + if edtf_str == edtf.OPEN { + sp := common.OpenDateSpan() + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: -1, + Feature: "Open", + } + + return d, nil + } + + if edtf_str == edtf.UNKNOWN { + + sp := common.UnknownDateSpan() + + d := &edtf.EDTFDate{ + Start: sp.Start, + End: sp.End, + EDTF: edtf_str, + Level: -1, + Feature: "Unknown", + } + + return d, nil + } + + return nil, edtf.Unrecognized("Invalid or unsupported EDTF string", edtf_str) +} + +// Determine which EDTF level and corresponding EDTF feature a string matches. 
+func Matches(edtf_str string) (int, string, error) { + + if level0.IsLevel0(edtf_str) { + + feature, err := level0.Matches(edtf_str) + + if err != nil { + return -1, "", err + } + + return level0.LEVEL, feature, nil + } + + if level1.IsLevel1(edtf_str) { + + feature, err := level1.Matches(edtf_str) + + if err != nil { + return -1, "", err + } + + return level1.LEVEL, feature, nil + } + + if level2.IsLevel2(edtf_str) { + + feature, err := level2.Matches(edtf_str) + + if err != nil { + return -1, "", err + } + + return level2.LEVEL, feature, nil + } + + return -1, "", edtf.Unrecognized("Invalid or unsupported EDTF string", edtf_str) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/parser/tests.go b/vendor/github.com/sfomuseum/go-edtf/parser/tests.go new file mode 100644 index 0000000..b6cd053 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/parser/tests.go @@ -0,0 +1,25 @@ +package parser + +import ( + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/tests" +) + +var Tests map[string]map[string]*tests.TestResult = map[string]map[string]*tests.TestResult{ + "Unknown": map[string]*tests.TestResult{ + edtf.UNKNOWN: tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsUnknown: true, + StartUpperIsUnknown: true, + EndLowerIsUnknown: true, + EndUpperIsUnknown: true, + }), + }, + "Open": map[string]*tests.TestResult{ + edtf.OPEN: tests.NewTestResult(tests.TestResultOptions{ + StartLowerIsOpen: true, + StartUpperIsOpen: true, + EndLowerIsOpen: true, + EndUpperIsOpen: true, + }), + }, +} diff --git a/vendor/github.com/sfomuseum/go-edtf/precision.go b/vendor/github.com/sfomuseum/go-edtf/precision.go new file mode 100644 index 0000000..ef00011 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/precision.go @@ -0,0 +1,37 @@ +package edtf + +import () + +const ( + NONE Precision = 0 + ALL Precision = 1 << iota // 2 + ANY // 4 + DAY // 8 + WEEK // 16 + MONTH // 32 + YEAR // 64 + DECADE // 128 + CENTURY // 256 + MILLENIUM // 512 +) + +// https://stackoverflow.com/questions/48050522/using-bitsets-in-golang-to-represent-capabilities + +type Precision uint32 + +func (f Precision) HasFlag(flag Precision) bool { return f&flag != 0 } +func (f *Precision) AddFlag(flag Precision) { *f |= flag } +func (f *Precision) ClearFlag(flag Precision) { *f &= ^flag } +func (f *Precision) ToggleFlag(flag Precision) { *f ^= flag } + +func (f *Precision) IsAnnual() bool { + return f.HasFlag(YEAR) +} + +func (f *Precision) IsMonthly() bool { + return f.HasFlag(MONTH) +} + +func (f *Precision) IsDaily() bool { + return f.HasFlag(DAY) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/re/common.go b/vendor/github.com/sfomuseum/go-edtf/re/common.go new file mode 100644 index 0000000..10ccb36 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/re/common.go @@ -0,0 +1,22 @@ +package re + +import ( + "regexp" +) + +var Year *regexp.Regexp + +var YMD *regexp.Regexp + +var QualifiedIndividual *regexp.Regexp +var QualifiedGroup *regexp.Regexp + +func init() { + Year = regexp.MustCompile(`^` + PATTERN_YEAR + `$`) + + YMD = regexp.MustCompile(`^` + PATTERN_YMD_X + `$`) + + QualifiedIndividual = regexp.MustCompile(`^(` + PATTERN_QUALIFIER + `)?` + PATTERN_DATE_X + `$`) + + QualifiedGroup = regexp.MustCompile(`^` + PATTERN_DATE_X + `(` + PATTERN_QUALIFIER + `)?$`) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/re/level0.go b/vendor/github.com/sfomuseum/go-edtf/re/level0.go new file mode 100644 index 0000000..8ef3f3c --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/re/level0.go @@ -0,0 
+1,29 @@ +package re + +import ( + "regexp" + "strings" +) + +var Date *regexp.Regexp +var DateAndTime *regexp.Regexp +var TimeInterval *regexp.Regexp + +var Level0 *regexp.Regexp + +func init() { + + Date = regexp.MustCompile(`^` + PATTERN_DATE + `$`) + + DateAndTime = regexp.MustCompile(`^` + PATTERN_DATE_AND_TIME + `$`) + + TimeInterval = regexp.MustCompile(`^` + PATTERN_TIME_INTERVAL + `$`) + + level0_patterns := []string{ + PATTERN_DATE, + PATTERN_DATE_AND_TIME, + PATTERN_TIME_INTERVAL, + } + + Level0 = regexp.MustCompile(`^(` + strings.Join(level0_patterns, "|") + `)$`) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/re/level1.go b/vendor/github.com/sfomuseum/go-edtf/re/level1.go new file mode 100644 index 0000000..7471745 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/re/level1.go @@ -0,0 +1,44 @@ +package re + +import ( + "regexp" + "strings" +) + +var LetterPrefixedCalendarYear *regexp.Regexp +var Season *regexp.Regexp +var QualifiedDate *regexp.Regexp +var UnspecifiedDigits *regexp.Regexp +var IntervalEnd *regexp.Regexp +var IntervalStart *regexp.Regexp +var NegativeYear *regexp.Regexp +var Level1 *regexp.Regexp + +func init() { + + LetterPrefixedCalendarYear = regexp.MustCompile(`^` + PATTERN_LETTER_PREFIXED_CALENDAR_YEAR + `$`) + + Season = regexp.MustCompile(`^` + PATTERN_SEASON + `$`) + + QualifiedDate = regexp.MustCompile(`^` + PATTERN_QUALIFIED_DATE + `$`) + + UnspecifiedDigits = regexp.MustCompile(`^` + PATTERN_UNSPECIFIED_DIGITS + `$`) + + IntervalStart = regexp.MustCompile(`^` + PATTERN_INTERVAL_START + `$`) + + IntervalEnd = regexp.MustCompile(`^` + PATTERN_INTERVAL_END + `$`) + + NegativeYear = regexp.MustCompile(`^` + PATTERN_NEGATIVE_YEAR + `$`) + + level1_patterns := []string{ + PATTERN_LETTER_PREFIXED_CALENDAR_YEAR, + PATTERN_SEASON, + PATTERN_QUALIFIED_DATE, + PATTERN_UNSPECIFIED_DIGITS, + PATTERN_INTERVAL_START, + PATTERN_INTERVAL_END, + PATTERN_NEGATIVE_YEAR, + } + + Level1 = regexp.MustCompile(`^(` + strings.Join(level1_patterns, "|") + `)$`) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/re/level2.go b/vendor/github.com/sfomuseum/go-edtf/re/level2.go new file mode 100644 index 0000000..5a6e561 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/re/level2.go @@ -0,0 +1,49 @@ +package re + +import ( + "regexp" + "strings" +) + +var ExponentialYear *regexp.Regexp + +var SignificantDigits *regexp.Regexp +var SubYearGrouping *regexp.Regexp +var SetRepresentations *regexp.Regexp +var GroupQualification *regexp.Regexp +var IndividualQualification *regexp.Regexp +var UnspecifiedDigit *regexp.Regexp +var Interval *regexp.Regexp +var Level2 *regexp.Regexp + +func init() { + + ExponentialYear = regexp.MustCompile(`^` + PATTERN_EXPONENTIAL_YEAR + `$`) + + SignificantDigits = regexp.MustCompile(`^` + PATTERN_SIGNIFICANT_DIGITS + `$`) + + SubYearGrouping = regexp.MustCompile(`^` + PATTERN_SUB_YEAR_GROUPING + `$`) + + SetRepresentations = regexp.MustCompile(`^` + PATTERN_SET_REPRESENTATIONS + `$`) + + GroupQualification = regexp.MustCompile(`^` + PATTERN_GROUP_QUALIFICATION + `$`) + + IndividualQualification = regexp.MustCompile(`^` + PATTERN_INDIVIDUAL_QUALIFICATION + `$`) + + UnspecifiedDigit = regexp.MustCompile(`^` + PATTERN_UNSPECIFIED_DIGIT + `$`) + + Interval = regexp.MustCompile(`^` + PATTERN_INTERVAL + `$`) + + level2_patterns := []string{ + PATTERN_EXPONENTIAL_YEAR, + PATTERN_SIGNIFICANT_DIGITS, + PATTERN_SUB_YEAR_GROUPING, + PATTERN_SET_REPRESENTATIONS, + PATTERN_GROUP_QUALIFICATION, + PATTERN_INDIVIDUAL_QUALIFICATION, + 
PATTERN_UNSPECIFIED_DIGIT, + PATTERN_INTERVAL, + } + + Level2 = regexp.MustCompile(`^` + `(` + strings.Join(level2_patterns, "|") + `)`) +} diff --git a/vendor/github.com/sfomuseum/go-edtf/re/re.go b/vendor/github.com/sfomuseum/go-edtf/re/re.go new file mode 100644 index 0000000..d377452 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/re/re.go @@ -0,0 +1,69 @@ +package re + +import ( + "github.com/sfomuseum/go-edtf" +) + +// Common + +const PATTERN_YEAR string = `(\-?\d{4})` + +// these are used by common.DateRangeWithString + +const PATTERN_QUALIFIER string = `[\` + edtf.UNCERTAIN + edtf.APPROXIMATE + edtf.UNCERTAIN_AND_APPROXIMATE + `]` + +const PATTERN_YEAR_X string = `\-?[0-9X]{4}` +const PATTERN_MONTH_X string = `(?:[0X][1-9X]|[1X][0-2X])` +const PATTERN_DAY_X string = `(?:[012X][0-9X]|[3X][01X])` + +const PATTERN_YYYY string = `(` + PATTERN_QUALIFIER + `?` + PATTERN_YEAR_X + `|` + PATTERN_YEAR_X + PATTERN_QUALIFIER + `?)` +const PATTERN_MM string = `(` + PATTERN_QUALIFIER + `?` + PATTERN_MONTH_X + `|` + PATTERN_MONTH_X + PATTERN_QUALIFIER + `?)` +const PATTERN_DD string = `(` + PATTERN_QUALIFIER + `?` + PATTERN_DAY_X + `|` + PATTERN_DAY_X + PATTERN_QUALIFIER + `?)` + +const PATTERN_YMD_X string = `^` + PATTERN_YYYY + `(?:\-` + PATTERN_MM + `(?:\-` + PATTERN_DD + `)?` + `)?$` + +const PATTERN_DATE_X string = `(` + PATTERN_YEAR_X + `|(?:` + PATTERN_MONTH_X + `)|(?:` + PATTERN_DAY_X + `))` + +// Level 0 + +const PATTERN_DATE string = `(\-?\d{4})(?:-([0][1-9]|1[0-2])(?:-(0[1-9]|[12][0-9]|3[01]))?)?` + +const PATTERN_DATE_AND_TIME string = PATTERN_DATE + `T(\d{2}):(\d{2}):(\d{2})(Z|(\+|-)(\d{2})(\:(\d{2}))?)?` + +const PATTERN_TIME_INTERVAL string = PATTERN_DATE + `/` + PATTERN_DATE + +// Level 1 + +const PATTERN_LETTER_PREFIXED_CALENDAR_YEAR string = `Y(\-?\d+)` + +const PATTERN_SEASON string = PATTERN_YEAR + `\-(0[1-9]|1[0-2]|2[1-4])|(?i)(spring|summer|fall|winter)\s*,\s*(\d{4})` + +const PATTERN_QUALIFIED_DATE string = PATTERN_DATE + `(\?|~|%)` + +const PATTERN_UNSPECIFIED_DIGITS string = `(?:([0-9X]{4})(?:-([0X][1-9X]|[1X][0-2X])(?:-([012X][1-9X]|[3X][01X]))?)?)` + +const PATTERN_INTERVAL_START = `(\.\.)?\/` + PATTERN_DATE + +const PATTERN_INTERVAL_END = PATTERN_DATE + `\/(\.\.)?` + +const PATTERN_NEGATIVE_YEAR = `\-` + PATTERN_YEAR + +// Level 2 + +const PATTERN_EXPONENTIAL_YEAR string = `(?i)Y(\-?\d+E\d+)` + +const PATTERN_SIGNIFICANT_DIGITS string = `(?:` + PATTERN_YEAR + `|` + PATTERN_LETTER_PREFIXED_CALENDAR_YEAR + `|` + PATTERN_EXPONENTIAL_YEAR + `)S(\d+)` + +const PATTERN_SUB_YEAR_GROUPING string = `(\d{4})\-(1[0-2]|2[1-9]|3[0-9]|4[0-1])` + +// PLEASE FIX ME TO ENSURE CLOSING EL IS THE SAME AS OPENING EL : {}, (), [] + +const PATTERN_SET_REPRESENTATIONS string = `(\[|\{)((?:\.\.)?(?:(?:` + PATTERN_DATE + `(?:,|\.\.)?)+(?:\.\.)?))[\}\]]` + +const PATTERN_GROUP_QUALIFICATION string = `(?:(\d{4})(%|~|\?)?(?:-(\d{2})(%|~|\?)?(?:-(\d{2})(%|~|\?)?)?)?)` + +const PATTERN_INDIVIDUAL_QUALIFICATION string = `(?:(%|~|\?)?(\d{4})(?:-(%|~|\?)?(\d{2})(?:-(%|~|\?)?(\d{2}))?)?)` + +const PATTERN_UNSPECIFIED_DIGIT string = `([0-9X]{4})(?:-([0-9X]{2})(?:-([0-9X]{2}))?)?` + +const PATTERN_INTERVAL string = `(%|~|\?)?(\-?[0-9X]{4})(?:-(%|~|\?)?([0-9X]{2})(?:-(%|~|\?)?([0-9X]{2}))?)?\/(%|~|\?)?(\-?[0-9X]{4})(?:-(%|~|\?)?([0-9X]{2})(?:-(%|~|\?)?([0-9X]{2}))?)?` diff --git a/vendor/github.com/sfomuseum/go-edtf/tests/tests.go b/vendor/github.com/sfomuseum/go-edtf/tests/tests.go new file mode 100644 index 0000000..9aaae9d --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/tests/tests.go @@ 
-0,0 +1,438 @@ +package tests + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "time" +) + +type TestResult struct { + options TestResultOptions +} + +type TestResultOptions struct { + StartLowerTimeRFC3339 string + StartUpperTimeRFC3339 string + EndLowerTimeRFC3339 string + EndUpperTimeRFC3339 string + EndLowerTimeUnix int64 + StartUpperTimeUnix int64 + StartLowerTimeUnix int64 + EndUpperTimeUnix int64 + StartLowerUncertain edtf.Precision + StartUpperUncertain edtf.Precision + EndLowerUncertain edtf.Precision + EndUpperUncertain edtf.Precision + StartLowerApproximate edtf.Precision + StartUpperApproximate edtf.Precision + EndLowerApproximate edtf.Precision + EndUpperApproximate edtf.Precision + StartLowerPrecision edtf.Precision + StartUpperPrecision edtf.Precision + EndLowerPrecision edtf.Precision + EndUpperPrecision edtf.Precision + StartLowerIsOpen bool + StartUpperIsOpen bool + EndLowerIsOpen bool + EndUpperIsOpen bool + StartLowerIsUnknown bool + StartUpperIsUnknown bool + EndLowerIsUnknown bool + EndUpperIsUnknown bool + StartLowerInclusivity edtf.Precision + StartUpperInclusivity edtf.Precision + EndLowerInclusivity edtf.Precision + EndUpperInclusivity edtf.Precision +} + +func NewTestResult(opts TestResultOptions) *TestResult { + + r := &TestResult{ + options: opts, + } + + return r +} + +func (r *TestResult) TestDate(d *edtf.EDTFDate) error { + + /* + + if d.Start.Lower.Time != nil { + fmt.Printf("[%s][start.lower] %s %d\n", d.String(), d.Start.Lower.Time.Format(time.RFC3339), d.Start.Lower.Time.Unix()) + } + + if d.Start.Upper.Time != nil { + fmt.Printf("[%s][start.upper] %s %d\n", d.String(), d.Start.Upper.Time.Format(time.RFC3339), d.Start.Upper.Time.Unix()) + } + + if d.End.Lower.Time != nil { + fmt.Printf("[%s][end.lower] %s %d\n", d.String(), d.End.Lower.Time.Format(time.RFC3339), d.End.Lower.Time.Unix()) + } + + if d.End.Upper.Time != nil { + fmt.Printf("[%s][end.upper] %s %d\n", d.String(), d.End.Upper.Time.Format(time.RFC3339), d.End.Upper.Time.Unix()) + } + + */ + + err := r.testRFC3339All(d) + + if err != nil { + return err + } + + err = r.testUnixAll(d) + + if err != nil { + return err + } + + err = r.testPrecisionAll(d) + + if err != nil { + return err + } + + err = r.testUncertainAll(d) + + if err != nil { + return err + } + + err = r.testApproximateAll(d) + + if err != nil { + return err + } + + err = r.testIsOpenAll(d) + + if err != nil { + return err + } + + err = r.testIsUnknownAll(d) + + if err != nil { + return err + } + + err = r.testInclusivityAll(d) + + if err != nil { + return err + } + + return nil +} + +func (r *TestResult) testIsOpenAll(d *edtf.EDTFDate) error { + + err := r.testBoolean(d.Start.Lower.Open, r.options.StartLowerIsOpen) + + if err != nil { + return fmt.Errorf("Invalid StartLowerIsOpen flag, %v", err) + } + + err = r.testBoolean(d.Start.Upper.Open, r.options.StartUpperIsOpen) + + if err != nil { + return fmt.Errorf("Invalid StartUpperIsOpen flag, %v", err) + } + + err = r.testBoolean(d.End.Lower.Open, r.options.EndLowerIsOpen) + + if err != nil { + return fmt.Errorf("Invalid EndLowerIsOpen flag, %v", err) + } + + err = r.testBoolean(d.End.Upper.Open, r.options.EndUpperIsOpen) + + if err != nil { + return fmt.Errorf("Invalid EndUpperIsOpen flag, %v", err) + } + + return nil +} + +func (r *TestResult) testIsUnknownAll(d *edtf.EDTFDate) error { + + err := r.testBoolean(d.Start.Lower.Unknown, r.options.StartLowerIsUnknown) + + if err != nil { + return fmt.Errorf("Invalid StartLowerIsUnknown flag, %v", err) + } + + err = 
r.testBoolean(d.Start.Upper.Unknown, r.options.StartUpperIsUnknown) + + if err != nil { + return fmt.Errorf("Invalid StartUpperIsUnknown flag, %v", err) + } + + err = r.testBoolean(d.End.Lower.Unknown, r.options.EndLowerIsUnknown) + + if err != nil { + return fmt.Errorf("Invalid EndLowerIsUnknown flag, %v", err) + } + + err = r.testBoolean(d.End.Upper.Unknown, r.options.EndUpperIsUnknown) + + if err != nil { + return fmt.Errorf("Invalid EndUpperIsUnknown flag, %v", err) + } + + return nil +} + +func (r *TestResult) testBoolean(candidate bool, expected bool) error { + + if candidate != expected { + return fmt.Errorf("Boolean test failed, expected '%t' but got '%t'", expected, candidate) + } + + return nil +} + +func (r *TestResult) testInclusivityAll(d *edtf.EDTFDate) error { + + err := r.testPrecision(d.Start.Lower.Inclusivity, r.options.StartLowerInclusivity) + + if err != nil { + return fmt.Errorf("Invalid StartLowerInclusivity flag, %v", err) + } + + err = r.testPrecision(d.Start.Upper.Inclusivity, r.options.StartUpperInclusivity) + + if err != nil { + return fmt.Errorf("Invalid StartUpperInclusivity flag, %v", err) + } + + err = r.testPrecision(d.End.Lower.Inclusivity, r.options.EndLowerInclusivity) + + if err != nil { + return fmt.Errorf("Invalid EndLowerInclusivity flag, %v", err) + } + + err = r.testPrecision(d.End.Upper.Inclusivity, r.options.EndUpperInclusivity) + + if err != nil { + return fmt.Errorf("Invalid EndUpperInclusivity flag, %v", err) + } + + return nil +} + +func (r *TestResult) testPrecisionAll(d *edtf.EDTFDate) error { + + err := r.testPrecision(d.Start.Lower.Precision, r.options.StartLowerPrecision) + + if err != nil { + return fmt.Errorf("Invalid StartLowerPrecision flag, %v", err) + } + + err = r.testPrecision(d.Start.Upper.Precision, r.options.StartUpperPrecision) + + if err != nil { + return fmt.Errorf("Invalid StartUpperPrecision flag, %v", err) + } + + err = r.testPrecision(d.End.Lower.Precision, r.options.EndLowerPrecision) + + if err != nil { + return fmt.Errorf("Invalid EndLowerPrecision flag, %v", err) + } + + err = r.testPrecision(d.End.Upper.Precision, r.options.EndUpperPrecision) + + if err != nil { + return fmt.Errorf("Invalid EndUpperPrecision flag, %v", err) + } + + return nil +} + +func (r *TestResult) testUncertainAll(d *edtf.EDTFDate) error { + + err := r.testPrecision(d.Start.Lower.Uncertain, r.options.StartLowerUncertain) + + if err != nil { + return fmt.Errorf("Invalid StartLowerUncertain flag, %v", err) + } + + err = r.testPrecision(d.Start.Upper.Uncertain, r.options.StartUpperUncertain) + + if err != nil { + return fmt.Errorf("Invalid StartUpperUncertain flag, %v", err) + } + + err = r.testPrecision(d.End.Lower.Uncertain, r.options.EndLowerUncertain) + + if err != nil { + return fmt.Errorf("Invalid EndLowerUncertain flag, %v", err) + } + + err = r.testPrecision(d.End.Upper.Uncertain, r.options.EndUpperUncertain) + + if err != nil { + return fmt.Errorf("Invalid EndUpperUncertain flag, %v", err) + } + + return nil +} + +func (r *TestResult) testApproximateAll(d *edtf.EDTFDate) error { + + err := r.testPrecision(d.Start.Lower.Approximate, r.options.StartLowerApproximate) + + if err != nil { + return fmt.Errorf("Invalid StartLowerApproximate flag, %v", err) + } + + err = r.testPrecision(d.Start.Upper.Approximate, r.options.StartUpperApproximate) + + if err != nil { + return fmt.Errorf("Invalid StartUpperApproximate flag, %v", err) + } + + err = r.testPrecision(d.End.Lower.Approximate, r.options.EndLowerApproximate) + + if err != nil { + return 
fmt.Errorf("Invalid EndLowerApproximate flag, %v", err) + } + + err = r.testPrecision(d.End.Upper.Approximate, r.options.EndUpperApproximate) + + if err != nil { + return fmt.Errorf("Invalid EndUpperApproximate flag, %v", err) + } + + return nil +} + +func (r *TestResult) testPrecision(flags edtf.Precision, expected edtf.Precision) error { + + if expected == edtf.NONE { + return nil + } + + if !flags.HasFlag(expected) { + return fmt.Errorf("Missing flag %v", expected) + } + + return nil +} + +func (r *TestResult) testRFC3339All(d *edtf.EDTFDate) error { + + if r.options.StartLowerTimeRFC3339 != "" { + + err := r.testRFC3339(r.options.StartLowerTimeRFC3339, d.Start.Lower.Timestamp) + + if err != nil { + return fmt.Errorf("Failed StartLowerTimeRFC3339 test, %v", err) + } + } + + if r.options.StartUpperTimeRFC3339 != "" { + + err := r.testRFC3339(r.options.StartUpperTimeRFC3339, d.Start.Upper.Timestamp) + + if err != nil { + return fmt.Errorf("Failed StartUpperTimeRFC3339 test, %v", err) + } + } + + if r.options.EndLowerTimeRFC3339 != "" { + + err := r.testRFC3339(r.options.EndLowerTimeRFC3339, d.End.Lower.Timestamp) + + if err != nil { + return fmt.Errorf("Failed EndLowerTimeRFC3339 test, %v", err) + } + } + + if r.options.EndUpperTimeRFC3339 != "" { + + err := r.testRFC3339(r.options.EndUpperTimeRFC3339, d.End.Upper.Timestamp) + + if err != nil { + return fmt.Errorf("Failed EndUpperTimeRFC3339 test, %v", err) + } + } + + return nil +} + +func (r *TestResult) testRFC3339(expected string, ts *edtf.Timestamp) error { + + if ts == nil { + return fmt.Errorf("Missing edtf.Timestamp instance") + } + + t := ts.Time() + + t_str := t.Format(time.RFC3339) + + if t_str != expected { + return fmt.Errorf("Invalid RFC3339 time, expected '%s' but got '%s'", expected, t_str) + } + + return nil +} + +func (r *TestResult) testUnixAll(d *edtf.EDTFDate) error { + + if r.options.StartLowerTimeUnix != 0 { + + err := r.testUnix(r.options.StartLowerTimeUnix, d.Start.Lower.Timestamp) + + if err != nil { + return fmt.Errorf("Failed StartLowerTimeUnix test, %v", err) + } + } + + if r.options.StartUpperTimeUnix != 0 { + + err := r.testUnix(r.options.StartUpperTimeUnix, d.Start.Upper.Timestamp) + + if err != nil { + return fmt.Errorf("Failed StartUpperTimeUnix test, %v", err) + } + } + + if r.options.EndLowerTimeUnix != 0 { + + err := r.testUnix(r.options.EndLowerTimeUnix, d.End.Lower.Timestamp) + + if err != nil { + return fmt.Errorf("Failed EndLowerTimeUnix test, %v", err) + } + } + + if r.options.EndUpperTimeUnix != 0 { + + err := r.testUnix(r.options.EndUpperTimeUnix, d.End.Upper.Timestamp) + + if err != nil { + return fmt.Errorf("Failed EndUpperTimeUnix test, %v", err) + } + } + + return nil +} + +func (r *TestResult) testUnix(expected int64, ts *edtf.Timestamp) error { + + if ts == nil { + return fmt.Errorf("Missing edtf.Timestamp instance") + } + + ts_unix := ts.Unix() + + if ts_unix != expected { + return fmt.Errorf("Invalid Unix time, expected '%d' but got '%d'", expected, ts_unix) + } + + return nil +} diff --git a/vendor/github.com/sfomuseum/go-edtf/timestamp.go b/vendor/github.com/sfomuseum/go-edtf/timestamp.go new file mode 100644 index 0000000..cec31f6 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/timestamp.go @@ -0,0 +1,45 @@ +package edtf + +import ( + "strconv" + "strings" + "time" +) + +type Timestamp struct { + timestamp int64 +} + +func NewTimestampWithTime(t *time.Time) *Timestamp { + return &Timestamp{t.Unix()} +} + +func (ts *Timestamp) Time() *time.Time { + + t := time.Unix(ts.Unix(), 
0) + t = t.UTC() + + return &t +} + +func (ts *Timestamp) Unix() int64 { + return ts.timestamp +} + +func (ts *Timestamp) UnmarshalJSON(b []byte) error { + + s := strings.Trim(string(b), `"`) + i, err := strconv.ParseInt(s, 10, 64) + + if err != nil { + return err + } + + *ts = Timestamp{i} + return nil +} + +func (ts Timestamp) MarshalJSON() ([]byte, error) { + str_ts := strconv.FormatInt(ts.timestamp, 10) + return []byte(str_ts), nil +}
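The `Timestamp` type above deliberately round-trips through JSON as a bare integer rather than an RFC 3339 string. A minimal sketch of how it behaves, not part of this diff (the sample date and the printed values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/sfomuseum/go-edtf"
)

func main() {

	// Wrap a concrete time.Time in an edtf.Timestamp.
	t := time.Date(2024, 2, 29, 0, 0, 0, 0, time.UTC)
	ts := edtf.NewTimestampWithTime(&t)

	fmt.Println(ts.Unix())                      // 1709164800
	fmt.Println(ts.Time().Format(time.RFC3339)) // 2024-02-29T00:00:00Z

	// MarshalJSON emits the Unix timestamp as an unquoted integer.
	enc, _ := json.Marshal(ts)
	fmt.Println(string(enc)) // 1709164800
}
```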
diff --git a/vendor/github.com/sfomuseum/go-edtf/unix/range.go b/vendor/github.com/sfomuseum/go-edtf/unix/range.go new file mode 100644 index 0000000..b6a26cc --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/unix/range.go @@ -0,0 +1,127 @@ +package unix + +import ( + "fmt" + "github.com/sfomuseum/go-edtf" + "github.com/sfomuseum/go-edtf/parser" + _ "log" +) + +// DateSpan is a struct containing Unix timestamps for a range of (two) dates. Dates before 1970-01-01 are represented as negative values. +type DateSpan struct { + // Start is the Unix timestamp for the starting date. + Start int64 + // End is the Unix timestamp for the ending date. + End int64 +} + +// DateRange is a struct containing inner and outer `DateSpan` instances for an EDTF date string. +type DateRange struct { + // Outer is a `DateSpan` instance which matches the lower value of a starting date range and the upper value of an ending date range. + Outer *DateSpan + // Inner is a `DateSpan` instance which matches the upper value of a starting date range and the lower value of an ending date range. + Inner *DateSpan +} + +// DeriveRanges will parse 'edtf_str' and return a boolean flag signaling that it was possible to derive date ranges and, when possible, a +// `DateRange` instance containing Unix timestamps. For example, some EDTF date strings like ".." (indicating an "open" or "ongoing" date) +// are valid EDTF but not suitable for deriving a date range. +func DeriveRanges(edtf_str string) (bool, *DateRange, error) { + + if !isValid(edtf_str) { + return false, nil, nil + } + + edtf_dt, err := parser.ParseString(edtf_str) + + if err != nil { + return false, nil, fmt.Errorf("Failed to parse '%s', %w", edtf_str, err) + } + + start := edtf_dt.Start + end := edtf_dt.End + + start_lower := start.Lower + start_upper := start.Upper + + end_lower := end.Lower + end_upper := end.Upper + + if start_lower == nil { + return false, nil, nil + } + + if start_upper == nil { + return false, nil, nil + } + + if end_lower == nil { + return false, nil, nil + } + + if end_upper == nil { + return false, nil, nil + } + + start_lower_ts := start_lower.Timestamp + start_upper_ts := start_upper.Timestamp + + end_lower_ts := end_lower.Timestamp + end_upper_ts := end_upper.Timestamp + + if start_lower_ts == nil { + return false, nil, nil + } + + if start_upper_ts == nil { + return false, nil, nil + } + + if end_lower_ts == nil { + return false, nil, nil + } + + if end_upper_ts == nil { + return false, nil, nil + } + + outer_start := start_lower_ts.Unix() + outer_end := end_upper_ts.Unix() + + inner_start := start_upper_ts.Unix() + inner_end := end_lower_ts.Unix() + + outer := &DateSpan{ + Start: outer_start, + End: outer_end, + } + + inner := &DateSpan{ + Start: inner_start, + End: inner_end, + } + + r := &DateRange{ + Outer: outer, + Inner: inner, + } + + return true, r, nil +} + +func isValid(edtf_str string) bool { + + if edtf.IsOpen(edtf_str) { + return false + } + + if edtf.IsUnknown(edtf_str) { + return false + } + + if edtf.IsUnspecified(edtf_str) { + return false + } + + return true +}
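Taken together with `isValid`, `DeriveRanges` either yields inner and outer spans or reports, without error, that no concrete range can be derived. A short usage sketch, not part of this diff (the EDTF string is an arbitrary example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/sfomuseum/go-edtf/unix"
)

func main() {

	// "2004-06~" is EDTF for "approximately June 2004".
	ok, r, err := unix.DeriveRanges("2004-06~")

	if err != nil {
		log.Fatalf("Failed to derive ranges, %v", err)
	}

	if !ok {
		// Valid EDTF (for example "..") but no concrete range to derive.
		return
	}

	// Outer bounds the widest possible interval, Inner the narrowest.
	fmt.Println(r.Outer.Start, r.Outer.End)
	fmt.Println(r.Inner.Start, r.Inner.End)
}
```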
diff --git a/vendor/github.com/sfomuseum/go-edtf/unix/unix.go b/vendor/github.com/sfomuseum/go-edtf/unix/unix.go new file mode 100644 index 0000000..c98d535 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/unix/unix.go @@ -0,0 +1,2 @@ +// Package unix provides methods for deriving Unix timestamps from EDTF strings. +package unix diff --git a/vendor/github.com/sfomuseum/go-edtf/ymd.go b/vendor/github.com/sfomuseum/go-edtf/ymd.go new file mode 100644 index 0000000..d8b69c0 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-edtf/ymd.go @@ -0,0 +1,32 @@ +package edtf + +import ( + "fmt" +) + +type YMD struct { + Year int `json:"year"` + Month int `json:"month"` + Day int `json:"day"` +} + +func (ymd *YMD) String() string { + return fmt.Sprintf("[%T] Y: '%d' M: '%d' D: '%d'", ymd, ymd.Year, ymd.Month, ymd.Day) +} + +func (ymd *YMD) Equals(other_ymd *YMD) bool { + + if ymd.Year != other_ymd.Year { + return false + } + + if ymd.Month != other_ymd.Month { + return false + } + + if ymd.Day != other_ymd.Day { + return false + } + + return true +} diff --git a/vendor/github.com/sfomuseum/go-flags/LICENSE b/vendor/github.com/sfomuseum/go-flags/LICENSE new file mode 100644 index 0000000..d2b5283 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2020, City and County of San Francisco, acting by and through its +Airport Commission ("City"). All rights reserved. + +The City and County of San Francisco, acting by and through its Airport +Commission, created and operates the SFO Museum. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of the City nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/sfomuseum/go-flags/flagset/flagset.go b/vendor/github.com/sfomuseum/go-flags/flagset/flagset.go new file mode 100644 index 0000000..4a27655 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/flagset/flagset.go @@ -0,0 +1,85 @@ +// Package flagset provides methods for working with `flag.FlagSet` instances. +package flagset + +import ( + "flag" + "fmt" + "log" + "os" + "strings" +) + +// Parse command line arguments with a flag.FlagSet instance. +func Parse(fs *flag.FlagSet) { + + args := os.Args[1:] + + if len(args) > 0 && args[0] == "-h" { + fs.Usage() + os.Exit(0) + } + + fs.Parse(args) +} + +// Assign values to a flag.FlagSet instance from matching environment variables. +func SetFlagsFromEnvVars(fs *flag.FlagSet, prefix string) error { + return SetFlagsFromEnvVarsWithFeedback(fs, prefix, false) +} + +// Assign values to a flag.FlagSet instance from matching environment variables, optionally logging progress and other feedback. +func SetFlagsFromEnvVarsWithFeedback(fs *flag.FlagSet, prefix string, feedback bool) error { + + fs.VisitAll(func(fl *flag.Flag) { + + name := fl.Name + env := FlagNameToEnvVar(prefix, name) + + val, ok := os.LookupEnv(env) + + if ok { + + if feedback { + log.Printf("set -%s flag from %s environment variable\n", name, env) + } + + fs.Set(name, val) + } + }) + + return nil +} + +// Create a new flag.FlagSet instance. +func NewFlagSet(name string) *flag.FlagSet { + + fs := flag.NewFlagSet(name, flag.ExitOnError) + + fs.Usage = func() { + fs.PrintDefaults() + } + + return fs +} + +// FlagNameToEnvVar formats 'name' and 'prefix' into an environment variable name, used to look up +// a value. +func FlagNameToEnvVar(prefix string, name string) string { + + prefix = normalizeEnvVar(prefix) + name = normalizeEnvVar(name) + + return fmt.Sprintf("%s_%s", prefix, name) + +} + +// normalizeEnvVar normalizes a flag name into its corresponding environment variable name. 
+func normalizeEnvVar(raw string) string { + + new := raw + + new = strings.ToUpper(new) + new = strings.Replace(new, "-", "_", -1) + + return new +} diff --git a/vendor/github.com/sfomuseum/go-flags/multi/bool.go b/vendor/github.com/sfomuseum/go-flags/multi/bool.go new file mode 100644 index 0000000..6f8315d --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/multi/bool.go @@ -0,0 +1,12 @@ +package multi + +type MultiBool []bool + +func (m *MultiBool) Set(value bool) error { + *m = append(*m, value) + return nil +} + +func (m *MultiBool) Get() interface{} { + return *m +} diff --git a/vendor/github.com/sfomuseum/go-flags/multi/float.go b/vendor/github.com/sfomuseum/go-flags/multi/float.go new file mode 100644 index 0000000..19db318 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/multi/float.go @@ -0,0 +1,47 @@ +package multi + +import ( + "strconv" + "strings" +) + +type MultiFloat64 []float64 + +func (m *MultiFloat64) String() string { + + str_values := make([]string, len(*m)) + + for i, v := range *m { + str_values[i] = strconv.FormatFloat(v, 'f', 10, 64) + } + + return strings.Join(str_values, "\n") +} + +func (m *MultiFloat64) Set(str_value string) error { + + value, err := strconv.ParseFloat(str_value, 64) + + if err != nil { + return err + } + + *m = append(*m, value) + return nil +} + +func (m *MultiFloat64) Get() interface{} { + return *m +} + +func (m *MultiFloat64) Contains(value float64) bool { + + for _, test := range *m { + + if test == value { + return true + } + } + + return false +} diff --git a/vendor/github.com/sfomuseum/go-flags/multi/int.go b/vendor/github.com/sfomuseum/go-flags/multi/int.go new file mode 100644 index 0000000..fda94e5 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/multi/int.go @@ -0,0 +1,88 @@ +package multi + +import ( + "strconv" + "strings" +) + +type MultiInt []int + +func (m *MultiInt) String() string { + + str_values := make([]string, len(*m)) + + for i, v := range *m { + str_values[i] = strconv.Itoa(v) + } + + return strings.Join(str_values, "\n") +} + +func (m *MultiInt) Set(str_value string) error { + + value, err := strconv.Atoi(str_value) + + if err != nil { + return err + } + + *m = append(*m, value) + return nil +} + +func (m *MultiInt) Get() interface{} { + return *m +} + +func (m *MultiInt) Contains(value int) bool { + + for _, test := range *m { + + if test == value { + return true + } + } + + return false +} + +type MultiInt64 []int64 + +func (m *MultiInt64) String() string { + + str_values := make([]string, len(*m)) + + for i, v := range *m { + str_values[i] = strconv.FormatInt(v, 10) + } + + return strings.Join(str_values, "\n") +} + +func (m *MultiInt64) Set(str_value string) error { + + value, err := strconv.ParseInt(str_value, 10, 64) + + if err != nil { + return err + } + + *m = append(*m, value) + return nil +} + +func (m *MultiInt64) Get() interface{} { + return *m +} + +func (m *MultiInt64) Contains(value int64) bool { + + for _, test := range *m { + + if test == value { + return true + } + } + + return false +} diff --git a/vendor/github.com/sfomuseum/go-flags/multi/keyvalue.go b/vendor/github.com/sfomuseum/go-flags/multi/keyvalue.go new file mode 100644 index 0000000..d6ba650 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/multi/keyvalue.go @@ -0,0 +1,186 @@ +package multi + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const SEP string = "=" + +type KeyValueFlag interface { + Key() string + Value() interface{} +} + +type KeyValueStringFlag struct { + KeyValueFlag + key string + 
value string +} + +func (e *KeyValueStringFlag) Key() string { + return e.key +} + +func (e *KeyValueStringFlag) Value() interface{} { + return e.value +} + +type KeyValueCSVString []*KeyValueStringFlag + +func (e *KeyValueCSVString) String() string { + + parts := make([]string, len(*e)) + + for idx, k := range *e { + parts[idx] = fmt.Sprintf("%s=%s", k.Key(), k.Value().(string)) + } + + return strings.Join(parts, ",") +} + +func (e *KeyValueCSVString) Set(value string) error { + + for _, v := range strings.Split(value, ",") { + + v = strings.Trim(v, " ") + kv := strings.Split(v, SEP) + + if len(kv) != 2 { + return errors.New("Invalid key=value argument") + } + + a := KeyValueStringFlag{ + key: kv[0], + value: kv[1], + } + + *e = append(*e, &a) + } + + return nil +} + +type KeyValueString []*KeyValueStringFlag + +func (e *KeyValueString) String() string { + return fmt.Sprintf("%v", *e) +} + +func (e *KeyValueString) Set(value string) error { + + value = strings.Trim(value, " ") + kv := strings.Split(value, SEP) + + if len(kv) != 2 { + return errors.New("Invalid key=value argument") + } + + a := KeyValueStringFlag{ + key: kv[0], + value: kv[1], + } + + *e = append(*e, &a) + return nil +} + +func (e *KeyValueString) Get() interface{} { + return *e +} + +type KeyValueInt64Flag struct { + key string + value int64 +} + +func (e *KeyValueInt64Flag) Key() string { + return e.key +} + +func (e *KeyValueInt64Flag) Value() interface{} { + return e.value +} + +type KeyValueInt64 []*KeyValueInt64Flag + +func (e *KeyValueInt64) String() string { + return fmt.Sprintf("%v", *e) +} + +func (e *KeyValueInt64) Set(value string) error { + + value = strings.Trim(value, " ") + kv := strings.Split(value, SEP) + + if len(kv) != 2 { + return errors.New("Invalid key=value argument") + } + + v, err := strconv.ParseInt(kv[1], 10, 64) + + if err != nil { + return err + } + + a := KeyValueInt64Flag{ + key: kv[0], + value: v, + } + + *e = append(*e, &a) + return nil +} + +func (e *KeyValueInt64) Get() interface{} { + return *e +} + +type KeyValueFloat64Flag struct { + key string + value float64 +} + +func (e *KeyValueFloat64Flag) Key() string { + return e.key +} + +func (e *KeyValueFloat64Flag) Value() interface{} { + return e.value +} + +type KeyValueFloat64 []*KeyValueFloat64Flag + +func (e *KeyValueFloat64) String() string { + return fmt.Sprintf("%v", *e) +} + +func (e *KeyValueFloat64) Set(value string) error { + + value = strings.Trim(value, " ") + kv := strings.Split(value, SEP) + + if len(kv) != 2 { + return errors.New("Invalid key=value argument") + } + + v, err := strconv.ParseFloat(kv[1], 64) + + if err != nil { + return err + } + + a := KeyValueFloat64Flag{ + key: kv[0], + value: v, + } + + *e = append(*e, &a) + return nil +} + +func (e *KeyValueFloat64) Get() interface{} { + return *e +} diff --git a/vendor/github.com/sfomuseum/go-flags/multi/regexp.go b/vendor/github.com/sfomuseum/go-flags/multi/regexp.go new file mode 100644 index 0000000..178ca56 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/multi/regexp.go @@ -0,0 +1,36 @@ +package multi + +import ( + "fmt" + "regexp" + "strings" +) + +type MultiRegexp []*regexp.Regexp + +func (i *MultiRegexp) String() string { + + patterns := make([]string, 0) + + for _, re := range *i { + patterns = append(patterns, fmt.Sprintf("%v", re)) + } + + return strings.Join(patterns, "\n") +} + +func (i *MultiRegexp) Set(value string) error { + + re, err := regexp.Compile(value) + + if err != nil { + return err + } + + *i = append(*i, re) + return nil +} + 
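Because these `multi.*` types implement `Set(string) error` and `String() string`, they satisfy the standard `flag.Value` interface and can be registered with `FlagSet.Var` to collect repeated flags. A minimal sketch, not part of this diff (the flag names and values are arbitrary; note that `MultiBool` above takes a `bool` in its `Set` method and so does not fit `flag.Value`):

```go
package main

import (
	"flag"
	"fmt"

	"github.com/sfomuseum/go-flags/multi"
)

func main() {

	var names multi.MultiString
	var ids multi.MultiInt64

	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&names, "name", "One or more names. Repeat the flag to append values.")
	fs.Var(&ids, "id", "One or more 64-bit IDs. Repeat the flag to append values.")

	fs.Parse([]string{"-name", "sfo", "-name", "sfomuseum", "-id", "1159396131"})

	fmt.Println(names.Contains("sfo"))    // true
	fmt.Println(ids.Contains(1159396131)) // true
}
```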
+func (i *MultiRegexp) Get() interface{} { + return *i +} diff --git a/vendor/github.com/sfomuseum/go-flags/multi/string.go b/vendor/github.com/sfomuseum/go-flags/multi/string.go new file mode 100644 index 0000000..fc698c9 --- /dev/null +++ b/vendor/github.com/sfomuseum/go-flags/multi/string.go @@ -0,0 +1,63 @@ +package multi + +import ( + "strings" +) + +type MultiString []string + +func (m *MultiString) String() string { + return strings.Join(*m, "\n") +} + +func (m *MultiString) Set(value string) error { + *m = append(*m, value) + return nil +} + +func (m *MultiString) Get() interface{} { + return *m +} + +func (m *MultiString) Contains(value string) bool { + + for _, test := range *m { + + if test == value { + return true + } + } + + return false +} + +type MultiCSVString []string + +func (m *MultiCSVString) String() string { + return strings.Join(*m, "\n") +} + +func (m *MultiCSVString) Set(value string) error { + + for _, v := range strings.Split(value, ",") { + *m = append(*m, v) + } + + return nil +} + +func (m *MultiCSVString) Get() interface{} { + return *m +} + +func (m *MultiCSVString) Contains(value string) bool { + + for _, test := range *m { + + if test == value { + return true + } + } + + return false +} diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 0000000..387766d --- /dev/null +++ b/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,492 @@ +

+GJSON + +GoDoc | GJSON Playground | GJSON Syntax + +get json values quickly + 
+ +GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document. +It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines). + +Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool. + +This README is a quick overview of how to use GJSON, for more information check out [GJSON Syntax](SYNTAX.md). + +GJSON is also available for [Python](https://github.com/volans-/gjson-py) and [Rust](https://github.com/tidwall/gjson.rs) + +Getting Started +=============== + +## Installing + +To start using GJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/gjson +``` + +This will retrieve the library. + +## Get a value +Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately. + +```go +package main + +import "github.com/tidwall/gjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value := gjson.Get(json, "name.last") + println(value.String()) +} +``` + +This will print: + +``` +Prichard +``` +*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* + +## Path Syntax + +Below is a quick overview of the path syntax, for more complete information please +check out [GJSON Syntax](SYNTAX.md). + +A path is a series of keys separated by a dot. +A key may contain special wildcard characters '\*' and '?'. +To access an array value use the index as the key. +To get the number of elements in an array or to access a child path, use the '#' character. +The dot and wildcard characters can be escaped with '\\'. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#(...)`, or find all +matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` +comparison operators and the simple pattern matching `%` (like) and `!%` +(not like) operators. + +``` +friends.#(last=="Murphy").first >> "Dale" +friends.#(last=="Murphy")#.first >> ["Dale","Jane"] +friends.#(age>45)#.last >> ["Craig","Murphy"] +friends.#(first%"D*").last >> "Murphy" +friends.#(first!%"D*").last >> "Craig" +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new +[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility, +`#[...]` will continue to work until the next major release.* + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. 
+Arrays and Objects are returned as their raw json types. + +The `Result` type holds one of these: + +``` +bool, for JSON booleans +float64, for JSON numbers +string, for JSON string literals +nil, for JSON null +``` + +To directly access the value: + +```go +result.Type // can be String, Number, True, False, Null, or JSON +result.Str // holds the string +result.Num // holds the float64 number +result.Raw // holds the raw json +result.Index // index of raw value in original json, zero means index unknown +result.Indexes // indexes of all the elements that match on a path containing the '#' query character. +``` + +There are a variety of handy functions that work on a result: + +```go +result.Exists() bool +result.Value() interface{} +result.Int() int64 +result.Uint() uint64 +result.Float() float64 +result.String() string +result.Bool() bool +result.Time() time.Time +result.Array() []gjson.Result +result.Map() map[string]gjson.Result +result.Get(path string) Result +result.ForEach(iterator func(key, value Result) bool) +result.Less(token Result, caseSensitive bool) bool +``` + +The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types: + +```go +boolean >> bool +number >> float64 +string >> string +null >> nil +array >> []interface{} +object >> map[string]interface{} +``` + +The `result.Array()` function returns back an array of values. +If the result represents a non-existent value, then an empty array will be returned. +If the result is not a JSON array, the return value will be an array containing one result. + +### 64-bit integers + +The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers. + +```go +result.Int() int64 // -9223372036854775808 to 9223372036854775807 +result.Uint() uint64 // 0 to 18446744073709551615 +``` + +## Modifiers and path chaining + +New in version 1.2 is support for modifier functions and path chaining. + +A modifier is a path component that performs custom processing on the +json. + +Multiple paths can be "chained" together using the pipe character. +This is useful for getting results from a modified query. + +For example, using the built-in `@reverse` modifier on the above json document, +we'll get `children` array and reverse the order: + +``` +"children|@reverse" >> ["Jack","Alex","Sara"] +"children|@reverse|0" >> "Jack" +``` + +There are currently the following built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from a json document. +- `@pretty`: Make the json document more human readable. +- `@this`: Returns the current element. It can be used to retrieve the root element. +- `@valid`: Ensure the json document is valid. +- `@flatten`: Flattens an array. +- `@join`: Joins multiple objects into a single object. +- `@keys`: Returns an array of keys for an object. +- `@values`: Returns an array of values for an object. +- `@tostr`: Converts json to a string. Wraps a json string. +- `@fromstr`: Converts a string from json. Unwraps a json string. +- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db). +- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf). + +### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON +document or just characters. 
+ +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier that makes the entire json document upper +or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +``` + +``` +"children|@case:upper" >> ["SARA","ALEX","JACK"] +"children|@case:lower|@reverse" >> ["jack","alex","sara"] +``` + +## JSON Lines + +There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multilined document as an array. + +For example: + +``` +{"name": "Gilbert", "age": 61} +{"name": "Alexa", "age": 34} +{"name": "May", "age": 57} +{"name": "Deloise", "age": 44} +``` + +``` +..# >> 4 +..1 >> {"name": "Alexa", "age": 34} +..3 >> {"name": "Deloise", "age": 44} +..#.name >> ["Gilbert","Alexa","May","Deloise"] +..#(name="May").age >> 57 +``` + +The `ForEachLines` function will iterate through JSON lines. + +```go +gjson.ForEachLine(json, func(line gjson.Result) bool{ + println(line.String()) + return true +}) +``` + +## Get nested array values + +Suppose you want all the last names from the following json: + +```json +{ + "programmers": [ + { + "firstName": "Janet", + "lastName": "McLaughlin", + }, { + "firstName": "Elliotte", + "lastName": "Hunter", + }, { + "firstName": "Jason", + "lastName": "Harold", + } + ] +} +``` + +You would use the path "programmers.#.lastName" like such: + +```go +result := gjson.Get(json, "programmers.#.lastName") +for _, name := range result.Array() { + println(name.String()) +} +``` + +You can also query an object inside an array: + +```go +name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`) +println(name.String()) // prints "Elliotte" +``` + +## Iterate through an object or array + +The `ForEach` function allows for quickly iterating through an object or array. +The key and value are passed to the iterator function for objects. +Only the value is passed for arrays. +Returning `false` from an iterator will stop iteration. + +```go +result := gjson.Get(json, "programmers") +result.ForEach(func(key, value gjson.Result) bool { + println(value.String()) + return true // keep iterating +}) +``` + +## Simple Parse and Get + +There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result. + +For example, all of these will return the same result: + +```go +gjson.Parse(json).Get("name").Get("last") +gjson.Get(json, "name").Get("last") +gjson.Get(json, "name.last") +``` + +## Check for the existence of a value + +Sometimes you just want to know if a value exists. 
+ +```go +value := gjson.Get(json, "name.last") +if !value.Exists() { + println("no last name") +} else { + println(value.String()) +} + +// Or as one step +if gjson.Get(json, "name.last").Exists() { + println("has a last name") +} +``` + +## Validate JSON + +The `Get*` and `Parse*` functions expect that the json is well-formed. Bad json will not panic, but it may return unexpected results. + +If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON. + +```go +if !gjson.Valid(json) { + return errors.New("invalid json") +} +value := gjson.Get(json, "name.last") +``` + +## Unmarshal to a map + +To unmarshal to a `map[string]interface{}`: + +```go +m, ok := gjson.Parse(json).Value().(map[string]interface{}) +if !ok { + // not a map +} +``` + +## Working with Bytes + +If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`. + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +``` + +If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern: + +```go +var json []byte = ... +result := gjson.GetBytes(json, path) +var raw []byte +if result.Index > 0 { + raw = json[result.Index:result.Index+len(result.Raw)] +} else { + raw = []byte(result.Raw) +} +``` + +This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. + +## Performance + +Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), +[ffjson](https://github.com/pquerna/ffjson), +[EasyJSON](https://github.com/mailru/easyjson), +[jsonparser](https://github.com/buger/jsonparser), +and [json-iterator](https://github.com/json-iterator/go) + +``` +BenchmarkGJSONGet-10 17893731 202.1 ns/op 0 B/op 0 allocs/op +BenchmarkGJSONUnmarshalMap-10 1663548 2157 ns/op 1920 B/op 26 allocs/op +BenchmarkJSONUnmarshalMap-10 832236 4279 ns/op 2920 B/op 68 allocs/op +BenchmarkJSONUnmarshalStruct-10 1076475 3219 ns/op 920 B/op 12 allocs/op +BenchmarkJSONDecoder-10 585729 6126 ns/op 3845 B/op 160 allocs/op +BenchmarkFFJSONLexer-10 2508573 1391 ns/op 880 B/op 8 allocs/op +BenchmarkEasyJSONLexer-10 3000000 537.9 ns/op 501 B/op 5 allocs/op +BenchmarkJSONParserGet-10 13707510 263.9 ns/op 21 B/op 0 allocs/op +BenchmarkJSONIterator-10 3000000 561.2 ns/op 693 B/op 14 allocs/op +``` + +JSON document used: + +```json +{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +``` + +Each operation was rotated through one of the following search paths: + +``` +widget.window.name +widget.image.hOffset +widget.text.onMouseUp +``` + +*These benchmarks were run on a MacBook Pro M1 Max using Go 1.22 and can be found [here](https://github.com/tidwall/gjson-benchmarks).* diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md 
b/vendor/github.com/tidwall/gjson/SYNTAX.md new file mode 100644 index 0000000..a3f0fac --- /dev/null +++ b/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -0,0 +1,360 @@ +# GJSON Path Syntax + +A GJSON Path is a text string syntax that describes a search pattern for quickly retrieving values from a JSON payload. + +This document is designed to explain the structure of a GJSON Path through examples. + +- [Path structure](#path-structure) +- [Basic](#basic) +- [Wildcards](#wildcards) +- [Escape Character](#escape-character) +- [Arrays](#arrays) +- [Queries](#queries) +- [Dot vs Pipe](#dot-vs-pipe) +- [Modifiers](#modifiers) +- [Multipaths](#multipaths) +- [Literals](#literals) + +The definitive implementation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson). +Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online. + +## Path structure + +A GJSON Path is intended to be easily expressed as a series of components separated by a `.` character. + +Along with `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, `!`, and `?`. + +## Example + +Given this JSON + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` + +The following GJSON Paths evaluate to the accompanying values. + +### Basic + +In many cases you'll just want to retrieve values by object name or array index. + +```go +name.last "Anderson" +name.first "Tom" +age 37 +children ["Sara","Alex","Jack"] +children.0 "Sara" +children.1 "Alex" +friends.1 {"first": "Roger", "last": "Craig", "age": 68} +friends.1.first "Roger" +``` + +### Wildcards + +A key may contain the special wildcard characters `*` and `?`. +The `*` will match on any zero+ characters, and `?` matches on any one character. + +```go +child*.2 "Jack" +c?ildren.0 "Sara" +``` + +### Escape character + +Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`. + +```go +fav\.movie "Deer Hunter" +``` + +You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in your source code. + +```go +// Go +val := gjson.Get(json, "fav\\.movie") // must escape the slash +val := gjson.Get(json, `fav\.movie`) // no need to escape the slash +``` + +```rust +// Rust +let val = gjson::get(json, "fav\\.movie") // must escape the slash +let val = gjson::get(json, r#"fav\.movie"#) // no need to escape the slash +``` + + +### Arrays + +The `#` character allows for digging into JSON Arrays. + +To get the length of an array you'll just use the `#` all by itself. + +```go +friends.# 3 +friends.#.age [44,68,47] +``` + +### Queries + +You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`. +Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators, +and the simple pattern matching `%` (like) and `!%` (not like) operators. + +```go +friends.#(last=="Murphy").first "Dale" +friends.#(last=="Murphy")#.first ["Dale","Jane"] +friends.#(age>45)#.last ["Craig","Murphy"] +friends.#(first%"D*").last "Murphy" +friends.#(first!%"D*").last "Craig" +``` + +To query for a non-object value in an array, you can forgo the string to the right of the operator. 
+ +```go +children.#(!%"*a*") "Alex" +children.#(%"*a*")# ["Sara","Jack"] +``` + +Nested queries are allowed. + +```go +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new [multipath](#multipaths) +syntax. For backwards compatibility, `#[...]` will continue to work until the +next major release.* + +The `~` (tilde) operator will convert a value to a boolean before comparison. + +Supported tilde comparison types are: + +``` +~true Converts true-ish values to true +~false Converts false-ish and non-existent values to true +~null Converts null and non-existent values to true +~* Converts any existing value to true +``` + +For example, using the following JSON: + +```json +{ + "vals": [ + { "a": 1, "b": "data" }, + { "a": 2, "b": true }, + { "a": 3, "b": false }, + { "a": 4, "b": "0" }, + { "a": 5, "b": 0 }, + { "a": 6, "b": "1" }, + { "a": 7, "b": 1 }, + { "a": 8, "b": "true" }, + { "a": 9, "b": false }, + { "a": 10, "b": null }, + { "a": 11 } + ] +} +``` + +To query for all true-ish or false-ish values: + +``` +vals.#(b==~true)#.a >> [2,6,7,8] +vals.#(b==~false)#.a >> [3,4,5,9,10,11] +``` + +The last value, which was non-existent, is treated as `false`. + +To query for null and explicit value existence: + +``` +vals.#(b==~null)#.a >> [10,11] +vals.#(b==~*)#.a >> [1,2,3,4,5,6,7,8,9,10] +vals.#(b!=~*)#.a >> [11] +``` + +### Dot vs Pipe + +The `.` is the standard separator, but it's also possible to use a `|`. +In most cases they both end up returning the same results. +The cases where `|` differs from `.` are when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries). + +Here are some examples: + +```go +friends.0.first "Dale" +friends|0.first "Dale" +friends.0|first "Dale" +friends|0|first "Dale" +friends|# 3 +friends.# 3 +friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +friends.#(last="Murphy")#.first ["Dale","Jane"] +friends.#(last="Murphy")#|first +friends.#(last="Murphy")#.0 [] +friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44} +friends.#(last="Murphy")#.# [] +friends.#(last="Murphy")#|# 2 +``` + +Let's break down a few of these. + +The path `friends.#(last="Murphy")#` all by itself results in + +```json +[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +``` + +The `.first` suffix will process the `first` path on each array element *before* returning the results, which becomes + +```json +["Dale","Jane"] +``` + +But the `|first` suffix actually processes the `first` path *after* the previous result. +Since the previous result is an array, not an object, it's not possible to process +because `first` does not exist. + +Yet, the `|0` suffix returns + +```json +{"first": "Dale", "last": "Murphy", "age": 44} +``` + +Because `0` is the first index of the previous result. 
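The dot-vs-pipe distinction is easier to see in running code. A short sketch, not part of the vendored docs, using `gjson.Get` with the example document from earlier in this file:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

const input = `{"friends":[
	{"first":"Dale","last":"Murphy","age":44},
	{"first":"Roger","last":"Craig","age":68},
	{"first":"Jane","last":"Murphy","age":47}
]}`

func main() {

	// ".first" maps the "first" path over each element of the query result.
	fmt.Println(gjson.Get(input, `friends.#(last=="Murphy")#.first`).String())
	// ["Dale","Jane"]

	// "|0" applies to the result array itself, selecting its first element.
	fmt.Println(gjson.Get(input, `friends.#(last=="Murphy")#|0`).String())
	// {"first":"Dale","last":"Murphy","age":44}
}
```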
+### Modifiers + +A modifier is a path component that performs custom processing on the JSON. + +For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array: + +```go +children.@reverse ["Jack","Alex","Sara"] +children.@reverse.0 "Jack" +``` + +There are currently the following built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from JSON. +- `@pretty`: Make the JSON more human readable. +- `@this`: Returns the current element. It can be used to retrieve the root element. +- `@valid`: Ensure the json document is valid. +- `@flatten`: Flattens an array. +- `@join`: Joins multiple objects into a single object. +- `@keys`: Returns an array of keys for an object. +- `@values`: Returns an array of values for an object. +- `@tostr`: Converts json to a string. Wraps a json string. +- `@fromstr`: Converts a string from json. Unwraps a json string. +- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db). +- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf). + +#### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON payload or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options is `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +#### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier which makes the entire JSON payload upper or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +"children.@case:upper" ["SARA","ALEX","JACK"] +"children.@case:lower.@reverse" ["jack","alex","sara"] +``` + +*Note: Custom modifiers are not yet available in the Rust version.* + +### Multipaths + +Starting with v1.3.0, GJSON added the ability to join multiple paths together +to form new documents. Wrapping comma-separated paths between `[...]` or +`{...}` will result in a new array or object, respectively. + +For example, using the given multipath: + +``` +{name.first,age,"the_murphys":friends.#(last="Murphy")#.first} +``` + +Here we selected the first name, age, and the first name for friends with the +last name "Murphy". + +You'll notice that an optional key can be provided, in this case +"the_murphys", to force assign a key to a value. Otherwise, the name of the +actual field will be used, in this case "first". If a name cannot be +determined, then "_" is used. + +This results in + +```json +{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]} +``` + +### Literals + +Starting with v1.12.0, GJSON added support for json literals, which provide a way for constructing static blocks of json. This can be particularly useful when constructing a new json document using [multipaths](#multipaths). + +A json literal begins with the '!' declaration character. + +For example, using the given multipath: + +``` +{name.first,age,"company":!"Happysoft","employed":!true} +``` + +Here we selected the first name and age. Then we add two new fields, "company" and "employed". + +This results in + +```json +{"first":"Tom","age":37,"company":"Happysoft","employed":true} +``` + +*See issue [#249](https://github.com/tidwall/gjson/issues/249) for additional context on JSON Literals.*
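A brief sketch, not part of the vendored docs, combining a multipath with a literal via `gjson.Get` (the input document and the injected value follow the examples above):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

const input = `{"name":{"first":"Tom","last":"Anderson"},"age":37}`

func main() {

	// Select two fields and inject a static "company" value with a "!" literal.
	path := `{name.first,age,"company":!"Happysoft"}`

	fmt.Println(gjson.Get(input, path).String())
	// {"first":"Tom","age":37,"company":"Happysoft"}
}
```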
+ +This results in + +```json +{"first":"Tom","age":37,"company":"Happysoft","employed":true} +``` + +*See issue [#249](https://github.com/tidwall/gjson/issues/249) for additional context on JSON Literals.* diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 0000000..5aa2a4f --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,3603 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "strconv" + "strings" + "time" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/tidwall/match" + "github.com/tidwall/pretty" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int + // Indexes of all the elements that match on a path containing the '#' + // query character. + Indexes []int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "" + case False: + return "false" + case Number: + if len(t.Raw) == 0 { + // calculated result + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + var i int + if t.Raw[0] == '-' { + i++ + } + for ; i < len(t.Raw); i++ { + if t.Raw[i] < '0' || t.Raw[i] > '9' { + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + } + return t.Raw + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + b, _ := strconv.ParseBool(strings.ToLower(t.Str)) + return b + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseInt(t.Str) + return n + case Number: + // try to directly convert the float64 to int64 + i, ok := safeInt(t.Num) + if ok { + return i + } + // now try to parse the raw string + i, ok = parseInt(t.Raw) + if ok { + return i + } + // fallback to a standard conversion + return int64(t.Num) + } +} + +// Uint returns an unsigned integer representation. +func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseUint(t.Str) + return n + case Number: + // try to directly convert the float64 to uint64 + i, ok := safeInt(t.Num) + if ok && i >= 0 { + return uint64(i) + } + // now try to parse the raw string + u, ok := parseUint(t.Raw) + if ok { + return u + } + // fallback to a standard conversion + return uint64(t.Num) + } +} + +// Float returns an float64 representation. 
+func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Time returns a time.Time representation. +func (t Result) Time() time.Time { + res, _ := time.Parse(time.RFC3339, t.String()) + return res +} + +// Array returns back an array of values. +// If the result represents a null value or is non-existent, then an empty +// array will be returned. +// If the result is not a JSON array, the return value will be an +// array containing one result. +func (t Result) Array() []Result { + if t.Type == Null { + return []Result{} + } + if !t.IsArray() { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// IsObject returns true if the result value is a JSON object. +func (t Result) IsObject() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{' +} + +// IsArray returns true if the result value is a JSON array. +func (t Result) IsArray() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '[' +} + +// IsBool returns true if the result value is a JSON boolean. +func (t Result) IsBool() bool { + return t.Type == True || t.Type == False +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be +// iterated. If the result is an Object, the iterator will pass the key and +// value of each item. If the result is an Array, the iterator will only pass +// the value of each item. If the result is not a JSON array or object, the +// iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var obj bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + obj = true + break + } else if json[i] == '[' { + i++ + key.Type = Number + key.Num = -1 + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + var idx int + for ; i < len(json); i++ { + if obj { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + t.Index + } else { + key.Num += 1 + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + if t.Indexes != nil { + if idx < len(t.Indexes) { + value.Index = t.Indexes[idx] + } + } else { + value.Index = s + t.Index + } + if !iterator(key, value) { + return + } + idx++ + } +} + +// Map returns back a map of values. The result should be a JSON object. +// If the result is not a JSON object, the return value will be an empty map. +func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +// Get searches result for the specified path. +// The result should be a JSON array or object. 
+func (t Result) Get(path string) Result { + r := Get(t.Raw, path) + if r.Indexes != nil { + for i := 0; i < len(r.Indexes); i++ { + r.Indexes[i] += t.Index + } + } else { + r.Index += t.Index + } + return r +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + value.Str = "" + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + value.Str, value.Num = "", 0 + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + value.Num = 0 + } + value.Index = i + t.Index + + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + if _, ok := r.oi[key.Str]; !ok { + r.oi[key.Str] = value.Value() + } + } else { + if _, ok := r.o[key.Str]; !ok { + r.o[key.Str] = value + } + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + if t.Indexes != nil { + if len(t.Indexes) != len(r.a) { + for i := 0; i < len(r.a); i++ { + r.a[i].Index = 0 + } + } else { + for i := 0; i < len(r.a); i++ { + r.a[i].Index = t.Indexes[i] + } + } + } + return +} + +// Parse parses the json and returns a result. +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. 
+func Parse(json string) Result { + var value Result + i := 0 + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + case 'n': + if i+1 < len(json) && json[i+1] != 'u' { + // nan + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + // null + value.Type = Null + value.Raw = tolit(json[i:]) + } + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + default: + return Result{} + } + break + } + if value.Exists() { + value.Index = i + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' or '(' or '"' + // squash the value, ignoring all nested arrays and objects. + var i, depth int + if json[0] != '"' { + i, depth = 1, 1 + } + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + if depth == 0 { + if i >= len(json) { + return json + } + return json[:i+1] + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + } else if json[i] == ']' || json[i] == '}' { + // break on ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return json[:i+1], unescape(json[1:i]) + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. 
+// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// map[string]interface{}, for JSON objects +// []interface{}, for JSON arrays +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || + json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + pipe string + piped bool + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + all bool + path string + op string + value string + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + if !r.arrch && i < len(path)-1 && isDotPiperChar(path[i+1:]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' || path[1] == '(' { + // query + r.query.on = true + qpath, op, value, _, fi, vesc, ok := + parseQuery(path[i:]) + if !ok { + // bad query, end now + break + } + if len(value) >= 2 && value[0] == '"' && + value[len(value)-1] == '"' { + value = value[1 : len(value)-1] + if vesc { + value = unescape(value) + } + } + r.query.path = qpath + r.query.op = op + r.query.value = value + + i = fi - 1 + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + } + } + continue + } + } + r.part = path + r.path = "" + return +} + +// splitQuery takes a query and splits it into three parts: +// +// path, op, middle, and right. 
+// +// So for this query: +// +// #(first_name=="Murphy").last +// +// Becomes +// +// first_name # path +// =="Murphy" # middle +// .last # right +// +// Or, +// +// #(service_roles.#(=="one")).cap +// +// Becomes +// +// service_roles.#(=="one") # path +// # middle +// .cap # right +func parseQuery(query string) ( + path, op, value, remain string, i int, vesc, ok bool, +) { + if len(query) < 2 || query[0] != '#' || + (query[1] != '(' && query[1] != '[') { + return "", "", "", "", i, false, false + } + i = 2 + j := 0 // start of value part + depth := 1 + for ; i < len(query); i++ { + if depth == 1 && j == 0 { + switch query[i] { + case '!', '=', '<', '>', '%': + // start of the value part + j = i + continue + } + } + if query[i] == '\\' { + i++ + } else if query[i] == '[' || query[i] == '(' { + depth++ + } else if query[i] == ']' || query[i] == ')' { + depth-- + if depth == 0 { + break + } + } else if query[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(query); i++ { + if query[i] == '\\' { + vesc = true + i++ + } else if query[i] == '"' { + break + } + } + } + } + if depth > 0 { + return "", "", "", "", i, false, false + } + if j > 0 { + path = trim(query[2:j]) + value = trim(query[j:i]) + remain = query[i+1:] + // parse the compare op from the value + var opsz int + switch { + case len(value) == 1: + opsz = 1 + case value[0] == '!' && value[1] == '=': + opsz = 2 + case value[0] == '!' && value[1] == '%': + opsz = 2 + case value[0] == '<' && value[1] == '=': + opsz = 2 + case value[0] == '>' && value[1] == '=': + opsz = 2 + case value[0] == '=' && value[1] == '=': + value = value[1:] + opsz = 1 + case value[0] == '<': + opsz = 1 + case value[0] == '>': + opsz = 1 + case value[0] == '=': + opsz = 1 + case value[0] == '%': + opsz = 1 + } + op = value[:opsz] + value = trim(value[opsz:]) + } else { + path = trim(query[2:i]) + remain = query[i+1:] + } + return path, op, value, remain, i + 1, vesc, true +} + +func trim(s string) string { +left: + if len(s) > 0 && s[0] <= ' ' { + s = s[1:] + goto left + } +right: + if len(s) > 0 && s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + goto right + } + return s +} + +// peek at the next byte and see if it's a '@', '[', or '{'. +func isDotPiperChar(s string) bool { + if DisableModifiers { + return false + } + c := s[0] + if c == '@' { + // check that the next component is *not* a modifier. + i := 1 + for ; i < len(s); i++ { + if s[i] == '.' || s[i] == '|' || s[i] == ':' { + break + } + } + _, ok := modifiers[s[1:i]] + return ok + } + return c == '[' || c == '{' +} + +type objectPathResult struct { + part string + path string + pipe string + piped bool + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + if i < len(path)-1 && isDotPiperChar(path[i+1:]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' 
{ + r.part = string(epart) + if i < len(path)-1 && isDotPiperChar(path[i+1:]) { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } else if path[i] == '|' { + r.part = string(epart) + r.pipe = path[i+1:] + r.piped = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +var vchars = [256]byte{ + '"': 2, '{': 3, '(': 3, '[': 3, '}': 1, ')': 1, ']': 1, +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' or '(' + // squash the value, ignoring all nested arrays and objects. + // the first '[' or '{' or '(' has already been read + s := i + i++ + depth := 1 + var c byte + for i < len(json) { + for i < len(json)-8 { + jslice := json[i : i+8] + c = vchars[jslice[0]] + if c != 0 { + i += 0 + goto token + } + c = vchars[jslice[1]] + if c != 0 { + i += 1 + goto token + } + c = vchars[jslice[2]] + if c != 0 { + i += 2 + goto token + } + c = vchars[jslice[3]] + if c != 0 { + i += 3 + goto token + } + c = vchars[jslice[4]] + if c != 0 { + i += 4 + goto token + } + c = vchars[jslice[5]] + if c != 0 { + i += 5 + goto token + } + c = vchars[jslice[6]] + if c != 0 { + i += 6 + goto token + } + c = vchars[jslice[7]] + if c != 0 { + i += 7 + goto token + } + i += 8 + } + c = vchars[json[i]] + if c == 0 { + i++ + continue + } + token: + if c == 2 { + // '"' string + i++ + s2 := i + nextquote: + for i < len(json)-8 { + jslice := json[i : i+8] + if jslice[0] == '"' { + i += 0 + goto strchkesc + } + if jslice[1] == '"' { + i += 1 + goto strchkesc + } + if jslice[2] == '"' { + i += 2 + goto strchkesc + } + if jslice[3] == '"' { + i += 3 + goto strchkesc + } + if jslice[4] == '"' { + i += 4 + goto strchkesc + } + if jslice[5] == '"' { + i += 5 + goto strchkesc + } + if jslice[6] == '"' { + i += 6 + goto strchkesc + } + if jslice[7] == '"' { + i += 7 + goto strchkesc + } + i += 8 + } + goto strchkstd + strchkesc: + if json[i-1] != '\\' { + i++ + continue + } + strchkstd: + for i < len(json) { + if json[i] > '\\' || json[i] != '"' { + i++ + continue + } + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + i++ + goto nextquote + } + } + break + } + } else { + // '{', '[', '(', '}', ']', ')' + // open close tokens + depth += int(c) - 2 + if depth == 0 { + i++ + return i, json[s:i] + } + } + i++ + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. 
+ i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + key, kesc, ok = c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = matchLimit(unescape(key), rp.part) + } else { + pmatch = matchLimit(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = pmatch && !rp.more + for ; i < len(c.json); i++ { + var num bool + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case 'n': + if i+1 < len(c.json) && c.json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + } + if num { + i, val = parseNumber(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + } + break + } + } + return i, false +} + +// matchLimit will limit the complexity of the match operation to avoid ReDos +// attacks from arbitrary inputs. +// See the github.com/tidwall/match.MatchLimit function for more information. 
+func matchLimit(str, pattern string) bool { + matched, _ := match.MatchLimit(str, pattern, 10000) + return matched +} + +func falseish(t Result) bool { + switch t.Type { + case Null: + return true + case False: + return true + case String: + b, err := strconv.ParseBool(strings.ToLower(t.Str)) + if err != nil { + return false + } + return !b + case Number: + return t.Num == 0 + default: + return false + } +} + +func trueish(t Result) bool { + switch t.Type { + case True: + return true + case String: + b, err := strconv.ParseBool(strings.ToLower(t.Str)) + if err != nil { + return false + } + return b + case Number: + return t.Num != 0 + default: + return false + } +} + +func nullish(t Result) bool { + return t.Type == Null +} + +func queryMatches(rp *arrayPathResult, value Result) bool { + rpv := rp.query.value + if len(rpv) > 0 { + if rpv[0] == '~' { + // convert to bool + rpv = rpv[1:] + var ish, ok bool + switch rpv { + case "*": + ish, ok = value.Exists(), true + case "null": + ish, ok = nullish(value), true + case "true": + ish, ok = trueish(value), true + case "false": + ish, ok = falseish(value), true + } + if ok { + rpv = "true" + if ish { + value = Result{Type: True} + } else { + value = Result{Type: False} + } + } else { + rpv = "" + value = Result{} + } + } + } + if !value.Exists() { + return false + } + if rp.query.op == "" { + // the query is only looking for existence, such as: + // friends.#(name) + // which makes sure that the array "friends" has an element of + // "name" that exists + return true + } + switch value.Type { + case String: + switch rp.query.op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return matchLimit(value.Str, rpv) + case "!%": + return !matchLimit(value.Str, rpv) + } + case Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + switch rp.query.op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num != rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c *parseContext, i int, path string) (int, bool) { + var pmatch, vesc, ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + var queryIndexes []int + rp := parseArrayPath(path) + if !rp.arrch { + n, ok := parseUint(rp.part) + if !ok { + partidx = -1 + } else { + partidx = int(n) + } + } + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + + procQuery := func(qval Result) bool { + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } + } + var tmp parseContext + tmp.value = qval + fillIndex(c.json, &tmp) + parentIndex := tmp.value.Index + var res Result + if qval.Type == JSON { + res = qval.Get(rp.query.path) + } else { + if rp.query.path != "" { + return false + } + res = qval + } + if queryMatches(&rp, res) { + if rp.more { + left, right, ok := splitPossiblePipe(rp.path) + if ok { + rp.path = left + c.pipe = 
right + c.piped = true + } + res = qval.Get(rp.path) + } else { + res = qval + } + if rp.query.all { + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + if raw != "" { + if len(multires) > 1 { + multires = append(multires, ',') + } + multires = append(multires, raw...) + queryIndexes = append(queryIndexes, res.Index+parentIndex) + } + } else { + c.value = res + return true + } + } + return false + } + for i < len(c.json)+1 { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; ; i++ { + var ch byte + if i > len(c.json) { + break + } else if i == len(c.json) { + ch = ']' + } else { + ch = c.json[i] + } + var num bool + switch ch { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if rp.query.on { + var qval Result + if vesc { + qval.Str = unescape(val[1 : len(val)-1]) + } else { + qval.Str = val[1 : len(val)-1] + } + qval.Raw = val + qval.Type = String + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case 'n': + if i+1 < len(c.json) && c.json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + switch vc { + case 't': + qval.Type = True + case 'f': + qval.Type = False + } + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + case ']': + if rp.arrch && rp.part == "#" { + if rp.alogok { + left, right, ok := splitPossiblePipe(rp.alogkey) + if ok { + rp.alogkey = left + c.pipe = right + c.piped = true + } + var indexes = make([]int, 0, 64) + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + idx := alog[j] + for idx < len(c.json) { + switch c.json[idx] { + case ' ', '\t', '\r', '\n': + idx++ + continue + } + break + } + if idx < len(c.json) && c.json[idx] != ']' { + _, res, ok := parseAny(c.json, idx, true) + if ok { + res := res.Get(rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + jsons = append(jsons, []byte(raw)...) 
+ indexes = append(indexes, res.Index) + k++ + } + } + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + c.value.Indexes = indexes + return i + 1, true + } + if rp.alogok { + break + } + + c.value.Type = Number + c.value.Num = float64(h - 1) + c.value.Raw = strconv.Itoa(h - 1) + c.calcd = true + return i + 1, true + } + if !c.value.Exists() { + if len(multires) > 0 { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + Indexes: queryIndexes, + } + } else if rp.query.all { + c.value = Result{ + Raw: "[]", + Type: JSON, + } + } + } + return i + 1, false + } + if num { + i, val = parseNumber(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + qval.Type = Number + qval.Num, _ = strconv.ParseFloat(val, 64) + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + } + break + } + } + return i, false +} + +func splitPossiblePipe(path string) (left, right string, ok bool) { + // take a quick peek for the pipe character. If found we'll split the piped + // part of the path into the c.pipe field and shorten the rp. + var possible bool + for i := 0; i < len(path); i++ { + if path[i] == '|' { + possible = true + break + } + } + if !possible { + return + } + + if len(path) > 0 && path[0] == '{' { + squashed := squash(path[1:]) + if len(squashed) < len(path)-1 { + squashed = path[:len(squashed)+1] + remain := path[len(squashed):] + if remain[0] == '|' { + return squashed, remain[1:], true + } + } + return + } + + // split the left and right side of the path with the pipe character as + // the delimiter. This is a little tricky because we'll need to basically + // parse the entire path. + for i := 0; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '.' { + if i == len(path)-1 { + return + } + if path[i+1] == '#' { + i += 2 + if i == len(path) { + return + } + if path[i] == '[' || path[i] == '(' { + var start, end byte + if path[i] == '[' { + start, end = '[', ']' + } else { + start, end = '(', ')' + } + // inside selector, balance brackets + i++ + depth := 1 + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == start { + depth++ + } else if path[i] == end { + depth-- + if depth == 0 { + break + } + } else if path[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '"' { + break + } + } + } + } + } + } + } else if path[i] == '|' { + return path[:i], path[i+1:], true + } + } + return +} + +// ForEachLine iterates through lines of JSON as specified by the JSON Lines +// format (http://jsonlines.org/). +// Each line is returned as a GJSON Result. +func ForEachLine(json string, iterator func(line Result) bool) { + var res Result + var i int + for { + i, res, _ = parseAny(json, i, true) + if !res.Exists() { + break + } + if !iterator(res) { + return + } + } +} + +type subSelector struct { + name string + path string +} + +// parseSubSelectors returns the subselectors belonging to a '[path1,path2]' or +// '{"field1":path1,"field2":path2}' type subSelection. It's expected that the +// first character in path is either '[' or '{', and has already been checked +// prior to calling this function. 
+func parseSubSelectors(path string) (sels []subSelector, out string, ok bool) { + modifier := 0 + depth := 1 + colon := 0 + start := 1 + i := 1 + pushSel := func() { + var sel subSelector + if colon == 0 { + sel.path = path[start:i] + } else { + sel.name = path[start:colon] + sel.path = path[colon+1 : i] + } + sels = append(sels, sel) + colon = 0 + modifier = 0 + start = i + 1 + } + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '@': + if modifier == 0 && i > 0 && (path[i-1] == '.' || path[i-1] == '|') { + modifier = i + } + case ':': + if modifier == 0 && colon == 0 && depth == 1 { + colon = i + } + case ',': + if depth == 1 { + pushSel() + } + case '"': + i++ + loop: + for ; i < len(path); i++ { + switch path[i] { + case '\\': + i++ + case '"': + break loop + } + } + case '[', '(', '{': + depth++ + case ']', ')', '}': + depth-- + if depth == 0 { + pushSel() + path = path[i+1:] + return sels, path, true + } + } + } + return +} + +// nameOfLast returns the name of the last component +func nameOfLast(path string) string { + for i := len(path) - 1; i >= 0; i-- { + if path[i] == '|' || path[i] == '.' { + if i > 0 { + if path[i-1] == '\\' { + continue + } + } + return path[i+1:] + } + } + return path +} + +func isSimpleName(component string) bool { + for i := 0; i < len(component); i++ { + if component[i] < ' ' { + return false + } + switch component[i] { + case '[', ']', '{', '}', '(', ')', '#', '|', '!': + return false + } + } + return true +} + +var hexchars = [...]byte{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', +} + +func appendHex16(dst []byte, x uint16) []byte { + return append(dst, + hexchars[x>>12&0xF], hexchars[x>>8&0xF], + hexchars[x>>4&0xF], hexchars[x>>0&0xF], + ) +} + +// DisableEscapeHTML will disable the automatic escaping of certain +// "problamatic" HTML characters when encoding to JSON. +// These character include '>', '<' and '&', which get escaped to \u003e, +// \u0026, and \u003c respectively. +// +// This is a global flag and will affect all further gjson operations. +// Ideally, if used, it should be set one time before other gjson functions +// are called. +var DisableEscapeHTML = false + +// AppendJSONString is a convenience function that converts the provided string +// to a valid JSON string and appends it to dst. +func AppendJSONString(dst []byte, s string) []byte { + dst = append(dst, make([]byte, len(s)+2)...) + dst = append(dst[:len(dst)-len(s)-2], '"') + for i := 0; i < len(s); i++ { + if s[i] < ' ' { + dst = append(dst, '\\') + switch s[i] { + case '\b': + dst = append(dst, 'b') + case '\f': + dst = append(dst, 'f') + case '\n': + dst = append(dst, 'n') + case '\r': + dst = append(dst, 'r') + case '\t': + dst = append(dst, 't') + default: + dst = append(dst, 'u') + dst = appendHex16(dst, uint16(s[i])) + } + } else if !DisableEscapeHTML && + (s[i] == '>' || s[i] == '<' || s[i] == '&') { + dst = append(dst, '\\', 'u') + dst = appendHex16(dst, uint16(s[i])) + } else if s[i] == '\\' { + dst = append(dst, '\\', '\\') + } else if s[i] == '"' { + dst = append(dst, '\\', '"') + } else if s[i] > 127 { + // read utf8 character + r, n := utf8.DecodeRuneInString(s[i:]) + if n == 0 { + break + } + if r == utf8.RuneError && n == 1 { + dst = append(dst, `\ufffd`...) + } else if r == '\u2028' || r == '\u2029' { + dst = append(dst, `\u202`...) + dst = append(dst, hexchars[r&0xF]) + } else { + dst = append(dst, s[i:i+n]...) 
+ } + i = i + n - 1 + } else { + dst = append(dst, s[i]) + } + } + return append(dst, '"') +} + +type parseContext struct { + json string + value Result + pipe string + piped bool + calcd bool + lines bool +} + +// Get searches json for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// When the value is found it's returned immediately. +// +// A path is a series of keys separated by a dot. +// A key may contain special wildcard characters '*' and '?'. +// To access an array value use the index as the key. +// To get the number of elements in an array or to access a child path, use +// the '#' character. +// The dot and wildcard character can be escaped with '\'. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children" >> ["Sara","Alex","Jack"] +// "children.#" >> 3 +// "children.1" >> "Alex" +// "child*.2" >> "Jack" +// "c?ildren.0" >> "Sara" +// "friends.#.first" >> ["James","Roger"] +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. +func Get(json, path string) Result { + if len(path) > 1 { + if (path[0] == '@' && !DisableModifiers) || path[0] == '!' { + // possible modifier + var ok bool + var npath string + var rjson string + if path[0] == '@' && !DisableModifiers { + npath, rjson, ok = execModifier(json, path) + } else if path[0] == '!' { + npath, rjson, ok = execStatic(json, path) + } + if ok { + path = npath + if len(path) > 0 && (path[0] == '|' || path[0] == '.') { + res := Get(rjson, path[1:]) + res.Index = 0 + res.Indexes = nil + return res + } + return Parse(rjson) + } + } + if path[0] == '[' || path[0] == '{' { + // using a subselector path + kind := path[0] + var ok bool + var subs []subSelector + subs, path, ok = parseSubSelectors(path) + if ok { + if len(path) == 0 || (path[0] == '|' || path[0] == '.') { + var b []byte + b = append(b, kind) + var i int + for _, sub := range subs { + res := Get(json, sub.path) + if res.Exists() { + if i > 0 { + b = append(b, ',') + } + if kind == '{' { + if len(sub.name) > 0 { + if sub.name[0] == '"' && Valid(sub.name) { + b = append(b, sub.name...) + } else { + b = AppendJSONString(b, sub.name) + } + } else { + last := nameOfLast(sub.path) + if isSimpleName(last) { + b = AppendJSONString(b, last) + } else { + b = AppendJSONString(b, "_") + } + } + b = append(b, ':') + } + var raw string + if len(res.Raw) == 0 { + raw = res.String() + if len(raw) == 0 { + raw = "null" + } + } else { + raw = res.Raw + } + b = append(b, raw...) + i++ + } + } + b = append(b, kind+2) + var res Result + res.Raw = string(b) + res.Type = JSON + if len(path) > 0 { + res = res.Get(path[1:]) + } + res.Index = 0 + return res + } + } + } + } + var i int + var c = &parseContext{json: json} + if len(path) >= 2 && path[0] == '.' && path[1] == '.' 
{ + c.lines = true + parseArray(c, 0, path[2:]) + } else { + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + } + if c.piped { + res := c.value.Get(c.pipe) + res.Index = 0 + return res + } + fillIndex(json, c) + return c.value +} + +// GetBytes searches json for the specified path. +// If working with bytes, this method preferred over Get(string(data), path) +func GetBytes(json []byte, path string) Result { + return getBytes(json, path) +} + +// runeit returns the rune from the the \uXXXX +func runeit(json string) rune { + n, _ := strconv.ParseUint(json[:4], 16, 64) + return rune(n) +} + +// unescape unescapes a string +func unescape(json string) string { + var str = make([]byte, 0, len(json)) + for i := 0; i < len(json); i++ { + switch { + default: + str = append(str, json[i]) + case json[i] < ' ': + return string(str) + case json[i] == '\\': + i++ + if i >= len(json) { + return string(str) + } + switch json[i] { + default: + return string(str) + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + case '"': + str = append(str, '"') + case 'u': + if i+5 > len(json) { + return string(str) + } + r := runeit(json[i+1:]) + i += 5 + if utf16.IsSurrogate(r) { + // need another code + if len(json[i:]) >= 6 && json[i] == '\\' && + json[i+1] == 'u' { + // we expect it to be correct so just consume it + r = utf16.DecodeRune(r, runeit(json[i+2:])) + i += 6 + } + } + // provide enough space to encode the largest utf8 possible + str = append(str, 0, 0, 0, 0, 0, 0, 0, 0) + n := utf8.EncodeRune(str[len(str)-8:], r) + str = str[:len(str)-8+n] + i-- // backtrack index by one + } + } + } + return string(str) +} + +// Less return true if a token is less than another token. +// The caseSensitive parameter is used when the tokens are Strings. +// The order when comparing two different type is: +// +// Null < False < Number < String < True < JSON +func (t Result) Less(token Result, caseSensitive bool) bool { + if t.Type < token.Type { + return true + } + if t.Type > token.Type { + return false + } + if t.Type == String { + if caseSensitive { + return t.Str < token.Str + } + return stringLessInsensitive(t.Str, token.Str) + } + if t.Type == Number { + return t.Num < token.Num + } + return t.Raw < token.Raw +} + +func stringLessInsensitive(a, b string) bool { + for i := 0; i < len(a) && i < len(b); i++ { + if a[i] >= 'A' && a[i] <= 'Z' { + if b[i] >= 'A' && b[i] <= 'Z' { + // both are uppercase, do nothing + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } else { + // a is uppercase, convert a to lowercase + if a[i]+32 < b[i] { + return true + } else if a[i]+32 > b[i] { + return false + } + } + } else if b[i] >= 'A' && b[i] <= 'Z' { + // b is uppercase, convert b to lowercase + if a[i] < b[i]+32 { + return true + } else if a[i] > b[i]+32 { + return false + } + } else { + // neither are uppercase + if a[i] < b[i] { + return true + } else if a[i] > b[i] { + return false + } + } + } + return len(a) < len(b) +} + +// parseAny parses the next value from a json string. +// A Result is returned when the hit param is set. 
+// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + var tmp parseContext + tmp.value = res + fillIndex(json, &tmp) + return i, tmp.value, true + } + if json[i] <= ' ' { + continue + } + var num bool + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case 'n': + if i+1 < len(json) && json[i+1] != 'u' { + num = true + break + } + fallthrough + case 't', 'f': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'i', 'I', 'N': + num = true + } + if num { + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + } + + } + return i, res, false +} + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = Get(json, path) + } + return res +} + +// GetManyBytes searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. 
+func GetManyBytes(json []byte, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = GetBytes(json, path) + } + return res +} + +func validpayload(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + i, ok = validany(data, i) + if !ok { + return i, false + } + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + } + } + return i, true + case ' ', '\t', '\n', '\r': + continue + } + } + return i, false +} +func validany(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '{': + return validobject(data, i+1) + case '[': + return validarray(data, i+1) + case '"': + return validstring(data, i+1) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return validnumber(data, i+1) + case 't': + return validtrue(data, i+1) + case 'f': + return validfalse(data, i+1) + case 'n': + return validnull(data, i+1) + } + } + return i, false +} +func validobject(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '}': + return i + 1, true + case '"': + key: + if i, ok = validstring(data, i+1); !ok { + return i, false + } + if i, ok = validcolon(data, i); !ok { + return i, false + } + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, '}'); !ok { + return i, false + } + if data[i] == '}' { + return i + 1, true + } + i++ + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '"': + goto key + } + } + return i, false + } + } + return i, false +} +func validcolon(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ':': + return i + 1, true + } + } + return i, false +} +func validcomma(data []byte, i int, end byte) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ',': + return i, true + case end: + return i, true + } + } + return i, false +} +func validarray(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + for ; i < len(data); i++ { + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, ']'); !ok { + return i, false + } + if data[i] == ']' { + return i + 1, true + } + } + case ' ', '\t', '\n', '\r': + continue + case ']': + return i + 1, true + } + } + return i, false +} +func validstring(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + if data[i] < ' ' { + return i, false + } else if data[i] == '\\' { + i++ + if i == len(data) { + return i, false + } + switch data[i] { + default: + return i, false + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + case 'u': + for j := 0; j < 4; j++ { + i++ + if i >= len(data) { + return i, false + } + if !((data[i] >= '0' && data[i] <= '9') || + (data[i] >= 'a' && data[i] <= 'f') || + (data[i] >= 'A' && data[i] <= 'F')) { + return i, false + } + } + } + } else if data[i] == '"' { + return i + 1, true + } + } + return i, false +} +func validnumber(data []byte, i int) (outi int, ok bool) { + i-- + // sign + if data[i] == '-' { + i++ + if i == len(data) { 
+ return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + } + // int + if i == len(data) { + return i, false + } + if data[i] == '0' { + i++ + } else { + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // frac + if i == len(data) { + return i, true + } + if data[i] == '.' { + i++ + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // exp + if i == len(data) { + return i, true + } + if data[i] == 'e' || data[i] == 'E' { + i++ + if i == len(data) { + return i, false + } + if data[i] == '+' || data[i] == '-' { + i++ + } + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + return i, true +} + +func validtrue(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' && + data[i+2] == 'e' { + return i + 3, true + } + return i, false +} +func validfalse(data []byte, i int) (outi int, ok bool) { + if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' && + data[i+2] == 's' && data[i+3] == 'e' { + return i + 4, true + } + return i, false +} +func validnull(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' && + data[i+2] == 'l' { + return i + 3, true + } + return i, false +} + +// Valid returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +func Valid(json string) bool { + _, ok := validpayload(stringBytes(json), 0) + return ok +} + +// ValidBytes returns true if the input is valid json. +// +// if !gjson.Valid(json) { +// return errors.New("invalid json") +// } +// value := gjson.Get(json, "name.last") +// +// If working with bytes, this method preferred over ValidBytes(string(data)) +func ValidBytes(json []byte) bool { + _, ok := validpayload(json, 0) + return ok +} + +func parseUint(s string) (n uint64, ok bool) { + var i int + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + uint64(s[i]-'0') + } else { + return 0, false + } + } + return n, true +} + +func parseInt(s string) (n int64, ok bool) { + var i int + var sign bool + if len(s) > 0 && s[0] == '-' { + sign = true + i++ + } + if i == len(s) { + return 0, false + } + for ; i < len(s); i++ { + if s[i] >= '0' && s[i] <= '9' { + n = n*10 + int64(s[i]-'0') + } else { + return 0, false + } + } + if sign { + return n * -1, true + } + return n, true +} + +// safeInt validates a given JSON number +// ensures it lies within the minimum and maximum representable JSON numbers +func safeInt(f float64) (n int64, ok bool) { + // https://tc39.es/ecma262/#sec-number.min_safe_integer + // https://tc39.es/ecma262/#sec-number.max_safe_integer + if f < -9007199254740991 || f > 9007199254740991 { + return 0, false + } + return int64(f), true +} + +// execStatic parses the path to find a static value. +// The input expects that the path already starts with a '!' 
+func execStatic(json, path string) (pathOut, res string, ok bool) { + name := path[1:] + if len(name) > 0 { + switch name[0] { + case '{', '[', '"', '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9': + _, res = parseSquash(name, 0) + pathOut = name[len(res):] + return pathOut, res, true + } + } + for i := 1; i < len(path); i++ { + if path[i] == '|' { + pathOut = path[i:] + name = path[1:i] + break + } + if path[i] == '.' { + pathOut = path[i:] + name = path[1:i] + break + } + } + switch strings.ToLower(name) { + case "true", "false", "null", "nan", "inf": + return pathOut, name, true + } + return pathOut, res, false +} + +// execModifier parses the path to find a matching modifier function. +// The input expects that the path already starts with a '@' +func execModifier(json, path string) (pathOut, res string, ok bool) { + name := path[1:] + var hasArgs bool + for i := 1; i < len(path); i++ { + if path[i] == ':' { + pathOut = path[i+1:] + name = path[1:i] + hasArgs = len(pathOut) > 0 + break + } + if path[i] == '|' { + pathOut = path[i:] + name = path[1:i] + break + } + if path[i] == '.' { + pathOut = path[i:] + name = path[1:i] + break + } + } + if fn, ok := modifiers[name]; ok { + var args string + if hasArgs { + var parsedArgs bool + switch pathOut[0] { + case '{', '[', '"': + // json arg + res := Parse(pathOut) + if res.Exists() { + args = squash(pathOut) + pathOut = pathOut[len(args):] + parsedArgs = true + } + } + if !parsedArgs { + // simple arg + i := 0 + for ; i < len(pathOut); i++ { + if pathOut[i] == '|' { + break + } + switch pathOut[i] { + case '{', '[', '"', '(': + s := squash(pathOut[i:]) + i += len(s) - 1 + } + } + args = pathOut[:i] + pathOut = pathOut[i:] + } + } + return pathOut, fn(json, args), true + } + return pathOut, res, false +} + +// unwrap removes the '[]' or '{}' characters around json +func unwrap(json string) string { + json = trim(json) + if len(json) >= 2 && (json[0] == '[' || json[0] == '{') { + json = json[1 : len(json)-1] + } + return json +} + +// DisableModifiers will disable the modifier syntax +var DisableModifiers = false + +var modifiers map[string]func(json, arg string) string + +func init() { + modifiers = map[string]func(json, arg string) string{ + "pretty": modPretty, + "ugly": modUgly, + "reverse": modReverse, + "this": modThis, + "flatten": modFlatten, + "join": modJoin, + "valid": modValid, + "keys": modKeys, + "values": modValues, + "tostr": modToStr, + "fromstr": modFromStr, + "group": modGroup, + "dig": modDig, + } +} + +// AddModifier binds a custom modifier command to the GJSON syntax. +// This operation is not thread safe and should be executed prior to +// using all other gjson function. +func AddModifier(name string, fn func(json, arg string) string) { + modifiers[name] = fn +} + +// ModifierExists returns true when the specified modifier exists. +func ModifierExists(name string, fn func(json, arg string) string) bool { + _, ok := modifiers[name] + return ok +} + +// cleanWS remove any non-whitespace from string +func cleanWS(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case ' ', '\t', '\n', '\r': + continue + default: + var s2 []byte + for i := 0; i < len(s); i++ { + switch s[i] { + case ' ', '\t', '\n', '\r': + s2 = append(s2, s[i]) + } + } + return string(s2) + } + } + return s +} + +// @pretty modifier makes the json look nice. 
+func modPretty(json, arg string) string { + if len(arg) > 0 { + opts := *pretty.DefaultOptions + Parse(arg).ForEach(func(key, value Result) bool { + switch key.String() { + case "sortKeys": + opts.SortKeys = value.Bool() + case "indent": + opts.Indent = cleanWS(value.String()) + case "prefix": + opts.Prefix = cleanWS(value.String()) + case "width": + opts.Width = int(value.Int()) + } + return true + }) + return bytesString(pretty.PrettyOptions(stringBytes(json), &opts)) + } + return bytesString(pretty.Pretty(stringBytes(json))) +} + +// @this returns the current element. Can be used to retrieve the root element. +func modThis(json, arg string) string { + return json +} + +// @ugly modifier removes all whitespace. +func modUgly(json, arg string) string { + return bytesString(pretty.Ugly(stringBytes(json))) +} + +// @reverse reverses array elements or root object members. +func modReverse(json, arg string) string { + res := Parse(json) + if res.IsArray() { + var values []Result + res.ForEach(func(_, value Result) bool { + values = append(values, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '[') + for i, j := len(values)-1, 0; i >= 0; i, j = i-1, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, values[i].Raw...) + } + out = append(out, ']') + return bytesString(out) + } + if res.IsObject() { + var keyValues []Result + res.ForEach(func(key, value Result) bool { + keyValues = append(keyValues, key, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '{') + for i, j := len(keyValues)-2, 0; i >= 0; i, j = i-2, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, keyValues[i+0].Raw...) + out = append(out, ':') + out = append(out, keyValues[i+1].Raw...) + } + out = append(out, '}') + return bytesString(out) + } + return json +} + +// @flatten an array with child arrays. +// +// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,[6,7]] +// +// The {"deep":true} arg can be provide for deep flattening. +// +// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,6,7] +// +// The original json is returned when the json is not an array. +func modFlatten(json, arg string) string { + res := Parse(json) + if !res.IsArray() { + return json + } + var deep bool + if arg != "" { + Parse(arg).ForEach(func(key, value Result) bool { + if key.String() == "deep" { + deep = value.Bool() + } + return true + }) + } + var out []byte + out = append(out, '[') + var idx int + res.ForEach(func(_, value Result) bool { + var raw string + if value.IsArray() { + if deep { + raw = unwrap(modFlatten(value.Raw, arg)) + } else { + raw = unwrap(value.Raw) + } + } else { + raw = value.Raw + } + raw = strings.TrimSpace(raw) + if len(raw) > 0 { + if idx > 0 { + out = append(out, ',') + } + out = append(out, raw...) + idx++ + } + return true + }) + out = append(out, ']') + return bytesString(out) +} + +// @keys extracts the keys from an object. +// +// {"first":"Tom","last":"Smith"} -> ["first","last"] +func modKeys(json, arg string) string { + v := Parse(json) + if !v.Exists() { + return "[]" + } + obj := v.IsObject() + var out strings.Builder + out.WriteByte('[') + var i int + v.ForEach(func(key, _ Result) bool { + if i > 0 { + out.WriteByte(',') + } + if obj { + out.WriteString(key.Raw) + } else { + out.WriteString("null") + } + i++ + return true + }) + out.WriteByte(']') + return out.String() +} + +// @values extracts the values from an object. 
+//
+//	{"first":"Tom","last":"Smith"} -> ["Tom","Smith"]
+func modValues(json, arg string) string {
+	v := Parse(json)
+	if !v.Exists() {
+		return "[]"
+	}
+	if v.IsArray() {
+		return json
+	}
+	var out strings.Builder
+	out.WriteByte('[')
+	var i int
+	v.ForEach(func(_, value Result) bool {
+		if i > 0 {
+			out.WriteByte(',')
+		}
+		out.WriteString(value.Raw)
+		i++
+		return true
+	})
+	out.WriteByte(']')
+	return out.String()
+}
+
+// @join multiple objects into a single object.
+//
+//	[{"first":"Tom"},{"last":"Smith"}] -> {"first":"Tom","last":"Smith"}
+//
+// The {"preserve":true} arg can be provided to specify that duplicate keys
+// should be preserved.
+//
+//	[{"first":"Tom","age":37},{"age":41}] -> {"first":"Tom","age":37,"age":41}
+//
+// Without preserved keys:
+//
+//	[{"first":"Tom","age":37},{"age":41}] -> {"first":"Tom","age":41}
+//
+// The original json is returned when the json is not an array.
+func modJoin(json, arg string) string {
+	res := Parse(json)
+	if !res.IsArray() {
+		return json
+	}
+	var preserve bool
+	if arg != "" {
+		Parse(arg).ForEach(func(key, value Result) bool {
+			if key.String() == "preserve" {
+				preserve = value.Bool()
+			}
+			return true
+		})
+	}
+	var out []byte
+	out = append(out, '{')
+	if preserve {
+		// Preserve duplicate keys.
+		var idx int
+		res.ForEach(func(_, value Result) bool {
+			if !value.IsObject() {
+				return true
+			}
+			if idx > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, unwrap(value.Raw)...)
+			idx++
+			return true
+		})
+	} else {
+		// Deduplicate keys and generate an object with stable ordering.
+		var keys []Result
+		kvals := make(map[string]Result)
+		res.ForEach(func(_, value Result) bool {
+			if !value.IsObject() {
+				return true
+			}
+			value.ForEach(func(key, value Result) bool {
+				k := key.String()
+				if _, ok := kvals[k]; !ok {
+					keys = append(keys, key)
+				}
+				kvals[k] = value
+				return true
+			})
+			return true
+		})
+		for i := 0; i < len(keys); i++ {
+			if i > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, keys[i].Raw...)
+			out = append(out, ':')
+			out = append(out, kvals[keys[i].String()].Raw...)
+		}
+	}
+	out = append(out, '}')
+	return bytesString(out)
+}
+
+// @valid ensures that the json is valid before moving on. An empty string is
+// returned when the json is not valid, otherwise it returns the original json.
+func modValid(json, arg string) string {
+	if !Valid(json) {
+		return ""
+	}
+	return json
+}
+
+// @fromstr converts a string to json
+//
+//	"{\"id\":1023,\"name\":\"alert\"}" -> {"id":1023,"name":"alert"}
+func modFromStr(json, arg string) string {
+	if !Valid(json) {
+		return ""
+	}
+	return Parse(json).String()
+}
+
+// @tostr converts json to a string
+//
+//	{"id":1023,"name":"alert"} -> "{\"id\":1023,\"name\":\"alert\"}"
+func modToStr(str, arg string) string {
+	return string(AppendJSONString(nil, str))
+}
+
+func modGroup(json, arg string) string {
+	res := Parse(json)
+	if !res.IsObject() {
+		return ""
+	}
+	var all [][]byte
+	res.ForEach(func(key, value Result) bool {
+		if !value.IsArray() {
+			return true
+		}
+		var idx int
+		value.ForEach(func(_, value Result) bool {
+			if idx == len(all) {
+				all = append(all, []byte{})
+			}
+			all[idx] = append(all[idx], ("," + key.Raw + ":" + value.Raw)...)
+			idx++
+			return true
+		})
+		return true
+	})
+	var data []byte
+	data = append(data, '[')
+	for i, item := range all {
+		if i > 0 {
+			data = append(data, ',')
+		}
+		data = append(data, '{')
+		data = append(data, item[1:]...)
+ data = append(data, '}') + } + data = append(data, ']') + return string(data) +} + +// stringHeader instead of reflect.StringHeader +type stringHeader struct { + data unsafe.Pointer + len int +} + +// sliceHeader instead of reflect.SliceHeader +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +// getBytes casts the input json bytes to a string and safely returns the +// results as uniquely allocated data. This operation is intended to minimize +// copies and allocations for the large json string->[]byte. +func getBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + // safely get the string headers + rawhi := *(*stringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*stringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := sliceHeader{data: rawhi.data, len: rawhi.len, cap: rawhi.len} + strh := sliceHeader{data: strhi.data, len: strhi.len, cap: rawhi.len} + if strh.data == nil { + // str is nil + if rawh.data == nil { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.data == nil { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if uintptr(strh.data) >= uintptr(rawh.data) && + uintptr(strh.data)+uintptr(strh.len) <= + uintptr(rawh.data)+uintptr(rawh.len) { + // Str is a substring of Raw. + start := uintptr(strh.data) - uintptr(rawh.data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+uintptr(strh.len)] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + } + return result +} + +// fillIndex finds the position of Raw data and assigns it to the Index field +// of the resulting value. If the position cannot be found then Index zero is +// used instead. +func fillIndex(json string, c *parseContext) { + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*stringHeader)(unsafe.Pointer(&json)) + rhdr := *(*stringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(uintptr(rhdr.data) - uintptr(jhdr.data)) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } +} + +func stringBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + data: (*stringHeader)(unsafe.Pointer(&s)).data, + len: len(s), + cap: len(s), + })) +} + +func bytesString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func revSquash(json string) string { + // reverse squash + // expects that the tail character is a ']' or '}' or ')' or '"' + // squash the value, ignoring all nested arrays and objects. 
+ i := len(json) - 1 + var depth int + if json[i] != '"' { + depth++ + } + if json[i] == '}' || json[i] == ']' || json[i] == ')' { + i-- + } + for ; i >= 0; i-- { + switch json[i] { + case '"': + i-- + for ; i >= 0; i-- { + if json[i] == '"' { + esc := 0 + for i > 0 && json[i-1] == '\\' { + i-- + esc++ + } + if esc%2 == 1 { + continue + } + i += esc + break + } + } + if depth == 0 { + if i < 0 { + i = 0 + } + return json[i:] + } + case '}', ']', ')': + depth++ + case '{', '[', '(': + depth-- + if depth == 0 { + return json[i:] + } + } + } + return json +} + +// Paths returns the original GJSON paths for a Result where the Result came +// from a simple query path that returns an array, like: +// +// gjson.Get(json, "friends.#.first") +// +// The returned value will be in the form of a JSON array: +// +// ["friends.0.first","friends.1.first","friends.2.first"] +// +// The param 'json' must be the original JSON used when calling Get. +// +// Returns an empty string if the paths cannot be determined, which can happen +// when the Result came from a path that contained a multipath, modifier, +// or a nested query. +func (t Result) Paths(json string) []string { + if t.Indexes == nil { + return nil + } + paths := make([]string, 0, len(t.Indexes)) + t.ForEach(func(_, value Result) bool { + paths = append(paths, value.Path(json)) + return true + }) + if len(paths) != len(t.Indexes) { + return nil + } + return paths +} + +// Path returns the original GJSON path for a Result where the Result came +// from a simple path that returns a single value, like: +// +// gjson.Get(json, "friends.#(last=Murphy)") +// +// The returned value will be in the form of a JSON string: +// +// "friends.0" +// +// The param 'json' must be the original JSON used when calling Get. +// +// Returns an empty string if the paths cannot be determined, which can happen +// when the Result came from a path that contained a multipath, modifier, +// or a nested query. +func (t Result) Path(json string) string { + var path []byte + var comps []string // raw components + i := t.Index - 1 + if t.Index+len(t.Raw) > len(json) { + // JSON cannot safely contain Result. + goto fail + } + if !strings.HasPrefix(json[t.Index:], t.Raw) { + // Result is not at the JSON index as expected. + goto fail + } + for ; i >= 0; i-- { + if json[i] <= ' ' { + continue + } + if json[i] == ':' { + // inside of object, get the key + for ; i >= 0; i-- { + if json[i] != '"' { + continue + } + break + } + raw := revSquash(json[:i+1]) + i = i - len(raw) + comps = append(comps, raw) + // key gotten, now squash the rest + raw = revSquash(json[:i+1]) + i = i - len(raw) + i++ // increment the index for next loop step + } else if json[i] == '{' { + // Encountered an open object. The original result was probably an + // object key. + goto fail + } else if json[i] == ',' || json[i] == '[' { + // inside of an array, count the position + var arrIdx int + if json[i] == ',' { + arrIdx++ + i-- + } + for ; i >= 0; i-- { + if json[i] == ':' { + // Encountered an unexpected colon. The original result was + // probably an object key. 
+ goto fail + } else if json[i] == ',' { + arrIdx++ + } else if json[i] == '[' { + comps = append(comps, strconv.Itoa(arrIdx)) + break + } else if json[i] == ']' || json[i] == '}' || json[i] == '"' { + raw := revSquash(json[:i+1]) + i = i - len(raw) + 1 + } + } + } + } + if len(comps) == 0 { + if DisableModifiers { + goto fail + } + return "@this" + } + for i := len(comps) - 1; i >= 0; i-- { + rcomp := Parse(comps[i]) + if !rcomp.Exists() { + goto fail + } + comp := Escape(rcomp.String()) + path = append(path, '.') + path = append(path, comp...) + } + if len(path) > 0 { + path = path[1:] + } + return string(path) +fail: + return "" +} + +// isSafePathKeyChar returns true if the input character is safe for not +// needing escaping. +func isSafePathKeyChar(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || c <= ' ' || c > '~' || c == '_' || + c == '-' || c == ':' +} + +// Escape returns an escaped path component. +// +// json := `{ +// "user":{ +// "first.name": "Janet", +// "last.name": "Prichard" +// } +// }` +// user := gjson.Get(json, "user") +// println(user.Get(gjson.Escape("first.name")) +// println(user.Get(gjson.Escape("last.name")) +// // Output: +// // Janet +// // Prichard +func Escape(comp string) string { + for i := 0; i < len(comp); i++ { + if !isSafePathKeyChar(comp[i]) { + ncomp := make([]byte, len(comp)+1) + copy(ncomp, comp[:i]) + ncomp = ncomp[:i] + for ; i < len(comp); i++ { + if !isSafePathKeyChar(comp[i]) { + ncomp = append(ncomp, '\\') + } + ncomp = append(ncomp, comp[i]) + } + return string(ncomp) + } + } + return comp +} + +func parseRecursiveDescent(all []Result, parent Result, path string) []Result { + if res := parent.Get(path); res.Exists() { + all = append(all, res) + } + if parent.IsArray() || parent.IsObject() { + parent.ForEach(func(_, val Result) bool { + all = parseRecursiveDescent(all, val, path) + return true + }) + } + return all +} + +func modDig(json, arg string) string { + all := parseRecursiveDescent(nil, Parse(json), arg) + var out []byte + out = append(out, '[') + for i, res := range all { + if i > 0 { + out = append(out, ',') + } + out = append(out, res.Raw...) + } + out = append(out, ']') + return string(out) +} diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/match/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md
new file mode 100644
index 0000000..5fdd4cf
--- /dev/null
+++ b/vendor/github.com/tidwall/match/README.md
@@ -0,0 +1,29 @@
+# Match
+
+[![GoDoc](https://godoc.org/github.com/tidwall/match?status.svg)](https://godoc.org/github.com/tidwall/match)
+
+Match is a very simple pattern matcher where '*' matches on any
+number of characters and '?' matches on any one character.
+
+## Installing
+
+```
+go get -u github.com/tidwall/match
+```
+
+## Example
+
+```go
+match.Match("hello", "*llo")
+match.Match("jello", "?ello")
+match.Match("hello", "h*o")
+```
+
+
+## Contact
+
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+Match source code is available under the MIT [License](/LICENSE).
diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go
new file mode 100644
index 0000000..11da28f
--- /dev/null
+++ b/vendor/github.com/tidwall/match/match.go
@@ -0,0 +1,237 @@
+// Package match provides a simple pattern matcher with unicode support.
+package match
+
+import (
+	"unicode/utf8"
+)
+
+// Match returns true if str matches pattern. This is a very
+// simple wildcard match where '*' matches on any number of characters
+// and '?' matches on any one character.
+//
+// pattern:
+//	{ term }
+// term:
+//	'*'         matches any sequence of non-Separator characters
+//	'?'         matches any single non-Separator character
+//	c           matches character c (c != '*', '?', '\\')
+//	'\\' c      matches character c
+//
+func Match(str, pattern string) bool {
+	if pattern == "*" {
+		return true
+	}
+	return match(str, pattern, 0, nil, -1) == rMatch
+}
+
+// MatchLimit is the same as Match but will limit the complexity of the match
+// operation. This is to avoid long running matches, specifically to avoid ReDos
+// attacks from arbitrary inputs.
+//
+// How it works:
+// The underlying match routine is recursive and may call itself when it
+// encounters a sandwiched wildcard pattern, such as: `user:*:name`.
+// Every time it calls itself a counter is incremented.
+// The operation is stopped when counter > maxcomp*len(str).
+func MatchLimit(str, pattern string, maxcomp int) (matched, stopped bool) {
+	if pattern == "*" {
+		return true, false
+	}
+	counter := 0
+	r := match(str, pattern, len(str), &counter, maxcomp)
+	if r == rStop {
+		return false, true
+	}
+	return r == rMatch, false
+}
+
+type result int
+
+const (
+	rNoMatch result = iota
+	rMatch
+	rStop
+)
+
+func match(str, pat string, slen int, counter *int, maxcomp int) result {
+	// check complexity limit
+	if maxcomp > -1 {
+		if *counter > slen*maxcomp {
+			return rStop
+		}
+		*counter++
+	}
+
+	for len(pat) > 0 {
+		var wild bool
+		pc, ps := rune(pat[0]), 1
+		if pc > 0x7f {
+			pc, ps = utf8.DecodeRuneInString(pat)
+		}
+		var sc rune
+		var ss int
+		if len(str) > 0 {
+			sc, ss = rune(str[0]), 1
+			if sc > 0x7f {
+				sc, ss = utf8.DecodeRuneInString(str)
+			}
+		}
+		switch pc {
+		case '?':
+			if ss == 0 {
+				return rNoMatch
+			}
+		case '*':
+			// Ignore repeating stars.
+			for len(pat) > 1 && pat[1] == '*' {
+				pat = pat[1:]
+			}
+
+			// If this star is the last character then it must be a match.
+ if len(pat) == 1 { + return rMatch + } + + // Match and trim any non-wildcard suffix characters. + var ok bool + str, pat, ok = matchTrimSuffix(str, pat) + if !ok { + return rNoMatch + } + + // Check for single star again. + if len(pat) == 1 { + return rMatch + } + + // Perform recursive wildcard search. + r := match(str, pat[1:], slen, counter, maxcomp) + if r != rNoMatch { + return r + } + if len(str) == 0 { + return rNoMatch + } + wild = true + default: + if ss == 0 { + return rNoMatch + } + if pc == '\\' { + pat = pat[ps:] + pc, ps = utf8.DecodeRuneInString(pat) + if ps == 0 { + return rNoMatch + } + } + if sc != pc { + return rNoMatch + } + } + str = str[ss:] + if !wild { + pat = pat[ps:] + } + } + if len(str) == 0 { + return rMatch + } + return rNoMatch +} + +// matchTrimSuffix matches and trims any non-wildcard suffix characters. +// Returns the trimed string and pattern. +// +// This is called because the pattern contains extra data after the wildcard +// star. Here we compare any suffix characters in the pattern to the suffix of +// the target string. Basically a reverse match that stops when a wildcard +// character is reached. This is a little trickier than a forward match because +// we need to evaluate an escaped character in reverse. +// +// Any matched characters will be trimmed from both the target +// string and the pattern. +func matchTrimSuffix(str, pat string) (string, string, bool) { + // It's expected that the pattern has at least two bytes and the first byte + // is a wildcard star '*' + match := true + for len(str) > 0 && len(pat) > 1 { + pc, ps := utf8.DecodeLastRuneInString(pat) + var esc bool + for i := 0; ; i++ { + if pat[len(pat)-ps-i-1] != '\\' { + if i&1 == 1 { + esc = true + ps++ + } + break + } + } + if pc == '*' && !esc { + match = true + break + } + sc, ss := utf8.DecodeLastRuneInString(str) + if !((pc == '?' && !esc) || pc == sc) { + match = false + break + } + str = str[:len(str)-ss] + pat = pat[:len(pat)-ps] + } + return str, pat, match +} + +var maxRuneBytes = [...]byte{244, 143, 191, 191} + +// Allowable parses the pattern and determines the minimum and maximum allowable +// values that the pattern can represent. +// When the max cannot be determined, 'true' will be returned +// for infinite. +func Allowable(pattern string) (min, max string) { + if pattern == "" || pattern[0] == '*' { + return "", "" + } + + minb := make([]byte, 0, len(pattern)) + maxb := make([]byte, 0, len(pattern)) + var wild bool + for i := 0; i < len(pattern); i++ { + if pattern[i] == '*' { + wild = true + break + } + if pattern[i] == '?' { + minb = append(minb, 0) + maxb = append(maxb, maxRuneBytes[:]...) + } else { + minb = append(minb, pattern[i]) + maxb = append(maxb, pattern[i]) + } + } + if wild { + r, n := utf8.DecodeLastRune(maxb) + if r != utf8.RuneError { + if r < utf8.MaxRune { + r++ + if r > 0x7f { + b := make([]byte, 4) + nn := utf8.EncodeRune(b, r) + maxb = append(maxb[:len(maxb)-n], b[:nn]...) + } else { + maxb = append(maxb[:len(maxb)-n], byte(r)) + } + } + } + } + return string(minb), string(maxb) +} + +// IsPattern returns true if the string is a pattern. +func IsPattern(str string) bool { + for i := 0; i < len(str); i++ { + if str[i] == '*' || str[i] == '?' 
{
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/tidwall/pretty/LICENSE b/vendor/github.com/tidwall/pretty/LICENSE
new file mode 100644
index 0000000..993b83f
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md
new file mode 100644
index 0000000..d3be5e5
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/README.md
@@ -0,0 +1,122 @@
+# Pretty
+
+[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/tidwall/pretty)
+
+Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads.
+
+Getting Started
+===============
+
+## Installing
+
+To start using Pretty, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/pretty
+```
+
+This will retrieve the library.
+
+## Pretty
+
+Using this example:
+
+```json
+{"name": {"first":"Tom","last":"Anderson"}, "age":37,
+"children": ["Sara","Alex","Jack"],
+"fav.movie": "Deer Hunter", "friends": [
+    {"first": "Janet", "last": "Murphy", "age": 44}
+  ]}
+```
+
+The following code:
+```go
+result = pretty.Pretty(example)
+```
+
+Will format the json to:
+
+```json
+{
+  "name": {
+    "first": "Tom",
+    "last": "Anderson"
+  },
+  "age": 37,
+  "children": ["Sara", "Alex", "Jack"],
+  "fav.movie": "Deer Hunter",
+  "friends": [
+    {
+      "first": "Janet",
+      "last": "Murphy",
+      "age": 44
+    }
+  ]
+}
+```
+
+## Color
+
+Color will colorize the json for outputting to the screen.
+
+```go
+result = pretty.Color(json, nil)
+```
+
+Will add color to the result for printing to the terminal.
+The second param is used for customizing the style, and passing nil will use the default `pretty.TerminalStyle`.
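+
+For instance, a minimal program combining the two calls (a sketch that assumes
+only the `Pretty` and `Color` functions shown above):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/tidwall/pretty"
+)
+
+func main() {
+	json := []byte(`{"name":{"first":"Tom","last":"Anderson"},"age":37}`)
+	// Format for readability, then colorize using the default
+	// TerminalStyle (passing nil selects it).
+	out := pretty.Color(pretty.Pretty(json), nil)
+	fmt.Printf("%s", out)
+}
+```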
+
+## Ugly
+
+The following code:
+```go
+result = pretty.Ugly(example)
+```
+
+Will format the json to:
+
+```json
+{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}
+```
+
+## Customized output
+
+There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options:
+
+```go
+type Options struct {
+	// Width is the max column width for single line arrays
+	// Default is 80
+	Width int
+	// Prefix is a prefix for all lines
+	// Default is an empty string
+	Prefix string
+	// Indent is the nested indentation
+	// Default is two spaces
+	Indent string
+	// SortKeys will sort the keys alphabetically
+	// Default is false
+	SortKeys bool
+}
+```
+
+## Performance
+
+Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods.
+```
+BenchmarkPretty-16            1000000    1034 ns/op     720 B/op    2 allocs/op
+BenchmarkPrettySortKeys-16     586797    1983 ns/op    2848 B/op   14 allocs/op
+BenchmarkUgly-16              4652365     254 ns/op     240 B/op    1 allocs/op
+BenchmarkUglyInPlace-16       6481233     183 ns/op       0 B/op    0 allocs/op
+BenchmarkJSONIndent-16         450654    2687 ns/op    1221 B/op    0 allocs/op
+BenchmarkJSONCompact-16        685111    1699 ns/op     442 B/op    0 allocs/op
+```
+
+*These benchmarks were run on a MacBook Pro 2.4 GHz 8-Core Intel Core i9.*
+
+## Contact
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+Pretty source code is available under the MIT [License](/LICENSE).
+
diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go
new file mode 100644
index 0000000..f3f756a
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/pretty.go
@@ -0,0 +1,674 @@
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"sort"
+	"strconv"
+)
+
+// Options is Pretty options
+type Options struct {
+	// Width is the max column width for single line arrays
+	// Default is 80
+	Width int
+	// Prefix is a prefix for all lines
+	// Default is an empty string
+	Prefix string
+	// Indent is the nested indentation
+	// Default is two spaces
+	Indent string
+	// SortKeys will sort the keys alphabetically
+	// Default is false
+	SortKeys bool
+}
+
+// DefaultOptions is the default options for pretty formats.
+var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: "  ", SortKeys: false}
+
+// Pretty converts the input json into a more human readable format where each
+// element is on its own line with clear indentation.
+func Pretty(json []byte) []byte { return PrettyOptions(json, nil) }
+
+// PrettyOptions is like Pretty but with customized options.
+func PrettyOptions(json []byte, opts *Options) []byte {
+	if opts == nil {
+		opts = DefaultOptions
+	}
+	buf := make([]byte, 0, len(json))
+	if len(opts.Prefix) != 0 {
+		buf = append(buf, opts.Prefix...)
+	}
+	buf, _, _, _ = appendPrettyAny(buf, json, 0, true,
+		opts.Width, opts.Prefix, opts.Indent, opts.SortKeys,
+		0, 0, -1)
+	if len(buf) > 0 {
+		buf = append(buf, '\n')
+	}
+	return buf
+}
+
+// Ugly removes insignificant space characters from the input json byte slice
+// and returns the compacted result.
+func Ugly(json []byte) []byte {
+	buf := make([]byte, 0, len(json))
+	return ugly(buf, json)
+}
+
+// UglyInPlace removes insignificant space characters from the input json
+// byte slice and returns the compacted result. This method reuses the
+// input json buffer to avoid allocations. Do not use the original bytes
+// slice upon return.
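+// A typical call site reassigns the result: buf = pretty.UglyInPlace(buf).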
+func UglyInPlace(json []byte) []byte { return ugly(json, json) } + +func ugly(dst, src []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] > ' ' { + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } + } + } + return dst +} + +func isNaNOrInf(src []byte) bool { + return src[0] == 'i' || //Inf + src[0] == 'I' || // inf + src[0] == '+' || // +Inf + src[0] == 'N' || // Nan + (src[0] == 'n' && len(src) > 1 && src[1] != 'u') // nan +} + +func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == '"' { + return appendPrettyString(buf, json, i, nl) + } + + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' || isNaNOrInf(json[i:]) { + return appendPrettyNumber(buf, json, i, nl) + } + if json[i] == '{' { + return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + if json[i] == '[' { + return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + switch json[i] { + case 't': + return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true + case 'f': + return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true + case 'n': + return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true + } + } + return buf, i, nl, true +} + +type pair struct { + kstart, kend int + vstart, vend int +} + +type byKeyVal struct { + sorted bool + json []byte + buf []byte + pairs []pair +} + +func (arr *byKeyVal) Len() int { + return len(arr.pairs) +} +func (arr *byKeyVal) Less(i, j int) bool { + if arr.isLess(i, j, byKey) { + return true + } + if arr.isLess(j, i, byKey) { + return false + } + return arr.isLess(i, j, byVal) +} +func (arr *byKeyVal) Swap(i, j int) { + arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i] + arr.sorted = true +} + +type byKind int + +const ( + byKey byKind = 0 + byVal byKind = 1 +) + +type jtype int + +const ( + jnull jtype = iota + jfalse + jnumber + jstring + jtrue + jjson +) + +func getjtype(v []byte) jtype { + if len(v) == 0 { + return jnull + } + switch v[0] { + case '"': + return jstring + case 'f': + return jfalse + case 't': + return jtrue + case 'n': + return jnull + case '[', '{': + return jjson + default: + return jnumber + } +} + +func (arr *byKeyVal) isLess(i, j int, kind byKind) bool { + k1 := arr.json[arr.pairs[i].kstart:arr.pairs[i].kend] + k2 := arr.json[arr.pairs[j].kstart:arr.pairs[j].kend] + var v1, v2 []byte + if kind == byKey { + v1 = k1 + v2 = k2 + } else { + v1 = bytes.TrimSpace(arr.buf[arr.pairs[i].vstart:arr.pairs[i].vend]) + v2 = bytes.TrimSpace(arr.buf[arr.pairs[j].vstart:arr.pairs[j].vend]) + if len(v1) >= len(k1)+1 { + v1 = bytes.TrimSpace(v1[len(k1)+1:]) + } + if len(v2) >= len(k2)+1 { + v2 = bytes.TrimSpace(v2[len(k2)+1:]) + } + } + t1 := getjtype(v1) + t2 := getjtype(v2) + if t1 < t2 { + return true + } + if t1 > t2 { + return false + } + if t1 == jstring { + s1 := parsestr(v1) + s2 := parsestr(v2) + return string(s1) < string(s2) + } + if t1 == jnumber { + n1, _ := strconv.ParseFloat(string(v1), 64) + n2, _ := strconv.ParseFloat(string(v2), 64) + return n1 < n2 + } + return string(v1) < string(v2) + +} + +func parsestr(s []byte) []byte { + for i := 1; i < len(s); i++ { + 
if s[i] == '\\' { + var str string + json.Unmarshal(s, &str) + return []byte(str) + } + if s[i] == '"' { + return s[1:i] + } + } + return nil +} + +func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + var ok bool + if width > 0 { + if pretty && open == '[' && max == -1 { + // here we try to create a single line array + max := width - (len(buf) - nl) + if max > 3 { + s1, s2 := len(buf), i + buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max) + if ok && len(buf)-s1 <= max { + return buf, i, nl, true + } + buf = buf[:s1] + i = s2 + } + } else if max != -1 && open == '{' { + return buf, i, nl, false + } + } + buf = append(buf, open) + i++ + var pairs []pair + if open == '{' && sortkeys { + pairs = make([]pair, 0, 8) + } + var n int + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == close { + if pretty { + if open == '{' && sortkeys { + buf = sortPairs(json, buf, pairs) + } + if n > 0 { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + } + if buf[len(buf)-1] != open { + buf = appendTabs(buf, prefix, indent, tabs) + } + } + buf = append(buf, close) + return buf, i + 1, nl, open != '{' + } + if open == '[' || json[i] == '"' { + if n > 0 { + buf = append(buf, ',') + if width != -1 && open == '[' { + buf = append(buf, ' ') + } + } + var p pair + if pretty { + nl = len(buf) + if buf[nl-1] == ' ' { + buf[nl-1] = '\n' + } else { + buf = append(buf, '\n') + } + if open == '{' && sortkeys { + p.kstart = i + p.vstart = len(buf) + } + buf = appendTabs(buf, prefix, indent, tabs+1) + } + if open == '{' { + buf, i, nl, _ = appendPrettyString(buf, json, i, nl) + if sortkeys { + p.kend = i + } + buf = append(buf, ':') + if pretty { + buf = append(buf, ' ') + } + } + buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max) + if max != -1 && !ok { + return buf, i, nl, false + } + if pretty && open == '{' && sortkeys { + p.vend = len(buf) + if p.kstart > p.kend || p.vstart > p.vend { + // bad data. disable sorting + sortkeys = false + } else { + pairs = append(pairs, p) + } + } + i-- + n++ + } + } + return buf, i, nl, open != '{' +} +func sortPairs(json, buf []byte, pairs []pair) []byte { + if len(pairs) == 0 { + return buf + } + vstart := pairs[0].vstart + vend := pairs[len(pairs)-1].vend + arr := byKeyVal{false, json, buf, pairs} + sort.Stable(&arr) + if !arr.sorted { + return buf + } + nbuf := make([]byte, 0, vend-vstart) + for i, p := range pairs { + nbuf = append(nbuf, buf[p.vstart:p.vend]...) + if i < len(pairs)-1 { + nbuf = append(nbuf, ',') + nbuf = append(nbuf, '\n') + } + } + return append(buf[:vstart], nbuf...) 
+} + +func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] == '"' { + var sc int + for j := i - 1; j > s; j-- { + if json[j] == '\\' { + sc++ + } else { + break + } + } + if sc%2 == 1 { + continue + } + i++ + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' { + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendTabs(buf []byte, prefix, indent string, tabs int) []byte { + if len(prefix) != 0 { + buf = append(buf, prefix...) + } + if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' { + for i := 0; i < tabs; i++ { + buf = append(buf, ' ', ' ') + } + } else { + for i := 0; i < tabs; i++ { + buf = append(buf, indent...) + } + } + return buf +} + +// Style is the color style +type Style struct { + Key, String, Number [2]string + True, False, Null [2]string + Escape [2]string + Append func(dst []byte, c byte) []byte +} + +func hexp(p byte) byte { + switch { + case p < 10: + return p + '0' + default: + return (p - 10) + 'a' + } +} + +// TerminalStyle is for terminals +var TerminalStyle *Style + +func init() { + TerminalStyle = &Style{ + Key: [2]string{"\x1B[94m", "\x1B[0m"}, + String: [2]string{"\x1B[92m", "\x1B[0m"}, + Number: [2]string{"\x1B[93m", "\x1B[0m"}, + True: [2]string{"\x1B[96m", "\x1B[0m"}, + False: [2]string{"\x1B[96m", "\x1B[0m"}, + Null: [2]string{"\x1B[91m", "\x1B[0m"}, + Escape: [2]string{"\x1B[35m", "\x1B[0m"}, + Append: func(dst []byte, c byte) []byte { + if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') { + dst = append(dst, "\\u00"...) + dst = append(dst, hexp((c>>4)&0xF)) + return append(dst, hexp((c)&0xF)) + } + return append(dst, c) + }, + } +} + +// Color will colorize the json. The style parma is used for customizing +// the colors. Passing nil to the style param will use the default +// TerminalStyle. +func Color(src []byte, style *Style) []byte { + if style == nil { + style = TerminalStyle + } + apnd := style.Append + if apnd == nil { + apnd = func(dst []byte, c byte) []byte { + return append(dst, c) + } + } + type stackt struct { + kind byte + key bool + } + var dst []byte + var stack []stackt + for i := 0; i < len(src); i++ { + if src[i] == '"' { + key := len(stack) > 0 && stack[len(stack)-1].key + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + dst = apnd(dst, '"') + esc := false + uesc := 0 + for i = i + 1; i < len(src); i++ { + if src[i] == '\\' { + if key { + dst = append(dst, style.Key[1]...) + } else { + dst = append(dst, style.String[1]...) + } + dst = append(dst, style.Escape[0]...) + dst = apnd(dst, src[i]) + esc = true + if i+1 < len(src) && src[i+1] == 'u' { + uesc = 5 + } else { + uesc = 1 + } + } else if esc { + dst = apnd(dst, src[i]) + if uesc == 1 { + esc = false + dst = append(dst, style.Escape[1]...) + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) + } + } else { + uesc-- + } + } else { + dst = apnd(dst, src[i]) + } + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + if esc { + dst = append(dst, style.Escape[1]...) + } else if key { + dst = append(dst, style.Key[1]...) 
+ } else { + dst = append(dst, style.String[1]...) + } + } else if src[i] == '{' || src[i] == '[' { + stack = append(stack, stackt{src[i], src[i] == '{'}) + dst = apnd(dst, src[i]) + } else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 { + stack = stack[:len(stack)-1] + dst = apnd(dst, src[i]) + } else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' { + stack[len(stack)-1].key = !stack[len(stack)-1].key + dst = apnd(dst, src[i]) + } else { + var kind byte + if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' || isNaNOrInf(src[i:]) { + kind = '0' + dst = append(dst, style.Number[0]...) + } else if src[i] == 't' { + kind = 't' + dst = append(dst, style.True[0]...) + } else if src[i] == 'f' { + kind = 'f' + dst = append(dst, style.False[0]...) + } else if src[i] == 'n' { + kind = 'n' + dst = append(dst, style.Null[0]...) + } else { + dst = apnd(dst, src[i]) + } + if kind != 0 { + for ; i < len(src); i++ { + if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' { + i-- + break + } + dst = apnd(dst, src[i]) + } + if kind == '0' { + dst = append(dst, style.Number[1]...) + } else if kind == 't' { + dst = append(dst, style.True[1]...) + } else if kind == 'f' { + dst = append(dst, style.False[1]...) + } else if kind == 'n' { + dst = append(dst, style.Null[1]...) + } + } + } + } + return dst +} + +// Spec strips out comments and trailing commas and convert the input to a +// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259 +// +// The resulting JSON will always be the same length as the input and it will +// include all of the same line breaks at matching offsets. This is to ensure +// the result can be later processed by a external parser and that that +// parser will report messages or errors with the correct offsets. +func Spec(src []byte) []byte { + return spec(src, nil) +} + +// SpecInPlace is the same as Spec, but this method reuses the input json +// buffer to avoid allocations. Do not use the original bytes slice upon return. 
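+// As with UglyInPlace, reassign the result: data = pretty.SpecInPlace(data).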
+func SpecInPlace(src []byte) []byte { + return spec(src, src) +} + +func spec(src, dst []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] == '/' { + if i < len(src)-1 { + if src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src); i++ { + if src[i] == '\n' { + dst = append(dst, '\n') + break + } else if src[i] == '\t' || src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + if src[i+1] == '*' { + dst = append(dst, ' ', ' ') + i += 2 + for ; i < len(src)-1; i++ { + if src[i] == '*' && src[i+1] == '/' { + dst = append(dst, ' ', ' ') + i++ + break + } else if src[i] == '\n' || src[i] == '\t' || + src[i] == '\r' { + dst = append(dst, src[i]) + } else { + dst = append(dst, ' ') + } + } + continue + } + } + } + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } else if src[i] == '}' || src[i] == ']' { + for j := len(dst) - 2; j >= 0; j-- { + if dst[j] <= ' ' { + continue + } + if dst[j] == ',' { + dst[j] = ' ' + } + break + } + } + } + return dst +} diff --git a/vendor/github.com/tidwall/sjson/LICENSE b/vendor/github.com/tidwall/sjson/LICENSE new file mode 100644 index 0000000..89593c7 --- /dev/null +++ b/vendor/github.com/tidwall/sjson/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/tidwall/sjson/README.md b/vendor/github.com/tidwall/sjson/README.md new file mode 100644 index 0000000..4598424 --- /dev/null +++ b/vendor/github.com/tidwall/sjson/README.md @@ -0,0 +1,278 @@ +

+# SJSON
+
+[![GoDoc](https://godoc.org/github.com/tidwall/sjson?status.svg)](https://godoc.org/github.com/tidwall/sjson)
+
+*set a json value quickly*

+ +SJSON is a Go package that provides a [very fast](#performance) and simple way to set a value in a json document. +For quickly retrieving json values check out [GJSON](https://github.com/tidwall/gjson). + +For a command line interface check out [JJ](https://github.com/tidwall/jj). + +Getting Started +=============== + +Installing +---------- + +To start using SJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/sjson +``` + +This will retrieve the library. + +Set a value +----------- +Set sets the value for the specified path. +A path is in dot syntax, such as "name.last" or "age". +This function expects that the json is well-formed and validated. +Invalid json will not panic, but it may return back unexpected results. +Invalid paths may return an error. + +```go +package main + +import "github.com/tidwall/sjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value, _ := sjson.Set(json, "name.last", "Anderson") + println(value) +} +``` + +This will print: + +```json +{"name":{"first":"Janet","last":"Anderson"},"age":47} +``` + +Path syntax +----------- + +A path is a series of keys separated by a dot. +The dot and colon characters can be escaped with ``\``. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "James", "last": "Murphy"}, + {"first": "Roger", "last": "Craig"} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children.1" >> "Alex" +"friends.1.last" >> "Craig" +``` + +The `-1` key can be used to append a value to an existing array: + +``` +"children.-1" >> appends a new value to the end of the children array +``` + +Normally number keys are used to modify arrays, but it's possible to force a numeric object key by using the colon character: + +```json +{ + "users":{ + "2313":{"name":"Sara"}, + "7839":{"name":"Andy"} + } +} +``` + +A colon path would look like: + +``` +"users.:2313.name" >> "Sara" +``` + +Supported types +--------------- + +Pretty much any type is supported: + +```go +sjson.Set(`{"key":true}`, "key", nil) +sjson.Set(`{"key":true}`, "key", false) +sjson.Set(`{"key":true}`, "key", 1) +sjson.Set(`{"key":true}`, "key", 10.5) +sjson.Set(`{"key":true}`, "key", "hello") +sjson.Set(`{"key":true}`, "key", []string{"hello", "world"}) +sjson.Set(`{"key":true}`, "key", map[string]interface{}{"hello":"world"}) +``` + +When a type is not recognized, SJSON will fallback to the `encoding/json` Marshaller. 
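+
+For example, a custom struct value falls through to `encoding/json` before
+being inserted (a short sketch; the `User` type here is illustrative):
+
+```go
+type User struct {
+	First string `json:"first"`
+	Last  string `json:"last"`
+}
+
+value, _ := sjson.Set(`{"owner":null}`, "owner", User{First: "Janet", Last: "Prichard"})
+println(value)
+
+// Output:
+// {"owner":{"first":"Janet","last":"Prichard"}}
+```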
+
+Examples
+--------
+
+Set a value from empty document:
+```go
+value, _ := sjson.Set("", "name", "Tom")
+println(value)
+
+// Output:
+// {"name":"Tom"}
+```
+
+Set a nested value from empty document:
+```go
+value, _ := sjson.Set("", "name.last", "Anderson")
+println(value)
+
+// Output:
+// {"name":{"last":"Anderson"}}
+```
+
+Set a new value:
+```go
+value, _ := sjson.Set(`{"name":{"last":"Anderson"}}`, "name.first", "Sara")
+println(value)
+
+// Output:
+// {"name":{"first":"Sara","last":"Anderson"}}
+```
+
+Update an existing value:
+```go
+value, _ := sjson.Set(`{"name":{"last":"Anderson"}}`, "name.last", "Smith")
+println(value)
+
+// Output:
+// {"name":{"last":"Smith"}}
+```
+
+Set a new array value:
+```go
+value, _ := sjson.Set(`{"friends":["Andy","Carol"]}`, "friends.2", "Sara")
+println(value)
+
+// Output:
+// {"friends":["Andy","Carol","Sara"]}
+```
+
+Append an array value by using the `-1` key in a path:
+```go
+value, _ := sjson.Set(`{"friends":["Andy","Carol"]}`, "friends.-1", "Sara")
+println(value)
+
+// Output:
+// {"friends":["Andy","Carol","Sara"]}
+```
+
+Append an array value that is past the end:
+```go
+value, _ := sjson.Set(`{"friends":["Andy","Carol"]}`, "friends.4", "Sara")
+println(value)
+
+// Output:
+// {"friends":["Andy","Carol",null,null,"Sara"]}
+```
+
+Delete a value:
+```go
+value, _ := sjson.Delete(`{"name":{"first":"Sara","last":"Anderson"}}`, "name.first")
+println(value)
+
+// Output:
+// {"name":{"last":"Anderson"}}
+```
+
+Delete an array value:
+```go
+value, _ := sjson.Delete(`{"friends":["Andy","Carol"]}`, "friends.1")
+println(value)
+
+// Output:
+// {"friends":["Andy"]}
+```
+
+Delete the last array value:
+```go
+value, _ := sjson.Delete(`{"friends":["Andy","Carol"]}`, "friends.-1")
+println(value)
+
+// Output:
+// {"friends":["Andy"]}
+```
+
+## Performance
+
+Benchmarks of SJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/),
+[ffjson](https://github.com/pquerna/ffjson),
+[EasyJSON](https://github.com/mailru/easyjson),
+and [Gabs](https://github.com/Jeffail/gabs)
+
+```
+Benchmark_SJSON-8                  3000000      805 ns/op    1077 B/op     3 allocs/op
+Benchmark_SJSON_ReplaceInPlace-8   3000000      449 ns/op       0 B/op     0 allocs/op
+Benchmark_JSON_Map-8                300000    21236 ns/op    6392 B/op   150 allocs/op
+Benchmark_JSON_Struct-8             300000    14691 ns/op    1789 B/op    24 allocs/op
+Benchmark_Gabs-8                    300000    21311 ns/op    6752 B/op   150 allocs/op
+Benchmark_FFJSON-8                  300000    17673 ns/op    3589 B/op    47 allocs/op
+Benchmark_EasyJSON-8               1500000     3119 ns/op    1061 B/op    13 allocs/op
+```
+
+JSON document used:
+
+```json
+{
+  "widget": {
+    "debug": "on",
+    "window": {
+      "title": "Sample Konfabulator Widget",
+      "name": "main_window",
+      "width": 500,
+      "height": 500
+    },
+    "image": {
+      "src": "Images/Sun.png",
+      "hOffset": 250,
+      "vOffset": 250,
+      "alignment": "center"
+    },
+    "text": {
+      "data": "Click Here",
+      "size": 36,
+      "style": "bold",
+      "vOffset": 100,
+      "alignment": "center",
+      "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
+    }
+  }
+}
+```
+
+Each operation was rotated through one of the following search paths:
+
+```
+widget.window.name
+widget.image.hOffset
+widget.text.onMouseUp
+```
+
+*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7 and can be found [here](https://github.com/tidwall/sjson-benchmarks)*.
+
+## Contact
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+SJSON source code is available under the MIT [License](/LICENSE).
diff --git a/vendor/github.com/tidwall/sjson/logo.png b/vendor/github.com/tidwall/sjson/logo.png new file mode 100644 index 0000000..b5aa257 Binary files /dev/null and b/vendor/github.com/tidwall/sjson/logo.png differ diff --git a/vendor/github.com/tidwall/sjson/sjson.go b/vendor/github.com/tidwall/sjson/sjson.go new file mode 100644 index 0000000..a55eef3 --- /dev/null +++ b/vendor/github.com/tidwall/sjson/sjson.go @@ -0,0 +1,737 @@ +// Package sjson provides setting json values. +package sjson + +import ( + jsongo "encoding/json" + "sort" + "strconv" + "unsafe" + + "github.com/tidwall/gjson" +) + +type errorType struct { + msg string +} + +func (err *errorType) Error() string { + return err.msg +} + +// Options represents additional options for the Set and Delete functions. +type Options struct { + // Optimistic is a hint that the value likely exists which + // allows for the sjson to perform a fast-track search and replace. + Optimistic bool + // ReplaceInPlace is a hint to replace the input json rather than + // allocate a new json byte slice. When this field is specified + // the input json will not longer be valid and it should not be used + // In the case when the destination slice doesn't have enough free + // bytes to replace the data in place, a new bytes slice will be + // created under the hood. + // The Optimistic flag must be set to true and the input must be a + // byte slice in order to use this field. + ReplaceInPlace bool +} + +type pathResult struct { + part string // current key part + gpart string // gjson get part + path string // remaining path + force bool // force a string key + more bool // there is more path to parse +} + +func isSimpleChar(ch byte) bool { + switch ch { + case '|', '#', '@', '*', '?': + return false + default: + return true + } +} + +func parsePath(path string) (res pathResult, simple bool) { + var r pathResult + if len(path) > 0 && path[0] == ':' { + r.force = true + path = path[1:] + } + for i := 0; i < len(path); i++ { + if path[i] == '.' { + r.part = path[:i] + r.gpart = path[:i] + r.path = path[i+1:] + r.more = true + return r, true + } + if !isSimpleChar(path[i]) { + return r, false + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + gpart := []byte(path[:i+1]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + gpart = append(gpart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + gpart = append(gpart, '\\') + i++ + if i < len(path) { + epart = append(epart, path[i]) + gpart = append(gpart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + r.gpart = string(gpart) + r.path = path[i+1:] + r.more = true + return r, true + } else if !isSimpleChar(path[i]) { + return r, false + } + epart = append(epart, path[i]) + gpart = append(gpart, path[i]) + } + } + // append the last part + r.part = string(epart) + r.gpart = string(gpart) + return r, true + } + } + r.part = path + r.gpart = path + return r, true +} + +func mustMarshalString(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > 0x7f || s[i] == '"' || s[i] == '\\' { + return true + } + } + return false +} + +// appendStringify makes a json string and appends to buf. +func appendStringify(buf []byte, s string) []byte { + if mustMarshalString(s) { + b, _ := jsongo.Marshal(s) + return append(buf, b...) + } + buf = append(buf, '"') + buf = append(buf, s...) 
+ buf = append(buf, '"') + return buf +} + +// appendBuild builds a json block from a json path. +func appendBuild(buf []byte, array bool, paths []pathResult, raw string, + stringify bool) []byte { + if !array { + buf = appendStringify(buf, paths[0].part) + buf = append(buf, ':') + } + if len(paths) > 1 { + n, numeric := atoui(paths[1]) + if numeric || (!paths[1].force && paths[1].part == "-1") { + buf = append(buf, '[') + buf = appendRepeat(buf, "null,", n) + buf = appendBuild(buf, true, paths[1:], raw, stringify) + buf = append(buf, ']') + } else { + buf = append(buf, '{') + buf = appendBuild(buf, false, paths[1:], raw, stringify) + buf = append(buf, '}') + } + } else { + if stringify { + buf = appendStringify(buf, raw) + } else { + buf = append(buf, raw...) + } + } + return buf +} + +// atoui does a rip conversion of string -> unigned int. +func atoui(r pathResult) (n int, ok bool) { + if r.force { + return 0, false + } + for i := 0; i < len(r.part); i++ { + if r.part[i] < '0' || r.part[i] > '9' { + return 0, false + } + n = n*10 + int(r.part[i]-'0') + } + return n, true +} + +// appendRepeat repeats string "n" times and appends to buf. +func appendRepeat(buf []byte, s string, n int) []byte { + for i := 0; i < n; i++ { + buf = append(buf, s...) + } + return buf +} + +// trim does a rip trim +func trim(s string) string { + for len(s) > 0 { + if s[0] <= ' ' { + s = s[1:] + continue + } + break + } + for len(s) > 0 { + if s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + continue + } + break + } + return s +} + +// deleteTailItem deletes the previous key or comma. +func deleteTailItem(buf []byte) ([]byte, bool) { +loop: + for i := len(buf) - 1; i >= 0; i-- { + // look for either a ',',':','[' + switch buf[i] { + case '[': + return buf, true + case ',': + return buf[:i], false + case ':': + // delete tail string + i-- + for ; i >= 0; i-- { + if buf[i] == '"' { + i-- + for ; i >= 0; i-- { + if buf[i] == '"' { + i-- + if i >= 0 && buf[i] == '\\' { + i-- + continue + } + for ; i >= 0; i-- { + // look for either a ',','{' + switch buf[i] { + case '{': + return buf[:i+1], true + case ',': + return buf[:i], false + } + } + } + } + break + } + } + break loop + } + } + return buf, false +} + +var errNoChange = &errorType{"no change"} + +func appendRawPaths(buf []byte, jstr string, paths []pathResult, raw string, + stringify, del bool) ([]byte, error) { + var err error + var res gjson.Result + var found bool + if del { + if paths[0].part == "-1" && !paths[0].force { + res = gjson.Get(jstr, "#") + if res.Int() > 0 { + res = gjson.Get(jstr, strconv.FormatInt(int64(res.Int()-1), 10)) + found = true + } + } + } + if !found { + res = gjson.Get(jstr, paths[0].gpart) + } + if res.Index > 0 { + if len(paths) > 1 { + buf = append(buf, jstr[:res.Index]...) + buf, err = appendRawPaths(buf, res.Raw, paths[1:], raw, + stringify, del) + if err != nil { + return nil, err + } + buf = append(buf, jstr[res.Index+len(res.Raw):]...) + return buf, nil + } + buf = append(buf, jstr[:res.Index]...) + var exidx int // additional forward stripping + if del { + var delNextComma bool + buf, delNextComma = deleteTailItem(buf) + if delNextComma { + i, j := res.Index+len(res.Raw), 0 + for ; i < len(jstr); i, j = i+1, j+1 { + if jstr[i] <= ' ' { + continue + } + if jstr[i] == ',' { + exidx = j + 1 + } + break + } + } + } else { + if stringify { + buf = appendStringify(buf, raw) + } else { + buf = append(buf, raw...) + } + } + buf = append(buf, jstr[res.Index+len(res.Raw)+exidx:]...) 
+ return buf, nil + } + if del { + return nil, errNoChange + } + n, numeric := atoui(paths[0]) + isempty := true + for i := 0; i < len(jstr); i++ { + if jstr[i] > ' ' { + isempty = false + break + } + } + if isempty { + if numeric { + jstr = "[]" + } else { + jstr = "{}" + } + } + jsres := gjson.Parse(jstr) + if jsres.Type != gjson.JSON { + if numeric { + jstr = "[]" + } else { + jstr = "{}" + } + jsres = gjson.Parse(jstr) + } + var comma bool + for i := 1; i < len(jsres.Raw); i++ { + if jsres.Raw[i] <= ' ' { + continue + } + if jsres.Raw[i] == '}' || jsres.Raw[i] == ']' { + break + } + comma = true + break + } + switch jsres.Raw[0] { + default: + return nil, &errorType{"json must be an object or array"} + case '{': + end := len(jsres.Raw) - 1 + for ; end > 0; end-- { + if jsres.Raw[end] == '}' { + break + } + } + buf = append(buf, jsres.Raw[:end]...) + if comma { + buf = append(buf, ',') + } + buf = appendBuild(buf, false, paths, raw, stringify) + buf = append(buf, '}') + return buf, nil + case '[': + var appendit bool + if !numeric { + if paths[0].part == "-1" && !paths[0].force { + appendit = true + } else { + return nil, &errorType{ + "cannot set array element for non-numeric key '" + + paths[0].part + "'"} + } + } + if appendit { + njson := trim(jsres.Raw) + if njson[len(njson)-1] == ']' { + njson = njson[:len(njson)-1] + } + buf = append(buf, njson...) + if comma { + buf = append(buf, ',') + } + + buf = appendBuild(buf, true, paths, raw, stringify) + buf = append(buf, ']') + return buf, nil + } + buf = append(buf, '[') + ress := jsres.Array() + for i := 0; i < len(ress); i++ { + if i > 0 { + buf = append(buf, ',') + } + buf = append(buf, ress[i].Raw...) + } + if len(ress) == 0 { + buf = appendRepeat(buf, "null,", n-len(ress)) + } else { + buf = appendRepeat(buf, ",null", n-len(ress)) + if comma { + buf = append(buf, ',') + } + } + buf = appendBuild(buf, true, paths, raw, stringify) + buf = append(buf, ']') + return buf, nil + } +} + +func isOptimisticPath(path string) bool { + for i := 0; i < len(path); i++ { + if path[i] < '.' || path[i] > 'z' { + return false + } + if path[i] > '9' && path[i] < 'A' { + return false + } + if path[i] > 'z' { + return false + } + } + return true +} + +// Set sets a json value for the specified path. +// A path is in dot syntax, such as "name.last" or "age". +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// An error is returned if the path is not valid. +// +// A path is a series of keys separated by a dot. +// +// { +// "name": {"first": "Tom", "last": "Anderson"}, +// "age":37, +// "children": ["Sara","Alex","Jack"], +// "friends": [ +// {"first": "James", "last": "Murphy"}, +// {"first": "Roger", "last": "Craig"} +// ] +// } +// "name.last" >> "Anderson" +// "age" >> 37 +// "children.1" >> "Alex" +// +func Set(json, path string, value interface{}) (string, error) { + return SetOptions(json, path, value, nil) +} + +// SetBytes sets a json value for the specified path. +// If working with bytes, this method preferred over +// Set(string(data), path, value) +func SetBytes(json []byte, path string, value interface{}) ([]byte, error) { + return SetBytesOptions(json, path, value, nil) +} + +// SetRaw sets a raw json value for the specified path. +// This function works the same as Set except that the value is set as a +// raw block of json. This allows for setting premarshalled json objects. 
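+//
+// A hypothetical example (not from the package's own documentation):
+//
+//	value, _ := sjson.SetRaw(`{"name":{"last":"Anderson"}}`, "name.last", `{"foo":"bar"}`)
+//	// value is now `{"name":{"last":{"foo":"bar"}}}`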
+func SetRaw(json, path, value string) (string, error) { + return SetRawOptions(json, path, value, nil) +} + +// SetRawOptions sets a raw json value for the specified path with options. +// This furnction works the same as SetOptions except that the value is set +// as a raw block of json. This allows for setting premarshalled json objects. +func SetRawOptions(json, path, value string, opts *Options) (string, error) { + var optimistic bool + if opts != nil { + optimistic = opts.Optimistic + } + res, err := set(json, path, value, false, false, optimistic, false) + if err == errNoChange { + return json, nil + } + return string(res), err +} + +// SetRawBytes sets a raw json value for the specified path. +// If working with bytes, this method preferred over +// SetRaw(string(data), path, value) +func SetRawBytes(json []byte, path string, value []byte) ([]byte, error) { + return SetRawBytesOptions(json, path, value, nil) +} + +type dtype struct{} + +// Delete deletes a value from json for the specified path. +func Delete(json, path string) (string, error) { + return Set(json, path, dtype{}) +} + +// DeleteBytes deletes a value from json for the specified path. +func DeleteBytes(json []byte, path string) ([]byte, error) { + return SetBytes(json, path, dtype{}) +} + +type stringHeader struct { + data unsafe.Pointer + len int +} + +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +func set(jstr, path, raw string, + stringify, del, optimistic, inplace bool) ([]byte, error) { + if path == "" { + return []byte(jstr), &errorType{"path cannot be empty"} + } + if !del && optimistic && isOptimisticPath(path) { + res := gjson.Get(jstr, path) + if res.Exists() && res.Index > 0 { + sz := len(jstr) - len(res.Raw) + len(raw) + if stringify { + sz += 2 + } + if inplace && sz <= len(jstr) { + if !stringify || !mustMarshalString(raw) { + jsonh := *(*stringHeader)(unsafe.Pointer(&jstr)) + jsonbh := sliceHeader{ + data: jsonh.data, len: jsonh.len, cap: jsonh.len} + jbytes := *(*[]byte)(unsafe.Pointer(&jsonbh)) + if stringify { + jbytes[res.Index] = '"' + copy(jbytes[res.Index+1:], []byte(raw)) + jbytes[res.Index+1+len(raw)] = '"' + copy(jbytes[res.Index+1+len(raw)+1:], + jbytes[res.Index+len(res.Raw):]) + } else { + copy(jbytes[res.Index:], []byte(raw)) + copy(jbytes[res.Index+len(raw):], + jbytes[res.Index+len(res.Raw):]) + } + return jbytes[:sz], nil + } + return []byte(jstr), nil + } + buf := make([]byte, 0, sz) + buf = append(buf, jstr[:res.Index]...) + if stringify { + buf = appendStringify(buf, raw) + } else { + buf = append(buf, raw...) + } + buf = append(buf, jstr[res.Index+len(res.Raw):]...) 
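+			// optimistic fast path: the replacement was spliced into a pre-sized buffer without re-parsing the path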
+ return buf, nil + } + } + var paths []pathResult + r, simple := parsePath(path) + if simple { + paths = append(paths, r) + for r.more { + r, simple = parsePath(r.path) + if !simple { + break + } + paths = append(paths, r) + } + } + if !simple { + if del { + return []byte(jstr), + &errorType{"cannot delete value from a complex path"} + } + return setComplexPath(jstr, path, raw, stringify) + } + njson, err := appendRawPaths(nil, jstr, paths, raw, stringify, del) + if err != nil { + return []byte(jstr), err + } + return njson, nil +} + +func setComplexPath(jstr, path, raw string, stringify bool) ([]byte, error) { + res := gjson.Get(jstr, path) + if !res.Exists() || !(res.Index != 0 || len(res.Indexes) != 0) { + return []byte(jstr), errNoChange + } + if res.Index != 0 { + njson := []byte(jstr[:res.Index]) + if stringify { + njson = appendStringify(njson, raw) + } else { + njson = append(njson, raw...) + } + njson = append(njson, jstr[res.Index+len(res.Raw):]...) + jstr = string(njson) + } + if len(res.Indexes) > 0 { + type val struct { + index int + res gjson.Result + } + vals := make([]val, 0, len(res.Indexes)) + res.ForEach(func(_, vres gjson.Result) bool { + vals = append(vals, val{res: vres}) + return true + }) + if len(res.Indexes) != len(vals) { + return []byte(jstr), errNoChange + } + for i := 0; i < len(res.Indexes); i++ { + vals[i].index = res.Indexes[i] + } + sort.SliceStable(vals, func(i, j int) bool { + return vals[i].index > vals[j].index + }) + for _, val := range vals { + vres := val.res + index := val.index + njson := []byte(jstr[:index]) + if stringify { + njson = appendStringify(njson, raw) + } else { + njson = append(njson, raw...) + } + njson = append(njson, jstr[index+len(vres.Raw):]...) + jstr = string(njson) + } + } + return []byte(jstr), nil +} + +// SetOptions sets a json value for the specified path with options. +// A path is in dot syntax, such as "name.last" or "age". +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// An error is returned if the path is not valid. +func SetOptions(json, path string, value interface{}, + opts *Options) (string, error) { + if opts != nil { + if opts.ReplaceInPlace { + // it's not safe to replace bytes in-place for strings + // copy the Options and set options.ReplaceInPlace to false. + nopts := *opts + opts = &nopts + opts.ReplaceInPlace = false + } + } + jsonh := *(*stringHeader)(unsafe.Pointer(&json)) + jsonbh := sliceHeader{data: jsonh.data, len: jsonh.len, cap: jsonh.len} + jsonb := *(*[]byte)(unsafe.Pointer(&jsonbh)) + res, err := SetBytesOptions(jsonb, path, value, opts) + return string(res), err +} + +// SetBytesOptions sets a json value for the specified path with options. 
+// If working with bytes, this method preferred over +// SetOptions(string(data), path, value) +func SetBytesOptions(json []byte, path string, value interface{}, + opts *Options) ([]byte, error) { + var optimistic, inplace bool + if opts != nil { + optimistic = opts.Optimistic + inplace = opts.ReplaceInPlace + } + jstr := *(*string)(unsafe.Pointer(&json)) + var res []byte + var err error + switch v := value.(type) { + default: + b, merr := jsongo.Marshal(value) + if merr != nil { + return nil, merr + } + raw := *(*string)(unsafe.Pointer(&b)) + res, err = set(jstr, path, raw, false, false, optimistic, inplace) + case dtype: + res, err = set(jstr, path, "", false, true, optimistic, inplace) + case string: + res, err = set(jstr, path, v, true, false, optimistic, inplace) + case []byte: + raw := *(*string)(unsafe.Pointer(&v)) + res, err = set(jstr, path, raw, true, false, optimistic, inplace) + case bool: + if v { + res, err = set(jstr, path, "true", false, false, optimistic, inplace) + } else { + res, err = set(jstr, path, "false", false, false, optimistic, inplace) + } + case int8: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case int16: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case int32: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case int64: + res, err = set(jstr, path, strconv.FormatInt(int64(v), 10), + false, false, optimistic, inplace) + case uint8: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case uint16: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case uint32: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case uint64: + res, err = set(jstr, path, strconv.FormatUint(uint64(v), 10), + false, false, optimistic, inplace) + case float32: + res, err = set(jstr, path, strconv.FormatFloat(float64(v), 'f', -1, 64), + false, false, optimistic, inplace) + case float64: + res, err = set(jstr, path, strconv.FormatFloat(float64(v), 'f', -1, 64), + false, false, optimistic, inplace) + } + if err == errNoChange { + return json, nil + } + return res, err +} + +// SetRawBytesOptions sets a raw json value for the specified path with options. 
+// If working with bytes, this method preferred over +// SetRawOptions(string(data), path, value, opts) +func SetRawBytesOptions(json []byte, path string, value []byte, + opts *Options) ([]byte, error) { + jstr := *(*string)(unsafe.Pointer(&json)) + vstr := *(*string)(unsafe.Pointer(&value)) + var optimistic, inplace bool + if opts != nil { + optimistic = opts.Optimistic + inplace = opts.ReplaceInPlace + } + res, err := set(jstr, path, vstr, false, false, optimistic, inplace) + if err == errNoChange { + return json, nil + } + return res, err +} diff --git a/vendor/github.com/whosonfirst/go-ioutil/.gitignore b/vendor/github.com/whosonfirst/go-ioutil/.gitignore new file mode 100644 index 0000000..afa44cd --- /dev/null +++ b/vendor/github.com/whosonfirst/go-ioutil/.gitignore @@ -0,0 +1,11 @@ +*~ +pkg +src +!vendor/src +bin +!bin/.gitignore +*.log +*.json +.travis.yml +*.db +testdata/*.txt \ No newline at end of file diff --git a/vendor/github.com/whosonfirst/go-ioutil/LICENSE b/vendor/github.com/whosonfirst/go-ioutil/LICENSE new file mode 100644 index 0000000..7832dde --- /dev/null +++ b/vendor/github.com/whosonfirst/go-ioutil/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2021, Aaron Straup Cope +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/whosonfirst/go-ioutil/README.md b/vendor/github.com/whosonfirst/go-ioutil/README.md new file mode 100644 index 0000000..4fe61b3 --- /dev/null +++ b/vendor/github.com/whosonfirst/go-ioutil/README.md @@ -0,0 +1,42 @@ +# go-ioutil + +Go package for creating instances conforming to the Go 1.16 `io.ReadSeekCloser` interface from a variety of io.Read* instances that implement some but not all of the `io.Reader`, `io.Seeker` and `io.Closer` interfaces. 
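+
+Note that sources which do not already implement `io.Seeker` are buffered in memory the first time they are read or seeked, so very large inputs carry a corresponding memory cost.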
+
+## Documentation
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/whosonfirst/go-ioutil.svg)](https://pkg.go.dev/github.com/whosonfirst/go-ioutil)
+
+## Example
+
+```
+import (
+	"bytes"
+	"github.com/whosonfirst/go-ioutil"
+	"io"
+	"log"
+	"os"
+)
+
+func main() {
+
+	fh, _ := os.Open("README.md")
+
+	rsc, _ := ioutil.NewReadSeekCloser(fh)
+
+	body, _ := io.ReadAll(rsc)
+
+	rsc.Seek(0, 0)
+
+	body2, _ := io.ReadAll(rsc)
+
+	same := bytes.Equal(body, body2)
+	log.Printf("Same %t\n", same)
+
+	rsc.Close()
+}
+```
+
+_Error handling removed for brevity._
+
+## See also
+
+* https://golang.org/pkg/io/#ReadSeekCloser
\ No newline at end of file
diff --git a/vendor/github.com/whosonfirst/go-ioutil/doc.go b/vendor/github.com/whosonfirst/go-ioutil/doc.go
new file mode 100644
index 0000000..954c86d
--- /dev/null
+++ b/vendor/github.com/whosonfirst/go-ioutil/doc.go
@@ -0,0 +1,30 @@
+// Package ioutil provides methods for creating a new instance conforming to the Go 1.16 io.ReadSeekCloser interface from a variety of io.Read* instances that implement some but not all of the io.Reader, io.Seeker and io.Closer interfaces.
+//
+// Example
+//
+//	import (
+//		"bytes"
+//		"github.com/whosonfirst/go-ioutil"
+//		"io"
+//		"log"
+//		"os"
+//	)
+//
+//	func main() {
+//
+//		fh, _ := os.Open("README.md")
+//
+//		rsc, _ := ioutil.NewReadSeekCloser(fh)
+//
+//		body, _ := io.ReadAll(rsc)
+//
+//		rsc.Seek(0, 0)
+//
+//		body2, _ := io.ReadAll(rsc)
+//
+//		same := bytes.Equal(body, body2)
+//		log.Printf("Same %t\n", same)
+//
+//		rsc.Close()
+//	}
+package ioutil
diff --git a/vendor/github.com/whosonfirst/go-ioutil/readseekcloser.go b/vendor/github.com/whosonfirst/go-ioutil/readseekcloser.go
new file mode 100644
index 0000000..f9700b8
--- /dev/null
+++ b/vendor/github.com/whosonfirst/go-ioutil/readseekcloser.go
@@ -0,0 +1,124 @@
+package ioutil
+
+// This is only here until there is an equivalent package/construct in the core Go language
+// (20210217/thisisaaronland)
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// ReadSeekCloser implements the io.Reader, io.Seeker and io.Closer interfaces.
+type ReadSeekCloser struct {
+	io.Reader
+	io.Seeker
+	io.Closer
+	reader bool
+	closer bool
+	seeker bool
+	fh     interface{}
+	br     *bytes.Reader
+	mu     *sync.RWMutex
+}
+
+// NewReadSeekCloser creates a new `ReadSeekCloser` instance conforming to the Go 1.16 `io.ReadSeekCloser` interface. This method accepts the following types: io.ReadSeekCloser, io.Reader, io.ReadCloser and io.ReadSeeker.
+func NewReadSeekCloser(fh interface{}) (io.ReadSeekCloser, error) {
+
+	reader := false
+	seeker := false
+	closer := false
+
+	switch fh.(type) {
+	case io.ReadSeekCloser:
+		return fh.(io.ReadSeekCloser), nil
+	case io.Closer:
+		closer = true
+	case io.ReadCloser:
+		reader = true
+		closer = true
+	case io.ReadSeeker:
+		reader = true
+		seeker = true
+	case io.Reader:
+		reader = true
+	default:
+		return nil, fmt.Errorf("Invalid or unsupported type: %T", fh)
+	}
+
+	mu := new(sync.RWMutex)
+
+	rsc := &ReadSeekCloser{
+		reader: reader,
+		seeker: seeker,
+		closer: closer,
+		fh:     fh,
+		mu:     mu,
+	}
+
+	return rsc, nil
+}
+
+// Read implements the standard `io.Reader` interface. If the underlying value does not implement `io.Seeker` its contents are buffered in memory on first use so that it can be both read and seeked; when the input is exhausted the error returned is `io.EOF`.
+func (rsc *ReadSeekCloser) Read(p []byte) (n int, err error) {
+
+	if rsc.seeker {
+		return rsc.fh.(io.Reader).Read(p)
+	}
+
+	br, err := rsc.bytesReader()
+
+	if err != nil {
+		return 0, err
+	}
+
+	return br.Read(p)
+}
+
+// Close implements the standard `io.Closer` interface. If the underlying value implements `io.Closer` it is closed; otherwise this method is a no-op.
+func (rsc *ReadSeekCloser) Close() error {
+
+	if rsc.closer {
+		// assert io.Closer (rather than io.ReadCloser) since that is all Close requires
+		return rsc.fh.(io.Closer).Close()
+	}
+
+	return nil
+}
+
+// Seek implements the `io.Seeker` interface.
+func (rsc *ReadSeekCloser) Seek(offset int64, whence int) (int64, error) {
+
+	if rsc.seeker {
+		return rsc.fh.(io.Seeker).Seek(offset, whence)
+	}
+
+	br, err := rsc.bytesReader()
+
+	if err != nil {
+		return 0, err
+	}
+
+	return br.Seek(offset, whence)
+}
+
+// bytesReader lazily buffers the underlying (non-seekable) reader into memory so that it can be read repeatedly and seeked.
+func (rsc *ReadSeekCloser) bytesReader() (*bytes.Reader, error) {
+
+	rsc.mu.Lock()
+	defer rsc.mu.Unlock()
+
+	if rsc.br != nil {
+		return rsc.br, nil
+	}
+
+	body, err := io.ReadAll(rsc.fh.(io.Reader))
+
+	if err != nil {
+		return nil, err
+	}
+
+	br := bytes.NewReader(body)
+	rsc.br = br
+
+	return br, nil
+}
diff --git a/vendor/github.com/whosonfirst/go-reader/.gitignore b/vendor/github.com/whosonfirst/go-reader/.gitignore
new file mode 100644
index 0000000..afa44cd
--- /dev/null
+++ b/vendor/github.com/whosonfirst/go-reader/.gitignore
@@ -0,0 +1,11 @@
+*~
+pkg
+src
+!vendor/src
+bin
+!bin/.gitignore
+*.log
+*.json
+.travis.yml
+*.db
+testdata/*.txt
\ No newline at end of file
diff --git a/vendor/github.com/whosonfirst/go-reader/LICENSE b/vendor/github.com/whosonfirst/go-reader/LICENSE
new file mode 100644
index 0000000..29b6a83
--- /dev/null
+++ b/vendor/github.com/whosonfirst/go-reader/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2019, Aaron Straup Cope
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the {organization} nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/whosonfirst/go-reader/Makefile b/vendor/github.com/whosonfirst/go-reader/Makefile new file mode 100644 index 0000000..4263626 --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/Makefile @@ -0,0 +1,2 @@ +cli: + go build -mod vendor -o bin/read cmd/read/main.go diff --git a/vendor/github.com/whosonfirst/go-reader/README.md b/vendor/github.com/whosonfirst/go-reader/README.md new file mode 100644 index 0000000..c94ebdb --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/README.md @@ -0,0 +1,360 @@ +# go-reader + +There are many interfaces for reading files. This one is ours. It returns `io.ReadSeekCloser` instances. + +## Documentation + +[![Go Reference](https://pkg.go.dev/badge/github.com/whosonfirst/go-reader.svg)](https://pkg.go.dev/github.com/whosonfirst/go-reader) + +### Example + +Readers are instantiated with the `reader.NewReader` method which takes as its arguments a `context.Context` instance and a URI string. The URI's scheme represents the type of reader it implements and the remaining (URI) properties are used by that reader type to instantiate itself. + +For example to read files from a directory on the local filesystem you would write: + +``` +package main + +import ( + "context" + "github.com/whosonfirst/go-reader" + "io" + "os" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "file:///usr/local/data") + fh, _ := r.Read(ctx, "example.txt") + defer fh.Close() + io.Copy(os.Stdout, fh) +} +``` + +There is also a handy "null" reader in case you need a "pretend" reader that doesn't actually do anything: + +``` +package main + +import ( + "context" + "github.com/whosonfirst/go-reader" + "io" + "os" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "null://") + fh, _ := r.Read(ctx, "example.txt") + defer fh.Close() + io.Copy(os.Stdout, fh) +} +``` + +## Interfaces + +### reader.Reader + +``` +type Reader interface { + Read(context.Context, string) (io.ReadSeekCloser, error) + ReaderURI(context.Context, string) string +} +``` + +## Custom readers + +Custom readers need to: + +1. Implement the interface above. +2. Announce their availability using the `go-reader.RegisterReader` method on initialization, passing in an initialization function implementing the `go-reader.ReaderInitializationFunc` interface. 
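+
+In its simplest form, step 2 looks like this sketch for a hypothetical `example://` scheme (where `NewExampleReader` is a stand-in for your own initialization function):
+
+```
+func init() {
+
+	ctx := context.Background()
+
+	err := reader.RegisterReader(ctx, "example", NewExampleReader)
+
+	if err != nil {
+		panic(err)
+	}
+}
+```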
+ +For example, this is how the [go-reader-http](https://github.com/whosonfirst/go-reader-http) reader is implemented: + +``` +package reader + +import ( + "context" + "errors" + wof_reader "github.com/whosonfirst/go-reader" + "github.com/whosonfirst/go-ioutil" + "io" + _ "log" + "net/http" + "net/url" + "path/filepath" + "time" +) + +type HTTPReader struct { + wof_reader.Reader + url *url.URL + throttle <-chan time.Time +} + +func init() { + + ctx := context.Background() + + schemes := []string{ + "http", + "https", + } + + for _, s := range schemes { + + err := wof_reader.RegisterReader(ctx, s, NewHTTPReader) + + if err != nil { + panic(err) + } + } +} + +func NewHTTPReader(ctx context.Context, uri string) (wof_reader.Reader, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, err + } + + rate := time.Second / 3 + throttle := time.Tick(rate) + + r := HTTPReader{ + throttle: throttle, + url: u, + } + + return &r, nil +} + +func (r *HTTPReader) Read(ctx context.Context, uri string) (io.ReadSeekCloser, error) { + + <-r.throttle + + u, _ := url.Parse(r.url.String()) + u.Path = filepath.Join(u.Path, uri) + + url := u.String() + + rsp, err := http.Get(url) + + if err != nil { + return nil, err + } + + if rsp.StatusCode != 200 { + return nil, errors.New(rsp.Status) + } + + fh, err := ioutil.NewReadSeekCloser(rsp.Body) + + if err != nil { + return nil, err + } + + return fh, nil +} + +func (r *HTTPReader) ReaderURI(ctx context.Context, uri string) string { + return uri +} +``` + +And then to use it you would do this: + +``` +package main + +import ( + "context" + "github.com/whosonfirst/go-reader" + _ "github.com/whosonfirst/go-reader-http" + "io" + "os" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "https://data.whosonfirst.org") + fh, _ := r.Read(ctx, "101/736/545/101736545.geojson") + defer fh.Close() + io.Copy(os.Stdout, fh) +} +``` + +## Available readers + +### "blob" + +Read files from any registered [Go Cloud](https://gocloud.dev/howto/blob/) `Blob` source. For example: + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" + _ "github.com/whosonfirst/go-reader-blob" + _ "gocloud.dev/blob/s3blob" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "s3://whosonfirst-data?region=us-west-1") +} +``` + +* https://github.com/whosonfirst/go-reader-blob + +### github:// + +Read files from a GitHub repository. + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" + _ "github.com/whosonfirst/go-reader-github" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "github://{GITHUB_OWNER}/{GITHUB_REPO}") + + // to specify a specific branch you would do this: + // r, _ := reader.NewReader(ctx, "github://{GITHUB_OWNER}/{GITHUB_REPO}?branch={GITHUB_BRANCH}") +} +``` + +* https://github.com/whosonfirst/go-reader-github + +### githubapi:// + +Read files from a GitHub repository using the GitHub API. 
+ +``` +import ( + "context" + "github.com/whosonfirst/go-reader" + _ "github.com/whosonfirst/go-reader-github" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "githubapi://{GITHUB_OWNER}/{GITHUB_REPO}?access_token={GITHUBAPI_ACCESS_TOKEN}") + + // to specify a specific branch you would do this: + // r, _ := reader.NewReader(ctx, "githubapi://{GITHUB_OWNER}/{GITHUB_REPO}/{GITHUB_BRANCH}?access_token={GITHUBAPI_ACCESS_TOKEN}") +} +``` + +* https://github.com/whosonfirst/go-reader-github + +### http:// and https:// + +Read files from an HTTP(S) endpoint. + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" + _ "github.com/whosonfirst/go-reader-http" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "https://{HTTP_HOST_AND_PATH}") +} +``` + +* https://github.com/whosonfirst/go-reader-http + +### file:// + +Read files from a local filesystem. + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "file://{PATH_TO_DIRECTORY}") +} +``` + +If you are importing the `go-reader-blob` package and using the GoCloud's [fileblob](https://gocloud.dev/howto/blob/#local) driver then instantiating the `file://` scheme will fail since it will have already been registered. You can work around this by using the `fs://` scheme. For example: + +``` +r, _ := reader.NewReader(ctx, "fs://{PATH_TO_DIRECTORY}") +``` + +* https://github.com/whosonfirst/go-reader + +### null:// + +Pretend to read files. + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "null://") +} +``` + +### repo:// + +This is a convenience scheme for working with Who's On First data repositories. + +It will update a URI by appending a `data` directory to its path and changing its scheme to `fs://` before invoking `reader.NewReader` with the updated URI. + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "repo:///usr/local/data/whosonfirst-data-admin-ca") +} +``` + +### stdin:// + +Read "files" from `STDIN` + +``` +import ( + "context" + "github.com/whosonfirst/go-reader" +) + +func main() { + ctx := context.Background() + r, _ := reader.NewReader(ctx, "stdin://") +} +``` + +And then to use, something like: + +``` +> cat README.md | ./bin/read -reader-uri stdin:// - | wc -l + 339 +``` + +Note the use of `-` for a URI. This is the convention (when reading from STDIN) but it can be whatever you want it to be. + +## See also + +* https://github.com/whosonfirst/go-writer diff --git a/vendor/github.com/whosonfirst/go-reader/doc.go b/vendor/github.com/whosonfirst/go-reader/doc.go new file mode 100644 index 0000000..229e60d --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/doc.go @@ -0,0 +1,45 @@ +// Example: +// +// package main +// +// import ( +// "context" +// "github.com/whosonfirst/go-reader" +// "io" +// "os" +// ) +// +// func main() { +// ctx := context.Background() +// r, _ := reader.NewReader(ctx, "fs:///usr/local/data") +// fh, _ := r.Read(ctx, "example.txt") +// defer fh.Close() +// io.Copy(os.Stdout, fh) +// } +// +// Package reader provides a common interface for reading from a variety of sources. 
It has the following interface:
+//
+//	type Reader interface {
+//		Read(context.Context, string) (io.ReadSeekCloser, error)
+//		ReaderURI(context.Context, string) string
+//	}
+//
+// Reader instances are created either by calling a package-specific New{SOME_READER}Reader method or by invoking the
+// reader.NewReader method passing in a context.Context instance and a URI specific to the reader class. For example:
+//
+//	r, _ := reader.NewReader(ctx, "fs:///usr/local/data")
+//
+// Custom reader packages implement the reader.Reader interface and register their availability by calling the reader.RegisterReader
+// method on initialization. For example:
+//
+//	func init() {
+//
+//		ctx := context.Background()
+//
+//		err := RegisterReader(ctx, "file", NewFileReader)
+//
+//		if err != nil {
+//			panic(err)
+//		}
+//	}
+package reader
diff --git a/vendor/github.com/whosonfirst/go-reader/fs.go b/vendor/github.com/whosonfirst/go-reader/fs.go
new file mode 100644
index 0000000..9a832a3
--- /dev/null
+++ b/vendor/github.com/whosonfirst/go-reader/fs.go
@@ -0,0 +1,119 @@
+package reader
+
+import (
+	"compress/bzip2"
+	"context"
+	"fmt"
+	"github.com/whosonfirst/go-ioutil"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
+// FileReader is a struct that implements the `Reader` interface for reading documents from files on a local disk.
+type FileReader struct {
+	Reader
+	root      string
+	allow_bz2 bool
+}
+
+func init() {
+
+	ctx := context.Background()
+
+	err := RegisterReader(ctx, "fs", NewFileReader) // Deprecated
+
+	if err != nil {
+		panic(err)
+	}
+
+}
+
+// NewFileReader returns a new `FileReader` instance for reading documents from local files on
+// disk, configured by 'uri' in the form of:
+//
+//	fs://{PATH}
+//
+// Where {PATH} is an absolute path to an existing directory where files will be read from.
+func NewFileReader(ctx context.Context, uri string) (Reader, error) {
+
+	u, err := url.Parse(uri)
+
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse URI, %w", err)
+	}
+
+	root := u.Path
+	info, err := os.Stat(root)
+
+	if err != nil {
+		return nil, fmt.Errorf("Failed to stat %s, %w", root, err)
+	}
+
+	if !info.IsDir() {
+		return nil, fmt.Errorf("Root (%s) is not a directory", root)
+	}
+
+	r := &FileReader{
+		root: root,
+	}
+
+	q := u.Query()
+
+	allow_bz2 := q.Get("allow_bz2")
+
+	if allow_bz2 != "" {
+
+		allow, err := strconv.ParseBool(allow_bz2)
+
+		if err != nil {
+			return nil, fmt.Errorf("Unable to parse 'allow_bz2' parameter (%s), %w", allow_bz2, err)
+		}
+
+		r.allow_bz2 = allow
+	}
+
+	return r, nil
+}
+
+// Read will open an `io.ReadSeekCloser` for a file matching 'path'.
+func (r *FileReader) Read(ctx context.Context, path string) (io.ReadSeekCloser, error) {
+
+	abs_path := r.ReaderURI(ctx, path)
+
+	_, err := os.Stat(abs_path)
+
+	if err != nil {
+		return nil, fmt.Errorf("Failed to stat %s, %w", abs_path, err)
+	}
+
+	var fh io.ReadSeekCloser
+
+	fh, err = os.Open(abs_path)
+
+	if err != nil {
+		return nil, fmt.Errorf("Failed to open %s, %w", abs_path, err)
+	}
+
+	if filepath.Ext(abs_path) == ".bz2" && r.allow_bz2 {
+
+		bz_r := bzip2.NewReader(fh)
+
+		rsc, err := ioutil.NewReadSeekCloser(bz_r)
+
+		if err != nil {
+			return nil, fmt.Errorf("Failed to create ReadSeekCloser for bzip2 reader for %s, %w", path, err)
+		}
+
+		fh = rsc
+	}
+
+	return fh, nil
+}
+
+// ReaderURI returns the absolute path on the local filesystem for 'path'.
+func (r *FileReader) ReaderURI(ctx context.Context, path string) string {
+	return filepath.Join(r.root, path)
+}
diff --git a/vendor/github.com/whosonfirst/go-reader/multi.go b/vendor/github.com/whosonfirst/go-reader/multi.go
new file mode 100644
index 0000000..8f6dc22
--- /dev/null
+++ b/vendor/github.com/whosonfirst/go-reader/multi.go
@@ -0,0 +1,137 @@
+package reader
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/hashicorp/go-multierror"
+	"io"
+	_ "log"
+	"sync"
+)
+
+// MultiReader is a struct that implements the `Reader` interface for reading documents from one or more `Reader` instances.
+type MultiReader struct {
+	Reader
+	readers []Reader
+	lookup  map[string]int
+	mu      *sync.RWMutex
+}
+
+// NewMultiReaderFromURIs returns a new `Reader` instance for reading documents from one or more `Reader` instances.
+// 'uris' is assumed to be a list of URIs each of which will be used to invoke the `NewReader` method.
+func NewMultiReaderFromURIs(ctx context.Context, uris ...string) (Reader, error) {
+
+	readers := make([]Reader, 0)
+
+	for _, uri := range uris {
+
+		r, err := NewReader(ctx, uri)
+
+		if err != nil {
+			return nil, fmt.Errorf("Failed to create reader for %s, %w", uri, err)
+		}
+
+		readers = append(readers, r)
+	}
+
+	return NewMultiReader(ctx, readers...)
+}
+
+// NewMultiReader returns a new `Reader` instance for reading documents from one or more `Reader` instances.
+func NewMultiReader(ctx context.Context, readers ...Reader) (Reader, error) {
+
+	lookup := make(map[string]int)
+
+	mu := new(sync.RWMutex)
+
+	mr := MultiReader{
+		readers: readers,
+		lookup:  lookup,
+		mu:      mu,
+	}
+
+	return &mr, nil
+}
+
+// Read will open an `io.ReadSeekCloser` for a file matching 'path'. In the case of multiple underlying
+// `Reader` instances the first instance to successfully load 'path' will be returned.
+func (mr *MultiReader) Read(ctx context.Context, path string) (io.ReadSeekCloser, error) {
+
+	missing := errors.New("Unable to read URI")
+
+	mr.mu.RLock()
+
+	idx, ok := mr.lookup[path]
+
+	mr.mu.RUnlock()
+
+	if ok {
+
+		// log.Printf("READ MULTIREADER LOOKUP INDEX FOR %s AS %d\n", path, idx)
+
+		if idx == -1 {
+			return nil, missing
+		}
+
+		r := mr.readers[idx]
+		return r.Read(ctx, path)
+	}
+
+	var fh io.ReadSeekCloser
+	idx = -1
+
+	var errs error
+
+	for i, r := range mr.readers {
+
+		rsp, err := r.Read(ctx, path)
+
+		if err != nil {
+			// accumulate the failure from each reader rather than replacing earlier errors
+			errs = multierror.Append(errs, fmt.Errorf("Failed to read %s with %T, %w", path, r, err))
+		} else {
+
+			fh = rsp
+			idx = i
+
+			break
+		}
+	}
+
+	// log.Printf("SET MULTIREADER LOOKUP INDEX FOR %s AS %d\n", path, idx)
+
+	mr.mu.Lock()
+	mr.lookup[path] = idx
+	mr.mu.Unlock()
+
+	if fh == nil {
+		return nil, errs
+	}
+
+	return fh, nil
+}
+
+// ReaderURI returns the URI for 'path'. In the case of multiple underlying
+// `Reader` instances the URI reported by the first instance to successfully load 'path' will be returned.
+func (mr *MultiReader) ReaderURI(ctx context.Context, path string) string { + + mr.mu.RLock() + + idx, ok := mr.lookup[path] + + mr.mu.RUnlock() + + if ok { + return mr.readers[idx].ReaderURI(ctx, path) + } + + r, err := mr.Read(ctx, path) + + if err != nil { + return fmt.Sprintf("x-urn:go-reader:multi#%s", path) + } + + defer r.Close() + + return mr.ReaderURI(ctx, path) +} diff --git a/vendor/github.com/whosonfirst/go-reader/null.go b/vendor/github.com/whosonfirst/go-reader/null.go new file mode 100644 index 0000000..9818f85 --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/null.go @@ -0,0 +1,46 @@ +package reader + +import ( + "bytes" + "context" + "github.com/whosonfirst/go-ioutil" + "io" +) + +// NullReader is a struct that implements the `Reader` interface for reading documents from nowhere. +type NullReader struct { + Reader +} + +func init() { + + ctx := context.Background() + err := RegisterReader(ctx, "null", NewNullReader) + + if err != nil { + panic(err) + } +} + +// NewNullReader returns a new `FileReader` instance for reading documents from nowhere, +// configured by 'uri' in the form of: +// +// null:// +// +// Technically 'uri' can also be an empty string. +func NewNullReader(ctx context.Context, uri string) (Reader, error) { + + r := &NullReader{} + return r, nil +} + +// Read will open and return an empty `io.ReadSeekCloser` for any value of 'path'. +func (r *NullReader) Read(ctx context.Context, path string) (io.ReadSeekCloser, error) { + br := bytes.NewReader([]byte("")) + return ioutil.NewReadSeekCloser(br) +} + +// ReaderURI returns the value of 'path'. +func (r *NullReader) ReaderURI(ctx context.Context, path string) string { + return path +} diff --git a/vendor/github.com/whosonfirst/go-reader/reader.go b/vendor/github.com/whosonfirst/go-reader/reader.go new file mode 100644 index 0000000..a79616a --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/reader.go @@ -0,0 +1,99 @@ +package reader + +import ( + "context" + "fmt" + "github.com/aaronland/go-roster" + "io" + "net/url" + "sort" + "strings" +) + +var reader_roster roster.Roster + +// ReaderInitializationFunc is a function defined by individual reader package and used to create +// an instance of that reader +type ReaderInitializationFunc func(ctx context.Context, uri string) (Reader, error) + +// Reader is an interface for reading data from multiple sources or targets. +type Reader interface { + // Reader returns a `io.ReadSeekCloser` instance for a URI resolved by the instance implementing the `Reader` interface. + Read(context.Context, string) (io.ReadSeekCloser, error) + // The absolute path for the file is determined by the instance implementing the `Reader` interface. + ReaderURI(context.Context, string) string +} + +// RegisterReader registers 'scheme' as a key pointing to 'init_func' in an internal lookup table +// used to create new `Reader` instances by the `NewReader` method. +func RegisterReader(ctx context.Context, scheme string, init_func ReaderInitializationFunc) error { + + err := ensureReaderRoster() + + if err != nil { + return err + } + + return reader_roster.Register(ctx, scheme, init_func) +} + +func ensureReaderRoster() error { + + if reader_roster == nil { + + r, err := roster.NewDefaultRoster() + + if err != nil { + return err + } + + reader_roster = r + } + + return nil +} + +// NewReader returns a new `Reader` instance configured by 'uri'. 
The value of 'uri' is parsed +// as a `url.URL` and its scheme is used as the key for a corresponding `ReaderInitializationFunc` +// function used to instantiate the new `Reader`. It is assumed that the scheme (and initialization +// function) have been registered by the `RegisterReader` method. +func NewReader(ctx context.Context, uri string) (Reader, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, err + } + + scheme := u.Scheme + + i, err := reader_roster.Driver(ctx, scheme) + + if err != nil { + return nil, err + } + + init_func := i.(ReaderInitializationFunc) + return init_func(ctx, uri) +} + +// Schemes returns the list of schemes that have been registered. +func Schemes() []string { + + ctx := context.Background() + schemes := []string{} + + err := ensureReaderRoster() + + if err != nil { + return schemes + } + + for _, dr := range reader_roster.Drivers(ctx) { + scheme := fmt.Sprintf("%s://", strings.ToLower(dr)) + schemes = append(schemes, scheme) + } + + sort.Strings(schemes) + return schemes +} diff --git a/vendor/github.com/whosonfirst/go-reader/repo.go b/vendor/github.com/whosonfirst/go-reader/repo.go new file mode 100644 index 0000000..ed9f02e --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/repo.go @@ -0,0 +1,37 @@ +package reader + +import ( + "context" + "fmt" + "net/url" + "path/filepath" +) + +func init() { + + ctx := context.Background() + + err := RegisterReader(ctx, "repo", NewRepoReader) + + if err != nil { + panic(err) + } + +} + +// NewRepoReader is a convenience method to update 'uri' by appending a `data` +// directory to its path and changing its scheme to `fs://` before invoking +// NewReader with the updated URI. +func NewRepoReader(ctx context.Context, uri string) (Reader, error) { + + u, err := url.Parse(uri) + + if err != nil { + return nil, err + } + + root := filepath.Join(u.Path, "data") + + uri = fmt.Sprintf("fs://%s", root) + return NewReader(ctx, uri) +} diff --git a/vendor/github.com/whosonfirst/go-reader/stdin.go b/vendor/github.com/whosonfirst/go-reader/stdin.go new file mode 100644 index 0000000..3846177 --- /dev/null +++ b/vendor/github.com/whosonfirst/go-reader/stdin.go @@ -0,0 +1,48 @@ +package reader + +import ( + "context" + "github.com/whosonfirst/go-ioutil" + "io" + "os" +) + +// Constant string value representing STDIN. +const STDIN string = "-" + +// StdinReader is a struct that implements the `Reader` interface for reading documents from STDIN. +type StdinReader struct { + Reader +} + +func init() { + + ctx := context.Background() + err := RegisterReader(ctx, "stdin", NewStdinReader) + + if err != nil { + panic(err) + } +} + +// NewStdinReader returns a new `FileReader` instance for reading documents from STDIN, +// configured by 'uri' in the form of: +// +// stdin:// +// +// Technically 'uri' can also be an empty string. +func NewStdinReader(ctx context.Context, uri string) (Reader, error) { + + r := &StdinReader{} + return r, nil +} + +// Read will open a `io.ReadSeekCloser` instance wrapping `os.Stdin`. +func (r *StdinReader) Read(ctx context.Context, uri string) (io.ReadSeekCloser, error) { + return ioutil.NewReadSeekCloser(os.Stdin) +} + +// ReaderURI will return the value of the `STDIN` constant. 
+func (r *StdinReader) ReaderURI(ctx context.Context, uri string) string { + return STDIN +} diff --git a/vendor/github.com/whosonfirst/go-rfc-5646/.gitignore b/vendor/github.com/whosonfirst/go-rfc-5646/.gitignore new file mode 100644 index 0000000..f5cccdd --- /dev/null +++ b/vendor/github.com/whosonfirst/go-rfc-5646/.gitignore @@ -0,0 +1,10 @@ +*~ +pkg +src +!vendor/src +bin +!bin/.gitignore +*.log +*.json +.travis.yml +*.db \ No newline at end of file diff --git a/vendor/github.com/whosonfirst/go-rfc-5646/5646.go b/vendor/github.com/whosonfirst/go-rfc-5646/5646.go new file mode 100644 index 0000000..fe0c700 --- /dev/null +++ b/vendor/github.com/whosonfirst/go-rfc-5646/5646.go @@ -0,0 +1,101 @@ +package rfc5646 + +import ( + "regexp" +) + +var RE_LANGUAGETAG *regexp.Regexp + +func init() { + + RE_LANGUAGETAG = regexp.MustCompile(`^((?P(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)|(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|zh-min|zh-min-nan|zh-xiang))|((?P([A-Za-z]{2,3}(-(?P[A-Za-z]{3}(-[A-Za-z]{3}){0,2}))?)|[A-Za-z]{4}|[A-Za-z]{5,8})(-(?P