diff --git a/Jenkinsfile b/Jenkinsfile
index 5c1ff5b..c243e4a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -82,7 +82,7 @@ pipeline {
         ///
         /// Ontobio Validation
         ///
-        VALIDATION_ONTOLOGY_URL="http://skyhook.berkeleybop.org/release/ontology/go.json"
+        VALIDATION_ONTOLOGY_URL="http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/ontology/go.json"
 
         ///
         /// Minerva input.
@@ -90,7 +90,7 @@ pipeline {
 
         // Minerva operating profile.
         MINERVA_INPUT_ONTOLOGIES = [
-            "http://skyhook.berkeleybop.org/release/ontology/extensions/go-lego.owl"
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/ontology/extensions/go-lego.owl"
         ].join(" ")
 
         ///
@@ -101,19 +101,19 @@ pipeline {
         GOLR_SOLR_MEMORY = "128G"
         GOLR_LOADER_MEMORY = "192G"
         GOLR_INPUT_ONTOLOGIES = [
-            "http://skyhook.berkeleybop.org/release/ontology/extensions/go-amigo.owl"
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/ontology/extensions/go-amigo.owl"
         ].join(" ")
         GOLR_INPUT_GAFS = [
             //"http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/products/upstream_and_raw_data/paint_other.gaf.gz",
-            "http://skyhook.berkeleybop.org/release/annotations/goa_chicken.gaf.gz",
-            "http://skyhook.berkeleybop.org/release/annotations/goa_chicken_complex.gaf.gz",
-            "http://skyhook.berkeleybop.org/release/annotations/goa_uniprot_all_noiea.gaf.gz",
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/annotations/goa_chicken.gaf.gz",
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/annotations/goa_chicken_complex.gaf.gz",
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/annotations/goa_uniprot_all_noiea.gaf.gz",
             "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/annotations/mgi.gaf.gz",
-            "http://skyhook.berkeleybop.org/release/annotations/pombase.gaf.gz",
-            "http://skyhook.berkeleybop.org/release/annotations/wb.gaf.gz"
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/annotations/pombase.gaf.gz",
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/annotations/wb.gaf.gz"
         ].join(" ")
         GOLR_INPUT_PANTHER_TREES = [
-            "http://skyhook.berkeleybop.org/release/products/panther/arbre.tgz"
+            "http://skyhook.berkeleybop.org/full-issue-325-gopreprocess/products/panther/arbre.tgz"
         ].join(" ")
 
         ///
@@ -156,6 +156,9 @@ pipeline {
                 // Check to make sure we have coherent metadata so we
                 // don't clobber good products.
                 watchdog();
+
+                // Give us a minute to cancel if we want.
+                sleep time: 1, unit: 'MINUTES'
                 cleanWs deleteDirs: true, disableDeferredWipeout: true
             }
         }
@@ -316,6 +319,180 @@ pipeline {
                 }
             }
         }
+        // See https://github.com/geneontology/go-ontology for details
+        // on the ontology release pipeline. This stage runs
+        // daily (TODO?) and creates all the files normally included in
+        // a release, and deploys to S3.
+        stage('Produce ontology (*)') {
+            agent {
+                docker {
+                    // Upgrade test for: geneontology/go-ontology#25019, from v1.2.32
+                    image 'obolibrary/odkfull:v1.4'
+                    // Reset Jenkins Docker agent default to original
+                    // root.
+                    args '-u root:root'
+                }
+            }
+            // CHECKPOINT: Recover key environmental variables.
+            environment {
+                START_DOW = sh(script: 'curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/dow.txt', returnStdout: true).trim()
+                START_DATE = sh(script: 'curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/date.txt', returnStdout: true).trim()
+            }
+            steps {
+                // Create a relative working directory and set up our
+                // data environment.
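+                // (Everything below runs inside the odkfull container
+                // named in the agent stanza above; paths are relative
+                // to the Jenkins workspace.)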
+                dir('./go-ontology') {
+
+                    // We're starting to run into problems with
+                    // ontology download taking too long for the
+                    // default 10m, so try and get into the guts of
+                    // the git commands a little. Issues #248.
+                    // git branch: TARGET_GO_ONTOLOGY_BRANCH, url: 'https://github.com/geneontology/go-ontology.git'
+                    checkout changelog: false, poll: false, scm: [$class: 'GitSCM', branches: [[name: TARGET_GO_ONTOLOGY_BRANCH]], extensions: [[$class: 'CloneOption', depth: 1, noTags: true, reference: '', shallow: true, timeout: 120]], userRemoteConfigs: [[url: 'https://github.com/geneontology/go-ontology.git', refspec: "+refs/heads/${env.TARGET_GO_ONTOLOGY_BRANCH}:refs/remotes/origin/${env.TARGET_GO_ONTOLOGY_BRANCH}"]]]
+
+                    // Default namespace.
+                    sh 'env'
+
+                    dir('./src/ontology') {
+                        retry(3){
+                            sh 'make RELEASEDATE=$START_DATE OBO=http://purl.obolibrary.org/obo ROBOT_ENV="ROBOT_JAVA_ARGS=-Xmx48G" all'
+                        }
+                        retry(3){
+                            sh 'make prepare_release'
+                        }
+                    }
+
+                    // Make sure that we copy any files there,
+                    // including any core dump produced.
+                    withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                        //sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" target/* skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/ontology'
+                        sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY -r target/* skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/ontology/'
+                    }
+
+                    // Now that the files are safely away onto skyhook for
+                    // debugging, test for the core dump.
+                    script {
+                        if( WE_ARE_BEING_SAFE_P == 'TRUE' ){
+
+                            def found_core_dump_p = fileExists 'target/core_dump.owl'
+                            if( found_core_dump_p ){
+                                error 'ROBOT core dump detected--bailing out.'
+                            }
+                        }
+                    }
+
+                    // Try and force destruction of anything remaining
+                    // on disk after build as cleanup.
+                    sh 'git clean -fx || true'
+                }
+            }
+        }
+        stage('Minerva generations') {
+            steps {
+                parallel(
+                    "Make Noctua GPAD": {
+
+                        // May be parallelized in the future, but may need to
+                        // serve as input into a mega step.
+                        script {
+
+                            // Create a relative working directory and set up our
+                            // data environment.
+                            dir('./noctua-models') {
+
+                                // Attempt to trim/prune/speed up
+                                // noctua-models as we do for
+                                // go-ontology for
+                                // https://github.com/geneontology/pipeline/issues/278
+                                // .
+                                checkout changelog: false, poll: false, scm: [$class: 'GitSCM', branches: [[name: TARGET_NOCTUA_MODELS_BRANCH]], extensions: [[$class: 'CloneOption', depth: 1, noTags: true, reference: '', shallow: true, timeout: 120]], userRemoteConfigs: [[url: 'https://github.com/geneontology/noctua-models.git', refspec: "+refs/heads/${env.TARGET_NOCTUA_MODELS_BRANCH}:refs/remotes/origin/${env.TARGET_NOCTUA_MODELS_BRANCH}"]]]
+
+                                // Make all software products
+                                // available in bin/ (and lib/).
+                                sh 'mkdir -p bin/'
+                                sh 'mkdir -p lib/'
+                                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                                    sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/bin/* ./bin/'
+                                    // WARNING/BUG: needed for blazegraph-runner
+                                    // to run at this point.
+                                    sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/lib/* ./lib/'
+                                }
+                                sh 'chmod +x bin/*'
+
+                                // Compile models.
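+                                // (Flow sketch: import everything under
+                                // models/ into a fresh blazegraph.jnl
+                                // journal, then convert GO-CAM to legacy
+                                // GPAD via SPARQL against the ontology
+                                // journal--see the two minerva-cli calls
+                                // below.)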
+                                sh 'mkdir -p legacy/gpad'
+                                withEnv(['MINERVA_CLI_MEMORY=128G']){
+                                    // "Import" models.
+                                    sh './bin/minerva-cli.sh --import-owl-models -f models -j blazegraph.jnl'
+                                    // Convert GO-CAM to GPAD.
+                                    sh './bin/minerva-cli.sh --lego-to-gpad-sparql --ontology $MINERVA_INPUT_ONTOLOGIES --ontojournal ontojournal.jnl -i blazegraph.jnl --gpad-output legacy/gpad'
+                                }
+
+                                // Collation.
+                                // Hack for iterating quickly on
+                                // https://github.com/geneontology/pipeline/issues/313 .
+                                sh 'wget -N https://raw.githubusercontent.com/geneontology/go-site/$TARGET_GO_SITE_BRANCH/scripts/collate-gpads.pl'
+                                sh 'perl ./collate-gpads.pl legacy/gpad/*.gpad'
+
+                                // Rename, compress, and move to skyhook.
+                                sh 'mcp "legacy/*.gpad" "legacy/noctua_#1-src.gpad"'
+                                sh 'gzip -vk legacy/noctua_*.gpad'
+                                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                                    sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY legacy/noctua_*-src.gpad.gz skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/upstream_and_raw_data/'
+                                }
+                            }
+                        }
+                    },
+                    "JSON model generation": {
+
+                        // May be parallelized in the future, but may need to
+                        // serve as input into a mega step.
+                        script {
+
+                            // Create a relative working directory and set up our
+                            // data environment.
+                            dir('./json-noctua-models') {
+
+                                // Attempt to trim/prune/speed up
+                                // noctua-models as we do for
+                                // go-ontology for
+                                // https://github.com/geneontology/pipeline/issues/278
+                                // .
+                                checkout changelog: false, poll: false, scm: [$class: 'GitSCM', branches: [[name: TARGET_NOCTUA_MODELS_BRANCH]], extensions: [[$class: 'CloneOption', depth: 1, noTags: true, reference: '', shallow: true, timeout: 120]], userRemoteConfigs: [[url: 'https://github.com/geneontology/noctua-models.git', refspec: "+refs/heads/${env.TARGET_NOCTUA_MODELS_BRANCH}:refs/remotes/origin/${env.TARGET_NOCTUA_MODELS_BRANCH}"]]]
+
+                                // Make all software products
+                                // available in bin/ (and lib/).
+                                sh 'mkdir -p bin/'
+                                sh 'mkdir -p lib/'
+                                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                                    sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/bin/* ./bin/'
+                                    // WARNING/BUG: needed for blazegraph-runner
+                                    // to run at this point.
+                                    sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/lib/* ./lib/'
+                                }
+                                sh 'chmod +x bin/*'
+
+                                // Compile models.
+                                sh 'mkdir -p jsonout'
+                                withEnv(['MINERVA_CLI_MEMORY=128G']){
+                                    // "Import" models.
+                                    sh './bin/minerva-cli.sh --import-owl-models -f models -j blazegraph.jnl'
+                                    // JSON out to directory.
+                                    sh './bin/minerva-cli.sh --dump-owl-json --journal blazegraph.jnl --ontojournal blazegraph-go-lego-reacto-neo.jnl --folder jsonout'
+                                }
+
+                                // Compress and ship out.
+                                sh 'tar --use-compress-program=pigz -cvf noctua-models-json.tgz -C jsonout .'
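+                                // A quick integrity peek at the archive
+                                // before shipping, useful when debugging
+                                // locally (hypothetical check, normally
+                                // left commented out):
+                                //sh 'tar --use-compress-program=pigz -tf noctua-models-json.tgz | head'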
+                                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                                    sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY noctua-models-json.tgz skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/json/'
+                                }
+                            }
+                        }
+                    }
+                )
+            }
+        }
         stage('Produce GAFs, TTLs, and journal (*)') {
             agent {
                 docker {
@@ -348,7 +525,7 @@ pipeline {
                     sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/lib/* /opt/lib/'
                     // Copy the sources we downloaded earlier to local.
                     // We're grabbing anything that's gaf, zipped or unzipped. This leaves gpad or anything else behind since currently we only expect gafs
-                    sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/release/products/upstream_and_raw_data/*.gaf* /opt/go-site/sources/'
+                    sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/upstream_and_raw_data/*.gaf* /opt/go-site/sources/'
                 }
                 sh "chmod +x /opt/bin/*"
@@ -404,7 +581,7 @@ pipeline {
                     // - all irregular gaffy files + anything paint-y
                     // - but not uniprot_all anything (elsewhere)
                     // - and not any of the ttls
-                    sh 'find /opt/go-site/pipeline/target/groups -type f -regex "^.*\\(\\-src.gaf\\|\\-src.gpi\\|\\_noiea.gaf\\|\\_valid.gaf\\|paint\\_.*\\).gz$" -not -regex "^.*.ttl.gz$" -not -regex "^.*goa_uniprot_all_noiea.gaf.gz$" -not -regex "^.*.ttl.gz$" -exec scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY {} skyhook@skyhook.berkeleybop.org:/home/skyhook/release/products/upstream_and_raw_data \\;'
+                    sh 'find /opt/go-site/pipeline/target/groups -type f -regex "^.*\\(\\-src.gaf\\|\\-src.gpi\\|\\_noiea.gaf\\|\\_valid.gaf\\|paint\\_.*\\).gz$" -not -regex "^.*.ttl.gz$" -not -regex "^.*goa_uniprot_all_noiea.gaf.gz$" -not -regex "^.*.ttl.gz$" -exec scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY {} skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/upstream_and_raw_data \\;'
                     // No longer copy goa uniprot all source to products:
                     // https://github.com/geneontology/pipeline/issues/207
                     // // Now copy over the (single) uniprot
@@ -418,13 +595,13 @@ pipeline {
                     // }
                     // }
                     // Finally, the non-zipped prediction files.
-                    sh 'find /opt/go-site/pipeline/target/groups -type f -regex "^.*\\-prediction.gaf$" -exec scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY {} skyhook@skyhook.berkeleybop.org:/home/skyhook/release/products/upstream_and_raw_data \\;'
+                    sh 'find /opt/go-site/pipeline/target/groups -type f -regex "^.*\\-prediction.gaf$" -exec scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY {} skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/upstream_and_raw_data \\;'
                     // Flatten all GAFs and GAF-like products
                     // onto skyhook. Basically:
                     // - all product-y files
                     // - but not uniprot_all anything (elsewhere)
                     // - and not anything "irregular", like src
-                    sh 'find /opt/go-site/pipeline/target/groups -type f -regex "^.*.\\(gaf\\|gpad\\|gpi\\).gz$" -not -regex "^.*\\(\\-src.gaf\\|\\-src.gpi\\|\\_noiea.gaf\\|\\_valid.gaf\\|noctua_.*\\|paint_.*\\).gz$" -exec scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY {} skyhook@skyhook.berkeleybop.org:/home/skyhook/release/annotations \\;'
+                    sh 'find /opt/go-site/pipeline/target/groups -type f -regex "^.*.\\(gaf\\|gpad\\|gpi\\).gz$" -not -regex "^.*\\(\\-src.gaf\\|\\-src.gpi\\|\\_noiea.gaf\\|\\_valid.gaf\\|noctua_.*\\|paint_.*\\).gz$" -exec scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY {} skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/annotations \\;'
                     // Now copy over the four uniprot core
                     // files, if they are in our run set
                     // (e.g. may not be there on speed runs
@@ -474,6 +651,7 @@ pipeline {
                 }
             }
         }
+        }
     }
     // WARNING: This stage is a hack required to work around data damage described in https://github.com/geneontology/go-site/issues/1484 and
@@ -529,6 +707,7 @@ pipeline {
                 // Upload noctua valid to skyhook
                 sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY /opt/go-site/noctua_target/noctua*.gpad.gz skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/upstream_and_raw_data'
                 sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY /opt/go-site/noctua_target/*.report.* skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/reports'
+            }
         }
     }
     // A new step to think about. What is our core metadata?
@@ -712,6 +891,592 @@ pipeline {
             }
         }
     }
+        stage('Sanity I') {
+            steps {
+                // Prep a copyover point, as the overhead for doing
+                // large i/o over sshfs seems /really/ high.
+                sh 'mkdir -p $WORKSPACE/copyover/ || true'
+                // Mount the remote filesystem.
+                sh 'mkdir -p $WORKSPACE/mnt/ || true'
+                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                    sh 'sshfs -oStrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY -o idmap=user skyhook@skyhook.berkeleybop.org:/home/skyhook $WORKSPACE/mnt/'
+                }
+                // Copy over the files that we want to work on--the
+                // annotations/, products/upstream_and_raw_data/, and
+                // reports/ directories (which we separated earlier).
+                sh 'cp $WORKSPACE/mnt/$BRANCH_NAME/annotations/* $WORKSPACE/copyover/'
+                sh 'cp $WORKSPACE/mnt/$BRANCH_NAME/products/upstream_and_raw_data/* $WORKSPACE/copyover/'
+                sh 'cp $WORKSPACE/mnt/$BRANCH_NAME/reports/* $WORKSPACE/copyover/'
+                // Ready...
+                dir('./go-site') {
+                    git branch: TARGET_GO_SITE_BRANCH, url: 'https://github.com/geneontology/go-site.git'
+
+                    // Run sanity checks.
+                    sh 'python3 ./scripts/sanity-check-ann-report.py -v -d $WORKSPACE/copyover/ --ignore_noctua'
+                    // Make sure that the SPARTA report has nothing
+                    // nasty in it.
+                    // Note: Used to be pipes (|), but Jenkins Pipeline shell
+                    // commands apparently do not respect that.
+                    sh 'jq \'.build\' $WORKSPACE/copyover/sparta-report.json > $WORKSPACE/build-status.txt'
+                    sh 'grep -v \'fail\' $WORKSPACE/build-status.txt'
+                }
+            }
+            // WARNING: Extra safety as I expect this to sometimes fail.
+            post {
+                always {
+                    // Bail on the remote filesystem.
+                    sh 'fusermount -u $WORKSPACE/mnt/ || true'
+                    // Purge the copyover point.
+                    sh 'rm -r -f $WORKSPACE/copyover || true'
+                }
+            }
+        }
+
+        //...
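+        // The stage below indexes into a tmpfs-backed Solr and then guards
+        // release runs with minimum document counts. A minimal sketch of
+        // that guard, assuming a local Solr answering on :8080 as set up by
+        // run-indexer.sh:
+        //
+        //   numFound=$(curl -s "http://localhost:8080/solr/select?q=*:*&rows=0&wt=json" \
+        //     | grep -oh '"numFound":[[:digit:]]*' | grep -oh '[[:digit:]]*')
+        //   [ "$SANITY_SOLR_DOC_COUNT_MIN" -le "$numFound" ] || exit 1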
+        stage('Produce derivatives (*)') {
+            agent {
+                docker {
+                    image 'geneontology/golr-autoindex:28a693d28b37196d3f79acdea8c0406c9930c818_2022-03-17T171930_master'
+                    // Reset Jenkins Docker agent default to original
+                    // root.
+                    args '-u root:root --mount type=tmpfs,destination=/srv/solr/data'
+                }
+            }
+            // CHECKPOINT: Recover key environmental variables.
+            environment {
+                START_DOW = sh(script: 'curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/dow.txt', returnStdout: true).trim()
+                START_DATE = sh(script: 'curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/date.txt', returnStdout: true).trim()
+            }
+
+            steps {
+
+                // Build index into tmpfs.
+                sh 'bash /tmp/run-indexer.sh'
+
+                // Immediately check to see if it looks like we have
+                // enough docs when trying a release. The doc count
+                // seen in the index must be at least
+                // SANITY_SOLR_DOC_COUNT_MIN.
+                script {
+                    if( env.BRANCH_NAME == 'release' ){
+
+                        // Test overall.
+                        echo "SANITY_SOLR_DOC_COUNT_MIN:${env.SANITY_SOLR_DOC_COUNT_MIN}"
+                        sh 'curl "http://localhost:8080/solr/select?q=*:*&rows=0&wt=json"'
+                        sh 'if [ $SANITY_SOLR_DOC_COUNT_MIN -gt $(curl "http://localhost:8080/solr/select?q=*:*&rows=0&wt=json" | grep -oh \'"numFound":[[:digit:]]*\' | grep -oh [[:digit:]]*) ]; then exit 1; else echo "We seem to be clear wrt doc count"; fi'
+
+                        // Test bioentity.
+                        echo "SANITY_SOLR_BIOENTITY_DOC_COUNT_MIN:${env.SANITY_SOLR_BIOENTITY_DOC_COUNT_MIN}"
+                        sh 'curl "http://localhost:8080/solr/select?q=*:*&rows=0&wt=json&fq=document_category:bioentity"'
+                        sh 'if [ $SANITY_SOLR_BIOENTITY_DOC_COUNT_MIN -gt $(curl "http://localhost:8080/solr/select?q=*:*&rows=0&wt=json&fq=document_category:bioentity" | grep -oh \'"numFound":[[:digit:]]*\' | grep -oh [[:digit:]]*) ]; then exit 1; else echo "We seem to be clear wrt doc count"; fi'
+                    }
+                }
+
+                // Copy tmpfs Solr contents onto skyhook.
+                sh 'tar --use-compress-program=pigz -cvf /tmp/golr-index-contents.tgz -C /srv/solr/data/index .'
+                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                    // Copy over index.
+                    // Copy over log.
+                    //sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" /tmp/golr-index-contents.tgz skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/solr/'
+                    //sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" /tmp/golr_timestamp.log skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/solr/'
+                    sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY /tmp/golr-index-contents.tgz skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/solr/'
+                    sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY /tmp/golr_timestamp.log skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/products/solr/'
+                }
+
+                // Solr should still be running in the background here
+                // from indexing--create stats products from the running
+                // GOlr.
+                // Prepare a working directory based around go-stats.
+                dir('./go-stats') {
+                    git branch: TARGET_GO_STATS_BRANCH, url: 'https://github.com/geneontology/go-stats.git'
+
+                    // Not much wanted or needed here--simple python3.
+                    // However, we use the information hidden in
+                    // run-indexer.sh to know where the Solr instance
+                    // is hiding.
+                    sh 'mkdir -p /tmp/stats/ || true'
+                    sh 'cp ./libraries/go-stats/*.py /tmp'
+
+                    // Needed as extra library.
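+                    // (The exact pins below are presumably tied to the
+                    // python3 shipped in this container image; treat the
+                    // versions as historical assumptions if rebuilding.)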
+                    sh 'pip3 install --force-reinstall requests==2.19.1'
+                    sh 'pip3 install --force-reinstall networkx==2.2'
+
+                    // Final command, sealed into docker work
+                    // environment.
+                    echo "Check that results have been stored properly"
+                    sh "curl 'http://localhost:8080/solr/select?q=*:*&rows=0'"
+                    echo "End of results"
+                    retry(3){
+                        sh 'python3 /tmp/go_reports.py -g http://localhost:8080/solr/ -s http://current.geneontology.org/release_stats/go-stats.json -n http://current.geneontology.org/release_stats/go-stats-no-pb.json -c http://skyhook.berkeleybop.org/$BRANCH_NAME/ontology/go.obo -p http://current.geneontology.org/ontology/go.obo -r http://current.geneontology.org/release_stats/go-references.tsv -o /tmp/stats/ -d $START_DATE'
+                    }
+                    retry(3) {
+                        sh 'wget -N http://current.geneontology.org/release_stats/aggregated-go-stats-summaries.json'
+                    }
+
+                    // Roll the stats forward.
+                    sh 'python3 /tmp/aggregate-stats.py -a aggregated-go-stats-summaries.json -b /tmp/stats/go-stats-summary.json -o /tmp/stats/aggregated-go-stats-summaries.json'
+
+                    withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                        retry(3) {
+                            // Copy over stats files.
+                            //sh 'rsync -avz -e "ssh -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY" /tmp/stats/* skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/release_stats/'
+                            sh 'scp -o StrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY /tmp/stats/* skyhook@skyhook.berkeleybop.org:/home/skyhook/$BRANCH_NAME/release_stats/'
+                        }
+                    }
+                }
+            }
+        }
+        //...
+        stage('Sanity II') {
+            when { anyOf { branch 'release' } }
+            steps {
+
+                //
+                echo 'Push pre-release to http://amigo-staging.geneontology.io for testing.'
+
+                // Ninja in our file credentials from Jenkins.
+                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY'), file(credentialsId: 'go-svn-private-key', variable: 'GO_SVN_IDENTITY'), file(credentialsId: 'ansible-bbop-local-slave', variable: 'DEPLOY_LOCAL_IDENTITY'), file(credentialsId: 'go-aws-ec2-ansible-slave', variable: 'DEPLOY_REMOTE_IDENTITY')]) {
+
+                    // Get our operations code and descend into the
+                    // ansible working directory.
+                    dir('./operations') {
+
+                        git([branch: 'master',
+                             credentialsId: 'bbop-agent-github-user-pass',
+                             url: 'https://github.com/geneontology/operations.git'])
+                        dir('./ansible') {
+
+                            retry(3){
+                                sh 'ansible-playbook update-golr-w-skyhook-forced.yaml --inventory=hosts.amigo --private-key="$DEPLOY_LOCAL_IDENTITY" -e skyhook_branch=release -e target_host=amigo-golr-staging'
+                            }
+
+                            // Pause on user input.
+                            echo 'Sanity II: Awaiting user input before proceeding.'
+                            emailext to: "${TARGET_RELEASE_HOLD_EMAILS}",
+                                subject: "GO Pipeline waiting on input for ${env.BRANCH_NAME}",
+                                body: "The ${env.BRANCH_NAME} pipeline is waiting on user input. Please see: https://build.geneontology.org/job/geneontology/job/pipeline/job/${env.BRANCH_NAME}"
+                            lock(resource: 'release-run', inversePrecedence: true) {
+                                echo "Sanity II: A release run holds the lock."
+                                timeout(time:7, unit:'DAYS') {
+                                    input message:'Approve release products?'
+                                }
+                            }
+                            echo 'Sanity II: Positive user input given.'
+                        }
+                    }
+                }
+            }
+        }
+
+        stage('Archive (*)') {
+            // CHECKPOINT: Recover key environmental variables.
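+            // (The checkpoint pattern used by the heavyweight stages:
+            // re-read the run metadata published to skyhook at pipeline
+            // start, e.g.
+            //   curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/date.txt
+            // apparently so the values survive agent switches.)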
+            environment {
+                START_DOW = sh(script: 'curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/dow.txt', returnStdout: true).trim()
+                START_DATE = sh(script: 'curl http://skyhook.berkeleybop.org/$BRANCH_NAME/metadata/date.txt', returnStdout: true).trim()
+            }
+
+            when { anyOf { branch 'release'; branch 'snapshot'; branch 'master' } }
+            steps {
+                // Experimental stanza to support mounting the sshfs
+                // using the "hidden" skyhook identity.
+                sh 'mkdir -p $WORKSPACE/mnt/ || true'
+                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                    sh 'sshfs -oStrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY -o idmap=user skyhook@skyhook.berkeleybop.org:/home/skyhook $WORKSPACE/mnt/'
+
+                    // Try to catch and prevent goa_uniprot_all-src
+                    // from getting into the zenodo archive, etc. Re:
+                    // #207.
+                    sh 'pwd'
+                    sh 'ls -AlF $WORKSPACE/mnt/$BRANCH_NAME/products/upstream_and_raw_data/ || true'
+                    sh 'rm -f $WORKSPACE/mnt/$BRANCH_NAME/products/upstream_and_raw_data/goa_uniprot_all-src.gaf.gz || true'
+                    sh 'ls -AlF $WORKSPACE/mnt/$BRANCH_NAME/products/upstream_and_raw_data/ || true'
+
+                    // Redo goa_uniprot_all names for publication. From:
+                    // https://github.com/geneontology/go-site/issues/1984
+                    sh 'mv $WORKSPACE/mnt/$BRANCH_NAME/annotations/goa_uniprot_all.gaf.gz $WORKSPACE/mnt/$BRANCH_NAME/annotations/filtered_goa_uniprot_all.gaf.gz || true'
+                    sh 'mv $WORKSPACE/mnt/$BRANCH_NAME/annotations/goa_uniprot_all_noiea.gaf.gz $WORKSPACE/mnt/$BRANCH_NAME/annotations/filtered_goa_uniprot_all_noiea.gaf.gz || true'
+                    sh 'mv $WORKSPACE/mnt/$BRANCH_NAME/annotations/goa_uniprot_all_noiea.gpad.gz $WORKSPACE/mnt/$BRANCH_NAME/annotations/filtered_goa_uniprot_all_noiea.gpad.gz || true'
+                    sh 'mv $WORKSPACE/mnt/$BRANCH_NAME/annotations/goa_uniprot_all_noiea.gpi.gz $WORKSPACE/mnt/$BRANCH_NAME/annotations/filtered_goa_uniprot_all_noiea.gpi.gz || true'
+
+                    // Get the annotation download directory prepped. From:
+                    // https://github.com/geneontology/go-site/issues/1971
+                    sh 'rm -f README-annotation-downloads.txt || true'
+                    sh 'wget -N https://raw.githubusercontent.com/geneontology/go-site/$TARGET_GO_SITE_BRANCH/static/pages/README-annotation-downloads.txt'
+                    sh 'mv README-annotation-downloads.txt $WORKSPACE/mnt/$BRANCH_NAME/annotations/README.txt || true'
+
+                    // Keep /lib and /bin out of the archives by
+                    // removing them now that we're done using them
+                    // for product builds. Re: #268.
+                    sh 'ls -AlF $WORKSPACE/mnt/$BRANCH_NAME/'
+                    sh 'rm -r -f $WORKSPACE/mnt/$BRANCH_NAME/bin || true'
+                    sh 'rm -r -f $WORKSPACE/mnt/$BRANCH_NAME/lib || true'
+                    sh 'ls -AlF $WORKSPACE/mnt/$BRANCH_NAME/'
+                }
+                // Copy the product to the right location. As well,
+                // archive.
+                withCredentials([file(credentialsId: 'aws_go_push_json', variable: 'S3_PUSH_JSON'), file(credentialsId: 's3cmd_go_push_configuration', variable: 'S3CMD_JSON'), string(credentialsId: 'zenodo_go_production_token', variable: 'ZENODO_PRODUCTION_TOKEN'), string(credentialsId: 'zenodo_go_sandbox_token', variable: 'ZENODO_SANDBOX_TOKEN')]) {
+                    // Ready...
+                    dir('./go-site') {
+                        git branch: TARGET_GO_SITE_BRANCH, url: 'https://github.com/geneontology/go-site.git'
+
+                        // WARNING: Caveats and reasons as same
+                        // pattern above. We need this as some clients
+                        // are not standard and it turns out there are
+                        // some subtle incompatibilities with urllib3
+                        // and boto in some versions, so we will use a
+                        // virtual env to paper that over.
+                        // See:
+                        // https://github.com/geneontology/pipeline/issues/8#issuecomment-356762604
+                        sh 'python3 -m venv mypyenv'
+                        withEnv(["PATH+EXTRA=${WORKSPACE}/go-site/bin:${WORKSPACE}/go-site/mypyenv/bin", 'PYTHONHOME=', "VIRTUAL_ENV=${WORKSPACE}/go-site/mypyenv", 'PY_ENV=mypyenv', 'PY_BIN=mypyenv/bin']){
+
+                            // Extra package for the indexer.
+                            sh 'python3 ./mypyenv/bin/pip3 install --force-reinstall pystache==0.5.4'
+
+                            // Correct for (possibly) bad boto3,
+                            // as mentioned above.
+                            sh 'python3 ./mypyenv/bin/pip3 install boto3==1.18.52'
+                            sh 'python3 ./mypyenv/bin/pip3 install botocore==1.21.52'
+
+                            // Needed to work around new incompatibility:
+                            // https://github.com/geneontology/pipeline/issues/286
+                            sh 'python3 ./mypyenv/bin/pip3 install --force-reinstall certifi==2021.10.8'
+
+                            // Extra package for the uploader.
+                            sh 'python3 ./mypyenv/bin/pip3 install filechunkio'
+
+                            // Grab BDBag.
+                            sh 'python3 ./mypyenv/bin/pip3 install bdbag'
+
+                            // Needed for large uploads in requests.
+                            sh 'python3 ./mypyenv/bin/pip3 install requests-toolbelt'
+
+                            // Needed as a replacement for the awful requests lib.
+                            sh 'python3 ./mypyenv/bin/pip3 install pycurl'
+
+                            // Apparently something wrong with the default
+                            // version; error like
+                            // https://stackoverflow.com/questions/45821085/awshttpsconnection-object-has-no-attribute-ssl-context
+                            sh 'python3 ./mypyenv/bin/pip3 install awscli'
+
+                            // A temporary workaround for
+                            // https://github.com/geneontology/pipeline/issues/247,
+                            // forcing requests used by bdbag to a
+                            // version that is usable by python 3.5
+                            // (our current raw machine default
+                            // version of python3).
+                            sh 'python3 ./mypyenv/bin/pip3 install --force-reinstall requests==2.25.1'
+
+                            // Well, we need to do a couple of things here in
+                            // a structured way, so we'll go ahead and drop
+                            // into the scripting mode.
+                            script {
+
+                                // Build either a release or testing
+                                // version of a generic BDBag/DOI
+                                // workflow, keeping special bucket
+                                // mappings in mind.
+                                if( env.BRANCH_NAME == 'release' ){
+                                    sh 'python3 ./scripts/create-bdbag-remote-file-manifest.py -v --walk $WORKSPACE/mnt/$BRANCH_NAME/ --remote http://release.geneontology.org/$START_DATE --output manifest.json'
+                                }else if( env.BRANCH_NAME == 'snapshot' || env.BRANCH_NAME == 'master' ){
+                                    sh 'python3 ./scripts/create-bdbag-remote-file-manifest.py -v --walk $WORKSPACE/mnt/$BRANCH_NAME/ --remote $TARGET_INDEXER_PREFIX --output manifest.json'
+                                }
+
+                                // To make a full BDBag, we first need
+                                // a copy of the data, as BDBags change
+                                // directory layout (e.g. data/).
+                                sh 'mkdir -p $WORKSPACE/copyover/ || true'
+                                sh 'cp -r $WORKSPACE/mnt/$BRANCH_NAME/* $WORKSPACE/copyover/'
+                                // Make the BDBag in the copyover/
+                                // (unarchived, as we want to leave it
+                                // to pigz).
+                                sh 'python3 ./mypyenv/bin/bdbag $WORKSPACE/copyover'
+                                // Tarball the whole directory for
+                                // "deep" archive (handmade BDBag).
+                                sh 'tar --use-compress-program=pigz -cvf go-release-archive.tgz -C $WORKSPACE/copyover .'
+
+                                // We have the archives, now let's try
+                                // and get them into position--this is
+                                // fail-y, so we are going to try and
+                                // buffer failure here for the time
+                                // being until we work it all out. We
+                                // are going to do the "hard"/large
+                                // one first, then skip the
+                                // "easy"/small one if we fail, so
+                                // that we can retry this whole stage
+                                // again on failure.
+                                try {
+                                    // Archive the full archive.
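+                                    // (Branch-to-target map for the
+                                    // cases below: release -> production
+                                    // Zenodo deposit; snapshot -> a
+                                    // hand-rolled placeholder DOI;
+                                    // master -> Zenodo sandbox.)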
+                                    if( env.BRANCH_NAME == 'release' ){
+                                        sh 'python3 ./scripts/zenodo-version-update.py --verbose --key $ZENODO_PRODUCTION_TOKEN --concept $ZENODO_ARCHIVE_CONCEPT --file go-release-archive.tgz --output ./release-archive-doi.json --revision $START_DATE'
+                                    }else if( env.BRANCH_NAME == 'snapshot' ){
+                                        // WARNING: to save Zenodo 1TB
+                                        // a month, for snapshot,
+                                        // we'll lie about the DOI
+                                        // that we get (not a big lie
+                                        // as they don't resolve on
+                                        // sandbox anyways).
+                                        //sh 'python3 ./scripts/zenodo-version-update.py --verbose --sandbox --key $ZENODO_SANDBOX_TOKEN --concept $ZENODO_ARCHIVE_CONCEPT --file go-release-archive.tgz --output ./release-archive-doi.json --revision $START_DATE'
+                                        sh 'echo \'{\' > ./release-archive-doi.json'
+                                        sh 'echo \' "doi": "10.5072/zenodo.000000"\' >> ./release-archive-doi.json'
+                                        sh 'echo \'}\' >> ./release-archive-doi.json'
+
+                                    }else if( env.BRANCH_NAME == 'master' ){
+                                        sh 'python3 ./scripts/zenodo-version-update.py --verbose --sandbox --key $ZENODO_SANDBOX_TOKEN --concept $ZENODO_ARCHIVE_CONCEPT --file go-release-archive.tgz --output ./release-archive-doi.json --revision $START_DATE'
+                                    }
+
+                                    // Get the DOI to skyhook for
+                                    // publishing, but don't bother
+                                    // with the full thing--too much
+                                    // space and already in Zenodo.
+                                    sh 'cp release-archive-doi.json $WORKSPACE/mnt/$BRANCH_NAME/metadata/release-archive-doi.json'
+
+                                } catch (exception) {
+                                    // Something went bad with the
+                                    // Zenodo archive upload.
+                                    echo "There has been a failure in the archive upload to Zenodo."
+                                    emailext to: "${TARGET_ADMIN_EMAILS}",
+                                        subject: "GO Pipeline Zenodo archive upload fail for ${env.BRANCH_NAME}",
+                                        body: "There has been a failure in the archive upload to Zenodo, in ${env.BRANCH_NAME}. Please see: https://build.geneontology.org/job/geneontology/job/pipeline/job/${env.BRANCH_NAME}"
+                                    // Hard die if this is a release.
+                                    if( env.BRANCH_NAME == 'release' ){
+                                        error 'Zenodo archive upload error on release--no recovery.'
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            // WARNING: Extra safety as I expect this to sometimes fail.
+            post {
+                always {
+                    // Bail on the remote filesystem.
+                    sh 'fusermount -u $WORKSPACE/mnt/ || true'
+                    // Purge the copyover point.
+                    sh 'rm -r -f $WORKSPACE/copyover || true'
+                }
+            }
+        }
+        stage('Publish') {
+            when { anyOf { branch 'release'; branch 'snapshot'; branch 'master' } }
+            steps {
+                // Experimental stanza to support mounting the sshfs
+                // using the "hidden" skyhook identity.
+                sh 'mkdir -p $WORKSPACE/mnt/ || true'
+                withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY')]) {
+                    sh 'sshfs -oStrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=$SKYHOOK_IDENTITY -o idmap=user skyhook@skyhook.berkeleybop.org:/home/skyhook $WORKSPACE/mnt/'
+                }
+                // Copy the product to the right location. As well,
+                // archive.
+                withCredentials([file(credentialsId: 'aws_go_push_json', variable: 'S3_PUSH_JSON'), file(credentialsId: 's3cmd_go_push_configuration', variable: 'S3CMD_JSON'), string(credentialsId: 'aws_go_access_key', variable: 'AWS_ACCESS_KEY_ID'), string(credentialsId: 'aws_go_secret_key', variable: 'AWS_SECRET_ACCESS_KEY')]) {
+                    // Ready...
+                    dir('./go-site') {
+                        git branch: TARGET_GO_SITE_BRANCH, url: 'https://github.com/geneontology/go-site.git'
+
+                        // TODO: Special handling still needed w/o OSF.io?
+                        // WARNING: Caveats and reasons as same
+                        // pattern above.
+                        // We need this as some clients
+                        // are not standard and it turns out there are
+                        // some subtle incompatibilities with urllib3
+                        // and boto in some versions, so we will use a
+                        // virtual env to paper that over. See:
+                        // https://github.com/geneontology/pipeline/issues/8#issuecomment-356762604
+                        sh 'python3 -m venv mypyenv'
+                        withEnv(["PATH+EXTRA=${WORKSPACE}/go-site/bin:${WORKSPACE}/go-site/mypyenv/bin", 'PYTHONHOME=', "VIRTUAL_ENV=${WORKSPACE}/go-site/mypyenv", 'PY_ENV=mypyenv', 'PY_BIN=mypyenv/bin']){
+
+                            // Extra package for the indexer.
+                            sh 'python3 ./mypyenv/bin/pip3 install --force-reinstall pystache==0.5.4'
+
+                            // Extra package for the uploader.
+                            sh 'python3 ./mypyenv/bin/pip3 install filechunkio'
+
+                            // Let's be explicit here as well, as there were recent issues.
+                            //
+                            sh 'python3 ./mypyenv/bin/pip3 install rsa'
+                            sh 'python3 ./mypyenv/bin/pip3 install awscli'
+
+                            // Version locking for the boto3 / botocore
+                            // upgrade that is incompatible with
+                            // python3.5. See issues #250 and #271.
+                            sh 'python3 ./mypyenv/bin/pip3 install boto3==1.18.52'
+                            sh 'python3 ./mypyenv/bin/pip3 install botocore==1.21.52'
+                            sh 'python3 ./mypyenv/bin/pip3 install s3transfer==0.5.0'
+
+                            // Well, we need to do a couple of things here in
+                            // a structured way, so we'll go ahead and drop
+                            // into the scripting mode.
+                            script {
+
+                                // Create a working index off of
+                                // skyhook. For "release", this will
+                                // be "current". For "snapshot", this
+                                // will be "snapshot".
+                                sh 'python3 ./scripts/directory_indexer.py -v --inject ./scripts/directory-index-template.html --directory $WORKSPACE/mnt/$BRANCH_NAME --prefix $TARGET_INDEXER_PREFIX -x'
+
+                                // Push into S3 buckets. Simple
+                                // overall case: copy the tree directly
+                                // over. For "release", this will be
+                                // "current". For "snapshot", this
+                                // will be "snapshot".
+                                sh 'python3 ./scripts/s3-uploader.py -v --credentials $S3_PUSH_JSON --directory $WORKSPACE/mnt/$BRANCH_NAME/ --bucket $TARGET_BUCKET --number $BUILD_ID --pipeline $BRANCH_NAME'
+
+                                // Also, some runs have special maps
+                                // to buckets...
+                                if( env.BRANCH_NAME == 'release' ){
+
+                                    // "release" -> dated path for
+                                    // indexing (clobbering
+                                    // "current"'s index).
+                                    sh 'python3 ./scripts/directory_indexer.py -v --inject ./scripts/directory-index-template.html --directory $WORKSPACE/mnt/$BRANCH_NAME --prefix http://release.geneontology.org/$START_DATE -x -u'
+                                    // "release" -> dated path for S3.
+                                    sh 'python3 ./scripts/s3-uploader.py -v --credentials $S3_PUSH_JSON --directory $WORKSPACE/mnt/$BRANCH_NAME/ --bucket go-data-product-release/$START_DATE --number $BUILD_ID --pipeline $BRANCH_NAME'
+
+                                    // Build the capper index.html...
+                                    sh 'python3 ./scripts/bucket-indexer.py --credentials $S3_PUSH_JSON --bucket go-data-product-release --inject ./scripts/directory-index-template.html --prefix http://release.geneontology.org > top-level-index.html'
+                                    // ...and push it up to S3.
+                                    sh 's3cmd -c $S3CMD_JSON --acl-public --mime-type=text/html --cf-invalidate put top-level-index.html s3://go-data-product-release/index.html'
+
+                                }else if( env.BRANCH_NAME == 'snapshot' ){
+
+                                    // Currently, the "daily"
+                                    // debugging buckets are intended
+                                    // to be RO directly in S3 for
+                                    // debugging.
+                                    sh 'python3 ./scripts/s3-uploader.py -v --credentials $S3_PUSH_JSON --directory $WORKSPACE/mnt/$BRANCH_NAME/ --bucket go-data-product-daily/$START_DAY --number $BUILD_ID --pipeline $BRANCH_NAME'
+
+                                }else if( env.BRANCH_NAME == 'master' ){
+                                    // Pass.
+                                }
+
+                                // Invalidate the CDN now that the new
+                                // files are up.
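+                                // (The generated config below flips
+                                // awscli's "preview" gate, which older
+                                // awscli 1.x builds required before
+                                // exposing the cloudfront subcommand.)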
+                                sh 'echo "[preview]" > ./awscli_config.txt && echo "cloudfront=true" >> ./awscli_config.txt'
+                                sh 'AWS_CONFIG_FILE=./awscli_config.txt python3 ./mypyenv/bin/aws cloudfront create-invalidation --distribution-id $AWS_CLOUDFRONT_DISTRIBUTION_ID --paths "/*"'
+                                // The release branch also needs to
+                                // deal with the second location.
+                                if( env.BRANCH_NAME == 'release' ){
+                                    sh 'AWS_CONFIG_FILE=./awscli_config.txt python3 ./mypyenv/bin/aws cloudfront create-invalidation --distribution-id $AWS_CLOUDFRONT_RELEASE_DISTRIBUTION_ID --paths "/*"'
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            // WARNING: Extra safety as I expect this to sometimes fail.
+            post {
+                always {
+                    // Bail on the remote filesystem.
+                    sh 'fusermount -u $WORKSPACE/mnt/ || true'
+                }
+            }
+        }
+        // Big things to do on major branches.
+        stage('Deploy') {
+            // For exploration of #204, we'll hold back attempts to push out
+            // to AmiGO for master and snapshot, so we don't keep clobbering
+            // the #204 trials.
+            //when { anyOf { branch 'release'; branch 'snapshot'; branch 'master' } }
+            when { anyOf { branch 'release' } }
+            steps {
+                parallel(
+                    "AmiGO": {
+
+                        // Ninja in our file credentials from Jenkins.
+                        withCredentials([file(credentialsId: 'skyhook-private-key', variable: 'SKYHOOK_IDENTITY'), file(credentialsId: 'go-svn-private-key', variable: 'GO_SVN_IDENTITY'), file(credentialsId: 'ansible-bbop-local-slave', variable: 'DEPLOY_LOCAL_IDENTITY'), file(credentialsId: 'go-aws-ec2-ansible-slave', variable: 'DEPLOY_REMOTE_IDENTITY')]) {
+
+                            // Get our operations code and descend into the
+                            // ansible working directory.
+                            dir('./operations') {
+
+                                git([branch: 'master',
+                                     credentialsId: 'bbop-agent-github-user-pass',
+                                     url: 'https://github.com/geneontology/operations.git'])
+                                dir('./ansible') {
+                                    ///
+                                    /// Push out to an AmiGO.
+                                    ///
+                                    script {
+                                        if( env.BRANCH_NAME == 'release' ){
+
+                                            echo 'No current public push on release to Blazegraph.'
+                                            // retry(3){
+                                            //     sh 'ansible-playbook update-endpoint.yaml --inventory=hosts.local-rdf-endpoint --private-key="$DEPLOY_LOCAL_IDENTITY" -e target_user=bbop --extra-vars="pipeline=current build=production endpoint=production"'
+                                            // }
+
+                                            echo 'No current public push on release to GOlr.'
+                                            // retry(3){
+                                            //     sh 'ansible-playbook ./update-golr.yaml --inventory=hosts.amigo --private-key="$DEPLOY_LOCAL_IDENTITY" -e target_host=amigo-golr-aux -e target_user=bbop'
+                                            // }
+                                            // retry(3){
+                                            //     sh 'ansible-playbook ./update-golr.yaml --inventory=hosts.amigo --private-key="$DEPLOY_LOCAL_IDENTITY" -e target_host=amigo-golr-production -e target_user=bbop'
+                                            // }
+
+                                        }else if( env.BRANCH_NAME == 'snapshot' ){
+
+                                            echo 'Push snapshot out to internal Blazegraph'
+                                            retry(3){
+                                                sh 'ansible-playbook update-endpoint.yaml --inventory=hosts.local-rdf-endpoint --private-key="$DEPLOY_LOCAL_IDENTITY" -e target_user=bbop --extra-vars="pipeline=current build=internal endpoint=internal"'
+                                            }
+
+                                            echo 'Push snapshot out to experimental AmiGO'
+                                            retry(3){
+                                                sh 'ansible-playbook ./update-golr-w-snap.yaml --inventory=hosts.amigo --private-key="$DEPLOY_REMOTE_IDENTITY" -e target_host=amigo-golr-exp -e target_user=ubuntu'
+                                            }
+
+                                        }else if( env.BRANCH_NAME == 'master' ){
+
+                                            echo 'Push master out to experimental AmiGO'
+                                            retry(3){
+                                                sh 'ansible-playbook ./update-golr-w-exp.yaml --inventory=hosts.amigo --private-key="$DEPLOY_REMOTE_IDENTITY" -e target_host=amigo-golr-exp -e target_user=ubuntu'
+                                            }
+
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                )
+            }
+            // WARNING: Extra safety as I expect this to sometimes fail.
+            post {
+                always {
+                    // Bail on the remote filesystem.
+                    sh 'fusermount -u $WORKSPACE/mnt/ || true'
+                }
+            }
+        }
+        // stage('TODO: Final status') {
+        //     steps {
+        //         echo 'TODO: final'
+        //     }
+        // }
+    }
+    post {
+        // Let's let our people know if things go well.
+        success {
+            script {
+                if( env.BRANCH_NAME == 'release' ){
+                    echo "There has been a successful run of the ${env.BRANCH_NAME} pipeline."
+                    emailext to: "${TARGET_SUCCESS_EMAILS}",
+                        subject: "GO Pipeline success for ${env.BRANCH_NAME}",
+                        body: "There has been a successful run of the ${env.BRANCH_NAME} pipeline. Please see: https://build.geneontology.org/job/geneontology/job/pipeline/job/${env.BRANCH_NAME}"
+                }
+            }
+        }
+        // Let's let our internal people know if things change.
+        changed {
+            echo "There has been a change in the ${env.BRANCH_NAME} pipeline."
+            emailext to: "${TARGET_ADMIN_EMAILS}",
+                subject: "GO Pipeline change for ${env.BRANCH_NAME}",
+                body: "There has been a pipeline status change in ${env.BRANCH_NAME}. Please see: https://build.geneontology.org/job/geneontology/job/pipeline/job/${env.BRANCH_NAME}"
+        }
+        // Let's let our internal people know if things go badly.
+        failure {
+            echo "There has been a failure in the ${env.BRANCH_NAME} pipeline."
+            emailext to: "${TARGET_ADMIN_EMAILS}",
+                subject: "GO Pipeline FAIL for ${env.BRANCH_NAME}",
+                body: "There has been a pipeline failure in ${env.BRANCH_NAME}. Please see: https://build.geneontology.org/job/geneontology/job/pipeline/job/${env.BRANCH_NAME}"
+        }
+    }
 }
 // Check that we do not affect public targets on non-mainline runs.