DRILL-8016: Default to lazy outbound connections for storage-jdbc and storage-splunk (#2342)

* New lazy defaults for HikariCP: min pool size = 0, max = 10.

* Make storage-splunk plugin connect to Splunk lazily.

* Remove commented code, enable code based on DRILL-8005.
jnturton authored and estherbuchwalter committed Oct 26, 2021
1 parent c80a89f commit 374db31
Showing 4 changed files with 40 additions and 15 deletions.
@@ -120,6 +120,32 @@ public void close() {
static HikariDataSource initDataSource(JdbcStorageConfig config) {
try {
Properties properties = new Properties();

/*
Set default HikariCP values which prefer to connect lazily to avoid overwhelming source
systems with connections which mostly remain idle. A data source that is present in N
storage configs replicated over P drillbits with a HikariCP minimumIdle value of Q will
have N×P×Q connections made to it eagerly.
The trade off of lazier connections is increased latency should there be a spike in user
queries involving a JDBC data source. When comparing the defaults that follow with e.g. the
HikariCP defaults, bear in mind that the context here is OLAP, not OLTP. It is normal
for queries to run for a long time and to be separated by long intermissions. Users who
prefer eager to lazy connections remain free to overwrite the following defaults in their
storage config.
*/

// maximum amount of time that a connection is allowed to sit idle in the pool, 0 = forever
properties.setProperty("dataSource.idleTimeout", String.format("%d000", 1*60*60)); // 1 hour
// how frequently HikariCP will attempt to keep a connection alive, 0 = disabled
properties.setProperty("dataSource.keepaliveTime", String.format("%d000", 0));
// maximum lifetime of a connection in the pool, 0 = forever
properties.setProperty("dataSource.maxLifetime", String.format("%d000", 6*60*60)); // 6 hours
// minimum number of idle connections that HikariCP tries to maintain in the pool, 0 = none
properties.setProperty("dataSource.minimumIdle", "0");
// maximum size that the pool is allowed to reach, including both idle and in-use connections
properties.setProperty("dataSource.maximumPoolSize", "10");

// apply any HikariCP parameters the user may have set, overwriting defaults
properties.putAll(config.getSourceParameters());

HikariConfig hikariConfig = new HikariConfig(properties);
@@ -129,6 +155,8 @@ static HikariDataSource initDataSource(JdbcStorageConfig config) {
UsernamePasswordCredentials credentials = config.getUsernamePasswordCredentials();
hikariConfig.setUsername(credentials.getUsername());
hikariConfig.setPassword(credentials.getPassword());
// this serves as a hint to the driver, which *might* enable database optimizations
hikariConfig.setReadOnly(!config.isWritable());

return new HikariDataSource(hikariConfig);
} catch (RuntimeException e) {
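To make the N×P×Q arithmetic in the new comment concrete: one data source referenced by 4 storage configs across a 3-drillbit cluster with minimumIdle = 5 would be opened 4 × 3 × 5 = 60 times before any query arrives, whereas the new minimumIdle = 0 default opens nothing until first use. The standalone sketch below is illustrative only and not part of the commit; it shows HikariCP consuming pool settings of this kind from a Properties object. Note that HikariCP passes keys prefixed with "dataSource." through to the underlying JDBC DataSource, so the sketch uses the unprefixed pool-level names, and the in-memory H2 URL is an assumption for the example.

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import java.util.Properties;

public class LazyPoolDefaults {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Lazy defaults mirroring the ones added in this commit (values in milliseconds)
    props.setProperty("idleTimeout", "3600000");      // retire connections idle for 1 hour
    props.setProperty("keepaliveTime", "0");          // keepalive pings disabled
    props.setProperty("maxLifetime", "21600000");     // retire any connection after 6 hours
    props.setProperty("minimumIdle", "0");            // open no connections eagerly
    props.setProperty("maximumPoolSize", "10");       // cap the pool at 10 connections
    // A user's sourceParameters would be merged over these defaults, as in initDataSource()
    props.setProperty("jdbcUrl", "jdbc:h2:mem:example"); // assumed H2 driver on the classpath

    HikariConfig hikariConfig = new HikariConfig(props);
    try (HikariDataSource dataSource = new HikariDataSource(hikariConfig)) {
      // With minimumIdle = 0 the pool starts empty; the first getConnection()
      // call is what opens a physical connection.
      System.out.println("Pool started without opening any connections");
    }
  }
}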
@@ -8,6 +8,10 @@
"password": "xxx",
"caseInsensitiveTableNames": false,
"sourceParameters" : {
"idleTimeout": 3600000,
"keepaliveTime": 0,
"maxLifetime": 21600000,
"minimumIdle": 0,
"maximumPoolSize": 10
},
"enabled": false
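These documented sourceParameters mirror the new Java-side defaults exactly (times are in milliseconds: 3600000 = 1 hour, 21600000 = 6 hours), so a storage config that omits them now behaves as shown here. Users who prefer the previous eager behaviour can raise minimumIdle in sourceParameters; setting it equal to maximumPoolSize produces a fixed-size, always-connected pool, which is how HikariCP behaves by default when minimumIdle is left unset.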
@@ -18,8 +18,6 @@

package org.apache.drill.exec.store.splunk;

import com.splunk.EntityCollection;
import com.splunk.Index;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.Table;
import org.apache.drill.exec.planner.logical.DynamicDrillTable;
@@ -40,19 +38,10 @@ public class SplunkSchemaFactory extends AbstractSchemaFactory {
private static final Logger logger = LoggerFactory.getLogger(SplunkSchemaFactory.class);
private static final String SPL_TABLE_NAME = "spl";
private final SplunkStoragePlugin plugin;
private final EntityCollection<Index> indexes;

public SplunkSchemaFactory(SplunkStoragePlugin plugin) {
super(plugin.getName());
this.plugin = plugin;
SplunkPluginConfig config = plugin.getConfig();
SplunkConnection connection = new SplunkConnection(config);

// Get Splunk Indexes
connection.connect();
indexes = connection.getIndexes();
}

@Override
@@ -110,11 +99,15 @@ private void registerIndexes() {
registerTable(SPL_TABLE_NAME, new DynamicDrillTable(plugin, plugin.getName(),
new SplunkScanSpec(plugin.getName(), SPL_TABLE_NAME, plugin.getConfig())));

// Add all other indexes
for (String indexName : indexes.keySet()) {
// Retrieve and add all other Splunk indexes
SplunkPluginConfig config = plugin.getConfig();
SplunkConnection connection = new SplunkConnection(config);
connection.connect();

for (String indexName : connection.getIndexes().keySet()) {
logger.debug("Registering {}", indexName);
registerTable(indexName, new DynamicDrillTable(plugin, plugin.getName(),
new SplunkScanSpec(plugin.getName(), indexName, plugin.getConfig())));
new SplunkScanSpec(plugin.getName(), indexName, config)));
}
}
}
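The change above applies the same laziness to Splunk: the constructor no longer dials out, and registerIndexes() opens a connection only when the schema is actually resolved. Below is a schematic before/after of the pattern, using stand-in types rather than the real Splunk SDK classes; everything named Fake* is hypothetical.

import java.util.Arrays;
import java.util.List;

class LazyConnectionSketch {
  // Eager (before): every plugin instance connects at construction time
  static class EagerSchemaFactory {
    private final List<String> indexes;
    EagerSchemaFactory(FakeConnection conn) {
      conn.connect();                 // network I/O during plugin creation
      this.indexes = conn.getIndexes();
    }
  }

  // Lazy (after): the constructor only stores config; I/O is deferred
  static class LazySchemaFactory {
    private final FakeConfig config;
    LazySchemaFactory(FakeConfig config) {
      this.config = config;           // no I/O here
    }
    void registerIndexes() {
      FakeConnection conn = new FakeConnection(config);
      conn.connect();                 // runs only when a query touches this schema
      for (String index : conn.getIndexes()) {
        System.out.println("registering " + index);
      }
    }
  }

  // Minimal fake collaborators so the sketch is self-contained
  static class FakeConfig {}
  static class FakeConnection {
    FakeConnection(FakeConfig config) {}
    void connect() { /* a real implementation would open a socket */ }
    List<String> getIndexes() { return Arrays.asList("main", "_internal"); }
  }
}

As with the JDBC change, the cost is extra latency on the first query against the schema; the benefit is that creating or replicating the storage plugin is cheap and side-effect free.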
@@ -90,7 +90,7 @@ public Set<? extends RelOptRule> getPhysicalOptimizerRules(OptimizerRulesContext

/**
*
* Note: Move this method to {@link StoragePlugin} interface in next major version release.
* TODO: Move this method to {@link StoragePlugin} interface in next major version release.
*/
public Set<? extends RelOptRule> getOptimizerRules(OptimizerRulesContext optimizerContext, PlannerPhase phase) {
switch (phase) {
