add random resolver + tests
DerRockWolf committed Nov 4, 2023
1 parent b3af607 commit ac4b80d
Showing 6 changed files with 507 additions and 5 deletions.
2 changes: 1 addition & 1 deletion docs/config.yml
@@ -19,7 +19,7 @@ upstreams:
laptop*:
- 123.123.123.123
# optional: Determines what strategy blocky uses to choose the upstream servers.
# accepted: parallel_best, strict, random
# default: parallel_best
strategy: parallel_best
# optional: timeout to query the upstream resolver. Default: 2s
9 changes: 7 additions & 2 deletions docs/configuration.md
@@ -139,10 +139,15 @@ Blocky supports different upstream strategies (default `parallel_best`) that det

Currently available strategies:

- `parallel_best`: blocky picks 2 random (weighted) resolvers from the upstream group for each query and returns the answer from the fastest one.
  If an upstream failed to answer within the last hour, it is less likely to be chosen for the race.
  This improves your network speed and increases your privacy - your DNS traffic will be distributed over multiple providers.
  (When using 10 upstream servers, each upstream will get on average 20% of the DNS requests)
- `random`: blocky picks one random (weighted) resolver from the upstream group for each query and, if successful, returns its response.
  If the selected resolver fails to respond, a second one is picked and queried instead.
  The weighting is identical to the `parallel_best` strategy.
  Although the `random` strategy might be slower than `parallel_best`, it offers a greater privacy benefit, since each query normally reaches only a single upstream.
  (When using 10 upstream servers, each upstream will get on average 10% of the DNS requests)
- `strict`: blocky forwards the request in a strict order. If the first upstream does not respond, the second is asked, and so on.

!!! example
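    A minimal sketch of selecting the `random` strategy (upstream group definitions are elided here; the keys follow the config.yml excerpt above):

    ```yaml
    upstreams:
      # ... upstream group definitions as in the excerpt above ...
      strategy: random # instead of the default parallel_best
      timeout: 2s
    ```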
140 changes: 140 additions & 0 deletions resolver/random_resolver.go
@@ -0,0 +1,140 @@
package resolver

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/0xERR0R/blocky/config"
	"github.com/0xERR0R/blocky/log"
	"github.com/0xERR0R/blocky/model"
	"github.com/0xERR0R/blocky/util"
	"github.com/sirupsen/logrus"
)

const (
	randomResolverType = "random"
)

// RandomResolver delegates the DNS message to one random upstream resolver.
// If it can't provide the answer in time, a different resolver is chosen randomly.
// Resolvers that fail to respond get a penalty and are less likely to be chosen for the next request.
type RandomResolver struct {
	configurable[*config.UpstreamGroupConfig]
	typed

	groupName string
	resolvers []*upstreamResolverStatus
}

// NewRandomResolver creates a new random resolver instance
func NewRandomResolver(
	cfg config.UpstreamGroupConfig, bootstrap *Bootstrap, shoudVerifyUpstreams bool,
) (*RandomResolver, error) {
	logger := log.PrefixedLog(randomResolverType)

	resolvers, err := createResolvers(logger, cfg, bootstrap, shoudVerifyUpstreams)
	if err != nil {
		return nil, err
	}

	return newRandomResolver(cfg, resolvers), nil
}

func newRandomResolver(
	cfg config.UpstreamGroupConfig, resolvers []Resolver,
) *RandomResolver {
	resolverStatuses := make([]*upstreamResolverStatus, 0, len(resolvers))

	for _, r := range resolvers {
		resolverStatuses = append(resolverStatuses, newUpstreamResolverStatus(r))
	}

	r := RandomResolver{
		configurable: withConfig(&cfg),
		typed:        withType(randomResolverType),

		groupName: cfg.Name,
		resolvers: resolverStatuses,
	}

	return &r
}

func (r *RandomResolver) Name() string {
	return r.String()
}

func (r *RandomResolver) String() string {
	result := make([]string, len(r.resolvers))
	for i, s := range r.resolvers {
		result[i] = fmt.Sprintf("%s", s.resolver)
	}

	return fmt.Sprintf("%s upstreams '%s (%s)'", randomResolverType, r.groupName, strings.Join(result, ","))
}

// Resolve sends the query request to a random upstream resolver
func (r *RandomResolver) Resolve(request *model.Request) (*model.Response, error) {
	logger := log.WithPrefix(request.Log, randomResolverType)

	if len(r.resolvers) == 1 {
		logger.WithField("resolver", r.resolvers[0].resolver).Debug("delegating to resolver")

		return r.resolvers[0].resolver.Resolve(request)
	}

	timeout := config.GetConfig().Upstreams.Timeout.ToDuration()

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// first try
	r1 := weightedRandom(r.resolvers, nil)
	logger.Debugf("using %s as resolver", r1.resolver)

	// buffered channel, so the resolving goroutine can still deliver its
	// result without blocking if we stop waiting after the timeout
	ch := make(chan requestResponse, 1)

	go r1.resolve(request, ch)

	select {
	case <-ctx.Done():
		logger.WithField("resolver", r1.resolver).Debug("upstream exceeded timeout, trying other upstream")
		r1.lastErrorTime.Store(time.Now())
	case result := <-ch:
		if result.err != nil {
			logger.Debug("resolution failed from resolver, cause: ", result.err)
		} else {
			logger.WithFields(logrus.Fields{
				"resolver": *result.resolver,
				"answer":   util.AnswerToString(result.response.Res.Answer),
			}).Debug("using response from resolver")

			return result.response, nil
		}
	}

	// second try
	r2 := weightedRandom(r.resolvers, r1.resolver)
	logger.Debugf("using %s as second resolver", r2.resolver)

	ch = make(chan requestResponse, 1)

	r2.resolve(request, ch)

	result := <-ch
	if result.err != nil {
		logger.Debug("resolution failed from resolver, cause: ", result.err)

		return nil, errors.New("resolution was not successful, no resolver returned answer in time")
	}

	logger.WithFields(logrus.Fields{
		"resolver": *result.resolver,
		"answer":   util.AnswerToString(result.response.Res.Answer),
	}).Debug("using response from resolver")

	return result.response, nil
}
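The `weightedRandom` helper called above is not part of this file; it lives elsewhere in the `resolver` package and is shared with the `parallel_best` strategy. As a purely illustrative sketch (not blocky's actual implementation; the `upstream` type, the `pickWeighted` name, and the concrete weights below are made up), the error-penalty weighting works roughly like this:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// upstream is a hypothetical stand-in for blocky's upstreamResolverStatus.
type upstream struct {
	name          string
	lastErrorTime time.Time
}

// pickWeighted chooses one upstream at random, skipping `exclude` and giving
// upstreams that errored within the last hour a much smaller weight.
// It assumes at least one non-excluded candidate exists.
func pickWeighted(upstreams []*upstream, exclude *upstream) *upstream {
	const (
		healthyWeight = 60 // illustrative weights, not blocky's real values
		penaltyWeight = 1
	)

	total := 0
	weights := make([]int, len(upstreams))

	for i, u := range upstreams {
		if u == exclude {
			continue // excluded candidates keep weight 0 and are never picked
		}

		w := healthyWeight
		if time.Since(u.lastErrorTime) < time.Hour {
			w = penaltyWeight // recently failed: much less likely to be chosen
		}

		weights[i] = w
		total += w
	}

	// pick a point in the total weight range and find the upstream it falls on
	n := rand.Intn(total)
	for i, w := range weights {
		if n < w {
			return upstreams[i]
		}
		n -= w
	}

	return nil // unreachable when at least one candidate exists
}

func main() {
	ups := []*upstream{
		{name: "1.1.1.1"},
		{name: "9.9.9.9", lastErrorTime: time.Now()}, // recently failed
	}

	fmt.Println("picked:", pickWeighted(ups, nil).name)
}
```

This is the property the documentation change above relies on: upstreams that recently failed keep participating, but with a much lower probability of being picked, and the same weighting drives both the `parallel_best` and the new `random` strategy.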