// Counter bumped each time the same store address shows up in more than one
// discovery/config source; consumed by removeDuplicateEndpointSpecs below.
duplicatedStores := promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "thanos_query_duplicated_store_addresses_total",
Help: "The number of times a duplicated store addresses is detected from the different configs in query",
})
// Shared gRPC dial options (TLS material, tracing, metrics) used for every
// store/rule/target/metadata/exemplar API connection.
dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, secure, skipVerify, cert, key, caCert, serverName)
if err != nil {
return errors.Wrap(err, "building gRPC client")
}
// Cache backing file-based service discovery.
fileSDCache := cache.New()
// One DNS provider per API kind so each gets its own metric namespace
// (thanos_query_<kind>_apis_*); all share the same resolver implementation.
dnsStoreProvider := dns.NewProvider(
logger,
extprom.WrapRegistererWithPrefix("thanos_query_store_apis_", reg),
dns.ResolverType(dnsSDResolver),
)
// Strict mode demands statically resolvable endpoints: refuse any entry that
// would require dynamic (SD-based) resolution.
for _, store := range strictStores {
if dns.IsDynamicNode(store) {
return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. Use --store for this", store)
}
}
dnsRuleProvider := dns.NewProvider(
logger,
extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg),
dns.ResolverType(dnsSDResolver),
)
dnsTargetProvider := dns.NewProvider(
logger,
extprom.WrapRegistererWithPrefix("thanos_query_target_apis_", reg),
dns.ResolverType(dnsSDResolver),
)
dnsMetadataProvider := dns.NewProvider(
logger,
extprom.WrapRegistererWithPrefix("thanos_query_metadata_apis_", reg),
dns.ResolverType(dnsSDResolver),
)
dnsExemplarProvider := dns.NewProvider(
logger,
extprom.WrapRegistererWithPrefix("thanos_query_exemplar_apis_", reg),
dns.ResolverType(dnsSDResolver),
)
// Create the set of query endpoints.
// NOTE(review): this `var (` group's closing `)` is not visible in this
// excerpt — the declaration appears truncated; confirm against the full file.
var (
// endpoints aggregates every discovered gRPC endpoint (store, rule, target,
// metadata, exemplar APIs). The spec function below is re-evaluated to pick
// up discovery changes.
endpoints = query.NewEndpointSet(
logger,
reg,
func() (specs []query.EndpointSpec) {
// Add strict & static nodes.
for _, addr := range strictStores {
// true => strict/static endpoint that is never removed on failure.
specs = append(specs, query.NewGRPCEndpointSpec(addr, true))
}
// Collect dynamically resolved addresses from each DNS provider,
// de-duplicating per provider (bumps duplicatedStores on repeats).
for _, dnsProvider := range []*dns.Provider{dnsStoreProvider, dnsRuleProvider, dnsExemplarProvider, dnsMetadataProvider, dnsTargetProvider} {
var tmpSpecs []query.EndpointSpec
for _, addr := range dnsProvider.Addresses() {
tmpSpecs = append(tmpSpecs, query.NewGRPCEndpointSpec(addr, false))
}
tmpSpecs = removeDuplicateEndpointSpecs(logger, duplicatedStores, tmpSpecs)
specs = append(specs, tmpSpecs...)
}
return specs
},
dialOpts,
unhealthyStoreTimeout,
)
// Fragment of a label-names handler (enclosing function not visible here):
// with matchers, label names are derived from the matching series; without,
// the querier's full LabelNames is used.
if len(matcherSets) > 0 {
// Get all series which match matchers.
var sets []storage.SeriesSet
for _, mset := range matcherSets {
// false => no sorting required for this select; hints are nil.
s := q.Select(false, nil, mset...)
sets = append(sets, s)
}
// Extract the union of label names present in the matched series.
names, warnings, err = labelNamesByMatchers(sets)
} else {
names, warnings, err = q.LabelNames()
}
// LabelNames implements the storepb.StoreServer interface.
// It returns the sorted, de-duplicated union of label names across all blocks
// overlapping [req.Start, req.End], optionally restricted by series matchers
// and (via hints) by block matchers. Per-block work is fanned out on an
// errgroup; results are merged at the end with strutil.MergeSlices.
func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) {
// Translate protobuf series matchers; invalid matchers are a caller error.
reqSeriesMatchers, err := storepb.MatchersToPromMatchers(req.Matchers...)
if err != nil {
return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request labels matchers").Error())
}
// Response hints accumulate which blocks were actually queried.
resHints := &hintspb.LabelNamesResponseHints{}
var reqBlockMatchers []*labels.Matcher
if req.Hints != nil {
// Optional request hints may carry block matchers used to skip blocks.
reqHints := &hintspb.LabelNamesRequestHints{}
err := types.UnmarshalAny(req.Hints, reqHints)
if err != nil {
return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label names request hints").Error())
}
reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...)
if err != nil {
return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error())
}
}
g, gctx := errgroup.WithContext(ctx)
// s.mtx guards s.blocks while we iterate and spawn per-block goroutines;
// it is released right after the spawn loop (goroutines work only on their
// captured block/index reader).
s.mtx.RLock()
// mtx protects sets, which the per-block goroutines append to concurrently.
var mtx sync.Mutex
var sets [][]string
// Limits how many series the matcher path may touch; exceeding it surfaces
// via the "series" queriesDropped metric label.
var seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series"))
for _, b := range s.blocks {
// Shadow the loop variable so each goroutine captures its own block
// (required before Go 1.22 per-iteration loop variables).
b := b
// Skip blocks outside the requested time range.
if !b.overlapsClosedInterval(req.Start, req.End) {
continue
}
// Skip blocks filtered out by the hint-supplied block matchers.
if len(reqBlockMatchers) > 0 && !b.matchRelabelLabels(reqBlockMatchers) {
continue
}
resHints.AddQueriedBlock(b.meta.ULID)
// Acquire the index reader outside the goroutine so the pending reader is
// registered before the lock is released; closed in the deferred call.
indexr := b.indexReader(gctx)
g.Go(func() error {
defer runutil.CloseWithLogOnErr(s.logger, indexr, "label names")
var result []string
if len(reqSeriesMatchers) == 0 {
// Do it via index reader to have pending reader registered correctly.
// LabelNames are already sorted.
res, err := indexr.block.indexHeaderReader.LabelNames()
if err != nil {
return errors.Wrapf(err, "label names for block %s", b.meta.ULID)
}
// Add a set for the external labels as well.
// We're not adding them directly to res because there could be duplicates.
// b.extLset is already sorted by label name, no need to sort it again.
extRes := make([]string, 0, len(b.extLset))
for _, l := range b.extLset {
extRes = append(extRes, l.Name)
}
result = strutil.MergeSlices(res, extRes)
} else {
// Matcher path: resolve matching series (ignoring chunks — last nil)
// and collect the label names they carry.
seriesSet, _, err := blockSeries(b.extLset, indexr, nil, reqSeriesMatchers, nil, seriesLimiter, true, req.Start, req.End, nil)
if err != nil {
return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID)
}
// Extract label names from all series. Many label names will be the same, so we need to deduplicate them.
// Note that label names will already include external labels (passed to blockSeries), so we don't need
// to add them again.
labelNames := map[string]struct{}{}
for seriesSet.Next() {
ls, _ := seriesSet.At()
for _, l := range ls {
labelNames[l.Name] = struct{}{}
}
}
if seriesSet.Err() != nil {
return errors.Wrapf(seriesSet.Err(), "iterate series for block %s", b.meta.ULID)
}
result = make([]string, 0, len(labelNames))
for n := range labelNames {
result = append(result, n)
}
// MergeSlices at the end expects each per-block set to be sorted.
sort.Strings(result)
}
if len(result) > 0 {
mtx.Lock()
sets = append(sets, result)
mtx.Unlock()
}
return nil
})
}
s.mtx.RUnlock()
// First per-block error cancels gctx and is returned as an internal error.
if err := g.Wait(); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// Marshal the queried-blocks hints back to the client.
anyHints, err := types.MarshalAny(resHints)
if err != nil {
return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label names response hints").Error())
}
return &storepb.LabelNamesResponse{
// Merge the per-block sorted sets into one sorted, de-duplicated list.
Names: strutil.MergeSlices(sets...),
Hints: anyHints,
}, nil
}