package execinfrapb

v0.0.1 · Published: Mar 18, 2022 · License: Apache-2.0 · Imports: 66 · Imported by: 0

Documentation

Constants

const (
	AnyNotNull     = AggregatorSpec_ANY_NOT_NULL
	Avg            = AggregatorSpec_AVG
	BoolAnd        = AggregatorSpec_BOOL_AND
	BoolOr         = AggregatorSpec_BOOL_OR
	ConcatAgg      = AggregatorSpec_CONCAT_AGG
	Count          = AggregatorSpec_COUNT
	Max            = AggregatorSpec_MAX
	Min            = AggregatorSpec_MIN
	Stddev         = AggregatorSpec_STDDEV
	Sum            = AggregatorSpec_SUM
	SumInt         = AggregatorSpec_SUM_INT
	Variance       = AggregatorSpec_VARIANCE
	XorAgg         = AggregatorSpec_XOR_AGG
	CountRows      = AggregatorSpec_COUNT_ROWS
	Sqrdiff        = AggregatorSpec_SQRDIFF
	FinalVariance  = AggregatorSpec_FINAL_VARIANCE
	FinalVarPop    = AggregatorSpec_FINAL_VAR_POP
	FinalStddev    = AggregatorSpec_FINAL_STDDEV
	FinalStddevPop = AggregatorSpec_FINAL_STDDEV_POP
	ArrayAgg       = AggregatorSpec_ARRAY_AGG
	JSONAgg        = AggregatorSpec_JSON_AGG
	// JSONBAgg is an alias for JSONAgg; they do the same thing.
	JSONBAgg                = AggregatorSpec_JSONB_AGG
	StringAgg               = AggregatorSpec_STRING_AGG
	BitAnd                  = AggregatorSpec_BIT_AND
	BitOr                   = AggregatorSpec_BIT_OR
	Corr                    = AggregatorSpec_CORR
	PercentileDiscImpl      = AggregatorSpec_PERCENTILE_DISC_IMPL
	PercentileContImpl      = AggregatorSpec_PERCENTILE_CONT_IMPL
	JSONObjectAgg           = AggregatorSpec_JSON_OBJECT_AGG
	JSONBObjectAgg          = AggregatorSpec_JSONB_OBJECT_AGG
	VarPop                  = AggregatorSpec_VAR_POP
	StddevPop               = AggregatorSpec_STDDEV_POP
	StMakeline              = AggregatorSpec_ST_MAKELINE
	StExtent                = AggregatorSpec_ST_EXTENT
	StUnion                 = AggregatorSpec_ST_UNION
	StCollect               = AggregatorSpec_ST_COLLECT
	CovarPop                = AggregatorSpec_COVAR_POP
	CovarSamp               = AggregatorSpec_COVAR_SAMP
	RegrIntercept           = AggregatorSpec_REGR_INTERCEPT
	RegrR2                  = AggregatorSpec_REGR_R2
	RegrSlope               = AggregatorSpec_REGR_SLOPE
	RegrSxx                 = AggregatorSpec_REGR_SXX
	RegrSyy                 = AggregatorSpec_REGR_SYY
	RegrSxy                 = AggregatorSpec_REGR_SXY
	RegrCount               = AggregatorSpec_REGR_COUNT
	RegrAvgx                = AggregatorSpec_REGR_AVGX
	RegrAvgy                = AggregatorSpec_REGR_AVGY
	TransitionRegrAggregate = AggregatorSpec_TRANSITION_REGRESSION_AGGREGATE
	FinalCovarPop           = AggregatorSpec_FINAL_COVAR_POP
	FinalRegrSxx            = AggregatorSpec_FINAL_REGR_SXX
	FinalRegrSxy            = AggregatorSpec_FINAL_REGR_SXY
	FinalRegrSyy            = AggregatorSpec_FINAL_REGR_SYY
	FinalRegrAvgx           = AggregatorSpec_FINAL_REGR_AVGX
	FinalRegrAvgy           = AggregatorSpec_FINAL_REGR_AVGY
	FinalRegrIntercept      = AggregatorSpec_FINAL_REGR_INTERCEPT
	FinalRegrR2             = AggregatorSpec_FINAL_REGR_R2
	FinalRegrSlope          = AggregatorSpec_FINAL_REGR_SLOPE
	FinalCovarSamp          = AggregatorSpec_FINAL_COVAR_SAMP
	FinalCorr               = AggregatorSpec_FINAL_CORR
	FinalSqrdiff            = AggregatorSpec_FINAL_SQRDIFF
)

Prettier aliases for AggregatorSpec_Func values.

const (
	FlowIDTagKey = tracing.TagPrefix + "flowid"

	// StreamIDTagKey is the key used for stream id tags in tracing spans.
	StreamIDTagKey = tracing.TagPrefix + "streamid"

	// ProcessorIDTagKey is the key used for processor id tags in tracing spans.
	ProcessorIDTagKey = tracing.TagPrefix + "processorid"
)

FlowIDTagKey is the key used for flow id tags in tracing spans.

Variables

var (
	ErrInvalidLengthApi        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowApi          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthComponentStats        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowComponentStats          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupComponentStats = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthData        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowData          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupData = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthProcessors        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessors          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupProcessors = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthProcessorsBase        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessorsBase          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupProcessorsBase = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthProcessorsBulkIo        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessorsBulkIo          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupProcessorsBulkIo = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthProcessorsChangefeeds        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessorsChangefeeds          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupProcessorsChangefeeds = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthProcessorsSql        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessorsSql          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupProcessorsSql = fmt.Errorf("proto: unexpected end of group")
)
var (
	ErrInvalidLengthProcessorsTableStats        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowProcessorsTableStats          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupProcessorsTableStats = fmt.Errorf("proto: unexpected end of group")
)
var AggregatorSpec_Func_name = map[int32]string{
	0:  "ANY_NOT_NULL",
	1:  "AVG",
	2:  "BOOL_AND",
	3:  "BOOL_OR",
	4:  "CONCAT_AGG",
	5:  "COUNT",
	7:  "MAX",
	8:  "MIN",
	9:  "STDDEV",
	10: "SUM",
	11: "SUM_INT",
	12: "VARIANCE",
	13: "XOR_AGG",
	14: "COUNT_ROWS",
	15: "SQRDIFF",
	16: "FINAL_VARIANCE",
	17: "FINAL_STDDEV",
	18: "ARRAY_AGG",
	19: "JSON_AGG",
	20: "JSONB_AGG",
	21: "STRING_AGG",
	22: "BIT_AND",
	23: "BIT_OR",
	24: "CORR",
	25: "PERCENTILE_DISC_IMPL",
	26: "PERCENTILE_CONT_IMPL",
	27: "JSON_OBJECT_AGG",
	28: "JSONB_OBJECT_AGG",
	29: "VAR_POP",
	30: "STDDEV_POP",
	31: "ST_MAKELINE",
	32: "ST_EXTENT",
	33: "ST_UNION",
	34: "ST_COLLECT",
	35: "COVAR_POP",
	36: "COVAR_SAMP",
	37: "REGR_INTERCEPT",
	38: "REGR_R2",
	39: "REGR_SLOPE",
	40: "REGR_SXX",
	41: "REGR_SYY",
	42: "REGR_SXY",
	43: "REGR_COUNT",
	44: "REGR_AVGX",
	45: "REGR_AVGY",
	46: "FINAL_STDDEV_POP",
	47: "FINAL_VAR_POP",
	48: "TRANSITION_REGRESSION_AGGREGATE",
	49: "FINAL_COVAR_POP",
	50: "FINAL_REGR_SXX",
	51: "FINAL_REGR_SXY",
	52: "FINAL_REGR_SYY",
	53: "FINAL_REGR_AVGX",
	54: "FINAL_REGR_AVGY",
	55: "FINAL_REGR_INTERCEPT",
	56: "FINAL_REGR_R2",
	57: "FINAL_REGR_SLOPE",
	58: "FINAL_COVAR_SAMP",
	59: "FINAL_CORR",
	60: "FINAL_SQRDIFF",
}
var AggregatorSpec_Func_value = map[string]int32{
	"ANY_NOT_NULL":                    0,
	"AVG":                             1,
	"BOOL_AND":                        2,
	"BOOL_OR":                         3,
	"CONCAT_AGG":                      4,
	"COUNT":                           5,
	"MAX":                             7,
	"MIN":                             8,
	"STDDEV":                          9,
	"SUM":                             10,
	"SUM_INT":                         11,
	"VARIANCE":                        12,
	"XOR_AGG":                         13,
	"COUNT_ROWS":                      14,
	"SQRDIFF":                         15,
	"FINAL_VARIANCE":                  16,
	"FINAL_STDDEV":                    17,
	"ARRAY_AGG":                       18,
	"JSON_AGG":                        19,
	"JSONB_AGG":                       20,
	"STRING_AGG":                      21,
	"BIT_AND":                         22,
	"BIT_OR":                          23,
	"CORR":                            24,
	"PERCENTILE_DISC_IMPL":            25,
	"PERCENTILE_CONT_IMPL":            26,
	"JSON_OBJECT_AGG":                 27,
	"JSONB_OBJECT_AGG":                28,
	"VAR_POP":                         29,
	"STDDEV_POP":                      30,
	"ST_MAKELINE":                     31,
	"ST_EXTENT":                       32,
	"ST_UNION":                        33,
	"ST_COLLECT":                      34,
	"COVAR_POP":                       35,
	"COVAR_SAMP":                      36,
	"REGR_INTERCEPT":                  37,
	"REGR_R2":                         38,
	"REGR_SLOPE":                      39,
	"REGR_SXX":                        40,
	"REGR_SYY":                        41,
	"REGR_SXY":                        42,
	"REGR_COUNT":                      43,
	"REGR_AVGX":                       44,
	"REGR_AVGY":                       45,
	"FINAL_STDDEV_POP":                46,
	"FINAL_VAR_POP":                   47,
	"TRANSITION_REGRESSION_AGGREGATE": 48,
	"FINAL_COVAR_POP":                 49,
	"FINAL_REGR_SXX":                  50,
	"FINAL_REGR_SXY":                  51,
	"FINAL_REGR_SYY":                  52,
	"FINAL_REGR_AVGX":                 53,
	"FINAL_REGR_AVGY":                 54,
	"FINAL_REGR_INTERCEPT":            55,
	"FINAL_REGR_R2":                   56,
	"FINAL_REGR_SLOPE":                57,
	"FINAL_COVAR_SAMP":                58,
	"FINAL_CORR":                      59,
	"FINAL_SQRDIFF":                   60,
}
var AggregatorSpec_Type_name = map[int32]string{
	0: "AUTO",
	1: "SCALAR",
	2: "NON_SCALAR",
}
var AggregatorSpec_Type_value = map[string]int32{
	"AUTO":       0,
	"SCALAR":     1,
	"NON_SCALAR": 2,
}
var BackfillerSpec_Type_name = map[int32]string{
	0: "Invalid",
	1: "Column",
	2: "Index",
}
var BackfillerSpec_Type_value = map[string]int32{
	"Invalid": 0,
	"Column":  1,
	"Index":   2,
}
var ComponentID_Type_name = map[int32]string{
	0: "UNSET",
	1: "PROCESSOR",
	2: "STREAM",
	3: "FLOW",
}
var ComponentID_Type_value = map[string]int32{
	"UNSET":     0,
	"PROCESSOR": 1,
	"STREAM":    2,
	"FLOW":      3,
}
var InputSyncSpec_Type_name = map[int32]string{
	0: "PARALLEL_UNORDERED",
	1: "ORDERED",
	2: "SERIAL_UNORDERED",
}
var InputSyncSpec_Type_value = map[string]int32{
	"PARALLEL_UNORDERED": 0,
	"ORDERED":            1,
	"SERIAL_UNORDERED":   2,
}
var Ordering_Column_Direction_name = map[int32]string{
	0: "ASC",
	1: "DESC",
}
var Ordering_Column_Direction_value = map[string]int32{
	"ASC":  0,
	"DESC": 1,
}
var OutputRouterSpec_Type_name = map[int32]string{
	0: "PASS_THROUGH",
	1: "MIRROR",
	2: "BY_HASH",
	3: "BY_RANGE",
}
var OutputRouterSpec_Type_value = map[string]int32{
	"PASS_THROUGH": 0,
	"MIRROR":       1,
	"BY_HASH":      2,
	"BY_RANGE":     3,
}
var SketchType_name = map[int32]string{
	0: "HLL_PLUS_PLUS_V1",
}
var SketchType_value = map[string]int32{
	"HLL_PLUS_PLUS_V1": 0,
}
var StreamEndpointSpec_Type_name = map[int32]string{
	0: "LOCAL",
	1: "REMOTE",
	2: "SYNC_RESPONSE",
}
var StreamEndpointSpec_Type_value = map[string]int32{
	"LOCAL":         0,
	"REMOTE":        1,
	"SYNC_RESPONSE": 2,
}
var WindowerSpec_Frame_BoundType_name = map[int32]string{
	0: "UNBOUNDED_PRECEDING",
	1: "UNBOUNDED_FOLLOWING",
	2: "OFFSET_PRECEDING",
	3: "OFFSET_FOLLOWING",
	4: "CURRENT_ROW",
}
var WindowerSpec_Frame_BoundType_value = map[string]int32{
	"UNBOUNDED_PRECEDING": 0,
	"UNBOUNDED_FOLLOWING": 1,
	"OFFSET_PRECEDING":    2,
	"OFFSET_FOLLOWING":    3,
	"CURRENT_ROW":         4,
}
var WindowerSpec_Frame_Exclusion_name = map[int32]string{
	0: "NO_EXCLUSION",
	1: "EXCLUDE_CURRENT_ROW",
	2: "EXCLUDE_GROUP",
	3: "EXCLUDE_TIES",
}
var WindowerSpec_Frame_Exclusion_value = map[string]int32{
	"NO_EXCLUSION":        0,
	"EXCLUDE_CURRENT_ROW": 1,
	"EXCLUDE_GROUP":       2,
	"EXCLUDE_TIES":        3,
}
var WindowerSpec_Frame_Mode_name = map[int32]string{
	0: "RANGE",
	1: "ROWS",
	2: "GROUPS",
}
var WindowerSpec_Frame_Mode_value = map[string]int32{
	"RANGE":  0,
	"ROWS":   1,
	"GROUPS": 2,
}
var WindowerSpec_WindowFunc_name = map[int32]string{
	0:  "ROW_NUMBER",
	1:  "RANK",
	2:  "DENSE_RANK",
	3:  "PERCENT_RANK",
	4:  "CUME_DIST",
	5:  "NTILE",
	6:  "LAG",
	7:  "LEAD",
	8:  "FIRST_VALUE",
	9:  "LAST_VALUE",
	10: "NTH_VALUE",
}
var WindowerSpec_WindowFunc_value = map[string]int32{
	"ROW_NUMBER":   0,
	"RANK":         1,
	"DENSE_RANK":   2,
	"PERCENT_RANK": 3,
	"CUME_DIST":    4,
	"NTILE":        5,
	"LAG":          6,
	"LEAD":         7,
	"FIRST_VALUE":  8,
	"LAST_VALUE":   9,
	"NTH_VALUE":    10,
}

Functions

func ConvertToColumnOrdering

func ConvertToColumnOrdering(specOrdering Ordering) colinfo.ColumnOrdering

ConvertToColumnOrdering converts an Ordering type (as defined in data.proto) to a colinfo.ColumnOrdering type.
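
For illustration, a minimal sketch that builds an Ordering over columns 0 (ascending) and 2 (descending) and converts it; the import paths assume the upstream CockroachDB tree:

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
)

func exampleOrdering() colinfo.ColumnOrdering {
	// Order by column 0 ascending, then column 2 descending.
	ord := execinfrapb.Ordering{
		Columns: []execinfrapb.Ordering_Column{
			{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC},
			{ColIdx: 2, Direction: execinfrapb.Ordering_Column_DESC},
		},
	}
	return execinfrapb.ConvertToColumnOrdering(ord)
}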

func DeserializeExpr

func DeserializeExpr(
	expr string, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, vars *tree.IndexedVarHelper,
) (tree.TypedExpr, error)

DeserializeExpr deserializes expr, binds the indexed variables to the provided IndexedVarHelper, and evaluates any constants in the expression.

evalCtx will not be mutated.
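
A hedged sketch of typical usage; the contexts and the hypothetical helper below are assumed to be set up by the surrounding processor, and "@1" refers to the first indexed variable bound by vars:

import (
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// deserializeFilter is an illustrative helper, not part of this package.
func deserializeFilter(
	semaCtx *tree.SemaContext, evalCtx *tree.EvalContext, vars *tree.IndexedVarHelper,
) (tree.TypedExpr, error) {
	// "@1 > 10" references the first column bound by vars.
	return execinfrapb.DeserializeExpr("@1 > 10", semaCtx, evalCtx, vars)
}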

func ExprFmtCtxBase

func ExprFmtCtxBase(evalCtx *tree.EvalContext) *tree.FmtCtx

ExprFmtCtxBase produces a FmtCtx used for serializing expressions; a proper IndexedVar formatting function needs to be added on. It replaces placeholders with their values.

func ExtractNodesFromSpans

func ExtractNodesFromSpans(ctx context.Context, spans []tracingpb.RecordedSpan) util.FastIntSet

ExtractNodesFromSpans extracts a list of node ids from a set of tracing spans.

func ExtractStatsFromSpans

func ExtractStatsFromSpans(
	spans []tracingpb.RecordedSpan, makeDeterministic bool,
) map[ComponentID]*ComponentStats

ExtractStatsFromSpans extracts all ComponentStats from a set of tracing spans.
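
A short sketch combining this with ExtractNodesFromSpans over a recorded trace (import paths assume the upstream CockroachDB tree):

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
)

func summarizeTrace(ctx context.Context, rec []tracingpb.RecordedSpan) {
	nodes := execinfrapb.ExtractNodesFromSpans(ctx, rec)
	stats := execinfrapb.ExtractStatsFromSpans(rec, false /* makeDeterministic */)
	fmt.Printf("nodes: %s, components with stats: %d\n", nodes, len(stats))
}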

func GeneratePlanDiagramURL

func GeneratePlanDiagramURL(
	sql string, flows map[base.SQLInstanceID]*FlowSpec, flags DiagramFlags,
) (string, url.URL, error)

GeneratePlanDiagramURL generates the json data for a flow diagram and a URL which encodes the diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.
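
For illustration, a minimal sketch that discards the JSON and returns only the URL, assuming the zero DiagramFlags value requests default rendering:

import (
	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
)

func diagramURL(sql string, flows map[base.SQLInstanceID]*execinfrapb.FlowSpec) (string, error) {
	_, u, err := execinfrapb.GeneratePlanDiagramURL(sql, flows, execinfrapb.DiagramFlags{})
	if err != nil {
		return "", err
	}
	return u.String(), nil
}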

func GetAggregateFuncIdx

func GetAggregateFuncIdx(funcName string) (int32, error)

GetAggregateFuncIdx converts the aggregate function name to the enum value with the same string representation.
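
For example, looking up SUM by its enum string representation (a minimal sketch):

import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"

func sumFuncIdx() (execinfrapb.AggregatorSpec_Func, error) {
	// "SUM" matches the string representation of AggregatorSpec_SUM.
	idx, err := execinfrapb.GetAggregateFuncIdx("SUM")
	if err != nil {
		return 0, err
	}
	return execinfrapb.AggregatorSpec_Func(idx), nil
}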

func GetWindowFuncIdx

func GetWindowFuncIdx(funcName string) (int32, error)

GetWindowFuncIdx converts the window function name to the enum value with the same string representation.

func GetWindowFunctionInfo

func GetWindowFunctionInfo(
	fn WindowerSpec_Func, inputTypes ...*types.T,
) (windowConstructor func(*tree.EvalContext) tree.WindowFunc, returnType *types.T, err error)

GetWindowFunctionInfo returns the windowFunc constructor and the return type when the given fn is applied to the given inputTypes.

func RegisterDistSQLServer

func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)

func RunFilter

func RunFilter(filter tree.TypedExpr, evalCtx *tree.EvalContext) (bool, error)

RunFilter runs a filter expression and returns whether the filter passes.
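
A hedged sketch of per-row use; the filter is assumed to be already deserialized and bound to the current row, and the helper name is illustrative:

import (
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// keepRow is an illustrative helper: a nil filter keeps every row.
func keepRow(filter tree.TypedExpr, evalCtx *tree.EvalContext) (bool, error) {
	if filter == nil {
		return true, nil
	}
	return execinfrapb.RunFilter(filter, evalCtx)
}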

Types

type AggregateConstructor

type AggregateConstructor func(*tree.EvalContext, tree.Datums) tree.AggregateFunc

AggregateConstructor is a function that creates an aggregate function.

func GetAggregateConstructor

func GetAggregateConstructor(
	evalCtx *tree.EvalContext,
	semaCtx *tree.SemaContext,
	aggInfo *AggregatorSpec_Aggregation,
	inputTypes []*types.T,
) (constructor AggregateConstructor, arguments tree.Datums, outputType *types.T, err error)

GetAggregateConstructor processes the specification of a single aggregate function.

evalCtx will not be mutated.
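
A minimal sketch of wiring the result into execution (the helper name is illustrative):

import (
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/types"
)

func buildAggregateFunc(
	evalCtx *tree.EvalContext,
	semaCtx *tree.SemaContext,
	aggInfo *execinfrapb.AggregatorSpec_Aggregation,
	inputTypes []*types.T,
) (tree.AggregateFunc, error) {
	constructor, arguments, _, err := execinfrapb.GetAggregateConstructor(
		evalCtx, semaCtx, aggInfo, inputTypes,
	)
	if err != nil {
		return nil, err
	}
	// The constructor is invoked once per group; arguments carries any
	// constant arguments from the spec.
	return constructor(evalCtx, arguments), nil
}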

func GetAggregateInfo

func GetAggregateInfo(
	fn AggregatorSpec_Func, inputTypes ...*types.T,
) (aggregateConstructor AggregateConstructor, returnType *types.T, err error)

GetAggregateInfo returns the aggregate constructor and the return type for the given aggregate function when applied to the given types.

type AggregatorSpec

type AggregatorSpec struct {
	// The group key is a subset of the columns in the input stream schema on the
	// basis of which we define our groups.
	GroupCols    []uint32                     `protobuf:"varint,2,rep,packed,name=group_cols,json=groupCols" json:"group_cols,omitempty"`
	Aggregations []AggregatorSpec_Aggregation `protobuf:"bytes,3,rep,name=aggregations" json:"aggregations"`
	// A subset of the GROUP BY columns which are ordered in the input.
	OrderedGroupCols []uint32            `protobuf:"varint,4,rep,packed,name=ordered_group_cols,json=orderedGroupCols" json:"ordered_group_cols,omitempty"`
	Type             AggregatorSpec_Type `protobuf:"varint,5,opt,name=type,enum=cockroach.sql.distsqlrun.AggregatorSpec_Type" json:"type"`
	// OutputOrdering specifies the required ordering of the output produced by
	// the aggregator. The input to the processor *must* already be ordered
	// according to it.
	OutputOrdering Ordering `protobuf:"bytes,6,opt,name=output_ordering,json=outputOrdering" json:"output_ordering"`
}

AggregatorSpec is the specification for an "aggregator" (processor core type, not the logical plan computation stage). An aggregator performs 'aggregation' in the SQL sense in that it groups rows and computes an aggregate for each group. The group is configured using the group key. The aggregator can be configured with one or more aggregation functions.

The "internal columns" of an Aggregator map 1-1 to the aggregations.

func (*AggregatorSpec) Descriptor

func (*AggregatorSpec) Descriptor() ([]byte, []int)

func (*AggregatorSpec) IsRowCount

func (spec *AggregatorSpec) IsRowCount() bool

IsRowCount returns true if the aggregator spec is scalar and has a single COUNT_ROWS aggregation with no FILTER or DISTINCT.

func (*AggregatorSpec) IsScalar

func (spec *AggregatorSpec) IsScalar() bool

IsScalar returns whether the aggregate function is in scalar context.

func (*AggregatorSpec) Marshal

func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)

func (*AggregatorSpec) MarshalTo

func (m *AggregatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*AggregatorSpec) MarshalToSizedBuffer

func (m *AggregatorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*AggregatorSpec) ProtoMessage

func (*AggregatorSpec) ProtoMessage()

func (*AggregatorSpec) Reset

func (m *AggregatorSpec) Reset()

func (*AggregatorSpec) Size

func (m *AggregatorSpec) Size() (n int)

func (*AggregatorSpec) String

func (m *AggregatorSpec) String() string

func (*AggregatorSpec) Unmarshal

func (m *AggregatorSpec) Unmarshal(dAtA []byte) error

func (*AggregatorSpec) XXX_DiscardUnknown

func (m *AggregatorSpec) XXX_DiscardUnknown()

func (*AggregatorSpec) XXX_Marshal

func (m *AggregatorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AggregatorSpec) XXX_Merge

func (m *AggregatorSpec) XXX_Merge(src proto.Message)

func (*AggregatorSpec) XXX_Size

func (m *AggregatorSpec) XXX_Size() int

func (*AggregatorSpec) XXX_Unmarshal

func (m *AggregatorSpec) XXX_Unmarshal(b []byte) error

type AggregatorSpec_Aggregation

type AggregatorSpec_Aggregation struct {
	Func AggregatorSpec_Func `protobuf:"varint,1,opt,name=func,enum=cockroach.sql.distsqlrun.AggregatorSpec_Func" json:"func"`
	// Aggregation functions with distinct = true behave as you would
	// expect '<FUNC> DISTINCT' to operate; the default behavior is
	// the '<FUNC> ALL' operation.
	Distinct bool `protobuf:"varint,2,opt,name=distinct" json:"distinct"`
	// The column index specifies the argument(s) to the aggregator function.
	//
	// Most aggregations take one argument.
	// COUNT_ROWS takes no arguments.
	// FINAL_STDDEV and FINAL_VARIANCE take three arguments (SQRDIFF, SUM,
	// COUNT).
	ColIdx []uint32 `protobuf:"varint,5,rep,name=col_idx,json=colIdx" json:"col_idx,omitempty"`
	// If set, this column index specifies a boolean argument; rows for which
	// this value is not true don't contribute to this aggregation. This enables
	// the filter clause, e.g.:
	//   SELECT SUM(x) FILTER (WHERE y > 1), SUM(x) FILTER (WHERE y < 1) FROM t
	FilterColIdx *uint32 `protobuf:"varint,4,opt,name=filter_col_idx,json=filterColIdx" json:"filter_col_idx,omitempty"`
	// Arguments are const expressions passed to aggregation functions.
	Arguments []Expression `protobuf:"bytes,6,rep,name=arguments" json:"arguments"`
}

func (*AggregatorSpec_Aggregation) Descriptor

func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)

func (AggregatorSpec_Aggregation) Equals

func (a AggregatorSpec_Aggregation) Equals(b AggregatorSpec_Aggregation) bool

Equals returns true if two aggregation specifiers are identical (and thus will always yield the same result).

func (*AggregatorSpec_Aggregation) Marshal

func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)

func (*AggregatorSpec_Aggregation) MarshalTo

func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)

func (*AggregatorSpec_Aggregation) MarshalToSizedBuffer

func (m *AggregatorSpec_Aggregation) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*AggregatorSpec_Aggregation) ProtoMessage

func (*AggregatorSpec_Aggregation) ProtoMessage()

func (*AggregatorSpec_Aggregation) Reset

func (m *AggregatorSpec_Aggregation) Reset()

func (*AggregatorSpec_Aggregation) Size

func (m *AggregatorSpec_Aggregation) Size() (n int)

func (*AggregatorSpec_Aggregation) String

func (m *AggregatorSpec_Aggregation) String() string

func (*AggregatorSpec_Aggregation) Unmarshal

func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error

func (*AggregatorSpec_Aggregation) XXX_DiscardUnknown

func (m *AggregatorSpec_Aggregation) XXX_DiscardUnknown()

func (*AggregatorSpec_Aggregation) XXX_Marshal

func (m *AggregatorSpec_Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AggregatorSpec_Aggregation) XXX_Merge

func (m *AggregatorSpec_Aggregation) XXX_Merge(src proto.Message)

func (*AggregatorSpec_Aggregation) XXX_Size

func (m *AggregatorSpec_Aggregation) XXX_Size() int

func (*AggregatorSpec_Aggregation) XXX_Unmarshal

func (m *AggregatorSpec_Aggregation) XXX_Unmarshal(b []byte) error

type AggregatorSpec_Func

type AggregatorSpec_Func int32

These mirror the aggregate functions supported by sql/parser. See sql/parser/aggregate_builtins.go.

const (
	AggregatorSpec_ANY_NOT_NULL   AggregatorSpec_Func = 0
	AggregatorSpec_AVG            AggregatorSpec_Func = 1
	AggregatorSpec_BOOL_AND       AggregatorSpec_Func = 2
	AggregatorSpec_BOOL_OR        AggregatorSpec_Func = 3
	AggregatorSpec_CONCAT_AGG     AggregatorSpec_Func = 4
	AggregatorSpec_COUNT          AggregatorSpec_Func = 5
	AggregatorSpec_MAX            AggregatorSpec_Func = 7
	AggregatorSpec_MIN            AggregatorSpec_Func = 8
	AggregatorSpec_STDDEV         AggregatorSpec_Func = 9
	AggregatorSpec_SUM            AggregatorSpec_Func = 10
	AggregatorSpec_SUM_INT        AggregatorSpec_Func = 11
	AggregatorSpec_VARIANCE       AggregatorSpec_Func = 12
	AggregatorSpec_XOR_AGG        AggregatorSpec_Func = 13
	AggregatorSpec_COUNT_ROWS     AggregatorSpec_Func = 14
	AggregatorSpec_SQRDIFF        AggregatorSpec_Func = 15
	AggregatorSpec_FINAL_VARIANCE AggregatorSpec_Func = 16
	AggregatorSpec_FINAL_STDDEV   AggregatorSpec_Func = 17
	AggregatorSpec_ARRAY_AGG      AggregatorSpec_Func = 18
	AggregatorSpec_JSON_AGG       AggregatorSpec_Func = 19
	// JSONB_AGG is an alias for JSON_AGG; they do the same thing.
	AggregatorSpec_JSONB_AGG                       AggregatorSpec_Func = 20
	AggregatorSpec_STRING_AGG                      AggregatorSpec_Func = 21
	AggregatorSpec_BIT_AND                         AggregatorSpec_Func = 22
	AggregatorSpec_BIT_OR                          AggregatorSpec_Func = 23
	AggregatorSpec_CORR                            AggregatorSpec_Func = 24
	AggregatorSpec_PERCENTILE_DISC_IMPL            AggregatorSpec_Func = 25
	AggregatorSpec_PERCENTILE_CONT_IMPL            AggregatorSpec_Func = 26
	AggregatorSpec_JSON_OBJECT_AGG                 AggregatorSpec_Func = 27
	AggregatorSpec_JSONB_OBJECT_AGG                AggregatorSpec_Func = 28
	AggregatorSpec_VAR_POP                         AggregatorSpec_Func = 29
	AggregatorSpec_STDDEV_POP                      AggregatorSpec_Func = 30
	AggregatorSpec_ST_MAKELINE                     AggregatorSpec_Func = 31
	AggregatorSpec_ST_EXTENT                       AggregatorSpec_Func = 32
	AggregatorSpec_ST_UNION                        AggregatorSpec_Func = 33
	AggregatorSpec_ST_COLLECT                      AggregatorSpec_Func = 34
	AggregatorSpec_COVAR_POP                       AggregatorSpec_Func = 35
	AggregatorSpec_COVAR_SAMP                      AggregatorSpec_Func = 36
	AggregatorSpec_REGR_INTERCEPT                  AggregatorSpec_Func = 37
	AggregatorSpec_REGR_R2                         AggregatorSpec_Func = 38
	AggregatorSpec_REGR_SLOPE                      AggregatorSpec_Func = 39
	AggregatorSpec_REGR_SXX                        AggregatorSpec_Func = 40
	AggregatorSpec_REGR_SYY                        AggregatorSpec_Func = 41
	AggregatorSpec_REGR_SXY                        AggregatorSpec_Func = 42
	AggregatorSpec_REGR_COUNT                      AggregatorSpec_Func = 43
	AggregatorSpec_REGR_AVGX                       AggregatorSpec_Func = 44
	AggregatorSpec_REGR_AVGY                       AggregatorSpec_Func = 45
	AggregatorSpec_FINAL_STDDEV_POP                AggregatorSpec_Func = 46
	AggregatorSpec_FINAL_VAR_POP                   AggregatorSpec_Func = 47
	AggregatorSpec_TRANSITION_REGRESSION_AGGREGATE AggregatorSpec_Func = 48
	AggregatorSpec_FINAL_COVAR_POP                 AggregatorSpec_Func = 49
	AggregatorSpec_FINAL_REGR_SXX                  AggregatorSpec_Func = 50
	AggregatorSpec_FINAL_REGR_SXY                  AggregatorSpec_Func = 51
	AggregatorSpec_FINAL_REGR_SYY                  AggregatorSpec_Func = 52
	AggregatorSpec_FINAL_REGR_AVGX                 AggregatorSpec_Func = 53
	AggregatorSpec_FINAL_REGR_AVGY                 AggregatorSpec_Func = 54
	AggregatorSpec_FINAL_REGR_INTERCEPT            AggregatorSpec_Func = 55
	AggregatorSpec_FINAL_REGR_R2                   AggregatorSpec_Func = 56
	AggregatorSpec_FINAL_REGR_SLOPE                AggregatorSpec_Func = 57
	AggregatorSpec_FINAL_COVAR_SAMP                AggregatorSpec_Func = 58
	AggregatorSpec_FINAL_CORR                      AggregatorSpec_Func = 59
	AggregatorSpec_FINAL_SQRDIFF                   AggregatorSpec_Func = 60
)

func (AggregatorSpec_Func) Enum

func (x AggregatorSpec_Func) Enum() *AggregatorSpec_Func

func (AggregatorSpec_Func) EnumDescriptor

func (AggregatorSpec_Func) EnumDescriptor() ([]byte, []int)

func (AggregatorSpec_Func) String

func (x AggregatorSpec_Func) String() string

func (*AggregatorSpec_Func) UnmarshalJSON

func (x *AggregatorSpec_Func) UnmarshalJSON(data []byte) error

type AggregatorSpec_Type

type AggregatorSpec_Type int32
const (
	// This setting exists just for backwards compatibility; it's equivalent to
	// SCALAR when there are no grouping columns, and to NON_SCALAR when there
	// are grouping columns.
	AggregatorSpec_AUTO AggregatorSpec_Type = 0
	// A scalar aggregation has no grouping columns and always returns one
	// result row.
	AggregatorSpec_SCALAR AggregatorSpec_Type = 1
	// A non-scalar aggregation returns no rows if there are no input rows; it
	// may or may not have grouping columns.
	AggregatorSpec_NON_SCALAR AggregatorSpec_Type = 2
)

func (AggregatorSpec_Type) Enum

func (x AggregatorSpec_Type) Enum() *AggregatorSpec_Type

func (AggregatorSpec_Type) EnumDescriptor

func (AggregatorSpec_Type) EnumDescriptor() ([]byte, []int)

func (AggregatorSpec_Type) String

func (x AggregatorSpec_Type) String() string

func (*AggregatorSpec_Type) UnmarshalJSON

func (x *AggregatorSpec_Type) UnmarshalJSON(data []byte) error

type BackfillerSpec

type BackfillerSpec struct {
	JobID int64                  `protobuf:"varint,13,opt,name=job_id,json=jobId" json:"job_id"`
	Type  BackfillerSpec_Type    `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.BackfillerSpec_Type" json:"type"`
	Table descpb.TableDescriptor `protobuf:"bytes,2,opt,name=table" json:"table"`
	Spans []roachpb.Span         `protobuf:"bytes,10,rep,name=spans" json:"spans"`
	// Run the backfill for approximately this duration.
	// The backfill will always process at least one backfill chunk.
	Duration time.Duration `protobuf:"varint,4,opt,name=duration,casttype=time.Duration" json:"duration"`
	// The backfill involves a complete table scan in chunks,
	// where each chunk is a transactional read of a set of rows
	// along with a backfill for the rows. This is the maximum number
	// of entries backfilled per chunk.
	ChunkSize int64 `protobuf:"varint,5,opt,name=chunk_size,json=chunkSize" json:"chunk_size"`
	// WriteAsOf is the time that the backfill entries should be written.
	// Note: Older nodes may also use this as the read time instead of readAsOf.
	WriteAsOf hlc.Timestamp `protobuf:"bytes,7,opt,name=writeAsOf" json:"writeAsOf"`
	// The timestamp to perform index backfill historical scans at.
	ReadAsOf hlc.Timestamp `protobuf:"bytes,9,opt,name=readAsOf" json:"readAsOf"`
	// IndexesToBackfill is the set of indexes to backfill. This is populated
	// only starting in 21.1; prior to that, the implied index set is the set of
	// indexes containing the mutation ID of the first mutation on the table
	// descriptor.
	IndexesToBackfill []github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.IndexID `` /* 165-byte string literal not displayed */
	InitialSplits     int32                                                             `protobuf:"varint,11,opt,name=initial_splits,json=initialSplits" json:"initial_splits"`
	// WriteAtBatchTimestamp will write the SST MVCC timestamps at the batch
	// timestamp, even if the request gets pushed server-side. This ensures the
	// writes comply with the timestamp cache and closed timestamp. See also
	// AddSSTableRequest.SSTTimestampToRequestTimestamp.
	//
	// Note that older nodes do not respect this flag so callers should
	// check MVCCAddSSTable before setting this option.
	WriteAtBatchTimestamp bool `protobuf:"varint,12,opt,name=write_at_batch_timestamp,json=writeAtBatchTimestamp" json:"write_at_batch_timestamp"`
}

BackfillerSpec is the specification for a "schema change backfiller". The created backfill processor runs a backfill for the first mutations in the table descriptor mutation list with the same mutation id and type. A backfiller processor performs KV operations to retrieve rows for a table and backfills the new indexes/columns contained in the table descriptor. It checkpoints its progress by updating the table descriptor in the database; it emits no rows and supports no post-processing.

func (*BackfillerSpec) Descriptor

func (*BackfillerSpec) Descriptor() ([]byte, []int)

func (*BackfillerSpec) Marshal

func (m *BackfillerSpec) Marshal() (dAtA []byte, err error)

func (*BackfillerSpec) MarshalTo

func (m *BackfillerSpec) MarshalTo(dAtA []byte) (int, error)

func (*BackfillerSpec) MarshalToSizedBuffer

func (m *BackfillerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*BackfillerSpec) ProtoMessage

func (*BackfillerSpec) ProtoMessage()

func (*BackfillerSpec) Reset

func (m *BackfillerSpec) Reset()

func (*BackfillerSpec) Size

func (m *BackfillerSpec) Size() (n int)

func (*BackfillerSpec) String

func (m *BackfillerSpec) String() string

func (*BackfillerSpec) Unmarshal

func (m *BackfillerSpec) Unmarshal(dAtA []byte) error

func (*BackfillerSpec) XXX_DiscardUnknown

func (m *BackfillerSpec) XXX_DiscardUnknown()

func (*BackfillerSpec) XXX_Marshal

func (m *BackfillerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BackfillerSpec) XXX_Merge

func (m *BackfillerSpec) XXX_Merge(src proto.Message)

func (*BackfillerSpec) XXX_Size

func (m *BackfillerSpec) XXX_Size() int

func (*BackfillerSpec) XXX_Unmarshal

func (m *BackfillerSpec) XXX_Unmarshal(b []byte) error

type BackfillerSpec_Type

type BackfillerSpec_Type int32
const (
	BackfillerSpec_Invalid BackfillerSpec_Type = 0
	BackfillerSpec_Column  BackfillerSpec_Type = 1
	BackfillerSpec_Index   BackfillerSpec_Type = 2
)

func (BackfillerSpec_Type) Enum

func (x BackfillerSpec_Type) Enum() *BackfillerSpec_Type

func (BackfillerSpec_Type) EnumDescriptor

func (BackfillerSpec_Type) EnumDescriptor() ([]byte, []int)

func (BackfillerSpec_Type) String

func (x BackfillerSpec_Type) String() string

func (*BackfillerSpec_Type) UnmarshalJSON

func (x *BackfillerSpec_Type) UnmarshalJSON(data []byte) error

type BackupDataSpec

type BackupDataSpec struct {
	JobID            int64                          `protobuf:"varint,11,opt,name=job_id,json=jobId" json:"job_id"`
	Spans            []roachpb.Span                 `protobuf:"bytes,1,rep,name=spans" json:"spans"`
	IntroducedSpans  []roachpb.Span                 `protobuf:"bytes,2,rep,name=introduced_spans,json=introducedSpans" json:"introduced_spans"`
	DefaultURI       string                         `protobuf:"bytes,3,opt,name=default_uri,json=defaultUri" json:"default_uri"`
	URIsByLocalityKV map[string]string              `` /* 180-byte string literal not displayed */
	MVCCFilter       roachpb.MVCCFilter             `protobuf:"varint,5,opt,name=mvcc_filter,json=mvccFilter,enum=cockroach.roachpb.MVCCFilter" json:"mvcc_filter"`
	Encryption       *roachpb.FileEncryptionOptions `protobuf:"bytes,6,opt,name=encryption" json:"encryption,omitempty"`
	BackupStartTime  hlc.Timestamp                  `protobuf:"bytes,7,opt,name=backup_start_time,json=backupStartTime" json:"backup_start_time"`
	BackupEndTime    hlc.Timestamp                  `protobuf:"bytes,8,opt,name=backup_end_time,json=backupEndTime" json:"backup_end_time"`
	// PKIDs is used to convert result from an ExportRequest into row count
	// information passed back to track progress in the backup job.
	PKIDs map[uint64]bool `` /* 145-byte string literal not displayed */
	// User who initiated the backup. This is used to check access privileges
	// when using FileTable ExternalStorage.
	UserProto github_com_cockroachdb_cockroach_pkg_security.SQLUsernameProto `` /* 128-byte string literal not displayed */
}

func (*BackupDataSpec) Descriptor

func (*BackupDataSpec) Descriptor() ([]byte, []int)

func (*BackupDataSpec) Marshal

func (m *BackupDataSpec) Marshal() (dAtA []byte, err error)

func (*BackupDataSpec) MarshalTo

func (m *BackupDataSpec) MarshalTo(dAtA []byte) (int, error)

func (*BackupDataSpec) MarshalToSizedBuffer

func (m *BackupDataSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*BackupDataSpec) ProtoMessage

func (*BackupDataSpec) ProtoMessage()

func (*BackupDataSpec) Reset

func (m *BackupDataSpec) Reset()

func (*BackupDataSpec) Size

func (m *BackupDataSpec) Size() (n int)

func (*BackupDataSpec) String

func (m *BackupDataSpec) String() string

func (*BackupDataSpec) Unmarshal

func (m *BackupDataSpec) Unmarshal(dAtA []byte) error

func (*BackupDataSpec) User

func (spec *BackupDataSpec) User() security.SQLUsername

User accesses the user field.

func (*BackupDataSpec) XXX_DiscardUnknown

func (m *BackupDataSpec) XXX_DiscardUnknown()

func (*BackupDataSpec) XXX_Marshal

func (m *BackupDataSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BackupDataSpec) XXX_Merge

func (m *BackupDataSpec) XXX_Merge(src proto.Message)

func (*BackupDataSpec) XXX_Size

func (m *BackupDataSpec) XXX_Size() int

func (*BackupDataSpec) XXX_Unmarshal

func (m *BackupDataSpec) XXX_Unmarshal(b []byte) error

type BulkRowWriterSpec

type BulkRowWriterSpec struct {
	Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
}

BulkRowWriterSpec is the specification for a processor that consumes rows and writes them to a target table using AddSSTable. It outputs a BulkOpSummary.

func (*BulkRowWriterSpec) Descriptor

func (*BulkRowWriterSpec) Descriptor() ([]byte, []int)

func (*BulkRowWriterSpec) Marshal

func (m *BulkRowWriterSpec) Marshal() (dAtA []byte, err error)

func (*BulkRowWriterSpec) MarshalTo

func (m *BulkRowWriterSpec) MarshalTo(dAtA []byte) (int, error)

func (*BulkRowWriterSpec) MarshalToSizedBuffer

func (m *BulkRowWriterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*BulkRowWriterSpec) ProtoMessage

func (*BulkRowWriterSpec) ProtoMessage()

func (*BulkRowWriterSpec) Reset

func (m *BulkRowWriterSpec) Reset()

func (*BulkRowWriterSpec) Size

func (m *BulkRowWriterSpec) Size() (n int)

func (*BulkRowWriterSpec) String

func (m *BulkRowWriterSpec) String() string

func (*BulkRowWriterSpec) Unmarshal

func (m *BulkRowWriterSpec) Unmarshal(dAtA []byte) error

func (*BulkRowWriterSpec) XXX_DiscardUnknown

func (m *BulkRowWriterSpec) XXX_DiscardUnknown()

func (*BulkRowWriterSpec) XXX_Marshal

func (m *BulkRowWriterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BulkRowWriterSpec) XXX_Merge

func (m *BulkRowWriterSpec) XXX_Merge(src proto.Message)

func (*BulkRowWriterSpec) XXX_Size

func (m *BulkRowWriterSpec) XXX_Size() int

func (*BulkRowWriterSpec) XXX_Unmarshal

func (m *BulkRowWriterSpec) XXX_Unmarshal(b []byte) error

type CancelDeadFlowsRequest

type CancelDeadFlowsRequest struct {
	// FlowIDs is a list of IDs of flows to be canceled if they are currently in
	// the queue to be scheduled. If a particular flow is not found for any reason
	// in the queue (i.e. it has already completed or is currently running), such
	// flow is ignored.
	FlowIDs []FlowID `protobuf:"bytes,1,rep,name=flow_ids,json=flowIds,customtype=FlowID" json:"flow_ids"`
}

CancelDeadFlowsRequest is a request to cancel some flows that have been scheduled to run on the receiving node but haven't been started yet (i.e. they are in the queue to be run). This request originates on the node that performed the SetupFlow RPC to set up the flows mentioned in the request.
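
A minimal sketch of constructing such a request; flowID is assumed to come from an earlier SetupFlow call:

import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"

func cancelRequest(flowID execinfrapb.FlowID) *execinfrapb.CancelDeadFlowsRequest {
	return &execinfrapb.CancelDeadFlowsRequest{
		FlowIDs: []execinfrapb.FlowID{flowID},
	}
}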

func (*CancelDeadFlowsRequest) Descriptor

func (*CancelDeadFlowsRequest) Descriptor() ([]byte, []int)

func (*CancelDeadFlowsRequest) Marshal

func (m *CancelDeadFlowsRequest) Marshal() (dAtA []byte, err error)

func (*CancelDeadFlowsRequest) MarshalTo

func (m *CancelDeadFlowsRequest) MarshalTo(dAtA []byte) (int, error)

func (*CancelDeadFlowsRequest) MarshalToSizedBuffer

func (m *CancelDeadFlowsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*CancelDeadFlowsRequest) ProtoMessage

func (*CancelDeadFlowsRequest) ProtoMessage()

func (*CancelDeadFlowsRequest) Reset

func (m *CancelDeadFlowsRequest) Reset()

func (*CancelDeadFlowsRequest) Size

func (m *CancelDeadFlowsRequest) Size() (n int)

func (*CancelDeadFlowsRequest) String

func (m *CancelDeadFlowsRequest) String() string

func (*CancelDeadFlowsRequest) Unmarshal

func (m *CancelDeadFlowsRequest) Unmarshal(dAtA []byte) error

func (*CancelDeadFlowsRequest) XXX_DiscardUnknown

func (m *CancelDeadFlowsRequest) XXX_DiscardUnknown()

func (*CancelDeadFlowsRequest) XXX_Marshal

func (m *CancelDeadFlowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CancelDeadFlowsRequest) XXX_Merge

func (m *CancelDeadFlowsRequest) XXX_Merge(src proto.Message)

func (*CancelDeadFlowsRequest) XXX_Size

func (m *CancelDeadFlowsRequest) XXX_Size() int

func (*CancelDeadFlowsRequest) XXX_Unmarshal

func (m *CancelDeadFlowsRequest) XXX_Unmarshal(b []byte) error

type ChangeAggregatorSpec

type ChangeAggregatorSpec struct {
	Watches []ChangeAggregatorSpec_Watch `protobuf:"bytes,1,rep,name=watches" json:"watches"`
	// Change aggregator checkpoint
	Checkpoint ChangeAggregatorSpec_Checkpoint `protobuf:"bytes,5,opt,name=checkpoint" json:"checkpoint"`
	// Feed is the specification for this changefeed.
	Feed jobspb.ChangefeedDetails `protobuf:"bytes,2,opt,name=feed" json:"feed"`
	// User who initiated the changefeed. This is used to check access privileges
	// when using FileTable ExternalStorage.
	UserProto github_com_cockroachdb_cockroach_pkg_security.SQLUsernameProto `` /* 127-byte string literal not displayed */
	// JobID is the id of this changefeed in the system jobs.
	JobID github_com_cockroachdb_cockroach_pkg_jobs_jobspb.JobID `protobuf:"varint,4,opt,name=job_id,json=jobId,casttype=sqlfmt/cockroach/pkg/jobs/jobspb.JobID" json:"job_id"`
}

ChangeAggregatorSpec is the specification for a processor that watches for changes in a set of spans. Each span may cross multiple ranges.

func (*ChangeAggregatorSpec) Descriptor

func (*ChangeAggregatorSpec) Descriptor() ([]byte, []int)

func (*ChangeAggregatorSpec) Marshal

func (m *ChangeAggregatorSpec) Marshal() (dAtA []byte, err error)

func (*ChangeAggregatorSpec) MarshalTo

func (m *ChangeAggregatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec) MarshalToSizedBuffer

func (m *ChangeAggregatorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec) ProtoMessage

func (*ChangeAggregatorSpec) ProtoMessage()

func (*ChangeAggregatorSpec) Reset

func (m *ChangeAggregatorSpec) Reset()

func (*ChangeAggregatorSpec) Size

func (m *ChangeAggregatorSpec) Size() (n int)

func (*ChangeAggregatorSpec) String

func (m *ChangeAggregatorSpec) String() string

func (*ChangeAggregatorSpec) Unmarshal

func (m *ChangeAggregatorSpec) Unmarshal(dAtA []byte) error

func (*ChangeAggregatorSpec) User

func (spec *ChangeAggregatorSpec) User() security.SQLUsername

User accesses the user field.

func (*ChangeAggregatorSpec) XXX_DiscardUnknown

func (m *ChangeAggregatorSpec) XXX_DiscardUnknown()

func (*ChangeAggregatorSpec) XXX_Marshal

func (m *ChangeAggregatorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeAggregatorSpec) XXX_Merge

func (m *ChangeAggregatorSpec) XXX_Merge(src proto.Message)

func (*ChangeAggregatorSpec) XXX_Size

func (m *ChangeAggregatorSpec) XXX_Size() int

func (*ChangeAggregatorSpec) XXX_Unmarshal

func (m *ChangeAggregatorSpec) XXX_Unmarshal(b []byte) error

type ChangeAggregatorSpec_Checkpoint

type ChangeAggregatorSpec_Checkpoint struct {
	// The checkpoint is currently applicable only to the spans which have
	// already been backfilled.
	Spans []roachpb.Span `protobuf:"bytes,1,rep,name=spans" json:"spans"`
}

Checkpoint enables change aggregators to resume faster on restart.

func (*ChangeAggregatorSpec_Checkpoint) Descriptor

func (*ChangeAggregatorSpec_Checkpoint) Descriptor() ([]byte, []int)

func (*ChangeAggregatorSpec_Checkpoint) Marshal

func (m *ChangeAggregatorSpec_Checkpoint) Marshal() (dAtA []byte, err error)

func (*ChangeAggregatorSpec_Checkpoint) MarshalTo

func (m *ChangeAggregatorSpec_Checkpoint) MarshalTo(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec_Checkpoint) MarshalToSizedBuffer

func (m *ChangeAggregatorSpec_Checkpoint) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec_Checkpoint) ProtoMessage

func (*ChangeAggregatorSpec_Checkpoint) ProtoMessage()

func (*ChangeAggregatorSpec_Checkpoint) Reset

func (m *ChangeAggregatorSpec_Checkpoint) Reset()

func (*ChangeAggregatorSpec_Checkpoint) Size

func (m *ChangeAggregatorSpec_Checkpoint) Size() (n int)

func (*ChangeAggregatorSpec_Checkpoint) String

func (m *ChangeAggregatorSpec_Checkpoint) String() string

func (*ChangeAggregatorSpec_Checkpoint) Unmarshal

func (m *ChangeAggregatorSpec_Checkpoint) Unmarshal(dAtA []byte) error

func (*ChangeAggregatorSpec_Checkpoint) XXX_DiscardUnknown

func (m *ChangeAggregatorSpec_Checkpoint) XXX_DiscardUnknown()

func (*ChangeAggregatorSpec_Checkpoint) XXX_Marshal

func (m *ChangeAggregatorSpec_Checkpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeAggregatorSpec_Checkpoint) XXX_Merge

func (m *ChangeAggregatorSpec_Checkpoint) XXX_Merge(src proto.Message)

func (*ChangeAggregatorSpec_Checkpoint) XXX_Size

func (m *ChangeAggregatorSpec_Checkpoint) XXX_Size() int

func (*ChangeAggregatorSpec_Checkpoint) XXX_Unmarshal

func (m *ChangeAggregatorSpec_Checkpoint) XXX_Unmarshal(b []byte) error

type ChangeAggregatorSpec_Watch

type ChangeAggregatorSpec_Watch struct {
	InitialResolved hlc.Timestamp `protobuf:"bytes,1,opt,name=initial_resolved,json=initialResolved" json:"initial_resolved"`
	Span            roachpb.Span  `protobuf:"bytes,2,opt,name=span" json:"span"`
}

func (*ChangeAggregatorSpec_Watch) Descriptor

func (*ChangeAggregatorSpec_Watch) Descriptor() ([]byte, []int)

func (*ChangeAggregatorSpec_Watch) Marshal

func (m *ChangeAggregatorSpec_Watch) Marshal() (dAtA []byte, err error)

func (*ChangeAggregatorSpec_Watch) MarshalTo

func (m *ChangeAggregatorSpec_Watch) MarshalTo(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec_Watch) MarshalToSizedBuffer

func (m *ChangeAggregatorSpec_Watch) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ChangeAggregatorSpec_Watch) ProtoMessage

func (*ChangeAggregatorSpec_Watch) ProtoMessage()

func (*ChangeAggregatorSpec_Watch) Reset

func (m *ChangeAggregatorSpec_Watch) Reset()

func (*ChangeAggregatorSpec_Watch) Size

func (m *ChangeAggregatorSpec_Watch) Size() (n int)

func (*ChangeAggregatorSpec_Watch) String

func (m *ChangeAggregatorSpec_Watch) String() string

func (*ChangeAggregatorSpec_Watch) Unmarshal

func (m *ChangeAggregatorSpec_Watch) Unmarshal(dAtA []byte) error

func (*ChangeAggregatorSpec_Watch) XXX_DiscardUnknown

func (m *ChangeAggregatorSpec_Watch) XXX_DiscardUnknown()

func (*ChangeAggregatorSpec_Watch) XXX_Marshal

func (m *ChangeAggregatorSpec_Watch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeAggregatorSpec_Watch) XXX_Merge

func (m *ChangeAggregatorSpec_Watch) XXX_Merge(src proto.Message)

func (*ChangeAggregatorSpec_Watch) XXX_Size

func (m *ChangeAggregatorSpec_Watch) XXX_Size() int

func (*ChangeAggregatorSpec_Watch) XXX_Unmarshal

func (m *ChangeAggregatorSpec_Watch) XXX_Unmarshal(b []byte) error

type ChangeFrontierSpec

type ChangeFrontierSpec struct {
	// TrackedSpans is the entire span set being watched. Once all these spans
	// have been resolved at a certain timestamp, then it's safe to resolve the
	// changefeed at that timestamp.
	TrackedSpans []roachpb.Span `protobuf:"bytes,1,rep,name=tracked_spans,json=trackedSpans" json:"tracked_spans"`
	// Feed is the specification for this changefeed.
	Feed jobspb.ChangefeedDetails `protobuf:"bytes,2,opt,name=feed" json:"feed"`
	// JobID is the id of this changefeed in the system jobs.
	JobID github_com_cockroachdb_cockroach_pkg_jobs_jobspb.JobID `protobuf:"varint,3,opt,name=job_id,json=jobId,casttype=sqlfmt/cockroach/pkg/jobs/jobspb.JobID" json:"job_id"`
	// User who initiated the changefeed. This is used to check access privileges
	// when using FileTable ExternalStorage.
	UserProto github_com_cockroachdb_cockroach_pkg_security.SQLUsernameProto `` /* 127-byte string literal not displayed */
}

ChangeFrontierSpec is the specification for a processor that receives span-level resolved timestamps, tracks them, and emits the changefeed-level resolved timestamp whenever it changes.

func (*ChangeFrontierSpec) Descriptor

func (*ChangeFrontierSpec) Descriptor() ([]byte, []int)

func (*ChangeFrontierSpec) Marshal

func (m *ChangeFrontierSpec) Marshal() (dAtA []byte, err error)

func (*ChangeFrontierSpec) MarshalTo

func (m *ChangeFrontierSpec) MarshalTo(dAtA []byte) (int, error)

func (*ChangeFrontierSpec) MarshalToSizedBuffer

func (m *ChangeFrontierSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ChangeFrontierSpec) ProtoMessage

func (*ChangeFrontierSpec) ProtoMessage()

func (*ChangeFrontierSpec) Reset

func (m *ChangeFrontierSpec) Reset()

func (*ChangeFrontierSpec) Size

func (m *ChangeFrontierSpec) Size() (n int)

func (*ChangeFrontierSpec) String

func (m *ChangeFrontierSpec) String() string

func (*ChangeFrontierSpec) Unmarshal

func (m *ChangeFrontierSpec) Unmarshal(dAtA []byte) error

func (*ChangeFrontierSpec) User

func (spec *ChangeFrontierSpec) User() security.SQLUsername

User accesses the user field.

func (*ChangeFrontierSpec) XXX_DiscardUnknown

func (m *ChangeFrontierSpec) XXX_DiscardUnknown()

func (*ChangeFrontierSpec) XXX_Marshal

func (m *ChangeFrontierSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ChangeFrontierSpec) XXX_Merge

func (m *ChangeFrontierSpec) XXX_Merge(src proto.Message)

func (*ChangeFrontierSpec) XXX_Size

func (m *ChangeFrontierSpec) XXX_Size() int

func (*ChangeFrontierSpec) XXX_Unmarshal

func (m *ChangeFrontierSpec) XXX_Unmarshal(b []byte) error

type Columns

type Columns struct {
	Columns []uint32 `protobuf:"varint,1,rep,packed,name=columns" json:"columns,omitempty"`
}

func (*Columns) Descriptor

func (*Columns) Descriptor() ([]byte, []int)

func (*Columns) Marshal

func (m *Columns) Marshal() (dAtA []byte, err error)

func (*Columns) MarshalTo

func (m *Columns) MarshalTo(dAtA []byte) (int, error)

func (*Columns) MarshalToSizedBuffer

func (m *Columns) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*Columns) ProtoMessage

func (*Columns) ProtoMessage()

func (*Columns) Reset

func (m *Columns) Reset()

func (*Columns) Size

func (m *Columns) Size() (n int)

func (*Columns) String

func (m *Columns) String() string

func (*Columns) Unmarshal

func (m *Columns) Unmarshal(dAtA []byte) error

func (*Columns) XXX_DiscardUnknown

func (m *Columns) XXX_DiscardUnknown()

func (*Columns) XXX_Marshal

func (m *Columns) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Columns) XXX_Merge

func (m *Columns) XXX_Merge(src proto.Message)

func (*Columns) XXX_Size

func (m *Columns) XXX_Size() int

func (*Columns) XXX_Unmarshal

func (m *Columns) XXX_Unmarshal(b []byte) error

type ComponentID

type ComponentID struct {
	FlowID FlowID           `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
	Type   ComponentID_Type `protobuf:"varint,2,opt,name=type,enum=cockroach.sql.distsqlrun.ComponentID_Type" json:"type"`
	// Identifier of this component, within the domain of components of the same
	// type.
	ID int32 `protobuf:"varint,3,opt,name=id" json:"id"`
	// SQLInstanceID of the node this component is associated with. For cross-node
	// streams, this is the *origin* node for the stream.
	SQLInstanceID github_com_cockroachdb_cockroach_pkg_base.SQLInstanceID `` /* 135-byte string literal not displayed */
}

ComponentID identifies a component in a flow. There are multiple types of components (e.g. processors, streams); each component of a certain type has an integer identifier.

func FlowComponentID

func FlowComponentID(instanceID base.SQLInstanceID, flowID FlowID) ComponentID

FlowComponentID returns a ComponentID for the given flow.

func ProcessorComponentID

func ProcessorComponentID(
	instanceID base.SQLInstanceID, flowID FlowID, processorID int32,
) ComponentID

ProcessorComponentID returns a ComponentID for the given processor in a flow.

func StreamComponentID

func StreamComponentID(
	originInstanceID base.SQLInstanceID, flowID FlowID, streamID StreamID,
) ComponentID

StreamComponentID returns a ComponentID for the given stream in a flow.
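
A short sketch building the three kinds of component IDs for the same flow; the processor and stream numbers are illustrative:

import (
	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
)

func exampleComponentIDs(instance base.SQLInstanceID, flowID execinfrapb.FlowID) []execinfrapb.ComponentID {
	return []execinfrapb.ComponentID{
		execinfrapb.FlowComponentID(instance, flowID),
		execinfrapb.ProcessorComponentID(instance, flowID, 0 /* processorID */),
		execinfrapb.StreamComponentID(instance, flowID, execinfrapb.StreamID(1)),
	}
}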

func (*ComponentID) Descriptor

func (*ComponentID) Descriptor() ([]byte, []int)

func (*ComponentID) Marshal

func (m *ComponentID) Marshal() (dAtA []byte, err error)

func (*ComponentID) MarshalTo

func (m *ComponentID) MarshalTo(dAtA []byte) (int, error)

func (*ComponentID) MarshalToSizedBuffer

func (m *ComponentID) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ComponentID) ProtoMessage

func (*ComponentID) ProtoMessage()

func (*ComponentID) Reset

func (m *ComponentID) Reset()

func (*ComponentID) Size

func (m *ComponentID) Size() (n int)

func (*ComponentID) String

func (m *ComponentID) String() string

func (*ComponentID) Unmarshal

func (m *ComponentID) Unmarshal(dAtA []byte) error

func (*ComponentID) XXX_DiscardUnknown

func (m *ComponentID) XXX_DiscardUnknown()

func (*ComponentID) XXX_Marshal

func (m *ComponentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ComponentID) XXX_Merge

func (m *ComponentID) XXX_Merge(src proto.Message)

func (*ComponentID) XXX_Size

func (m *ComponentID) XXX_Size() int

func (*ComponentID) XXX_Unmarshal

func (m *ComponentID) XXX_Unmarshal(b []byte) error

type ComponentID_Type

type ComponentID_Type int32
const (
	ComponentID_UNSET ComponentID_Type = 0
	// The component is a processor; the ID field corresponds to a processor ID
	// in the plan.
	ComponentID_PROCESSOR ComponentID_Type = 1
	// The component is a stream; the ID field corresponds to a stream ID in the
	// plan.
	ComponentID_STREAM ComponentID_Type = 2
	// The "component" is a flow (specifically, the part of a distributed plan
	// that runs on a given node).
	// TODO(radu): the ID field should correspond to a node ID in the plan.
	ComponentID_FLOW ComponentID_Type = 3
)

func (ComponentID_Type) Enum

func (x ComponentID_Type) Enum() *ComponentID_Type

func (ComponentID_Type) EnumDescriptor

func (ComponentID_Type) EnumDescriptor() ([]byte, []int)

func (ComponentID_Type) String

func (x ComponentID_Type) String() string

func (*ComponentID_Type) UnmarshalJSON

func (x *ComponentID_Type) UnmarshalJSON(data []byte) error

type ComponentStats

type ComponentStats struct {
	Component ComponentID    `protobuf:"bytes,1,opt,name=component" json:"component"`
	NetRx     NetworkRxStats `protobuf:"bytes,2,opt,name=net_rx,json=netRx" json:"net_rx"`
	NetTx     NetworkTxStats `protobuf:"bytes,3,opt,name=net_tx,json=netTx" json:"net_tx"`
	KV        KVStats        `protobuf:"bytes,4,opt,name=kv" json:"kv"`
	Exec      ExecStats      `protobuf:"bytes,5,opt,name=exec" json:"exec"`
	Output    OutputStats    `protobuf:"bytes,6,opt,name=output" json:"output"`
	// Stats for the inputs of an operator (only in the row execution engine).
	Inputs    []InputStats `protobuf:"bytes,7,rep,name=inputs" json:"inputs"`
	FlowStats FlowStats    `protobuf:"bytes,8,opt,name=flow_stats,json=flowStats" json:"flow_stats"`
}

ComponentStats contains statistics for an execution component. A component is an arbitrary unit in the execution infrastructure; it can correspond to an operator or a stream.

Depending on the component, not all statistics apply. For all fields, the zero value indicates that the particular stat is not available.

func (*ComponentStats) Descriptor

func (*ComponentStats) Descriptor() ([]byte, []int)

func (*ComponentStats) MakeDeterministic

func (s *ComponentStats) MakeDeterministic()

MakeDeterministic is used only for testing; it modifies any non-deterministic statistics like elapsed time or exact number of bytes to fixed or manufactured values.

Note that it does not modify which fields are set. In other words, a field will have a non-zero protobuf value iff it had a non-zero protobuf value before. This allows tests to verify the set of stats that were collected.

func (*ComponentStats) Marshal

func (m *ComponentStats) Marshal() (dAtA []byte, err error)

func (*ComponentStats) MarshalTo

func (m *ComponentStats) MarshalTo(dAtA []byte) (int, error)

func (*ComponentStats) MarshalToSizedBuffer

func (m *ComponentStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ComponentStats) ProtoMessage

func (*ComponentStats) ProtoMessage()

func (*ComponentStats) Reset

func (m *ComponentStats) Reset()

func (*ComponentStats) Size

func (m *ComponentStats) Size() (n int)

func (*ComponentStats) StatsForQueryPlan

func (s *ComponentStats) StatsForQueryPlan() []string

StatsForQueryPlan returns the statistics as a list of strings that can be displayed in query plans and diagrams.

func (*ComponentStats) String

func (m *ComponentStats) String() string

func (*ComponentStats) Union

func (s *ComponentStats) Union(other *ComponentStats) *ComponentStats

Union creates a new ComponentStats that contains all statistics in either the receiver (s) or the argument (other). If a statistic is set in both, the one in the receiver (s) is preferred.
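
For example, merging live stats into stats recovered from a stored trace, preferring the live values (a minimal sketch; the variable names are illustrative):

import "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"

func mergeStats(live, stored *execinfrapb.ComponentStats) *execinfrapb.ComponentStats {
	// Stats set in live take precedence over the same stats in stored.
	return live.Union(stored)
}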

func (*ComponentStats) Unmarshal

func (m *ComponentStats) Unmarshal(dAtA []byte) error

func (*ComponentStats) XXX_DiscardUnknown

func (m *ComponentStats) XXX_DiscardUnknown()

func (*ComponentStats) XXX_Marshal

func (m *ComponentStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ComponentStats) XXX_Merge

func (m *ComponentStats) XXX_Merge(src proto.Message)

func (*ComponentStats) XXX_Size

func (m *ComponentStats) XXX_Size() int

func (*ComponentStats) XXX_Unmarshal

func (m *ComponentStats) XXX_Unmarshal(b []byte) error

type ConsumerHandshake

type ConsumerHandshake struct {
	// consumer_scheduled is true if the flow that's consuming this stream has
	// already been scheduled and so it is ready to consume data. If this is
	// false, then the consumer has not yet been scheduled. In this case, the
	// server (i.e. the consumer node) will send another ConsumerHandshake with
	// consumer_scheduled = true when the consumer is finally scheduled (unless
	// the scheduling timeout fires first, in which case the stream will be
	// closed server-side).
	ConsumerScheduled bool `protobuf:"varint,1,opt,name=consumer_scheduled,json=consumerScheduled" json:"consumer_scheduled"`
	// If consumer_scheduled is false, then this indicates the deadline for the
	// scheduling of the consumer flow. If the flow is not scheduled within that
	// deadline, this stream will be disconnected by the server.
	ConsumerScheduleDeadline *time.Time `` /* 136-byte string literal not displayed */
	// The server's DistSQL version range.
	Version            DistSQLVersion `protobuf:"varint,3,opt,name=version,casttype=DistSQLVersion" json:"version"`
	MinAcceptedVersion DistSQLVersion `protobuf:"varint,4,opt,name=min_accepted_version,json=minAcceptedVersion,casttype=DistSQLVersion" json:"min_accepted_version"`
}

ConsumerHandshake comprises the first message (or first two messages) sent in the consumer->producer direction on a stream. It informs the producer about the status of the consumer flow. Introduced in version 1.1 for future use by producers.

func (*ConsumerHandshake) Descriptor

func (*ConsumerHandshake) Descriptor() ([]byte, []int)

func (*ConsumerHandshake) Marshal

func (m *ConsumerHandshake) Marshal() (dAtA []byte, err error)

func (*ConsumerHandshake) MarshalTo

func (m *ConsumerHandshake) MarshalTo(dAtA []byte) (int, error)

func (*ConsumerHandshake) MarshalToSizedBuffer

func (m *ConsumerHandshake) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ConsumerHandshake) ProtoMessage

func (*ConsumerHandshake) ProtoMessage()

func (*ConsumerHandshake) Reset

func (m *ConsumerHandshake) Reset()

func (*ConsumerHandshake) Size

func (m *ConsumerHandshake) Size() (n int)

func (*ConsumerHandshake) String

func (m *ConsumerHandshake) String() string

func (*ConsumerHandshake) Unmarshal

func (m *ConsumerHandshake) Unmarshal(dAtA []byte) error

func (*ConsumerHandshake) XXX_DiscardUnknown

func (m *ConsumerHandshake) XXX_DiscardUnknown()

func (*ConsumerHandshake) XXX_Marshal

func (m *ConsumerHandshake) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConsumerHandshake) XXX_Merge

func (m *ConsumerHandshake) XXX_Merge(src proto.Message)

func (*ConsumerHandshake) XXX_Size

func (m *ConsumerHandshake) XXX_Size() int

func (*ConsumerHandshake) XXX_Unmarshal

func (m *ConsumerHandshake) XXX_Unmarshal(b []byte) error

type ConsumerSignal

type ConsumerSignal struct {
	// The consumer is done (doesn't need to consume any more rows) and is asking
	// the producer to push whatever trailing metadata it has and close its
	// stream.
	DrainRequest *DrainRequest `protobuf:"bytes,1,opt,name=drain_request,json=drainRequest" json:"drain_request,omitempty"`
	// Consumer->Producer handshake messages. See message definition.
	Handshake *ConsumerHandshake `protobuf:"bytes,3,opt,name=handshake" json:"handshake,omitempty"`
}

ConsumerSignal are messages flowing from consumer to producer (so, from RPC server to client) for the FlowStream RPC.

func (*ConsumerSignal) Descriptor

func (*ConsumerSignal) Descriptor() ([]byte, []int)

func (*ConsumerSignal) GetValue

func (this *ConsumerSignal) GetValue() interface{}

func (*ConsumerSignal) Marshal

func (m *ConsumerSignal) Marshal() (dAtA []byte, err error)

func (*ConsumerSignal) MarshalTo

func (m *ConsumerSignal) MarshalTo(dAtA []byte) (int, error)

func (*ConsumerSignal) MarshalToSizedBuffer

func (m *ConsumerSignal) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ConsumerSignal) ProtoMessage

func (*ConsumerSignal) ProtoMessage()

func (*ConsumerSignal) Reset

func (m *ConsumerSignal) Reset()

func (*ConsumerSignal) SetValue

func (this *ConsumerSignal) SetValue(value interface{}) bool

func (*ConsumerSignal) Size

func (m *ConsumerSignal) Size() (n int)

func (*ConsumerSignal) String

func (m *ConsumerSignal) String() string

func (*ConsumerSignal) Unmarshal

func (m *ConsumerSignal) Unmarshal(dAtA []byte) error

func (*ConsumerSignal) XXX_DiscardUnknown

func (m *ConsumerSignal) XXX_DiscardUnknown()

func (*ConsumerSignal) XXX_Marshal

func (m *ConsumerSignal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ConsumerSignal) XXX_Merge

func (m *ConsumerSignal) XXX_Merge(src proto.Message)

func (*ConsumerSignal) XXX_Size

func (m *ConsumerSignal) XXX_Size() int

func (*ConsumerSignal) XXX_Unmarshal

func (m *ConsumerSignal) XXX_Unmarshal(b []byte) error

type DatumInfo

type DatumInfo struct {
	Encoding descpb.DatumEncoding `protobuf:"varint,1,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
	Type     *types.T             `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
}

func (*DatumInfo) Descriptor

func (*DatumInfo) Descriptor() ([]byte, []int)

func (*DatumInfo) Marshal

func (m *DatumInfo) Marshal() (dAtA []byte, err error)

func (*DatumInfo) MarshalTo

func (m *DatumInfo) MarshalTo(dAtA []byte) (int, error)

func (*DatumInfo) MarshalToSizedBuffer

func (m *DatumInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DatumInfo) ProtoMessage

func (*DatumInfo) ProtoMessage()

func (*DatumInfo) Reset

func (m *DatumInfo) Reset()

func (*DatumInfo) Size

func (m *DatumInfo) Size() (n int)

func (*DatumInfo) String

func (m *DatumInfo) String() string

func (*DatumInfo) Unmarshal

func (m *DatumInfo) Unmarshal(dAtA []byte) error

func (*DatumInfo) XXX_DiscardUnknown

func (m *DatumInfo) XXX_DiscardUnknown()

func (*DatumInfo) XXX_Marshal

func (m *DatumInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DatumInfo) XXX_Merge

func (m *DatumInfo) XXX_Merge(src proto.Message)

func (*DatumInfo) XXX_Size

func (m *DatumInfo) XXX_Size() int

func (*DatumInfo) XXX_Unmarshal

func (m *DatumInfo) XXX_Unmarshal(b []byte) error

type DiagramFlags

type DiagramFlags struct {
	// ShowInputTypes adds input type information.
	ShowInputTypes bool

	// MakeDeterministic resets all stats that can vary from run to run (like
	// execution time), suitable for tests. See ComponentStats.MakeDeterministic.
	MakeDeterministic bool
}

DiagramFlags contains diagram settings.

type DistSQLClient

type DistSQLClient interface {
	// SetupFlow instantiates a flow (a subgraph of a distributed SQL
	// computation) on the receiving node.
	SetupFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
	// CancelDeadFlows cancels all specified flows that are currently scheduled to
	// run on the receiving node but haven't been started yet because the flows
	// are dead (the client of SetupFlow RPC has abandoned them). Flows that have
	// been started aren't affected even if mentioned in the request.
	//
	// This RPC is performed on a best effort basis, so any errors returned from
	// it should be ignored.
	CancelDeadFlows(ctx context.Context, in *CancelDeadFlowsRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
	// FlowStream is used to push a stream of messages that is part of a flow. The
	// first message will have a StreamHeader which identifies the flow and the
	// stream (mailbox).
	//
	// The response is a stream that the consumer uses to perform a handshake and
	// to signal the producer when it wants it to start draining. The client (i.e.
	// the producer) will read from this consumer->producer stream until it has
	// sent everything it needs to send and it performs CloseSend() on the
	// producer->consumer stream; after that point the producer isn't listening
	// for consumer signals any more.
	FlowStream(ctx context.Context, opts ...grpc.CallOption) (DistSQL_FlowStreamClient, error)
}

DistSQLClient is the client API for DistSQL service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewDistSQLClient

func NewDistSQLClient(cc *grpc.ClientConn) DistSQLClient
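
A hedged producer-side sketch of the FlowStream protocol described above (conn is an established gRPC connection; runProducer is an illustrative name):

	func runProducer(ctx context.Context, conn *grpc.ClientConn) error {
		client := NewDistSQLClient(conn)
		stream, err := client.FlowStream(ctx)
		if err != nil {
			return err
		}
		// Listen for consumer signals (handshake, drain requests) until
		// the consumer closes its side of the stream.
		go func() {
			for {
				sig, err := stream.Recv()
				if err != nil {
					return // io.EOF or a stream error
				}
				if sig.DrainRequest != nil {
					// The consumer wants only trailing metadata from now on.
				}
			}
		}()
		// ... send ProducerMessages here; the first one carries the
		// StreamHeader that identifies the flow and the stream ...
		return stream.CloseSend() // stop listening for consumer signals
	}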

type DistSQLDrainingInfo

type DistSQLDrainingInfo struct {
	Draining bool `protobuf:"varint,1,opt,name=draining" json:"draining"`
}

DistSQLDrainingInfo represents the DistSQL draining state that gets gossiped for each node. This is used by planners to avoid planning on nodes that are known to be draining.

func (*DistSQLDrainingInfo) Descriptor

func (*DistSQLDrainingInfo) Descriptor() ([]byte, []int)

func (*DistSQLDrainingInfo) Marshal

func (m *DistSQLDrainingInfo) Marshal() (dAtA []byte, err error)

func (*DistSQLDrainingInfo) MarshalTo

func (m *DistSQLDrainingInfo) MarshalTo(dAtA []byte) (int, error)

func (*DistSQLDrainingInfo) MarshalToSizedBuffer

func (m *DistSQLDrainingInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DistSQLDrainingInfo) ProtoMessage

func (*DistSQLDrainingInfo) ProtoMessage()

func (*DistSQLDrainingInfo) Reset

func (m *DistSQLDrainingInfo) Reset()

func (*DistSQLDrainingInfo) Size

func (m *DistSQLDrainingInfo) Size() (n int)

func (*DistSQLDrainingInfo) String

func (m *DistSQLDrainingInfo) String() string

func (*DistSQLDrainingInfo) Unmarshal

func (m *DistSQLDrainingInfo) Unmarshal(dAtA []byte) error

func (*DistSQLDrainingInfo) XXX_DiscardUnknown

func (m *DistSQLDrainingInfo) XXX_DiscardUnknown()

func (*DistSQLDrainingInfo) XXX_Marshal

func (m *DistSQLDrainingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistSQLDrainingInfo) XXX_Merge

func (m *DistSQLDrainingInfo) XXX_Merge(src proto.Message)

func (*DistSQLDrainingInfo) XXX_Size

func (m *DistSQLDrainingInfo) XXX_Size() int

func (*DistSQLDrainingInfo) XXX_Unmarshal

func (m *DistSQLDrainingInfo) XXX_Unmarshal(b []byte) error

type DistSQLRemoteFlowInfo

type DistSQLRemoteFlowInfo struct {
	FlowID FlowID
	// Timestamp must be in the UTC timezone.
	Timestamp time.Time
	// StatementSQL is the SQL statement for which this flow is executing.
	StatementSQL string
}

DistSQLRemoteFlowInfo contains some information about a single DistSQL remote flow.

type DistSQLServer

type DistSQLServer interface {
	// SetupFlow instantiates a flow (a subgraph of a distributed SQL
	// computation) on the receiving node.
	SetupFlow(context.Context, *SetupFlowRequest) (*SimpleResponse, error)
	// CancelDeadFlows cancels all specified flows that are currently scheduled to
	// run on the receiving node but haven't been started yet because the flows
	// are dead (the client of SetupFlow RPC has abandoned them). Flows that have
	// been started aren't affected even if mentioned in the request.
	//
	// This RPC is performed on a best effort basis, so any errors returned from
	// it should be ignored.
	CancelDeadFlows(context.Context, *CancelDeadFlowsRequest) (*SimpleResponse, error)
	// FlowStream is used to push a stream of messages that is part of a flow. The
	// first message will have a StreamHeader which identifies the flow and the
	// stream (mailbox).
	//
	// The response is a stream that the consumer uses to perform a handshake and
	// to signal the producer when it wants it to start draining. The client (i.e.
	// the producer) will read from this consumer->producer stream until it has
	// sent everything it needs to send and it performs CloseSend() on the
	// producer->consumer stream; after that point the producer isn't listening
	// for consumer signals any more.
	FlowStream(DistSQL_FlowStreamServer) error
}

DistSQLServer is the server API for DistSQL service.

type DistSQLVersion

type DistSQLVersion uint32

DistSQLVersion identifies DistSQL engine versions.

type DistSQLVersionGossipInfo

type DistSQLVersionGossipInfo struct {
	Version            DistSQLVersion `protobuf:"varint,1,opt,name=version,casttype=DistSQLVersion" json:"version"`
	MinAcceptedVersion DistSQLVersion `protobuf:"varint,2,opt,name=min_accepted_version,json=minAcceptedVersion,casttype=DistSQLVersion" json:"min_accepted_version"`
}

DistSQLVersionGossipInfo represents the DistSQL server version information that gets gossiped for each node. This is used by planners to avoid planning on nodes with incompatible version during rolling cluster updates.

For the meaning of the fields, see the corresponding constants in distsqlrun/server.go.
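
A hedged sketch of the compatibility check implied above (the exact rule is an assumption; the authoritative version lives next to those constants):

	// compatible reports whether a node that gossiped info can run a
	// flow planned at version flowVer.
	func compatible(info DistSQLVersionGossipInfo, flowVer DistSQLVersion) bool {
		return flowVer >= info.MinAcceptedVersion && flowVer <= info.Version
	}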

func (*DistSQLVersionGossipInfo) Descriptor

func (*DistSQLVersionGossipInfo) Descriptor() ([]byte, []int)

func (*DistSQLVersionGossipInfo) Marshal

func (m *DistSQLVersionGossipInfo) Marshal() (dAtA []byte, err error)

func (*DistSQLVersionGossipInfo) MarshalTo

func (m *DistSQLVersionGossipInfo) MarshalTo(dAtA []byte) (int, error)

func (*DistSQLVersionGossipInfo) MarshalToSizedBuffer

func (m *DistSQLVersionGossipInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DistSQLVersionGossipInfo) ProtoMessage

func (*DistSQLVersionGossipInfo) ProtoMessage()

func (*DistSQLVersionGossipInfo) Reset

func (m *DistSQLVersionGossipInfo) Reset()

func (*DistSQLVersionGossipInfo) Size

func (m *DistSQLVersionGossipInfo) Size() (n int)

func (*DistSQLVersionGossipInfo) String

func (m *DistSQLVersionGossipInfo) String() string

func (*DistSQLVersionGossipInfo) Unmarshal

func (m *DistSQLVersionGossipInfo) Unmarshal(dAtA []byte) error

func (*DistSQLVersionGossipInfo) XXX_DiscardUnknown

func (m *DistSQLVersionGossipInfo) XXX_DiscardUnknown()

func (*DistSQLVersionGossipInfo) XXX_Marshal

func (m *DistSQLVersionGossipInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistSQLVersionGossipInfo) XXX_Merge

func (m *DistSQLVersionGossipInfo) XXX_Merge(src proto.Message)

func (*DistSQLVersionGossipInfo) XXX_Size

func (m *DistSQLVersionGossipInfo) XXX_Size() int

func (*DistSQLVersionGossipInfo) XXX_Unmarshal

func (m *DistSQLVersionGossipInfo) XXX_Unmarshal(b []byte) error

type DistSQL_FlowStreamClient

type DistSQL_FlowStreamClient interface {
	Send(*ProducerMessage) error
	Recv() (*ConsumerSignal, error)
	grpc.ClientStream
}

type DistSQL_FlowStreamServer

type DistSQL_FlowStreamServer interface {
	Send(*ConsumerSignal) error
	Recv() (*ProducerMessage, error)
	grpc.ServerStream
}

type DistinctSpec

type DistinctSpec struct {
	// The ordered columns in the input stream can be optionally specified for
	// possible optimizations. The specific ordering (ascending/descending) of
	// the column itself is not important nor is the order in which the columns
	// are specified. The ordered columns must be a subset of the distinct
	// columns.
	OrderedColumns []uint32 `protobuf:"varint,1,rep,name=ordered_columns,json=orderedColumns" json:"ordered_columns,omitempty"`
	// The distinct columns in the input stream are those columns on which we
	// check for distinct rows. If A,B,C are in distinct_columns and there is a
	// 4th column D which is not included in distinct_columns, its values are not
	// considered, so rows A1,B1,C1,D1 and A1,B1,C1,D2 are considered equal and
	// only one of them (the first) is output.
	DistinctColumns []uint32 `protobuf:"varint,2,rep,name=distinct_columns,json=distinctColumns" json:"distinct_columns,omitempty"`
	// If true, then NULL values are treated as not equal to one another. Each NULL
	// value will cause a new row group to be created. For example:
	//
	//   c
	//   ----
	//   NULL
	//   NULL
	//
	// A distinct operation on column "c" will result in one output row if
	// NullsAreDistinct is false, or two output rows if true. This is set to true
	// for UPSERT and INSERT..ON CONFLICT statements, since they must treat NULL
	// values as distinct.
	NullsAreDistinct bool `protobuf:"varint,3,opt,name=nulls_are_distinct,json=nullsAreDistinct" json:"nulls_are_distinct"`
	// If not empty, then an error with this text will be raised if there are two
	// rows with duplicate distinct column values. This is used to implement the
	// UPSERT and INSERT..ON CONFLICT statements, both of which prohibit the same
	// row from being changed twice.
	ErrorOnDup string `protobuf:"bytes,4,opt,name=error_on_dup,json=errorOnDup" json:"error_on_dup"`
	// OutputOrdering specifies the required ordering of the output produced by
	// the distinct. The input to the processor *must* already be ordered
	// according to it.
	OutputOrdering Ordering `protobuf:"bytes,5,opt,name=output_ordering,json=outputOrdering" json:"output_ordering"`
}
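
For illustration, a hypothetical spec for an UPSERT-style distinct on columns 0 and 1, with the input already ordered on column 0 (the error text is illustrative only):

	spec := DistinctSpec{
		OrderedColumns:   []uint32{0},
		DistinctColumns:  []uint32{0, 1},
		NullsAreDistinct: true, // NULL values never match each other
		ErrorOnDup:       "row cannot be affected twice",
	}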

func (*DistinctSpec) Descriptor

func (*DistinctSpec) Descriptor() ([]byte, []int)

func (*DistinctSpec) Marshal

func (m *DistinctSpec) Marshal() (dAtA []byte, err error)

func (*DistinctSpec) MarshalTo

func (m *DistinctSpec) MarshalTo(dAtA []byte) (int, error)

func (*DistinctSpec) MarshalToSizedBuffer

func (m *DistinctSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DistinctSpec) ProtoMessage

func (*DistinctSpec) ProtoMessage()

func (*DistinctSpec) Reset

func (m *DistinctSpec) Reset()

func (*DistinctSpec) Size

func (m *DistinctSpec) Size() (n int)

func (*DistinctSpec) String

func (m *DistinctSpec) String() string

func (*DistinctSpec) Unmarshal

func (m *DistinctSpec) Unmarshal(dAtA []byte) error

func (*DistinctSpec) XXX_DiscardUnknown

func (m *DistinctSpec) XXX_DiscardUnknown()

func (*DistinctSpec) XXX_Marshal

func (m *DistinctSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DistinctSpec) XXX_Merge

func (m *DistinctSpec) XXX_Merge(src proto.Message)

func (*DistinctSpec) XXX_Size

func (m *DistinctSpec) XXX_Size() int

func (*DistinctSpec) XXX_Unmarshal

func (m *DistinctSpec) XXX_Unmarshal(b []byte) error

type DrainRequest

type DrainRequest struct {
}

func (*DrainRequest) Descriptor

func (*DrainRequest) Descriptor() ([]byte, []int)

func (*DrainRequest) Marshal

func (m *DrainRequest) Marshal() (dAtA []byte, err error)

func (*DrainRequest) MarshalTo

func (m *DrainRequest) MarshalTo(dAtA []byte) (int, error)

func (*DrainRequest) MarshalToSizedBuffer

func (m *DrainRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DrainRequest) ProtoMessage

func (*DrainRequest) ProtoMessage()

func (*DrainRequest) Reset

func (m *DrainRequest) Reset()

func (*DrainRequest) Size

func (m *DrainRequest) Size() (n int)

func (*DrainRequest) String

func (m *DrainRequest) String() string

func (*DrainRequest) Unmarshal

func (m *DrainRequest) Unmarshal(dAtA []byte) error

func (*DrainRequest) XXX_DiscardUnknown

func (m *DrainRequest) XXX_DiscardUnknown()

func (*DrainRequest) XXX_Marshal

func (m *DrainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DrainRequest) XXX_Merge

func (m *DrainRequest) XXX_Merge(src proto.Message)

func (*DrainRequest) XXX_Size

func (m *DrainRequest) XXX_Size() int

func (*DrainRequest) XXX_Unmarshal

func (m *DrainRequest) XXX_Unmarshal(b []byte) error

type Error

type Error struct {
	// full_error contains a structured error with possibly multiple
	// wrapping layers implementing the errors.Cause() interface.
	FullError *errorspb.EncodedError `protobuf:"bytes,3,opt,name=full_error,json=fullError" json:"full_error,omitempty"`
}

Error is a generic representation including a string message.

func NewError

func NewError(ctx context.Context, err error) *Error

NewError creates an Error from an error, to be sent on the wire. It will recognize certain errors and marshal them accordingly; everything unrecognized is turned into a PGError with code "internal".
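
A minimal round-trip sketch (ctx is assumed):

	wireErr := NewError(ctx, errors.New("boom")) // encode for the wire
	err := wireErr.ErrorDetail(ctx)              // decode back into a Go error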

func (*Error) Descriptor

func (*Error) Descriptor() ([]byte, []int)

func (*Error) ErrorDetail

func (e *Error) ErrorDetail(ctx context.Context) (err error)

ErrorDetail returns the payload as a Go error.

func (*Error) Marshal

func (m *Error) Marshal() (dAtA []byte, err error)

func (*Error) MarshalTo

func (m *Error) MarshalTo(dAtA []byte) (int, error)

func (*Error) MarshalToSizedBuffer

func (m *Error) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*Error) ProtoMessage

func (*Error) ProtoMessage()

func (*Error) Reset

func (m *Error) Reset()

func (*Error) Size

func (m *Error) Size() (n int)

func (*Error) String

func (e *Error) String() string

String implements fmt.Stringer.

func (*Error) Unmarshal

func (m *Error) Unmarshal(dAtA []byte) error

func (*Error) XXX_DiscardUnknown

func (m *Error) XXX_DiscardUnknown()

func (*Error) XXX_Marshal

func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Error) XXX_Merge

func (m *Error) XXX_Merge(src proto.Message)

func (*Error) XXX_Size

func (m *Error) XXX_Size() int

func (*Error) XXX_Unmarshal

func (m *Error) XXX_Unmarshal(b []byte) error

type EvalContext

type EvalContext struct {
	StmtTimestampNanos int64                     `protobuf:"varint,1,opt,name=stmt_timestamp_nanos,json=stmtTimestampNanos" json:"stmt_timestamp_nanos"`
	TxnTimestampNanos  int64                     `protobuf:"varint,2,opt,name=txn_timestamp_nanos,json=txnTimestampNanos" json:"txn_timestamp_nanos"`
	SessionData        sessiondatapb.SessionData `protobuf:"bytes,15,opt,name=session_data,json=sessionData" json:"session_data"`
}

EvalContext is used to marshal some planner.EvalContext members.

func MakeEvalContext

func MakeEvalContext(evalCtx *tree.EvalContext) EvalContext

MakeEvalContext serializes some of the fields of a tree.EvalContext into an execinfrapb.EvalContext proto.

func (*EvalContext) Descriptor

func (*EvalContext) Descriptor() ([]byte, []int)

func (*EvalContext) Marshal

func (m *EvalContext) Marshal() (dAtA []byte, err error)

func (*EvalContext) MarshalTo

func (m *EvalContext) MarshalTo(dAtA []byte) (int, error)

func (*EvalContext) MarshalToSizedBuffer

func (m *EvalContext) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*EvalContext) ProtoMessage

func (*EvalContext) ProtoMessage()

func (*EvalContext) Reset

func (m *EvalContext) Reset()

func (*EvalContext) Size

func (m *EvalContext) Size() (n int)

func (*EvalContext) String

func (m *EvalContext) String() string

func (*EvalContext) Unmarshal

func (m *EvalContext) Unmarshal(dAtA []byte) error

func (*EvalContext) XXX_DiscardUnknown

func (m *EvalContext) XXX_DiscardUnknown()

func (*EvalContext) XXX_Marshal

func (m *EvalContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*EvalContext) XXX_Merge

func (m *EvalContext) XXX_Merge(src proto.Message)

func (*EvalContext) XXX_Size

func (m *EvalContext) XXX_Size() int

func (*EvalContext) XXX_Unmarshal

func (m *EvalContext) XXX_Unmarshal(b []byte) error

type ExecStats

type ExecStats struct {
	// Time spent executing the component.
	ExecTime optional.Duration `protobuf:"bytes,1,opt,name=exec_time,json=execTime" json:"exec_time"`
	// Maximum memory allocated by the component.
	MaxAllocatedMem optional.Uint `protobuf:"bytes,2,opt,name=max_allocated_mem,json=maxAllocatedMem" json:"max_allocated_mem"`
	// Maximum scratch disk allocated by the component.
	MaxAllocatedDisk optional.Uint `protobuf:"bytes,3,opt,name=max_allocated_disk,json=maxAllocatedDisk" json:"max_allocated_disk"`
}

ExecStats contains statistics about the execution of a component.

func (*ExecStats) Descriptor

func (*ExecStats) Descriptor() ([]byte, []int)

func (*ExecStats) Marshal

func (m *ExecStats) Marshal() (dAtA []byte, err error)

func (*ExecStats) MarshalTo

func (m *ExecStats) MarshalTo(dAtA []byte) (int, error)

func (*ExecStats) MarshalToSizedBuffer

func (m *ExecStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ExecStats) ProtoMessage

func (*ExecStats) ProtoMessage()

func (*ExecStats) Reset

func (m *ExecStats) Reset()

func (*ExecStats) Size

func (m *ExecStats) Size() (n int)

func (*ExecStats) String

func (m *ExecStats) String() string

func (*ExecStats) Unmarshal

func (m *ExecStats) Unmarshal(dAtA []byte) error

func (*ExecStats) XXX_DiscardUnknown

func (m *ExecStats) XXX_DiscardUnknown()

func (*ExecStats) XXX_Marshal

func (m *ExecStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ExecStats) XXX_Merge

func (m *ExecStats) XXX_Merge(src proto.Message)

func (*ExecStats) XXX_Size

func (m *ExecStats) XXX_Size() int

func (*ExecStats) XXX_Unmarshal

func (m *ExecStats) XXX_Unmarshal(b []byte) error

type ExportSpec

type ExportSpec struct {
	// destination as a cloud.ExternalStorage URI pointing to an export store
	// location (directory).
	Destination string               `protobuf:"bytes,1,opt,name=destination" json:"destination"`
	NamePattern string               `protobuf:"bytes,2,opt,name=name_pattern,json=namePattern" json:"name_pattern"`
	Format      roachpb.IOFileFormat `protobuf:"bytes,3,opt,name=format" json:"format"`
	// chunk_rows is num rows to write per file. 0 = no limit.
	ChunkRows int64 `protobuf:"varint,4,opt,name=chunk_rows,json=chunkRows" json:"chunk_rows"`
	// chunk_size is the target byte size per file.
	ChunkSize int64 `protobuf:"varint,5,opt,name=chunk_size,json=chunkSize" json:"chunk_size"`
	// User who initiated the export. This is used to check access privileges
	// when using FileTable ExternalStorage.
	UserProto github_com_cockroachdb_cockroach_pkg_security.SQLUsernameProto `` /* 127-byte string literal not displayed */
	// col_names specifies the logical column names for the exported parquet file.
	ColNames []string `protobuf:"bytes,7,rep,name=col_names,json=colNames" json:"col_names,omitempty"`
}

ExportSpec is the specification for a processor that consumes rows and writes them to Parquet or CSV files at the destination URI. For each file written, it outputs a row containing the file name, row count, and byte size.

func (*ExportSpec) Descriptor

func (*ExportSpec) Descriptor() ([]byte, []int)

func (*ExportSpec) Marshal

func (m *ExportSpec) Marshal() (dAtA []byte, err error)

func (*ExportSpec) MarshalTo

func (m *ExportSpec) MarshalTo(dAtA []byte) (int, error)

func (*ExportSpec) MarshalToSizedBuffer

func (m *ExportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ExportSpec) ProtoMessage

func (*ExportSpec) ProtoMessage()

func (*ExportSpec) Reset

func (m *ExportSpec) Reset()

func (*ExportSpec) Size

func (m *ExportSpec) Size() (n int)

func (*ExportSpec) String

func (m *ExportSpec) String() string

func (*ExportSpec) Unmarshal

func (m *ExportSpec) Unmarshal(dAtA []byte) error

func (*ExportSpec) User

func (m *ExportSpec) User() security.SQLUsername

User accesses the user field.

func (*ExportSpec) XXX_DiscardUnknown

func (m *ExportSpec) XXX_DiscardUnknown()

func (*ExportSpec) XXX_Marshal

func (m *ExportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ExportSpec) XXX_Merge

func (m *ExportSpec) XXX_Merge(src proto.Message)

func (*ExportSpec) XXX_Size

func (m *ExportSpec) XXX_Size() int

func (*ExportSpec) XXX_Unmarshal

func (m *ExportSpec) XXX_Unmarshal(b []byte) error

type ExprHelper

type ExprHelper struct {
	Expr tree.TypedExpr
	// Vars is used to generate IndexedVars that are "backed" by the values in
	// `Row`.
	Vars tree.IndexedVarHelper

	Types []*types.T
	Row   rowenc.EncDatumRow
	// contains filtered or unexported fields
}

ExprHelper implements the common logic around evaluating an expression that depends on a set of values.

func (*ExprHelper) Eval

func (eh *ExprHelper) Eval(row rowenc.EncDatumRow) (tree.Datum, error)

Eval - given a row - evaluates the wrapped expression and returns the resulting datum. For example, given a row (1, 2, 3, 4, 5):

'@2' would return '2'
'@2 + @5' would return '7'
'@1' would return '1'
'@2 + 10' would return '12'
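
A hedged usage sketch (the type schema, contexts, and input row are assumed to be supplied by the caller; evalSum is an illustrative name):

	func evalSum(
		row rowenc.EncDatumRow,
		typs []*types.T,
		semaCtx *tree.SemaContext,
		evalCtx *tree.EvalContext,
	) (tree.Datum, error) {
		var eh ExprHelper
		if err := eh.Init(Expression{Expr: "@2 + @5"}, typs, semaCtx, evalCtx); err != nil {
			return nil, err
		}
		// With row = (1, 2, 3, 4, 5) this returns 7, per the examples above.
		return eh.Eval(row)
	}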

func (*ExprHelper) EvalFilter

func (eh *ExprHelper) EvalFilter(row rowenc.EncDatumRow) (bool, error)

EvalFilter is used for filter expressions; it evaluates the expression and returns whether the filter passes.

func (*ExprHelper) IndexedVarEval

func (eh *ExprHelper) IndexedVarEval(idx int, ctx *tree.EvalContext) (tree.Datum, error)

IndexedVarEval is part of the tree.IndexedVarContainer interface.

func (*ExprHelper) IndexedVarNodeFormatter

func (eh *ExprHelper) IndexedVarNodeFormatter(idx int) tree.NodeFormatter

IndexedVarNodeFormatter is part of the parser.IndexedVarContainer interface.

func (*ExprHelper) IndexedVarResolvedType

func (eh *ExprHelper) IndexedVarResolvedType(idx int) *types.T

IndexedVarResolvedType is part of the tree.IndexedVarContainer interface.

func (*ExprHelper) Init

func (eh *ExprHelper) Init(
	expr Expression, types []*types.T, semaCtx *tree.SemaContext, evalCtx *tree.EvalContext,
) error

Init initializes the ExprHelper.

func (*ExprHelper) String

func (eh *ExprHelper) String() string

type Expression

type Expression struct {
	// Version is unused.
	Version string

	// Expr, if present, is the string representation of this expression.
	// SQL expressions are passed as a string, with ordinal references
	// (@1, @2, @3 ..) used for "input" variables.
	Expr string

	// LocalExpr is an unserialized field that's used to pass expressions to
	// the gateway node without serializing/deserializing them. It is always
	// set in non-test setup.
	LocalExpr tree.TypedExpr
}

Expression is the representation of a SQL expression. See data.proto for the corresponding proto definition. Its automatic type declaration is suppressed in the proto via the typedecl=false option, so that we can add the LocalExpr field which is not serialized. It never needs to be serialized because we only use it in the case where we know we won't need to send it, as a proto, to another machine.
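
A minimal sketch of the string form:

	// @1 and @2 are ordinal references to the first two input columns.
	filter := Expression{Expr: "@1 > 10 AND @2 IS NOT NULL"}
	_ = filter.Empty() // false: Expr is set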

func (*Expression) Descriptor

func (*Expression) Descriptor() ([]byte, []int)

func (*Expression) Empty

func (e *Expression) Empty() bool

Empty returns true if the expression has neither an Expr nor LocalExpr.

func (*Expression) Marshal

func (m *Expression) Marshal() (dAtA []byte, err error)

func (*Expression) MarshalTo

func (m *Expression) MarshalTo(dAtA []byte) (int, error)

func (*Expression) MarshalToSizedBuffer

func (m *Expression) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*Expression) ProtoMessage

func (*Expression) ProtoMessage()

func (*Expression) Reset

func (m *Expression) Reset()

func (*Expression) Size

func (m *Expression) Size() (n int)

func (Expression) String

func (e Expression) String() string

String implements the Stringer interface.

func (*Expression) Unmarshal

func (m *Expression) Unmarshal(dAtA []byte) error

func (*Expression) XXX_DiscardUnknown

func (m *Expression) XXX_DiscardUnknown()

func (*Expression) XXX_Marshal

func (m *Expression) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Expression) XXX_Merge

func (m *Expression) XXX_Merge(src proto.Message)

func (*Expression) XXX_Size

func (m *Expression) XXX_Size() int

func (*Expression) XXX_Unmarshal

func (m *Expression) XXX_Unmarshal(b []byte) error

type FiltererSpec

type FiltererSpec struct {
	// A filtering expression which references the internal columns of the
	// processor via ordinal references (@1, @2, etc).
	Filter Expression `protobuf:"bytes,1,opt,name=filter" json:"filter"`
}

FiltererSpec is the specification for a processor that filters input rows according to a boolean expression.

func (*FiltererSpec) Descriptor

func (*FiltererSpec) Descriptor() ([]byte, []int)

func (*FiltererSpec) Marshal

func (m *FiltererSpec) Marshal() (dAtA []byte, err error)

func (*FiltererSpec) MarshalTo

func (m *FiltererSpec) MarshalTo(dAtA []byte) (int, error)

func (*FiltererSpec) MarshalToSizedBuffer

func (m *FiltererSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*FiltererSpec) ProtoMessage

func (*FiltererSpec) ProtoMessage()

func (*FiltererSpec) Reset

func (m *FiltererSpec) Reset()

func (*FiltererSpec) Size

func (m *FiltererSpec) Size() (n int)

func (*FiltererSpec) String

func (m *FiltererSpec) String() string

func (*FiltererSpec) Unmarshal

func (m *FiltererSpec) Unmarshal(dAtA []byte) error

func (*FiltererSpec) XXX_DiscardUnknown

func (m *FiltererSpec) XXX_DiscardUnknown()

func (*FiltererSpec) XXX_Marshal

func (m *FiltererSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*FiltererSpec) XXX_Merge

func (m *FiltererSpec) XXX_Merge(src proto.Message)

func (*FiltererSpec) XXX_Size

func (m *FiltererSpec) XXX_Size() int

func (*FiltererSpec) XXX_Unmarshal

func (m *FiltererSpec) XXX_Unmarshal(b []byte) error

type FlowDiagram

type FlowDiagram interface {
	// ToURL generates the json data for a flow diagram and a URL which encodes the
	// diagram.
	ToURL() (string, url.URL, error)

	// AddSpans adds stats extracted from the input spans to the diagram.
	AddSpans([]tracingpb.RecordedSpan)
}

FlowDiagram is a plan diagram that can be made into a URL.

func GeneratePlanDiagram

func GeneratePlanDiagram(
	sql string, flows map[base.SQLInstanceID]*FlowSpec, flags DiagramFlags,
) (FlowDiagram, error)

GeneratePlanDiagram generates the data for a flow diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.
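
A hedged usage sketch (flows is assumed to map each node's SQLInstanceID to its FlowSpec):

	diagram, err := GeneratePlanDiagram(
		"SELECT count(*) FROM t", // SQL shown in the diagram
		flows,
		DiagramFlags{MakeDeterministic: true}, // stable output for tests
	)
	if err != nil {
		return err
	}
	jsonData, diagramURL, err := diagram.ToURL()
	// jsonData holds the diagram data; diagramURL encodes the diagram.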

type FlowID

type FlowID struct {
	uuid.UUID
}

FlowID identifies a flow. It is most importantly used when setting up streams between nodes.

func (FlowID) Equal

func (f FlowID) Equal(other FlowID) bool

Equal returns whether the two FlowIDs are equal.

func (FlowID) IsUnset

func (f FlowID) IsUnset() bool

IsUnset returns whether the FlowID is unset.

type FlowSpec

type FlowSpec struct {
	FlowID FlowID `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
	// The SQLInstanceID of the gateway that planned this Flow. Used for debugging.
	Gateway    github_com_cockroachdb_cockroach_pkg_base.SQLInstanceID `protobuf:"varint,3,opt,name=gateway,casttype=sqlfmt/cockroach/pkg/base.SQLInstanceID" json:"gateway"`
	Processors []ProcessorSpec                                         `protobuf:"bytes,2,rep,name=processors" json:"processors"`
}

FlowSpec describes a "flow", which is a subgraph of a distributed SQL computation consisting of processors and streams.

func (*FlowSpec) Descriptor

func (*FlowSpec) Descriptor() ([]byte, []int)

func (*FlowSpec) Marshal

func (m *FlowSpec) Marshal() (dAtA []byte, err error)

func (*FlowSpec) MarshalTo

func (m *FlowSpec) MarshalTo(dAtA []byte) (int, error)

func (*FlowSpec) MarshalToSizedBuffer

func (m *FlowSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*FlowSpec) ProtoMessage

func (*FlowSpec) ProtoMessage()

func (*FlowSpec) Reset

func (m *FlowSpec) Reset()

func (*FlowSpec) Size

func (m *FlowSpec) Size() (n int)

func (*FlowSpec) String

func (m *FlowSpec) String() string

func (*FlowSpec) Unmarshal

func (m *FlowSpec) Unmarshal(dAtA []byte) error

func (*FlowSpec) XXX_DiscardUnknown

func (m *FlowSpec) XXX_DiscardUnknown()

func (*FlowSpec) XXX_Marshal

func (m *FlowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*FlowSpec) XXX_Merge

func (m *FlowSpec) XXX_Merge(src proto.Message)

func (*FlowSpec) XXX_Size

func (m *FlowSpec) XXX_Size() int

func (*FlowSpec) XXX_Unmarshal

func (m *FlowSpec) XXX_Unmarshal(b []byte) error

type FlowStats

type FlowStats struct {
	MaxMemUsage  optional.Uint `protobuf:"bytes,1,opt,name=max_mem_usage,json=maxMemUsage" json:"max_mem_usage"`
	MaxDiskUsage optional.Uint `protobuf:"bytes,2,opt,name=max_disk_usage,json=maxDiskUsage" json:"max_disk_usage"`
}

FlowStats contains flow level statistics.

func (*FlowStats) Descriptor

func (*FlowStats) Descriptor() ([]byte, []int)

func (*FlowStats) Marshal

func (m *FlowStats) Marshal() (dAtA []byte, err error)

func (*FlowStats) MarshalTo

func (m *FlowStats) MarshalTo(dAtA []byte) (int, error)

func (*FlowStats) MarshalToSizedBuffer

func (m *FlowStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*FlowStats) ProtoMessage

func (*FlowStats) ProtoMessage()

func (*FlowStats) Reset

func (m *FlowStats) Reset()

func (*FlowStats) Size

func (m *FlowStats) Size() (n int)

func (*FlowStats) String

func (m *FlowStats) String() string

func (*FlowStats) Unmarshal

func (m *FlowStats) Unmarshal(dAtA []byte) error

func (*FlowStats) XXX_DiscardUnknown

func (m *FlowStats) XXX_DiscardUnknown()

func (*FlowStats) XXX_Marshal

func (m *FlowStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*FlowStats) XXX_Merge

func (m *FlowStats) XXX_Merge(src proto.Message)

func (*FlowStats) XXX_Size

func (m *FlowStats) XXX_Size() int

func (*FlowStats) XXX_Unmarshal

func (m *FlowStats) XXX_Unmarshal(b []byte) error

type HashJoinerSpec

type HashJoinerSpec struct {
	// The join constrains certain columns from the left stream to equal
	// corresponding columns on the right stream. The two column lists must
	// have the same length.
	LeftEqColumns  []uint32 `protobuf:"varint,1,rep,packed,name=left_eq_columns,json=leftEqColumns" json:"left_eq_columns,omitempty"`
	RightEqColumns []uint32 `protobuf:"varint,2,rep,packed,name=right_eq_columns,json=rightEqColumns" json:"right_eq_columns,omitempty"`
	// "ON" expression (in addition to the equality constraints captured by the
	// orderings). Assuming that the left stream has N columns and the right
	// stream has M columns, in this expression variables @1 to @N refer to
	// columns of the left stream and variables @(N+1) to @(N+M) refer to columns
	// in the right stream.
	OnExpr Expression      `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
	Type   descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
	// If true, it is guaranteed that the left equality columns form a key for
	// the left input. In other words, no two rows from the left input have the
	// same set of values on the left equality columns.
	LeftEqColumnsAreKey bool `protobuf:"varint,8,opt,name=left_eq_columns_are_key,json=leftEqColumnsAreKey" json:"left_eq_columns_are_key"`
	// If true, it is guaranteed that the right equality columns form a key for
	// the right input. In other words, no two rows from the right input have the
	// same set of values on the right equality columns.
	RightEqColumnsAreKey bool `protobuf:"varint,9,opt,name=right_eq_columns_are_key,json=rightEqColumnsAreKey" json:"right_eq_columns_are_key"`
}

HashJoinerSpec is the specification for a hash join processor. The processor has two inputs and one output.

The processor works by reading the entire right input and putting it in a hash table. Thus, there is no guarantee on the ordering of results that stem only from the right input (in the case of RIGHT_OUTER, FULL_OUTER). However, it is guaranteed that results that involve the left stream preserve the ordering; i.e. all results that stem from left row (i) precede results that stem from left row (i+1).

The "internal columns" of a HashJoiner (see ProcessorSpec) are:

  • for INNER, LEFT_OUTER, RIGHT_OUTER, FULL_OUTER - the concatenation of left input columns and right input columns. If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side.
  • for LEFT_SEMI, LEFT_ANTI, INTERSECT_ALL, EXCEPT_ALL - the left input columns.
  • for RIGHT_SEMI, RIGHT_ANTI - the right input columns.

Note that, regardless of the join type, an optional ON expression can refer to columns from both inputs.
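
For illustration, a hypothetical INNER join spec equating left column 0 with right column 1; with a two-column left input, @3 and @4 refer to the right side in the ON expression:

	spec := HashJoinerSpec{
		LeftEqColumns:  []uint32{0},
		RightEqColumns: []uint32{1},
		OnExpr:         Expression{Expr: "@2 < @4"}, // extra non-equality condition
		Type:           descpb.InnerJoin,
	}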

func (*HashJoinerSpec) Descriptor

func (*HashJoinerSpec) Descriptor() ([]byte, []int)

func (*HashJoinerSpec) Marshal

func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)

func (*HashJoinerSpec) MarshalTo

func (m *HashJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*HashJoinerSpec) MarshalToSizedBuffer

func (m *HashJoinerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*HashJoinerSpec) ProtoMessage

func (*HashJoinerSpec) ProtoMessage()

func (*HashJoinerSpec) Reset

func (m *HashJoinerSpec) Reset()

func (*HashJoinerSpec) Size

func (m *HashJoinerSpec) Size() (n int)

func (*HashJoinerSpec) String

func (m *HashJoinerSpec) String() string

func (*HashJoinerSpec) Unmarshal

func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error

func (*HashJoinerSpec) XXX_DiscardUnknown

func (m *HashJoinerSpec) XXX_DiscardUnknown()

func (*HashJoinerSpec) XXX_Marshal

func (m *HashJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HashJoinerSpec) XXX_Merge

func (m *HashJoinerSpec) XXX_Merge(src proto.Message)

func (*HashJoinerSpec) XXX_Size

func (m *HashJoinerSpec) XXX_Size() int

func (*HashJoinerSpec) XXX_Unmarshal

func (m *HashJoinerSpec) XXX_Unmarshal(b []byte) error

type InboundStreamNotification

type InboundStreamNotification struct {
	Stream DistSQL_FlowStreamServer
	Donec  chan<- error
}

InboundStreamNotification is the MockDistSQLServer's way to tell its clients that a new gRPC call has arrived, and with it a new stream. The RPC handler blocks until Donec is signaled.

type IndexBackfillMergerSpec

type IndexBackfillMergerSpec struct {
	Table            descpb.TableDescriptor                                            `protobuf:"bytes,1,opt,name=table" json:"table"`
	TemporaryIndexes []github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.IndexID `` /* 160-byte string literal not displayed */
	AddedIndexes     []github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.IndexID `` /* 148-byte string literal not displayed */
	Spans            []roachpb.Span                                                    `protobuf:"bytes,4,rep,name=spans" json:"spans"`
	SpanIdx          []int32                                                           `protobuf:"varint,5,rep,name=span_idx,json=spanIdx" json:"span_idx,omitempty"`
}

func (*IndexBackfillMergerSpec) Descriptor

func (*IndexBackfillMergerSpec) Descriptor() ([]byte, []int)

func (*IndexBackfillMergerSpec) Marshal

func (m *IndexBackfillMergerSpec) Marshal() (dAtA []byte, err error)

func (*IndexBackfillMergerSpec) MarshalTo

func (m *IndexBackfillMergerSpec) MarshalTo(dAtA []byte) (int, error)

func (*IndexBackfillMergerSpec) MarshalToSizedBuffer

func (m *IndexBackfillMergerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*IndexBackfillMergerSpec) ProtoMessage

func (*IndexBackfillMergerSpec) ProtoMessage()

func (*IndexBackfillMergerSpec) Reset

func (m *IndexBackfillMergerSpec) Reset()

func (*IndexBackfillMergerSpec) Size

func (m *IndexBackfillMergerSpec) Size() (n int)

func (*IndexBackfillMergerSpec) String

func (m *IndexBackfillMergerSpec) String() string

func (*IndexBackfillMergerSpec) Unmarshal

func (m *IndexBackfillMergerSpec) Unmarshal(dAtA []byte) error

func (*IndexBackfillMergerSpec) XXX_DiscardUnknown

func (m *IndexBackfillMergerSpec) XXX_DiscardUnknown()

func (*IndexBackfillMergerSpec) XXX_Marshal

func (m *IndexBackfillMergerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*IndexBackfillMergerSpec) XXX_Merge

func (m *IndexBackfillMergerSpec) XXX_Merge(src proto.Message)

func (*IndexBackfillMergerSpec) XXX_Size

func (m *IndexBackfillMergerSpec) XXX_Size() int

func (*IndexBackfillMergerSpec) XXX_Unmarshal

func (m *IndexBackfillMergerSpec) XXX_Unmarshal(b []byte) error

type IndexSkipTableReaderSpec

type IndexSkipTableReaderSpec struct {
	Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
	// If 0, we use the primary index. If non-zero, we use the index_idx-th index,
	// i.e. table.indexes[index_idx-1]
	IndexIdx uint32         `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
	Spans    []roachpb.Span `protobuf:"bytes,8,rep,name=spans" json:"spans"`
	Reverse  bool           `protobuf:"varint,5,opt,name=reverse" json:"reverse"`
	// Indicates the row-level locking strength to be used by the scan. If set to
	// FOR_NONE, no row-level locking should be performed.
	LockingStrength descpb.ScanLockingStrength `` /* 137-byte string literal not displayed */
	// Indicates the policy to be used by the scan for handling conflicting locks
	// held by other active transactions when attempting to lock rows. Always set
	// to BLOCK when locking_strength is FOR_NONE.
	LockingWaitPolicy descpb.ScanLockingWaitPolicy `` /* 147-byte string literal not displayed */
}

IndexSkipTableReaderSpec is the specification for a table reader that is performing a loose index scan over rows in the table. This means that this reader will return distinct rows from the table while using the index to skip unnecessary rows. This reader is used for different optimizations when operating on a prefix of a compound key.

func (*IndexSkipTableReaderSpec) Descriptor

func (*IndexSkipTableReaderSpec) Descriptor() ([]byte, []int)

func (*IndexSkipTableReaderSpec) Marshal

func (m *IndexSkipTableReaderSpec) Marshal() (dAtA []byte, err error)

func (*IndexSkipTableReaderSpec) MarshalTo

func (m *IndexSkipTableReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*IndexSkipTableReaderSpec) MarshalToSizedBuffer

func (m *IndexSkipTableReaderSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*IndexSkipTableReaderSpec) ProtoMessage

func (*IndexSkipTableReaderSpec) ProtoMessage()

func (*IndexSkipTableReaderSpec) Reset

func (m *IndexSkipTableReaderSpec) Reset()

func (*IndexSkipTableReaderSpec) Size

func (m *IndexSkipTableReaderSpec) Size() (n int)

func (*IndexSkipTableReaderSpec) String

func (m *IndexSkipTableReaderSpec) String() string

func (*IndexSkipTableReaderSpec) Unmarshal

func (m *IndexSkipTableReaderSpec) Unmarshal(dAtA []byte) error

func (*IndexSkipTableReaderSpec) XXX_DiscardUnknown

func (m *IndexSkipTableReaderSpec) XXX_DiscardUnknown()

func (*IndexSkipTableReaderSpec) XXX_Marshal

func (m *IndexSkipTableReaderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*IndexSkipTableReaderSpec) XXX_Merge

func (m *IndexSkipTableReaderSpec) XXX_Merge(src proto.Message)

func (*IndexSkipTableReaderSpec) XXX_Size

func (m *IndexSkipTableReaderSpec) XXX_Size() int

func (*IndexSkipTableReaderSpec) XXX_Unmarshal

func (m *IndexSkipTableReaderSpec) XXX_Unmarshal(b []byte) error

type InputStats

type InputStats struct {
	NumTuples optional.Uint `protobuf:"bytes,1,opt,name=num_tuples,json=numTuples" json:"num_tuples"`
	// Cumulated time spent waiting for rows from the input operator.
	WaitTime optional.Duration `protobuf:"bytes,2,opt,name=wait_time,json=waitTime" json:"wait_time"`
}

InputStats contains statistics about the rows received as an input to a processor. Currently only used in the row execution engine.

func (*InputStats) Descriptor

func (*InputStats) Descriptor() ([]byte, []int)

func (*InputStats) Marshal

func (m *InputStats) Marshal() (dAtA []byte, err error)

func (*InputStats) MarshalTo

func (m *InputStats) MarshalTo(dAtA []byte) (int, error)

func (*InputStats) MarshalToSizedBuffer

func (m *InputStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*InputStats) ProtoMessage

func (*InputStats) ProtoMessage()

func (*InputStats) Reset

func (m *InputStats) Reset()

func (*InputStats) Size

func (m *InputStats) Size() (n int)

func (*InputStats) String

func (m *InputStats) String() string

func (*InputStats) Unmarshal

func (m *InputStats) Unmarshal(dAtA []byte) error

func (*InputStats) XXX_DiscardUnknown

func (m *InputStats) XXX_DiscardUnknown()

func (*InputStats) XXX_Marshal

func (m *InputStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InputStats) XXX_Merge

func (m *InputStats) XXX_Merge(src proto.Message)

func (*InputStats) XXX_Size

func (m *InputStats) XXX_Size() int

func (*InputStats) XXX_Unmarshal

func (m *InputStats) XXX_Unmarshal(b []byte) error

type InputSyncSpec

type InputSyncSpec struct {
	Type     InputSyncSpec_Type   `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.InputSyncSpec_Type" json:"type"`
	Ordering Ordering             `protobuf:"bytes,2,opt,name=ordering" json:"ordering"`
	Streams  []StreamEndpointSpec `protobuf:"bytes,3,rep,name=streams" json:"streams"`
	// Schema for the streams entering this synchronizer.
	ColumnTypes []*types.T `protobuf:"bytes,4,rep,name=column_types,json=columnTypes" json:"column_types,omitempty"`
}

InputSyncSpec is the specification for an input synchronizer; it decides how to interleave rows from multiple input streams.

func (*InputSyncSpec) Descriptor

func (*InputSyncSpec) Descriptor() ([]byte, []int)

func (*InputSyncSpec) Marshal

func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)

func (*InputSyncSpec) MarshalTo

func (m *InputSyncSpec) MarshalTo(dAtA []byte) (int, error)

func (*InputSyncSpec) MarshalToSizedBuffer

func (m *InputSyncSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*InputSyncSpec) ProtoMessage

func (*InputSyncSpec) ProtoMessage()

func (*InputSyncSpec) Reset

func (m *InputSyncSpec) Reset()

func (*InputSyncSpec) Size

func (m *InputSyncSpec) Size() (n int)

func (*InputSyncSpec) String

func (m *InputSyncSpec) String() string

func (*InputSyncSpec) Unmarshal

func (m *InputSyncSpec) Unmarshal(dAtA []byte) error

func (*InputSyncSpec) XXX_DiscardUnknown

func (m *InputSyncSpec) XXX_DiscardUnknown()

func (*InputSyncSpec) XXX_Marshal

func (m *InputSyncSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InputSyncSpec) XXX_Merge

func (m *InputSyncSpec) XXX_Merge(src proto.Message)

func (*InputSyncSpec) XXX_Size

func (m *InputSyncSpec) XXX_Size() int

func (*InputSyncSpec) XXX_Unmarshal

func (m *InputSyncSpec) XXX_Unmarshal(b []byte) error

type InputSyncSpec_Type

type InputSyncSpec_Type int32
const (
	// Rows from the input streams are interleaved arbitrarily. Each input
	// stream runs in a separate goroutine in order to not block on the slow
	// producers.
	InputSyncSpec_PARALLEL_UNORDERED InputSyncSpec_Type = 0
	// The input streams are guaranteed to be ordered according to the column
	// ordering field; rows from the streams are interleaved to preserve that
	// ordering.
	InputSyncSpec_ORDERED InputSyncSpec_Type = 1
	// Each input stream is consumed fully, in turn, before the next input
	// stream is received from.
	InputSyncSpec_SERIAL_UNORDERED InputSyncSpec_Type = 2
)

func (InputSyncSpec_Type) Enum

func (InputSyncSpec_Type) EnumDescriptor

func (InputSyncSpec_Type) EnumDescriptor() ([]byte, []int)

func (InputSyncSpec_Type) String

func (x InputSyncSpec_Type) String() string

func (*InputSyncSpec_Type) UnmarshalJSON

func (x *InputSyncSpec_Type) UnmarshalJSON(data []byte) error

type InvertedFiltererSpec

type InvertedFiltererSpec struct {
	// The index in the input row of the inverted column.
	InvertedColIdx uint32 `protobuf:"varint,1,opt,name=inverted_col_idx,json=invertedColIdx" json:"inverted_col_idx"`
	// The expression to evaluate. The SpansToRead are ignored since they
	// have already been used to set up the input.
	InvertedExpr    inverted.SpanExpressionProto          `protobuf:"bytes,2,opt,name=inverted_expr,json=invertedExpr" json:"inverted_expr"`
	PreFiltererSpec *InvertedFiltererSpec_PreFiltererSpec `protobuf:"bytes,6,opt,name=pre_filterer_spec,json=preFiltererSpec" json:"pre_filterer_spec,omitempty"`
}

InvertedFiltererSpec is the specification of a processor that does filtering on a table by evaluating an invertedexpr.SpanExpressionProto on an inverted index of the table. The input consists of the inverted index rows from InvertedExpr.SpansToRead. It is acceptable for a filter on the primary key to be pushed down between the scan and the inverted filterer.

Example: Table columns: | a | b | c | d | where a, d are the primary key and b is the column with the inverted index. Inverted index columns: | a | b' | d | where b' is derived from b. For instance, if b is an array, b' will be elements of the array.

Internal columns are | a | b | d |. The output sets b to NULL, since it does not have the value of the original column that was indexed in the inverted column.

Optionally, there can be a pre-filtering spec that describes an expression (derived from the original expression that was converted to inverted_expr), that must evaluate to true on each inverted row. This is a performance optimization -- for more details see invertedidx.PreFilterer (geometry and geography inverted indexes are the only ones that currently use pre-filtering).

func (*InvertedFiltererSpec) Descriptor

func (*InvertedFiltererSpec) Descriptor() ([]byte, []int)

func (*InvertedFiltererSpec) Marshal

func (m *InvertedFiltererSpec) Marshal() (dAtA []byte, err error)

func (*InvertedFiltererSpec) MarshalTo

func (m *InvertedFiltererSpec) MarshalTo(dAtA []byte) (int, error)

func (*InvertedFiltererSpec) MarshalToSizedBuffer

func (m *InvertedFiltererSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*InvertedFiltererSpec) ProtoMessage

func (*InvertedFiltererSpec) ProtoMessage()

func (*InvertedFiltererSpec) Reset

func (m *InvertedFiltererSpec) Reset()

func (*InvertedFiltererSpec) Size

func (m *InvertedFiltererSpec) Size() (n int)

func (*InvertedFiltererSpec) String

func (m *InvertedFiltererSpec) String() string

func (*InvertedFiltererSpec) Unmarshal

func (m *InvertedFiltererSpec) Unmarshal(dAtA []byte) error

func (*InvertedFiltererSpec) XXX_DiscardUnknown

func (m *InvertedFiltererSpec) XXX_DiscardUnknown()

func (*InvertedFiltererSpec) XXX_Marshal

func (m *InvertedFiltererSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InvertedFiltererSpec) XXX_Merge

func (m *InvertedFiltererSpec) XXX_Merge(src proto.Message)

func (*InvertedFiltererSpec) XXX_Size

func (m *InvertedFiltererSpec) XXX_Size() int

func (*InvertedFiltererSpec) XXX_Unmarshal

func (m *InvertedFiltererSpec) XXX_Unmarshal(b []byte) error

type InvertedFiltererSpec_PreFiltererSpec

type InvertedFiltererSpec_PreFiltererSpec struct {
	// Expression has only one variable, @1, which refers to the column with
	// the inverted index.
	Expression Expression `protobuf:"bytes,1,opt,name=expression" json:"expression"`
	// The type of the original column that was indexed in the inverted index.
	Type *types.T `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
}

Optional pre-filtering expression.

func (*InvertedFiltererSpec_PreFiltererSpec) Descriptor

func (*InvertedFiltererSpec_PreFiltererSpec) Descriptor() ([]byte, []int)

func (*InvertedFiltererSpec_PreFiltererSpec) Marshal

func (m *InvertedFiltererSpec_PreFiltererSpec) Marshal() (dAtA []byte, err error)

func (*InvertedFiltererSpec_PreFiltererSpec) MarshalTo

func (m *InvertedFiltererSpec_PreFiltererSpec) MarshalTo(dAtA []byte) (int, error)

func (*InvertedFiltererSpec_PreFiltererSpec) MarshalToSizedBuffer

func (m *InvertedFiltererSpec_PreFiltererSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*InvertedFiltererSpec_PreFiltererSpec) ProtoMessage

func (*InvertedFiltererSpec_PreFiltererSpec) ProtoMessage()

func (*InvertedFiltererSpec_PreFiltererSpec) Reset

func (*InvertedFiltererSpec_PreFiltererSpec) Size

func (*InvertedFiltererSpec_PreFiltererSpec) String

func (*InvertedFiltererSpec_PreFiltererSpec) Unmarshal

func (m *InvertedFiltererSpec_PreFiltererSpec) Unmarshal(dAtA []byte) error

func (*InvertedFiltererSpec_PreFiltererSpec) XXX_DiscardUnknown

func (m *InvertedFiltererSpec_PreFiltererSpec) XXX_DiscardUnknown()

func (*InvertedFiltererSpec_PreFiltererSpec) XXX_Marshal

func (m *InvertedFiltererSpec_PreFiltererSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InvertedFiltererSpec_PreFiltererSpec) XXX_Merge

func (*InvertedFiltererSpec_PreFiltererSpec) XXX_Size

func (*InvertedFiltererSpec_PreFiltererSpec) XXX_Unmarshal

func (m *InvertedFiltererSpec_PreFiltererSpec) XXX_Unmarshal(b []byte) error

type InvertedJoinerSpec

type InvertedJoinerSpec struct {
	Table descpb.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
	// The ID of the inverted index. The first column in the index is the
	// inverted column, and the remaining columns are the primary key.
	IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
	// Expression involving the indexed column and columns from the input.
	// Assuming that the input stream has N columns and the table that has been
	// indexed has M columns, in this expression variables @1 to @N refer to
	// columns of the input stream and variables @(N+1) to @(N+M) refer to
	// columns in the table. Although the numbering includes all columns, only
	// columns corresponding to the indexed column and the input columns may be
	// present in this expression. Note that the column numbering matches the
	// numbering used below by the on expression.
	//
	// The expression is passed to xform.NewDatumToInvertedExpr to construct an
	// implementation of invertedexpr.DatumToInvertedExpr, which will be fed each
	// input row and output an expression to evaluate over the inverted index.
	InvertedExpr Expression `protobuf:"bytes,4,opt,name=inverted_expr,json=invertedExpr" json:"inverted_expr"`
	// Optional expression involving the columns in the index (other than the
	// inverted column) and the columns in the input stream. Assuming that the
	// input stream has N columns and the table that has been indexed has M
	// columns, in this expression variables @1 to @N refer to columns of the
	// input stream and variables @(N+1) to @(N+M) refer to columns in the
	// table. The numbering does not omit the column in the table corresponding
	// to the inverted column, or other table columns absent from the index, but
	// they cannot be present in this expression. Note that the column numbering
	// matches the numbering used above by the inverted expression.
	OnExpr Expression `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
	// Only INNER, LEFT_OUTER, LEFT_SEMI, LEFT_ANTI are supported. For indexes
	// that produce false positives for user expressions, like geospatial
	// indexes, only INNER and LEFT_OUTER are actually useful -- LEFT_SEMI will
	// be mapped to INNER by the optimizer, and LEFT_ANTI to LEFT_OUTER, to
	// allow the false positives to be eliminated by evaluating the exact
	// expression on the rows output by this join.
	Type descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
	// Indicates that the inverted joiner should maintain the ordering of the
	// input stream.
	MaintainOrdering bool `protobuf:"varint,7,opt,name=maintain_ordering,json=maintainOrdering" json:"maintain_ordering"`
	// Indicates that the join should output a continuation column that
	// indicates whether a row is a continuation of a group corresponding to a
	// left row.
	OutputGroupContinuationForLeftRow bool `` /* 152-byte string literal not displayed */
	// Column indexes in the input stream specifying the columns which match with
	// the non-inverted prefix columns of the index, if the index is multi-column.
	// These are the equality columns of the join. The length of
	// prefix_equality_columns should be equal to the number of non-inverted
	// prefix columns in the index.
	PrefixEqualityColumns []uint32 `` /* 127-byte string literal not displayed */
}

InvertedJoinerSpec is the specification for an inverted join. The processor has two inputs and one output.

The processor uses the inverted index on a column of the right input to join with a column of the left input. In addition to the InvertedExpr, which is specified on these two columns, it also evaluates an OnExpr on the joined rows that satisfy the InvertedExpr. The "internal columns" of an InvertedJoiner for INNER and LEFT_OUTER joins are a concatenation of the columns of the left and right inputs. The only columns of the right input that are populated are the columns present in the inverted index, except for the inverted column (since it does not represent a complete value for the datum that was indexed). For LEFT_SEMI and LEFT_ANTI, the "internal columns" are the columns of the left input.

In many cases, the inverted join will contain false positives with respect to the original join condition. This is handled by pairing it with a lookup join. This pairing works naturally when the user query specifies INNER, by running an INNER inverted join followed by an INNER lookup join. For a user query with LEFT_OUTER/LEFT_ANTI, the inverted join is run as a LEFT_OUTER with a special mode that outputs an additional bool column representing whether this row is a continuation of a group, where a group is defined as the rows corresponding to the same original left row. This is paired with a lookup join that also knows about the semantics of this bool column. For a user query with LEFT_SEMI, the inverted join is run as an INNER join with the same special mode. See the JoinReaderSpec for an example.

Example:

  Input stream columns: | a | b |
  Table columns: | c | d | e |

The InvertedExpr involves columns b, e and the primary key for the right input is c. The inverted index has columns: | e' | c | where e' is derived from e. For instance, if e is an array, e' will correspond to elements of the array. The OnExpr can use columns a, b, c, since those are the columns present in the input stream and the inverted index.

  Internal columns for INNER and LEFT_OUTER: | a | b | c | d | e | where d, e are not populated.
  Internal columns for LEFT_SEMI and LEFT_ANTI: | a | b |

Multi-column inverted index example:

  Input stream columns: | a | b | c |
  Table columns: | d | e | f | g |

The InvertedExpr involves columns b, e. The non-inverted prefix key columns equate c to f. The primary key for the right input is d. The inverted index has columns: | f | e' | d | where e' is derived from e and f is a non-inverted prefix column. The OnExpr can use columns a, b, c, d, f.

  Internal columns for INNER and LEFT_OUTER: | a | b | c | d | e | f | g | where e, g are not populated.
  Internal columns for LEFT_SEMI and LEFT_ANTI: | a | b | c |

For INNER/LEFT_OUTER with OutputGroupContinuationForLeftRow = true, the internal columns include an additional bool column as the last column.
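To make the wiring concrete, here is a hedged Go sketch of the first example above; tableDesc and the index position are hypothetical placeholders, the expression strings are illustrative, and descpb.InnerJoin is assumed to be the INNER JoinType constant:

invertedJoin := execinfrapb.InvertedJoinerSpec{
	Table:    tableDesc, // hypothetical descpb.TableDescriptor for | c | d | e |
	IndexIdx: 1,         // hypothetical position of the inverted index on e
	// Input columns a, b are @1, @2; table columns c, d, e are @3, @4, @5.
	InvertedExpr: execinfrapb.Expression{Expr: "@5 && @2"}, // e.g. array overlap between e and b
	OnExpr:       execinfrapb.Expression{Expr: "@1 != @3"}, // may only use a, b, c
	Type:         descpb.InnerJoin,
}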

func (*InvertedJoinerSpec) Descriptor

func (*InvertedJoinerSpec) Descriptor() ([]byte, []int)

func (*InvertedJoinerSpec) Marshal

func (m *InvertedJoinerSpec) Marshal() (dAtA []byte, err error)

func (*InvertedJoinerSpec) MarshalTo

func (m *InvertedJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*InvertedJoinerSpec) MarshalToSizedBuffer

func (m *InvertedJoinerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*InvertedJoinerSpec) ProtoMessage

func (*InvertedJoinerSpec) ProtoMessage()

func (*InvertedJoinerSpec) Reset

func (m *InvertedJoinerSpec) Reset()

func (*InvertedJoinerSpec) Size

func (m *InvertedJoinerSpec) Size() (n int)

func (*InvertedJoinerSpec) String

func (m *InvertedJoinerSpec) String() string

func (*InvertedJoinerSpec) Unmarshal

func (m *InvertedJoinerSpec) Unmarshal(dAtA []byte) error

func (*InvertedJoinerSpec) XXX_DiscardUnknown

func (m *InvertedJoinerSpec) XXX_DiscardUnknown()

func (*InvertedJoinerSpec) XXX_Marshal

func (m *InvertedJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InvertedJoinerSpec) XXX_Merge

func (m *InvertedJoinerSpec) XXX_Merge(src proto.Message)

func (*InvertedJoinerSpec) XXX_Size

func (m *InvertedJoinerSpec) XXX_Size() int

func (*InvertedJoinerSpec) XXX_Unmarshal

func (m *InvertedJoinerSpec) XXX_Unmarshal(b []byte) error

type JobProgress

type JobProgress struct {
	JobID github_com_cockroachdb_cockroach_pkg_jobs_jobspb.JobID `protobuf:"varint,1,opt,name=job_id,json=jobId,casttype=sqlfmt/cockroach/pkg/jobs/jobspb.JobID" json:"job_id"`
	// contribution is the percent of work of the total this processor will
	// process.
	Contribution float32 `protobuf:"fixed32,2,opt,name=contribution" json:"contribution"`
	// slot is the index into the job details for this processor's completion.
	Slot int32 `protobuf:"varint,3,opt,name=slot" json:"slot"`
}

JobProgress identifies the job to report progress on. This reporting happens outside this package.
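A minimal hedged sketch of a progress spec for a processor that accounts for a quarter of a job's work; all values are illustrative:

prog := execinfrapb.JobProgress{
	JobID:        jobspb.JobID(123456), // hypothetical job ID
	Contribution: 0.25,                 // this processor does 25% of the work
	Slot:         0,                    // index into the job details
}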

func (*JobProgress) Descriptor

func (*JobProgress) Descriptor() ([]byte, []int)

func (*JobProgress) Marshal

func (m *JobProgress) Marshal() (dAtA []byte, err error)

func (*JobProgress) MarshalTo

func (m *JobProgress) MarshalTo(dAtA []byte) (int, error)

func (*JobProgress) MarshalToSizedBuffer

func (m *JobProgress) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*JobProgress) ProtoMessage

func (*JobProgress) ProtoMessage()

func (*JobProgress) Reset

func (m *JobProgress) Reset()

func (*JobProgress) Size

func (m *JobProgress) Size() (n int)

func (*JobProgress) String

func (m *JobProgress) String() string

func (*JobProgress) Unmarshal

func (m *JobProgress) Unmarshal(dAtA []byte) error

func (*JobProgress) XXX_DiscardUnknown

func (m *JobProgress) XXX_DiscardUnknown()

func (*JobProgress) XXX_Marshal

func (m *JobProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobProgress) XXX_Merge

func (m *JobProgress) XXX_Merge(src proto.Message)

func (*JobProgress) XXX_Size

func (m *JobProgress) XXX_Size() int

func (*JobProgress) XXX_Unmarshal

func (m *JobProgress) XXX_Unmarshal(b []byte) error

type JoinReaderSpec

type JoinReaderSpec struct {
	FetchSpec descpb.IndexFetchSpec `protobuf:"bytes,19,opt,name=fetch_spec,json=fetchSpec" json:"fetch_spec"`
	// SplitFamilyIDs indicates that spans which fully constrain the index should
	// be split into single-family spans for the given families. Unset if
	// splitting is not possible.
	//
	// See span.MakeSplitter for the conditions that must hold for splitting to be
	// allowed. It is recommended to use span.MakeSplitter() followed by
	// splitter.FamilyIDs() to populate this field.
	SplitFamilyIDs []github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.FamilyID `` /* 158-byte string literal not displayed */
	// LookupExpr represents the part of the join condition used to perform the
	// lookup into the index. It should only be set when LookupColumns is empty.
	// LookupExpr is used instead of LookupColumns when the lookup condition is
	// more complicated than a simple equality between input columns and index
	// columns. In this case, LookupExpr specifies the expression that will be
	// used to construct the spans for each lookup. Currently, the only
	// expressions supported are conjunctions (AND expressions) of equality, IN
	// expressions, and simple inequalities, specifically:
	//  1. equalities between two variables (one from the input and one from the
	//     index) representing the equi-join condition(s),
	//  2. equalities between an index column and a constant, and
	//  3. IN expressions between an index column and a tuple of constants.
	//  4. LT,GT,GE,LE between an index var and a constant.
	//
	// Variables in this expression are assigned in the same way as the ON
	// condition below. Assuming that the input stream has N columns and the fetch
	// spec has M fetched columns, in this expression variables @1 to @N refer to
	// columns of the input stream and variables @(N+1) to @(N+M) refer to fetched
	// columns.
	//
	// For example, a valid LookupExpr for N=2 and M=2 might be:
	//   @3 IN (10, 20) AND @2 = @4.
	LookupExpr Expression `protobuf:"bytes,16,opt,name=lookup_expr,json=lookupExpr" json:"lookup_expr"`
	// If RemoteLookupExpr is set, this is a locality optimized lookup join. In
	// this case, LookupExpr contains the lookup join conditions targeting ranges
	// located on local nodes (relative to the gateway region), and
	// RemoteLookupExpr contains the lookup join conditions targeting remote
	// nodes. The optimizer will only plan a locality optimized lookup join if it
	// is known that each lookup returns at most one row. This fact allows the
	// joinReader to use the local conditions in LookupExpr first, and if a match
	// is found locally for each input row, there is no need to search remote
	// nodes. If a local match is not found for all input rows, the joinReader
	// uses RemoteLookupExpr to search remote nodes.
	//
	// The same restrictions on supported expressions that apply to LookupExpr
	// also apply to RemoteLookupExpr. See the comment above LookupExpr for more
	// details.
	RemoteLookupExpr Expression `protobuf:"bytes,17,opt,name=remote_lookup_expr,json=remoteLookupExpr" json:"remote_lookup_expr"`
	// LookupColumns, like LookupExpr, represents the part of the join condition
	// used to perform the lookup into the index. It is used as an optimization
	// for the common case where the join conditions are all simple equalities
	// between input and index columns (i.e., only the first of the supported
	// expression types listed above for LookupExpr). LookupColumns should only
	// be set when LookupExpr is empty.
	//
	// LookupColumns contains the column indexes in the input stream that match
	// with the index columns. These are the equality columns of the join. For
	// example, if there are 3 input columns and 2 fetched columns, LookupColumns
	// {0, 2} is equivalent to the LookupExpr @1 = @4 AND @3 = @5.
	//
	// If LookupExpr is empty, LookupColumns can be interpreted as follows:
	//
	// If empty (index join), the start of the input stream schema is assumed to
	// match the index columns. The joinReader will perform an index join and the
	// "internal columns" will be the fetched columns.
	//
	// If populated (lookup join), the `joinReader` will perform a lookup join
	// and the "internal columns" will be the concatenation of the input stream
	// columns followed by the fetched columns (except for semi/anti join, which
	// don't output any fetched columns).
	// TODO(rytaft): remove this field and use LookupExpr for all cases. This
	// requires ensuring that cases currently using LookupColumns do not regress.
	LookupColumns []uint32 `protobuf:"varint,3,rep,packed,name=lookup_columns,json=lookupColumns" json:"lookup_columns,omitempty"`
	// If set, the lookup columns form a key in the target table and thus each
	// lookup has at most one result.
	LookupColumnsAreKey bool `protobuf:"varint,8,opt,name=lookup_columns_are_key,json=lookupColumnsAreKey" json:"lookup_columns_are_key"`
	// "ON" expression (in addition to the conditions in LookupExpr and/or
	// equality constraints captured by the LookupColumns). Assuming that the
	// input stream has N columns and the fetch spec has M fetched columns, in
	// this expression variables @1 to @N refer to columns of the input stream and
	// variables @(N+1) to @(N+M) refer to fetched columns.
	OnExpr Expression `protobuf:"bytes,4,opt,name=on_expr,json=onExpr" json:"on_expr"`
	// For lookup joins. Only JoinType_INNER and JoinType_LEFT_OUTER are
	// supported.
	Type descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
	// Indicates the row-level locking strength to be used by the join. If set to
	// FOR_NONE, no row-level locking should be performed.
	LockingStrength descpb.ScanLockingStrength `` /* 137-byte string literal not displayed */
	// Indicates the policy to be used by the join for handling conflicting locks
	// held by other active transactions when attempting to lock rows. Always set
	// to BLOCK when locking_strength is FOR_NONE.
	LockingWaitPolicy descpb.ScanLockingWaitPolicy `` /* 148-byte string literal not displayed */
	// Indicates that the join reader should maintain the ordering of the input
	// stream. This is applicable to both lookup joins and index joins. For lookup
	// joins, maintaining order is expensive because it requires buffering. For
	// index joins buffering is not required, but still, if ordering is not
	// required, we'll change the output order to allow for some Pebble
	// optimizations.
	MaintainOrdering bool `protobuf:"varint,11,opt,name=maintain_ordering,json=maintainOrdering" json:"maintain_ordering"`
	// LeftJoinWithPairedJoiner is used when a left {outer,anti,semi} join is
	// being achieved by pairing two joins, and this is the second join. See
	// the paired-joins discussion in the JoinReaderSpec comment below.
	LeftJoinWithPairedJoiner bool `protobuf:"varint,14,opt,name=left_join_with_paired_joiner,json=leftJoinWithPairedJoiner" json:"left_join_with_paired_joiner"`
	// OutputGroupContinuationForLeftRow indicates that this join is the first
	// join in the paired-joins. At most one of OutputGroupContinuationForLeftRow
	// and LeftJoinWithPairedJoiner must be true. Additionally, if
	// OutputGroupContinuationForLeftRow is true, MaintainOrdering must also
	// be true.
	OutputGroupContinuationForLeftRow bool `` /* 153-byte string literal not displayed */
	// lookup_batch_bytes_limit, if non-zero, controls the TargetBytes limits that
	// the joiner will use for its lookups. If zero, then the server-side default
	// is used. Note that, regardless of this setting, bytes limits are not always
	// used for lookups - it depends on whether the joiner decides it wants
	// DistSender-parallelism or not.
	LookupBatchBytesLimit int64 `protobuf:"varint,18,opt,name=lookup_batch_bytes_limit,json=lookupBatchBytesLimit" json:"lookup_batch_bytes_limit"`
}

JoinReaderSpec is the specification for a "join reader". A join reader performs KV operations to retrieve specific rows that correspond to the values in the input stream (join by lookup). The output can optionally preserve the order of the input rows.

The "internal columns" of a JoinReader (see ProcessorSpec) are either:

  • the fetched columns (see IndexFetchSpec), if we are performing an index join (no lookup columns) or if we are performing a semi or anti join, or
  • the concatenation of the columns of the input stream with the fetched columns.

Internally, only the values for the columns needed by the post-processing stage are populated.

Example:

  Input stream columns: | a | b |
  Fetched columns: | c | d | e |

If performing a lookup join on a = c (lookup columns is [0]):

Internal columns: | a | b | c | d | e |

If performing an index join (where a = c and b = d) (lookup columns is []):

Internal columns: | c | d | e |

There is a special case when a "join reader" is used as the second join in a pair of joins to accomplish a LEFT_OUTER, LEFT_SEMI or LEFT_ANTI join. The first join in this pair of joins is unable to precisely evaluate the join condition and produces false positives. This is typical when the first join is an inverted join (see InvertedJoinerSpec), but can also be the case when the first join is being evaluated over an index that does not have all the columns needed to evaluate the join condition. The first join outputs rows in sorted order of the original left columns. The input stream columns for the second join are a combination of the original left columns and the lookup columns. The first join additionally adds a continuation column that demarcates a group of successive rows that correspond to an original left row. The first row in a group contains false (since it is not a continuation of the group) and successive rows contain true.

The mapping from the original join to the pair of joins is:

  LEFT_OUTER => LEFT_OUTER, LEFT_OUTER
  LEFT_SEMI  => INNER, LEFT_SEMI (better than doing INNER, INNER, SORT, DISTINCT)
  LEFT_ANTI  => LEFT_OUTER, LEFT_ANTI

where the first join always preserves order.

More specifically, consider a lookup join example where the input stream columns are: | a | b | c | d | cont |. The lookup column is | d |. And the fetched columns are | e | f | with d = e. This join reader can see input of the form:

  a1, b1, c1, d1, false
  a1, b1, c1, d2, true
  a1, b2, c1, null, false  // when the first join is LEFT_OUTER
  a2, b1, c1, d3, false
  a2, b1, c1, d4, true

Say both the results for (a1, b1, c1) are false positives, and the first of the (a2, b1, c1) results is a false positive. The output for LEFT_OUTER:

  a1, b1, c1, d1, false, null, null
  a1, b2, c1, null, false, null, null
  a2, b1, c1, d4, true, d4, f1

The d, cont columns are not part of the original left row, so they will be projected away after the join.

The output for LEFT_ANTI:

  a1, b1, c1, d1, false
  a1, b2, c1, null, false

Again, the d, cont columns will be projected away after the join.

The output for LEFT_SEMI:

  a2, b1, c1, d4, true

Again, the d, cont columns will be projected away after the join.

The example above is for a lookup join as the second join in the paired-joins. The lookup join can also be the first join in the paired-joins, which is indicated by both OutputGroupContinuationForLeftRow and MaintainOrdering set to true.
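As a hedged sketch of the simple lookup-join example above (a = c, lookup columns [0]); fetchSpec is a hypothetical descpb.IndexFetchSpec listing c, d, e, and the OnExpr is illustrative:

lookupJoin := execinfrapb.JoinReaderSpec{
	FetchSpec:     fetchSpec,
	LookupColumns: []uint32{0}, // input column a equals the first index column c
	// @1, @2 are a, b; @3, @4, @5 are the fetched c, d, e.
	OnExpr: execinfrapb.Expression{Expr: "@2 > @4"}, // hypothetical extra condition
	Type:   descpb.InnerJoin,
}
// Leaving LookupColumns and LookupExpr empty instead makes this an index
// join: IsIndexJoin() returns true and the internal columns are just c, d, e.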

func (*JoinReaderSpec) Descriptor

func (*JoinReaderSpec) Descriptor() ([]byte, []int)

func (*JoinReaderSpec) IsIndexJoin

func (spec *JoinReaderSpec) IsIndexJoin() bool

IsIndexJoin returns true if spec defines an index join (as opposed to a lookup join).

func (*JoinReaderSpec) Marshal

func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)

func (*JoinReaderSpec) MarshalTo

func (m *JoinReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*JoinReaderSpec) MarshalToSizedBuffer

func (m *JoinReaderSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*JoinReaderSpec) ProtoMessage

func (*JoinReaderSpec) ProtoMessage()

func (*JoinReaderSpec) Reset

func (m *JoinReaderSpec) Reset()

func (*JoinReaderSpec) Size

func (m *JoinReaderSpec) Size() (n int)

func (*JoinReaderSpec) String

func (m *JoinReaderSpec) String() string

func (*JoinReaderSpec) Unmarshal

func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error

func (*JoinReaderSpec) XXX_DiscardUnknown

func (m *JoinReaderSpec) XXX_DiscardUnknown()

func (*JoinReaderSpec) XXX_Marshal

func (m *JoinReaderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JoinReaderSpec) XXX_Merge

func (m *JoinReaderSpec) XXX_Merge(src proto.Message)

func (*JoinReaderSpec) XXX_Size

func (m *JoinReaderSpec) XXX_Size() int

func (*JoinReaderSpec) XXX_Unmarshal

func (m *JoinReaderSpec) XXX_Unmarshal(b []byte) error

type KVStats

type KVStats struct {
	BytesRead  optional.Uint `protobuf:"bytes,1,opt,name=bytes_read,json=bytesRead" json:"bytes_read"`
	TuplesRead optional.Uint `protobuf:"bytes,2,opt,name=tuples_read,json=tuplesRead" json:"tuples_read"`
	// Cumulated time spent waiting for a KV request. This includes disk IO time
	// and potentially network time (if any of the keys are not local).
	KVTime optional.Duration `protobuf:"bytes,3,opt,name=kv_time,json=kvTime" json:"kv_time"`
	// ContentionTime is the cumulative time a KV request spent contending with
	// other transactions. This time accounts for a portion of KVTime above.
	ContentionTime    optional.Duration `protobuf:"bytes,4,opt,name=contention_time,json=contentionTime" json:"contention_time"`
	NumInterfaceSteps optional.Uint     `protobuf:"bytes,5,opt,name=num_interface_steps,json=numInterfaceSteps" json:"num_interface_steps"`
	NumInternalSteps  optional.Uint     `protobuf:"bytes,6,opt,name=num_internal_steps,json=numInternalSteps" json:"num_internal_steps"`
	NumInterfaceSeeks optional.Uint     `protobuf:"bytes,7,opt,name=num_interface_seeks,json=numInterfaceSeeks" json:"num_interface_seeks"`
	NumInternalSeeks  optional.Uint     `protobuf:"bytes,8,opt,name=num_internal_seeks,json=numInternalSeeks" json:"num_internal_seeks"`
}

KVStats contains statistics for components that perform KV operations.

func (*KVStats) Descriptor

func (*KVStats) Descriptor() ([]byte, []int)

func (*KVStats) Marshal

func (m *KVStats) Marshal() (dAtA []byte, err error)

func (*KVStats) MarshalTo

func (m *KVStats) MarshalTo(dAtA []byte) (int, error)

func (*KVStats) MarshalToSizedBuffer

func (m *KVStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*KVStats) ProtoMessage

func (*KVStats) ProtoMessage()

func (*KVStats) Reset

func (m *KVStats) Reset()

func (*KVStats) Size

func (m *KVStats) Size() (n int)

func (*KVStats) String

func (m *KVStats) String() string

func (*KVStats) Unmarshal

func (m *KVStats) Unmarshal(dAtA []byte) error

func (*KVStats) XXX_DiscardUnknown

func (m *KVStats) XXX_DiscardUnknown()

func (*KVStats) XXX_Marshal

func (m *KVStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*KVStats) XXX_Merge

func (m *KVStats) XXX_Merge(src proto.Message)

func (*KVStats) XXX_Size

func (m *KVStats) XXX_Size() int

func (*KVStats) XXX_Unmarshal

func (m *KVStats) XXX_Unmarshal(b []byte) error

type LocalPlanNodeSpec

type LocalPlanNodeSpec struct {
	RowSourceIdx uint32 `protobuf:"varint,1,opt,name=RowSourceIdx" json:"RowSourceIdx"`
	NumInputs    uint32 `protobuf:"varint,2,opt,name=NumInputs" json:"NumInputs"`
	Name         string `protobuf:"bytes,3,opt,name=Name" json:"Name"`
}

LocalPlanNodeSpec is the specification for a local planNode wrapping processor. It's created for situations where a planNode has no DistSQL processor equivalent, and constrains the plan to live on the gateway node. This spec contains just an index, which the execution engine uses to locate, within an array of local planNodes, the planNode instance this processor is executing. See LocalProcessors and LocalProcessorIndexes on distsqlplan.PhysicalPlan.

func (*LocalPlanNodeSpec) Descriptor

func (*LocalPlanNodeSpec) Descriptor() ([]byte, []int)

func (*LocalPlanNodeSpec) Marshal

func (m *LocalPlanNodeSpec) Marshal() (dAtA []byte, err error)

func (*LocalPlanNodeSpec) MarshalTo

func (m *LocalPlanNodeSpec) MarshalTo(dAtA []byte) (int, error)

func (*LocalPlanNodeSpec) MarshalToSizedBuffer

func (m *LocalPlanNodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*LocalPlanNodeSpec) ProtoMessage

func (*LocalPlanNodeSpec) ProtoMessage()

func (*LocalPlanNodeSpec) Reset

func (m *LocalPlanNodeSpec) Reset()

func (*LocalPlanNodeSpec) Size

func (m *LocalPlanNodeSpec) Size() (n int)

func (*LocalPlanNodeSpec) String

func (m *LocalPlanNodeSpec) String() string

func (*LocalPlanNodeSpec) Unmarshal

func (m *LocalPlanNodeSpec) Unmarshal(dAtA []byte) error

func (*LocalPlanNodeSpec) XXX_DiscardUnknown

func (m *LocalPlanNodeSpec) XXX_DiscardUnknown()

func (*LocalPlanNodeSpec) XXX_Marshal

func (m *LocalPlanNodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LocalPlanNodeSpec) XXX_Merge

func (m *LocalPlanNodeSpec) XXX_Merge(src proto.Message)

func (*LocalPlanNodeSpec) XXX_Size

func (m *LocalPlanNodeSpec) XXX_Size() int

func (*LocalPlanNodeSpec) XXX_Unmarshal

func (m *LocalPlanNodeSpec) XXX_Unmarshal(b []byte) error

type MergeJoinerSpec

type MergeJoinerSpec struct {
	// The streams must be ordered according to the columns that have equality
	// constraints. The first column of the left ordering is constrained to be
	// equal to the first column in the right ordering and so on. The ordering
	// lengths and directions must match.
	// In the example below, the left ordering describes C1+,C2- and the right
	// ordering describes C5+,C4-.
	LeftOrdering  Ordering `protobuf:"bytes,1,opt,name=left_ordering,json=leftOrdering" json:"left_ordering"`
	RightOrdering Ordering `protobuf:"bytes,2,opt,name=right_ordering,json=rightOrdering" json:"right_ordering"`
	// "ON" expression (in addition to the equality constraints captured by the
	// orderings). Assuming that the left stream has N columns and the right
	// stream has M columns, in this expression ordinal references @1 to @N refer
	// to columns of the left stream and variables @(N+1) to @(N+M) refer to
	// columns in the right stream.
	OnExpr Expression      `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
	Type   descpb.JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
	// NullEquality indicates that NULL = NULL should be considered true.
	// This allows OUTER JOINs to consider NULL values meaningfully. An
	// example of this is during SCRUB checks on secondary indexes.
	NullEquality bool `protobuf:"varint,7,opt,name=null_equality,json=nullEquality" json:"null_equality"`
	// If true, it is guaranteed that the left equality columns form a key for
	// the left input. In other words, no two rows from the left input have the
	// same set of values on the left equality columns.
	LeftEqColumnsAreKey bool `protobuf:"varint,8,opt,name=left_eq_columns_are_key,json=leftEqColumnsAreKey" json:"left_eq_columns_are_key"`
	// If true, it is guaranteed that the right equality columns form a key for
	// the right input. In other words, no two rows from the right input have the
	// same set of values on the right equality columns.
	RightEqColumnsAreKey bool `protobuf:"varint,9,opt,name=right_eq_columns_are_key,json=rightEqColumnsAreKey" json:"right_eq_columns_are_key"`
}

MergeJoinerSpec is the specification for a merge join processor. The processor has two inputs and one output. The inputs must have the same ordering on the columns that have equality constraints. For example:

SELECT * FROM T1 INNER JOIN T2 ON T1.C1 = T2.C5 AND T1.C2 = T2.C4

To perform a merge join, the streams corresponding to T1 and T2 must have the same ordering on columns C1, C2 and C5, C4 respectively. For example: C1+,C2- and C5+,C4-.

The "internal columns" of a MergeJoiner (see ProcessorSpec) are:

  • for INNER, LEFT_OUTER, RIGHT_OUTER, FULL_OUTER - the concatenation of left input columns and right input columns. If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side.
  • for LEFT_SEMI, LEFT_ANTI, INTERSECT_ALL, EXCEPT_ALL - the left input columns.
  • for RIGHT_SEMI, RIGHT_ANTI - the right input columns.

Note that, regardless of the join type, an optional ON expression can refer to columns from both inputs.
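A hedged sketch of the query above, assuming T1's stream is | C1 | C2 | and T2's stream is | C4 | C5 | (so C5 is column 1 and C4 is column 0 on the right); descpb.InnerJoin is assumed to be the INNER JoinType constant:

mergeJoin := execinfrapb.MergeJoinerSpec{
	LeftOrdering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{
		{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC},  // C1+
		{ColIdx: 1, Direction: execinfrapb.Ordering_Column_DESC}, // C2-
	}},
	RightOrdering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{
		{ColIdx: 1, Direction: execinfrapb.Ordering_Column_ASC},  // C5+
		{ColIdx: 0, Direction: execinfrapb.Ordering_Column_DESC}, // C4-
	}},
	Type: descpb.InnerJoin,
}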

func (*MergeJoinerSpec) Descriptor

func (*MergeJoinerSpec) Descriptor() ([]byte, []int)

func (*MergeJoinerSpec) Marshal

func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)

func (*MergeJoinerSpec) MarshalTo

func (m *MergeJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*MergeJoinerSpec) MarshalToSizedBuffer

func (m *MergeJoinerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*MergeJoinerSpec) ProtoMessage

func (*MergeJoinerSpec) ProtoMessage()

func (*MergeJoinerSpec) Reset

func (m *MergeJoinerSpec) Reset()

func (*MergeJoinerSpec) Size

func (m *MergeJoinerSpec) Size() (n int)

func (*MergeJoinerSpec) String

func (m *MergeJoinerSpec) String() string

func (*MergeJoinerSpec) Unmarshal

func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error

func (*MergeJoinerSpec) XXX_DiscardUnknown

func (m *MergeJoinerSpec) XXX_DiscardUnknown()

func (*MergeJoinerSpec) XXX_Marshal

func (m *MergeJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MergeJoinerSpec) XXX_Merge

func (m *MergeJoinerSpec) XXX_Merge(src proto.Message)

func (*MergeJoinerSpec) XXX_Size

func (m *MergeJoinerSpec) XXX_Size() int

func (*MergeJoinerSpec) XXX_Unmarshal

func (m *MergeJoinerSpec) XXX_Unmarshal(b []byte) error

type MetadataTestReceiverSpec

type MetadataTestReceiverSpec struct {
	SenderIDs []string `protobuf:"bytes,1,rep,name=sender_ids,json=senderIds" json:"sender_ids,omitempty"`
}

func (*MetadataTestReceiverSpec) Descriptor

func (*MetadataTestReceiverSpec) Descriptor() ([]byte, []int)

func (*MetadataTestReceiverSpec) Marshal

func (m *MetadataTestReceiverSpec) Marshal() (dAtA []byte, err error)

func (*MetadataTestReceiverSpec) MarshalTo

func (m *MetadataTestReceiverSpec) MarshalTo(dAtA []byte) (int, error)

func (*MetadataTestReceiverSpec) MarshalToSizedBuffer

func (m *MetadataTestReceiverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*MetadataTestReceiverSpec) ProtoMessage

func (*MetadataTestReceiverSpec) ProtoMessage()

func (*MetadataTestReceiverSpec) Reset

func (m *MetadataTestReceiverSpec) Reset()

func (*MetadataTestReceiverSpec) Size

func (m *MetadataTestReceiverSpec) Size() (n int)

func (*MetadataTestReceiverSpec) String

func (m *MetadataTestReceiverSpec) String() string

func (*MetadataTestReceiverSpec) Unmarshal

func (m *MetadataTestReceiverSpec) Unmarshal(dAtA []byte) error

func (*MetadataTestReceiverSpec) XXX_DiscardUnknown

func (m *MetadataTestReceiverSpec) XXX_DiscardUnknown()

func (*MetadataTestReceiverSpec) XXX_Marshal

func (m *MetadataTestReceiverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MetadataTestReceiverSpec) XXX_Merge

func (m *MetadataTestReceiverSpec) XXX_Merge(src proto.Message)

func (*MetadataTestReceiverSpec) XXX_Size

func (m *MetadataTestReceiverSpec) XXX_Size() int

func (*MetadataTestReceiverSpec) XXX_Unmarshal

func (m *MetadataTestReceiverSpec) XXX_Unmarshal(b []byte) error

type MetadataTestSenderSpec

type MetadataTestSenderSpec struct {
	ID string `protobuf:"bytes,1,opt,name=id" json:"id"`
}

func (*MetadataTestSenderSpec) Descriptor

func (*MetadataTestSenderSpec) Descriptor() ([]byte, []int)

func (*MetadataTestSenderSpec) Marshal

func (m *MetadataTestSenderSpec) Marshal() (dAtA []byte, err error)

func (*MetadataTestSenderSpec) MarshalTo

func (m *MetadataTestSenderSpec) MarshalTo(dAtA []byte) (int, error)

func (*MetadataTestSenderSpec) MarshalToSizedBuffer

func (m *MetadataTestSenderSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*MetadataTestSenderSpec) ProtoMessage

func (*MetadataTestSenderSpec) ProtoMessage()

func (*MetadataTestSenderSpec) Reset

func (m *MetadataTestSenderSpec) Reset()

func (*MetadataTestSenderSpec) Size

func (m *MetadataTestSenderSpec) Size() (n int)

func (*MetadataTestSenderSpec) String

func (m *MetadataTestSenderSpec) String() string

func (*MetadataTestSenderSpec) Unmarshal

func (m *MetadataTestSenderSpec) Unmarshal(dAtA []byte) error

func (*MetadataTestSenderSpec) XXX_DiscardUnknown

func (m *MetadataTestSenderSpec) XXX_DiscardUnknown()

func (*MetadataTestSenderSpec) XXX_Marshal

func (m *MetadataTestSenderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*MetadataTestSenderSpec) XXX_Merge

func (m *MetadataTestSenderSpec) XXX_Merge(src proto.Message)

func (*MetadataTestSenderSpec) XXX_Size

func (m *MetadataTestSenderSpec) XXX_Size() int

func (*MetadataTestSenderSpec) XXX_Unmarshal

func (m *MetadataTestSenderSpec) XXX_Unmarshal(b []byte) error

type MockDialer

type MockDialer struct {
	// Addr is assumed to be obtained from execinfrapb.StartMockDistSQLServer.
	Addr net.Addr
	// contains filtered or unexported fields
}

MockDialer is a mocked implementation of the Outbox's `Dialer` interface. It is used to create a connection with a client stream.

func (*MockDialer) Close

func (d *MockDialer) Close()

Close must be called after the test is done.

func (*MockDialer) DialNoBreaker

DialNoBreaker establishes a grpc connection once.

type MockDistSQLServer

type MockDistSQLServer struct {
	InboundStreams chan InboundStreamNotification
}

MockDistSQLServer implements the DistSQLServer (gRPC) interface and allows clients to control the inbound streams.

func StartMockDistSQLServer

func StartMockDistSQLServer(
	ctx context.Context, clock *hlc.Clock, stopper *stop.Stopper, sqlInstanceID base.SQLInstanceID,
) (uuid.UUID, *MockDistSQLServer, net.Addr, error)

StartMockDistSQLServer starts a MockDistSQLServer and returns the server along with the address on which it's listening.
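A hedged test-side sketch; the stop.NewStopper and hlc.NewClock constructors are assumptions about the surrounding CockroachDB test utilities, not part of this package, and ctx/t come from the enclosing test:

stopper := stop.NewStopper()
defer stopper.Stop(ctx)
clock := hlc.NewClock(hlc.UnixNano, 0 /* maxOffset */) // assumed constructor shape
_, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(
	ctx, clock, stopper, base.SQLInstanceID(1),
)
if err != nil {
	t.Fatal(err)
}
// Dial addr to open a FlowStream; the stream then shows up on
// mockServer.InboundStreams for the test to inspect.
_ = addr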

func (*MockDistSQLServer) CancelDeadFlows

func (ds *MockDistSQLServer) CancelDeadFlows(
	_ context.Context, req *CancelDeadFlowsRequest,
) (*SimpleResponse, error)

CancelDeadFlows is part of the DistSQLServer interface.

func (*MockDistSQLServer) FlowStream

func (ds *MockDistSQLServer) FlowStream(stream DistSQL_FlowStreamServer) error

FlowStream is part of the DistSQLServer interface.

func (*MockDistSQLServer) SetupFlow

func (ds *MockDistSQLServer) SetupFlow(
	_ context.Context, req *SetupFlowRequest,
) (*SimpleResponse, error)

SetupFlow is part of the DistSQLServer interface.

type NetworkRxStats

type NetworkRxStats struct {
	// Observed network latency (round-trip time between the two nodes).
	Latency optional.Duration `protobuf:"bytes,1,opt,name=latency" json:"latency"`
	// Cumulated time spent waiting to receive or transmit tuple data.
	WaitTime optional.Duration `protobuf:"bytes,2,opt,name=wait_time,json=waitTime" json:"wait_time"`
	// Time spent deserializing network data.
	DeserializationTime optional.Duration `protobuf:"bytes,3,opt,name=deserialization_time,json=deserializationTime" json:"deserialization_time"`
	TuplesReceived      optional.Uint     `protobuf:"bytes,4,opt,name=tuples_received,json=tuplesReceived" json:"tuples_received"`
	BytesReceived       optional.Uint     `protobuf:"bytes,5,opt,name=bytes_received,json=bytesReceived" json:"bytes_received"`
	// Number of messages received over the network.
	MessagesReceived optional.Uint `protobuf:"bytes,6,opt,name=messages_received,json=messagesReceived" json:"messages_received"`
}

NetworkRxStats contains statistics for components that receive row data over the network. Note: Rx is shorthand for Receive.

func (*NetworkRxStats) Descriptor

func (*NetworkRxStats) Descriptor() ([]byte, []int)

func (*NetworkRxStats) Marshal

func (m *NetworkRxStats) Marshal() (dAtA []byte, err error)

func (*NetworkRxStats) MarshalTo

func (m *NetworkRxStats) MarshalTo(dAtA []byte) (int, error)

func (*NetworkRxStats) MarshalToSizedBuffer

func (m *NetworkRxStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*NetworkRxStats) ProtoMessage

func (*NetworkRxStats) ProtoMessage()

func (*NetworkRxStats) Reset

func (m *NetworkRxStats) Reset()

func (*NetworkRxStats) Size

func (m *NetworkRxStats) Size() (n int)

func (*NetworkRxStats) String

func (m *NetworkRxStats) String() string

func (*NetworkRxStats) Unmarshal

func (m *NetworkRxStats) Unmarshal(dAtA []byte) error

func (*NetworkRxStats) XXX_DiscardUnknown

func (m *NetworkRxStats) XXX_DiscardUnknown()

func (*NetworkRxStats) XXX_Marshal

func (m *NetworkRxStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NetworkRxStats) XXX_Merge

func (m *NetworkRxStats) XXX_Merge(src proto.Message)

func (*NetworkRxStats) XXX_Size

func (m *NetworkRxStats) XXX_Size() int

func (*NetworkRxStats) XXX_Unmarshal

func (m *NetworkRxStats) XXX_Unmarshal(b []byte) error

type NetworkTxStats

type NetworkTxStats struct {
	TuplesSent optional.Uint `protobuf:"bytes,1,opt,name=tuples_sent,json=tuplesSent" json:"tuples_sent"`
	BytesSent  optional.Uint `protobuf:"bytes,2,opt,name=bytes_sent,json=bytesSent" json:"bytes_sent"`
	// Number of messages sent over the network.
	MessagesSent optional.Uint `protobuf:"bytes,3,opt,name=messages_sent,json=messagesSent" json:"messages_sent"`
}

NetworkTxStats contains statistics for components that send row data over the network. Note: Tx is shorthand for Transmit.

func (*NetworkTxStats) Descriptor

func (*NetworkTxStats) Descriptor() ([]byte, []int)

func (*NetworkTxStats) Marshal

func (m *NetworkTxStats) Marshal() (dAtA []byte, err error)

func (*NetworkTxStats) MarshalTo

func (m *NetworkTxStats) MarshalTo(dAtA []byte) (int, error)

func (*NetworkTxStats) MarshalToSizedBuffer

func (m *NetworkTxStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*NetworkTxStats) ProtoMessage

func (*NetworkTxStats) ProtoMessage()

func (*NetworkTxStats) Reset

func (m *NetworkTxStats) Reset()

func (*NetworkTxStats) Size

func (m *NetworkTxStats) Size() (n int)

func (*NetworkTxStats) String

func (m *NetworkTxStats) String() string

func (*NetworkTxStats) Unmarshal

func (m *NetworkTxStats) Unmarshal(dAtA []byte) error

func (*NetworkTxStats) XXX_DiscardUnknown

func (m *NetworkTxStats) XXX_DiscardUnknown()

func (*NetworkTxStats) XXX_Marshal

func (m *NetworkTxStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NetworkTxStats) XXX_Merge

func (m *NetworkTxStats) XXX_Merge(src proto.Message)

func (*NetworkTxStats) XXX_Size

func (m *NetworkTxStats) XXX_Size() int

func (*NetworkTxStats) XXX_Unmarshal

func (m *NetworkTxStats) XXX_Unmarshal(b []byte) error

type NoopCoreSpec

type NoopCoreSpec struct {
}

NoopCoreSpec indicates a "no-op" processor core. This is used when we just need post-processing or when only a synchronizer is required (e.g. at the final endpoint).

func (*NoopCoreSpec) Descriptor

func (*NoopCoreSpec) Descriptor() ([]byte, []int)

func (*NoopCoreSpec) Marshal

func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)

func (*NoopCoreSpec) MarshalTo

func (m *NoopCoreSpec) MarshalTo(dAtA []byte) (int, error)

func (*NoopCoreSpec) MarshalToSizedBuffer

func (m *NoopCoreSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*NoopCoreSpec) ProtoMessage

func (*NoopCoreSpec) ProtoMessage()

func (*NoopCoreSpec) Reset

func (m *NoopCoreSpec) Reset()

func (*NoopCoreSpec) Size

func (m *NoopCoreSpec) Size() (n int)

func (*NoopCoreSpec) String

func (m *NoopCoreSpec) String() string

func (*NoopCoreSpec) Unmarshal

func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error

func (*NoopCoreSpec) XXX_DiscardUnknown

func (m *NoopCoreSpec) XXX_DiscardUnknown()

func (*NoopCoreSpec) XXX_Marshal

func (m *NoopCoreSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NoopCoreSpec) XXX_Merge

func (m *NoopCoreSpec) XXX_Merge(src proto.Message)

func (*NoopCoreSpec) XXX_Size

func (m *NoopCoreSpec) XXX_Size() int

func (*NoopCoreSpec) XXX_Unmarshal

func (m *NoopCoreSpec) XXX_Unmarshal(b []byte) error

type Ordering

type Ordering struct {
	Columns []Ordering_Column `protobuf:"bytes,1,rep,name=columns" json:"columns"`
}

Ordering defines an order - specifically a list of column indices and directions. See colinfo.ColumnOrdering.

func ConvertToMappedSpecOrdering

func ConvertToMappedSpecOrdering(
	columnOrdering colinfo.ColumnOrdering, planToStreamColMap []int,
) Ordering

ConvertToMappedSpecOrdering converts a colinfo.ColumnOrdering type to an Ordering type (as defined in data.proto), using the column indices contained in planToStreamColMap.

func ConvertToSpecOrdering

func ConvertToSpecOrdering(columnOrdering colinfo.ColumnOrdering) Ordering

ConvertToSpecOrdering converts a colinfo.ColumnOrdering type to an Ordering type (as defined in data.proto).
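A hedged usage sketch, assuming colinfo.ColumnOrderInfo's ColIdx/Direction fields and the util/encoding direction constants:

colOrd := colinfo.ColumnOrdering{
	{ColIdx: 0, Direction: encoding.Ascending},
	{ColIdx: 2, Direction: encoding.Descending},
}
specOrd := execinfrapb.ConvertToSpecOrdering(colOrd)
// specOrd now holds Ordering_Columns {0, ASC} and {2, DESC}.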

func (*Ordering) Descriptor

func (*Ordering) Descriptor() ([]byte, []int)

func (*Ordering) Equal

func (this *Ordering) Equal(that interface{}) bool

func (*Ordering) Marshal

func (m *Ordering) Marshal() (dAtA []byte, err error)

func (*Ordering) MarshalTo

func (m *Ordering) MarshalTo(dAtA []byte) (int, error)

func (*Ordering) MarshalToSizedBuffer

func (m *Ordering) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*Ordering) ProtoMessage

func (*Ordering) ProtoMessage()

func (*Ordering) Reset

func (m *Ordering) Reset()

func (*Ordering) Size

func (m *Ordering) Size() (n int)

func (*Ordering) String

func (m *Ordering) String() string

func (*Ordering) Unmarshal

func (m *Ordering) Unmarshal(dAtA []byte) error

func (*Ordering) XXX_DiscardUnknown

func (m *Ordering) XXX_DiscardUnknown()

func (*Ordering) XXX_Marshal

func (m *Ordering) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Ordering) XXX_Merge

func (m *Ordering) XXX_Merge(src proto.Message)

func (*Ordering) XXX_Size

func (m *Ordering) XXX_Size() int

func (*Ordering) XXX_Unmarshal

func (m *Ordering) XXX_Unmarshal(b []byte) error

type Ordering_Column

type Ordering_Column struct {
	ColIdx    uint32                    `protobuf:"varint,1,opt,name=col_idx,json=colIdx" json:"col_idx"`
	Direction Ordering_Column_Direction `protobuf:"varint,2,opt,name=direction,enum=cockroach.sql.distsqlrun.Ordering_Column_Direction" json:"direction"`
}

func (*Ordering_Column) Descriptor

func (*Ordering_Column) Descriptor() ([]byte, []int)

func (*Ordering_Column) Equal

func (this *Ordering_Column) Equal(that interface{}) bool

func (*Ordering_Column) Marshal

func (m *Ordering_Column) Marshal() (dAtA []byte, err error)

func (*Ordering_Column) MarshalTo

func (m *Ordering_Column) MarshalTo(dAtA []byte) (int, error)

func (*Ordering_Column) MarshalToSizedBuffer

func (m *Ordering_Column) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*Ordering_Column) ProtoMessage

func (*Ordering_Column) ProtoMessage()

func (*Ordering_Column) Reset

func (m *Ordering_Column) Reset()

func (*Ordering_Column) Size

func (m *Ordering_Column) Size() (n int)

func (*Ordering_Column) String

func (m *Ordering_Column) String() string

func (*Ordering_Column) Unmarshal

func (m *Ordering_Column) Unmarshal(dAtA []byte) error

func (*Ordering_Column) XXX_DiscardUnknown

func (m *Ordering_Column) XXX_DiscardUnknown()

func (*Ordering_Column) XXX_Marshal

func (m *Ordering_Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Ordering_Column) XXX_Merge

func (m *Ordering_Column) XXX_Merge(src proto.Message)

func (*Ordering_Column) XXX_Size

func (m *Ordering_Column) XXX_Size() int

func (*Ordering_Column) XXX_Unmarshal

func (m *Ordering_Column) XXX_Unmarshal(b []byte) error

type Ordering_Column_Direction

type Ordering_Column_Direction int32

The direction of the desired ordering for a column.

const (
	Ordering_Column_ASC  Ordering_Column_Direction = 0
	Ordering_Column_DESC Ordering_Column_Direction = 1
)

func (Ordering_Column_Direction) Enum

func (x Ordering_Column_Direction) Enum() *Ordering_Column_Direction

func (Ordering_Column_Direction) EnumDescriptor

func (Ordering_Column_Direction) EnumDescriptor() ([]byte, []int)

func (Ordering_Column_Direction) String

func (x Ordering_Column_Direction) String() string

func (*Ordering_Column_Direction) UnmarshalJSON

func (x *Ordering_Column_Direction) UnmarshalJSON(data []byte) error

type OrdinalitySpec

type OrdinalitySpec struct {
}

The specification for a WITH ORDINALITY processor. It adds a new column to each resulting row that contains the ordinal number of the row. Since there are no arguments for this operator, the spec is empty.

func (*OrdinalitySpec) Descriptor

func (*OrdinalitySpec) Descriptor() ([]byte, []int)

func (*OrdinalitySpec) Marshal

func (m *OrdinalitySpec) Marshal() (dAtA []byte, err error)

func (*OrdinalitySpec) MarshalTo

func (m *OrdinalitySpec) MarshalTo(dAtA []byte) (int, error)

func (*OrdinalitySpec) MarshalToSizedBuffer

func (m *OrdinalitySpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OrdinalitySpec) ProtoMessage

func (*OrdinalitySpec) ProtoMessage()

func (*OrdinalitySpec) Reset

func (m *OrdinalitySpec) Reset()

func (*OrdinalitySpec) Size

func (m *OrdinalitySpec) Size() (n int)

func (*OrdinalitySpec) String

func (m *OrdinalitySpec) String() string

func (*OrdinalitySpec) Unmarshal

func (m *OrdinalitySpec) Unmarshal(dAtA []byte) error

func (*OrdinalitySpec) XXX_DiscardUnknown

func (m *OrdinalitySpec) XXX_DiscardUnknown()

func (*OrdinalitySpec) XXX_Marshal

func (m *OrdinalitySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OrdinalitySpec) XXX_Merge

func (m *OrdinalitySpec) XXX_Merge(src proto.Message)

func (*OrdinalitySpec) XXX_Size

func (m *OrdinalitySpec) XXX_Size() int

func (*OrdinalitySpec) XXX_Unmarshal

func (m *OrdinalitySpec) XXX_Unmarshal(b []byte) error

type OutputRouterSpec

type OutputRouterSpec struct {
	Type    OutputRouterSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.OutputRouterSpec_Type" json:"type"`
	Streams []StreamEndpointSpec  `protobuf:"bytes,2,rep,name=streams" json:"streams"`
	// Only used for the BY_HASH type; these are the indexes of the columns we are
	// hashing.
	HashColumns     []uint32                         `protobuf:"varint,3,rep,name=hash_columns,json=hashColumns" json:"hash_columns,omitempty"`
	RangeRouterSpec OutputRouterSpec_RangeRouterSpec `protobuf:"bytes,4,opt,name=range_router_spec,json=rangeRouterSpec" json:"range_router_spec"`
	// disable_buffering disables output buffering. Generally buffering should be
	// enabled to prevent deadlocks. However, some plans are known not to deadlock
	// and so can set this flag to prevent unbounded buffering from causing OOMs.
	DisableBuffering bool `protobuf:"varint,5,opt,name=disable_buffering,json=disableBuffering" json:"disable_buffering"`
}

OutputRouterSpec is the specification for the output router of a processor; it decides how to send results to multiple output streams.
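For instance, a hedged sketch of a hash router that partitions rows by columns 0 and 2; the stream endpoints, normally filled in by the physical planner, are omitted:

router := execinfrapb.OutputRouterSpec{
	Type:        execinfrapb.OutputRouterSpec_BY_HASH,
	HashColumns: []uint32{0, 2},
	// Streams: one StreamEndpointSpec per destination stream.
}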

func (*OutputRouterSpec) Descriptor

func (*OutputRouterSpec) Descriptor() ([]byte, []int)

func (*OutputRouterSpec) Marshal

func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec) MarshalTo

func (m *OutputRouterSpec) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec) MarshalToSizedBuffer

func (m *OutputRouterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OutputRouterSpec) ProtoMessage

func (*OutputRouterSpec) ProtoMessage()

func (*OutputRouterSpec) Reset

func (m *OutputRouterSpec) Reset()

func (*OutputRouterSpec) Size

func (m *OutputRouterSpec) Size() (n int)

func (*OutputRouterSpec) String

func (m *OutputRouterSpec) String() string

func (*OutputRouterSpec) Unmarshal

func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec) XXX_DiscardUnknown

func (m *OutputRouterSpec) XXX_DiscardUnknown()

func (*OutputRouterSpec) XXX_Marshal

func (m *OutputRouterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec) XXX_Merge

func (m *OutputRouterSpec) XXX_Merge(src proto.Message)

func (*OutputRouterSpec) XXX_Size

func (m *OutputRouterSpec) XXX_Size() int

func (*OutputRouterSpec) XXX_Unmarshal

func (m *OutputRouterSpec) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_RangeRouterSpec

type OutputRouterSpec_RangeRouterSpec struct {
	// spans is a slice of Span. Input matching a span will be routed to its
	// specified stream.
	Spans []OutputRouterSpec_RangeRouterSpec_Span `protobuf:"bytes,1,rep,name=spans" json:"spans"`
	// default_dest, if not nil, is the index of the stream to send rows that do
	// not match any span. If nil, a row that does not match a span will produce
	// an error in the router.
	DefaultDest *int32 `protobuf:"varint,2,opt,name=default_dest,json=defaultDest" json:"default_dest,omitempty"`
	// encodings is a slice of columns and encodings. Each will be appended to a
	// []byte, which is used as input to the spans. Columns from the input rows
	// potentially need to be recoded to match the encoding used for the spans.
	Encodings []OutputRouterSpec_RangeRouterSpec_ColumnEncoding `protobuf:"bytes,3,rep,name=encodings" json:"encodings"`
}
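A hedged sketch of a range router that sends rows whose encoded column 0 lands in [10, 20) to stream 0 and everything else to stream 1; the util/encoding key builder is an assumption, chosen to match the declared ASCENDING_KEY encoding:

defaultDest := int32(1)
rangeRouter := execinfrapb.OutputRouterSpec_RangeRouterSpec{
	Spans: []execinfrapb.OutputRouterSpec_RangeRouterSpec_Span{{
		Start:  encoding.EncodeVarintAscending(nil, 10), // assumed key encoder
		End:    encoding.EncodeVarintAscending(nil, 20),
		Stream: 0,
	}},
	DefaultDest: &defaultDest, // rows matching no span go to stream 1
	Encodings: []execinfrapb.OutputRouterSpec_RangeRouterSpec_ColumnEncoding{{
		Column:   0,
		Encoding: descpb.DatumEncoding_ASCENDING_KEY,
	}},
}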

func (*OutputRouterSpec_RangeRouterSpec) Descriptor

func (*OutputRouterSpec_RangeRouterSpec) Descriptor() ([]byte, []int)

func (*OutputRouterSpec_RangeRouterSpec) Marshal

func (m *OutputRouterSpec_RangeRouterSpec) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec_RangeRouterSpec) MarshalTo

func (m *OutputRouterSpec_RangeRouterSpec) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec) MarshalToSizedBuffer

func (m *OutputRouterSpec_RangeRouterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage

func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage()

func (*OutputRouterSpec_RangeRouterSpec) Reset

func (m *OutputRouterSpec_RangeRouterSpec) Reset()

func (*OutputRouterSpec_RangeRouterSpec) Size

func (m *OutputRouterSpec_RangeRouterSpec) Size() (n int)

func (*OutputRouterSpec_RangeRouterSpec) String

func (m *OutputRouterSpec_RangeRouterSpec) String() string

func (*OutputRouterSpec_RangeRouterSpec) Unmarshal

func (m *OutputRouterSpec_RangeRouterSpec) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec_RangeRouterSpec) XXX_DiscardUnknown

func (m *OutputRouterSpec_RangeRouterSpec) XXX_DiscardUnknown()

func (*OutputRouterSpec_RangeRouterSpec) XXX_Marshal

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec_RangeRouterSpec) XXX_Merge

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Merge(src proto.Message)

func (*OutputRouterSpec_RangeRouterSpec) XXX_Size

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Size() int

func (*OutputRouterSpec_RangeRouterSpec) XXX_Unmarshal

func (m *OutputRouterSpec_RangeRouterSpec) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_RangeRouterSpec_ColumnEncoding

type OutputRouterSpec_RangeRouterSpec_ColumnEncoding struct {
	// column is the index of a column to encode.
	Column uint32 `protobuf:"varint,1,opt,name=column" json:"column"`
	// encoding specifies how a particular column is to be encoded for
	// generating the sort key for a row. This needs to correspond to the way
	// the Span.{start,end} keys have been generated.
	Encoding descpb.DatumEncoding `protobuf:"varint,2,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
}

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor() ([]byte, []int)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalToSizedBuffer

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage()

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset()

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size() (n int)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String() string

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_DiscardUnknown

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_DiscardUnknown()

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Marshal

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Merge

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Merge(src proto.Message)

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Size

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Size() int

func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Unmarshal

func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_RangeRouterSpec_Span

type OutputRouterSpec_RangeRouterSpec_Span struct {
	Start []byte `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
	End   []byte `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
	// stream is the index of the destination stream.
	Stream int32 `protobuf:"varint,3,opt,name=stream" json:"stream"`
}

Span matches bytes in [start, end).

func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor

func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor() ([]byte, []int)

func (*OutputRouterSpec_RangeRouterSpec_Span) Marshal

func (m *OutputRouterSpec_RangeRouterSpec_Span) Marshal() (dAtA []byte, err error)

func (*OutputRouterSpec_RangeRouterSpec_Span) MarshalTo

func (m *OutputRouterSpec_RangeRouterSpec_Span) MarshalTo(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec_Span) MarshalToSizedBuffer

func (m *OutputRouterSpec_RangeRouterSpec_Span) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage

func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage()

func (*OutputRouterSpec_RangeRouterSpec_Span) Reset

func (m *OutputRouterSpec_RangeRouterSpec_Span) Reset()

func (*OutputRouterSpec_RangeRouterSpec_Span) Size

func (m *OutputRouterSpec_RangeRouterSpec_Span) Size() (n int)

func (*OutputRouterSpec_RangeRouterSpec_Span) String

func (m *OutputRouterSpec_RangeRouterSpec_Span) String() string

func (*OutputRouterSpec_RangeRouterSpec_Span) Unmarshal

func (m *OutputRouterSpec_RangeRouterSpec_Span) Unmarshal(dAtA []byte) error

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_DiscardUnknown

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_DiscardUnknown()

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Marshal

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Merge

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Merge(src proto.Message)

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Size

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Size() int

func (*OutputRouterSpec_RangeRouterSpec_Span) XXX_Unmarshal

func (m *OutputRouterSpec_RangeRouterSpec_Span) XXX_Unmarshal(b []byte) error

type OutputRouterSpec_Type

type OutputRouterSpec_Type int32

const (
	// Single output stream.
	OutputRouterSpec_PASS_THROUGH OutputRouterSpec_Type = 0
	// Each row is sent to all output streams.
	OutputRouterSpec_MIRROR OutputRouterSpec_Type = 1
	// Each row is sent to one stream, chosen by hashing certain columns of
	// the row (specified by the hash_columns field).
	OutputRouterSpec_BY_HASH OutputRouterSpec_Type = 2
	// Each row is sent to one stream, chosen according to preset boundaries
	// for the values of certain columns of the row.
	OutputRouterSpec_BY_RANGE OutputRouterSpec_Type = 3
)

func (OutputRouterSpec_Type) Enum

func (x OutputRouterSpec_Type) Enum() *OutputRouterSpec_Type

func (OutputRouterSpec_Type) EnumDescriptor

func (OutputRouterSpec_Type) EnumDescriptor() ([]byte, []int)

func (OutputRouterSpec_Type) String

func (x OutputRouterSpec_Type) String() string

func (*OutputRouterSpec_Type) UnmarshalJSON

func (x *OutputRouterSpec_Type) UnmarshalJSON(data []byte) error

type OutputStats

type OutputStats struct {
	// Number of batches produced by the component.
	NumBatches optional.Uint `protobuf:"bytes,1,opt,name=num_batches,json=numBatches" json:"num_batches"`
	// Number of tuples produced by the component.
	NumTuples optional.Uint `protobuf:"bytes,2,opt,name=num_tuples,json=numTuples" json:"num_tuples"`
}

OutputStats contains statistics about the output (results) of a component.

func (*OutputStats) Descriptor

func (*OutputStats) Descriptor() ([]byte, []int)

func (*OutputStats) Marshal

func (m *OutputStats) Marshal() (dAtA []byte, err error)

func (*OutputStats) MarshalTo

func (m *OutputStats) MarshalTo(dAtA []byte) (int, error)

func (*OutputStats) MarshalToSizedBuffer

func (m *OutputStats) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*OutputStats) ProtoMessage

func (*OutputStats) ProtoMessage()

func (*OutputStats) Reset

func (m *OutputStats) Reset()

func (*OutputStats) Size

func (m *OutputStats) Size() (n int)

func (*OutputStats) String

func (m *OutputStats) String() string

func (*OutputStats) Unmarshal

func (m *OutputStats) Unmarshal(dAtA []byte) error

func (*OutputStats) XXX_DiscardUnknown

func (m *OutputStats) XXX_DiscardUnknown()

func (*OutputStats) XXX_Marshal

func (m *OutputStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OutputStats) XXX_Merge

func (m *OutputStats) XXX_Merge(src proto.Message)

func (*OutputStats) XXX_Size

func (m *OutputStats) XXX_Size() int

func (*OutputStats) XXX_Unmarshal

func (m *OutputStats) XXX_Unmarshal(b []byte) error

type PostProcessSpec

type PostProcessSpec struct {
	// If true, output_columns describes a projection. Used to differentiate
	// between an empty projection and no projection.
	//
	// Cannot be set at the same time as render expressions.
	Projection bool `protobuf:"varint,2,opt,name=projection" json:"projection"`
	// The output columns describe a projection on the internal set of columns;
	// only the columns in this list will be emitted.
	//
	// Can only be set if projection is true. Cannot be set at the same time as
	// render expressions.
	OutputColumns []uint32 `protobuf:"varint,3,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
	// OriginalOutputColumns will be set if OutputColumns are destructively
	// modified during the vectorized flow setup. This field is only used for
	// population of the DistSQL diagrams, and if set, it takes precedence over
	// OutputColumns.
	OriginalOutputColumns []uint32 `` /* 127-byte string literal not displayed */
	// If set, the output is the result of rendering these expressions. The
	// expressions reference the internal columns of the processor.
	//
	// Cannot be set at the same time as output columns.
	RenderExprs []Expression `protobuf:"bytes,4,rep,name=render_exprs,json=renderExprs" json:"render_exprs"`
	// OriginalRenderExprs will be set if RenderExprs are destructively
	// modified during the vectorized flow setup. This field is only used for
	// population of the DistSQL diagrams, and if set, it takes precedence over
	// RenderExprs.
	OriginalRenderExprs []Expression `protobuf:"bytes,8,rep,name=original_render_exprs,json=originalRenderExprs" json:"original_render_exprs"`
	// If nonzero, the first <offset> rows will be suppressed.
	Offset uint64 `protobuf:"varint,5,opt,name=offset" json:"offset"`
	// If nonzero, the processor will stop after emitting this many rows. The rows
	// suppressed by <offset>, if any, do not count towards this limit.
	Limit uint64 `protobuf:"varint,6,opt,name=limit" json:"limit"`
}

PostProcessSpec describes the processing required to obtain the output (e.g. projection). It operates on the internal schema of the processor (see ProcessorSpec).
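
As a sketch of the mutual-exclusion rules above: a projection sets Projection plus OutputColumns, while a rendering post-process would set RenderExprs instead; the two must not be combined.

// Keep internal columns 1 and 3, skip the first 10 rows, and emit at
// most 100 rows after the offset.
post := execinfrapb.PostProcessSpec{
	Projection:    true,
	OutputColumns: []uint32{1, 3},
	Offset:        10,
	Limit:         100,
}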

func (*PostProcessSpec) Descriptor

func (*PostProcessSpec) Descriptor() ([]byte, []int)

func (*PostProcessSpec) Marshal

func (m *PostProcessSpec) Marshal() (dAtA []byte, err error)

func (*PostProcessSpec) MarshalTo

func (m *PostProcessSpec) MarshalTo(dAtA []byte) (int, error)

func (*PostProcessSpec) MarshalToSizedBuffer

func (m *PostProcessSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*PostProcessSpec) ProtoMessage

func (*PostProcessSpec) ProtoMessage()

func (*PostProcessSpec) Reset

func (m *PostProcessSpec) Reset()

func (*PostProcessSpec) Size

func (m *PostProcessSpec) Size() (n int)

func (*PostProcessSpec) String

func (m *PostProcessSpec) String() string

func (*PostProcessSpec) Unmarshal

func (m *PostProcessSpec) Unmarshal(dAtA []byte) error

func (*PostProcessSpec) XXX_DiscardUnknown

func (m *PostProcessSpec) XXX_DiscardUnknown()

func (*PostProcessSpec) XXX_Marshal

func (m *PostProcessSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PostProcessSpec) XXX_Merge

func (m *PostProcessSpec) XXX_Merge(src proto.Message)

func (*PostProcessSpec) XXX_Size

func (m *PostProcessSpec) XXX_Size() int

func (*PostProcessSpec) XXX_Unmarshal

func (m *PostProcessSpec) XXX_Unmarshal(b []byte) error

type ProcessorCoreUnion

type ProcessorCoreUnion struct {
	Noop                    *NoopCoreSpec                `protobuf:"bytes,1,opt,name=noop" json:"noop,omitempty"`
	TableReader             *TableReaderSpec             `protobuf:"bytes,2,opt,name=tableReader" json:"tableReader,omitempty"`
	JoinReader              *JoinReaderSpec              `protobuf:"bytes,3,opt,name=joinReader" json:"joinReader,omitempty"`
	Sorter                  *SorterSpec                  `protobuf:"bytes,4,opt,name=sorter" json:"sorter,omitempty"`
	Aggregator              *AggregatorSpec              `protobuf:"bytes,5,opt,name=aggregator" json:"aggregator,omitempty"`
	Distinct                *DistinctSpec                `protobuf:"bytes,7,opt,name=distinct" json:"distinct,omitempty"`
	MergeJoiner             *MergeJoinerSpec             `protobuf:"bytes,8,opt,name=mergeJoiner" json:"mergeJoiner,omitempty"`
	HashJoiner              *HashJoinerSpec              `protobuf:"bytes,9,opt,name=hashJoiner" json:"hashJoiner,omitempty"`
	Values                  *ValuesCoreSpec              `protobuf:"bytes,10,opt,name=values" json:"values,omitempty"`
	Backfiller              *BackfillerSpec              `protobuf:"bytes,11,opt,name=backfiller" json:"backfiller,omitempty"`
	ReadImport              *ReadImportDataSpec          `protobuf:"bytes,13,opt,name=readImport" json:"readImport,omitempty"`
	Sampler                 *SamplerSpec                 `protobuf:"bytes,15,opt,name=Sampler" json:"Sampler,omitempty"`
	SampleAggregator        *SampleAggregatorSpec        `protobuf:"bytes,16,opt,name=SampleAggregator" json:"SampleAggregator,omitempty"`
	MetadataTestSender      *MetadataTestSenderSpec      `protobuf:"bytes,18,opt,name=metadataTestSender" json:"metadataTestSender,omitempty"`
	MetadataTestReceiver    *MetadataTestReceiverSpec    `protobuf:"bytes,19,opt,name=metadataTestReceiver" json:"metadataTestReceiver,omitempty"`
	ZigzagJoiner            *ZigzagJoinerSpec            `protobuf:"bytes,21,opt,name=zigzagJoiner" json:"zigzagJoiner,omitempty"`
	ProjectSet              *ProjectSetSpec              `protobuf:"bytes,22,opt,name=projectSet" json:"projectSet,omitempty"`
	Windower                *WindowerSpec                `protobuf:"bytes,23,opt,name=windower" json:"windower,omitempty"`
	LocalPlanNode           *LocalPlanNodeSpec           `protobuf:"bytes,24,opt,name=localPlanNode" json:"localPlanNode,omitempty"`
	ChangeAggregator        *ChangeAggregatorSpec        `protobuf:"bytes,25,opt,name=changeAggregator" json:"changeAggregator,omitempty"`
	ChangeFrontier          *ChangeFrontierSpec          `protobuf:"bytes,26,opt,name=changeFrontier" json:"changeFrontier,omitempty"`
	Ordinality              *OrdinalitySpec              `protobuf:"bytes,27,opt,name=ordinality" json:"ordinality,omitempty"`
	BulkRowWriter           *BulkRowWriterSpec           `protobuf:"bytes,28,opt,name=bulkRowWriter" json:"bulkRowWriter,omitempty"`
	InvertedFilterer        *InvertedFiltererSpec        `protobuf:"bytes,29,opt,name=invertedFilterer" json:"invertedFilterer,omitempty"`
	InvertedJoiner          *InvertedJoinerSpec          `protobuf:"bytes,30,opt,name=invertedJoiner" json:"invertedJoiner,omitempty"`
	BackupData              *BackupDataSpec              `protobuf:"bytes,31,opt,name=backupData" json:"backupData,omitempty"`
	SplitAndScatter         *SplitAndScatterSpec         `protobuf:"bytes,32,opt,name=splitAndScatter" json:"splitAndScatter,omitempty"`
	RestoreData             *RestoreDataSpec             `protobuf:"bytes,33,opt,name=restoreData" json:"restoreData,omitempty"`
	Filterer                *FiltererSpec                `protobuf:"bytes,34,opt,name=filterer" json:"filterer,omitempty"`
	StreamIngestionData     *StreamIngestionDataSpec     `protobuf:"bytes,35,opt,name=streamIngestionData" json:"streamIngestionData,omitempty"`
	StreamIngestionFrontier *StreamIngestionFrontierSpec `protobuf:"bytes,36,opt,name=streamIngestionFrontier" json:"streamIngestionFrontier,omitempty"`
	Exporter                *ExportSpec                  `protobuf:"bytes,37,opt,name=exporter" json:"exporter,omitempty"`
	IndexBackfillMerger     *IndexBackfillMergerSpec     `protobuf:"bytes,38,opt,name=indexBackfillMerger" json:"indexBackfillMerger,omitempty"`
}

func (*ProcessorCoreUnion) Descriptor

func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)

func (*ProcessorCoreUnion) GetValue

func (this *ProcessorCoreUnion) GetValue() interface{}

func (*ProcessorCoreUnion) Marshal

func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)

func (*ProcessorCoreUnion) MarshalTo

func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)

func (*ProcessorCoreUnion) MarshalToSizedBuffer

func (m *ProcessorCoreUnion) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ProcessorCoreUnion) ProtoMessage

func (*ProcessorCoreUnion) ProtoMessage()

func (*ProcessorCoreUnion) Reset

func (m *ProcessorCoreUnion) Reset()

func (*ProcessorCoreUnion) SetValue

func (this *ProcessorCoreUnion) SetValue(value interface{}) bool
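
GetValue and SetValue treat the union generically: SetValue stores a spec in the matching field and reports whether its type was recognized, while GetValue returns the single populated spec. A hedged usage sketch:

var core execinfrapb.ProcessorCoreUnion
ok := core.SetValue(&execinfrapb.NoopCoreSpec{}) // ok is true for known spec types
if noop, isNoop := core.GetValue().(*execinfrapb.NoopCoreSpec); isNoop {
	_ = noop // the union holds a Noop core
}
_ = ok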

func (*ProcessorCoreUnion) Size

func (m *ProcessorCoreUnion) Size() (n int)

func (*ProcessorCoreUnion) String

func (m *ProcessorCoreUnion) String() string

func (*ProcessorCoreUnion) Unmarshal

func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error

func (*ProcessorCoreUnion) XXX_DiscardUnknown

func (m *ProcessorCoreUnion) XXX_DiscardUnknown()

func (*ProcessorCoreUnion) XXX_Marshal

func (m *ProcessorCoreUnion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProcessorCoreUnion) XXX_Merge

func (m *ProcessorCoreUnion) XXX_Merge(src proto.Message)

func (*ProcessorCoreUnion) XXX_Size

func (m *ProcessorCoreUnion) XXX_Size() int

func (*ProcessorCoreUnion) XXX_Unmarshal

func (m *ProcessorCoreUnion) XXX_Unmarshal(b []byte) error

type ProcessorID

type ProcessorID int

ProcessorID identifies a processor in the context of a specific flow.

type ProcessorSpec

type ProcessorSpec struct {
	// In most cases, there is one input.
	Input []InputSyncSpec    `protobuf:"bytes,1,rep,name=input" json:"input"`
	Core  ProcessorCoreUnion `protobuf:"bytes,2,opt,name=core" json:"core"`
	Post  PostProcessSpec    `protobuf:"bytes,4,opt,name=post" json:"post"`
	// In most cases, there is one output.
	Output []OutputRouterSpec `protobuf:"bytes,3,rep,name=output" json:"output"`
	// An optional identifier that can be used to correlate processors that are
	// part of the same stage (e.g. multiple joiners that are part of a
	// distributed join). This has no consequence on the running of flows, but is
	// useful for plan diagrams.
	StageID int32 `protobuf:"varint,5,opt,name=stage_id,json=stageId" json:"stage_id"`
	// processor_id uniquely identifies a processor within a physical plan. This
	// has no consequence on the running of flows, but is useful for plan
	// diagrams and linking information like tracing spans and log messages to
	// processors.
	ProcessorID int32 `protobuf:"varint,6,opt,name=processor_id,json=processorId" json:"processor_id"`
	// Schema for the rows produced by this processor (after post-processing has
	// been applied, if there is any).
	//
	// This can be aliased with InputSyncSpec.ColumnTypes, so it must not be
	// modified in-place during planning.
	ResultTypes []*types.T `protobuf:"bytes,7,rep,name=result_types,json=resultTypes" json:"result_types,omitempty"`
	// estimated_row_count contains the number of rows that the optimizer expects
	// will be emitted from this processor, or 0 if the estimate wasn't populated.
	EstimatedRowCount uint64 `protobuf:"varint,8,opt,name=estimated_row_count,json=estimatedRowCount" json:"estimated_row_count"`
}

Each processor has the following components:

  • one or more input synchronizers; each one merges rows between one or more input streams;

  • a processor "core" which encapsulates the inner logic of each processor;

  • a post-processing stage which allows "inline" post-processing on results (like projection or filtering);

  • one or more output synchronizers; each one directs rows to one or more output streams.

== Internal columns ==

The core outputs rows of a certain schema to the post-processing stage. We call this the "internal schema" (or "internal columns"), and it differs for each type of core. Column indices in a PostProcessSpec refer to these internal columns. Some columns may be unused by the post-processing stage; processor implementations are internally optimized to not produce values for such unneeded columns.
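
Putting the pieces together, a hedged sketch of a single-processor spec: a table-reader core (reader fields elided) whose internal columns are projected down to column 0 and passed through a single output stream.

proc := execinfrapb.ProcessorSpec{
	Core: execinfrapb.ProcessorCoreUnion{
		TableReader: &execinfrapb.TableReaderSpec{}, // reader fields elided
	},
	Post: execinfrapb.PostProcessSpec{
		Projection:    true,
		OutputColumns: []uint32{0}, // index into the internal columns
	},
	Output: []execinfrapb.OutputRouterSpec{
		{Type: execinfrapb.OutputRouterSpec_PASS_THROUGH},
	},
	StageID:     1,
	ProcessorID: 1,
}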

func (*ProcessorSpec) Descriptor

func (*ProcessorSpec) Descriptor() ([]byte, []int)

func (*ProcessorSpec) Marshal

func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)

func (*ProcessorSpec) MarshalTo

func (m *ProcessorSpec) MarshalTo(dAtA []byte) (int, error)

func (*ProcessorSpec) MarshalToSizedBuffer

func (m *ProcessorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ProcessorSpec) ProtoMessage

func (*ProcessorSpec) ProtoMessage()

func (*ProcessorSpec) Reset

func (m *ProcessorSpec) Reset()

func (*ProcessorSpec) Size

func (m *ProcessorSpec) Size() (n int)

func (*ProcessorSpec) String

func (m *ProcessorSpec) String() string

func (*ProcessorSpec) Unmarshal

func (m *ProcessorSpec) Unmarshal(dAtA []byte) error

func (*ProcessorSpec) XXX_DiscardUnknown

func (m *ProcessorSpec) XXX_DiscardUnknown()

func (*ProcessorSpec) XXX_Marshal

func (m *ProcessorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProcessorSpec) XXX_Merge

func (m *ProcessorSpec) XXX_Merge(src proto.Message)

func (*ProcessorSpec) XXX_Size

func (m *ProcessorSpec) XXX_Size() int

func (*ProcessorSpec) XXX_Unmarshal

func (m *ProcessorSpec) XXX_Unmarshal(b []byte) error

type ProducerData

type ProducerData struct {
	// A bunch of rows, encoded. Each datum is encoded according to the
	// corresponding DatumInfo.
	RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
	// In the special case when the stream contains empty rows, the count is
	// passed instead.
	NumEmptyRows int32 `protobuf:"varint,3,opt,name=num_empty_rows,json=numEmptyRows" json:"num_empty_rows"`
	// A bunch of metadata messages.
	Metadata []RemoteProducerMetadata `protobuf:"bytes,2,rep,name=metadata" json:"metadata"`
}

ProducerData is a message that can be sent multiple times as part of a stream from a producer to a consumer. It contains 0 or more rows and/or 0 or more metadata messages.
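
Two hedged sketches of the cases described above: a chunk carrying encoded rows plus a metadata record (rawRows and rpm are assumed to be prepared elsewhere), and the empty-row special case where only a count is sent.

chunk := execinfrapb.ProducerData{
	RawBytes: rawRows, // datum encodings match the stream's DatumInfo typing
	Metadata: []execinfrapb.RemoteProducerMetadata{rpm},
}

// Zero-column rows are conveyed by count alone.
empty := execinfrapb.ProducerData{NumEmptyRows: 3}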

func (*ProducerData) Descriptor

func (*ProducerData) Descriptor() ([]byte, []int)

func (*ProducerData) Marshal

func (m *ProducerData) Marshal() (dAtA []byte, err error)

func (*ProducerData) MarshalTo

func (m *ProducerData) MarshalTo(dAtA []byte) (int, error)

func (*ProducerData) MarshalToSizedBuffer

func (m *ProducerData) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ProducerData) ProtoMessage

func (*ProducerData) ProtoMessage()

func (*ProducerData) Reset

func (m *ProducerData) Reset()

func (*ProducerData) Size

func (m *ProducerData) Size() (n int)

func (*ProducerData) String

func (m *ProducerData) String() string

func (*ProducerData) Unmarshal

func (m *ProducerData) Unmarshal(dAtA []byte) error

func (*ProducerData) XXX_DiscardUnknown

func (m *ProducerData) XXX_DiscardUnknown()

func (*ProducerData) XXX_Marshal

func (m *ProducerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProducerData) XXX_Merge

func (m *ProducerData) XXX_Merge(src proto.Message)

func (*ProducerData) XXX_Size

func (m *ProducerData) XXX_Size() int

func (*ProducerData) XXX_Unmarshal

func (m *ProducerData) XXX_Unmarshal(b []byte) error

type ProducerHeader

type ProducerHeader struct {
	FlowID   FlowID   `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
	StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
}

ProducerHeader is a message that is sent once at the beginning of a stream.

func (*ProducerHeader) Descriptor

func (*ProducerHeader) Descriptor() ([]byte, []int)

func (*ProducerHeader) Marshal

func (m *ProducerHeader) Marshal() (dAtA []byte, err error)

func (*ProducerHeader) MarshalTo

func (m *ProducerHeader) MarshalTo(dAtA []byte) (int, error)

func (*ProducerHeader) MarshalToSizedBuffer

func (m *ProducerHeader) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ProducerHeader) ProtoMessage

func (*ProducerHeader) ProtoMessage()

func (*ProducerHeader) Reset

func (m *ProducerHeader) Reset()

func (*ProducerHeader) Size

func (m *ProducerHeader) Size() (n int)

func (*ProducerHeader) String

func (m *ProducerHeader) String() string

func (*ProducerHeader) Unmarshal

func (m *ProducerHeader) Unmarshal(dAtA []byte) error

func (*ProducerHeader) XXX_DiscardUnknown

func (m *ProducerHeader) XXX_DiscardUnknown()

func (*ProducerHeader) XXX_Marshal

func (m *ProducerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProducerHeader) XXX_Merge

func (m *ProducerHeader) XXX_Merge(src proto.Message)

func (*ProducerHeader) XXX_Size

func (m *ProducerHeader) XXX_Size() int

func (*ProducerHeader) XXX_Unmarshal

func (m *ProducerHeader) XXX_Unmarshal(b []byte) error

type ProducerMessage

type ProducerMessage struct {
	Header *ProducerHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
	// Typing information. There will be one DatumInfo for each element in a row.
	// This field has to be populated on, or before, a ProducerMessage with data
	// in it, and can only be populated once. It can be nil if only zero length
	// rows will be sent.
	// TODO(andrei): It'd be nice if the typing information for streams would be
	// configured statically at plan creation time, instead of being discovered
	// dynamically through the first rows that flow.
	Typing []DatumInfo  `protobuf:"bytes,2,rep,name=typing" json:"typing"`
	Data   ProducerData `protobuf:"bytes,3,opt,name=data" json:"data"`
}
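
A hedged sketch of the message sequence implied by these fields: the first message on a stream carries the header and, if rows will follow, the typing; subsequent messages carry only data. flowID, infos, and chunk are assumed to be built elsewhere.

first := execinfrapb.ProducerMessage{
	Header: &execinfrapb.ProducerHeader{FlowID: flowID, StreamID: 7},
	Typing: infos, // one DatumInfo per row column; set at most once
}
next := execinfrapb.ProducerMessage{Data: chunk}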

func (*ProducerMessage) Descriptor

func (*ProducerMessage) Descriptor() ([]byte, []int)

func (*ProducerMessage) Marshal

func (m *ProducerMessage) Marshal() (dAtA []byte, err error)

func (*ProducerMessage) MarshalTo

func (m *ProducerMessage) MarshalTo(dAtA []byte) (int, error)

func (*ProducerMessage) MarshalToSizedBuffer

func (m *ProducerMessage) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ProducerMessage) ProtoMessage

func (*ProducerMessage) ProtoMessage()

func (*ProducerMessage) Reset

func (m *ProducerMessage) Reset()

func (*ProducerMessage) Size

func (m *ProducerMessage) Size() (n int)

func (*ProducerMessage) String

func (m *ProducerMessage) String() string

func (*ProducerMessage) Unmarshal

func (m *ProducerMessage) Unmarshal(dAtA []byte) error

func (*ProducerMessage) XXX_DiscardUnknown

func (m *ProducerMessage) XXX_DiscardUnknown()

func (*ProducerMessage) XXX_Marshal

func (m *ProducerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProducerMessage) XXX_Merge

func (m *ProducerMessage) XXX_Merge(src proto.Message)

func (*ProducerMessage) XXX_Size

func (m *ProducerMessage) XXX_Size() int

func (*ProducerMessage) XXX_Unmarshal

func (m *ProducerMessage) XXX_Unmarshal(b []byte) error

type ProducerMetadata

type ProducerMetadata struct {
	// Only one of these fields will be set. If this ever changes, note that
	// there are consumers that extract the error and, if there is one,
	// forward it in isolation and drop the rest of the record.
	Ranges []roachpb.RangeInfo
	// TODO(vivek): change to type Error
	Err error
	// TraceData is sent if tracing is enabled.
	TraceData []tracingpb.RecordedSpan
	// LeafTxnFinalState contains the final state of the LeafTxn to be
	// sent from leaf flows to the RootTxn held by the flow's ultimate
	// receiver.
	LeafTxnFinalState *roachpb.LeafTxnFinalState
	// RowNum corresponds to a row produced by a "source" processor that takes no
	// inputs. It is used in tests to verify that all metadata is forwarded
	// exactly once to the receiver on the gateway node.
	RowNum *RemoteProducerMetadata_RowNum
	// SamplerProgress contains incremental progress information from the sampler
	// processor.
	SamplerProgress *RemoteProducerMetadata_SamplerProgress
	// BulkProcessorProgress contains incremental progress information from a bulk
	// operation processor (backfiller, import, etc).
	BulkProcessorProgress *RemoteProducerMetadata_BulkProcessorProgress
	// Metrics contains information about goodput of the node.
	Metrics *RemoteProducerMetadata_Metrics
}

ProducerMetadata represents a metadata record flowing through a DistSQL flow.

func GetProducerMeta

func GetProducerMeta() *ProducerMetadata

GetProducerMeta returns a producer metadata object from the pool.

func RemoteProducerMetaToLocalMeta

func RemoteProducerMetaToLocalMeta(
	ctx context.Context, rpm RemoteProducerMetadata,
) (ProducerMetadata, bool)

RemoteProducerMetaToLocalMeta converts a RemoteProducerMetadata struct to ProducerMetadata and reports whether the conversion was successful.

func (*ProducerMetadata) Release

func (meta *ProducerMetadata) Release()

Release is part of the Releasable interface.
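
A hedged sketch of the pooling pattern GetProducerMeta and Release imply: acquire a record, populate one field (err is an assumed error value), and release the record once the consumer is done with it.

meta := execinfrapb.GetProducerMeta()
meta.Err = err // only one field of the record should be set
// ... hand meta to the consumer ...
meta.Release() // returns the record to the pool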

type ProjectSetSpec

type ProjectSetSpec struct {
	// Expressions to be applied
	Exprs []Expression `protobuf:"bytes,1,rep,name=exprs" json:"exprs"`
	// Column types for the generated values
	GeneratedColumns []*types.T `protobuf:"bytes,2,rep,name=generated_columns,json=generatedColumns" json:"generated_columns,omitempty"`
	// The number of columns each expression returns. Same length as exprs.
	NumColsPerGen []uint32 `protobuf:"varint,3,rep,name=num_cols_per_gen,json=numColsPerGen" json:"num_cols_per_gen,omitempty"`
}

ProjectSetSpec is the specification of a processor which applies a set of expressions, which may be set-returning functions, to its input.
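
A hedged sketch of such a spec; the SQL expression syntax and types.Int are assumptions about sibling packages rather than facts stated here.

spec := execinfrapb.ProjectSetSpec{
	Exprs:            []execinfrapb.Expression{{Expr: "generate_series(1, 3)"}},
	GeneratedColumns: []*types.T{types.Int},
	NumColsPerGen:    []uint32{1}, // one generated column for the one expression
}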

func (*ProjectSetSpec) Descriptor

func (*ProjectSetSpec) Descriptor() ([]byte, []int)

func (*ProjectSetSpec) Marshal

func (m *ProjectSetSpec) Marshal() (dAtA []byte, err error)

func (*ProjectSetSpec) MarshalTo

func (m *ProjectSetSpec) MarshalTo(dAtA []byte) (int, error)

func (*ProjectSetSpec) MarshalToSizedBuffer

func (m *ProjectSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ProjectSetSpec) ProtoMessage

func (*ProjectSetSpec) ProtoMessage()

func (*ProjectSetSpec) Reset

func (m *ProjectSetSpec) Reset()

func (*ProjectSetSpec) Size

func (m *ProjectSetSpec) Size() (n int)

func (*ProjectSetSpec) String

func (m *ProjectSetSpec) String() string

func (*ProjectSetSpec) Unmarshal

func (m *ProjectSetSpec) Unmarshal(dAtA []byte) error

func (*ProjectSetSpec) XXX_DiscardUnknown

func (m *ProjectSetSpec) XXX_DiscardUnknown()

func (*ProjectSetSpec) XXX_Marshal

func (m *ProjectSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ProjectSetSpec) XXX_Merge

func (m *ProjectSetSpec) XXX_Merge(src proto.Message)

func (*ProjectSetSpec) XXX_Size

func (m *ProjectSetSpec) XXX_Size() int

func (*ProjectSetSpec) XXX_Unmarshal

func (m *ProjectSetSpec) XXX_Unmarshal(b []byte) error

type ReadImportDataSpec

type ReadImportDataSpec struct {
	JobID  int64                `protobuf:"varint,19,opt,name=job_id,json=jobId" json:"job_id"`
	Format roachpb.IOFileFormat `protobuf:"bytes,8,opt,name=format" json:"format"`
	// sample_size is the rate at which to output rows, based on an input row's size.
	SampleSize int32 `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"`
	// tables supports input formats that can read multiple tables. If it is
	// non-empty, the keys specify the names of tables for which the processor
	// should read and emit data (ignoring data for any other tables that is
	// present in the input).
	//
	// TODO(dt): If a key has a nil value, the schema for that table should be
	// determined from the input on-the-fly (e.g. by parsing a CREATE TABLE in a
	// dump file) and the processor should emit a key/value for the generated
	// TableDescriptor with the corresponding descriptor ID key. If tables is
	// empty (and table_desc above is not specified), the processor should read
	// all tables in the input, determining their schemas on the fly.
	Tables map[string]*ReadImportDataSpec_ImportTable `` /* 132-byte string literal not displayed */
	// uri is a cloud.ExternalStorage URI pointing to the CSV files to be
	// read. The map key must be unique across the entire IMPORT job.
	Uri map[int32]string `` /* 127-byte string literal not displayed */
	// resume_pos specifies a map from an input ID to an offset in that
	// input from which the processing should continue.
	// The meaning of offset is specific to each processor.
	ResumePos              map[int32]int64 `` /* 158-byte string literal not displayed */
	Progress               JobProgress     `protobuf:"bytes,6,opt,name=progress" json:"progress"`
	SkipMissingForeignKeys bool            `protobuf:"varint,10,opt,name=skip_missing_foreign_keys,json=skipMissingForeignKeys" json:"skip_missing_foreign_keys"`
	// walltimeNanos is the MVCC time at which the created KVs will be written.
	WalltimeNanos int64 `protobuf:"varint,11,opt,name=walltimeNanos" json:"walltimeNanos"`
	// If set, specifies reader parallelism; 0 implies "use default".
	ReaderParallelism int32 `protobuf:"varint,13,opt,name=readerParallelism" json:"readerParallelism"`
	// User who initiated the import. This is used to check access privileges
	// when using FileTable ExternalStorage.
	UserProto github_com_cockroachdb_cockroach_pkg_security.SQLUsernameProto `` /* 128-byte string literal not displayed */
	Types     []*descpb.TypeDescriptor                                       `protobuf:"bytes,16,rep,name=types" json:"types,omitempty"`
	// If the database being imported into is a multi-region database, then this
	// field stores the database's primary region.
	DatabasePrimaryRegion github_com_cockroachdb_cockroach_pkg_sql_catalog_catpb.RegionName `` /* 169-byte string literal not displayed */
	InitialSplits         int32                                                             `protobuf:"varint,18,opt,name=initial_splits,json=initialSplits" json:"initial_splits"`
}
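
A hedged sketch of the uri/resume_pos interaction described above: two inputs keyed 1 and 2, with processing of input 2 resuming at an input-specific offset. The nodelocal URIs are illustrative only.

spec := execinfrapb.ReadImportDataSpec{
	Uri: map[int32]string{
		1: "nodelocal://0/a.csv",
		2: "nodelocal://0/b.csv",
	},
	ResumePos: map[int32]int64{2: 4096}, // resume input 2 mid-file
}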

func (*ReadImportDataSpec) Descriptor

func (*ReadImportDataSpec) Descriptor() ([]byte, []int)

func (*ReadImportDataSpec) Marshal

func (m *ReadImportDataSpec) Marshal() (dAtA []byte, err error)

func (*ReadImportDataSpec) MarshalTo

func (m *ReadImportDataSpec) MarshalTo(dAtA []byte) (int, error)

func (*ReadImportDataSpec) MarshalToSizedBuffer

func (m *ReadImportDataSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ReadImportDataSpec) ProtoMessage

func (*ReadImportDataSpec) ProtoMessage()

func (*ReadImportDataSpec) Reset

func (m *ReadImportDataSpec) Reset()

func (*ReadImportDataSpec) Size

func (m *ReadImportDataSpec) Size() (n int)

func (*ReadImportDataSpec) String

func (m *ReadImportDataSpec) String() string

func (*ReadImportDataSpec) Unmarshal

func (m *ReadImportDataSpec) Unmarshal(dAtA []byte) error

func (*ReadImportDataSpec) User

func (spec *ReadImportDataSpec) User() security.SQLUsername

User accesses the user field.

func (*ReadImportDataSpec) XXX_DiscardUnknown

func (m *ReadImportDataSpec) XXX_DiscardUnknown()

func (*ReadImportDataSpec) XXX_Marshal

func (m *ReadImportDataSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ReadImportDataSpec) XXX_Merge

func (m *ReadImportDataSpec) XXX_Merge(src proto.Message)

func (*ReadImportDataSpec) XXX_Size

func (m *ReadImportDataSpec) XXX_Size() int

func (*ReadImportDataSpec) XXX_Unmarshal

func (m *ReadImportDataSpec) XXX_Unmarshal(b []byte) error

type ReadImportDataSpec_ImportTable

type ReadImportDataSpec_ImportTable struct {
	Desc *descpb.TableDescriptor `protobuf:"bytes,1,opt,name=desc" json:"desc,omitempty"`
	// targetCols stores the target columns for each existing table being
	// imported into. These are the columns for which the processor should
	// read and emit data, ignoring input data for any other tables or for
	// columns outside of targetCols.
	TargetCols []string `protobuf:"bytes,2,rep,name=targetCols" json:"targetCols,omitempty"`
}

func (*ReadImportDataSpec_ImportTable) Descriptor

func (*ReadImportDataSpec_ImportTable) Descriptor() ([]byte, []int)

func (*ReadImportDataSpec_ImportTable) Marshal

func (m *ReadImportDataSpec_ImportTable) Marshal() (dAtA []byte, err error)

func (*ReadImportDataSpec_ImportTable) MarshalTo

func (m *ReadImportDataSpec_ImportTable) MarshalTo(dAtA []byte) (int, error)

func (*ReadImportDataSpec_ImportTable) MarshalToSizedBuffer

func (m *ReadImportDataSpec_ImportTable) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ReadImportDataSpec_ImportTable) ProtoMessage

func (*ReadImportDataSpec_ImportTable) ProtoMessage()

func (*ReadImportDataSpec_ImportTable) Reset

func (m *ReadImportDataSpec_ImportTable) Reset()

func (*ReadImportDataSpec_ImportTable) Size

func (m *ReadImportDataSpec_ImportTable) Size() (n int)

func (*ReadImportDataSpec_ImportTable) String

func (m *ReadImportDataSpec_ImportTable) String() string

func (*ReadImportDataSpec_ImportTable) Unmarshal

func (m *ReadImportDataSpec_ImportTable) Unmarshal(dAtA []byte) error

func (*ReadImportDataSpec_ImportTable) XXX_DiscardUnknown

func (m *ReadImportDataSpec_ImportTable) XXX_DiscardUnknown()

func (*ReadImportDataSpec_ImportTable) XXX_Marshal

func (m *ReadImportDataSpec_ImportTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ReadImportDataSpec_ImportTable) XXX_Merge

func (m *ReadImportDataSpec_ImportTable) XXX_Merge(src proto.Message)

func (*ReadImportDataSpec_ImportTable) XXX_Size

func (m *ReadImportDataSpec_ImportTable) XXX_Size() int

func (*ReadImportDataSpec_ImportTable) XXX_Unmarshal

func (m *ReadImportDataSpec_ImportTable) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata

type RemoteProducerMetadata struct {
	// Types that are valid to be assigned to Value:
	//	*RemoteProducerMetadata_RangeInfo
	//	*RemoteProducerMetadata_Error
	//	*RemoteProducerMetadata_TraceData_
	//	*RemoteProducerMetadata_LeafTxnFinalState
	//	*RemoteProducerMetadata_RowNum_
	//	*RemoteProducerMetadata_SamplerProgress_
	//	*RemoteProducerMetadata_Metrics_
	//	*RemoteProducerMetadata_BulkProcessorProgress_
	Value isRemoteProducerMetadata_Value `protobuf_oneof:"value"`
}

RemoteProducerMetadata represents records that a producer wants to pass to a consumer, other than data rows. It is named RemoteProducerMetadata to avoid clashing with ProducerMetadata, which is used internally within a node and carries a Go error rather than a proto-encoded error.

func LocalMetaToRemoteProducerMeta

func LocalMetaToRemoteProducerMeta(
	ctx context.Context, meta ProducerMetadata,
) RemoteProducerMetadata

LocalMetaToRemoteProducerMeta converts a ProducerMetadata struct to RemoteProducerMetadata.
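
A hedged round-trip sketch of the two conversion helpers (ctx and meta are assumed values); the boolean from RemoteProducerMetaToLocalMeta reports whether the remote record was understood.

rpm := execinfrapb.LocalMetaToRemoteProducerMeta(ctx, meta)
local, ok := execinfrapb.RemoteProducerMetaToLocalMeta(ctx, rpm)
if !ok {
	// The remote record carried a value this node does not recognize.
}
_ = local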

func (*RemoteProducerMetadata) Descriptor

func (*RemoteProducerMetadata) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata) GetBulkProcessorProgress

func (m *RemoteProducerMetadata) GetBulkProcessorProgress() *RemoteProducerMetadata_BulkProcessorProgress

func (*RemoteProducerMetadata) GetError

func (m *RemoteProducerMetadata) GetError() *Error

func (*RemoteProducerMetadata) GetLeafTxnFinalState

func (m *RemoteProducerMetadata) GetLeafTxnFinalState() *roachpb.LeafTxnFinalState

func (*RemoteProducerMetadata) GetMetrics

func (m *RemoteProducerMetadata) GetMetrics() *RemoteProducerMetadata_Metrics

func (*RemoteProducerMetadata) GetRangeInfo

func (m *RemoteProducerMetadata) GetRangeInfo() *RemoteProducerMetadata_RangeInfos

func (*RemoteProducerMetadata) GetRowNum

func (m *RemoteProducerMetadata) GetRowNum() *RemoteProducerMetadata_RowNum

func (*RemoteProducerMetadata) GetSamplerProgress

func (m *RemoteProducerMetadata) GetSamplerProgress() *RemoteProducerMetadata_SamplerProgress

func (*RemoteProducerMetadata) GetTraceData

func (m *RemoteProducerMetadata) GetTraceData() *RemoteProducerMetadata_TraceData

func (*RemoteProducerMetadata) GetValue

func (m *RemoteProducerMetadata) GetValue() isRemoteProducerMetadata_Value

func (*RemoteProducerMetadata) Marshal

func (m *RemoteProducerMetadata) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata) MarshalTo

func (m *RemoteProducerMetadata) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata) MarshalToSizedBuffer

func (m *RemoteProducerMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata) ProtoMessage

func (*RemoteProducerMetadata) ProtoMessage()

func (*RemoteProducerMetadata) Reset

func (m *RemoteProducerMetadata) Reset()

func (*RemoteProducerMetadata) Size

func (m *RemoteProducerMetadata) Size() (n int)

func (*RemoteProducerMetadata) String

func (m *RemoteProducerMetadata) String() string

func (*RemoteProducerMetadata) Unmarshal

func (m *RemoteProducerMetadata) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata) XXX_DiscardUnknown

func (m *RemoteProducerMetadata) XXX_DiscardUnknown()

func (*RemoteProducerMetadata) XXX_Marshal

func (m *RemoteProducerMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata) XXX_Merge

func (m *RemoteProducerMetadata) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata) XXX_OneofWrappers

func (*RemoteProducerMetadata) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*RemoteProducerMetadata) XXX_Size

func (m *RemoteProducerMetadata) XXX_Size() int

func (*RemoteProducerMetadata) XXX_Unmarshal

func (m *RemoteProducerMetadata) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_BulkProcessorProgress

type RemoteProducerMetadata_BulkProcessorProgress struct {
	CompletedSpans    []roachpb.Span    `protobuf:"bytes,1,rep,name=completed_spans,json=completedSpans" json:"completed_spans"`
	CompletedFraction map[int32]float32 `` /* 182-byte string literal not displayed */
	ResumePos         map[int32]int64   `` /* 157-byte string literal not displayed */
	// Used to stream back progress to the coordinator of a bulk job.
	ProgressDetails  types1.Any            `protobuf:"bytes,4,opt,name=progress_details,json=progressDetails" json:"progress_details"`
	BulkSummary      roachpb.BulkOpSummary `protobuf:"bytes,5,opt,name=bulk_summary,json=bulkSummary" json:"bulk_summary"`
	CompletedSpanIdx []int32               `protobuf:"varint,6,rep,name=completed_span_idx,json=completedSpanIdx" json:"completed_span_idx,omitempty"`
}

func (*RemoteProducerMetadata_BulkProcessorProgress) Descriptor

func (*RemoteProducerMetadata_BulkProcessorProgress) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata_BulkProcessorProgress) Marshal

func (m *RemoteProducerMetadata_BulkProcessorProgress) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata_BulkProcessorProgress) MarshalTo

func (m *RemoteProducerMetadata_BulkProcessorProgress) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_BulkProcessorProgress) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_BulkProcessorProgress) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_BulkProcessorProgress) ProtoMessage

func (*RemoteProducerMetadata_BulkProcessorProgress) ProtoMessage()

func (*RemoteProducerMetadata_BulkProcessorProgress) Reset

func (m *RemoteProducerMetadata_BulkProcessorProgress) Reset()

func (*RemoteProducerMetadata_BulkProcessorProgress) Size

func (m *RemoteProducerMetadata_BulkProcessorProgress) Size() (n int)

func (*RemoteProducerMetadata_BulkProcessorProgress) String

func (m *RemoteProducerMetadata_BulkProcessorProgress) String() string

func (*RemoteProducerMetadata_BulkProcessorProgress) Unmarshal

func (m *RemoteProducerMetadata_BulkProcessorProgress) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata_BulkProcessorProgress) XXX_DiscardUnknown

func (m *RemoteProducerMetadata_BulkProcessorProgress) XXX_DiscardUnknown()

func (*RemoteProducerMetadata_BulkProcessorProgress) XXX_Marshal

func (m *RemoteProducerMetadata_BulkProcessorProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata_BulkProcessorProgress) XXX_Merge

func (m *RemoteProducerMetadata_BulkProcessorProgress) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata_BulkProcessorProgress) XXX_Size

func (m *RemoteProducerMetadata_BulkProcessorProgress) XXX_Size() int

func (*RemoteProducerMetadata_BulkProcessorProgress) XXX_Unmarshal

func (m *RemoteProducerMetadata_BulkProcessorProgress) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_BulkProcessorProgress_

type RemoteProducerMetadata_BulkProcessorProgress_ struct {
	BulkProcessorProgress *RemoteProducerMetadata_BulkProcessorProgress `protobuf:"bytes,9,opt,name=bulk_processor_progress,json=bulkProcessorProgress,oneof" json:"bulk_processor_progress,omitempty"`
}

func (*RemoteProducerMetadata_BulkProcessorProgress_) MarshalTo

func (m *RemoteProducerMetadata_BulkProcessorProgress_) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_BulkProcessorProgress_) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_BulkProcessorProgress_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_BulkProcessorProgress_) Size

func (m *RemoteProducerMetadata_BulkProcessorProgress_) Size() (n int)

type RemoteProducerMetadata_Error

type RemoteProducerMetadata_Error struct {
	Error *Error `protobuf:"bytes,2,opt,name=error,oneof" json:"error,omitempty"`
}

func (*RemoteProducerMetadata_Error) MarshalTo

func (m *RemoteProducerMetadata_Error) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_Error) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_Error) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_Error) Size

func (m *RemoteProducerMetadata_Error) Size() (n int)

type RemoteProducerMetadata_LeafTxnFinalState

type RemoteProducerMetadata_LeafTxnFinalState struct {
	LeafTxnFinalState *roachpb.LeafTxnFinalState `protobuf:"bytes,4,opt,name=leaf_txn_final_state,json=leafTxnFinalState,oneof" json:"leaf_txn_final_state,omitempty"`
}

func (*RemoteProducerMetadata_LeafTxnFinalState) MarshalTo

func (m *RemoteProducerMetadata_LeafTxnFinalState) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_LeafTxnFinalState) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_LeafTxnFinalState) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_LeafTxnFinalState) Size

func (m *RemoteProducerMetadata_LeafTxnFinalState) Size() (n int)

type RemoteProducerMetadata_Metrics

type RemoteProducerMetadata_Metrics struct {
	// Total number of bytes read while executing a statement.
	BytesRead int64 `protobuf:"varint,1,opt,name=bytes_read,json=bytesRead" json:"bytes_read"`
	// Total number of rows read while executing a statement.
	RowsRead int64 `protobuf:"varint,2,opt,name=rows_read,json=rowsRead" json:"rows_read"`
	// Total number of rows modified while executing a statement.
	RowsWritten int64 `protobuf:"varint,3,opt,name=rows_written,json=rowsWritten" json:"rows_written"`
}

Metrics are unconditionally emitted by table readers.

func GetMetricsMeta

func GetMetricsMeta() *RemoteProducerMetadata_Metrics

GetMetricsMeta returns a metadata object from the pool of metrics metadata.
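
A hedged sketch of the metrics pool in use, attaching the counters to a pooled ProducerMetadata record; per the Release note below, the two objects are released to their own pools separately.

metrics := execinfrapb.GetMetricsMeta()
metrics.BytesRead = 1 << 20
metrics.RowsRead = 1000
meta := execinfrapb.GetProducerMeta()
meta.Metrics = metrics
// ... send meta downstream; release each object to its own pool ...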

func (*RemoteProducerMetadata_Metrics) Descriptor

func (*RemoteProducerMetadata_Metrics) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata_Metrics) Marshal

func (m *RemoteProducerMetadata_Metrics) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata_Metrics) MarshalTo

func (m *RemoteProducerMetadata_Metrics) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_Metrics) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_Metrics) ProtoMessage

func (*RemoteProducerMetadata_Metrics) ProtoMessage()

func (*RemoteProducerMetadata_Metrics) Release

func (meta *RemoteProducerMetadata_Metrics) Release()

Release is part of the Releasable interface. Note that although this metadata is only used together with a ProducerMetadata that comes from another pool, the two Release methods are not combined into one because the two objects have different lifetimes.

func (*RemoteProducerMetadata_Metrics) Reset

func (m *RemoteProducerMetadata_Metrics) Reset()

func (*RemoteProducerMetadata_Metrics) Size

func (m *RemoteProducerMetadata_Metrics) Size() (n int)

func (*RemoteProducerMetadata_Metrics) String

func (m *RemoteProducerMetadata_Metrics) String() string

func (*RemoteProducerMetadata_Metrics) Unmarshal

func (m *RemoteProducerMetadata_Metrics) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata_Metrics) XXX_DiscardUnknown

func (m *RemoteProducerMetadata_Metrics) XXX_DiscardUnknown()

func (*RemoteProducerMetadata_Metrics) XXX_Marshal

func (m *RemoteProducerMetadata_Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata_Metrics) XXX_Merge

func (m *RemoteProducerMetadata_Metrics) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata_Metrics) XXX_Size

func (m *RemoteProducerMetadata_Metrics) XXX_Size() int

func (*RemoteProducerMetadata_Metrics) XXX_Unmarshal

func (m *RemoteProducerMetadata_Metrics) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_Metrics_

type RemoteProducerMetadata_Metrics_ struct {
	Metrics *RemoteProducerMetadata_Metrics `protobuf:"bytes,8,opt,name=metrics,oneof" json:"metrics,omitempty"`
}

func (*RemoteProducerMetadata_Metrics_) MarshalTo

func (m *RemoteProducerMetadata_Metrics_) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_Metrics_) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_Metrics_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_Metrics_) Size

func (m *RemoteProducerMetadata_Metrics_) Size() (n int)

type RemoteProducerMetadata_RangeInfo

type RemoteProducerMetadata_RangeInfo struct {
	RangeInfo *RemoteProducerMetadata_RangeInfos `protobuf:"bytes,1,opt,name=range_info,json=rangeInfo,oneof" json:"range_info,omitempty"`
}

func (*RemoteProducerMetadata_RangeInfo) MarshalTo

func (m *RemoteProducerMetadata_RangeInfo) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RangeInfo) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_RangeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RangeInfo) Size

func (m *RemoteProducerMetadata_RangeInfo) Size() (n int)

type RemoteProducerMetadata_RangeInfos

type RemoteProducerMetadata_RangeInfos struct {
	RangeInfo []roachpb.RangeInfo `protobuf:"bytes,1,rep,name=range_info,json=rangeInfo" json:"range_info"`
}

func (*RemoteProducerMetadata_RangeInfos) Descriptor

func (*RemoteProducerMetadata_RangeInfos) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata_RangeInfos) Marshal

func (m *RemoteProducerMetadata_RangeInfos) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata_RangeInfos) MarshalTo

func (m *RemoteProducerMetadata_RangeInfos) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RangeInfos) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_RangeInfos) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RangeInfos) ProtoMessage

func (*RemoteProducerMetadata_RangeInfos) ProtoMessage()

func (*RemoteProducerMetadata_RangeInfos) Reset

func (m *RemoteProducerMetadata_RangeInfos) Reset()

func (*RemoteProducerMetadata_RangeInfos) Size

func (m *RemoteProducerMetadata_RangeInfos) Size() (n int)

func (*RemoteProducerMetadata_RangeInfos) String

func (m *RemoteProducerMetadata_RangeInfos) String() string

func (*RemoteProducerMetadata_RangeInfos) Unmarshal

func (m *RemoteProducerMetadata_RangeInfos) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata_RangeInfos) XXX_DiscardUnknown

func (m *RemoteProducerMetadata_RangeInfos) XXX_DiscardUnknown()

func (*RemoteProducerMetadata_RangeInfos) XXX_Marshal

func (m *RemoteProducerMetadata_RangeInfos) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata_RangeInfos) XXX_Merge

func (m *RemoteProducerMetadata_RangeInfos) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata_RangeInfos) XXX_Size

func (m *RemoteProducerMetadata_RangeInfos) XXX_Size() int

func (*RemoteProducerMetadata_RangeInfos) XXX_Unmarshal

func (m *RemoteProducerMetadata_RangeInfos) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_RowNum

type RemoteProducerMetadata_RowNum struct {
	// The ID of the processor that is producing rows.
	SenderID string `protobuf:"bytes,1,opt,name=sender_id,json=senderId" json:"sender_id"`
	// A running count of the number of rows emitted from the sender so far.
	RowNum int32 `protobuf:"varint,2,opt,name=row_num,json=rowNum" json:"row_num"`
	// When set, indicates that the row count contains the expected number of
	// RowNum messages with this ID.
	LastMsg bool `protobuf:"varint,3,opt,name=last_msg,json=lastMsg" json:"last_msg"`
}

RowNum is used to count the rows sent from a processor. It is used in tests to check that metadata is propagated correctly.

func (*RemoteProducerMetadata_RowNum) Descriptor

func (*RemoteProducerMetadata_RowNum) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata_RowNum) Marshal

func (m *RemoteProducerMetadata_RowNum) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata_RowNum) MarshalTo

func (m *RemoteProducerMetadata_RowNum) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RowNum) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_RowNum) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RowNum) ProtoMessage

func (*RemoteProducerMetadata_RowNum) ProtoMessage()

func (*RemoteProducerMetadata_RowNum) Reset

func (m *RemoteProducerMetadata_RowNum) Reset()

func (*RemoteProducerMetadata_RowNum) Size

func (m *RemoteProducerMetadata_RowNum) Size() (n int)

func (*RemoteProducerMetadata_RowNum) String

func (m *RemoteProducerMetadata_RowNum) String() string

func (*RemoteProducerMetadata_RowNum) Unmarshal

func (m *RemoteProducerMetadata_RowNum) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata_RowNum) XXX_DiscardUnknown

func (m *RemoteProducerMetadata_RowNum) XXX_DiscardUnknown()

func (*RemoteProducerMetadata_RowNum) XXX_Marshal

func (m *RemoteProducerMetadata_RowNum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata_RowNum) XXX_Merge

func (m *RemoteProducerMetadata_RowNum) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata_RowNum) XXX_Size

func (m *RemoteProducerMetadata_RowNum) XXX_Size() int

func (*RemoteProducerMetadata_RowNum) XXX_Unmarshal

func (m *RemoteProducerMetadata_RowNum) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_RowNum_

type RemoteProducerMetadata_RowNum_ struct {
	RowNum *RemoteProducerMetadata_RowNum `protobuf:"bytes,5,opt,name=row_num,json=rowNum,oneof" json:"row_num,omitempty"`
}

func (*RemoteProducerMetadata_RowNum_) MarshalTo

func (m *RemoteProducerMetadata_RowNum_) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RowNum_) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_RowNum_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_RowNum_) Size

func (m *RemoteProducerMetadata_RowNum_) Size() (n int)

type RemoteProducerMetadata_SamplerProgress

type RemoteProducerMetadata_SamplerProgress struct {
	// The number of rows processed by the sampler processor since the last
	// update.
	RowsProcessed uint64 `protobuf:"varint,1,opt,name=rows_processed,json=rowsProcessed" json:"rows_processed"`
	// Indicates that sample collection for histograms should be disabled,
	// likely because the sampler processor ran out of memory.
	HistogramDisabled bool `protobuf:"varint,2,opt,name=histogram_disabled,json=histogramDisabled" json:"histogram_disabled"`
}

func (*RemoteProducerMetadata_SamplerProgress) Descriptor

func (*RemoteProducerMetadata_SamplerProgress) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata_SamplerProgress) Marshal

func (m *RemoteProducerMetadata_SamplerProgress) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata_SamplerProgress) MarshalTo

func (m *RemoteProducerMetadata_SamplerProgress) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_SamplerProgress) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_SamplerProgress) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_SamplerProgress) ProtoMessage

func (*RemoteProducerMetadata_SamplerProgress) ProtoMessage()

func (*RemoteProducerMetadata_SamplerProgress) Reset

func (m *RemoteProducerMetadata_SamplerProgress) Reset()

func (*RemoteProducerMetadata_SamplerProgress) Size

func (m *RemoteProducerMetadata_SamplerProgress) Size() (n int)

func (*RemoteProducerMetadata_SamplerProgress) String

func (m *RemoteProducerMetadata_SamplerProgress) String() string

func (*RemoteProducerMetadata_SamplerProgress) Unmarshal

func (m *RemoteProducerMetadata_SamplerProgress) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata_SamplerProgress) XXX_DiscardUnknown

func (m *RemoteProducerMetadata_SamplerProgress) XXX_DiscardUnknown()

func (*RemoteProducerMetadata_SamplerProgress) XXX_Marshal

func (m *RemoteProducerMetadata_SamplerProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata_SamplerProgress) XXX_Merge

func (m *RemoteProducerMetadata_SamplerProgress) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata_SamplerProgress) XXX_Size

func (m *RemoteProducerMetadata_SamplerProgress) XXX_Size() int

func (*RemoteProducerMetadata_SamplerProgress) XXX_Unmarshal

func (m *RemoteProducerMetadata_SamplerProgress) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_SamplerProgress_

type RemoteProducerMetadata_SamplerProgress_ struct {
	SamplerProgress *RemoteProducerMetadata_SamplerProgress `protobuf:"bytes,7,opt,name=sampler_progress,json=samplerProgress,oneof" json:"sampler_progress,omitempty"`
}

func (*RemoteProducerMetadata_SamplerProgress_) MarshalTo

func (m *RemoteProducerMetadata_SamplerProgress_) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_SamplerProgress_) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_SamplerProgress_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_SamplerProgress_) Size

func (m *RemoteProducerMetadata_SamplerProgress_) Size() (n int)

type RemoteProducerMetadata_TraceData

type RemoteProducerMetadata_TraceData struct {
	CollectedSpans []tracingpb.RecordedSpan `protobuf:"bytes,1,rep,name=collected_spans,json=collectedSpans" json:"collected_spans"`
}

func (*RemoteProducerMetadata_TraceData) Descriptor

func (*RemoteProducerMetadata_TraceData) Descriptor() ([]byte, []int)

func (*RemoteProducerMetadata_TraceData) Marshal

func (m *RemoteProducerMetadata_TraceData) Marshal() (dAtA []byte, err error)

func (*RemoteProducerMetadata_TraceData) MarshalTo

func (m *RemoteProducerMetadata_TraceData) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_TraceData) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_TraceData) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_TraceData) ProtoMessage

func (*RemoteProducerMetadata_TraceData) ProtoMessage()

func (*RemoteProducerMetadata_TraceData) Reset

func (m *RemoteProducerMetadata_TraceData) Reset()

func (*RemoteProducerMetadata_TraceData) Size

func (m *RemoteProducerMetadata_TraceData) Size() (n int)

func (*RemoteProducerMetadata_TraceData) String

func (m *RemoteProducerMetadata_TraceData) String() string

func (*RemoteProducerMetadata_TraceData) Unmarshal

func (m *RemoteProducerMetadata_TraceData) Unmarshal(dAtA []byte) error

func (*RemoteProducerMetadata_TraceData) XXX_DiscardUnknown

func (m *RemoteProducerMetadata_TraceData) XXX_DiscardUnknown()

func (*RemoteProducerMetadata_TraceData) XXX_Marshal

func (m *RemoteProducerMetadata_TraceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RemoteProducerMetadata_TraceData) XXX_Merge

func (m *RemoteProducerMetadata_TraceData) XXX_Merge(src proto.Message)

func (*RemoteProducerMetadata_TraceData) XXX_Size

func (m *RemoteProducerMetadata_TraceData) XXX_Size() int

func (*RemoteProducerMetadata_TraceData) XXX_Unmarshal

func (m *RemoteProducerMetadata_TraceData) XXX_Unmarshal(b []byte) error

type RemoteProducerMetadata_TraceData_

type RemoteProducerMetadata_TraceData_ struct {
	TraceData *RemoteProducerMetadata_TraceData `protobuf:"bytes,3,opt,name=trace_data,json=traceData,oneof" json:"trace_data,omitempty"`
}

func (*RemoteProducerMetadata_TraceData_) MarshalTo

func (m *RemoteProducerMetadata_TraceData_) MarshalTo(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_TraceData_) MarshalToSizedBuffer

func (m *RemoteProducerMetadata_TraceData_) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RemoteProducerMetadata_TraceData_) Size

func (m *RemoteProducerMetadata_TraceData_) Size() (n int)

type RestoreDataSpec

type RestoreDataSpec struct {
	JobID        int64                          `protobuf:"varint,6,opt,name=job_id,json=jobId" json:"job_id"`
	RestoreTime  hlc.Timestamp                  `protobuf:"bytes,1,opt,name=restore_time,json=restoreTime" json:"restore_time"`
	Encryption   *roachpb.FileEncryptionOptions `protobuf:"bytes,2,opt,name=encryption" json:"encryption,omitempty"`
	TableRekeys  []TableRekey                   `protobuf:"bytes,3,rep,name=table_rekeys,json=tableRekeys" json:"table_rekeys"`
	TenantRekeys []TenantRekey                  `protobuf:"bytes,5,rep,name=tenant_rekeys,json=tenantRekeys" json:"tenant_rekeys"`
	// PKIDs is used to convert the result of an ExportRequest into row count
	// information passed back to track progress in the backup job.
	PKIDs      map[uint64]bool          `` /* 145-byte string literal not displayed */
	Validation jobspb.RestoreValidation `protobuf:"varint,7,opt,name=validation,enum=cockroach.sql.jobs.jobspb.RestoreValidation" json:"validation"`
}

func (*RestoreDataSpec) Descriptor

func (*RestoreDataSpec) Descriptor() ([]byte, []int)

func (*RestoreDataSpec) Marshal

func (m *RestoreDataSpec) Marshal() (dAtA []byte, err error)

func (*RestoreDataSpec) MarshalTo

func (m *RestoreDataSpec) MarshalTo(dAtA []byte) (int, error)

func (*RestoreDataSpec) MarshalToSizedBuffer

func (m *RestoreDataSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RestoreDataSpec) ProtoMessage

func (*RestoreDataSpec) ProtoMessage()

func (*RestoreDataSpec) Reset

func (m *RestoreDataSpec) Reset()

func (*RestoreDataSpec) Size

func (m *RestoreDataSpec) Size() (n int)

func (*RestoreDataSpec) String

func (m *RestoreDataSpec) String() string

func (*RestoreDataSpec) Unmarshal

func (m *RestoreDataSpec) Unmarshal(dAtA []byte) error

func (*RestoreDataSpec) XXX_DiscardUnknown

func (m *RestoreDataSpec) XXX_DiscardUnknown()

func (*RestoreDataSpec) XXX_Marshal

func (m *RestoreDataSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RestoreDataSpec) XXX_Merge

func (m *RestoreDataSpec) XXX_Merge(src proto.Message)

func (*RestoreDataSpec) XXX_Size

func (m *RestoreDataSpec) XXX_Size() int

func (*RestoreDataSpec) XXX_Unmarshal

func (m *RestoreDataSpec) XXX_Unmarshal(b []byte) error

type RestoreFileSpec

type RestoreFileSpec struct {
	Dir  roachpb.ExternalStorage `protobuf:"bytes,1,opt,name=dir" json:"dir"`
	Path string                  `protobuf:"bytes,2,opt,name=path" json:"path"`
}

func (*RestoreFileSpec) Descriptor

func (*RestoreFileSpec) Descriptor() ([]byte, []int)

func (*RestoreFileSpec) Marshal

func (m *RestoreFileSpec) Marshal() (dAtA []byte, err error)

func (*RestoreFileSpec) MarshalTo

func (m *RestoreFileSpec) MarshalTo(dAtA []byte) (int, error)

func (*RestoreFileSpec) MarshalToSizedBuffer

func (m *RestoreFileSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RestoreFileSpec) ProtoMessage

func (*RestoreFileSpec) ProtoMessage()

func (*RestoreFileSpec) Reset

func (m *RestoreFileSpec) Reset()

func (*RestoreFileSpec) Size

func (m *RestoreFileSpec) Size() (n int)

func (*RestoreFileSpec) String

func (m *RestoreFileSpec) String() string

func (*RestoreFileSpec) Unmarshal

func (m *RestoreFileSpec) Unmarshal(dAtA []byte) error

func (*RestoreFileSpec) XXX_DiscardUnknown

func (m *RestoreFileSpec) XXX_DiscardUnknown()

func (*RestoreFileSpec) XXX_Marshal

func (m *RestoreFileSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RestoreFileSpec) XXX_Merge

func (m *RestoreFileSpec) XXX_Merge(src proto.Message)

func (*RestoreFileSpec) XXX_Size

func (m *RestoreFileSpec) XXX_Size() int

func (*RestoreFileSpec) XXX_Unmarshal

func (m *RestoreFileSpec) XXX_Unmarshal(b []byte) error

type RestoreSpanEntry

type RestoreSpanEntry struct {
	Span        roachpb.Span      `protobuf:"bytes,1,opt,name=span" json:"span"`
	Files       []RestoreFileSpec `protobuf:"bytes,2,rep,name=files" json:"files"`
	ProgressIdx int64             `protobuf:"varint,3,opt,name=progressIdx" json:"progressIdx"`
}

RestoreSpanEntry entries are specified at planning time to the SplitAndScatter processors, which then stream them, encoded as bytes in rows, to the RestoreDataProcessors. This message contains a subset of the importEntry struct defined in restore.

func (*RestoreSpanEntry) Descriptor

func (*RestoreSpanEntry) Descriptor() ([]byte, []int)

func (*RestoreSpanEntry) Marshal

func (m *RestoreSpanEntry) Marshal() (dAtA []byte, err error)

func (*RestoreSpanEntry) MarshalTo

func (m *RestoreSpanEntry) MarshalTo(dAtA []byte) (int, error)

func (*RestoreSpanEntry) MarshalToSizedBuffer

func (m *RestoreSpanEntry) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*RestoreSpanEntry) ProtoMessage

func (*RestoreSpanEntry) ProtoMessage()

func (*RestoreSpanEntry) Reset

func (m *RestoreSpanEntry) Reset()

func (*RestoreSpanEntry) Size

func (m *RestoreSpanEntry) Size() (n int)

func (*RestoreSpanEntry) String

func (m *RestoreSpanEntry) String() string

func (*RestoreSpanEntry) Unmarshal

func (m *RestoreSpanEntry) Unmarshal(dAtA []byte) error

func (*RestoreSpanEntry) XXX_DiscardUnknown

func (m *RestoreSpanEntry) XXX_DiscardUnknown()

func (*RestoreSpanEntry) XXX_Marshal

func (m *RestoreSpanEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RestoreSpanEntry) XXX_Merge

func (m *RestoreSpanEntry) XXX_Merge(src proto.Message)

func (*RestoreSpanEntry) XXX_Size

func (m *RestoreSpanEntry) XXX_Size() int

func (*RestoreSpanEntry) XXX_Unmarshal

func (m *RestoreSpanEntry) XXX_Unmarshal(b []byte) error

type SampleAggregatorSpec

type SampleAggregatorSpec struct {
	Sketches         []SketchSpec `protobuf:"bytes,1,rep,name=sketches" json:"sketches"`
	InvertedSketches []SketchSpec `protobuf:"bytes,8,rep,name=inverted_sketches,json=invertedSketches" json:"inverted_sketches"`
	// The processor merges reservoir sample sets into a single sample set of
	// this size. (If this size is larger than the size of any sampler, it will
	// shrink dynamically.)
	SampleSize    uint32 `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"`
	MinSampleSize uint32 `protobuf:"varint,9,opt,name=min_sample_size,json=minSampleSize" json:"min_sample_size"`
	// The i-th value indicates the ColumnID of the i-th sampled row column.
	// These are necessary for writing out the statistic data.
	SampledColumnIDs []github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ColumnID `` /* 163-byte string literal not displayed */
	TableID          github_com_cockroachdb_cockroach_pkg_sql_catalog_descpb.ID         `protobuf:"varint,4,opt,name=table_id,json=tableId,casttype=sqlfmt/cockroach/pkg/sql/catalog/descpb.ID" json:"table_id"`
	// JobID is the id of the CREATE STATISTICS job.
	JobID github_com_cockroachdb_cockroach_pkg_jobs_jobspb.JobID `protobuf:"varint,6,opt,name=job_id,json=jobId,casttype=sqlfmt/cockroach/pkg/jobs/jobspb.JobID" json:"job_id"`
	// The total number of rows expected in the table based on previous runs of
	// CREATE STATISTICS. Used for progress reporting. If rows expected is 0,
	// reported progress is 0 until the very end.
	RowsExpected uint64 `protobuf:"varint,7,opt,name=rows_expected,json=rowsExpected" json:"rows_expected"`
}

SampleAggregatorSpec is the specification of a processor that aggregates the results from multiple sampler processors and writes out the statistics to system.table_statistics.

The input schema it expects matches the output schema of a sampler spec (see the comment for SamplerSpec for all the details):

  1. sampled row columns:
     - sampled columns
     - row rank
  2. sketch columns:
     - sketch index
     - number of rows processed
     - number of rows encountered with NULL values on all columns of the sketch
     - average size of the columns of the sketch in bytes
     - binary sketch data
  3. inverted columns:
     - column index for inverted sample
     - sample column

func (*SampleAggregatorSpec) Descriptor

func (*SampleAggregatorSpec) Descriptor() ([]byte, []int)

func (*SampleAggregatorSpec) Marshal

func (m *SampleAggregatorSpec) Marshal() (dAtA []byte, err error)

func (*SampleAggregatorSpec) MarshalTo

func (m *SampleAggregatorSpec) MarshalTo(dAtA []byte) (int, error)

func (*SampleAggregatorSpec) MarshalToSizedBuffer

func (m *SampleAggregatorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SampleAggregatorSpec) ProtoMessage

func (*SampleAggregatorSpec) ProtoMessage()

func (*SampleAggregatorSpec) Reset

func (m *SampleAggregatorSpec) Reset()

func (*SampleAggregatorSpec) Size

func (m *SampleAggregatorSpec) Size() (n int)

func (*SampleAggregatorSpec) String

func (m *SampleAggregatorSpec) String() string

func (*SampleAggregatorSpec) Unmarshal

func (m *SampleAggregatorSpec) Unmarshal(dAtA []byte) error

func (*SampleAggregatorSpec) XXX_DiscardUnknown

func (m *SampleAggregatorSpec) XXX_DiscardUnknown()

func (*SampleAggregatorSpec) XXX_Marshal

func (m *SampleAggregatorSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SampleAggregatorSpec) XXX_Merge

func (m *SampleAggregatorSpec) XXX_Merge(src proto.Message)

func (*SampleAggregatorSpec) XXX_Size

func (m *SampleAggregatorSpec) XXX_Size() int

func (*SampleAggregatorSpec) XXX_Unmarshal

func (m *SampleAggregatorSpec) XXX_Unmarshal(b []byte) error

type SamplerSpec

type SamplerSpec struct {
	Sketches         []SketchSpec `protobuf:"bytes,1,rep,name=sketches" json:"sketches"`
	InvertedSketches []SketchSpec `protobuf:"bytes,4,rep,name=inverted_sketches,json=invertedSketches" json:"inverted_sketches"`
	SampleSize       uint32       `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"`
	MinSampleSize    uint32       `protobuf:"varint,5,opt,name=min_sample_size,json=minSampleSize" json:"min_sample_size"`
	// Setting this value enables throttling; this is the fraction of time that
	// the sampler processors will be idle when the recent CPU usage is high. The
	// throttling is adaptive so the actual idle fraction will depend on CPU
	// usage; this value is a ceiling.
	//
	// Currently, this field is set only for automatic statistics based on the
	// value of the cluster setting
	// sql.stats.automatic_collection.max_fraction_idle.
	MaxFractionIdle float64 `protobuf:"fixed64,3,opt,name=max_fraction_idle,json=maxFractionIdle" json:"max_fraction_idle"`
}

SamplerSpec is the specification of a "sampler" processor which returns a sample (random subset) of the input columns and computes cardinality estimation sketches on sets of columns.

The sampler is configured with a sample size and sets of columns for the sketches. It produces one row with global statistics, one row with sketch information for each sketch, plus at least min_sample_size and at most sample_size sampled rows.

For each column with an inverted index, a sketch and sample reservoir are created. Each of these produces one sketch row and at least min_sample_size and at most sample_size sampled rows from the inverted index keys.

The following method is used for reservoir sampling: we generate a "rank" for each row, which is simply a random, uniformly distributed 64-bit value. The rows with the smallest <sample_size> ranks are selected. This method is chosen because it makes combining sample sets straightforward.
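
A minimal, self-contained illustration of the rank-based reservoir described above (hypothetical names; this sketches the technique, not the package's actual implementation):

package main

import (
	"container/heap"
	"fmt"
	"math/rand"
)

// sampledRow pairs a row with its random rank.
type sampledRow struct {
	row  []string
	rank uint64
}

// maxHeap orders sampled rows by descending rank, so the row with the
// largest rank (the first eviction candidate) sits at the root.
type maxHeap []sampledRow

func (h maxHeap) Len() int            { return len(h) }
func (h maxHeap) Less(i, j int) bool  { return h[i].rank > h[j].rank }
func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(sampledRow)) }
func (h *maxHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// mergeRow keeps the rows with the smallest sampleSize ranks seen so far.
// Combining two sample sets is just feeding one set's (row, rank) pairs
// through mergeRow, reusing the existing ranks instead of drawing new ones.
func mergeRow(h *maxHeap, r sampledRow, sampleSize int) {
	if h.Len() < sampleSize {
		heap.Push(h, r)
	} else if r.rank < (*h)[0].rank {
		(*h)[0] = r
		heap.Fix(h, 0)
	}
}

// sampleRow draws a fresh uniform 64-bit rank for a new input row.
func sampleRow(h *maxHeap, row []string, sampleSize int) {
	mergeRow(h, sampledRow{row: row, rank: rand.Uint64()}, sampleSize)
}

func main() {
	var reservoir maxHeap
	for i := 0; i < 1000; i++ {
		sampleRow(&reservoir, []string{fmt.Sprintf("row-%d", i)}, 10)
	}
	fmt.Println(reservoir.Len()) // 10: a uniform sample of the 1000 rows
}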

The internal schema of the processor is formed of three column groups:

  1. sampled row columns:
     - columns that map 1-1 to the columns in the input (same schema as the input); columns unused in a histogram are set to NULL
     - an INT column with the "rank" of the row: a random value associated with the row (necessary for combining sample sets)
  2. sketch columns:
     - an INT column indicating the sketch index (0 to len(sketches) - 1)
     - an INT column indicating the number of rows processed
     - an INT column indicating the number of rows with NULL values on all columns of the sketch
     - an INT column indicating the sum of the sizes of all columns of the sketch, in bytes
     - a BYTES column with the binary sketch data (format dependent on the sketch type)
  3. inverted columns:
     - an INT column identifying the column index for this inverted sample
     - a BYTES column with the inverted index key

There are four row types produced:

  1. sample rows, using column group #1 and the numRows column from #2.
  2. sketch rows, using column group #2.
  3. inverted sample rows, using column group #3, the rank column from #1, and numRows column from #2.
  4. inverted sketch rows, using column group #2 and first column from #3.

Rows have NULLs on all columns they do not use.

func (*SamplerSpec) Descriptor

func (*SamplerSpec) Descriptor() ([]byte, []int)

func (*SamplerSpec) Marshal

func (m *SamplerSpec) Marshal() (dAtA []byte, err error)

func (*SamplerSpec) MarshalTo

func (m *SamplerSpec) MarshalTo(dAtA []byte) (int, error)

func (*SamplerSpec) MarshalToSizedBuffer

func (m *SamplerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SamplerSpec) ProtoMessage

func (*SamplerSpec) ProtoMessage()

func (*SamplerSpec) Reset

func (m *SamplerSpec) Reset()

func (*SamplerSpec) Size

func (m *SamplerSpec) Size() (n int)

func (*SamplerSpec) String

func (m *SamplerSpec) String() string

func (*SamplerSpec) Unmarshal

func (m *SamplerSpec) Unmarshal(dAtA []byte) error

func (*SamplerSpec) XXX_DiscardUnknown

func (m *SamplerSpec) XXX_DiscardUnknown()

func (*SamplerSpec) XXX_Marshal

func (m *SamplerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SamplerSpec) XXX_Merge

func (m *SamplerSpec) XXX_Merge(src proto.Message)

func (*SamplerSpec) XXX_Size

func (m *SamplerSpec) XXX_Size() int

func (*SamplerSpec) XXX_Unmarshal

func (m *SamplerSpec) XXX_Unmarshal(b []byte) error

type SetupFlowRequest

type SetupFlowRequest struct {
	TraceInfo tracingpb.TraceInfo `protobuf:"bytes,11,opt,name=trace_info,json=traceInfo" json:"trace_info"`
	// LeafTxnInputState is the input parameter for the *client.Txn needed for
	// executing the flow.
	// If nil, the flow will not run in a higher-level transaction
	// (i.e. it is responsible for managing its own transactions, if any). Most
	// flows expect to run in a txn, but some, like backfills, don't.
	LeafTxnInputState *roachpb.LeafTxnInputState `protobuf:"bytes,7,opt,name=leaf_txn_input_state,json=leafTxnInputState" json:"leaf_txn_input_state,omitempty"`
	// Version of distsqlrun protocol; a server accepts a certain range of
	// versions, up to its own version. See server.go for more details.
	Version     DistSQLVersion `protobuf:"varint,5,opt,name=version,casttype=DistSQLVersion" json:"version"`
	Flow        FlowSpec       `protobuf:"bytes,3,opt,name=flow" json:"flow"`
	EvalContext EvalContext    `protobuf:"bytes,6,opt,name=eval_context,json=evalContext" json:"eval_context"`
	TraceKV     bool           `protobuf:"varint,8,opt,name=trace_kv,json=traceKv" json:"trace_kv"`
	// CollectStats specifies whether stats collection should be enabled for this
	// flow. Note that some form of tracing must be enabled for these stats to be
	// observed, since the resulting stats are added as structured data to a
	// trace.
	CollectStats bool `protobuf:"varint,9,opt,name=collect_stats,json=collectStats" json:"collect_stats"`
	// StatementSQL is the SQL statement for which this flow is executing. It
	// is populated on a best effort basis.
	StatementSQL string `protobuf:"bytes,10,opt,name=statement_sql,json=statementSql" json:"statement_sql"`
}
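
A minimal sketch of a request, assuming a FlowSpec, EvalContext, and protocol version produced elsewhere (the field values here are hypothetical):

func setupRequest(flow execinfrapb.FlowSpec, evalCtx execinfrapb.EvalContext, version execinfrapb.DistSQLVersion) execinfrapb.SetupFlowRequest {
	return execinfrapb.SetupFlowRequest{
		Version:      version,    // must fall within the server's accepted range
		Flow:         flow,
		EvalContext:  evalCtx,
		CollectStats: true,       // only observable if some form of tracing is enabled
		StatementSQL: "SELECT 1", // hypothetical; populated on a best-effort basis
		// LeafTxnInputState is left nil: the flow manages its own
		// transactions, if any (e.g. backfills).
	}
}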

func (*SetupFlowRequest) Descriptor

func (*SetupFlowRequest) Descriptor() ([]byte, []int)

func (*SetupFlowRequest) Marshal

func (m *SetupFlowRequest) Marshal() (dAtA []byte, err error)

func (*SetupFlowRequest) MarshalTo

func (m *SetupFlowRequest) MarshalTo(dAtA []byte) (int, error)

func (*SetupFlowRequest) MarshalToSizedBuffer

func (m *SetupFlowRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SetupFlowRequest) ProtoMessage

func (*SetupFlowRequest) ProtoMessage()

func (*SetupFlowRequest) Reset

func (m *SetupFlowRequest) Reset()

func (*SetupFlowRequest) Size

func (m *SetupFlowRequest) Size() (n int)

func (*SetupFlowRequest) String

func (m *SetupFlowRequest) String() string

func (*SetupFlowRequest) Unmarshal

func (m *SetupFlowRequest) Unmarshal(dAtA []byte) error

func (*SetupFlowRequest) XXX_DiscardUnknown

func (m *SetupFlowRequest) XXX_DiscardUnknown()

func (*SetupFlowRequest) XXX_Marshal

func (m *SetupFlowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SetupFlowRequest) XXX_Merge

func (m *SetupFlowRequest) XXX_Merge(src proto.Message)

func (*SetupFlowRequest) XXX_Size

func (m *SetupFlowRequest) XXX_Size() int

func (*SetupFlowRequest) XXX_Unmarshal

func (m *SetupFlowRequest) XXX_Unmarshal(b []byte) error

type SimpleResponse

type SimpleResponse struct {
	Error *Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}

func (*SimpleResponse) Descriptor

func (*SimpleResponse) Descriptor() ([]byte, []int)

func (*SimpleResponse) Marshal

func (m *SimpleResponse) Marshal() (dAtA []byte, err error)

func (*SimpleResponse) MarshalTo

func (m *SimpleResponse) MarshalTo(dAtA []byte) (int, error)

func (*SimpleResponse) MarshalToSizedBuffer

func (m *SimpleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SimpleResponse) ProtoMessage

func (*SimpleResponse) ProtoMessage()

func (*SimpleResponse) Reset

func (m *SimpleResponse) Reset()

func (*SimpleResponse) Size

func (m *SimpleResponse) Size() (n int)

func (*SimpleResponse) String

func (m *SimpleResponse) String() string

func (*SimpleResponse) Unmarshal

func (m *SimpleResponse) Unmarshal(dAtA []byte) error

func (*SimpleResponse) XXX_DiscardUnknown

func (m *SimpleResponse) XXX_DiscardUnknown()

func (*SimpleResponse) XXX_Marshal

func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SimpleResponse) XXX_Merge

func (m *SimpleResponse) XXX_Merge(src proto.Message)

func (*SimpleResponse) XXX_Size

func (m *SimpleResponse) XXX_Size() int

func (*SimpleResponse) XXX_Unmarshal

func (m *SimpleResponse) XXX_Unmarshal(b []byte) error

type SketchSpec

type SketchSpec struct {
	SketchType SketchType `protobuf:"varint,1,opt,name=sketch_type,json=sketchType,enum=cockroach.sql.distsqlrun.SketchType" json:"sketch_type"`
	// Each value is an index identifying a column in the input stream.
	// TODO(radu): currently only one column is supported.
	Columns []uint32 `protobuf:"varint,2,rep,name=columns" json:"columns,omitempty"`
	// If set, we generate a histogram for the first column in the sketch.
	GenerateHistogram bool `protobuf:"varint,3,opt,name=generate_histogram,json=generateHistogram" json:"generate_histogram"`
	// Controls the maximum number of buckets in the histogram.
	// Only used by the SampleAggregator.
	HistogramMaxBuckets uint32 `protobuf:"varint,4,opt,name=histogram_max_buckets,json=histogramMaxBuckets" json:"histogram_max_buckets"`
	// Only used by the SampleAggregator.
	StatName string `protobuf:"bytes,5,opt,name=stat_name,json=statName" json:"stat_name"`
	// Index is needed by some types (for example the geo types) when generating
	// inverted index entries, since it may contain configuration.
	Index *descpb.IndexDescriptor `protobuf:"bytes,6,opt,name=index" json:"index,omitempty"`
}

SketchSpec contains the specification for a generated statistic.
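
A minimal sketch of a SketchSpec requesting an HLL++ sketch and a histogram over the first input column (the bucket count and statistic name are hypothetical):

func statSketch() execinfrapb.SketchSpec {
	return execinfrapb.SketchSpec{
		SketchType:          execinfrapb.SketchType_HLL_PLUS_PLUS_V1,
		Columns:             []uint32{0}, // currently only one column is supported
		GenerateHistogram:   true,        // histogram over the first column in the sketch
		HistogramMaxBuckets: 200,         // read only by the SampleAggregator
		StatName:            "my_stat",   // read only by the SampleAggregator
	}
}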

func (*SketchSpec) Descriptor

func (*SketchSpec) Descriptor() ([]byte, []int)

func (*SketchSpec) Marshal

func (m *SketchSpec) Marshal() (dAtA []byte, err error)

func (*SketchSpec) MarshalTo

func (m *SketchSpec) MarshalTo(dAtA []byte) (int, error)

func (*SketchSpec) MarshalToSizedBuffer

func (m *SketchSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SketchSpec) ProtoMessage

func (*SketchSpec) ProtoMessage()

func (*SketchSpec) Reset

func (m *SketchSpec) Reset()

func (*SketchSpec) Size

func (m *SketchSpec) Size() (n int)

func (*SketchSpec) String

func (m *SketchSpec) String() string

func (*SketchSpec) Unmarshal

func (m *SketchSpec) Unmarshal(dAtA []byte) error

func (*SketchSpec) XXX_DiscardUnknown

func (m *SketchSpec) XXX_DiscardUnknown()

func (*SketchSpec) XXX_Marshal

func (m *SketchSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SketchSpec) XXX_Merge

func (m *SketchSpec) XXX_Merge(src proto.Message)

func (*SketchSpec) XXX_Size

func (m *SketchSpec) XXX_Size() int

func (*SketchSpec) XXX_Unmarshal

func (m *SketchSpec) XXX_Unmarshal(b []byte) error

type SketchType

type SketchType int32
const (
	// This is the github.com/axiomhq/hyperloglog binary format (as of commit
	// 730eea1) for a sketch with precision 14. Values are encoded using their key
	// encoding, except integers which are encoded in 8 bytes (little-endian).
	SketchType_HLL_PLUS_PLUS_V1 SketchType = 0
)
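
The format above comes from the github.com/axiomhq/hyperloglog library. A hedged sketch of producing compatible data with that library (assuming its New14, Insert, Estimate, and MarshalBinary API):

package main

import (
	"fmt"

	"github.com/axiomhq/hyperloglog"
)

func main() {
	// New14 creates a sketch with precision 14, matching the format
	// described for SketchType_HLL_PLUS_PLUS_V1.
	sk := hyperloglog.New14()
	for _, v := range []string{"a", "b", "a"} {
		sk.Insert([]byte(v))
	}
	fmt.Println(sk.Estimate()) // approximate distinct count: 2
	data, err := sk.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data)) // the binary sketch data carried in sketch rows
}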

func (SketchType) Enum

func (x SketchType) Enum() *SketchType

func (SketchType) EnumDescriptor

func (SketchType) EnumDescriptor() ([]byte, []int)

func (SketchType) String

func (x SketchType) String() string

func (*SketchType) UnmarshalJSON

func (x *SketchType) UnmarshalJSON(data []byte) error

type SorterSpec

type SorterSpec struct {
	OutputOrdering Ordering `protobuf:"bytes,1,opt,name=output_ordering,json=outputOrdering" json:"output_ordering"`
	// Ordering match length: the input is already sorted by the first 'n'
	// output ordering columns. This can optionally be specified to enable
	// speed-ups that take advantage of the partial ordering.
	OrderingMatchLen uint32 `protobuf:"varint,2,opt,name=ordering_match_len,json=orderingMatchLen" json:"ordering_match_len"`
	// The limit of a top-k sort is the number of rows that the sorter should
	// output. If limit is 0, then all rows should be output.
	Limit int64 `protobuf:"varint,3,opt,name=limit" json:"limit"`
}

SorterSpec is the specification for a "sorting aggregator". A sorting processor sorts elements in the input stream providing a certain output order guarantee regardless of the input ordering. The output ordering is according to a configurable set of columns.

The "internal columns" of a Sorter (see ProcessorSpec) are the same as the input columns.

func (*SorterSpec) Descriptor

func (*SorterSpec) Descriptor() ([]byte, []int)

func (*SorterSpec) Marshal

func (m *SorterSpec) Marshal() (dAtA []byte, err error)

func (*SorterSpec) MarshalTo

func (m *SorterSpec) MarshalTo(dAtA []byte) (int, error)

func (*SorterSpec) MarshalToSizedBuffer

func (m *SorterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SorterSpec) ProtoMessage

func (*SorterSpec) ProtoMessage()

func (*SorterSpec) Reset

func (m *SorterSpec) Reset()

func (*SorterSpec) Size

func (m *SorterSpec) Size() (n int)

func (*SorterSpec) String

func (m *SorterSpec) String() string

func (*SorterSpec) Unmarshal

func (m *SorterSpec) Unmarshal(dAtA []byte) error

func (*SorterSpec) XXX_DiscardUnknown

func (m *SorterSpec) XXX_DiscardUnknown()

func (*SorterSpec) XXX_Marshal

func (m *SorterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SorterSpec) XXX_Merge

func (m *SorterSpec) XXX_Merge(src proto.Message)

func (*SorterSpec) XXX_Size

func (m *SorterSpec) XXX_Size() int

func (*SorterSpec) XXX_Unmarshal

func (m *SorterSpec) XXX_Unmarshal(b []byte) error

type SplitAndScatterSpec

type SplitAndScatterSpec struct {
	JobID        int64                                   `protobuf:"varint,4,opt,name=job_id,json=jobId" json:"job_id"`
	Chunks       []SplitAndScatterSpec_RestoreEntryChunk `protobuf:"bytes,1,rep,name=chunks" json:"chunks"`
	TableRekeys  []TableRekey                            `protobuf:"bytes,2,rep,name=table_rekeys,json=tableRekeys" json:"table_rekeys"`
	TenantRekeys []TenantRekey                           `protobuf:"bytes,3,rep,name=tenant_rekeys,json=tenantRekeys" json:"tenant_rekeys"`
	Validation   jobspb.RestoreValidation                `protobuf:"varint,5,opt,name=validation,enum=cockroach.sql.jobs.jobspb.RestoreValidation" json:"validation"`
}

func (*SplitAndScatterSpec) Descriptor

func (*SplitAndScatterSpec) Descriptor() ([]byte, []int)

func (*SplitAndScatterSpec) Marshal

func (m *SplitAndScatterSpec) Marshal() (dAtA []byte, err error)

func (*SplitAndScatterSpec) MarshalTo

func (m *SplitAndScatterSpec) MarshalTo(dAtA []byte) (int, error)

func (*SplitAndScatterSpec) MarshalToSizedBuffer

func (m *SplitAndScatterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SplitAndScatterSpec) ProtoMessage

func (*SplitAndScatterSpec) ProtoMessage()

func (*SplitAndScatterSpec) Reset

func (m *SplitAndScatterSpec) Reset()

func (*SplitAndScatterSpec) Size

func (m *SplitAndScatterSpec) Size() (n int)

func (*SplitAndScatterSpec) String

func (m *SplitAndScatterSpec) String() string

func (*SplitAndScatterSpec) Unmarshal

func (m *SplitAndScatterSpec) Unmarshal(dAtA []byte) error

func (*SplitAndScatterSpec) XXX_DiscardUnknown

func (m *SplitAndScatterSpec) XXX_DiscardUnknown()

func (*SplitAndScatterSpec) XXX_Marshal

func (m *SplitAndScatterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SplitAndScatterSpec) XXX_Merge

func (m *SplitAndScatterSpec) XXX_Merge(src proto.Message)

func (*SplitAndScatterSpec) XXX_Size

func (m *SplitAndScatterSpec) XXX_Size() int

func (*SplitAndScatterSpec) XXX_Unmarshal

func (m *SplitAndScatterSpec) XXX_Unmarshal(b []byte) error

type SplitAndScatterSpec_RestoreEntryChunk

type SplitAndScatterSpec_RestoreEntryChunk struct {
	Entries []RestoreSpanEntry `protobuf:"bytes,1,rep,name=entries" json:"entries"`
}

func (*SplitAndScatterSpec_RestoreEntryChunk) Descriptor

func (*SplitAndScatterSpec_RestoreEntryChunk) Descriptor() ([]byte, []int)

func (*SplitAndScatterSpec_RestoreEntryChunk) Marshal

func (m *SplitAndScatterSpec_RestoreEntryChunk) Marshal() (dAtA []byte, err error)

func (*SplitAndScatterSpec_RestoreEntryChunk) MarshalTo

func (m *SplitAndScatterSpec_RestoreEntryChunk) MarshalTo(dAtA []byte) (int, error)

func (*SplitAndScatterSpec_RestoreEntryChunk) MarshalToSizedBuffer

func (m *SplitAndScatterSpec_RestoreEntryChunk) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*SplitAndScatterSpec_RestoreEntryChunk) ProtoMessage

func (*SplitAndScatterSpec_RestoreEntryChunk) ProtoMessage()

func (*SplitAndScatterSpec_RestoreEntryChunk) Reset

func (m *SplitAndScatterSpec_RestoreEntryChunk) Reset()

func (*SplitAndScatterSpec_RestoreEntryChunk) Size

func (m *SplitAndScatterSpec_RestoreEntryChunk) Size() (n int)

func (*SplitAndScatterSpec_RestoreEntryChunk) String

func (m *SplitAndScatterSpec_RestoreEntryChunk) String() string

func (*SplitAndScatterSpec_RestoreEntryChunk) Unmarshal

func (m *SplitAndScatterSpec_RestoreEntryChunk) Unmarshal(dAtA []byte) error

func (*SplitAndScatterSpec_RestoreEntryChunk) XXX_DiscardUnknown

func (m *SplitAndScatterSpec_RestoreEntryChunk) XXX_DiscardUnknown()

func (*SplitAndScatterSpec_RestoreEntryChunk) XXX_Marshal

func (m *SplitAndScatterSpec_RestoreEntryChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SplitAndScatterSpec_RestoreEntryChunk) XXX_Merge

func (m *SplitAndScatterSpec_RestoreEntryChunk) XXX_Merge(src proto.Message)

func (*SplitAndScatterSpec_RestoreEntryChunk) XXX_Size

func (m *SplitAndScatterSpec_RestoreEntryChunk) XXX_Size() int

func (*SplitAndScatterSpec_RestoreEntryChunk) XXX_Unmarshal

func (m *SplitAndScatterSpec_RestoreEntryChunk) XXX_Unmarshal(b []byte) error

type StreamEndpointSpec

type StreamEndpointSpec struct {
	Type StreamEndpointSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.StreamEndpointSpec_Type" json:"type"`
	// The ID of this stream.
	//
	// For LOCAL streams, both ends of the stream are part of the flow on this
	// machine (and there must be a corresponding endpoint with the same ID).
	//
	// For REMOTE streams, this ID is used in the ProducerHeader when connecting to
	// the other host.
	//
	// For SYNC_RESPONSE streams, the ID is unused.
	StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
	// SQLInstanceID of the target host, only used for outgoing REMOTE streams.
	TargetNodeID github_com_cockroachdb_cockroach_pkg_base.SQLInstanceID `` /* 132-byte string literal not displayed */
	// SQLInstanceID of the origin node, only used for REMOTE streams.
	OriginNodeID github_com_cockroachdb_cockroach_pkg_base.SQLInstanceID `` /* 132-byte string literal not displayed */
}

StreamEndpointSpec describes one of the endpoints (input or output) of a physical stream.

func (*StreamEndpointSpec) Descriptor

func (*StreamEndpointSpec) Descriptor() ([]byte, []int)

func (*StreamEndpointSpec) Marshal

func (m *StreamEndpointSpec) Marshal() (dAtA []byte, err error)

func (*StreamEndpointSpec) MarshalTo

func (m *StreamEndpointSpec) MarshalTo(dAtA []byte) (int, error)

func (*StreamEndpointSpec) MarshalToSizedBuffer

func (m *StreamEndpointSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StreamEndpointSpec) ProtoMessage

func (*StreamEndpointSpec) ProtoMessage()

func (*StreamEndpointSpec) Reset

func (m *StreamEndpointSpec) Reset()

func (*StreamEndpointSpec) Size

func (m *StreamEndpointSpec) Size() (n int)

func (*StreamEndpointSpec) String

func (m *StreamEndpointSpec) String() string

func (*StreamEndpointSpec) Unmarshal

func (m *StreamEndpointSpec) Unmarshal(dAtA []byte) error

func (*StreamEndpointSpec) XXX_DiscardUnknown

func (m *StreamEndpointSpec) XXX_DiscardUnknown()

func (*StreamEndpointSpec) XXX_Marshal

func (m *StreamEndpointSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamEndpointSpec) XXX_Merge

func (m *StreamEndpointSpec) XXX_Merge(src proto.Message)

func (*StreamEndpointSpec) XXX_Size

func (m *StreamEndpointSpec) XXX_Size() int

func (*StreamEndpointSpec) XXX_Unmarshal

func (m *StreamEndpointSpec) XXX_Unmarshal(b []byte) error

type StreamEndpointSpec_Type

type StreamEndpointSpec_Type int32
const (
	// Stream that is part of the local flow.
	StreamEndpointSpec_LOCAL StreamEndpointSpec_Type = 0
	// Stream that has the other endpoint on a different node.
	StreamEndpointSpec_REMOTE StreamEndpointSpec_Type = 1
	// Special stream used when in "sync flow" mode. In this mode, we return
	// results directly as part of the RPC call that set up the flow. This saves
	// overhead (extra RPCs) compared to the normal mode where the RPC just sets
	// up the flow. This type can only be used with outbound endpoints.
	StreamEndpointSpec_SYNC_RESPONSE StreamEndpointSpec_Type = 2
)
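
As an illustration, both endpoints of a LOCAL stream live in the same flow and must carry the same StreamID so they can be matched up; a minimal sketch (the ID is hypothetical):

func localStreamPair() (out, in execinfrapb.StreamEndpointSpec) {
	const id = execinfrapb.StreamID(1)
	out = execinfrapb.StreamEndpointSpec{Type: execinfrapb.StreamEndpointSpec_LOCAL, StreamID: id}
	in = execinfrapb.StreamEndpointSpec{Type: execinfrapb.StreamEndpointSpec_LOCAL, StreamID: id}
	return out, in
}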

func (StreamEndpointSpec_Type) Enum

func (x StreamEndpointSpec_Type) Enum() *StreamEndpointSpec_Type

func (StreamEndpointSpec_Type) EnumDescriptor

func (StreamEndpointSpec_Type) EnumDescriptor() ([]byte, []int)

func (StreamEndpointSpec_Type) String

func (x StreamEndpointSpec_Type) String() string

func (*StreamEndpointSpec_Type) UnmarshalJSON

func (x *StreamEndpointSpec_Type) UnmarshalJSON(data []byte) error

type StreamID

type StreamID int

StreamID identifies a stream; it may be local to a flow or it may cross machine boundaries. The identifier can only be used in the context of a specific flow.

func (StreamID) String

func (sid StreamID) String() string

type StreamIngestionDataSpec

type StreamIngestionDataSpec struct {
	// StreamID is the ID of the stream (which is shared across the producer and consumer).
	StreamID uint64 `protobuf:"varint,5,opt,name=stream_id,json=streamId" json:"stream_id"`
	// PartitionIds are the IDs of the partitions; the i'th ID identifies the
	// i'th partition.
	PartitionIds []string `protobuf:"bytes,6,rep,name=partition_ids,json=partitionIds" json:"partition_ids,omitempty"`
	// PartitionSpecs specify how to subscribe to the i'th partition.
	PartitionSpecs []string `protobuf:"bytes,7,rep,name=partition_specs,json=partitionSpecs" json:"partition_specs,omitempty"`
	// PartitionAddresses locate the partitions that produce events to be
	// ingested. We don't set the casttype to avoid depending on ccl packages.
	PartitionAddresses []string `protobuf:"bytes,8,rep,name=partition_addresses,json=partitionAddresses" json:"partition_addresses,omitempty"`
	// The processor will ingest events from StartTime onwards.
	StartTime hlc.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime" json:"start_time"`
	// StreamAddress locates the stream so that a stream client can be initialized.
	StreamAddress string `protobuf:"bytes,3,opt,name=stream_address,json=streamAddress" json:"stream_address"`
	// JobID is the job ID of the stream ingestion job.
	JobID int64 `protobuf:"varint,4,opt,name=job_id,json=jobId" json:"job_id"`
}

func (*StreamIngestionDataSpec) Descriptor

func (*StreamIngestionDataSpec) Descriptor() ([]byte, []int)

func (*StreamIngestionDataSpec) Marshal

func (m *StreamIngestionDataSpec) Marshal() (dAtA []byte, err error)

func (*StreamIngestionDataSpec) MarshalTo

func (m *StreamIngestionDataSpec) MarshalTo(dAtA []byte) (int, error)

func (*StreamIngestionDataSpec) MarshalToSizedBuffer

func (m *StreamIngestionDataSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StreamIngestionDataSpec) ProtoMessage

func (*StreamIngestionDataSpec) ProtoMessage()

func (*StreamIngestionDataSpec) Reset

func (m *StreamIngestionDataSpec) Reset()

func (*StreamIngestionDataSpec) Size

func (m *StreamIngestionDataSpec) Size() (n int)

func (*StreamIngestionDataSpec) String

func (m *StreamIngestionDataSpec) String() string

func (*StreamIngestionDataSpec) Unmarshal

func (m *StreamIngestionDataSpec) Unmarshal(dAtA []byte) error

func (*StreamIngestionDataSpec) XXX_DiscardUnknown

func (m *StreamIngestionDataSpec) XXX_DiscardUnknown()

func (*StreamIngestionDataSpec) XXX_Marshal

func (m *StreamIngestionDataSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamIngestionDataSpec) XXX_Merge

func (m *StreamIngestionDataSpec) XXX_Merge(src proto.Message)

func (*StreamIngestionDataSpec) XXX_Size

func (m *StreamIngestionDataSpec) XXX_Size() int

func (*StreamIngestionDataSpec) XXX_Unmarshal

func (m *StreamIngestionDataSpec) XXX_Unmarshal(b []byte) error

type StreamIngestionFrontierSpec

type StreamIngestionFrontierSpec struct {
	// HighWaterAtStart is set by the ingestion job when initializing the frontier
	// processor. It is used as a sanity check by the frontier processor to ensure
	// that it does not receive updates at a timestamp lower than this field. This
	// consequently prevents the job progress from regressing during ingestion.
	HighWaterAtStart hlc.Timestamp `protobuf:"bytes,1,opt,name=high_water_at_start,json=highWaterAtStart" json:"high_water_at_start"`
	// TrackedSpans is the entire span set being watched. The spans do not really
	// represent KV spans but uniquely identify the partitions in the ingestion
	// stream. Once all the partitions in the ingestion stream have been resolved
	// at a certain timestamp, it is safe to resolve the ingestion at that
	// timestamp.
	TrackedSpans []roachpb.Span `protobuf:"bytes,2,rep,name=tracked_spans,json=trackedSpans" json:"tracked_spans"`
	// JobID is the job ID of the stream ingestion job.
	JobID int64 `protobuf:"varint,3,opt,name=job_id,json=jobId" json:"job_id"`
	// StreamID is the ID of the stream.
	StreamID uint64 `protobuf:"varint,4,opt,name=stream_id,json=streamId" json:"stream_id"`
	// StreamAddress locates the stream so that a stream client can be initialized.
	StreamAddress string `protobuf:"bytes,5,opt,name=stream_address,json=streamAddress" json:"stream_address"`
}

func (*StreamIngestionFrontierSpec) Descriptor

func (*StreamIngestionFrontierSpec) Descriptor() ([]byte, []int)

func (*StreamIngestionFrontierSpec) Marshal

func (m *StreamIngestionFrontierSpec) Marshal() (dAtA []byte, err error)

func (*StreamIngestionFrontierSpec) MarshalTo

func (m *StreamIngestionFrontierSpec) MarshalTo(dAtA []byte) (int, error)

func (*StreamIngestionFrontierSpec) MarshalToSizedBuffer

func (m *StreamIngestionFrontierSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*StreamIngestionFrontierSpec) ProtoMessage

func (*StreamIngestionFrontierSpec) ProtoMessage()

func (*StreamIngestionFrontierSpec) Reset

func (m *StreamIngestionFrontierSpec) Reset()

func (*StreamIngestionFrontierSpec) Size

func (m *StreamIngestionFrontierSpec) Size() (n int)

func (*StreamIngestionFrontierSpec) String

func (m *StreamIngestionFrontierSpec) String() string

func (*StreamIngestionFrontierSpec) Unmarshal

func (m *StreamIngestionFrontierSpec) Unmarshal(dAtA []byte) error

func (*StreamIngestionFrontierSpec) XXX_DiscardUnknown

func (m *StreamIngestionFrontierSpec) XXX_DiscardUnknown()

func (*StreamIngestionFrontierSpec) XXX_Marshal

func (m *StreamIngestionFrontierSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*StreamIngestionFrontierSpec) XXX_Merge

func (m *StreamIngestionFrontierSpec) XXX_Merge(src proto.Message)

func (*StreamIngestionFrontierSpec) XXX_Size

func (m *StreamIngestionFrontierSpec) XXX_Size() int

func (*StreamIngestionFrontierSpec) XXX_Unmarshal

func (m *StreamIngestionFrontierSpec) XXX_Unmarshal(b []byte) error

type TableReaderSpec

type TableReaderSpec struct {
	FetchSpec descpb.IndexFetchSpec `protobuf:"bytes,20,opt,name=fetch_spec,json=fetchSpec" json:"fetch_spec"`
	// TableDescriptorModificationTime is the timestamp of the transaction
	// which last modified the table descriptor.
	// TODO(radu, otan): take this into account during planning and remove it from
	// this spec.
	TableDescriptorModificationTime hlc.Timestamp  `` /* 142-byte string literal not displayed */
	Reverse                         bool           `protobuf:"varint,3,opt,name=reverse" json:"reverse"`
	Spans                           []roachpb.Span `protobuf:"bytes,18,rep,name=spans" json:"spans"`
	// A hint for how many rows the consumer of the table reader output might
	// need. This is used to size the initial KV batches to try to avoid reading
	// many more rows than needed by the processor receiving the output.
	//
	// Not used if there is a limit set in the PostProcessSpec of this processor
	// (that value will be used for sizing batches instead).
	LimitHint int64 `protobuf:"varint,5,opt,name=limit_hint,json=limitHint" json:"limit_hint"`
	// If set, the TableReader can read all the spans in parallel, without any
	// batch limits. This should only be the case when there is a known upper
	// bound on the number of rows we can read, and when there is no limit or
	// limit hint.
	Parallelize bool `protobuf:"varint,12,opt,name=parallelize" json:"parallelize"`
	// batch_bytes_limit, if non-zero, controls the TargetBytes limits that the
	// TableReader will use for its scans. If zero, then the server-side default
	// is used. If parallelize is set, this cannot be set.
	BatchBytesLimit int64 `protobuf:"varint,17,opt,name=batch_bytes_limit,json=batchBytesLimit" json:"batch_bytes_limit"`
	// If non-zero, this enables inconsistent historical scanning where different
	// batches can be read with different timestamps. This is used for
	// long-running table statistics which may outlive the TTL. Using this setting
	// will cause inconsistencies across rows and even within rows.
	//
	// The value is a duration (in nanoseconds), which is the maximum "age" of the
	// timestamp. If the scan takes long enough for the timestamp to become older,
	// the timestamp is advanced by however much time passed.
	//
	// Example:
	//
	//     current time:      10
	//     initial timestamp: 0
	//     max timestamp age: 30
	//
	//     time
	//     10:    start scan, timestamp=0
	//     10-29: continue scanning at timestamp=0
	//     30:    bump timestamp to 20
	//     30-49: continue scanning at timestamp=20
	//     50:    bump timestamp to 40
	//     ...
	//
	// Note: it is an error to perform a historical read at an initial timestamp
	// older than this value.
	//
	MaxTimestampAgeNanos uint64 `protobuf:"varint,9,opt,name=max_timestamp_age_nanos,json=maxTimestampAgeNanos" json:"max_timestamp_age_nanos"`
	// Indicates the row-level locking strength to be used by the scan. If set to
	// FOR_NONE, no row-level locking should be performed.
	LockingStrength descpb.ScanLockingStrength `` /* 138-byte string literal not displayed */
	// Indicates the policy to be used by the scan for handling conflicting locks
	// held by other active transactions when attempting to lock rows. Always set
	// to BLOCK when locking_strength is FOR_NONE.
	LockingWaitPolicy descpb.ScanLockingWaitPolicy `` /* 148-byte string literal not displayed */
}

TableReaderSpec is the specification for a "table reader". A table reader performs KV operations to retrieve rows for a table and outputs the desired columns of the rows that pass a filter expression.

The "internal columns" of a TableReader (see ProcessorSpec) correspond to the FetchSpec.FetchedColumns.

func (*TableReaderSpec) Descriptor

func (*TableReaderSpec) Descriptor() ([]byte, []int)

func (*TableReaderSpec) Marshal

func (m *TableReaderSpec) Marshal() (dAtA []byte, err error)

func (*TableReaderSpec) MarshalTo

func (m *TableReaderSpec) MarshalTo(dAtA []byte) (int, error)

func (*TableReaderSpec) MarshalToSizedBuffer

func (m *TableReaderSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TableReaderSpec) ProtoMessage

func (*TableReaderSpec) ProtoMessage()

func (*TableReaderSpec) Reset

func (m *TableReaderSpec) Reset()

func (*TableReaderSpec) Size

func (m *TableReaderSpec) Size() (n int)

func (*TableReaderSpec) String

func (m *TableReaderSpec) String() string

func (*TableReaderSpec) Unmarshal

func (m *TableReaderSpec) Unmarshal(dAtA []byte) error

func (*TableReaderSpec) XXX_DiscardUnknown

func (m *TableReaderSpec) XXX_DiscardUnknown()

func (*TableReaderSpec) XXX_Marshal

func (m *TableReaderSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TableReaderSpec) XXX_Merge

func (m *TableReaderSpec) XXX_Merge(src proto.Message)

func (*TableReaderSpec) XXX_Size

func (m *TableReaderSpec) XXX_Size() int

func (*TableReaderSpec) XXX_Unmarshal

func (m *TableReaderSpec) XXX_Unmarshal(b []byte) error

type TableRekey

type TableRekey struct {
	// OldID is the previous ID of `new_desc`.
	OldID uint32 `protobuf:"varint,1,opt,name=old_id,json=oldId" json:"old_id"`
	// NewDesc is an encoded Descriptor message.
	NewDesc []byte `protobuf:"bytes,2,opt,name=new_desc,json=newDesc" json:"new_desc,omitempty"`
}

func (*TableRekey) Descriptor

func (*TableRekey) Descriptor() ([]byte, []int)

func (*TableRekey) Marshal

func (m *TableRekey) Marshal() (dAtA []byte, err error)

func (*TableRekey) MarshalTo

func (m *TableRekey) MarshalTo(dAtA []byte) (int, error)

func (*TableRekey) MarshalToSizedBuffer

func (m *TableRekey) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TableRekey) ProtoMessage

func (*TableRekey) ProtoMessage()

func (*TableRekey) Reset

func (m *TableRekey) Reset()

func (*TableRekey) Size

func (m *TableRekey) Size() (n int)

func (*TableRekey) String

func (m *TableRekey) String() string

func (*TableRekey) Unmarshal

func (m *TableRekey) Unmarshal(dAtA []byte) error

func (*TableRekey) XXX_DiscardUnknown

func (m *TableRekey) XXX_DiscardUnknown()

func (*TableRekey) XXX_Marshal

func (m *TableRekey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TableRekey) XXX_Merge

func (m *TableRekey) XXX_Merge(src proto.Message)

func (*TableRekey) XXX_Size

func (m *TableRekey) XXX_Size() int

func (*TableRekey) XXX_Unmarshal

func (m *TableRekey) XXX_Unmarshal(b []byte) error

type TenantRekey

type TenantRekey struct {
	// OldID is a previous tenant ID.
	OldID roachpb.TenantID `protobuf:"bytes,1,opt,name=old_id,json=oldId" json:"old_id"`
	// NewID is the ID with which to replace OldID.
	NewID roachpb.TenantID `protobuf:"bytes,2,opt,name=new_id,json=newId" json:"new_id"`
}

func (*TenantRekey) Descriptor

func (*TenantRekey) Descriptor() ([]byte, []int)

func (*TenantRekey) Marshal

func (m *TenantRekey) Marshal() (dAtA []byte, err error)

func (*TenantRekey) MarshalTo

func (m *TenantRekey) MarshalTo(dAtA []byte) (int, error)

func (*TenantRekey) MarshalToSizedBuffer

func (m *TenantRekey) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*TenantRekey) ProtoMessage

func (*TenantRekey) ProtoMessage()

func (*TenantRekey) Reset

func (m *TenantRekey) Reset()

func (*TenantRekey) Size

func (m *TenantRekey) Size() (n int)

func (*TenantRekey) String

func (m *TenantRekey) String() string

func (*TenantRekey) Unmarshal

func (m *TenantRekey) Unmarshal(dAtA []byte) error

func (*TenantRekey) XXX_DiscardUnknown

func (m *TenantRekey) XXX_DiscardUnknown()

func (*TenantRekey) XXX_Marshal

func (m *TenantRekey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TenantRekey) XXX_Merge

func (m *TenantRekey) XXX_Merge(src proto.Message)

func (*TenantRekey) XXX_Size

func (m *TenantRekey) XXX_Size() int

func (*TenantRekey) XXX_Unmarshal

func (m *TenantRekey) XXX_Unmarshal(b []byte) error

type UnimplementedDistSQLServer

type UnimplementedDistSQLServer struct {
}

UnimplementedDistSQLServer can be embedded to have forward compatible implementations.

func (*UnimplementedDistSQLServer) CancelDeadFlows

func (*UnimplementedDistSQLServer) FlowStream

func (*UnimplementedDistSQLServer) SetupFlow

type ValuesCoreSpec

type ValuesCoreSpec struct {
	// There is one DatumInfo for each element in a row. Can be empty, in which
	// case raw_bytes will be empty.
	Columns []DatumInfo `protobuf:"bytes,1,rep,name=columns" json:"columns"`
	// The number of rows is especially useful when we have zero columns.
	NumRows uint64 `protobuf:"varint,3,opt,name=num_rows,json=numRows" json:"num_rows"`
	// Each raw block encodes one row; each datum is encoded according to the
	// corresponding DatumInfo. As an optimization, if columns is empty, this will
	// be empty rather than containing empty byte strings.
	RawBytes [][]byte `protobuf:"bytes,2,rep,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
}

ValuesCoreSpec is the core of a processor that has no inputs and generates "pre-canned" rows. This is not intended to be used for very large datasets.
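
A minimal sketch of the zero-column case described above (three rows, no data):

func emptyRows() execinfrapb.ValuesCoreSpec {
	// With zero columns, RawBytes stays empty (per the optimization noted
	// in the field comments) and NumRows alone carries the row count.
	return execinfrapb.ValuesCoreSpec{NumRows: 3}
}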

func (*ValuesCoreSpec) Descriptor

func (*ValuesCoreSpec) Descriptor() ([]byte, []int)

func (*ValuesCoreSpec) Marshal

func (m *ValuesCoreSpec) Marshal() (dAtA []byte, err error)

func (*ValuesCoreSpec) MarshalTo

func (m *ValuesCoreSpec) MarshalTo(dAtA []byte) (int, error)

func (*ValuesCoreSpec) MarshalToSizedBuffer

func (m *ValuesCoreSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ValuesCoreSpec) ProtoMessage

func (*ValuesCoreSpec) ProtoMessage()

func (*ValuesCoreSpec) Reset

func (m *ValuesCoreSpec) Reset()

func (*ValuesCoreSpec) Size

func (m *ValuesCoreSpec) Size() (n int)

func (*ValuesCoreSpec) String

func (m *ValuesCoreSpec) String() string

func (*ValuesCoreSpec) Unmarshal

func (m *ValuesCoreSpec) Unmarshal(dAtA []byte) error

func (*ValuesCoreSpec) XXX_DiscardUnknown

func (m *ValuesCoreSpec) XXX_DiscardUnknown()

func (*ValuesCoreSpec) XXX_Marshal

func (m *ValuesCoreSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ValuesCoreSpec) XXX_Merge

func (m *ValuesCoreSpec) XXX_Merge(src proto.Message)

func (*ValuesCoreSpec) XXX_Size

func (m *ValuesCoreSpec) XXX_Size() int

func (*ValuesCoreSpec) XXX_Unmarshal

func (m *ValuesCoreSpec) XXX_Unmarshal(b []byte) error

type WindowerSpec

type WindowerSpec struct {
	// PartitionBy specifies how to partition rows for all window functions.
	PartitionBy []uint32 `protobuf:"varint,1,rep,name=partitionBy" json:"partitionBy,omitempty"`
	// WindowFns is the specification of all window functions to be computed.
	WindowFns []WindowerSpec_WindowFn `protobuf:"bytes,2,rep,name=windowFns" json:"windowFns"`
}

WindowerSpec is the specification of a processor that performs computations of window functions that have the same PARTITION BY clause. For a particular windowFn, the processor puts the result at windowFn.ArgIdxStart and "consumes" all arguments to windowFn (windowFn.ArgCount of them). So if windowFn takes no arguments, an extra column is added; if windowFn takes more than one argument, (windowFn.ArgCount - 1) columns are removed.
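
A minimal sketch of a windower computing row_number() OVER (PARTITION BY the first input column); the output column index is hypothetical:

func rowNumberWindower() execinfrapb.WindowerSpec {
	return execinfrapb.WindowerSpec{
		PartitionBy: []uint32{0},
		WindowFns: []execinfrapb.WindowerSpec_WindowFn{{
			Func: execinfrapb.WindowerSpec_Func{
				WindowFunc: execinfrapb.WindowerSpec_ROW_NUMBER.Enum(),
			},
			FilterColIdx: -1, // -1 means no FILTER clause
			OutputColIdx: 2,  // write row_number() into column 2
		}},
	}
}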

func (*WindowerSpec) Descriptor

func (*WindowerSpec) Descriptor() ([]byte, []int)

func (*WindowerSpec) Marshal

func (m *WindowerSpec) Marshal() (dAtA []byte, err error)

func (*WindowerSpec) MarshalTo

func (m *WindowerSpec) MarshalTo(dAtA []byte) (int, error)

func (*WindowerSpec) MarshalToSizedBuffer

func (m *WindowerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*WindowerSpec) ProtoMessage

func (*WindowerSpec) ProtoMessage()

func (*WindowerSpec) Reset

func (m *WindowerSpec) Reset()

func (*WindowerSpec) Size

func (m *WindowerSpec) Size() (n int)

func (*WindowerSpec) String

func (m *WindowerSpec) String() string

func (*WindowerSpec) Unmarshal

func (m *WindowerSpec) Unmarshal(dAtA []byte) error

func (*WindowerSpec) XXX_DiscardUnknown

func (m *WindowerSpec) XXX_DiscardUnknown()

func (*WindowerSpec) XXX_Marshal

func (m *WindowerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WindowerSpec) XXX_Merge

func (m *WindowerSpec) XXX_Merge(src proto.Message)

func (*WindowerSpec) XXX_Size

func (m *WindowerSpec) XXX_Size() int

func (*WindowerSpec) XXX_Unmarshal

func (m *WindowerSpec) XXX_Unmarshal(b []byte) error

type WindowerSpec_Frame

type WindowerSpec_Frame struct {
	Mode      WindowerSpec_Frame_Mode      `protobuf:"varint,1,opt,name=mode,enum=cockroach.sql.distsqlrun.WindowerSpec_Frame_Mode" json:"mode"`
	Bounds    WindowerSpec_Frame_Bounds    `protobuf:"bytes,2,opt,name=bounds" json:"bounds"`
	Exclusion WindowerSpec_Frame_Exclusion `protobuf:"varint,3,opt,name=exclusion,enum=cockroach.sql.distsqlrun.WindowerSpec_Frame_Exclusion" json:"exclusion"`
}

Frame is the specification of a single window frame for a window function.
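
A minimal sketch of a frame equivalent to ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW, sanity-checked through ConvertToAST (documented below):

func rowsFrame() (*execinfrapb.WindowerSpec_Frame, error) {
	f := &execinfrapb.WindowerSpec_Frame{
		Mode: execinfrapb.WindowerSpec_Frame_ROWS,
		Bounds: execinfrapb.WindowerSpec_Frame_Bounds{
			Start: execinfrapb.WindowerSpec_Frame_Bound{
				BoundType: execinfrapb.WindowerSpec_Frame_UNBOUNDED_PRECEDING,
			},
			End: &execinfrapb.WindowerSpec_Frame_Bound{
				BoundType: execinfrapb.WindowerSpec_Frame_CURRENT_ROW,
			},
		},
	}
	if _, err := f.ConvertToAST(); err != nil { // round-trip check via the AST form
		return nil, err
	}
	return f, nil
}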

func (*WindowerSpec_Frame) ConvertToAST

func (spec *WindowerSpec_Frame) ConvertToAST() (*tree.WindowFrame, error)

ConvertToAST produces a tree.WindowFrame given a WindowerSpec_Frame.

func (*WindowerSpec_Frame) Descriptor

func (*WindowerSpec_Frame) Descriptor() ([]byte, []int)

func (*WindowerSpec_Frame) InitFromAST

func (spec *WindowerSpec_Frame) InitFromAST(f *tree.WindowFrame, evalCtx *tree.EvalContext) error

InitFromAST initializes the spec based on tree.WindowFrame. It will evaluate offset expressions if present in the frame.

func (*WindowerSpec_Frame) Marshal

func (m *WindowerSpec_Frame) Marshal() (dAtA []byte, err error)

func (*WindowerSpec_Frame) MarshalTo

func (m *WindowerSpec_Frame) MarshalTo(dAtA []byte) (int, error)

func (*WindowerSpec_Frame) MarshalToSizedBuffer

func (m *WindowerSpec_Frame) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*WindowerSpec_Frame) ProtoMessage

func (*WindowerSpec_Frame) ProtoMessage()

func (*WindowerSpec_Frame) Reset

func (m *WindowerSpec_Frame) Reset()

func (*WindowerSpec_Frame) Size

func (m *WindowerSpec_Frame) Size() (n int)

func (*WindowerSpec_Frame) String

func (m *WindowerSpec_Frame) String() string

func (*WindowerSpec_Frame) Unmarshal

func (m *WindowerSpec_Frame) Unmarshal(dAtA []byte) error

func (*WindowerSpec_Frame) XXX_DiscardUnknown

func (m *WindowerSpec_Frame) XXX_DiscardUnknown()

func (*WindowerSpec_Frame) XXX_Marshal

func (m *WindowerSpec_Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WindowerSpec_Frame) XXX_Merge

func (m *WindowerSpec_Frame) XXX_Merge(src proto.Message)

func (*WindowerSpec_Frame) XXX_Size

func (m *WindowerSpec_Frame) XXX_Size() int

func (*WindowerSpec_Frame) XXX_Unmarshal

func (m *WindowerSpec_Frame) XXX_Unmarshal(b []byte) error

type WindowerSpec_Frame_Bound

type WindowerSpec_Frame_Bound struct {
	BoundType WindowerSpec_Frame_BoundType `protobuf:"varint,1,opt,name=boundType,enum=cockroach.sql.distsqlrun.WindowerSpec_Frame_BoundType" json:"boundType"`
	// For UNBOUNDED_PRECEDING, UNBOUNDED_FOLLOWING, and CURRENT_ROW, the
	// offset is ignored. For ROWS and GROUPS modes, the integer offset is
	// stored in int_offset; for RANGE mode, an encoded datum is stored in
	// typed_offset and its type information in offset_type.
	IntOffset   uint64    `protobuf:"varint,2,opt,name=int_offset,json=intOffset" json:"int_offset"`
	TypedOffset []byte    `protobuf:"bytes,3,opt,name=typed_offset,json=typedOffset" json:"typed_offset,omitempty"`
	OffsetType  DatumInfo `protobuf:"bytes,4,opt,name=offset_type,json=offsetType" json:"offset_type"`
}

Bound specifies the type of boundary and the offset (if present).

func (*WindowerSpec_Frame_Bound) Descriptor

func (*WindowerSpec_Frame_Bound) Descriptor() ([]byte, []int)

func (*WindowerSpec_Frame_Bound) Marshal

func (m *WindowerSpec_Frame_Bound) Marshal() (dAtA []byte, err error)

func (*WindowerSpec_Frame_Bound) MarshalTo

func (m *WindowerSpec_Frame_Bound) MarshalTo(dAtA []byte) (int, error)

func (*WindowerSpec_Frame_Bound) MarshalToSizedBuffer

func (m *WindowerSpec_Frame_Bound) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*WindowerSpec_Frame_Bound) ProtoMessage

func (*WindowerSpec_Frame_Bound) ProtoMessage()

func (*WindowerSpec_Frame_Bound) Reset

func (m *WindowerSpec_Frame_Bound) Reset()

func (*WindowerSpec_Frame_Bound) Size

func (m *WindowerSpec_Frame_Bound) Size() (n int)

func (*WindowerSpec_Frame_Bound) String

func (m *WindowerSpec_Frame_Bound) String() string

func (*WindowerSpec_Frame_Bound) Unmarshal

func (m *WindowerSpec_Frame_Bound) Unmarshal(dAtA []byte) error

func (*WindowerSpec_Frame_Bound) XXX_DiscardUnknown

func (m *WindowerSpec_Frame_Bound) XXX_DiscardUnknown()

func (*WindowerSpec_Frame_Bound) XXX_Marshal

func (m *WindowerSpec_Frame_Bound) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WindowerSpec_Frame_Bound) XXX_Merge

func (m *WindowerSpec_Frame_Bound) XXX_Merge(src proto.Message)

func (*WindowerSpec_Frame_Bound) XXX_Size

func (m *WindowerSpec_Frame_Bound) XXX_Size() int

func (*WindowerSpec_Frame_Bound) XXX_Unmarshal

func (m *WindowerSpec_Frame_Bound) XXX_Unmarshal(b []byte) error

type WindowerSpec_Frame_BoundType

type WindowerSpec_Frame_BoundType int32

BoundType indicates which type of boundary is used.

const (
	WindowerSpec_Frame_UNBOUNDED_PRECEDING WindowerSpec_Frame_BoundType = 0
	WindowerSpec_Frame_UNBOUNDED_FOLLOWING WindowerSpec_Frame_BoundType = 1
	// Offsets are stored within Bound.
	WindowerSpec_Frame_OFFSET_PRECEDING WindowerSpec_Frame_BoundType = 2
	WindowerSpec_Frame_OFFSET_FOLLOWING WindowerSpec_Frame_BoundType = 3
	WindowerSpec_Frame_CURRENT_ROW      WindowerSpec_Frame_BoundType = 4
)

func (WindowerSpec_Frame_BoundType) Enum

func (x WindowerSpec_Frame_BoundType) Enum() *WindowerSpec_Frame_BoundType

func (WindowerSpec_Frame_BoundType) EnumDescriptor

func (WindowerSpec_Frame_BoundType) EnumDescriptor() ([]byte, []int)

func (WindowerSpec_Frame_BoundType) String

func (x WindowerSpec_Frame_BoundType) String() string

func (*WindowerSpec_Frame_BoundType) UnmarshalJSON

func (x *WindowerSpec_Frame_BoundType) UnmarshalJSON(data []byte) error

type WindowerSpec_Frame_Bounds

type WindowerSpec_Frame_Bounds struct {
	// Start bound must always be present whereas end bound might be omitted.
	Start WindowerSpec_Frame_Bound  `protobuf:"bytes,1,opt,name=start" json:"start"`
	End   *WindowerSpec_Frame_Bound `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
}

Bounds specifies boundaries of the window frame.

func (*WindowerSpec_Frame_Bounds) Descriptor

func (*WindowerSpec_Frame_Bounds) Descriptor() ([]byte, []int)

func (*WindowerSpec_Frame_Bounds) Marshal

func (m *WindowerSpec_Frame_Bounds) Marshal() (dAtA []byte, err error)

func (*WindowerSpec_Frame_Bounds) MarshalTo

func (m *WindowerSpec_Frame_Bounds) MarshalTo(dAtA []byte) (int, error)

func (*WindowerSpec_Frame_Bounds) MarshalToSizedBuffer

func (m *WindowerSpec_Frame_Bounds) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*WindowerSpec_Frame_Bounds) ProtoMessage

func (*WindowerSpec_Frame_Bounds) ProtoMessage()

func (*WindowerSpec_Frame_Bounds) Reset

func (m *WindowerSpec_Frame_Bounds) Reset()

func (*WindowerSpec_Frame_Bounds) Size

func (m *WindowerSpec_Frame_Bounds) Size() (n int)

func (*WindowerSpec_Frame_Bounds) String

func (m *WindowerSpec_Frame_Bounds) String() string

func (*WindowerSpec_Frame_Bounds) Unmarshal

func (m *WindowerSpec_Frame_Bounds) Unmarshal(dAtA []byte) error

func (*WindowerSpec_Frame_Bounds) XXX_DiscardUnknown

func (m *WindowerSpec_Frame_Bounds) XXX_DiscardUnknown()

func (*WindowerSpec_Frame_Bounds) XXX_Marshal

func (m *WindowerSpec_Frame_Bounds) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WindowerSpec_Frame_Bounds) XXX_Merge

func (m *WindowerSpec_Frame_Bounds) XXX_Merge(src proto.Message)

func (*WindowerSpec_Frame_Bounds) XXX_Size

func (m *WindowerSpec_Frame_Bounds) XXX_Size() int

func (*WindowerSpec_Frame_Bounds) XXX_Unmarshal

func (m *WindowerSpec_Frame_Bounds) XXX_Unmarshal(b []byte) error

type WindowerSpec_Frame_Exclusion

type WindowerSpec_Frame_Exclusion int32

Exclusion specifies the type of frame exclusion.

const (
	WindowerSpec_Frame_NO_EXCLUSION        WindowerSpec_Frame_Exclusion = 0
	WindowerSpec_Frame_EXCLUDE_CURRENT_ROW WindowerSpec_Frame_Exclusion = 1
	WindowerSpec_Frame_EXCLUDE_GROUP       WindowerSpec_Frame_Exclusion = 2
	WindowerSpec_Frame_EXCLUDE_TIES        WindowerSpec_Frame_Exclusion = 3
)

func (WindowerSpec_Frame_Exclusion) Enum

func (x WindowerSpec_Frame_Exclusion) Enum() *WindowerSpec_Frame_Exclusion

func (WindowerSpec_Frame_Exclusion) EnumDescriptor

func (WindowerSpec_Frame_Exclusion) EnumDescriptor() ([]byte, []int)

func (WindowerSpec_Frame_Exclusion) String

func (x WindowerSpec_Frame_Exclusion) String() string

func (*WindowerSpec_Frame_Exclusion) UnmarshalJSON

func (x *WindowerSpec_Frame_Exclusion) UnmarshalJSON(data []byte) error

type WindowerSpec_Frame_Mode

type WindowerSpec_Frame_Mode int32

Mode indicates which mode of framing is used.

const (
	// RANGE specifies frame in terms of logical range (e.g. 1 unit cheaper).
	WindowerSpec_Frame_RANGE WindowerSpec_Frame_Mode = 0
	// ROWS specifies frame in terms of physical offsets (e.g. 1 row before).
	WindowerSpec_Frame_ROWS WindowerSpec_Frame_Mode = 1
	// GROUPS specifies frame in terms of peer groups (where "peers" mean
	// rows not distinct in the ordering columns).
	WindowerSpec_Frame_GROUPS WindowerSpec_Frame_Mode = 2
)

func (WindowerSpec_Frame_Mode) Enum

func (x WindowerSpec_Frame_Mode) Enum() *WindowerSpec_Frame_Mode

func (WindowerSpec_Frame_Mode) EnumDescriptor

func (WindowerSpec_Frame_Mode) EnumDescriptor() ([]byte, []int)

func (WindowerSpec_Frame_Mode) String

func (x WindowerSpec_Frame_Mode) String() string

func (*WindowerSpec_Frame_Mode) UnmarshalJSON

func (x *WindowerSpec_Frame_Mode) UnmarshalJSON(data []byte) error

type WindowerSpec_Func

type WindowerSpec_Func struct {
	AggregateFunc *AggregatorSpec_Func     `protobuf:"varint,1,opt,name=aggregateFunc,enum=cockroach.sql.distsqlrun.AggregatorSpec_Func" json:"aggregateFunc,omitempty"`
	WindowFunc    *WindowerSpec_WindowFunc `protobuf:"varint,2,opt,name=windowFunc,enum=cockroach.sql.distsqlrun.WindowerSpec_WindowFunc" json:"windowFunc,omitempty"`
}

Func specifies which function to compute. It can be either a built-in aggregate function or a built-in window function.

func (*WindowerSpec_Func) Descriptor

func (*WindowerSpec_Func) Descriptor() ([]byte, []int)

func (*WindowerSpec_Func) GetValue

func (this *WindowerSpec_Func) GetValue() interface{}

func (*WindowerSpec_Func) Marshal

func (m *WindowerSpec_Func) Marshal() (dAtA []byte, err error)

func (*WindowerSpec_Func) MarshalTo

func (m *WindowerSpec_Func) MarshalTo(dAtA []byte) (int, error)

func (*WindowerSpec_Func) MarshalToSizedBuffer

func (m *WindowerSpec_Func) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*WindowerSpec_Func) ProtoMessage

func (*WindowerSpec_Func) ProtoMessage()

func (*WindowerSpec_Func) Reset

func (m *WindowerSpec_Func) Reset()

func (*WindowerSpec_Func) SetValue

func (this *WindowerSpec_Func) SetValue(value interface{}) bool

func (*WindowerSpec_Func) Size

func (m *WindowerSpec_Func) Size() (n int)

func (*WindowerSpec_Func) String

func (m *WindowerSpec_Func) String() string

func (*WindowerSpec_Func) Unmarshal

func (m *WindowerSpec_Func) Unmarshal(dAtA []byte) error

func (*WindowerSpec_Func) XXX_DiscardUnknown

func (m *WindowerSpec_Func) XXX_DiscardUnknown()

func (*WindowerSpec_Func) XXX_Marshal

func (m *WindowerSpec_Func) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WindowerSpec_Func) XXX_Merge

func (m *WindowerSpec_Func) XXX_Merge(src proto.Message)

func (*WindowerSpec_Func) XXX_Size

func (m *WindowerSpec_Func) XXX_Size() int

func (*WindowerSpec_Func) XXX_Unmarshal

func (m *WindowerSpec_Func) XXX_Unmarshal(b []byte) error

type WindowerSpec_WindowFn

type WindowerSpec_WindowFn struct {
	// Func is which function to compute.
	Func WindowerSpec_Func `protobuf:"bytes,1,opt,name=func" json:"func"`
	// ArgsIdxs contains indices of the columns that are arguments to the
	// window function.
	ArgsIdxs []uint32 `protobuf:"varint,7,rep,name=argsIdxs" json:"argsIdxs,omitempty"`
	// Ordering specifies in which order rows should be considered by this
	// window function. Its contents come from ORDER BY clause of the window
	// function.
	Ordering Ordering `protobuf:"bytes,4,opt,name=ordering" json:"ordering"`
	// Frame specifies over which frame this window function is computed.
	Frame *WindowerSpec_Frame `protobuf:"bytes,5,opt,name=frame" json:"frame,omitempty"`
	// Optional index of a column over which filtering of rows will be done.
	// Special value -1 indicates that filter is not present.
	FilterColIdx int32 `protobuf:"varint,6,opt,name=filterColIdx" json:"filterColIdx"`
	// OutputColIdx specifies the column index which the window function should
	// put its output into.
	OutputColIdx uint32 `protobuf:"varint,8,opt,name=outputColIdx" json:"outputColIdx"`
}

WindowFn is the specification of a single window function.

func (*WindowerSpec_WindowFn) Descriptor

func (*WindowerSpec_WindowFn) Descriptor() ([]byte, []int)

func (*WindowerSpec_WindowFn) Marshal

func (m *WindowerSpec_WindowFn) Marshal() (dAtA []byte, err error)

func (*WindowerSpec_WindowFn) MarshalTo

func (m *WindowerSpec_WindowFn) MarshalTo(dAtA []byte) (int, error)

func (*WindowerSpec_WindowFn) MarshalToSizedBuffer

func (m *WindowerSpec_WindowFn) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*WindowerSpec_WindowFn) ProtoMessage

func (*WindowerSpec_WindowFn) ProtoMessage()

func (*WindowerSpec_WindowFn) Reset

func (m *WindowerSpec_WindowFn) Reset()

func (*WindowerSpec_WindowFn) Size

func (m *WindowerSpec_WindowFn) Size() (n int)

func (*WindowerSpec_WindowFn) String

func (m *WindowerSpec_WindowFn) String() string

func (*WindowerSpec_WindowFn) Unmarshal

func (m *WindowerSpec_WindowFn) Unmarshal(dAtA []byte) error

func (*WindowerSpec_WindowFn) XXX_DiscardUnknown

func (m *WindowerSpec_WindowFn) XXX_DiscardUnknown()

func (*WindowerSpec_WindowFn) XXX_Marshal

func (m *WindowerSpec_WindowFn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WindowerSpec_WindowFn) XXX_Merge

func (m *WindowerSpec_WindowFn) XXX_Merge(src proto.Message)

func (*WindowerSpec_WindowFn) XXX_Size

func (m *WindowerSpec_WindowFn) XXX_Size() int

func (*WindowerSpec_WindowFn) XXX_Unmarshal

func (m *WindowerSpec_WindowFn) XXX_Unmarshal(b []byte) error

type WindowerSpec_WindowFunc

type WindowerSpec_WindowFunc int32
const (
	// These mirror window functions from window_builtins.go.
	WindowerSpec_ROW_NUMBER   WindowerSpec_WindowFunc = 0
	WindowerSpec_RANK         WindowerSpec_WindowFunc = 1
	WindowerSpec_DENSE_RANK   WindowerSpec_WindowFunc = 2
	WindowerSpec_PERCENT_RANK WindowerSpec_WindowFunc = 3
	WindowerSpec_CUME_DIST    WindowerSpec_WindowFunc = 4
	WindowerSpec_NTILE        WindowerSpec_WindowFunc = 5
	WindowerSpec_LAG          WindowerSpec_WindowFunc = 6
	WindowerSpec_LEAD         WindowerSpec_WindowFunc = 7
	WindowerSpec_FIRST_VALUE  WindowerSpec_WindowFunc = 8
	WindowerSpec_LAST_VALUE   WindowerSpec_WindowFunc = 9
	WindowerSpec_NTH_VALUE    WindowerSpec_WindowFunc = 10
)

func (WindowerSpec_WindowFunc) Enum

func (x WindowerSpec_WindowFunc) Enum() *WindowerSpec_WindowFunc

func (WindowerSpec_WindowFunc) EnumDescriptor

func (WindowerSpec_WindowFunc) EnumDescriptor() ([]byte, []int)

func (WindowerSpec_WindowFunc) String

func (x WindowerSpec_WindowFunc) String() string

func (*WindowerSpec_WindowFunc) UnmarshalJSON

func (x *WindowerSpec_WindowFunc) UnmarshalJSON(data []byte) error

type ZigzagJoinerSpec

type ZigzagJoinerSpec struct {
	// TODO(pbardea): Replace these with inputs that conform to a RowSource-like
	// interface.
	Tables []descpb.TableDescriptor `protobuf:"bytes,1,rep,name=tables" json:"tables"`
	// An array of arrays. The array at eq_columns[side_idx] contains the
	// equality columns for that side. All arrays in eq_columns should have
	// equal length.
	EqColumns []Columns `protobuf:"bytes,2,rep,name=eq_columns,json=eqColumns" json:"eq_columns"`
	// Each value indicates an index: if 0, primary index; otherwise the n-th
	// secondary index, i.e. tables[side_idx].indexes[index_ordinals[side_idx]].
	IndexOrdinals []uint32 `protobuf:"varint,3,rep,packed,name=index_ordinals,json=indexOrdinals" json:"index_ordinals,omitempty"`
	// "ON" expression (in addition to the equality constraints captured by the
	// orderings). Assuming that the left stream has N columns and the right
	// stream has M columns, in this expression ordinal references @1 to @N refer
	// to columns of the left stream and variables @(N+1) to @(N+M) refer to
	// columns in the right stream.
	OnExpr Expression `protobuf:"bytes,4,opt,name=on_expr,json=onExpr" json:"on_expr"`
	// Fixed values at the start of indices.
	FixedValues []*ValuesCoreSpec `protobuf:"bytes,5,rep,name=fixed_values,json=fixedValues" json:"fixed_values,omitempty"`
	Type        descpb.JoinType   `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.sqlbase.JoinType" json:"type"`
}

ZigzagJoinerSpec is the specification for a zigzag join processor. The processor's current implementation fetches the rows using internal rowFetchers.

The "internal columns" of a ZigzagJoiner (see ProcessorSpec) are the concatenation of all of the columns of the tables specified. The columns are populated if they are contained in the index specified for that table.

func (*ZigzagJoinerSpec) Descriptor

func (*ZigzagJoinerSpec) Descriptor() ([]byte, []int)

func (*ZigzagJoinerSpec) Marshal

func (m *ZigzagJoinerSpec) Marshal() (dAtA []byte, err error)

func (*ZigzagJoinerSpec) MarshalTo

func (m *ZigzagJoinerSpec) MarshalTo(dAtA []byte) (int, error)

func (*ZigzagJoinerSpec) MarshalToSizedBuffer

func (m *ZigzagJoinerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*ZigzagJoinerSpec) ProtoMessage

func (*ZigzagJoinerSpec) ProtoMessage()

func (*ZigzagJoinerSpec) Reset

func (m *ZigzagJoinerSpec) Reset()

func (*ZigzagJoinerSpec) Size

func (m *ZigzagJoinerSpec) Size() (n int)

func (*ZigzagJoinerSpec) String

func (m *ZigzagJoinerSpec) String() string

func (*ZigzagJoinerSpec) Unmarshal

func (m *ZigzagJoinerSpec) Unmarshal(dAtA []byte) error

func (*ZigzagJoinerSpec) XXX_DiscardUnknown

func (m *ZigzagJoinerSpec) XXX_DiscardUnknown()

func (*ZigzagJoinerSpec) XXX_Marshal

func (m *ZigzagJoinerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ZigzagJoinerSpec) XXX_Merge

func (m *ZigzagJoinerSpec) XXX_Merge(src proto.Message)

func (*ZigzagJoinerSpec) XXX_Size

func (m *ZigzagJoinerSpec) XXX_Size() int

func (*ZigzagJoinerSpec) XXX_Unmarshal

func (m *ZigzagJoinerSpec) XXX_Unmarshal(b []byte) error
