From 8251e4d616a34ac4bed6d002586a3cbeef348a44 Mon Sep 17 00:00:00 2001 From: Samuel Williams Date: Fri, 16 Jan 2026 17:51:45 +1300 Subject: [PATCH 1/6] Design document for xds. --- xds.md | 843 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 843 insertions(+) create mode 100644 xds.md diff --git a/xds.md b/xds.md new file mode 100644 index 0000000..14fb4fa --- /dev/null +++ b/xds.md @@ -0,0 +1,843 @@ +# xDS Support for Async::GRPC + +This document outlines the design and implementation of xDS (Discovery Service) support for `async-grpc`, enabling dynamic service discovery and configuration for gRPC clients and servers. + +## Overview + +xDS is a set of discovery APIs originally created for Envoy proxy and now adopted as a standard for dynamic configuration in gRPC and other systems. It provides a unified mechanism for service mesh control planes to configure data planes. + +### What is xDS? + +xDS consists of multiple discovery service APIs: + +- **LDS** (Listener Discovery Service) - Defines what ports/protocols to listen on +- **RDS** (Route Discovery Service) - Defines how requests are routed +- **CDS** (Cluster Discovery Service) - Defines logical upstream services +- **EDS** (Endpoint Discovery Service) - Defines actual IP:port backends +- **SDS** (Secret Discovery Service) - Distributes certificates and keys +- **TDS** (Transport Discovery Service) - Configures transport sockets +- **ECDS** (Extension Config Discovery Service) - Distributes extension configurations + +### Why xDS for gRPC? + +1. **Dynamic Service Discovery** - Discover backends without hardcoded addresses +2. **Load Balancing** - Intelligent client-side load balancing with health checking +3. **Traffic Management** - Sophisticated routing, retries, timeouts +4. **Security** - Dynamic certificate distribution and mTLS configuration +5. **Observability** - Standardized metrics and tracing integration +6. 
**Service Mesh Integration** - Compatible with Istio, Linkerd, etc. + +## Architecture + +### URI Scheme + +xDS endpoints use a special URI scheme: + +```ruby +# Basic xDS endpoint +endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice") + +# With explicit control plane +endpoint = Async::GRPC::XDS::Endpoint.parse("xds://control-plane.example.com/myservice") + +# With bootstrap configuration +endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice", bootstrap: "/path/to/bootstrap.json") +``` + +### Component Structure + +``` +Async::GRPC::XDS +├── Endpoint # Main entry point for xDS-enabled connections +├── Context # Manages xDS state and subscriptions +├── Client # xDS API client (ADS or individual xDS APIs) +├── ResourceCache # Caches discovered resources +├── LoadBalancer # Client-side load balancing +├── HealthChecker # Endpoint health checking +└── Resources # Resource data models + ├── Listener + ├── RouteConfiguration + ├── Cluster + ├── ClusterLoadAssignment + └── Secret +``` + +## Core Components + +### 1. 
`Async::GRPC::XDS::Endpoint` + +The main entry point that wraps standard endpoints with xDS capabilities: + +```ruby +module Async + module GRPC + module XDS + class Endpoint + # Parse an xDS URI into an endpoint + # @parameter uri [String] xDS URI (e.g., "xds:///myservice") + # @parameter bootstrap [String, Hash, nil] Bootstrap config file path or hash + # @returns [Endpoint] xDS-enabled endpoint + def self.parse(uri, bootstrap: nil) + # Parse xDS URI + # Load bootstrap configuration + # Create endpoint instance + end + + # Initialize with parsed configuration + # @parameter service_name [String] Target service name + # @parameter control_plane [URI, nil] Control plane endpoint + # @parameter bootstrap [Hash, nil] Bootstrap configuration + def initialize(service_name, control_plane: nil, bootstrap: nil) + @service_name = service_name + @control_plane = control_plane + @bootstrap = bootstrap || load_default_bootstrap + @context = Context.new(self, @bootstrap) + end + + # Connect to the service using xDS-discovered endpoints + # @yields [Async::HTTP::Endpoint] Individual backend endpoint + # @returns [Array] Available endpoints + def connect(&block) + @context.resolve_endpoints(@service_name, &block) + end + + # Get load balancer for this endpoint + # @returns [LoadBalancer] Configured load balancer + def load_balancer + @context.load_balancer_for(@service_name) + end + + # Close xDS subscriptions and cleanup + def close + @context.close + end + end + end + end +end +``` + +### 2. 
`Async::GRPC::XDS::Context` + +Manages xDS subscriptions and maintains discovered resource state: + +```ruby +module Async + module GRPC + module XDS + class Context + # Initialize xDS context + # @parameter endpoint [Endpoint] Parent endpoint + # @parameter bootstrap [Hash] Bootstrap configuration + def initialize(endpoint, bootstrap) + @endpoint = endpoint + @bootstrap = bootstrap + @client = Client.new(bootstrap["xds_servers"].first) + @cache = ResourceCache.new + @subscriptions = {} + @load_balancers = {} + end + + # Resolve endpoints for a service + # @parameter service_name [String] Service to resolve + # @yields [Async::HTTP::Endpoint] Each discovered endpoint + # @returns [Array] All available endpoints + def resolve_endpoints(service_name, &block) + # Subscribe to CDS for cluster discovery + cluster = discover_cluster(service_name) + + # Subscribe to EDS for endpoint discovery + endpoints = discover_endpoints(cluster) + + # Filter healthy endpoints + healthy_endpoints = filter_healthy(endpoints) + + if block_given? 
+ healthy_endpoints.each(&block) + end + + healthy_endpoints + end + + # Get or create load balancer for service + # @parameter service_name [String] Service name + # @returns [LoadBalancer] Load balancer instance + def load_balancer_for(service_name) + @load_balancers[service_name] ||= begin + cluster = @cache.get_cluster(service_name) + LoadBalancer.new(self, cluster) + end + end + + # Subscribe to resource updates + # @parameter type_url [String] xDS resource type URL + # @parameter resource_names [Array] Resource names to watch + # @yields [Resource] Updated resources + def subscribe(type_url, resource_names, &block) + @subscriptions[type_url] ||= {} + resource_names.each do |name| + @subscriptions[type_url][name] = block + end + + @client.subscribe(type_url, resource_names) + end + + # Close all subscriptions + def close + @client.close + @load_balancers.each_value(&:close) + end + + private + + def discover_cluster(service_name) + # Implement CDS (Cluster Discovery Service) + end + + def discover_endpoints(cluster) + # Implement EDS (Endpoint Discovery Service) + end + + def filter_healthy(endpoints) + # Filter based on health checks + end + end + end + end +end +``` + +### 3. 
`Async::GRPC::XDS::Client` + +Communicates with xDS control plane: + +```ruby +module Async + module GRPC + module XDS + # Client for xDS APIs (ADS or individual APIs) + class Client + # xDS API type URLs + LISTENER_TYPE = "type.googleapis.com/envoy.config.listener.v3.Listener" + ROUTE_TYPE = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" + CLUSTER_TYPE = "type.googleapis.com/envoy.config.cluster.v3.Cluster" + ENDPOINT_TYPE = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" + SECRET_TYPE = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" + + # Initialize xDS client + # @parameter server_config [Hash] xDS server configuration from bootstrap + def initialize(server_config) + @server_uri = server_config["server_uri"] + @channel_creds = build_credentials(server_config) + @node = build_node_info + @streams = {} + end + + # Subscribe to resource type using ADS + # (Aggregated Discovery Service - single stream for all types) + # @parameter type_url [String] Resource type URL + # @parameter resource_names [Array] Resources to subscribe to + # @yields [Resource] Updated resources + def subscribe(type_url, resource_names) + stream = get_or_create_stream + + request = build_discovery_request( + type_url: type_url, + resource_names: resource_names, + version_info: @versions[type_url] || "", + nonce: @nonces[type_url] || "" + ) + + stream.write(request) + + # Process responses asynchronously + Async do + stream.each do |response| + process_response(response, &Proc.new) + end + end + end + + # Close xDS client + def close + @streams.each_value(&:close) + end + + private + + def get_or_create_stream + @streams[:ads] ||= create_ads_stream + end + + def create_ads_stream + # Create bidirectional streaming RPC to ADS + endpoint = Async::HTTP::Endpoint.parse(@server_uri) + grpc_client = Async::GRPC::Client.open(endpoint) + + # Use envoy.service.discovery.v3.AggregatedDiscoveryService + interface = 
AggregatedDiscoveryServiceInterface.new( + "envoy.service.discovery.v3.AggregatedDiscoveryService" + ) + stub = grpc_client.stub(interface) + + stub.stream_aggregated_resources + end + + def build_discovery_request(type_url:, resource_names:, version_info:, nonce:) + # Build DiscoveryRequest protobuf + Envoy::Service::Discovery::V3::DiscoveryRequest.new( + version_info: version_info, + node: @node, + resource_names: resource_names, + type_url: type_url, + response_nonce: nonce + ) + end + + def build_node_info + # Build node identification for xDS server + Envoy::Config::Core::V3::Node.new( + id: generate_node_id, + cluster: ENV["XDS_CLUSTER"] || "default", + metadata: build_metadata, + locality: build_locality + ) + end + + def process_response(response) + # Parse and validate response + # Update version and nonce + # Deserialize resources + # Yield to subscribers + end + end + end + end +end +``` + +### 4. `Async::GRPC::XDS::LoadBalancer` + +Client-side load balancing with health checking: + +```ruby +module Async + module GRPC + module XDS + class LoadBalancer + # Load balancing policies + ROUND_ROBIN = :round_robin + LEAST_REQUEST = :least_request + RANDOM = :random + RING_HASH = :ring_hash + MAGLEV = :maglev + + # Initialize load balancer + # @parameter context [Context] xDS context + # @parameter cluster [Resources::Cluster] Cluster configuration + def initialize(context, cluster) + @context = context + @cluster = cluster + @policy = parse_policy(cluster.lb_policy) + @endpoints = [] + @health_checker = HealthChecker.new(cluster.health_checks) + @current_index = 0 + + # Subscribe to endpoint updates + watch_endpoints + end + + # Pick next endpoint using load balancing policy + # @returns [Async::HTTP::Endpoint, nil] Selected endpoint + def pick + healthy = @endpoints.select { |ep| @health_checker.healthy?(ep) } + return nil if healthy.empty? 
+ + case @policy + when ROUND_ROBIN + pick_round_robin(healthy) + when LEAST_REQUEST + pick_least_request(healthy) + when RANDOM + pick_random(healthy) + when RING_HASH + pick_ring_hash(healthy) + else + healthy.first + end + end + + # Update endpoints from EDS + # @parameter endpoints [Array] New endpoints + def update_endpoints(endpoints) + @endpoints = endpoints.map { |ep| build_http_endpoint(ep) } + @health_checker.update_endpoints(@endpoints) + end + + # Close load balancer + def close + @health_checker.close + end + + private + + def pick_round_robin(endpoints) + @current_index = (@current_index + 1) % endpoints.size + endpoints[@current_index] + end + + def pick_least_request(endpoints) + # Track in-flight requests and pick endpoint with fewest + endpoints.min_by { |ep| in_flight_count(ep) } + end + + def pick_random(endpoints) + endpoints.sample + end + + def pick_ring_hash(endpoints) + # Consistent hashing implementation + end + + def watch_endpoints + @context.subscribe(Client::ENDPOINT_TYPE, [@cluster.name]) do |assignment| + update_endpoints(assignment.endpoints) + end + end + end + end + end +end +``` + +### 5. Resource Data Models + +```ruby +module Async + module GRPC + module XDS + module Resources + # Represents a discovered cluster + class Cluster + attr_reader :name, :type, :lb_policy, :health_checks, :circuit_breakers + + def initialize(proto) + @name = proto.name + @type = proto.type + @lb_policy = proto.lb_policy + @health_checks = proto.health_checks + @circuit_breakers = proto.circuit_breakers + end + + def eds_cluster? 
+ @type == :EDS + end + end + + # Represents endpoint assignment + class ClusterLoadAssignment + attr_reader :cluster_name, :endpoints + + def initialize(proto) + @cluster_name = proto.cluster_name + @endpoints = proto.endpoints.flat_map do |locality_endpoints| + locality_endpoints.lb_endpoints.map { |lb_ep| Endpoint.new(lb_ep) } + end + end + end + + # Represents a single endpoint + class Endpoint + attr_reader :address, :port, :health_status, :metadata + + def initialize(lb_endpoint) + socket_address = lb_endpoint.endpoint.address.socket_address + @address = socket_address.address + @port = socket_address.port_value + @health_status = lb_endpoint.health_status + @metadata = lb_endpoint.metadata + end + + def healthy? + @health_status == :HEALTHY || @health_status == :UNKNOWN + end + + def uri + "http://#{@address}:#{@port}" + end + end + + # Represents a listener configuration + class Listener + attr_reader :name, :address, :filter_chains + + def initialize(proto) + @name = proto.name + @address = proto.address + @filter_chains = proto.filter_chains + end + end + + # Represents route configuration + class RouteConfiguration + attr_reader :name, :virtual_hosts + + def initialize(proto) + @name = proto.name + @virtual_hosts = proto.virtual_hosts.map { |vh| VirtualHost.new(vh) } + end + end + + class VirtualHost + attr_reader :name, :domains, :routes + + def initialize(proto) + @name = proto.name + @domains = proto.domains + @routes = proto.routes + end + end + end + end + end +end +``` + +## Bootstrap Configuration + +xDS clients require a bootstrap configuration that specifies control plane details: + +```json +{ + "xds_servers": [ + { + "server_uri": "xds.example.com:443", + "channel_creds": [ + { + "type": "google_default" + } + ], + "server_features": ["xds_v3"] + } + ], + "node": { + "id": "async-grpc-client-001", + "cluster": "production", + "locality": { + "zone": "us-central1-a" + }, + "metadata": { + "TRAFFICDIRECTOR_GCP_PROJECT_NUMBER": "123456789" + } + 
}, + "certificate_providers": { + "default": { + "plugin_name": "file_watcher", + "config": { + "certificate_file": "/path/to/cert.pem", + "private_key_file": "/path/to/key.pem", + "ca_certificate_file": "/path/to/ca.pem", + "refresh_interval": "600s" + } + } + } +} +``` + +Bootstrap can be loaded from: +1. Explicit parameter to `Endpoint.parse` +2. Environment variable `GRPC_XDS_BOOTSTRAP` +3. Default file location `~/.config/grpc/bootstrap.json` + +## Integration with Async::GRPC::Client + +```ruby +module Async + module GRPC + class Client + # Enhanced to support xDS endpoints + def self.open(endpoint = self::ENDPOINT, headers: Protocol::HTTP::Headers.new, **options) + # Check if endpoint is xDS-enabled + if endpoint.is_a?(XDS::Endpoint) + # Use xDS load balancer to select backend + lb = endpoint.load_balancer + backend_endpoint = lb.pick + + # Create client with selected backend + client = connect(backend_endpoint) + grpc_client = new(client, headers: headers, **options) + + # Wrap with load balancer for automatic failover + XDS::BalancedClient.new(grpc_client, lb) + else + # Standard endpoint handling + endpoint = Async::HTTP::Endpoint.parse(endpoint) if endpoint.is_a?(String) + client = connect(endpoint) + new(client, headers: headers, **options) + end + end + end + end +end +``` + +## Usage Examples + +### Basic Service Discovery + +```ruby +require "async/grpc" +require "async/grpc/xds" + +# Parse xDS endpoint +endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice") + +# Create client - automatically uses xDS for discovery +Async::GRPC::Client.open(endpoint) do |client| + stub = client.stub(MyServiceInterface, "myservice") + + # Make calls - automatically load balanced across discovered endpoints + response = stub.my_method(request) + puts response.message +end +``` + +### Manual Endpoint Resolution + +```ruby +require "async/grpc/xds" + +endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice") + +# Get all healthy endpoints +endpoints = 
endpoint.connect + +endpoints.each do |backend| + puts "Available backend: #{backend.authority}" +end + +# Use load balancer directly +lb = endpoint.load_balancer + +10.times do + backend = lb.pick + puts "Selected: #{backend.authority}" +end +``` + +### With Custom Bootstrap + +```ruby +bootstrap = { + "xds_servers" => [ + { + "server_uri" => "control-plane.example.com:443", + "channel_creds" => [{"type" => "google_default"}] + } + ], + "node" => { + "id" => "my-app-instance-001", + "cluster" => "production" + } +} + +endpoint = Async::GRPC::XDS::Endpoint.parse( + "xds:///myservice", + bootstrap: bootstrap +) + +Async::GRPC::Client.open(endpoint) do |client| + # Use client normally +end +``` + +### Server-Side xDS (Listener Discovery) + +```ruby +require "async/grpc/xds" + +# Create xDS-enabled server +xds_config = Async::GRPC::XDS::ServerConfig.parse("xds:///myserver") + +Async do + # Server configuration discovered via LDS + listeners = xds_config.listeners + + listeners.each do |listener| + endpoint = Async::HTTP::Endpoint.parse(listener.uri) + server = Async::HTTP::Server.for(endpoint, dispatcher) + + Async do + server.run + end + end +end +``` + +## Implementation Phases + +### Phase 1: Core Infrastructure +- [ ] Parse xDS URIs +- [ ] Bootstrap configuration loading +- [ ] Basic `XDS::Endpoint` implementation +- [ ] `XDS::Context` for state management +- [ ] `XDS::ResourceCache` for discovered resources + +### Phase 2: Discovery Services +- [ ] `XDS::Client` with ADS support +- [ ] CDS (Cluster Discovery) implementation +- [ ] EDS (Endpoint Discovery) implementation +- [ ] Resource subscription and updates +- [ ] Version tracking and ACK/NACK + +### Phase 3: Load Balancing +- [ ] `XDS::LoadBalancer` base implementation +- [ ] Round-robin policy +- [ ] Least-request policy +- [ ] Random policy +- [ ] Ring-hash/consistent hashing +- [ ] Maglev policy + +### Phase 4: Health Checking +- [ ] `XDS::HealthChecker` implementation +- [ ] HTTP health checks +- [ ] gRPC 
health checks +- [ ] Health status aggregation +- [ ] Active/passive health checking + +### Phase 5: Advanced Features +- [ ] LDS (Listener Discovery) for servers +- [ ] RDS (Route Discovery) for routing +- [ ] SDS (Secret Discovery) for mTLS +- [ ] Circuit breakers +- [ ] Retry policies +- [ ] Timeout configuration +- [ ] Rate limiting + +### Phase 6: Integration +- [ ] Integration with `Async::GRPC::Client` +- [ ] Integration with `Async::GRPC::Dispatcher` +- [ ] Interceptor support +- [ ] Observability (metrics, tracing) +- [ ] Testing utilities + +## Standards and Specifications + +### xDS Protocol Specifications +- [xDS REST and gRPC protocol](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol) +- [Universal Data Plane API (UDPA)](https://github.com/cncf/xds) +- [gRFC A27: xDS-Based Global Load Balancing](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md) +- [gRFC A28: xDS Traffic Splitting and Routing](https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md) + +### Protobuf Definitions +- [envoy.config.listener.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/listener/v3/listener.proto) +- [envoy.config.route.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route.proto) +- [envoy.config.cluster.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto) +- [envoy.config.endpoint.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/endpoint/v3/endpoint.proto) +- [envoy.service.discovery.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/discovery/v3/discovery.proto) + +### Compatible Systems +- **Google Cloud Traffic Director** - Managed xDS control plane +- **Istio** - Service mesh with xDS control plane +- **Linkerd** - Service mesh with xDS support +- **Consul Connect** - Service mesh with xDS API +- **Envoy Proxy** - Reference xDS implementation + +## Testing Strategy + +### Unit Tests +- URI parsing 
and validation +- Bootstrap configuration loading +- Resource deserialization +- Load balancing algorithms +- Health checking logic + +### Integration Tests +- Mock xDS control plane +- Full discovery flow (CDS + EDS) +- Load balancer endpoint selection +- Health check state transitions +- Resource update handling + +### System Tests +- Integration with Google Cloud Traffic Director +- Integration with Istio +- Multi-endpoint failover scenarios +- Load balancing distribution +- Health check integration + +## Security Considerations + +### Authentication +- Support for Google Default Credentials +- Support for mTLS with SDS +- Support for OAuth2 tokens +- Channel credential configuration + +### Authorization +- RBAC integration via xDS +- Resource filtering by permissions +- Secure communication with control plane + +### Certificate Management +- Dynamic certificate rotation via SDS +- Certificate validation +- CRL/OCSP checking +- Certificate provider plugins + +## Performance Considerations + +### Resource Caching +- Cache discovered resources locally +- Version-based cache invalidation +- Memory-efficient resource storage + +### Connection Pooling +- Reuse HTTP/2 connections to backends +- Connection pool per endpoint +- Idle connection cleanup + +### Async Operations +- Non-blocking xDS subscriptions +- Async health checks +- Parallel endpoint discovery + +## Open Questions + +1. **Incremental vs. State-of-the-World** - Which xDS update mode to use? + - Incremental allows selective updates + - State-of-the-world is simpler but more bandwidth + +2. **Control Plane Failover** - How to handle control plane unavailability? + - Cache last known good configuration + - Fall back to static configuration + - Multiple control plane endpoints + +3. **Server-Side xDS** - Priority for server features? + - LDS for dynamic listener configuration + - RDS for advanced routing + - Integration with existing `Dispatcher` + +4. **Protobuf Dependencies** - How to handle Envoy protos? 
+ - Bundle pre-generated Ruby protos + - Generate from .proto files at build time + - Separate gem for Envoy proto definitions + +5. **Backwards Compatibility** - How to maintain compatibility? + - Make xDS optional dependency + - Graceful degradation without xDS + - Clear migration path from static to dynamic + +## Related Work + +- [grpc-go xDS implementation](https://github.com/grpc/grpc-go/tree/master/xds) +- [grpc-java xDS implementation](https://github.com/grpc/grpc-java/tree/master/xds) +- [Envoy data plane implementation](https://github.com/envoyproxy/envoy) +- [go-control-plane](https://github.com/envoyproxy/go-control-plane) - Reference control plane + +## References + +- [gRPC xDS Documentation](https://grpc.io/docs/guides/xds/) +- [Envoy xDS Documentation](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/operations/dynamic_configuration) +- [Traffic Director Documentation](https://cloud.google.com/traffic-director/docs) +- [CNCF xDS API Working Group](https://github.com/cncf/xds) From e95816bfca7692fc208860ade01f25768c5d48cf Mon Sep 17 00:00:00 2001 From: Samuel Williams Date: Fri, 16 Jan 2026 18:16:26 +1300 Subject: [PATCH 2/6] Add updates from composer-1 and async-redis design. 
--- xds.md | 1151 ++++++++++++++++++++++------- xds/Dockerfile.backend | 21 + xds/backend_server.rb | 66 ++ xds/docker-compose.yaml | 80 ++ xds/readme.md | 98 +++ xds/test/async/grpc/xds/client.rb | 124 ++++ 6 files changed, 1272 insertions(+), 268 deletions(-) create mode 100644 xds/Dockerfile.backend create mode 100644 xds/backend_server.rb create mode 100644 xds/docker-compose.yaml create mode 100644 xds/readme.md create mode 100644 xds/test/async/grpc/xds/client.rb diff --git a/xds.md b/xds.md index 14fb4fa..5948937 100644 --- a/xds.md +++ b/xds.md @@ -1,6 +1,6 @@ # xDS Support for Async::GRPC -This document outlines the design and implementation of xDS (Discovery Service) support for `async-grpc`, enabling dynamic service discovery and configuration for gRPC clients and servers. +This document outlines the design and implementation of xDS (Discovery Service) support for `async-grpc`, enabling dynamic service discovery and configuration for gRPC clients. The design follows patterns established in `async-redis` (SentinelClient and ClusterClient) for service discovery and load balancing. ## Overview @@ -29,32 +29,28 @@ xDS consists of multiple discovery service APIs: ## Architecture -### URI Scheme +### Design Pattern: Wrapper Client (Like SentinelClient/ClusterClient) -xDS endpoints use a special URI scheme: +Following the pattern from `async-redis`, xDS support is implemented as a **wrapper client** that handles discovery and load balancing, rather than modifying the base `Async::GRPC::Client` class. 
-```ruby -# Basic xDS endpoint -endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice") - -# With explicit control plane -endpoint = Async::GRPC::XDS::Endpoint.parse("xds://control-plane.example.com/myservice") - -# With bootstrap configuration -endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice", bootstrap: "/path/to/bootstrap.json") -``` +**Key Principles:** +- `XDS::Client` wraps `Async::GRPC::Client` instances +- Implements `Protocol::HTTP::Middleware` interface (same as `Async::GRPC::Client`) +- Lazy endpoint resolution (resolved on first use) +- Client caching per endpoint (reuse connections) +- Error handling with cache invalidation and retry ### Component Structure ``` Async::GRPC::XDS -├── Endpoint # Main entry point for xDS-enabled connections -├── Context # Manages xDS state and subscriptions -├── Client # xDS API client (ADS or individual xDS APIs) -├── ResourceCache # Caches discovered resources -├── LoadBalancer # Client-side load balancing -├── HealthChecker # Endpoint health checking -└── Resources # Resource data models +├── Client # Main wrapper client (like SentinelClient) +├── Context # Manages xDS state and subscriptions +├── DiscoveryClient # xDS API client (ADS or individual xDS APIs) +├── ResourceCache # Caches discovered resources +├── LoadBalancer # Client-side load balancing +├── HealthChecker # Endpoint health checking +└── Resources # Resource data models ├── Listener ├── RouteConfiguration ├── Cluster @@ -64,52 +60,191 @@ Async::GRPC::XDS ## Core Components -### 1. `Async::GRPC::XDS::Endpoint` +### 1. `Async::GRPC::XDS::Client` -The main entry point that wraps standard endpoints with xDS capabilities: +The main wrapper client that handles xDS discovery and load balancing. Similar to `SentinelClient` and `ClusterClient` in async-redis. 
```ruby module Async module GRPC module XDS - class Endpoint - # Parse an xDS URI into an endpoint - # @parameter uri [String] xDS URI (e.g., "xds:///myservice") - # @parameter bootstrap [String, Hash, nil] Bootstrap config file path or hash - # @returns [Endpoint] xDS-enabled endpoint - def self.parse(uri, bootstrap: nil) - # Parse xDS URI - # Load bootstrap configuration - # Create endpoint instance - end - - # Initialize with parsed configuration - # @parameter service_name [String] Target service name - # @parameter control_plane [URI, nil] Control plane endpoint - # @parameter bootstrap [Hash, nil] Bootstrap configuration - def initialize(service_name, control_plane: nil, bootstrap: nil) + # Wrapper client for xDS-enabled gRPC connections + # Follows the same pattern as Async::Redis::SentinelClient and ClusterClient + class Client < Protocol::HTTP::Middleware + # Raised when xDS configuration cannot be loaded + class ConfigurationError < StandardError + end + + # Raised when no endpoints are available + class NoEndpointsError < StandardError + end + + # Raised when cluster configuration cannot be reloaded + class ReloadError < StandardError + end + + # Create a new xDS client + # @parameter service_name [String] Target service name (e.g., "myservice") + # @parameter bootstrap [Hash, String, nil] Bootstrap config (hash, file path, or nil for default) + # @parameter headers [Protocol::HTTP::Headers] Default headers + # @parameter options [Hash] Additional options passed to underlying clients + def initialize(service_name, bootstrap: nil, headers: Protocol::HTTP::Headers.new, **options) @service_name = service_name - @control_plane = control_plane - @bootstrap = bootstrap || load_default_bootstrap - @context = Context.new(self, @bootstrap) + @bootstrap = load_bootstrap(bootstrap) + @headers = headers + @options = options + + @context = Context.new(@bootstrap) + @load_balancer = nil + @clients = {} # Cache clients per endpoint (like ClusterClient caches 
node.client) + @mutex = Mutex.new end - # Connect to the service using xDS-discovered endpoints - # @yields [Async::HTTP::Endpoint] Individual backend endpoint + # Resolve endpoints lazily (like SentinelClient.resolve_address) # @returns [Array] Available endpoints - def connect(&block) - @context.resolve_endpoints(@service_name, &block) + def resolve_endpoints + @mutex.synchronize do + unless @load_balancer + # Discover cluster via CDS + cluster = @context.discover_cluster(@service_name) + + # Discover endpoints via EDS + endpoints = @context.discover_endpoints(cluster) + + # Create load balancer + @load_balancer = LoadBalancer.new(@context, cluster, endpoints) + end + + @load_balancer.healthy_endpoints + end end - # Get load balancer for this endpoint - # @returns [LoadBalancer] Configured load balancer - def load_balancer - @context.load_balancer_for(@service_name) + # Get a client for making calls (like ClusterClient.client_for) + # Resolves endpoints lazily and picks one via load balancer + # @returns [Async::GRPC::Client] gRPC client for selected endpoint + def client_for_call + endpoints = resolve_endpoints + raise NoEndpointsError, "No endpoints available for #{@service_name}" if endpoints.empty? 
+ + # Pick endpoint via load balancer + endpoint = @load_balancer.pick + raise NoEndpointsError, "No healthy endpoints available" unless endpoint + + # Cache client per endpoint (like ClusterClient caches node.client) + @clients[endpoint] ||= begin + http_client = Async::HTTP::Client.new(endpoint) + Async::GRPC::Client.new(http_client, headers: @headers) + end end - # Close xDS subscriptions and cleanup + # Implement Protocol::HTTP::Middleware interface + # This allows XDS::Client to be used anywhere Async::GRPC::Client is used + # @parameter request [Protocol::HTTP::Request] The HTTP request + # @returns [Protocol::HTTP::Response] The HTTP response + def call(request, attempts: 3) + # Get client for this call (load balanced) + client = client_for_call + + begin + client.call(request) + rescue Protocol::GRPC::Error => error + # Handle endpoint changes (like ClusterClient handles MOVED/ASK) + if error.status_code == Protocol::GRPC::Status::UNAVAILABLE + Console.warn(self, error) + + # Invalidate cache, reload configuration + invalidate_cache! 
+ + attempts -= 1 + retry if attempts > 0 + end + + raise + rescue => error + # Network errors might indicate endpoint failure + Console.warn(self, error) + + # Invalidate this specific endpoint + invalidate_endpoint(client) + + attempts -= 1 + retry if attempts > 0 + + raise + end + end + + # Create a stub for the given interface + # Delegates to underlying client (maintains Async::GRPC::Client interface) + # @parameter interface_class [Class] Interface class (subclass of Protocol::GRPC::Interface) + # @parameter service_name [String] Service name (e.g., "hello.Greeter") + # @returns [Async::GRPC::Stub] Stub object with methods for each RPC + def stub(interface_class, service_name) + # Use a client to create stub (will be load balanced per call) + client = client_for_call + client.stub(interface_class, service_name) + end + + # Close xDS client and all connections def close + @clients.each_value(&:close) + @clients.clear @context.close + @load_balancer&.close + end + + private + + def load_bootstrap(bootstrap) + case bootstrap + when Hash + bootstrap + when String + load_bootstrap_file(bootstrap) + when nil + load_default_bootstrap + else + raise ArgumentError, "Invalid bootstrap: #{bootstrap.inspect}" + end + end + + def load_bootstrap_file(path) + raise ConfigurationError, "Bootstrap file not found: #{path}" unless File.exist?(path) + + require "json" + JSON.parse(File.read(path)) + rescue JSON::ParserError => error + raise ConfigurationError, "Invalid bootstrap JSON: #{error.message}" + end + + def load_default_bootstrap + # Try environment variable first + if path = ENV["GRPC_XDS_BOOTSTRAP"] + return load_bootstrap_file(path) + end + + # Try default location + default_path = File.expand_path("~/.config/grpc/bootstrap.json") + if File.exist?(default_path) + return load_bootstrap_file(default_path) + end + + raise ConfigurationError, "No bootstrap configuration found" + end + + def invalidate_cache! 
+ @mutex.synchronize do + @clients.each_value(&:close) + @clients.clear + @load_balancer = nil + end + end + + def invalidate_endpoint(client) + @mutex.synchronize do + @clients.delete_if { |endpoint, cached_client| cached_client == client } + client.close + end end end end @@ -119,87 +254,109 @@ end ### 2. `Async::GRPC::XDS::Context` -Manages xDS subscriptions and maintains discovered resource state: +Manages xDS subscriptions and maintains discovered resource state. Similar to how `ClusterClient` manages cluster configuration. ```ruby module Async module GRPC module XDS + # Manages xDS subscriptions and maintains discovered resource state class Context # Initialize xDS context - # @parameter endpoint [Endpoint] Parent endpoint # @parameter bootstrap [Hash] Bootstrap configuration - def initialize(endpoint, bootstrap) - @endpoint = endpoint + def initialize(bootstrap) @bootstrap = bootstrap - @client = Client.new(bootstrap["xds_servers"].first) + @discovery_client = DiscoveryClient.new(bootstrap["xds_servers"].first) @cache = ResourceCache.new - @subscriptions = {} - @load_balancers = {} + @subscriptions = {} # Track active subscriptions + @mutex = Mutex.new end - # Resolve endpoints for a service - # @parameter service_name [String] Service to resolve - # @yields [Async::HTTP::Endpoint] Each discovered endpoint - # @returns [Array] All available endpoints - def resolve_endpoints(service_name, &block) - # Subscribe to CDS for cluster discovery - cluster = discover_cluster(service_name) - - # Subscribe to EDS for endpoint discovery - endpoints = discover_endpoints(cluster) - - # Filter healthy endpoints - healthy_endpoints = filter_healthy(endpoints) - - if block_given? - healthy_endpoints.each(&block) + # Discover cluster for service (like ClusterClient.reload_cluster!) 
+ # @parameter service_name [String] Service to discover + # @returns [Resources::Cluster] Cluster configuration + def discover_cluster(service_name) + @mutex.synchronize do + # Check cache first + if cluster = @cache.get_cluster(service_name) + return cluster + end + + # Subscribe to CDS if not already subscribed + unless @subscriptions[:cds] + @subscriptions[:cds] = subscribe_cds(service_name) + end + + # Wait for cluster to be discovered + # In practice, this might need async waiting + cluster = @cache.get_cluster(service_name) + raise ReloadError, "Failed to discover cluster: #{service_name}" unless cluster + + cluster end - - healthy_endpoints end - # Get or create load balancer for service + # Discover endpoints for cluster (like ClusterClient discovers nodes) + # @parameter cluster [Resources::Cluster] Cluster configuration + # @returns [Array] Discovered endpoints + def discover_endpoints(cluster) + @mutex.synchronize do + # Check cache first + if endpoints = @cache.get_endpoints(cluster.name) + return endpoints + end + + # Subscribe to EDS if not already subscribed + unless @subscriptions[:"eds_#{cluster.name}"] + @subscriptions[:"eds_#{cluster.name}"] = subscribe_eds(cluster.name) + end + + # Wait for endpoints to be discovered + endpoints = @cache.get_endpoints(cluster.name) + raise ReloadError, "Failed to discover endpoints for cluster: #{cluster.name}" unless endpoints + + endpoints + end + end + + # Subscribe to CDS (Cluster Discovery Service) # @parameter service_name [String] Service name - # @returns [LoadBalancer] Load balancer instance - def load_balancer_for(service_name) - @load_balancers[service_name] ||= begin - cluster = @cache.get_cluster(service_name) - LoadBalancer.new(self, cluster) + # @returns [Async::Task] Subscription task + def subscribe_cds(service_name) + @discovery_client.subscribe( + DiscoveryClient::CLUSTER_TYPE, + [service_name] + ) do |resources| + resources.each do |resource| + cluster = Resources::Cluster.new(resource) + 
@cache.update_cluster(cluster) + end end end - # Subscribe to resource updates - # @parameter type_url [String] xDS resource type URL - # @parameter resource_names [Array] Resource names to watch - # @yields [Resource] Updated resources - def subscribe(type_url, resource_names, &block) - @subscriptions[type_url] ||= {} - resource_names.each do |name| - @subscriptions[type_url][name] = block + # Subscribe to EDS (Endpoint Discovery Service) + # @parameter cluster_name [String] Cluster name + # @returns [Async::Task] Subscription task + def subscribe_eds(cluster_name) + @discovery_client.subscribe( + DiscoveryClient::ENDPOINT_TYPE, + [cluster_name] + ) do |resources| + resources.each do |resource| + assignment = Resources::ClusterLoadAssignment.new(resource) + endpoints = assignment.endpoints.map do |ep| + Async::HTTP::Endpoint.parse(ep.uri) + end + @cache.update_endpoints(cluster_name, endpoints) + end end - - @client.subscribe(type_url, resource_names) end # Close all subscriptions def close - @client.close - @load_balancers.each_value(&:close) - end - - private - - def discover_cluster(service_name) - # Implement CDS (Cluster Discovery Service) - end - - def discover_endpoints(cluster) - # Implement EDS (Endpoint Discovery Service) - end - - def filter_healthy(endpoints) - # Filter based on health checks + @subscriptions.each_value(&:stop) + @subscriptions.clear + @discovery_client.close end end end @@ -207,16 +364,16 @@ module Async end ``` -### 3. `Async::GRPC::XDS::Client` +### 3. `Async::GRPC::XDS::DiscoveryClient` -Communicates with xDS control plane: +Communicates with xDS control plane using ADS (Aggregated Discovery Service). 
```ruby module Async module GRPC module XDS # Client for xDS APIs (ADS or individual APIs) - class Client + class DiscoveryClient # xDS API type URLs LISTENER_TYPE = "type.googleapis.com/envoy.config.listener.v3.Listener" ROUTE_TYPE = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" @@ -224,21 +381,25 @@ module Async ENDPOINT_TYPE = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" SECRET_TYPE = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" - # Initialize xDS client + # Initialize xDS discovery client # @parameter server_config [Hash] xDS server configuration from bootstrap def initialize(server_config) @server_uri = server_config["server_uri"] @channel_creds = build_credentials(server_config) @node = build_node_info @streams = {} + @versions = {} # Track version_info per type + @nonces = {} # Track nonces per type + @mutex = Mutex.new end # Subscribe to resource type using ADS # (Aggregated Discovery Service - single stream for all types) # @parameter type_url [String] Resource type URL # @parameter resource_names [Array] Resources to subscribe to - # @yields [Resource] Updated resources - def subscribe(type_url, resource_names) + # @yields [Array] Updated resources + # @returns [Async::Task] Subscription task + def subscribe(type_url, resource_names, &block) stream = get_or_create_stream request = build_discovery_request( @@ -251,40 +412,56 @@ module Async stream.write(request) # Process responses asynchronously - Async do - stream.each do |response| - process_response(response, &Proc.new) + Async do |task| + begin + stream.each do |response| + process_response(response, type_url, &block) + end + rescue => error + Console.error(self, error) + # Stream closed, will reconnect on next subscription + @mutex.synchronize do + @streams.delete(:ads) + end + raise end end end - # Close xDS client + # Close xDS discovery client def close @streams.each_value(&:close) + @streams.clear end private def 
get_or_create_stream - @streams[:ads] ||= create_ads_stream + @mutex.synchronize do + @streams[:ads] ||= create_ads_stream + end end def create_ads_stream # Create bidirectional streaming RPC to ADS endpoint = Async::HTTP::Endpoint.parse(@server_uri) - grpc_client = Async::GRPC::Client.open(endpoint) + http_client = Async::HTTP::Client.new(endpoint) + grpc_client = Async::GRPC::Client.new(http_client) # Use envoy.service.discovery.v3.AggregatedDiscoveryService + # This would require the Envoy protobuf definitions interface = AggregatedDiscoveryServiceInterface.new( "envoy.service.discovery.v3.AggregatedDiscoveryService" ) - stub = grpc_client.stub(interface) + stub = grpc_client.stub(interface, "envoy.service.discovery.v3") + # Create bidirectional stream stub.stream_aggregated_resources end def build_discovery_request(type_url:, resource_names:, version_info:, nonce:) # Build DiscoveryRequest protobuf + # This requires Envoy protobuf definitions Envoy::Service::Discovery::V3::DiscoveryRequest.new( version_info: version_info, node: @node, @@ -304,11 +481,54 @@ module Async ) end - def process_response(response) - # Parse and validate response - # Update version and nonce - # Deserialize resources - # Yield to subscribers + def process_response(response, type_url, &block) + @mutex.synchronize do + # Update version and nonce + @versions[type_url] = response.version_info + @nonces[type_url] = response.nonce + + # Deserialize resources + resources = response.resources.map do |resource| + # Deserialize Any protobuf to specific type + deserialize_resource(resource, type_url) + end + + # Yield to subscribers + block.call(resources) if block_given? 
+ end + end + + def deserialize_resource(resource, type_url) + # Deserialize protobuf Any to specific message type + # This requires Envoy protobuf definitions + case type_url + when CLUSTER_TYPE + Envoy::Config::Cluster::V3::Cluster.decode(resource.value) + when ENDPOINT_TYPE + Envoy::Config::Endpoint::V3::ClusterLoadAssignment.decode(resource.value) + # ... other types + end + end + + def generate_node_id + # Generate unique node ID + "#{Socket.gethostname}-#{Process.pid}-#{SecureRandom.hex(4)}" + end + + def build_metadata + # Build node metadata + {} + end + + def build_locality + # Build locality information + nil + end + + def build_credentials(server_config) + # Build channel credentials from config + # Support Google Default Credentials, mTLS, etc. + nil end end end @@ -318,12 +538,13 @@ end ### 4. `Async::GRPC::XDS::LoadBalancer` -Client-side load balancing with health checking: +Client-side load balancing with health checking. Similar to how `ClusterClient` selects nodes. ```ruby module Async module GRPC module XDS + # Client-side load balancing with health checking class LoadBalancer # Load balancing policies ROUND_ROBIN = :round_robin @@ -335,22 +556,34 @@ module Async # Initialize load balancer # @parameter context [Context] xDS context # @parameter cluster [Resources::Cluster] Cluster configuration - def initialize(context, cluster) + # @parameter endpoints [Array] Initial endpoints + def initialize(context, cluster, endpoints) @context = context @cluster = cluster + @endpoints = endpoints @policy = parse_policy(cluster.lb_policy) - @endpoints = [] + @health_status = {} # Track health per endpoint @health_checker = HealthChecker.new(cluster.health_checks) @current_index = 0 + @in_flight_requests = {} # Track in-flight requests per endpoint # Subscribe to endpoint updates watch_endpoints + + # Start health checking + start_health_checks + end + + # Get healthy endpoints + # @returns [Array] Healthy endpoints + def healthy_endpoints + @endpoints.select 
{ |ep| healthy?(ep) } end # Pick next endpoint using load balancing policy # @returns [Async::HTTP::Endpoint, nil] Selected endpoint def pick - healthy = @endpoints.select { |ep| @health_checker.healthy?(ep) } + healthy = healthy_endpoints return nil if healthy.empty? case @policy @@ -362,16 +595,18 @@ module Async pick_random(healthy) when RING_HASH pick_ring_hash(healthy) + when MAGLEV + pick_maglev(healthy) else healthy.first end end # Update endpoints from EDS - # @parameter endpoints [Array] New endpoints + # @parameter endpoints [Array] New endpoints def update_endpoints(endpoints) - @endpoints = endpoints.map { |ep| build_http_endpoint(ep) } - @health_checker.update_endpoints(@endpoints) + @endpoints = endpoints + @health_checker.update_endpoints(endpoints) end # Close load balancer @@ -381,6 +616,10 @@ module Async private + def healthy?(endpoint) + @health_status[endpoint] != :unhealthy + end + def pick_round_robin(endpoints) @current_index = (@current_index + 1) % endpoints.size endpoints[@current_index] @@ -388,7 +627,7 @@ module Async def pick_least_request(endpoints) # Track in-flight requests and pick endpoint with fewest - endpoints.min_by { |ep| in_flight_count(ep) } + endpoints.min_by { |ep| @in_flight_requests[ep] || 0 } end def pick_random(endpoints) @@ -397,11 +636,47 @@ module Async def pick_ring_hash(endpoints) # Consistent hashing implementation + # Would need request context to hash + endpoints.first # Placeholder + end + + def pick_maglev(endpoints) + # Maglev hashing implementation + endpoints.first # Placeholder + end + + def parse_policy(lb_policy) + # Parse cluster LB policy to our constants + case lb_policy + when :ROUND_ROBIN then ROUND_ROBIN + when :LEAST_REQUEST then LEAST_REQUEST + when :RANDOM then RANDOM + when :RING_HASH then RING_HASH + when :MAGLEV then MAGLEV + else ROUND_ROBIN + end end def watch_endpoints - @context.subscribe(Client::ENDPOINT_TYPE, [@cluster.name]) do |assignment| - update_endpoints(assignment.endpoints) + 
# Subscribe to endpoint updates + @context.subscribe_eds(@cluster.name) do |endpoints| + update_endpoints(endpoints) + end + end + + def start_health_checks + return unless @cluster.health_checks.any? + + Async do |task| + loop do + @endpoints.each do |endpoint| + @health_status[endpoint] = @health_checker.check(endpoint) + end + + # Sleep for health check interval + interval = @cluster.health_checks.first&.interval || 30 + task.sleep(interval) + end end end end @@ -410,7 +685,103 @@ module Async end ``` -### 5. Resource Data Models +### 5. `Async::GRPC::XDS::HealthChecker` + +Health checking for endpoints. Runs as async tasks. + +```ruby +module Async + module GRPC + module XDS + # Endpoint health checking + class HealthChecker + # Initialize health checker + # @parameter health_checks [Array] Health check configurations from cluster + def initialize(health_checks) + @health_checks = health_checks + @endpoints = [] + @tasks = {} # Track health check tasks per endpoint + end + + # Update endpoints to check + # @parameter endpoints [Array] Endpoints to check + def update_endpoints(endpoints) + # Stop checking removed endpoints + removed = @endpoints - endpoints + removed.each do |endpoint| + @tasks[endpoint]&.stop + @tasks.delete(endpoint) + end + + # Start checking new endpoints + added = endpoints - @endpoints + added.each do |endpoint| + start_checking(endpoint) + end + + @endpoints = endpoints + end + + # Check health of endpoint + # @parameter endpoint [Async::HTTP::Endpoint] Endpoint to check + # @returns [Symbol] :healthy, :unhealthy, or :unknown + def check(endpoint) + # Use cached health status if available + # Otherwise perform check + perform_check(endpoint) + end + + # Close health checker + def close + @tasks.each_value(&:stop) + @tasks.clear + end + + private + + def start_checking(endpoint) + @tasks[endpoint] = Async do |task| + loop do + perform_check(endpoint) + + interval = @health_checks.first&.interval || 30 + task.sleep(interval) + end + end + 
end + + def perform_check(endpoint) + health_check = @health_checks.first + return :unknown unless health_check + + case health_check.type + when :HTTP + check_http_health(endpoint, health_check) + when :gRPC + check_grpc_health(endpoint, health_check) + else + :unknown + end + end + + def check_http_health(endpoint, health_check) + # Perform HTTP health check + # Use Async::HTTP::Client to make health check request + :healthy # Placeholder + end + + def check_grpc_health(endpoint, health_check) + # Perform gRPC health check + # Use Async::GRPC::Client to call grpc.health.v1.Health service + :healthy # Placeholder + end + end + end + end +end +``` + +### 6. Resource Data Models ```ruby module Async @@ -463,39 +834,45 @@ module Async end def uri - "http://#{@address}:#{@port}" + "https://#{@address}:#{@port}" end end + end + end + end +end +``` + +### 7. `Async::GRPC::XDS::ResourceCache` + +Caches discovered resources. + +```ruby +module Async + module GRPC + module XDS + # Caches discovered xDS resources + class ResourceCache + def initialize + @clusters = {} + @endpoints = {} + @mutex = Mutex.new + end - # Represents a listener configuration - class Listener - attr_reader :name, :address, :filter_chains - - def initialize(proto) - @name = proto.name - @address = proto.address - @filter_chains = proto.filter_chains - end + def get_cluster(name) + @mutex.synchronize { @clusters[name] } end - # Represents route configuration - class RouteConfiguration - attr_reader :name, :virtual_hosts - - def initialize(proto) - @name = proto.name - @virtual_hosts = proto.virtual_hosts.map { |vh| VirtualHost.new(vh) } - end + def update_cluster(cluster) + @mutex.synchronize { @clusters[cluster.name] = cluster } end - class VirtualHost - attr_reader :name, :domains, :routes - - def initialize(proto) - @name = proto.name - @domains = proto.domains - @routes = proto.routes - end + def get_endpoints(cluster_name) + @mutex.synchronize { @endpoints[cluster_name] } + end + + def 
update_endpoints(cluster_name, endpoints) + @mutex.synchronize { @endpoints[cluster_name] = endpoints } end end end @@ -545,42 +922,10 @@ xDS clients require a bootstrap configuration that specifies control plane detai ``` Bootstrap can be loaded from: -1. Explicit parameter to `Endpoint.parse` +1. Explicit parameter to `XDS::Client.new` 2. Environment variable `GRPC_XDS_BOOTSTRAP` 3. Default file location `~/.config/grpc/bootstrap.json` -## Integration with Async::GRPC::Client - -```ruby -module Async - module GRPC - class Client - # Enhanced to support xDS endpoints - def self.open(endpoint = self::ENDPOINT, headers: Protocol::HTTP::Headers.new, **options) - # Check if endpoint is xDS-enabled - if endpoint.is_a?(XDS::Endpoint) - # Use xDS load balancer to select backend - lb = endpoint.load_balancer - backend_endpoint = lb.pick - - # Create client with selected backend - client = connect(backend_endpoint) - grpc_client = new(client, headers: headers, **options) - - # Wrap with load balancer for automatic failover - XDS::BalancedClient.new(grpc_client, lb) - else - # Standard endpoint handling - endpoint = Async::HTTP::Endpoint.parse(endpoint) if endpoint.is_a?(String) - client = connect(endpoint) - new(client, headers: headers, **options) - end - end - end - end -end -``` - ## Usage Examples ### Basic Service Discovery @@ -589,35 +934,54 @@ end require "async/grpc" require "async/grpc/xds" -# Parse xDS endpoint -endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice") +# Create xDS client (like SentinelClient) +xds_client = Async::GRPC::XDS::Client.new( + "myservice", + bootstrap: "/path/to/bootstrap.json" +) -# Create client - automatically uses xDS for discovery -Async::GRPC::Client.open(endpoint) do |client| - stub = client.stub(MyServiceInterface, "myservice") +# Use it exactly like Async::GRPC::Client +Async do + stub = xds_client.stub(MyServiceInterface, "myservice") # Make calls - automatically load balanced across discovered endpoints response = 
stub.my_method(request) puts response.message +ensure + xds_client.close end ``` -### Manual Endpoint Resolution +### With Default Bootstrap ```ruby -require "async/grpc/xds" +# Uses GRPC_XDS_BOOTSTRAP env var or ~/.config/grpc/bootstrap.json +xds_client = Async::GRPC::XDS::Client.new("myservice") -endpoint = Async::GRPC::XDS::Endpoint.parse("xds:///myservice") +Async do + # Use client normally + xds_client.stub(MyServiceInterface, "myservice") do |stub| + response = stub.say_hello(request) + end +ensure + xds_client.close +end +``` + +### Manual Endpoint Resolution + +```ruby +xds_client = Async::GRPC::XDS::Client.new("myservice") # Get all healthy endpoints -endpoints = endpoint.connect +endpoints = xds_client.resolve_endpoints -endpoints.each do |backend| - puts "Available backend: #{backend.authority}" +endpoints.each do |endpoint| + puts "Available backend: #{endpoint.authority}" end # Use load balancer directly -lb = endpoint.load_balancer +lb = xds_client.instance_variable_get(:@load_balancer) 10.times do backend = lb.pick @@ -625,66 +989,58 @@ lb = endpoint.load_balancer end ``` -### With Custom Bootstrap +### Error Handling ```ruby -bootstrap = { - "xds_servers" => [ - { - "server_uri" => "control-plane.example.com:443", - "channel_creds" => [{"type" => "google_default"}] - } - ], - "node" => { - "id" => "my-app-instance-001", - "cluster" => "production" - } -} - -endpoint = Async::GRPC::XDS::Endpoint.parse( - "xds:///myservice", - bootstrap: bootstrap -) +xds_client = Async::GRPC::XDS::Client.new("myservice") -Async::GRPC::Client.open(endpoint) do |client| - # Use client normally +Async do + begin + stub = xds_client.stub(MyServiceInterface, "myservice") + response = stub.my_method(request) + rescue Async::GRPC::XDS::NoEndpointsError => error + puts "No endpoints available: #{error.message}" + # Fallback to static endpoint or retry later + rescue Async::GRPC::XDS::ConfigurationError => error + puts "Configuration error: #{error.message}" + # Check 
bootstrap configuration + end +ensure + xds_client.close end ``` -### Server-Side xDS (Listener Discovery) - -```ruby -require "async/grpc/xds" +## Integration with Existing Code -# Create xDS-enabled server -xds_config = Async::GRPC::XDS::ServerConfig.parse("xds:///myserver") +Since `XDS::Client` implements `Protocol::HTTP::Middleware` (same as `Async::GRPC::Client`), it can be used as a drop-in replacement: -Async do - # Server configuration discovered via LDS - listeners = xds_config.listeners - - listeners.each do |listener| - endpoint = Async::HTTP::Endpoint.parse(listener.uri) - server = Async::HTTP::Server.for(endpoint, dispatcher) - - Async do - server.run - end +```ruby +# Works with any code expecting Async::GRPC::Client interface +def make_call(client) + client.stub(MyServiceInterface, "myservice") do |stub| + stub.say_hello(request) end end + +# Can use either regular client or xDS client +regular_client = Async::GRPC::Client.open(endpoint) +xds_client = Async::GRPC::XDS::Client.new("myservice") + +make_call(regular_client) # Works +make_call(xds_client) # Also works! 
``` ## Implementation Phases ### Phase 1: Core Infrastructure -- [ ] Parse xDS URIs - [ ] Bootstrap configuration loading -- [ ] Basic `XDS::Endpoint` implementation +- [ ] Basic `XDS::Client` wrapper implementation - [ ] `XDS::Context` for state management - [ ] `XDS::ResourceCache` for discovered resources +- [ ] Basic endpoint resolution ### Phase 2: Discovery Services -- [ ] `XDS::Client` with ADS support +- [ ] `XDS::DiscoveryClient` with ADS support - [ ] CDS (Cluster Discovery) implementation - [ ] EDS (Endpoint Discovery) implementation - [ ] Resource subscription and updates @@ -714,12 +1070,12 @@ end - [ ] Timeout configuration - [ ] Rate limiting -### Phase 6: Integration -- [ ] Integration with `Async::GRPC::Client` -- [ ] Integration with `Async::GRPC::Dispatcher` -- [ ] Interceptor support -- [ ] Observability (metrics, tracing) -- [ ] Testing utilities +### Phase 6: Integration & Testing +- [ ] Integration tests with mock xDS server +- [ ] Error handling and recovery tests +- [ ] Load balancing distribution tests +- [ ] Health check integration tests +- [ ] Performance benchmarks ## Standards and Specifications @@ -746,18 +1102,272 @@ end ## Testing Strategy ### Unit Tests -- URI parsing and validation -- Bootstrap configuration loading +- Bootstrap configuration loading and validation - Resource deserialization - Load balancing algorithms - Health checking logic +- Cache invalidation + +### Integration Tests with Docker Compose + +Following the pattern from `async-redis`, integration tests use Docker Compose to spin up a complete xDS test environment with: +- xDS control plane (using go-control-plane or Envoy) +- Multiple backend gRPC servers +- Health check services + +#### Docker Compose Setup + +Create `xds/docker-compose.yaml`: + +```yaml +services: + # xDS control plane (using go-control-plane test server) + xds-control-plane: + image: envoyproxy/go-control-plane:latest + command: > + /go-control-plane + -alsologtostderr + -v 2 + -mode xds + 
-server_type ADS + -port 18000 + ports: + - "18000:18000" + healthcheck: + test: ["CMD", "nc", "-z", "localhost", "18000"] + interval: 1s + timeout: 3s + retries: 30 + + # Backend gRPC server 1 + backend-1: + build: + context: . + dockerfile: xds/Dockerfile.backend + environment: + - PORT=50051 + - SERVICE_NAME=myservice + ports: + - "50051:50051" + depends_on: + xds-control-plane: + condition: service_healthy + + # Backend gRPC server 2 + backend-2: + build: + context: . + dockerfile: xds/Dockerfile.backend + environment: + - PORT=50052 + - SERVICE_NAME=myservice + ports: + - "50052:50052" + depends_on: + xds-control-plane: + condition: service_healthy + + # Backend gRPC server 3 + backend-3: + build: + context: . + dockerfile: xds/Dockerfile.backend + environment: + - PORT=50053 + - SERVICE_NAME=myservice + ports: + - "50053:50053" + depends_on: + xds-control-plane: + condition: service_healthy + + # Test runner + tests: + image: ruby:${RUBY_VERSION:-latest} + volumes: + - ../:/code + working_dir: /code + command: bash -c "bundle install && bundle exec sus xds/test" + environment: + - COVERAGE=${COVERAGE} + - XDS_SERVER_URI=xds-control-plane:18000 + depends_on: + - xds-control-plane + - backend-1 + - backend-2 + - backend-3 +``` + +#### Test Structure -### Integration Tests -- Mock xDS control plane -- Full discovery flow (CDS + EDS) -- Load balancer endpoint selection -- Health check state transitions -- Resource update handling +Create `xds/test/async/grpc/xds/client.rb`: + +```ruby +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. 
+ +require "async/grpc/xds/client" +require "sus/fixtures/async" +require "async/http/endpoint" + +describe Async::GRPC::XDS::Client do + include Sus::Fixtures::Async::ReactorContext + + let(:xds_server_uri) {ENV["XDS_SERVER_URI"] || "xds-control-plane:18000"} + let(:service_name) {"myservice"} + + let(:bootstrap) { + { + "xds_servers" => [ + { + "server_uri" => xds_server_uri, + "channel_creds" => [{"type" => "insecure"}] + } + ], + "node" => { + "id" => "test-client-#{Process.pid}", + "cluster" => "test" + } + } + } + + let(:client) {subject.new(service_name, bootstrap: bootstrap)} + + it "can resolve endpoints" do + endpoints = client.resolve_endpoints + + expect(endpoints).not_to be_empty + expect(endpoints.size).to be >= 1 + end + + it "can make RPC calls through xDS" do + stub = client.stub(MyServiceInterface, service_name) + + request = MyService::HelloRequest.new(name: "test") + response = stub.say_hello(request) + + expect(response).to be_a(MyService::HelloReply) + expect(response.message).to match(/test/) + end + + it "load balances across multiple endpoints" do + # Make multiple calls and verify they hit different backends + endpoints_used = Set.new + + 10.times do + stub = client.stub(MyServiceInterface, service_name) + request = MyService::HelloRequest.new(name: "test") + response = stub.say_hello(request) + + # Extract backend info from response metadata or headers + endpoints_used << extract_backend(response) + end + + # Should use multiple backends (depending on LB policy) + expect(endpoints_used.size).to be > 1 + end + + it "handles endpoint failures gracefully" do + # Start with healthy endpoints + endpoints = client.resolve_endpoints + expect(endpoints).not_to be_empty + + # Simulate endpoint failure (stop one backend) + # xDS should update and remove failed endpoint + + # Wait for xDS update + sleep 5 + + # Should still be able to make calls (using remaining endpoints) + stub = client.stub(MyServiceInterface, service_name) + request = 
MyService::HelloRequest.new(name: "test") + response = stub.say_hello(request) + + expect(response).to be_a(MyService::HelloReply) + end + + it "reloads configuration on errors" do + # Make initial call + stub = client.stub(MyServiceInterface, service_name) + request = MyService::HelloRequest.new(name: "test") + response = stub.say_hello(request) + expect(response).to be_a(MyService::HelloReply) + + # Invalidate cache (simulate endpoint change) + client.instance_variable_get(:@load_balancer)&.update_endpoints([]) + + # Should reload and work again + response = stub.say_hello(request) + expect(response).to be_a(MyService::HelloReply) + end + + private + + def extract_backend(response) + # Extract backend identifier from response + # This depends on your test service implementation + response.metadata["backend-id"] || "unknown" + end +end +``` + +#### Running Integration Tests + +```bash +# Start docker compose environment +cd xds +docker compose up -d + +# Wait for services to be ready +docker compose ps + +# Run tests +docker compose run --rm tests + +# Or run locally (if services are accessible) +bundle exec sus xds/test + +# Cleanup +docker compose down +``` + +#### Mock xDS Control Plane + +For simpler testing, use a mock xDS server: + +```ruby +# xds/test/mock_xds_server.rb +module Async + module GRPC + module XDS + module Test + # Simple mock xDS server for testing + class MockControlPlane + def initialize + @clusters = {} + @endpoints = {} + end + + def add_cluster(name, config) + @clusters[name] = config + end + + def add_endpoints(cluster_name, endpoints) + @endpoints[cluster_name] = endpoints + end + + # Implement ADS server interface + def stream_aggregated_resources(requests) + # Yield DiscoveryResponse messages + end + end + end + end + end +end +``` ### System Tests - Integration with Google Cloud Traffic Director @@ -807,26 +1417,31 @@ end 1. **Incremental vs. State-of-the-World** - Which xDS update mode to use? 
- Incremental allows selective updates - State-of-the-world is simpler but more bandwidth + - **Recommendation**: Start with state-of-the-world, add incremental later 2. **Control Plane Failover** - How to handle control plane unavailability? - Cache last known good configuration - Fall back to static configuration - Multiple control plane endpoints + - **Recommendation**: Cache last known config, support multiple endpoints -3. **Server-Side xDS** - Priority for server features? - - LDS for dynamic listener configuration - - RDS for advanced routing - - Integration with existing `Dispatcher` - -4. **Protobuf Dependencies** - How to handle Envoy protos? +3. **Protobuf Dependencies** - How to handle Envoy protos? - Bundle pre-generated Ruby protos - Generate from .proto files at build time - Separate gem for Envoy proto definitions + - **Recommendation**: Separate gem (`envoy-protos-ruby`) for proto definitions -5. **Backwards Compatibility** - How to maintain compatibility? +4. **Backwards Compatibility** - How to maintain compatibility? - Make xDS optional dependency - Graceful degradation without xDS - Clear migration path from static to dynamic + - **Recommendation**: Optional dependency, wrapper pattern maintains compatibility + +5. **Server-Side xDS** - Priority for server features? + - LDS for dynamic listener configuration + - RDS for advanced routing + - Integration with existing `Dispatcher` + - **Recommendation**: Focus on client-side first, server-side later ## Related Work diff --git a/xds/Dockerfile.backend b/xds/Dockerfile.backend new file mode 100644 index 0000000..ec44aee --- /dev/null +++ b/xds/Dockerfile.backend @@ -0,0 +1,21 @@ +FROM ruby:latest + +WORKDIR /app + +# Install dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + +# Copy gemfiles +COPY gems.rb gems.locked ./ +RUN bundle install + +# Copy application code +COPY . . 
+ +# Expose port +EXPOSE ${PORT:-50051} + +# Run gRPC server +CMD bundle exec ruby xds/backend_server.rb diff --git a/xds/backend_server.rb b/xds/backend_server.rb new file mode 100644 index 0000000..954edb1 --- /dev/null +++ b/xds/backend_server.rb @@ -0,0 +1,66 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# Simple gRPC backend server for xDS testing +# This mimics a real gRPC service that would be discovered via xDS + +require "async" +require "async/http/server" +require "async/http/endpoint" +require "protocol/grpc/middleware" +require_relative "../fixtures/async/grpc/test_interface" + +class TestBackendService + def initialize(backend_id) + @backend_id = backend_id + end + + def unary_call(input, output, call) + request = input.read + + # Include backend ID in response metadata + call.set_metadata("backend-id", @backend_id) + + response = Protocol::GRPC::Fixtures::TestMessage.new( + value: "Response from #{@backend_id}: #{request.value}" + ) + + output.write(response) + end + + def say_hello(input, output, call) + request = input.read + + call.set_metadata("backend-id", @backend_id) + + response = Protocol::GRPC::Fixtures::TestMessage.new( + value: "Hello from #{@backend_id}, #{request.value}!" 
 + ) + + output.write(response) + end +end + +port = ENV["PORT"] || "50051" +backend_id = ENV["BACKEND_ID"] || "backend-unknown" +service_name = ENV["SERVICE_NAME"] || "test.Service" + +Async do + # Create gRPC middleware + grpc = Protocol::GRPC::Middleware.new + service = TestBackendService.new(backend_id) + grpc.register(service_name, service) + + # Create endpoint (cleartext h2c: bootstrap uses "insecure" channel creds and no TLS certificate is configured) + endpoint = Async::HTTP::Endpoint.parse( + "http://0.0.0.0:#{port}", + protocol: Async::HTTP::Protocol::HTTP2 + ) + + # Start server + server = Async::HTTP::Server.new(grpc, endpoint) + + Console.logger.info(self){"Starting backend server #{backend_id} on port #{port}"} + + server.run +end diff --git a/xds/docker-compose.yaml b/xds/docker-compose.yaml new file mode 100644 index 0000000..67f7a67 --- /dev/null +++ b/xds/docker-compose.yaml @@ -0,0 +1,80 @@ +services: + # xDS control plane (using go-control-plane test server) + # This provides a simple xDS server for testing + xds-control-plane: + image: envoyproxy/go-control-plane:latest + command: > + /go-control-plane + -alsologtostderr + -v 2 + -mode xds + -server_type ADS + -port 18000 + ports: + - "18000:18000" + healthcheck: + test: ["CMD", "nc", "-z", "localhost", "18000"] + interval: 1s + timeout: 3s + retries: 30 + + # Backend gRPC server 1 + backend-1: + build: + context: .. + dockerfile: xds/Dockerfile.backend + environment: + - PORT=50051 + - SERVICE_NAME=myservice + - BACKEND_ID=backend-1 + ports: + - "50051:50051" + depends_on: + xds-control-plane: + condition: service_healthy + + # Backend gRPC server 2 + backend-2: + build: + context: .. + dockerfile: xds/Dockerfile.backend + environment: + - PORT=50052 + - SERVICE_NAME=myservice + - BACKEND_ID=backend-2 + ports: + - "50052:50052" + depends_on: + xds-control-plane: + condition: service_healthy + + # Backend gRPC server 3 + backend-3: + build: + context: .. 
+ dockerfile: xds/Dockerfile.backend + environment: + - PORT=50053 + - SERVICE_NAME=myservice + - BACKEND_ID=backend-3 + ports: + - "50053:50053" + depends_on: + xds-control-plane: + condition: service_healthy + + # Test runner + tests: + image: ruby:${RUBY_VERSION:-latest} + volumes: + - ../:/code + working_dir: /code + command: bash -c "bundle install && bundle exec sus xds/test" + environment: + - COVERAGE=${COVERAGE} + - XDS_SERVER_URI=xds-control-plane:18000 + depends_on: + - xds-control-plane + - backend-1 + - backend-2 + - backend-3 diff --git a/xds/readme.md b/xds/readme.md new file mode 100644 index 0000000..0e362c9 --- /dev/null +++ b/xds/readme.md @@ -0,0 +1,98 @@ +# xDS Integration Tests + +This directory contains Docker Compose configuration and test files for xDS integration testing, following the same pattern as `async-redis` (Sentinel and Cluster tests). + +## Setup + +The Docker Compose setup includes: +- **xds-control-plane**: xDS control plane server (using go-control-plane) +- **backend-1, backend-2, backend-3**: Multiple gRPC backend servers +- **tests**: Test runner container + +## Running Tests + +### Start the environment + +```bash +cd xds +docker compose up -d +``` + +### Wait for services to be ready + +```bash +docker compose ps +``` + +All services should show as "healthy" or "running". 
+ +### Run tests + +```bash +# Run tests in docker compose +docker compose run --rm tests + +# Or run tests locally (if services are accessible) +# Set XDS_SERVER_URI environment variable +export XDS_SERVER_URI=xds-control-plane:18000 +bundle exec sus xds/test +``` + +### Cleanup + +```bash +docker compose down +``` + +## Test Structure + +Tests are located in `xds/test/async/grpc/xds/` and follow the same pattern as other async-grpc tests: + +- `client.rb`: Tests for `Async::GRPC::XDS::Client` +- Tests use `Sus::Fixtures::Async::ReactorContext` for async test support +- Tests connect to docker compose services using service names (e.g., `xds-control-plane:18000`) + +## Environment Variables + +- `XDS_SERVER_URI`: xDS control plane server URI (default: `xds-control-plane:18000`) +- `RUBY_VERSION`: Ruby version for test container (default: `latest`) +- `COVERAGE`: Enable code coverage reporting + +## Backend Servers + +The backend servers (`backend-1`, `backend-2`, `backend-3`) are simple gRPC servers that: +- Implement the test service interface +- Include backend ID in response metadata +- Can be used to test load balancing and failover + +See `backend_server.rb` for implementation details. + +## Mock xDS Control Plane + +For simpler unit testing, you can use a mock xDS server instead of the full Docker Compose setup. See the test files for examples of mocking xDS responses. + +## Troubleshooting + +### Services not starting + +Check logs: +```bash +docker compose logs xds-control-plane +docker compose logs backend-1 +``` + +### Tests failing to connect + +Ensure services are healthy: +```bash +docker compose ps +``` + +Check network connectivity: +```bash +docker compose exec tests ping xds-control-plane +``` + +### Port conflicts + +If ports are already in use, modify `docker-compose.yaml` to use different ports. 
diff --git a/xds/test/async/grpc/xds/client.rb b/xds/test/async/grpc/xds/client.rb new file mode 100644 index 0000000..0538def --- /dev/null +++ b/xds/test/async/grpc/xds/client.rb @@ -0,0 +1,124 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. + +require "async/grpc/xds/client" +require "sus/fixtures/async" +require "async/http/endpoint" +require "set" + +describe Async::GRPC::XDS::Client do + include Sus::Fixtures::Async::ReactorContext + + let(:xds_server_uri) {ENV["XDS_SERVER_URI"] || "xds-control-plane:18000"} + let(:service_name) {"myservice"} + + let(:bootstrap) { + { + "xds_servers" => [ + { + "server_uri" => xds_server_uri, + "channel_creds" => [{"type" => "insecure"}] + } + ], + "node" => { + "id" => "test-client-#{Process.pid}", + "cluster" => "test" + } + } + } + + let(:client) {subject.new(service_name, bootstrap: bootstrap)} + + it "can resolve endpoints" do + endpoints = client.resolve_endpoints + + expect(endpoints).not_to be_empty + expect(endpoints.size).to be >= 1 + end + + it "can make RPC calls through xDS" do + # This test requires a working xDS control plane and backend servers + # Skip if not running in docker compose + skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] + + stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) + + request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") + response = stub.unary_call(request) + + expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) + expect(response.value).to match(/test/) + end + + it "load balances across multiple endpoints" do + skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] + + # Make multiple calls and verify they hit different backends + endpoints_used = Set.new + + 10.times do + stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) + request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") + response = 
stub.unary_call(request) + + # Extract backend info from response metadata + # This would need to be implemented based on how metadata is returned + endpoints_used << response.value + end + + # Should use multiple backends (depending on LB policy) + # Note: This depends on load balancing policy + expect(endpoints_used.size).to be >= 1 + end + + it "handles endpoint failures gracefully" do + skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] + + # Start with healthy endpoints + endpoints = client.resolve_endpoints + expect(endpoints).not_to be_empty + + # Make initial call + stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) + request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") + response = stub.unary_call(request) + expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) + + # Note: Testing actual endpoint failure would require stopping a backend + # This is better done as a separate integration test + end + + it "reloads configuration on errors" do + skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] + + # Make initial call + stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) + request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") + response = stub.unary_call(request) + expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) + + # Invalidate cache (simulate endpoint change) + client.instance_variable_get(:@load_balancer)&.update_endpoints([]) + + # Should reload and work again + response = stub.unary_call(request) + expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) + end + + it "handles bootstrap configuration errors" do + expect { + subject.new(service_name, bootstrap: {invalid: "config"}) + }.to raise_error(Async::GRPC::XDS::Client::ConfigurationError) + end + + it "handles no endpoints available" do + # Create client with invalid service name + invalid_client = subject.new("nonexistent-service", bootstrap: bootstrap) + + 
expect { + invalid_client.resolve_endpoints + }.to raise_error(Async::GRPC::XDS::Client::NoEndpointsError) + end +end From 05d6f2732fb698f21fa7e73fbc30823330faa9a4 Mon Sep 17 00:00:00 2001 From: Samuel Williams Date: Thu, 26 Feb 2026 09:45:43 +1300 Subject: [PATCH 3/6] Add support for XDS v3. --- .github/workflows/test-xds.yaml | 36 + async-grpc.gemspec | 1 + bake/async/grpc/xds.rb | 116 + code.md | 0 lib/async/grpc/client.rb | 9 +- lib/async/grpc/stub.rb | 5 +- lib/async/grpc/xds.rb | 37 + lib/async/grpc/xds/ads_stream.rb | 70 + lib/async/grpc/xds/client.rb | 241 ++ lib/async/grpc/xds/context.rb | 201 ++ lib/async/grpc/xds/discovery_client.rb | 365 +++ lib/async/grpc/xds/health_checker.rb | 88 + lib/async/grpc/xds/load_balancer.rb | 196 ++ lib/async/grpc/xds/resource_cache.rb | 55 + lib/async/grpc/xds/resources.rb | 281 ++ lib/envoy.rb | 83 + lib/envoy/annotations/deprecation_pb.rb | 19 + .../config/cluster/v3/circuit_breaker_pb.rb | 31 + lib/envoy/config/cluster/v3/cluster_pb.rb | 80 + lib/envoy/config/cluster/v3/filter_pb.rb | 28 + .../config/cluster/v3/outlier_detection_pb.rb | 29 + lib/envoy/config/core/v3/address_pb.rb | 38 + lib/envoy/config/core/v3/backoff_pb.rb | 27 + lib/envoy/config/core/v3/base_pb.rb | 68 + lib/envoy/config/core/v3/cel_pb.rb | 24 + lib/envoy/config/core/v3/config_source_pb.rb | 42 + .../config/core/v3/event_service_config_pb.rb | 27 + lib/envoy/config/core/v3/extension_pb.rb | 26 + .../config/core/v3/grpc_method_list_pb.rb | 27 + lib/envoy/config/core/v3/grpc_service_pb.rb | 45 + lib/envoy/config/core/v3/health_check_pb.rb | 47 + lib/envoy/config/core/v3/http_service_pb.rb | 27 + lib/envoy/config/core/v3/http_uri_pb.rb | 27 + lib/envoy/config/core/v3/protocol_pb.rb | 51 + lib/envoy/config/core/v3/proxy_protocol_pb.rb | 31 + lib/envoy/config/core/v3/resolver_pb.rb | 27 + .../config/core/v3/socket_cmsg_headers_pb.rb | 25 + lib/envoy/config/core/v3/socket_option_pb.rb | 31 + .../core/v3/substitution_format_string_pb.rb | 30 + 
.../config/core/v3/udp_socket_config_pb.rb | 26 + .../endpoint/v3/endpoint_components_pb.rb | 40 + lib/envoy/config/endpoint/v3/endpoint_pb.rb | 32 + .../config/endpoint/v3/load_report_pb.rb | 36 + lib/envoy/service/discovery/v3/ads_pb.rb | 26 + .../v3/aggregated_discovery_service.rb | 64 + .../service/discovery/v3/discovery_pb.rb | 42 + lib/envoy/type/matcher/v3/address_pb.rb | 25 + lib/envoy/type/matcher/v3/filter_state_pb.rb | 27 + lib/envoy/type/matcher/v3/http_inputs_pb.rb | 29 + lib/envoy/type/matcher/v3/metadata_pb.rb | 28 + lib/envoy/type/matcher/v3/node_pb.rb | 27 + lib/envoy/type/matcher/v3/number_pb.rb | 27 + lib/envoy/type/matcher/v3/path_pb.rb | 27 + lib/envoy/type/matcher/v3/regex_pb.rb | 30 + .../type/matcher/v3/status_code_input_pb.rb | 25 + lib/envoy/type/matcher/v3/string_pb.rb | 29 + lib/envoy/type/matcher/v3/struct_pb.rb | 28 + lib/envoy/type/matcher/v3/value_pb.rb | 31 + lib/envoy/type/metadata/v3/metadata_pb.rb | 32 + lib/envoy/type/v3/hash_policy_pb.rb | 26 + lib/envoy/type/v3/http_pb.rb | 22 + lib/envoy/type/v3/http_status_pb.rb | 25 + lib/envoy/type/v3/percent_pb.rb | 26 + lib/envoy/type/v3/range_pb.rb | 25 + lib/envoy/type/v3/ratelimit_strategy_pb.rb | 28 + lib/envoy/type/v3/ratelimit_unit_pb.rb | 22 + lib/envoy/type/v3/semantic_version_pb.rb | 23 + lib/envoy/type/v3/token_bucket_pb.rb | 26 + lib/google/protobuf/any_pb.rb | 18 + lib/google/protobuf/duration_pb.rb | 18 + lib/google/protobuf/empty_pb.rb | 18 + lib/google/protobuf/struct_pb.rb | 21 + lib/google/protobuf/timestamp_pb.rb | 18 + lib/google/protobuf/wrappers_pb.rb | 26 + lib/google/rpc/status_pb.rb | 20 + lib/udpa/annotations/migrate_pb.rb | 22 + lib/udpa/annotations/security_pb.rb | 23 + lib/udpa/annotations/sensitive_pb.rb | 19 + lib/udpa/annotations/status_pb.rb | 21 + lib/udpa/annotations/versioning_pb.rb | 20 + lib/validate/validate_pb.rb | 43 + lib/xds/annotations/v3/status_pb.rb | 26 + lib/xds/core/v3/authority_pb.rb | 23 + lib/xds/core/v3/cidr_pb.rb | 24 + 
lib/xds/core/v3/collection_entry_pb.rb | 26 + lib/xds/core/v3/context_params_pb.rb | 22 + lib/xds/core/v3/extension_pb.rb | 23 + lib/xds/core/v3/resource_locator_pb.rb | 26 + lib/xds/core/v3/resource_name_pb.rb | 24 + lib/xds/core/v3/resource_pb.rb | 24 + lib/xds/type/matcher/v3/domain_pb.rb | 27 + lib/xds/type/matcher/v3/http_inputs_pb.rb | 22 + lib/xds/type/matcher/v3/ip_pb.rb | 28 + lib/xds/type/matcher/v3/matcher_pb.rb | 34 + lib/xds/type/matcher/v3/range_pb.rb | 31 + lib/xds/type/matcher/v3/regex_pb.rb | 25 + lib/xds/type/matcher/v3/string_pb.rb | 27 + proto/README.md | 70 + proto/envoy/annotations/deprecation.proto | 34 + proto/envoy/annotations/resource.proto | 19 + proto/envoy/config/README.md | 3 + proto/envoy/config/cluster/v3/BUILD | 18 + .../config/cluster/v3/circuit_breaker.proto | 121 + proto/envoy/config/cluster/v3/cluster.proto | 1407 ++++++++ proto/envoy/config/cluster/v3/filter.proto | 40 + .../config/cluster/v3/outlier_detection.proto | 180 + proto/envoy/config/core/v3/BUILD | 16 + proto/envoy/config/core/v3/address.proto | 214 ++ proto/envoy/config/core/v3/backoff.proto | 37 + proto/envoy/config/core/v3/base.proto | 662 ++++ proto/envoy/config/core/v3/cel.proto | 63 + .../envoy/config/core/v3/config_source.proto | 283 ++ .../config/core/v3/event_service_config.proto | 29 + proto/envoy/config/core/v3/extension.proto | 32 + .../config/core/v3/grpc_method_list.proto | 33 + proto/envoy/config/core/v3/grpc_service.proto | 355 ++ proto/envoy/config/core/v3/health_check.proto | 443 +++ proto/envoy/config/core/v3/http_service.proto | 35 + proto/envoy/config/core/v3/http_uri.proto | 58 + proto/envoy/config/core/v3/protocol.proto | 807 +++++ .../envoy/config/core/v3/proxy_protocol.proto | 114 + proto/envoy/config/core/v3/resolver.proto | 36 + .../config/core/v3/socket_cmsg_headers.proto | 28 + .../envoy/config/core/v3/socket_option.proto | 108 + .../core/v3/substitution_format_string.proto | 136 + .../config/core/v3/udp_socket_config.proto | 32 + 
proto/envoy/config/endpoint/v3/BUILD | 16 + proto/envoy/config/endpoint/v3/endpoint.proto | 137 + .../endpoint/v3/endpoint_components.proto | 229 ++ .../config/endpoint/v3/load_report.proto | 220 ++ proto/envoy/config/listener/v3/BUILD | 18 + .../config/listener/v3/api_listener.proto | 34 + proto/envoy/config/listener/v3/listener.proto | 455 +++ .../listener/v3/listener_components.proto | 353 ++ .../config/listener/v3/quic_config.proto | 108 + .../listener/v3/udp_listener_config.proto | 52 + proto/envoy/config/route/v3/BUILD | 19 + proto/envoy/config/route/v3/route.proto | 172 + .../config/route/v3/route_components.proto | 2918 +++++++++++++++++ .../envoy/config/route/v3/scoped_route.proto | 133 + .../extensions/transport_sockets/tls/v3/BUILD | 14 + .../transport_sockets/tls/v3/cert.proto | 12 + .../transport_sockets/tls/v3/common.proto | 597 ++++ .../transport_sockets/tls/v3/secret.proto | 61 + .../transport_sockets/tls/v3/tls.proto | 366 +++ .../tls/v3/tls_spiffe_validator_config.proto | 67 + proto/envoy/service/README.md | 3 + proto/envoy/service/discovery/v3/BUILD | 13 + proto/envoy/service/discovery/v3/ads.proto | 44 + .../service/discovery/v3/discovery.proto | 443 +++ proto/envoy/type/BUILD | 9 + proto/envoy/type/hash_policy.proto | 28 + proto/envoy/type/http.proto | 24 + proto/envoy/type/http_status.proto | 140 + proto/envoy/type/matcher/v3/address.proto | 22 + .../envoy/type/matcher/v3/filter_state.proto | 33 + proto/envoy/type/matcher/v3/http_inputs.proto | 71 + proto/envoy/type/matcher/v3/metadata.proto | 110 + proto/envoy/type/matcher/v3/node.proto | 29 + proto/envoy/type/matcher/v3/number.proto | 33 + proto/envoy/type/matcher/v3/path.proto | 31 + proto/envoy/type/matcher/v3/regex.proto | 97 + .../type/matcher/v3/status_code_input.proto | 23 + proto/envoy/type/matcher/v3/string.proto | 94 + proto/envoy/type/matcher/v3/struct.proto | 91 + proto/envoy/type/matcher/v3/value.proto | 80 + proto/envoy/type/metadata/v3/metadata.proto | 117 + 
proto/envoy/type/percent.proto | 52 + proto/envoy/type/range.proto | 43 + proto/envoy/type/semantic_version.proto | 24 + proto/envoy/type/token_bucket.proto | 36 + proto/envoy/type/v3/BUILD | 12 + proto/envoy/type/v3/hash_policy.proto | 43 + proto/envoy/type/v3/http.proto | 24 + proto/envoy/type/v3/http_status.proto | 199 ++ proto/envoy/type/v3/percent.proto | 57 + proto/envoy/type/v3/range.proto | 50 + proto/envoy/type/v3/ratelimit_strategy.proto | 79 + proto/envoy/type/v3/ratelimit_unit.proto | 37 + proto/envoy/type/v3/semantic_version.proto | 27 + proto/envoy/type/v3/token_bucket.proto | 39 + proto/google/protobuf/any.proto | 162 + proto/google/protobuf/duration.proto | 115 + proto/google/protobuf/empty.proto | 51 + proto/google/protobuf/struct.proto | 95 + proto/google/protobuf/timestamp.proto | 145 + proto/google/protobuf/wrappers.proto | 157 + proto/google/rpc/status.proto | 47 + proto/udpa/annotations/migrate.proto | 49 + proto/udpa/annotations/security.proto | 31 + proto/udpa/annotations/sensitive.proto | 14 + proto/udpa/annotations/status.proto | 34 + proto/udpa/annotations/versioning.proto | 17 + proto/validate/validate.proto | 862 +++++ proto/xds/annotations/v3/migrate.proto | 46 + proto/xds/annotations/v3/security.proto | 30 + proto/xds/annotations/v3/sensitive.proto | 16 + proto/xds/annotations/v3/status.proto | 59 + proto/xds/annotations/v3/versioning.proto | 20 + proto/xds/core/v3/authority.proto | 22 + proto/xds/core/v3/cidr.proto | 25 + proto/xds/core/v3/collection_entry.proto | 55 + proto/xds/core/v3/context_params.proto | 23 + proto/xds/core/v3/extension.proto | 26 + proto/xds/core/v3/resource.proto | 29 + proto/xds/core/v3/resource_locator.proto | 118 + proto/xds/core/v3/resource_name.proto | 42 + proto/xds/type/matcher/v3/cel.proto | 37 + proto/xds/type/matcher/v3/domain.proto | 46 + proto/xds/type/matcher/v3/http_inputs.proto | 23 + proto/xds/type/matcher/v3/ip.proto | 53 + proto/xds/type/matcher/v3/matcher.proto | 144 + 
proto/xds/type/matcher/v3/range.proto | 69 + proto/xds/type/matcher/v3/regex.proto | 46 + proto/xds/type/matcher/v3/string.proto | 71 + proto/xds/type/v3/cel.proto | 77 + proto/xds/type/v3/range.proto | 40 + proto/xds/type/v3/typed_struct.proto | 44 + spanner_integration.md | 309 -- xds.md | 1458 -------- xds/Dockerfile.backend | 6 +- xds/Dockerfile.control-plane | 23 + xds/backend_server.rb | 32 +- xds/docker-compose.yaml | 29 +- xds/go.mod | 8 + xds/readme.md | 27 +- xds/test/async/grpc/xds/client.rb | 135 +- xds/test_server.go | 178 + xds/update_protos.sh | 123 + 229 files changed, 21204 insertions(+), 1859 deletions(-) create mode 100644 .github/workflows/test-xds.yaml create mode 100644 bake/async/grpc/xds.rb delete mode 100644 code.md create mode 100644 lib/async/grpc/xds.rb create mode 100644 lib/async/grpc/xds/ads_stream.rb create mode 100644 lib/async/grpc/xds/client.rb create mode 100644 lib/async/grpc/xds/context.rb create mode 100644 lib/async/grpc/xds/discovery_client.rb create mode 100644 lib/async/grpc/xds/health_checker.rb create mode 100644 lib/async/grpc/xds/load_balancer.rb create mode 100644 lib/async/grpc/xds/resource_cache.rb create mode 100644 lib/async/grpc/xds/resources.rb create mode 100644 lib/envoy.rb create mode 100644 lib/envoy/annotations/deprecation_pb.rb create mode 100644 lib/envoy/config/cluster/v3/circuit_breaker_pb.rb create mode 100644 lib/envoy/config/cluster/v3/cluster_pb.rb create mode 100644 lib/envoy/config/cluster/v3/filter_pb.rb create mode 100644 lib/envoy/config/cluster/v3/outlier_detection_pb.rb create mode 100644 lib/envoy/config/core/v3/address_pb.rb create mode 100644 lib/envoy/config/core/v3/backoff_pb.rb create mode 100644 lib/envoy/config/core/v3/base_pb.rb create mode 100644 lib/envoy/config/core/v3/cel_pb.rb create mode 100644 lib/envoy/config/core/v3/config_source_pb.rb create mode 100644 lib/envoy/config/core/v3/event_service_config_pb.rb create mode 100644 lib/envoy/config/core/v3/extension_pb.rb create 
mode 100644 lib/envoy/config/core/v3/grpc_method_list_pb.rb create mode 100644 lib/envoy/config/core/v3/grpc_service_pb.rb create mode 100644 lib/envoy/config/core/v3/health_check_pb.rb create mode 100644 lib/envoy/config/core/v3/http_service_pb.rb create mode 100644 lib/envoy/config/core/v3/http_uri_pb.rb create mode 100644 lib/envoy/config/core/v3/protocol_pb.rb create mode 100644 lib/envoy/config/core/v3/proxy_protocol_pb.rb create mode 100644 lib/envoy/config/core/v3/resolver_pb.rb create mode 100644 lib/envoy/config/core/v3/socket_cmsg_headers_pb.rb create mode 100644 lib/envoy/config/core/v3/socket_option_pb.rb create mode 100644 lib/envoy/config/core/v3/substitution_format_string_pb.rb create mode 100644 lib/envoy/config/core/v3/udp_socket_config_pb.rb create mode 100644 lib/envoy/config/endpoint/v3/endpoint_components_pb.rb create mode 100644 lib/envoy/config/endpoint/v3/endpoint_pb.rb create mode 100644 lib/envoy/config/endpoint/v3/load_report_pb.rb create mode 100644 lib/envoy/service/discovery/v3/ads_pb.rb create mode 100644 lib/envoy/service/discovery/v3/aggregated_discovery_service.rb create mode 100644 lib/envoy/service/discovery/v3/discovery_pb.rb create mode 100644 lib/envoy/type/matcher/v3/address_pb.rb create mode 100644 lib/envoy/type/matcher/v3/filter_state_pb.rb create mode 100644 lib/envoy/type/matcher/v3/http_inputs_pb.rb create mode 100644 lib/envoy/type/matcher/v3/metadata_pb.rb create mode 100644 lib/envoy/type/matcher/v3/node_pb.rb create mode 100644 lib/envoy/type/matcher/v3/number_pb.rb create mode 100644 lib/envoy/type/matcher/v3/path_pb.rb create mode 100644 lib/envoy/type/matcher/v3/regex_pb.rb create mode 100644 lib/envoy/type/matcher/v3/status_code_input_pb.rb create mode 100644 lib/envoy/type/matcher/v3/string_pb.rb create mode 100644 lib/envoy/type/matcher/v3/struct_pb.rb create mode 100644 lib/envoy/type/matcher/v3/value_pb.rb create mode 100644 lib/envoy/type/metadata/v3/metadata_pb.rb create mode 100644 
lib/envoy/type/v3/hash_policy_pb.rb create mode 100644 lib/envoy/type/v3/http_pb.rb create mode 100644 lib/envoy/type/v3/http_status_pb.rb create mode 100644 lib/envoy/type/v3/percent_pb.rb create mode 100644 lib/envoy/type/v3/range_pb.rb create mode 100644 lib/envoy/type/v3/ratelimit_strategy_pb.rb create mode 100644 lib/envoy/type/v3/ratelimit_unit_pb.rb create mode 100644 lib/envoy/type/v3/semantic_version_pb.rb create mode 100644 lib/envoy/type/v3/token_bucket_pb.rb create mode 100644 lib/google/protobuf/any_pb.rb create mode 100644 lib/google/protobuf/duration_pb.rb create mode 100644 lib/google/protobuf/empty_pb.rb create mode 100644 lib/google/protobuf/struct_pb.rb create mode 100644 lib/google/protobuf/timestamp_pb.rb create mode 100644 lib/google/protobuf/wrappers_pb.rb create mode 100644 lib/google/rpc/status_pb.rb create mode 100644 lib/udpa/annotations/migrate_pb.rb create mode 100644 lib/udpa/annotations/security_pb.rb create mode 100644 lib/udpa/annotations/sensitive_pb.rb create mode 100644 lib/udpa/annotations/status_pb.rb create mode 100644 lib/udpa/annotations/versioning_pb.rb create mode 100644 lib/validate/validate_pb.rb create mode 100644 lib/xds/annotations/v3/status_pb.rb create mode 100644 lib/xds/core/v3/authority_pb.rb create mode 100644 lib/xds/core/v3/cidr_pb.rb create mode 100644 lib/xds/core/v3/collection_entry_pb.rb create mode 100644 lib/xds/core/v3/context_params_pb.rb create mode 100644 lib/xds/core/v3/extension_pb.rb create mode 100644 lib/xds/core/v3/resource_locator_pb.rb create mode 100644 lib/xds/core/v3/resource_name_pb.rb create mode 100644 lib/xds/core/v3/resource_pb.rb create mode 100644 lib/xds/type/matcher/v3/domain_pb.rb create mode 100644 lib/xds/type/matcher/v3/http_inputs_pb.rb create mode 100644 lib/xds/type/matcher/v3/ip_pb.rb create mode 100644 lib/xds/type/matcher/v3/matcher_pb.rb create mode 100644 lib/xds/type/matcher/v3/range_pb.rb create mode 100644 lib/xds/type/matcher/v3/regex_pb.rb create mode 100644 
lib/xds/type/matcher/v3/string_pb.rb create mode 100644 proto/README.md create mode 100644 proto/envoy/annotations/deprecation.proto create mode 100644 proto/envoy/annotations/resource.proto create mode 100644 proto/envoy/config/README.md create mode 100644 proto/envoy/config/cluster/v3/BUILD create mode 100644 proto/envoy/config/cluster/v3/circuit_breaker.proto create mode 100644 proto/envoy/config/cluster/v3/cluster.proto create mode 100644 proto/envoy/config/cluster/v3/filter.proto create mode 100644 proto/envoy/config/cluster/v3/outlier_detection.proto create mode 100644 proto/envoy/config/core/v3/BUILD create mode 100644 proto/envoy/config/core/v3/address.proto create mode 100644 proto/envoy/config/core/v3/backoff.proto create mode 100644 proto/envoy/config/core/v3/base.proto create mode 100644 proto/envoy/config/core/v3/cel.proto create mode 100644 proto/envoy/config/core/v3/config_source.proto create mode 100644 proto/envoy/config/core/v3/event_service_config.proto create mode 100644 proto/envoy/config/core/v3/extension.proto create mode 100644 proto/envoy/config/core/v3/grpc_method_list.proto create mode 100644 proto/envoy/config/core/v3/grpc_service.proto create mode 100644 proto/envoy/config/core/v3/health_check.proto create mode 100644 proto/envoy/config/core/v3/http_service.proto create mode 100644 proto/envoy/config/core/v3/http_uri.proto create mode 100644 proto/envoy/config/core/v3/protocol.proto create mode 100644 proto/envoy/config/core/v3/proxy_protocol.proto create mode 100644 proto/envoy/config/core/v3/resolver.proto create mode 100644 proto/envoy/config/core/v3/socket_cmsg_headers.proto create mode 100644 proto/envoy/config/core/v3/socket_option.proto create mode 100644 proto/envoy/config/core/v3/substitution_format_string.proto create mode 100644 proto/envoy/config/core/v3/udp_socket_config.proto create mode 100644 proto/envoy/config/endpoint/v3/BUILD create mode 100644 proto/envoy/config/endpoint/v3/endpoint.proto create mode 100644 
proto/envoy/config/endpoint/v3/endpoint_components.proto create mode 100644 proto/envoy/config/endpoint/v3/load_report.proto create mode 100644 proto/envoy/config/listener/v3/BUILD create mode 100644 proto/envoy/config/listener/v3/api_listener.proto create mode 100644 proto/envoy/config/listener/v3/listener.proto create mode 100644 proto/envoy/config/listener/v3/listener_components.proto create mode 100644 proto/envoy/config/listener/v3/quic_config.proto create mode 100644 proto/envoy/config/listener/v3/udp_listener_config.proto create mode 100644 proto/envoy/config/route/v3/BUILD create mode 100644 proto/envoy/config/route/v3/route.proto create mode 100644 proto/envoy/config/route/v3/route_components.proto create mode 100644 proto/envoy/config/route/v3/scoped_route.proto create mode 100644 proto/envoy/extensions/transport_sockets/tls/v3/BUILD create mode 100644 proto/envoy/extensions/transport_sockets/tls/v3/cert.proto create mode 100644 proto/envoy/extensions/transport_sockets/tls/v3/common.proto create mode 100644 proto/envoy/extensions/transport_sockets/tls/v3/secret.proto create mode 100644 proto/envoy/extensions/transport_sockets/tls/v3/tls.proto create mode 100644 proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto create mode 100644 proto/envoy/service/README.md create mode 100644 proto/envoy/service/discovery/v3/BUILD create mode 100644 proto/envoy/service/discovery/v3/ads.proto create mode 100644 proto/envoy/service/discovery/v3/discovery.proto create mode 100644 proto/envoy/type/BUILD create mode 100644 proto/envoy/type/hash_policy.proto create mode 100644 proto/envoy/type/http.proto create mode 100644 proto/envoy/type/http_status.proto create mode 100644 proto/envoy/type/matcher/v3/address.proto create mode 100644 proto/envoy/type/matcher/v3/filter_state.proto create mode 100644 proto/envoy/type/matcher/v3/http_inputs.proto create mode 100644 proto/envoy/type/matcher/v3/metadata.proto create mode 100644 
proto/envoy/type/matcher/v3/node.proto create mode 100644 proto/envoy/type/matcher/v3/number.proto create mode 100644 proto/envoy/type/matcher/v3/path.proto create mode 100644 proto/envoy/type/matcher/v3/regex.proto create mode 100644 proto/envoy/type/matcher/v3/status_code_input.proto create mode 100644 proto/envoy/type/matcher/v3/string.proto create mode 100644 proto/envoy/type/matcher/v3/struct.proto create mode 100644 proto/envoy/type/matcher/v3/value.proto create mode 100644 proto/envoy/type/metadata/v3/metadata.proto create mode 100644 proto/envoy/type/percent.proto create mode 100644 proto/envoy/type/range.proto create mode 100644 proto/envoy/type/semantic_version.proto create mode 100644 proto/envoy/type/token_bucket.proto create mode 100644 proto/envoy/type/v3/BUILD create mode 100644 proto/envoy/type/v3/hash_policy.proto create mode 100644 proto/envoy/type/v3/http.proto create mode 100644 proto/envoy/type/v3/http_status.proto create mode 100644 proto/envoy/type/v3/percent.proto create mode 100644 proto/envoy/type/v3/range.proto create mode 100644 proto/envoy/type/v3/ratelimit_strategy.proto create mode 100644 proto/envoy/type/v3/ratelimit_unit.proto create mode 100644 proto/envoy/type/v3/semantic_version.proto create mode 100644 proto/envoy/type/v3/token_bucket.proto create mode 100644 proto/google/protobuf/any.proto create mode 100644 proto/google/protobuf/duration.proto create mode 100644 proto/google/protobuf/empty.proto create mode 100644 proto/google/protobuf/struct.proto create mode 100644 proto/google/protobuf/timestamp.proto create mode 100644 proto/google/protobuf/wrappers.proto create mode 100644 proto/google/rpc/status.proto create mode 100644 proto/udpa/annotations/migrate.proto create mode 100644 proto/udpa/annotations/security.proto create mode 100644 proto/udpa/annotations/sensitive.proto create mode 100644 proto/udpa/annotations/status.proto create mode 100644 proto/udpa/annotations/versioning.proto create mode 100644 
proto/validate/validate.proto create mode 100644 proto/xds/annotations/v3/migrate.proto create mode 100644 proto/xds/annotations/v3/security.proto create mode 100644 proto/xds/annotations/v3/sensitive.proto create mode 100644 proto/xds/annotations/v3/status.proto create mode 100644 proto/xds/annotations/v3/versioning.proto create mode 100644 proto/xds/core/v3/authority.proto create mode 100644 proto/xds/core/v3/cidr.proto create mode 100644 proto/xds/core/v3/collection_entry.proto create mode 100644 proto/xds/core/v3/context_params.proto create mode 100644 proto/xds/core/v3/extension.proto create mode 100644 proto/xds/core/v3/resource.proto create mode 100644 proto/xds/core/v3/resource_locator.proto create mode 100644 proto/xds/core/v3/resource_name.proto create mode 100644 proto/xds/type/matcher/v3/cel.proto create mode 100644 proto/xds/type/matcher/v3/domain.proto create mode 100644 proto/xds/type/matcher/v3/http_inputs.proto create mode 100644 proto/xds/type/matcher/v3/ip.proto create mode 100644 proto/xds/type/matcher/v3/matcher.proto create mode 100644 proto/xds/type/matcher/v3/range.proto create mode 100644 proto/xds/type/matcher/v3/regex.proto create mode 100644 proto/xds/type/matcher/v3/string.proto create mode 100644 proto/xds/type/v3/cel.proto create mode 100644 proto/xds/type/v3/range.proto create mode 100644 proto/xds/type/v3/typed_struct.proto delete mode 100644 spanner_integration.md delete mode 100644 xds.md create mode 100644 xds/Dockerfile.control-plane create mode 100644 xds/go.mod create mode 100644 xds/test_server.go create mode 100755 xds/update_protos.sh diff --git a/.github/workflows/test-xds.yaml b/.github/workflows/test-xds.yaml new file mode 100644 index 0000000..784188b --- /dev/null +++ b/.github/workflows/test-xds.yaml @@ -0,0 +1,36 @@ +name: Test xDS + +on: [push, pull_request] + +permissions: + contents: read + +env: + CONSOLE_OUTPUT: XTerm + +jobs: + test: + name: ${{matrix.ruby}} on ${{matrix.os}} + runs-on: ${{matrix.os}}-latest + 
continue-on-error: ${{matrix.experimental}} + + strategy: + matrix: + os: + - ubuntu + + ruby: + - "3.3" + - "3.4" + - "4.0" + + experimental: [false] + + steps: + - uses: actions/checkout@v6 + + - name: Run tests + timeout-minutes: 15 + env: + RUBY_VERSION: ${{matrix.ruby}} + run: docker compose -f xds/docker-compose.yaml up --exit-code-from tests diff --git a/async-grpc.gemspec b/async-grpc.gemspec index 967e771..14d9d6a 100644 --- a/async-grpc.gemspec +++ b/async-grpc.gemspec @@ -24,6 +24,7 @@ Gem::Specification.new do |spec| spec.required_ruby_version = ">= 3.3" + spec.add_dependency "async", ">= 2.38.0" spec.add_dependency "async-http" spec.add_dependency "protocol-http", "~> 0.60" spec.add_dependency "protocol-grpc", "~> 0.11.0" diff --git a/bake/async/grpc/xds.rb b/bake/async/grpc/xds.rb new file mode 100644 index 0000000..077376f --- /dev/null +++ b/bake/async/grpc/xds.rb @@ -0,0 +1,116 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. 
+ +# Generate Ruby protobuf classes from Envoy .proto files +# @parameter proto_dir [String] Directory containing .proto files (default: "proto") +# @parameter output_dir [String] Output directory for generated Ruby files (default: "lib") +def generate_protos(proto_dir: "proto", output_dir: "lib") + require "fileutils" + + proto_dir = File.expand_path(proto_dir) + output_dir = File.expand_path(output_dir) + + # Core discovery service files (most important) + discovery_files = [ + "envoy/service/discovery/v3/discovery.proto", + "envoy/service/discovery/v3/ads.proto" + ] + + # Core config files needed for discovery + config_files = [ + "envoy/config/core/v3/base.proto", + "envoy/config/core/v3/address.proto", + "envoy/config/core/v3/config_source.proto", + "envoy/config/cluster/v3/cluster.proto", + "envoy/config/endpoint/v3/endpoint.proto" + ] + + # Google protobuf well-known types + google_files = [ + "google/protobuf/any.proto", + "google/protobuf/duration.proto", + "google/protobuf/timestamp.proto", + "google/protobuf/struct.proto", + "google/protobuf/empty.proto", + "google/protobuf/wrappers.proto", + "google/rpc/status.proto" + ] + + all_files = discovery_files + config_files + google_files + + # Create output directories + FileUtils.mkdir_p(output_dir) + + # Generate Ruby code + all_files.each do |proto_file| + full_path = File.join(proto_dir, proto_file) + next unless File.exist?(full_path) + + Console.info{"Generating #{proto_file}..."} + + system( + "protoc", + "--ruby_out=#{output_dir}", + "--proto_path=#{proto_dir}", + "--proto_path=#{File.join(proto_dir, 'google')}", + full_path, + out: File::NULL, + err: File::NULL + ) or begin + Console.warn{"Failed to generate #{proto_file} (may have missing dependencies)"} + end + end + + # Count generated files + generated = Dir.glob(File.join(output_dir, "**/*_pb.rb")).count + + Console.info{"Generated #{generated} protobuf Ruby files in #{output_dir}"} +end + +# Generate all protobuf files (including optional 
dependencies) +# This will attempt to generate all .proto files, even if some fail +# @parameter proto_dir [String] Directory containing .proto files (default: "proto") +# @parameter output_dir [String] Output directory for generated Ruby files (default: "lib") +def generate_all_protos(proto_dir: "proto", output_dir: "lib") + require "fileutils" + + proto_dir = File.expand_path(proto_dir) + output_dir = File.expand_path(output_dir) + + # Find all .proto files + proto_files = Dir.glob(File.join(proto_dir, "**/*.proto")) + + Console.info{"Found #{proto_files.count} .proto files"} + + # Generate each file + success_count = 0 + fail_count = 0 + + proto_files.each do |proto_file| + relative_path = proto_file.sub(/^#{proto_dir}\//, "") + + Console.debug{"Generating #{relative_path}..."} + + if system( + "protoc", + "--ruby_out=#{output_dir}", + "--proto_path=#{proto_dir}", + "--proto_path=#{File.join(proto_dir, 'google')}", + proto_file, + out: File::NULL, + err: File::NULL + ) + success_count += 1 + else + fail_count += 1 + Console.debug{"Failed: #{relative_path}"} + end + end + + # Count generated files + generated = Dir.glob(File.join(output_dir, "**/*_pb.rb")).count + + Console.info{"Generated #{generated} protobuf Ruby files (#{success_count} succeeded, #{fail_count} failed)"} +end diff --git a/code.md b/code.md deleted file mode 100644 index e69de29..0000000 diff --git a/lib/async/grpc/client.rb b/lib/async/grpc/client.rb index db2ea50..ee2b3a5 100644 --- a/lib/async/grpc/client.rb +++ b/lib/async/grpc/client.rb @@ -113,11 +113,12 @@ def call(request) # @parameter metadata [Hash] Custom metadata headers # @parameter timeout [Numeric | Nil] Optional timeout in seconds # @parameter encoding [String | Nil] Optional compression encoding + # @parameter initial [Object | Array] Optional initial message(s) to send with the request body for bidirectional streaming (avoids deadlock when server waits for first message) # @yields {|input, output| ...} Block for streaming 
calls # @returns [Object | Protocol::GRPC::Body::ReadableBody] Response message or readable body for streaming # @raises [ArgumentError] If method is unknown or streaming type is invalid # @raises [Protocol::GRPC::Error] If the gRPC call fails - def invoke(service, method, request = nil, metadata: {}, timeout: nil, encoding: nil, &block) + def invoke(service, method, request = nil, metadata: {}, timeout: nil, encoding: nil, initial: nil, &block) rpc = service.class.lookup_rpc(method) raise ArgumentError, "Unknown method: #{method}" unless rpc @@ -141,7 +142,7 @@ def invoke(service, method, request = nil, metadata: {}, timeout: nil, encoding: when :client_streaming client_streaming_call(path, headers, request_class, response_class, encoding, &block) when :bidirectional - bidirectional_call(path, headers, request_class, response_class, encoding, &block) + bidirectional_call(path, headers, request_class, response_class, encoding, initial: initial, &block) else raise ArgumentError, "Unknown streaming type: #{streaming}" end @@ -273,14 +274,16 @@ def client_streaming_call(path, headers, request_class, response_class, encoding # @parameter request_class [Class] Request message class # @parameter response_class [Class] Response message class # @parameter encoding [String | Nil] Compression encoding + # @parameter initial [Object | Array | Nil] Optional initial message(s) to send with the request body (avoids deadlock when server waits for first message) # @yields {|input, output| ...} Block to handle bidirectional streaming # @returns [Protocol::GRPC::Body::ReadableBody] Readable body for streaming messages # @raises [Protocol::GRPC::Error] If the gRPC call fails - def bidirectional_call(path, headers, request_class, response_class, encoding, &block) + def bidirectional_call(path, headers, request_class, response_class, encoding, initial: nil, &block) body = Protocol::GRPC::Body::WritableBody.new( encoding: encoding, message_class: request_class ) + 
Array(initial).each{|message| body.write(message)} http_request = Protocol::HTTP::Request["POST", path, headers, body] response = call(http_request) diff --git a/lib/async/grpc/stub.rb b/lib/async/grpc/stub.rb index 7d0dca8..4d16993 100644 --- a/lib/async/grpc/stub.rb +++ b/lib/async/grpc/stub.rb @@ -47,13 +47,14 @@ def method_missing(method_name, *args, **options, &block) # Extract request from args (first positional argument): request = args.first - # Extract metadata, timeout, encoding from options: + # Extract metadata, timeout, encoding, initial from options: metadata = options.delete(:metadata) || {} timeout = options.delete(:timeout) encoding = options.delete(:encoding) + initial = options.delete(:initial) # Delegate to client.invoke with PascalCase method name (for interface lookup): - @client.invoke(@interface, interface_method_name, request, metadata: metadata, timeout: timeout, encoding: encoding, &block) + @client.invoke(@interface, interface_method_name, request, metadata: metadata, timeout: timeout, encoding: encoding, initial: initial, &block) else super end diff --git a/lib/async/grpc/xds.rb b/lib/async/grpc/xds.rb new file mode 100644 index 0000000..d89011f --- /dev/null +++ b/lib/async/grpc/xds.rb @@ -0,0 +1,37 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. + +# Load order matters - Context must be loaded before Client +require_relative "xds/resource_cache" +require_relative "xds/resources" +require_relative "xds/ads_stream" +require_relative "xds/discovery_client" +require_relative "xds/health_checker" +require_relative "xds/load_balancer" +require_relative "xds/context" +require_relative "xds/client" + +module Async + module GRPC + # xDS (Discovery Service) support for dynamic service discovery and configuration + # + # Provides dynamic service discovery and load balancing for gRPC clients + # using the xDS (Discovery Service) protocol. 
+ # + # @example Basic usage + # require "async/grpc/xds" + # + # bootstrap = { + # "xds_servers" => [{"server_uri" => "xds-control-plane:18000"}], + # "node" => {"id" => "client-1", "cluster" => "test"} + # } + # + # xds_client = Async::GRPC::XDS::Client.new("myservice", bootstrap: bootstrap) + # stub = xds_client.stub(MyServiceInterface, "myservice") + # response = stub.say_hello(request) + module XDS + end + end +end diff --git a/lib/async/grpc/xds/ads_stream.rb b/lib/async/grpc/xds/ads_stream.rb new file mode 100644 index 0000000..499fb28 --- /dev/null +++ b/lib/async/grpc/xds/ads_stream.rb @@ -0,0 +1,70 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. + +require "async" +require "async/grpc/client" +require "envoy/service/discovery/v3/aggregated_discovery_service" +require "envoy/service/discovery/v3/discovery_pb" +require "envoy/config/core/v3/base_pb" + +module Async + module GRPC + module XDS + # Encapsulates a single ADS (Aggregated Discovery Service) bidirectional stream. + # Owns the stream lifecycle and delegates events to a delegate object. + class ADSStream + # Interface for ADSStream delegates. Implement these methods to receive stream events. + module Delegate + # Called when a DiscoveryResponse is received from the server. + # @parameter response [Envoy::Service::Discovery::V3::DiscoveryResponse] The discovery response + # @parameter stream [ADSStream] The stream instance; use stream.send(request) to send ACKs or new requests + def discovery_response(response, stream) + end + end + + def initialize(client, node, delegate:) + @client = client + @node = node + @delegate = delegate + @body = nil + end + + # Send a DiscoveryRequest on the stream. Call from within discovery_response to send ACKs. + # @parameter request [Envoy::Service::Discovery::V3::DiscoveryRequest] The request to send + def send(request) + @body&.write(request) + end + + # Run the ADS stream. 
Blocks until the stream completes or errors. + # @parameter initial [Object | Array | Nil] Initial message(s) to send (defaults to node-only request if nil/empty) + def run(initial: nil) + service = Envoy::Service::Discovery::V3::AggregatedDiscoveryService.new( + "envoy.service.discovery.v3.AggregatedDiscoveryService" + ) + + initial = Array(initial).any? ? initial : [Envoy::Service::Discovery::V3::DiscoveryRequest.new(node: @node)] + + @client.invoke(service, :StreamAggregatedResources, nil, initial: initial) do |body, readable_body| + @body = body + @delegate.stream_opened(self) if @delegate.respond_to?(:stream_opened) + + begin + readable_body.each do |response| + @delegate.discovery_response(response, self) + end + ensure + @delegate.stream_closed(self) if @delegate.respond_to?(:stream_closed) + @body = nil + end + end + rescue => error + @delegate.stream_error(self, error) if @delegate.respond_to?(:stream_error) + Console.error(self, "Failed while streaming updates!", exception: error) + raise + end + end + end + end +end diff --git a/lib/async/grpc/xds/client.rb b/lib/async/grpc/xds/client.rb new file mode 100644 index 0000000..6fc809e --- /dev/null +++ b/lib/async/grpc/xds/client.rb @@ -0,0 +1,241 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. 
+ +require "async" +require "async/http/client" +require "async/http/endpoint" +require "protocol/http" +require "protocol/grpc" +require_relative "../stub" +require_relative "context" +require_relative "load_balancer" + +module Async + module GRPC + module XDS + # Wrapper client for xDS-enabled gRPC connections + # Follows the same pattern as Async::Redis::SentinelClient and ClusterClient + class Client < Protocol::HTTP::Middleware + # Raised when xDS configuration cannot be loaded + ConfigurationError = Context::ConfigurationError + + # Raised when no endpoints are available + class NoEndpointsError < StandardError + end + + # Raised when cluster configuration cannot be reloaded + ReloadError = Context::ReloadError + + # Create a new xDS client + # @parameter service_name [String] Target service name (e.g., "myservice") + # @parameter bootstrap [Hash, String, nil] Bootstrap config (hash, file path, or nil for default) + # @parameter headers [Protocol::HTTP::Headers] Default headers + # @parameter options [Hash] Additional options passed to underlying clients + def initialize(service_name, bootstrap: nil, headers: Protocol::HTTP::Headers.new, node: nil, **options) + @service_name = service_name + @bootstrap = load_bootstrap(bootstrap) + @headers = headers + @options = options + + @context = Context.new(@bootstrap, node: node) + @load_balancer = nil + @clients = {} # Cache clients per endpoint (like ClusterClient caches node.client) + @mutex = Mutex.new + end + + # Resolve endpoints lazily (like SentinelClient.resolve_address) + # @returns [Array] Available endpoints + def resolve_endpoints + @mutex.synchronize do + unless @load_balancer + # Discover cluster via CDS + cluster = @context.discover_cluster(@service_name) + + # Discover endpoints via EDS + endpoints = @context.discover_endpoints(cluster) + + raise NoEndpointsError, "No endpoints discovered for #{@service_name}" if endpoints.empty? 
+ + # Create load balancer + @load_balancer = LoadBalancer.new(cluster, endpoints) + + # Set load balancer reference in context for endpoint updates + @context.load_balancer = @load_balancer + end + + @load_balancer.healthy_endpoints + end + rescue Context::ReloadError => error + raise NoEndpointsError, "No endpoints discovered for #{@service_name}", cause: error + end + + # Get a client for making calls (like ClusterClient.client_for) + # Resolves endpoints lazily and picks one via load balancer + # @returns [Array(Async::GRPC::Client, Async::HTTP::Endpoint)] Client and endpoint for request tracking + def client_for_call + endpoints = resolve_endpoints + raise NoEndpointsError, "No endpoints available for #{@service_name}" if endpoints.empty? + + # Pick endpoint via load balancer + endpoint = @load_balancer.pick + raise NoEndpointsError, "No healthy endpoints available" unless endpoint + + # Cache client per endpoint (like ClusterClient caches node.client) + client = @clients[endpoint] ||= begin + http_client = Async::HTTP::Client.new(endpoint, **@options) + Async::GRPC::Client.new(http_client, headers: @headers) + end + [client, endpoint] + end + + # Implement Protocol::HTTP::Middleware interface + # This allows XDS::Client to be used anywhere Async::GRPC::Client is used + # @parameter request [Protocol::HTTP::Request] The HTTP request + # @returns [Protocol::HTTP::Response] The HTTP response + def call(request, attempts: 3) + client, endpoint = client_for_call + @load_balancer.record_request_start(endpoint) + begin + client.call(request) + rescue Protocol::GRPC::Error => error + # Handle endpoint changes (like ClusterClient handles MOVED/ASK) + if error.status_code == Protocol::GRPC::Status::UNAVAILABLE + Console.warn(self, error) + + # Invalidate cache, reload configuration + invalidate_cache! 
+ + attempts -= 1 + retry if attempts > 0 + end + + raise + rescue => error + # Network errors might indicate endpoint failure + Console.warn(self, error) + + # Invalidate this specific endpoint + invalidate_endpoint(client) + + attempts -= 1 + retry if attempts > 0 + + raise + end + ensure + @load_balancer&.record_request_end(endpoint) + end + + # Create a stub for the given interface. + # Same API as Async::GRPC::Client - load balancing happens per RPC call. + # @parameter interface_class [Class] Interface class (subclass of Protocol::GRPC::Interface) + # @parameter service_name [String] Service name (e.g., "hello.Greeter") + # @returns [Async::GRPC::Stub] Stub object with methods for each RPC + def stub(interface_class, service_name) + interface = interface_class.new(service_name) + Stub.new(self, interface) + end + + # Invoke an RPC (called by Stub). Load balances per call. + # @parameter service [Protocol::GRPC::Interface] Interface instance + # @parameter method [Symbol, String] Method name + # @parameter request [Object | Nil] Request message + # @parameter metadata [Hash] Custom metadata headers + # @parameter timeout [Numeric | Nil] Optional timeout in seconds + # @parameter encoding [String | Nil] Optional compression encoding + # @parameter initial [Object | Array] Optional initial message(s) for bidirectional streaming + # @yields {|input, output| ...} Block for streaming calls + # @returns [Object | Protocol::GRPC::Body::ReadableBody] Response message or readable body + def invoke(service, method, request = nil, metadata: {}, timeout: nil, encoding: nil, initial: nil, attempts: 3, &block) + client, endpoint = client_for_call + @load_balancer.record_request_start(endpoint) + begin + client.invoke(service, method, request, metadata: metadata, timeout: timeout, encoding: encoding, initial: initial, &block) + rescue Protocol::GRPC::Error => error + if error.status_code == Protocol::GRPC::Status::UNAVAILABLE + Console.warn(self, error) + invalidate_cache! 
+ attempts -= 1 + retry if attempts > 0 + end + raise + rescue => error + Console.warn(self, error) + invalidate_endpoint(client) + attempts -= 1 + retry if attempts > 0 + raise + end + ensure + @load_balancer&.record_request_end(endpoint) + end + + # Close xDS client and all connections + def close + @clients.each_value(&:close) + @clients.clear + @context.close + @load_balancer&.close + end + + private + + def load_bootstrap(bootstrap) + case bootstrap + when Hash + bootstrap + when String + load_bootstrap_file(bootstrap) + when nil + load_default_bootstrap + else + raise ArgumentError, "Invalid bootstrap: #{bootstrap.inspect}" + end + end + + def load_bootstrap_file(path) + raise ConfigurationError, "Bootstrap file not found: #{path}" unless File.exist?(path) + + require "json" + JSON.parse(File.read(path), symbolize_names: true) + rescue JSON::ParserError => error + raise ConfigurationError, "Invalid bootstrap JSON: #{error.message}" + end + + def load_default_bootstrap + # Try environment variable first + if path = ENV["GRPC_XDS_BOOTSTRAP"] + return load_bootstrap_file(path) + end + + # Try default location + default_path = File.expand_path("~/.config/grpc/bootstrap.json") + if File.exist?(default_path) + return load_bootstrap_file(default_path) + end + + raise ConfigurationError, "No bootstrap configuration found" + end + + def invalidate_cache! + @mutex.synchronize do + @clients.each_value(&:close) + @clients.clear + @load_balancer = nil + end + end + + def invalidate_endpoint(client) + @mutex.synchronize do + endpoint = @clients.key(client) + @load_balancer&.mark_unhealthy(endpoint) if endpoint + @clients.delete_if{|_, cached_client| cached_client == client} + client.close + end + end + end + end + end +end diff --git a/lib/async/grpc/xds/context.rb b/lib/async/grpc/xds/context.rb new file mode 100644 index 0000000..d2451de --- /dev/null +++ b/lib/async/grpc/xds/context.rb @@ -0,0 +1,201 @@ +# frozen_string_literal: true + +# Released under the MIT License. 
+# Copyright, 2025-2026, by Samuel Williams. + +require "async" +require_relative "discovery_client" +require_relative "resource_cache" +require_relative "resources" + +module Async + module GRPC + module XDS + # Manages xDS subscriptions and maintains discovered resource state + class Context + # Raised when configuration is invalid + class ConfigurationError < StandardError + end + + # Raised when cluster configuration cannot be reloaded + class ReloadError < StandardError + end + + # Initialize xDS context + # @parameter bootstrap [Hash] Bootstrap configuration + # @parameter node [Hash] Node information (id, cluster, metadata, locality) + def initialize(bootstrap, node: nil) + @bootstrap = bootstrap + xds_server = bootstrap[:xds_servers]&.first + raise ConfigurationError, "No xds_servers in bootstrap" unless xds_server + + @discovery_client = DiscoveryClient.new(xds_server, node: node) + @cache = ResourceCache.new + @subscriptions = {} # Track active subscriptions + @load_balancer = nil # Will be set by Client + @mutex = Mutex.new + @cluster_promises = {} # service_name -> Async::Promise (level-triggered: resolved value persists) + @endpoint_promises = {} # cluster_name -> Async::Promise + end + + # Set load balancer reference (called by Client) + # @parameter load_balancer [LoadBalancer] Load balancer instance + def load_balancer=(load_balancer) + @load_balancer = load_balancer + end + + # Discover cluster for service (like ClusterClient.reload_cluster!) 
+ # @parameter service_name [String] Service to discover + # @returns [Resources::Cluster] Cluster configuration + def discover_cluster(service_name) + @mutex.synchronize do + # Check cache first + if cluster = @cache.get_cluster(service_name) + return cluster + end + + # Subscribe to CDS if not already subscribed + unless @subscriptions[:cds] + @subscriptions[:cds] = subscribe_cds(service_name) + end + + # Subscribe to EDS for same name up front (EDS clusters use service name as cluster name) + # This avoids 10s delay between CDS and EDS - both requests go out together + subscription_key = :"eds_#{service_name}" + unless @subscriptions[subscription_key] + @subscriptions[subscription_key] = subscribe_eds(service_name) + end + end + return @cache.get_cluster(service_name) if @cache.get_cluster(service_name) + + # Wait for cluster (CDS response) + cluster = wait_for_cluster(service_name, timeout: 10) + raise ReloadError, "Failed to discover cluster: #{service_name}" unless cluster + cluster + end + + # Discover endpoints for cluster (like ClusterClient discovers nodes) + # @parameter cluster [Resources::Cluster] Cluster configuration + # @returns [Array] Discovered endpoints + def discover_endpoints(cluster) + cluster_name = cluster.name + @mutex.synchronize do + # Check cache first + if endpoints = @cache.get_endpoints(cluster_name) + return endpoints + end + + # Subscribe to EDS if not already subscribed + subscription_key = :"eds_#{cluster_name}" + unless @subscriptions[subscription_key] + @subscriptions[subscription_key] = subscribe_eds(cluster_name) + end + end + return @cache.get_endpoints(cluster_name) if @cache.get_endpoints(cluster_name) + + # Wait outside mutex so EDS callback can run and update cache + endpoints = wait_for_endpoints(cluster_name, timeout: 10) + raise ReloadError, "Failed to discover endpoints for cluster: #{cluster_name}" unless endpoints + endpoints + end + + # Subscribe to CDS (Cluster Discovery Service) + # @parameter service_name 
[String] Service name + # @returns [Async::Task] Subscription task + def subscribe_cds(service_name) + @discovery_client.subscribe( + DiscoveryClient::CLUSTER_TYPE, + [service_name] + ) do |resources| + resources.each do |resource| + cluster = resource.is_a?(Resources::Cluster) ? resource : Resources::Cluster.from_proto(resource) + @cache.update_cluster(cluster) + resolve_cluster_promise(cluster.name, cluster) + end + end + end + + # Subscribe to EDS (Endpoint Discovery Service) + # @parameter cluster_name [String] Cluster name + # @returns [Async::Task] Subscription task + def subscribe_eds(cluster_name) + @discovery_client.subscribe( + DiscoveryClient::ENDPOINT_TYPE, + [cluster_name] + ) do |resources| + resources.each do |resource| + assignment = resource.is_a?(Resources::ClusterLoadAssignment) ? resource : Resources::ClusterLoadAssignment.from_proto(resource) + endpoints = assignment.endpoints.select(&:healthy?).map do |ep| + Async::HTTP::Endpoint.parse(ep.uri, protocol: Async::HTTP::Protocol::HTTP2) + end + @cache.update_endpoints(cluster_name, endpoints) + resolve_endpoint_promise(cluster_name, endpoints) unless endpoints.empty? + @load_balancer&.update_endpoints(endpoints) + end + end + end + + # Close all subscriptions + def close + @mutex.synchronize do + @subscriptions.each_value do |task| + task.stop if task.respond_to?(:stop) + end + @subscriptions.clear + @cluster_promises.clear + @endpoint_promises.clear + end + @discovery_client.close + end + + private + + def wait_for_cluster(service_name, timeout:) + promise = cluster_promise_for(service_name) + return promise.value if promise.completed? + + begin + promise.wait(timeout: timeout) + promise.completed? ? promise.value : nil + rescue Async::TimeoutError + nil + end + end + + def wait_for_endpoints(cluster_name, timeout:) + promise = endpoint_promise_for(cluster_name) + return promise.value if promise.completed? + + begin + promise.wait(timeout: timeout) + promise.completed? ? 
promise.value : nil + rescue Async::TimeoutError + nil + end + end + + def cluster_promise_for(service_name) + @mutex.synchronize do + @cluster_promises[service_name] ||= Async::Promise.new + end + end + + def endpoint_promise_for(cluster_name) + @mutex.synchronize do + @endpoint_promises[cluster_name] ||= Async::Promise.new + end + end + + def resolve_cluster_promise(service_name, cluster) + cluster_promise_for(service_name).resolve(cluster) + @mutex.synchronize{@cluster_promises.delete(service_name)} + end + + def resolve_endpoint_promise(cluster_name, endpoints) + endpoint_promise_for(cluster_name).resolve(endpoints) + @mutex.synchronize{@endpoint_promises.delete(cluster_name)} + end + end + end + end +end diff --git a/lib/async/grpc/xds/discovery_client.rb b/lib/async/grpc/xds/discovery_client.rb new file mode 100644 index 0000000..a275bc7 --- /dev/null +++ b/lib/async/grpc/xds/discovery_client.rb @@ -0,0 +1,365 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. 
+ +require "async" +require "async/http/client" +require "async/http/endpoint" +require "async/grpc/client" +require "async/grpc/xds/ads_stream" +require "securerandom" +require "envoy/service/discovery/v3/aggregated_discovery_service" +require "envoy/service/discovery/v3/discovery_pb" +require "envoy/config/core/v3/base_pb" +require "envoy/config/cluster/v3/cluster_pb" +require "envoy/config/endpoint/v3/endpoint_pb" +require "google/protobuf/any_pb" + +module Async + module GRPC + module XDS + # Client for xDS APIs (ADS or individual APIs) + # Implements Aggregated Discovery Service (ADS) protocol + # Acts as delegate for ADSStream, receiving discovery_response events + class DiscoveryClient + include ADSStream::Delegate + # xDS API type URLs (v3 API) + LISTENER_TYPE = "type.googleapis.com/envoy.config.listener.v3.Listener" + ROUTE_TYPE = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" + CLUSTER_TYPE = "type.googleapis.com/envoy.config.cluster.v3.Cluster" + ENDPOINT_TYPE = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" + SECRET_TYPE = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" + + # Initialize xDS discovery client + # @parameter server_config [Hash] xDS server configuration from bootstrap + # @parameter node [Hash] Node information (id, cluster, metadata, locality) + def initialize(server_config, node: nil) + @server_uri = server_config[:server_uri] + @channel_creds = server_config[:channel_creds] + @server_features = server_config[:server_features] || [] + @node_info = node || build_node_info + @node = build_node_proto(@node_info) + @grpc_client = nil + @versions = {} # Track version_info per type_url + @nonces = {} # Track nonces per type_url + @mutex = Mutex.new + @subscriptions = {} # Track subscriptions by type_url + @stream_task = nil + @ads_stream = nil # ADSStream instance when connected (owns stream state) + @stream_ready_promise = nil # Resolved when stream_opened runs + end + + # Subscribe 
to resource type using ADS + # (Aggregated Discovery Service - single stream for all types) + # @parameter type_url [String] Resource type URL + # @parameter resource_names [Array] Resources to subscribe to + # @yields [Array] Updated resources (as protobuf objects) + # @returns [Async::Task] Subscription task + def subscribe(type_url, resource_names, &block) + # Store subscription callback + @mutex.synchronize do + @subscriptions[type_url] = { + resource_names: resource_names, + callback: block + } + end + + # Ensure ADS stream is running + ensure_stream_running + + # Wait for stream to be ready (event-driven, no polling) + promise = @stream_ready_promise + if promise && !promise.completed? + begin + promise.wait(timeout: 5) + rescue Async::TimeoutError + # Stream didn't open in time; send_discovery_request will no-op if @ads_stream is nil + end + end + + send_discovery_request(type_url, resource_names) if @ads_stream + + # Return the stream task (already running) + @stream_task + end + + # Close xDS discovery client + def close + @mutex.synchronize do + @stream_task&.stop + @grpc_client&.close + @grpc_client = nil + @subscriptions.clear + @stream_task = nil + @ads_stream = nil + @stream_ready_promise = nil + end + end + + private + + def ensure_stream_running + @mutex.synchronize do + return if @stream_task&.running? 
+ + @stream_ready_promise = Async::Promise.new + @stream_task = Async do |task| + backoff = 5 + loop do + begin + create_and_run_ads_stream(task) + break + rescue Async::Stop + raise + rescue => error + Console.error(self, error) + @mutex.synchronize do + @grpc_client&.close + @grpc_client = nil + @ads_stream = nil + @stream_ready_promise = Async::Promise.new + end + task.sleep(backoff) + backoff = [backoff * 2, 60].min + end + end + end + end + end + + def create_and_run_ads_stream(task) + begin + # Create gRPC client + server_uri = @server_uri + unless server_uri.match?(/^https?:\/\//) + use_insecure = @channel_creds&.any?{|cred| cred[:type] == "insecure"} + scheme = use_insecure ? "http" : "https" + server_uri = "#{scheme}://#{server_uri}" + end + Console.info(self){"Connecting to xDS server: #{server_uri}"} + endpoint = Async::HTTP::Endpoint.parse(server_uri, protocol: Async::HTTP::Protocol::HTTP2) + http_client = Async::HTTP::Client.new(endpoint) + grpc_client = Async::GRPC::Client.new(http_client) + + @mutex.synchronize{@grpc_client = grpc_client} + + # ADSStream owns the stream; we act as delegate receiving discovery_response events + ads_stream = ADSStream.new(grpc_client, @node, delegate: self) + ads_stream.run(initial: build_initial_requests) + rescue => error + Console.error(self, "Failed to create ADS stream: #{error.message}") + @mutex.synchronize do + @grpc_client&.close + @grpc_client = nil + @ads_stream = nil + end + raise + end + end + + # ADSStream::Delegate interface - must be public for ADSStream to call + public + + def stream_opened(stream) + @mutex.synchronize{@ads_stream = stream} + @stream_ready_promise&.resolve(stream) + end + + def stream_closed(stream) + @mutex.synchronize{@ads_stream = nil} + end + + def discovery_response(response, stream) + process_response(response, stream) + end + + private + + def send_discovery_request(type_url, resource_names) + @mutex.synchronize do + stream = @ads_stream + return unless stream + + request = 
Envoy::Service::Discovery::V3::DiscoveryRequest.new( + version_info: @versions[type_url] || "", + node: @node, + resource_names: resource_names, + type_url: type_url, + response_nonce: @nonces[type_url] || "" + ) + stream.send(request) + end + rescue => error + Console.error(self, error) + raise + end + + def build_initial_requests + # Build discovery requests for all active subscriptions. + # If no subscriptions exist, return minimal request with node info so the server + # receives data and responds (avoids deadlock when server waits for first message). + subscriptions_copy = nil + @mutex.synchronize do + subscriptions_copy = @subscriptions.dup + end + + if subscriptions_copy.empty? + Console.info(self){"Building initial DiscoveryRequest (no subscriptions yet)"} + [Envoy::Service::Discovery::V3::DiscoveryRequest.new(node: @node)] + else + Console.info(self){"Building #{subscriptions_copy.size} subscription requests"} + subscriptions_copy.map do |type_url, subscription| + Envoy::Service::Discovery::V3::DiscoveryRequest.new( + version_info: @versions[type_url] || "", + node: @node, + resource_names: subscription[:resource_names], + type_url: type_url, + response_nonce: @nonces[type_url] || "" + ) + end + end + end + + def process_response(response, stream) + type_url = response.type_url + Console.info(self){"Processing response for type_url: #{type_url}"} + + callback = nil + resources = nil + resource_names = nil + + @mutex.synchronize do + subscription = @subscriptions[type_url] + unless subscription + Console.warn(self){"No subscription found for type_url: #{type_url}"} + return + end + + # Update version and nonce + @versions[type_url] = response.version_info + @nonces[type_url] = response.nonce + + # Deserialize resources (skip failed; callback receives only valid resources) + resources = response.resources.filter_map do |any_resource| + deserialize_resource(any_resource, type_url) + end + + # Capture for use outside mutex (avoid deadlock) + callback = 
subscription[:callback] + resource_names = subscription[:resource_names] + end + + # Call callback outside mutex + if callback + callback.call(resources) + else + Console.warn(self){"No callback found for type_url: #{type_url}"} + end + + # Send ACK (acknowledge receipt) + @mutex.synchronize do + send_ack(type_url, resource_names, stream) + end + end + + def send_ack(type_url, resource_names, stream) + request = Envoy::Service::Discovery::V3::DiscoveryRequest.new( + version_info: @versions[type_url] || "", + node: @node, + resource_names: resource_names, + type_url: type_url, + response_nonce: @nonces[type_url] || "" + ) + stream.send(request) + rescue => error + Console.warn(self, "Failed to send ACK: #{error.message}") + end + + def deserialize_resource(any_resource, type_url) + # Deserialize google.protobuf.Any to appropriate resource type + # Based on type_url, decode the value to the correct protobuf message + case type_url + when CLUSTER_TYPE + # Decode Cluster from Any.value + begin + cluster_proto = Envoy::Config::Cluster::V3::Cluster.decode(any_resource.value) + Resources::Cluster.from_proto(cluster_proto) + rescue => error + Console.warn(self, "Failed to deserialize Cluster: #{error.message}") + nil + end + when ENDPOINT_TYPE + # Decode ClusterLoadAssignment from Any.value + begin + endpoint_proto = Envoy::Config::Endpoint::V3::ClusterLoadAssignment.decode(any_resource.value) + Resources::ClusterLoadAssignment.from_proto(endpoint_proto) + rescue => error + Console.warn(self, "Failed to deserialize ClusterLoadAssignment: #{error.message}") + nil + end + else + # For other types, return raw protobuf for now + any_resource + end + end + + def build_node_proto(node_info) + # Build envoy.config.core.v3.Node protobuf + Envoy::Config::Core::V3::Node.new( + id: node_info[:id] || generate_node_id, + cluster: node_info[:cluster] || ENV["XDS_CLUSTER"] || "default", + metadata: build_metadata_struct(node_info[:metadata] || {}), + locality: node_info[:locality] ? 
build_locality_proto(node_info[:locality]) : nil + ) + end + + def build_metadata_struct(metadata_hash) + # Convert hash to google.protobuf.Struct + return nil if metadata_hash.empty? + + fields = {} + metadata_hash.each do |key, value| + fields[key.to_s] = case value + when String + Google::Protobuf::Value.new(string_value: value) + when Numeric + Google::Protobuf::Value.new(number_value: value.to_f) + when TrueClass, FalseClass + Google::Protobuf::Value.new(bool_value: value) + else + Google::Protobuf::Value.new(string_value: value.to_s) + end + end + + Google::Protobuf::Struct.new(fields: fields) + end + + def build_locality_proto(locality_hash) + # Build envoy.config.core.v3.Locality protobuf + Envoy::Config::Core::V3::Locality.new( + region: locality_hash[:region] || "", + zone: locality_hash[:zone] || "", + sub_zone: locality_hash[:sub_zone] || "" + ) + end + + def build_node_info + # Build node identification for xDS server + # Based on envoy.config.core.v3.Node + { + id: generate_node_id, + cluster: ENV["XDS_CLUSTER"] || "default", + metadata: {}, + locality: nil + } + end + + def generate_node_id + # Generate unique node ID + "#{Socket.gethostname}-#{Process.pid}-#{SecureRandom.hex(4)}" + end + end + end + end +end diff --git a/lib/async/grpc/xds/health_checker.rb b/lib/async/grpc/xds/health_checker.rb new file mode 100644 index 0000000..22507c4 --- /dev/null +++ b/lib/async/grpc/xds/health_checker.rb @@ -0,0 +1,88 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. + +require "async" +require "async/http/client" +require "async/http/endpoint" +require "protocol/http" + +module Async + module GRPC + module XDS + # Performs health checks on endpoints. Called by LoadBalancer's loop. + # Runs within the caller's reactor; does not spawn tasks or reactors. + # Only HTTP health checks are supported; gRPC health checks return :unknown. 
module Async
  module GRPC
    module XDS
      # Performs health checks on endpoints. Called by LoadBalancer's loop.
      # Runs within the caller's reactor; does not spawn tasks or reactors.
      # Only HTTP health checks are supported; gRPC health checks return :unknown.
      class HealthChecker
        # Default number of seconds a per-endpoint result is reused before re-probing.
        CACHE_DURATION = 5

        # Initialize health checker.
        # @parameter health_checks [Array] Health check configurations from cluster.
        # @parameter cache_duration [Numeric] Seconds to cache a per-endpoint result.
        def initialize(health_checks, cache_duration: CACHE_DURATION)
          @health_checks = health_checks
          @cache_duration = cache_duration
          @endpoints = []
          @cache = {}
        end

        # Update endpoints (evicts cached results for removed endpoints).
        # @parameter endpoints [Array] Current endpoints.
        def update_endpoints(endpoints)
          (@endpoints - endpoints).each{|endpoint| @cache.delete(endpoint)}
          @endpoints = endpoints
        end

        # Check health of endpoint, reusing a cached result if fresh enough.
        # Runs in caller's reactor.
        # @parameter endpoint [Async::HTTP::Endpoint] Endpoint to check.
        # @returns [Symbol] :healthy, :unhealthy, or :unknown.
        def check(endpoint)
          cached = @cache[endpoint]
          return cached[:status] if cached && (Time.now - cached[:time]) < @cache_duration

          status = perform_check(endpoint)
          @cache[endpoint] = {status: status, time: Time.now}
          status
        end

        # Close health checker, discarding all cached results.
        def close
          @cache.clear
        end

        private

        # Dispatch on the first configured health check's type.
        # Returns :unknown when nothing is configured or the type is unsupported;
        # any raised error is logged and reported as :unhealthy.
        def perform_check(endpoint)
          health_check = @health_checks.first
          return :unknown unless health_check

          case health_check[:type]
          when :HTTP, "HTTP"
            check_http_health(endpoint, health_check)
          when :gRPC, "gRPC"
            check_grpc_health(endpoint, health_check)
          else
            :unknown
          end
        rescue => error
          Console.warn(self, "Health check failed for #{endpoint}: #{error.message}")
          :unhealthy
        end

        # Issue a GET to the configured path; only a 200 counts as healthy.
        # The client is always closed, even when the request raises.
        def check_http_health(endpoint, health_check)
          path = health_check[:path] || "/health"
          http_client = Async::HTTP::Client.new(endpoint)
          response = http_client.call(Protocol::HTTP::Request["GET", path])
          response.status == 200 ? :healthy : :unhealthy
        ensure
          http_client&.close
        end

        # gRPC health checks (grpc.health.v1.Health) not implemented.
        def check_grpc_health(endpoint, health_check)
          :unknown
        end
      end

      # Client-side load balancing with health checking.
      # RING_HASH and MAGLEV fall back to round-robin (require request context to hash).
      class LoadBalancer
        # Load balancing policies (matching Envoy cluster LB policies).
        ROUND_ROBIN = :round_robin
        LEAST_REQUEST = :least_request
        RANDOM = :random
        RING_HASH = :ring_hash
        MAGLEV = :maglev

        # Initialize load balancer.
        # @parameter cluster [Resources::Cluster] Cluster configuration.
        # @parameter endpoints [Array] Initial endpoints.
        def initialize(cluster, endpoints)
          @cluster = cluster
          @endpoints = endpoints
          @policy = parse_policy(cluster.lb_policy)
          @health_status = {} # Track health per endpoint.
          @health_checker = HealthChecker.new(cluster.health_checks)
          @current_index = 0
          @in_flight_requests = {} # Track in-flight requests per endpoint (LEAST_REQUEST).
          @health_check_task = nil # Transient task for health check loop.

          # All endpoints start as :unknown, which counts as healthy until proven otherwise.
          @endpoints.each do |endpoint|
            @health_status[endpoint] = :unknown
          end

          # Start health checking only if the cluster configures it.
          start_health_checks if cluster.health_checks.any?
        end

        # Get healthy endpoints (:healthy or still :unknown).
        # @returns [Array] Healthy endpoints.
        def healthy_endpoints
          @endpoints.select{|endpoint| healthy?(endpoint)}
        end

        # Pick next endpoint using the configured load balancing policy.
        # @returns [Async::HTTP::Endpoint, nil] Selected endpoint, or nil if none are healthy.
        def pick
          healthy = healthy_endpoints
          return nil if healthy.empty?

          case @policy
          when ROUND_ROBIN
            pick_round_robin(healthy)
          when LEAST_REQUEST
            pick_least_request(healthy)
          when RANDOM
            pick_random(healthy)
          when RING_HASH
            pick_ring_hash(healthy)
          when MAGLEV
            pick_maglev(healthy)
          else
            healthy.first
          end
        end

        # Update endpoints from EDS, pruning state for endpoints that disappeared.
        # @parameter endpoints [Array] New endpoints.
        def update_endpoints(endpoints)
          old_endpoints = @endpoints
          @endpoints = endpoints

          @health_checker.update_endpoints(endpoints)

          # New endpoints start as :unknown; existing ones keep their status.
          endpoints.each do |endpoint|
            @health_status[endpoint] ||= :unknown
          end

          (old_endpoints - endpoints).each do |endpoint|
            @health_status.delete(endpoint)
            @in_flight_requests.delete(endpoint)
          end
        end

        # Record that a request has started for the given endpoint.
        # Used by LEAST_REQUEST policy. Call from Client when a call begins.
        # @parameter endpoint [Async::HTTP::Endpoint] The endpoint handling the request.
        def record_request_start(endpoint)
          @in_flight_requests[endpoint] ||= 0
          @in_flight_requests[endpoint] += 1
        end

        # Record that a request has finished for the given endpoint.
        # Must be called in ensure to decrement even on error/retry.
        # @parameter endpoint [Async::HTTP::Endpoint] The endpoint that handled the request.
        def record_request_end(endpoint)
          return unless endpoint
          current = @in_flight_requests[endpoint]
          return unless current && current > 0
          @in_flight_requests[endpoint] = current - 1
          @in_flight_requests.delete(endpoint) if @in_flight_requests[endpoint] == 0
        end

        # Mark endpoint as unhealthy (e.g. after connection failure).
        # Health checker may restore it on next successful check.
        # @parameter endpoint [Async::HTTP::Endpoint] The endpoint to mark unhealthy.
        def mark_unhealthy(endpoint)
          @health_status[endpoint] = :unhealthy
        end

        # Close load balancer, stopping the health check task if running.
        def close
          if health_check_task = @health_check_task
            @health_check_task = nil
            health_check_task.stop
          end

          @health_checker.close
        end

        private

        # :unknown counts as healthy so new endpoints receive traffic before first check.
        def healthy?(endpoint)
          status = @health_status[endpoint]
          status == :healthy || status == :unknown
        end

        # Rotate through the endpoints, starting from the first.
        # Pick-then-advance (the previous pre-increment skipped endpoints[0] on the
        # very first pick). Modulo keeps the index valid when the list shrinks.
        def pick_round_robin(endpoints)
          endpoint = endpoints[@current_index % endpoints.size]
          @current_index += 1
          endpoint
        end

        # Pick the endpoint with the fewest in-flight requests.
        def pick_least_request(endpoints)
          endpoints.min_by{|endpoint| @in_flight_requests[endpoint] || 0}
        end

        def pick_random(endpoints)
          endpoints.sample
        end

        def pick_ring_hash(endpoints)
          pick_round_robin(endpoints) # Fallback; requires request context for consistent hashing.
        end

        def pick_maglev(endpoints)
          pick_round_robin(endpoints) # Fallback; requires request context for Maglev hashing.
        end

        # Map cluster LB policy (symbol or string) to our constants.
        def parse_policy(lb_policy)
          case lb_policy
          when :ROUND_ROBIN, "ROUND_ROBIN"
            ROUND_ROBIN
          when :LEAST_REQUEST, "LEAST_REQUEST"
            LEAST_REQUEST
          when :RANDOM, "RANDOM"
            RANDOM
          when :RING_HASH, "RING_HASH"
            RING_HASH
          when :MAGLEV, "MAGLEV"
            MAGLEV
          else
            ROUND_ROBIN # Default
          end
        end

        # Spawn a transient task that periodically refreshes every endpoint's status.
        def start_health_checks
          return unless @cluster.health_checks.any?

          @health_check_task = Async(transient: true) do
            loop do
              @endpoints.each do |endpoint|
                @health_status[endpoint] = @health_checker.check(endpoint)
              end

              # Sleep for health check interval.
              interval = @cluster.health_checks.first[:interval] || 30
              sleep(interval)
            end
          end
        end
      end

      # Caches discovered xDS resources.
      # Thread-safe cache for clusters and endpoints.
      class ResourceCache
        def initialize
          @clusters = {}
          @endpoints = {}
          @mutex = Mutex.new
        end

        # Get cluster by name.
        # @parameter name [String] Cluster name.
        # @returns [Resources::Cluster, nil] Cached cluster or nil.
        def get_cluster(name)
          @mutex.synchronize{@clusters[name]}
        end

        # Update cluster in cache (keyed by cluster.name).
        # @parameter cluster [Resources::Cluster] Cluster to cache.
        def update_cluster(cluster)
          @mutex.synchronize{@clusters[cluster.name] = cluster}
        end

        # Get endpoints for cluster.
        # @parameter cluster_name [String] Cluster name.
        # @returns [Array, nil] Cached endpoints or nil.
        def get_endpoints(cluster_name)
          @mutex.synchronize{@endpoints[cluster_name]}
        end

        # Update endpoints for cluster.
        # @parameter cluster_name [String] Cluster name.
        # @parameter endpoints [Array] Endpoints to cache.
        def update_endpoints(cluster_name, endpoints)
          @mutex.synchronize{@endpoints[cluster_name] = endpoints}
        end

        # Clear all cached resources.
        def clear
          @mutex.synchronize do
            @clusters.clear
            @endpoints.clear
          end
        end
      end
    end
  end
end
module Async
  module GRPC
    module XDS
      module Resources
        # Represents a discovered cluster.
        # Based on envoy.config.cluster.v3.Cluster.
        class Cluster
          attr_reader :name, :type, :lb_policy, :health_checks, :circuit_breakers, :eds_cluster_config

          # Initialize cluster from protobuf or hash.
          # @parameter data [Object, Hash] Cluster protobuf or hash representation.
          def initialize(data)
            if data.is_a?(Hash)
              @name = data[:name]
              @type = parse_type(data[:type])
              @lb_policy = parse_lb_policy(data[:lb_policy])
              @health_checks = parse_health_checks(data[:health_checks] || [])
              @circuit_breakers = data[:circuit_breakers]
              @eds_cluster_config = data[:eds_cluster_config]
            else
              # Assume protobuf object.
              @name = data.name
              @type = parse_type(data.type)
              @lb_policy = parse_lb_policy(data.lb_policy)
              @health_checks = parse_health_checks(data.health_checks || [])
              @circuit_breakers = data.circuit_breakers
              @eds_cluster_config = data.eds_cluster_config
            end
          end

          # Create Cluster from protobuf message.
          # @parameter proto [Envoy::Config::Cluster::V3::Cluster] Protobuf cluster.
          # @returns [Cluster] Cluster instance.
          def self.from_proto(proto)
            new(proto)
          end

          # Whether endpoints for this cluster are discovered via EDS.
          def eds_cluster?
            @type == :EDS
          end

          private

          # Normalize Cluster.DiscoveryType from enum int, symbol or string.
          # Integer values per envoy.config.cluster.v3.Cluster.DiscoveryType:
          # STATIC=0, STRICT_DNS=1, LOGICAL_DNS=2, EDS=3.
          def parse_type(type)
            case type
            when :EDS, "EDS", "envoy.config.cluster.v3.Cluster.EDS", 3
              :EDS
            when :STATIC, "STATIC", "envoy.config.cluster.v3.Cluster.STATIC", 0
              :STATIC
            when :LOGICAL_DNS, "LOGICAL_DNS", "envoy.config.cluster.v3.Cluster.LOGICAL_DNS", 2
              :LOGICAL_DNS
            when :STRICT_DNS, "STRICT_DNS", "envoy.config.cluster.v3.Cluster.STRICT_DNS", 1
              :STRICT_DNS
            else
              # Default to EDS for unknown types.
              :EDS
            end
          end

          # Normalize Cluster.LbPolicy from enum int, symbol or string.
          # Integer values per envoy.config.cluster.v3.Cluster.LbPolicy:
          # ROUND_ROBIN=0, LEAST_REQUEST=1, RING_HASH=2, RANDOM=3, MAGLEV=5.
          def parse_lb_policy(policy)
            case policy
            when :ROUND_ROBIN, "ROUND_ROBIN", "envoy.config.cluster.v3.Cluster.ROUND_ROBIN", 0
              :ROUND_ROBIN
            when :LEAST_REQUEST, "LEAST_REQUEST", "envoy.config.cluster.v3.Cluster.LEAST_REQUEST", 1
              :LEAST_REQUEST
            when :RANDOM, "RANDOM", "envoy.config.cluster.v3.Cluster.RANDOM", 3
              :RANDOM
            when :RING_HASH, "RING_HASH", "envoy.config.cluster.v3.Cluster.RING_HASH", 2
              :RING_HASH
            when :MAGLEV, "MAGLEV", "envoy.config.cluster.v3.Cluster.MAGLEV", 5
              :MAGLEV
            else
              # Default to ROUND_ROBIN.
              :ROUND_ROBIN
            end
          end

          # Normalize each health check (hash or protobuf HealthCheck) into
          # {type:, timeout:, interval:, path:}.
          def parse_health_checks(checks)
            Array(checks).map do |check|
              if check.is_a?(Hash)
                {
                  type: parse_health_check_type(check[:health_checker] || check[:type] || :HTTP),
                  timeout: parse_duration(check[:timeout]),
                  interval: parse_duration(check[:interval] || 30),
                  path: extract_http_path(check)
                }
              else
                # Protobuf HealthCheck object.
                {
                  type: parse_health_check_type(check.health_checker),
                  timeout: parse_duration(check.timeout),
                  interval: parse_duration(check.interval),
                  path: extract_http_path_from_proto(check)
                }
              end
            end
          end

          # Determine health check type (:HTTP, :gRPC, :TCP) from the
          # HealthCheck.health_checker oneof, a hash, or a bare symbol/string.
          def parse_health_check_type(checker)
            return :HTTP if checker.nil?

            case checker
            # Bare symbol/string (hash configs pass check[:type] straight through).
            # Previously a plain "gRPC"/"TCP" string fell through to the default :HTTP.
            when :HTTP, "HTTP"
              :HTTP
            when :gRPC, "gRPC"
              :gRPC
            when :TCP, "TCP"
              :TCP
            when Hash
              case checker[:type]
              when :HTTP, "HTTP", "envoy.config.core.v3.HealthCheck.HttpHealthCheck"
                :HTTP
              when :gRPC, "gRPC", "envoy.config.core.v3.HealthCheck.GrpcHealthCheck"
                :gRPC
              when :TCP, "TCP", "envoy.config.core.v3.HealthCheck.TcpHealthCheck"
                :TCP
              else
                :HTTP # Default
              end
            else
              # Protobuf HealthCheck object - the health_checker is a oneof,
              # so check which field is populated.
              if checker.respond_to?(:http_health_check) && checker.http_health_check
                :HTTP
              elsif checker.respond_to?(:grpc_health_check) && checker.grpc_health_check
                :gRPC
              elsif checker.respond_to?(:tcp_health_check) && checker.tcp_health_check
                :TCP
              else
                :HTTP # Default
              end
            end
          end

          # Convert protobuf Duration (or plain number) to seconds; nil passes through.
          def parse_duration(duration)
            return duration if duration.is_a?(Numeric)
            return nil unless duration

            if duration.respond_to?(:seconds) && duration.respond_to?(:nanos)
              duration.seconds + (duration.nanos.to_f / 1_000_000_000)
            else
              duration.to_f
            end
          end

          # Extract HTTP path from hash config, defaulting to "/health".
          def extract_http_path(check)
            return nil unless check.is_a?(Hash)

            http_check = check[:http_health_check] || {}
            http_check[:path] || "/health"
          end

          # Extract HTTP path from protobuf HealthCheck, defaulting to "/health".
          # Note: proto3 string fields default to "" (never nil), so we must
          # treat an empty path as unset.
          def extract_http_path_from_proto(check)
            return nil unless check.respond_to?(:http_health_check)

            http_check = check.http_health_check
            return nil unless http_check

            path = http_check.path
            (path.nil? || path.empty?) ? "/health" : path
          end
        end

        # Represents endpoint assignment (ClusterLoadAssignment).
        # Based on envoy.config.endpoint.v3.ClusterLoadAssignment.
        class ClusterLoadAssignment
          attr_reader :cluster_name, :endpoints

          # Initialize from protobuf or hash.
          # @parameter data [Object, Hash] ClusterLoadAssignment protobuf or hash.
          def initialize(data)
            if data.is_a?(Hash)
              @cluster_name = data[:cluster_name]
              @endpoints = parse_endpoints(data[:endpoints] || [])
            else
              @cluster_name = data.cluster_name
              @endpoints = parse_endpoints(data.endpoints || [])
            end
          end

          # Create ClusterLoadAssignment from protobuf message.
          # @parameter proto [Envoy::Config::Endpoint::V3::ClusterLoadAssignment] Protobuf assignment.
          # @returns [ClusterLoadAssignment] Assignment instance.
          def self.from_proto(proto)
            new(proto)
          end

          private

          # Flatten locality-grouped lb_endpoints into a flat list of Endpoint.
          def parse_endpoints(endpoints_data)
            Array(endpoints_data).flat_map do |locality_endpoints|
              lb_endpoints = locality_endpoints.is_a?(Hash) ?
                (locality_endpoints[:lb_endpoints] || []) :
                (locality_endpoints.lb_endpoints || [])

              lb_endpoints.map{|lb_endpoint| Endpoint.new(lb_endpoint)}
            end
          end
        end

        # Represents a single endpoint.
        # Based on envoy.config.endpoint.v3.LbEndpoint.
        class Endpoint
          attr_reader :address, :port, :health_status, :metadata

          def initialize(lb_endpoint)
            if lb_endpoint.is_a?(Hash)
              endpoint_data = lb_endpoint[:endpoint] || {}
              address_data = endpoint_data[:address] || {}
              socket_address = address_data[:socket_address] || {}

              @address = socket_address[:address] || "localhost"
              @port = socket_address[:port_value] || 50051
              @health_status = parse_health_status(lb_endpoint[:health_status])
              @metadata = lb_endpoint[:metadata] || {}
            else
              socket_address = lb_endpoint.endpoint.address.socket_address
              @address = socket_address.address
              @port = socket_address.port_value
              @health_status = parse_health_status(lb_endpoint.health_status)
              @metadata = lb_endpoint.metadata || {}
            end
          end

          # :UNKNOWN counts as healthy (no health information yet).
          def healthy?
            @health_status == :HEALTHY || @health_status == :UNKNOWN
          end

          def uri
            # Use http for insecure/docker environments (gRPC h2c)
            scheme = ENV["XDS_ENDPOINT_SCHEME"] || "http"
            "#{scheme}://#{@address}:#{@port}"
          end

          private

          # Normalize health status from enum int, symbol or string.
          # HealthStatus is defined in envoy.config.core.v3 (health_check.proto):
          # UNKNOWN=0, HEALTHY=1, UNHEALTHY=2, DRAINING=3, TIMEOUT=4, DEGRADED=5.
          # (The previous mapping 0=>HEALTHY, 1=>UNHEALTHY, 2=>DEGRADED, 3=>UNKNOWN
          # disagreed with the proto enum.)
          # Ruby protobuf returns known enum values as symbols and only unknown
          # values as integers, so unmatched values fall back to :UNKNOWN.
          def parse_health_status(status)
            case status
            when :HEALTHY, "HEALTHY", 1
              :HEALTHY
            when :UNHEALTHY, "UNHEALTHY", 2
              :UNHEALTHY
            when :DRAINING, "DRAINING", 3
              :DRAINING
            when :TIMEOUT, "TIMEOUT", 4
              :TIMEOUT
            when :DEGRADED, "DEGRADED", 5
              :DEGRADED
            when :UNKNOWN, "UNKNOWN", 0, nil
              :UNKNOWN
            else
              :UNKNOWN
            end
          end
        end
      end
    end
  end
end
+ +# Core discovery service (most important for xDS) +# Note: Generated protobuf files use absolute requires, so lib must be in $LOAD_PATH +# Load dependencies first (in order) +# XDS annotations +require "xds/annotations/v3/status_pb" +require "xds/core/v3/context_params_pb" +require "xds/core/v3/authority_pb" + +# UDPA annotations +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "udpa/annotations/migrate_pb" + +# Validate annotations +require "validate/validate_pb" + +# Envoy annotations +require "envoy/annotations/deprecation_pb" + +# Envoy type definitions +require "envoy/type/v3/percent_pb" +require "envoy/type/v3/semantic_version_pb" + +# Envoy config core (load in dependency order) +require "envoy/config/core/v3/extension_pb" +require "envoy/config/core/v3/backoff_pb" +require "envoy/config/core/v3/http_uri_pb" +require "envoy/config/core/v3/grpc_service_pb" +require "envoy/config/core/v3/address_pb" +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/config_source_pb" + +# Discovery service +require "envoy/service/discovery/v3/discovery_pb" +require "envoy/service/discovery/v3/ads_pb" +require "envoy/service/discovery/v3/aggregated_discovery_service" + +# Resource types +require "envoy/config/cluster/v3/cluster_pb" +require "envoy/config/endpoint/v3/endpoint_pb" + +module Envoy + module Service + module Discovery + module V3 + # Re-export for convenience + # Use Envoy::Service::Discovery::V3::DiscoveryRequest + # Use Envoy::Service::Discovery::V3::AggregatedDiscoveryService + end + end + end + + module Config + module Cluster + module V3 + # Use Envoy::Config::Cluster::V3::Cluster + end + end + + module Endpoint + module V3 + # Use Envoy::Config::Endpoint::V3::ClusterLoadAssignment + end + end + + module Core + module V3 + # Use Envoy::Config::Core::V3::Node + end + end + end +end diff --git a/lib/envoy/annotations/deprecation_pb.rb b/lib/envoy/annotations/deprecation_pb.rb new file mode 100644 
index 0000000..b8e63ba --- /dev/null +++ b/lib/envoy/annotations/deprecation_pb.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/annotations/deprecation.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" + + +descriptor_data = "\n#envoy/annotations/deprecation.proto\x12\x11\x65nvoy.annotations\x1a google/protobuf/descriptor.proto:?\n\x15\x64isallowed_by_default\x12\x1d.google.protobuf.FieldOptions\x18\xe7\xad\xaeZ \x01(\x08:E\n\x1b\x64\x65precated_at_minor_version\x12\x1d.google.protobuf.FieldOptions\x18\xf2\xe8\x80K \x01(\t:H\n\x1a\x64isallowed_by_default_enum\x12!.google.protobuf.EnumValueOptions\x18\xf5\xce\xb6! \x01(\x08:N\n deprecated_at_minor_version_enum\x12!.google.protobuf.EnumValueOptions\x18\xc1\xbe\xb3V \x01(\tB:Z8github.com/envoyproxy/go-control-plane/envoy/annotationsb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Annotations + end +end diff --git a/lib/envoy/config/cluster/v3/circuit_breaker_pb.rb b/lib/envoy/config/cluster/v3/circuit_breaker_pb.rb new file mode 100644 index 0000000..ca6f9a6 --- /dev/null +++ b/lib/envoy/config/cluster/v3/circuit_breaker_pb.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/cluster/v3/circuit_breaker.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "envoy/type/v3/percent_pb" +require "google/protobuf/wrappers_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n-envoy/config/cluster/v3/circuit_breaker.proto\x12\x17\x65nvoy.config.cluster.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a\x1b\x65nvoy/type/v3/percent.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xaa\x07\n\x0f\x43ircuitBreakers\x12G\n\nthresholds\x18\x01 \x03(\x0b\x32\x33.envoy.config.cluster.v3.CircuitBreakers.Thresholds\x12P\n\x13per_host_thresholds\x18\x02 \x03(\x0b\x32\x33.envoy.config.cluster.v3.CircuitBreakers.Thresholds\x1a\xce\x05\n\nThresholds\x12\x41\n\x08priority\x18\x01 \x01(\x0e\x32%.envoy.config.core.v3.RoutingPriorityB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x35\n\x0fmax_connections\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12:\n\x14max_pending_requests\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x32\n\x0cmax_requests\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x31\n\x0bmax_retries\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12U\n\x0cretry_budget\x18\x08 \x01(\x0b\x32?.envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget\x12\x17\n\x0ftrack_remaining\x18\x06 \x01(\x08\x12:\n\x14max_connection_pools\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x1a\xbe\x01\n\x0bRetryBudget\x12.\n\x0e\x62udget_percent\x18\x01 \x01(\x0b\x32\x16.envoy.type.v3.Percent\x12;\n\x15min_retry_concurrency\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.UInt32Value:B\x9a\xc5\x88\x1e=\n;envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget:6\x9a\xc5\x88\x1e\x31\n/envoy.api.v2.cluster.CircuitBreakers.Thresholds:+\x9a\xc5\x88\x1e&\n$envoy.api.v2.cluster.CircuitBreakersB\x90\x01\n%io.envoyproxy.envoy.config.cluster.v3B\x13\x43ircuitBreakerProtoP\x01ZHgithub.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Cluster + module V3 + CircuitBreakers = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.cluster.v3.CircuitBreakers").msgclass + CircuitBreakers::Thresholds = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.cluster.v3.CircuitBreakers.Thresholds").msgclass + CircuitBreakers::Thresholds::RetryBudget = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget").msgclass + end + end + end +end diff --git a/lib/envoy/config/cluster/v3/cluster_pb.rb b/lib/envoy/config/cluster/v3/cluster_pb.rb new file mode 100644 index 0000000..2e54f3f --- /dev/null +++ b/lib/envoy/config/cluster/v3/cluster_pb.rb @@ -0,0 +1,80 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/cluster/v3/cluster.proto + +require "google/protobuf" + +require "envoy/config/cluster/v3/circuit_breaker_pb" +require "envoy/config/cluster/v3/filter_pb" +require "envoy/config/cluster/v3/outlier_detection_pb" +require "envoy/config/core/v3/address_pb" +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/config_source_pb" +require "envoy/config/core/v3/extension_pb" +require "envoy/config/core/v3/health_check_pb" +require "envoy/config/core/v3/protocol_pb" +require "envoy/config/core/v3/resolver_pb" +require "envoy/config/endpoint/v3/endpoint_pb" +require "envoy/type/metadata/v3/metadata_pb" +require "envoy/type/v3/percent_pb" +require "google/protobuf/any_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/struct_pb" +require "google/protobuf/wrappers_pb" +require "xds/core/v3/collection_entry_pb" +require "xds/type/matcher/v3/matcher_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/migrate_pb" +require "udpa/annotations/security_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = 
"\n%envoy/config/cluster/v3/cluster.proto\x12\x17\x65nvoy.config.cluster.v3\x1a-envoy/config/cluster/v3/circuit_breaker.proto\x1a$envoy/config/cluster/v3/filter.proto\x1a/envoy/config/cluster/v3/outlier_detection.proto\x1a\"envoy/config/core/v3/address.proto\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a(envoy/config/core/v3/config_source.proto\x1a$envoy/config/core/v3/extension.proto\x1a\'envoy/config/core/v3/health_check.proto\x1a#envoy/config/core/v3/protocol.proto\x1a#envoy/config/core/v3/resolver.proto\x1a\'envoy/config/endpoint/v3/endpoint.proto\x1a%envoy/type/metadata/v3/metadata.proto\x1a\x1b\x65nvoy/type/v3/percent.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\"xds/core/v3/collection_entry.proto\x1a!xds/type/matcher/v3/matcher.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1eudpa/annotations/migrate.proto\x1a\x1fudpa/annotations/security.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"B\n\x11\x43lusterCollection\x12-\n\x07\x65ntries\x18\x01 \x01(\x0b\x32\x1c.xds.core.v3.CollectionEntry\"\x87\x46\n\x07\x43luster\x12W\n\x18transport_socket_matches\x18+ \x03(\x0b\x32\x35.envoy.config.cluster.v3.Cluster.TransportSocketMatch\x12>\n\x18transport_socket_matcher\x18; \x01(\x0b\x32\x1c.xds.type.matcher.v3.Matcher\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x31\n\ralt_stat_name\x18\x1c \x01(\tB\x1a\xf2\x98\xfe\x8f\x05\x14\n\x12observability_name\x12H\n\x04type\x18\x02 \x01(\x0e\x32..envoy.config.cluster.v3.Cluster.DiscoveryTypeB\x08\xfa\x42\x05\x82\x01\x02\x10\x01H\x00\x12J\n\x0c\x63luster_type\x18& \x01(\x0b\x32\x32.envoy.config.cluster.v3.Cluster.CustomClusterTypeH\x00\x12M\n\x12\x65\x64s_cluster_config\x18\x03 \x01(\x0b\x32\x31.envoy.config.cluster.v3.Cluster.EdsClusterConfig\x12<\n\x0f\x63onnect_timeout\x18\x04 
\x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12P\n!per_connection_buffer_limit_bytes\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\x8a\x93\xb7*\x02\x10\x01\x12\x46\n\tlb_policy\x18\x06 \x01(\x0e\x32).envoy.config.cluster.v3.Cluster.LbPolicyB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12H\n\x0fload_assignment\x18! \x01(\x0b\x32/.envoy.config.endpoint.v3.ClusterLoadAssignment\x12\x38\n\rhealth_checks\x18\x08 \x03(\x0b\x32!.envoy.config.core.v3.HealthCheck\x12N\n\x1bmax_requests_per_connection\x18\t \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12\x42\n\x10\x63ircuit_breakers\x18\n \x01(\x0b\x32(.envoy.config.cluster.v3.CircuitBreakers\x12\x66\n\x1eupstream_http_protocol_options\x18. \x01(\x0b\x32\x31.envoy.config.core.v3.UpstreamHttpProtocolOptionsB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12\\\n\x1c\x63ommon_http_protocol_options\x18\x1d \x01(\x0b\x32).envoy.config.core.v3.HttpProtocolOptionsB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12V\n\x15http_protocol_options\x18\r \x01(\x0b\x32*.envoy.config.core.v3.Http1ProtocolOptionsB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12^\n\x16http2_protocol_options\x18\x0e \x01(\x0b\x32*.envoy.config.core.v3.Http2ProtocolOptionsB\x12\x18\x01\x8a\x93\xb7*\x02\x10\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12m\n typed_extension_protocol_options\x18$ \x03(\x0b\x32\x43.envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry\x12L\n\x10\x64ns_refresh_rate\x18\x10 \x01(\x0b\x32\x19.google.protobuf.DurationB\x17\x18\x01\xfa\x42\t\xaa\x01\x06*\x04\x10\xc0\x84=\x92\xc7\x86\xd8\x04\x03\x33.0\x12\x42\n\ndns_jitter\x18: \x01(\x0b\x32\x19.google.protobuf.DurationB\x13\x18\x01\xfa\x42\x05\xaa\x01\x02\x32\x00\x92\xc7\x86\xd8\x04\x03\x33.0\x12[\n\x18\x64ns_failure_refresh_rate\x18, \x01(\x0b\x32,.envoy.config.cluster.v3.Cluster.RefreshRateB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12$\n\x0frespect_dns_ttl\x18\' 
\x01(\x08\x42\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12U\n\x11\x64ns_lookup_family\x18\x11 \x01(\x0e\x32\x30.envoy.config.cluster.v3.Cluster.DnsLookupFamilyB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x41\n\rdns_resolvers\x18\x12 \x03(\x0b\x32\x1d.envoy.config.core.v3.AddressB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12,\n\x17use_tcp_for_dns_lookups\x18- \x01(\x08\x42\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12U\n\x15\x64ns_resolution_config\x18\x35 \x01(\x0b\x32).envoy.config.core.v3.DnsResolutionConfigB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12M\n\x19typed_dns_resolver_config\x18\x37 \x01(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12\x39\n\x15wait_for_warm_on_init\x18\x36 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x44\n\x11outlier_detection\x18\x13 \x01(\x0b\x32).envoy.config.cluster.v3.OutlierDetection\x12=\n\x10\x63leanup_interval\x18\x14 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12>\n\x14upstream_bind_config\x18\x15 \x01(\x0b\x32 .envoy.config.core.v3.BindConfig\x12I\n\x10lb_subset_config\x18\x16 \x01(\x0b\x32/.envoy.config.cluster.v3.Cluster.LbSubsetConfig\x12P\n\x13ring_hash_lb_config\x18\x17 \x01(\x0b\x32\x31.envoy.config.cluster.v3.Cluster.RingHashLbConfigH\x01\x12K\n\x10maglev_lb_config\x18\x34 \x01(\x0b\x32/.envoy.config.cluster.v3.Cluster.MaglevLbConfigH\x01\x12V\n\x16original_dst_lb_config\x18\" \x01(\x0b\x32\x34.envoy.config.cluster.v3.Cluster.OriginalDstLbConfigH\x01\x12X\n\x17least_request_lb_config\x18% \x01(\x0b\x32\x35.envoy.config.cluster.v3.Cluster.LeastRequestLbConfigH\x01\x12T\n\x15round_robin_lb_config\x18\x38 \x01(\x0b\x32\x33.envoy.config.cluster.v3.Cluster.RoundRobinLbConfigH\x01\x12I\n\x10\x63ommon_lb_config\x18\x1b \x01(\x0b\x32/.envoy.config.cluster.v3.Cluster.CommonLbConfig\x12?\n\x10transport_socket\x18\x18 \x01(\x0b\x32%.envoy.config.core.v3.TransportSocket\x12\x30\n\x08metadata\x18\x19 \x01(\x0b\x32\x1e.envoy.config.core.v3.Metadata\x12\x62\n\x12protocol_selection\x18\x1a 
\x01(\x0e\x32\x39.envoy.config.cluster.v3.Cluster.ClusterProtocolSelectionB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12W\n\x1bupstream_connection_options\x18\x1e \x01(\x0b\x32\x32.envoy.config.cluster.v3.UpstreamConnectionOptions\x12\x30\n(close_connections_on_host_health_failure\x18\x1f \x01(\x08\x12%\n\x1dignore_health_on_host_removal\x18 \x01(\x08\x12\x30\n\x07\x66ilters\x18( \x03(\x0b\x32\x1f.envoy.config.cluster.v3.Filter\x12K\n\x15load_balancing_policy\x18) \x01(\x0b\x32,.envoy.config.cluster.v3.LoadBalancingPolicy\x12\x36\n\nlrs_server\x18* \x01(\x0b\x32\".envoy.config.core.v3.ConfigSource\x12#\n\x1blrs_report_endpoint_metrics\x18\x39 \x03(\t\x12*\n\x15track_timeout_budgets\x18/ \x01(\x08\x42\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12\x43\n\x0fupstream_config\x18\x30 \x01(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12G\n\x13track_cluster_stats\x18\x31 \x01(\x0b\x32*.envoy.config.cluster.v3.TrackClusterStats\x12L\n\x11preconnect_policy\x18\x32 \x01(\x0b\x32\x31.envoy.config.cluster.v3.Cluster.PreconnectPolicy\x12\x31\n)connection_pool_per_downstream_connection\x18\x33 \x01(\x08\x1a\xc8\x01\n\x14TransportSocketMatch\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12&\n\x05match\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12?\n\x10transport_socket\x18\x03 \x01(\x0b\x32%.envoy.config.core.v3.TransportSocket:0\x9a\xc5\x88\x1e+\n)envoy.api.v2.Cluster.TransportSocketMatch\x1a\x85\x01\n\x11\x43ustomClusterType\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12*\n\x0ctyped_config\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:-\x9a\xc5\x88\x1e(\n&envoy.api.v2.Cluster.CustomClusterType\x1a\x8e\x01\n\x10\x45\x64sClusterConfig\x12\x36\n\neds_config\x18\x01 \x01(\x0b\x32\".envoy.config.core.v3.ConfigSource\x12\x14\n\x0cservice_name\x18\x02 \x01(\t:,\x9a\xc5\x88\x1e\'\n%envoy.api.v2.Cluster.EdsClusterConfig\x1a\xd9\x08\n\x0eLbSubsetConfig\x12i\n\x0f\x66\x61llback_policy\x18\x01 
\x01(\x0e\x32\x46.envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicyB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12/\n\x0e\x64\x65\x66\x61ult_subset\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12Z\n\x10subset_selectors\x18\x03 \x03(\x0b\x32@.envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector\x12\x1d\n\x15locality_weight_aware\x18\x04 \x01(\x08\x12\x1d\n\x15scale_locality_weight\x18\x05 \x01(\x08\x12\x16\n\x0epanic_mode_any\x18\x06 \x01(\x08\x12\x13\n\x0blist_as_any\x18\x07 \x01(\x08\x12z\n\x18metadata_fallback_policy\x18\x08 \x01(\x0e\x32N.envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicyB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x1a\x9b\x03\n\x10LbSubsetSelector\x12\x0c\n\x04keys\x18\x01 \x03(\t\x12\x1e\n\x16single_host_per_subset\x18\x04 \x01(\x08\x12\x82\x01\n\x0f\x66\x61llback_policy\x18\x02 \x01(\x0e\x32_.envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicyB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x1c\n\x14\x66\x61llback_keys_subset\x18\x03 \x03(\t\"y\n\x1eLbSubsetSelectorFallbackPolicy\x12\x0f\n\x0bNOT_DEFINED\x10\x00\x12\x0f\n\x0bNO_FALLBACK\x10\x01\x12\x10\n\x0c\x41NY_ENDPOINT\x10\x02\x12\x12\n\x0e\x44\x45\x46\x41ULT_SUBSET\x10\x03\x12\x0f\n\x0bKEYS_SUBSET\x10\x04:;\x9a\xc5\x88\x1e\x36\n4envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector\"O\n\x16LbSubsetFallbackPolicy\x12\x0f\n\x0bNO_FALLBACK\x10\x00\x12\x10\n\x0c\x41NY_ENDPOINT\x10\x01\x12\x12\n\x0e\x44\x45\x46\x41ULT_SUBSET\x10\x02\"M\n\x1eLbSubsetMetadataFallbackPolicy\x12\x18\n\x14METADATA_NO_FALLBACK\x10\x00\x12\x11\n\rFALLBACK_LIST\x10\x01:*\x9a\xc5\x88\x1e%\n#envoy.api.v2.Cluster.LbSubsetConfig\x1a\xb4\x01\n\x0fSlowStartConfig\x12\x34\n\x11slow_start_window\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x37\n\naggression\x18\x02 \x01(\x0b\x32#.envoy.config.core.v3.RuntimeDouble\x12\x32\n\x12min_weight_percent\x18\x03 
\x01(\x0b\x32\x16.envoy.type.v3.Percent\x1a\x61\n\x12RoundRobinLbConfig\x12K\n\x11slow_start_config\x18\x01 \x01(\x0b\x32\x30.envoy.config.cluster.v3.Cluster.SlowStartConfig\x1a\x94\x02\n\x14LeastRequestLbConfig\x12;\n\x0c\x63hoice_count\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x02\x12@\n\x13\x61\x63tive_request_bias\x18\x02 \x01(\x0b\x32#.envoy.config.core.v3.RuntimeDouble\x12K\n\x11slow_start_config\x18\x03 \x01(\x0b\x32\x30.envoy.config.cluster.v3.Cluster.SlowStartConfig:0\x9a\xc5\x88\x1e+\n)envoy.api.v2.Cluster.LeastRequestLbConfig\x1a\xe1\x02\n\x10RingHashLbConfig\x12\x43\n\x11minimum_ring_size\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt64ValueB\n\xfa\x42\x07\x32\x05\x18\x80\x80\x80\x04\x12_\n\rhash_function\x18\x03 \x01(\x0e\x32>.envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunctionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x43\n\x11maximum_ring_size\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt64ValueB\n\xfa\x42\x07\x32\x05\x18\x80\x80\x80\x04\".\n\x0cHashFunction\x12\x0b\n\x07XX_HASH\x10\x00\x12\x11\n\rMURMUR_HASH_2\x10\x01:,\x9a\xc5\x88\x1e\'\n%envoy.api.v2.Cluster.RingHashLbConfigJ\x04\x08\x02\x10\x03\x1aN\n\x0eMaglevLbConfig\x12<\n\ntable_size\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt64ValueB\n\xfa\x42\x07\x32\x05\x18\xcb\x96\xb1\x02\x1a\xfd\x01\n\x13OriginalDstLbConfig\x12\x17\n\x0fuse_http_header\x18\x01 \x01(\x08\x12\x18\n\x10http_header_name\x18\x02 \x01(\t\x12G\n\x16upstream_port_override\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\t\xfa\x42\x06*\x04\x18\xff\xff\x03\x12\x39\n\x0cmetadata_key\x18\x04 \x01(\x0b\x32#.envoy.type.metadata.v3.MetadataKey:/\x9a\xc5\x88\x1e*\n(envoy.api.v2.Cluster.OriginalDstLbConfig\x1a\xb2\t\n\x0e\x43ommonLbConfig\x12\x37\n\x17healthy_panic_threshold\x18\x01 \x01(\x0b\x32\x16.envoy.type.v3.Percent\x12\x61\n\x14zone_aware_lb_config\x18\x02 
\x01(\x0b\x32\x41.envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfigH\x00\x12o\n\x1blocality_weighted_lb_config\x18\x03 \x01(\x0b\x32H.envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfigH\x00\x12\x36\n\x13update_merge_window\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\'\n\x1fignore_new_hosts_until_first_hc\x18\x05 \x01(\x08\x12,\n$close_connections_on_host_set_change\x18\x06 \x01(\x08\x12o\n\x1c\x63onsistent_hashing_lb_config\x18\x07 \x01(\x0b\x32I.envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig\x12\x43\n\x14override_host_status\x18\x08 \x01(\x0b\x32%.envoy.config.core.v3.HealthStatusSet\x1a\xd9\x01\n\x11ZoneAwareLbConfig\x12/\n\x0frouting_enabled\x18\x01 \x01(\x0b\x32\x16.envoy.type.v3.Percent\x12\x36\n\x10min_cluster_size\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12\x1d\n\x15\x66\x61il_traffic_on_panic\x18\x03 \x01(\x08:<\x9a\xc5\x88\x1e\x37\n5envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig\x1a_\n\x18LocalityWeightedLbConfig:C\x9a\xc5\x88\x1e>\n\n\x11max_ejection_time\x18\x15 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12;\n\x18max_ejection_time_jitter\x18\x16 \x01(\x0b\x32\x19.google.protobuf.Duration\x12O\n+successful_active_health_check_uneject_host\x18\x17 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12<\n\x08monitors\x18\x18 \x03(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12\x39\n\x15\x61lways_eject_one_host\x18\x19 \x01(\x0b\x32\x1a.google.protobuf.BoolValue:,\x9a\xc5\x88\x1e\'\n%envoy.api.v2.cluster.OutlierDetectionB\x92\x01\n%io.envoyproxy.envoy.config.cluster.v3B\x15OutlierDetectionProtoP\x01ZHgithub.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Cluster + module V3 + OutlierDetection = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.cluster.v3.OutlierDetection").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/address_pb.rb b/lib/envoy/config/core/v3/address_pb.rb new file mode 100644 index 0000000..49fe853 --- /dev/null +++ b/lib/envoy/config/core/v3/address_pb.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/config/core/v3/address.proto + +require "google/protobuf" + +require "envoy/config/core/v3/extension_pb" +require "envoy/config/core/v3/socket_option_pb" +require "google/protobuf/wrappers_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"envoy/config/core/v3/address.proto\x12\x14\x65nvoy.config.core.v3\x1a$envoy/config/core/v3/extension.proto\x1a(envoy/config/core/v3/socket_option.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"T\n\x04Pipe\x12\x15\n\x04path\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x16\n\x04mode\x18\x02 \x01(\rB\x08\xfa\x42\x05*\x03\x18\xff\x03:\x1d\x9a\xc5\x88\x1e\x18\n\x16\x65nvoy.api.v2.core.Pipe\"j\n\x14\x45nvoyInternalAddress\x12\x1e\n\x14server_listener_name\x18\x01 \x01(\tH\x00\x12\x13\n\x0b\x65ndpoint_id\x18\x02 \x01(\tB\x1d\n\x16\x61\x64\x64ress_name_specifier\x12\x03\xf8\x42\x01\"\xd7\x02\n\rSocketAddress\x12H\n\x08protocol\x18\x01 \x01(\x0e\x32,.envoy.config.core.v3.SocketAddress.ProtocolB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x18\n\x07\x61\x64\x64ress\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x1f\n\nport_value\x18\x03 \x01(\rB\t\xfa\x42\x06*\x04\x18\xff\xff\x03H\x00\x12\x14\n\nnamed_port\x18\x04 \x01(\tH\x00\x12\x15\n\rresolver_name\x18\x05 \x01(\t\x12\x13\n\x0bipv4_compat\x18\x06 
\x01(\x08\x12\"\n\x1anetwork_namespace_filepath\x18\x07 \x01(\t\"\x1c\n\x08Protocol\x12\x07\n\x03TCP\x10\x00\x12\x07\n\x03UDP\x10\x01:&\x9a\xc5\x88\x1e!\n\x1f\x65nvoy.api.v2.core.SocketAddressB\x15\n\x0eport_specifier\x12\x03\xf8\x42\x01\"\xdd\x01\n\x0cTcpKeepalive\x12\x36\n\x10keepalive_probes\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x34\n\x0ekeepalive_time\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x38\n\x12keepalive_interval\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.api.v2.core.TcpKeepalive\"\x99\x01\n\x12\x45xtraSourceAddress\x12>\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32#.envoy.config.core.v3.SocketAddressB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x43\n\x0esocket_options\x18\x02 \x01(\x0b\x32+.envoy.config.core.v3.SocketOptionsOverride\"\xc5\x03\n\nBindConfig\x12;\n\x0esource_address\x18\x01 \x01(\x0b\x32#.envoy.config.core.v3.SocketAddress\x12,\n\x08\x66reebind\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12:\n\x0esocket_options\x18\x03 \x03(\x0b\x32\".envoy.config.core.v3.SocketOption\x12H\n\x16\x65xtra_source_addresses\x18\x05 \x03(\x0b\x32(.envoy.config.core.v3.ExtraSourceAddress\x12U\n\x1b\x61\x64\x64itional_source_addresses\x18\x04 \x03(\x0b\x32#.envoy.config.core.v3.SocketAddressB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12J\n\x16local_address_selector\x18\x06 \x01(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig:#\x9a\xc5\x88\x1e\x1e\n\x1c\x65nvoy.api.v2.core.BindConfig\"\xf4\x01\n\x07\x41\x64\x64ress\x12=\n\x0esocket_address\x18\x01 \x01(\x0b\x32#.envoy.config.core.v3.SocketAddressH\x00\x12*\n\x04pipe\x18\x02 \x01(\x0b\x32\x1a.envoy.config.core.v3.PipeH\x00\x12L\n\x16\x65nvoy_internal_address\x18\x03 \x01(\x0b\x32*.envoy.config.core.v3.EnvoyInternalAddressH\x00: \x9a\xc5\x88\x1e\x1b\n\x19\x65nvoy.api.v2.core.AddressB\x0e\n\x07\x61\x64\x64ress\x12\x03\xf8\x42\x01\"\x8c\x01\n\tCidrRange\x12\x1f\n\x0e\x61\x64\x64ress_prefix\x18\x01 
\x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12:\n\nprefix_len\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x08\xfa\x42\x05*\x03\x18\x80\x01:\"\x9a\xc5\x88\x1e\x1d\n\x1b\x65nvoy.api.v2.core.CidrRangeB\x80\x01\n\"io.envoyproxy.envoy.config.core.v3B\x0c\x41\x64\x64ressProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + Pipe = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Pipe").msgclass + EnvoyInternalAddress = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.EnvoyInternalAddress").msgclass + SocketAddress = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketAddress").msgclass + SocketAddress::Protocol = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketAddress.Protocol").enummodule + TcpKeepalive = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.TcpKeepalive").msgclass + ExtraSourceAddress = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ExtraSourceAddress").msgclass + BindConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.BindConfig").msgclass + Address = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Address").msgclass + CidrRange = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.CidrRange").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/backoff_pb.rb b/lib/envoy/config/core/v3/backoff_pb.rb new file mode 100644 index 0000000..08d1dcb --- /dev/null +++ b/lib/envoy/config/core/v3/backoff_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/backoff.proto + +require "google/protobuf" + +require "google/protobuf/duration_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"envoy/config/core/v3/backoff.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1egoogle/protobuf/duration.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb8\x01\n\x0f\x42\x61\x63koffStrategy\x12@\n\rbase_interval\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0e\xfa\x42\x0b\xaa\x01\x08\x08\x01\x32\x04\x10\xc0\x84=\x12\x39\n\x0cmax_interval\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00:(\x9a\xc5\x88\x1e#\n!envoy.api.v2.core.BackoffStrategyB\x80\x01\n\"io.envoyproxy.envoy.config.core.v3B\x0c\x42\x61\x63koffProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + BackoffStrategy = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.BackoffStrategy").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/base_pb.rb b/lib/envoy/config/core/v3/base_pb.rb new file mode 100644 index 0000000..848bbc4 --- /dev/null +++ b/lib/envoy/config/core/v3/base_pb.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/base.proto + +require "google/protobuf" + +require "envoy/config/core/v3/address_pb" +require "envoy/config/core/v3/backoff_pb" +require "envoy/config/core/v3/http_uri_pb" +require "envoy/type/v3/percent_pb" +require "envoy/type/v3/semantic_version_pb" +require "google/protobuf/any_pb" +require "google/protobuf/struct_pb" +require "google/protobuf/wrappers_pb" +require "xds/core/v3/context_params_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/migrate_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1f\x65nvoy/config/core/v3/base.proto\x12\x14\x65nvoy.config.core.v3\x1a\"envoy/config/core/v3/address.proto\x1a\"envoy/config/core/v3/backoff.proto\x1a#envoy/config/core/v3/http_uri.proto\x1a\x1b\x65nvoy/type/v3/percent.proto\x1a$envoy/type/v3/semantic_version.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a xds/core/v3/context_params.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1eudpa/annotations/migrate.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"]\n\x08Locality\x12\x0e\n\x06region\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x10\n\x08sub_zone\x18\x03 \x01(\t:!\x9a\xc5\x88\x1e\x1c\n\x1a\x65nvoy.api.v2.core.Locality\"\x91\x01\n\x0c\x42uildVersion\x12/\n\x07version\x18\x01 \x01(\x0b\x32\x1e.envoy.type.v3.SemanticVersion\x12)\n\x08metadata\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.api.v2.core.BuildVersion\"\xcf\x01\n\tExtension\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x02 \x01(\t\x12$\n\x0ftype_descriptor\x18\x03 \x01(\tB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12\x33\n\x07version\x18\x04 \x01(\x0b\x32\".envoy.config.core.v3.BuildVersion\x12\x10\n\x08\x64isabled\x18\x05 
\x01(\x08\x12\x11\n\ttype_urls\x18\x06 \x03(\t:\"\x9a\xc5\x88\x1e\x1d\n\x1b\x65nvoy.api.v2.core.Extension\"\x8a\x05\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12)\n\x08metadata\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12M\n\x12\x64ynamic_parameters\x18\x0c \x03(\x0b\x32\x31.envoy.config.core.v3.Node.DynamicParametersEntry\x12\x30\n\x08locality\x18\x04 \x01(\x0b\x32\x1e.envoy.config.core.v3.Locality\x12\x17\n\x0fuser_agent_name\x18\x06 \x01(\t\x12\x1c\n\x12user_agent_version\x18\x07 \x01(\tH\x00\x12\x46\n\x18user_agent_build_version\x18\x08 \x01(\x0b\x32\".envoy.config.core.v3.BuildVersionH\x00\x12\x33\n\nextensions\x18\t \x03(\x0b\x32\x1f.envoy.config.core.v3.Extension\x12\x17\n\x0f\x63lient_features\x18\n \x03(\t\x12G\n\x13listening_addresses\x18\x0b \x03(\x0b\x32\x1d.envoy.config.core.v3.AddressB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x1aT\n\x16\x44ynamicParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.xds.core.v3.ContextParams:\x02\x38\x01:\x1d\x9a\xc5\x88\x1e\x18\n\x16\x65nvoy.api.v2.core.NodeB\x19\n\x17user_agent_version_typeJ\x04\x08\x05\x10\x06R\rbuild_version\"\x90\x03\n\x08Metadata\x12Y\n\x0f\x66ilter_metadata\x18\x01 \x03(\x0b\x32\x32.envoy.config.core.v3.Metadata.FilterMetadataEntryB\x0c\xfa\x42\t\x9a\x01\x06\"\x04r\x02\x10\x01\x12\x64\n\x15typed_filter_metadata\x18\x02 \x03(\x0b\x32\x37.envoy.config.core.v3.Metadata.TypedFilterMetadataEntryB\x0c\xfa\x42\t\x9a\x01\x06\"\x04r\x02\x10\x01\x1aN\n\x13\x46ilterMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct:\x02\x38\x01\x1aP\n\x18TypedFilterMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01:!\x9a\xc5\x88\x1e\x1c\n\x1a\x65nvoy.api.v2.core.Metadata\"c\n\rRuntimeUInt32\x12\x15\n\rdefault_value\x18\x02 \x01(\r\x12\x13\n\x0bruntime_key\x18\x03 
\x01(\t:&\x9a\xc5\x88\x1e!\n\x1f\x65nvoy.api.v2.core.RuntimeUInt32\"T\n\x0eRuntimePercent\x12-\n\rdefault_value\x18\x01 \x01(\x0b\x32\x16.envoy.type.v3.Percent\x12\x13\n\x0bruntime_key\x18\x02 \x01(\t\"c\n\rRuntimeDouble\x12\x15\n\rdefault_value\x18\x01 \x01(\x01\x12\x13\n\x0bruntime_key\x18\x02 \x01(\t:&\x9a\xc5\x88\x1e!\n\x1f\x65nvoy.api.v2.core.RuntimeDouble\"\x93\x01\n\x12RuntimeFeatureFlag\x12;\n\rdefault_value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValueB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x13\n\x0bruntime_key\x18\x02 \x01(\t:+\x9a\xc5\x88\x1e&\n$envoy.api.v2.core.RuntimeFeatureFlag\"K\n\x08KeyValue\x12#\n\x03key\x18\x01 \x01(\tB\x16\x18\x01\xfa\x42\x08r\x06\x10\x01(\x80\x80\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12\x1a\n\x05value\x18\x02 \x01(\x0c\x42\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\"O\n\x0cKeyValuePair\x12\x18\n\x03key\x18\x01 \x01(\tB\x0b\xfa\x42\x08r\x06\x10\x01(\x80\x80\x01\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\"\xde\x02\n\x0eKeyValueAppend\x12\x32\n\x06record\x18\x03 \x01(\x0b\x32\".envoy.config.core.v3.KeyValuePair\x12\x42\n\x05\x65ntry\x18\x01 \x01(\x0b\x32\x1e.envoy.config.core.v3.KeyValueB\x13\x18\x01\xfa\x42\x05\x8a\x01\x02\x08\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12S\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32\x39.envoy.config.core.v3.KeyValueAppend.KeyValueAppendActionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\"\x7f\n\x14KeyValueAppendAction\x12\x1b\n\x17\x41PPEND_IF_EXISTS_OR_ADD\x10\x00\x12\x11\n\rADD_IF_ABSENT\x10\x01\x12\x1e\n\x1aOVERWRITE_IF_EXISTS_OR_ADD\x10\x02\x12\x17\n\x13OVERWRITE_IF_EXISTS\x10\x03\"c\n\x10KeyValueMutation\x12\x34\n\x06\x61ppend\x18\x01 \x01(\x0b\x32$.envoy.config.core.v3.KeyValueAppend\x12\x19\n\x06remove\x18\x02 \x01(\tB\t\xfa\x42\x06r\x04(\x80\x80\x01\"5\n\x0eQueryParameter\x12\x14\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\r\n\x05value\x18\x02 \x01(\t\"\xb7\x01\n\x0bHeaderValue\x12\x1e\n\x03key\x18\x01 
\x01(\tB\x11\xfa\x42\x0er\x0c\x10\x01(\x80\x80\x01\xc0\x01\x01\xc8\x01\x00\x12\x30\n\x05value\x18\x02 \x01(\tB!\xfa\x42\x0cr\n(\x80\x80\x01\xc0\x01\x02\xc8\x01\x00\xf2\x98\xfe\x8f\x05\x0c\x12\nvalue_type\x12\x30\n\traw_value\x18\x03 \x01(\x0c\x42\x1d\xfa\x42\x08z\x06\x10\x00\x18\x80\x80\x01\xf2\x98\xfe\x8f\x05\x0c\x12\nvalue_type:$\x9a\xc5\x88\x1e\x1f\n\x1d\x65nvoy.api.v2.core.HeaderValue\"\xab\x03\n\x11HeaderValueOption\x12;\n\x06header\x18\x01 \x01(\x0b\x32!.envoy.config.core.v3.HeaderValueB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x37\n\x06\x61ppend\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValueB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12[\n\rappend_action\x18\x03 \x01(\x0e\x32:.envoy.config.core.v3.HeaderValueOption.HeaderAppendActionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x18\n\x10keep_empty_value\x18\x04 \x01(\x08\"}\n\x12HeaderAppendAction\x12\x1b\n\x17\x41PPEND_IF_EXISTS_OR_ADD\x10\x00\x12\x11\n\rADD_IF_ABSENT\x10\x01\x12\x1e\n\x1aOVERWRITE_IF_EXISTS_OR_ADD\x10\x02\x12\x17\n\x13OVERWRITE_IF_EXISTS\x10\x03:*\x9a\xc5\x88\x1e%\n#envoy.api.v2.core.HeaderValueOption\"c\n\tHeaderMap\x12\x32\n\x07headers\x18\x01 \x03(\x0b\x32!.envoy.config.core.v3.HeaderValue:\"\x9a\xc5\x88\x1e\x1d\n\x1b\x65nvoy.api.v2.core.HeaderMap\")\n\x10WatchedDirectory\x12\x15\n\x04path\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\"\xfd\x01\n\nDataSource\x12\x1b\n\x08\x66ilename\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x16\n\x0cinline_bytes\x18\x02 \x01(\x0cH\x00\x12\x17\n\rinline_string\x18\x03 \x01(\tH\x00\x12\'\n\x14\x65nvironment_variable\x18\x04 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x41\n\x11watched_directory\x18\x05 \x01(\x0b\x32&.envoy.config.core.v3.WatchedDirectory:#\x9a\xc5\x88\x1e\x1e\n\x1c\x65nvoy.api.v2.core.DataSourceB\x10\n\tspecifier\x12\x03\xf8\x42\x01\"\xe3\x04\n\x0bRetryPolicy\x12=\n\x0eretry_back_off\x18\x01 \x01(\x0b\x32%.envoy.config.core.v3.BackoffStrategy\x12\x46\n\x0bnum_retries\x18\x02 
\x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x13\xf2\x98\xfe\x8f\x05\r\n\x0bmax_retries\x12\x10\n\x08retry_on\x18\x03 \x01(\t\x12G\n\x0eretry_priority\x18\x04 \x01(\x0b\x32/.envoy.config.core.v3.RetryPolicy.RetryPriority\x12R\n\x14retry_host_predicate\x18\x05 \x03(\x0b\x32\x34.envoy.config.core.v3.RetryPolicy.RetryHostPredicate\x12)\n!host_selection_retry_max_attempts\x18\x06 \x01(\x03\x1a\x63\n\rRetryPriority\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12,\n\x0ctyped_config\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\r\n\x0b\x63onfig_type\x1ah\n\x12RetryHostPredicate\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12,\n\x0ctyped_config\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\r\n\x0b\x63onfig_type:$\x9a\xc5\x88\x1e\x1f\n\x1d\x65nvoy.api.v2.core.RetryPolicy\"\xca\x01\n\x10RemoteDataSource\x12\x39\n\x08http_uri\x18\x01 \x01(\x0b\x32\x1d.envoy.config.core.v3.HttpUriB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x17\n\x06sha256\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x37\n\x0cretry_policy\x18\x03 \x01(\x0b\x32!.envoy.config.core.v3.RetryPolicy:)\x9a\xc5\x88\x1e$\n\"envoy.api.v2.core.RemoteDataSource\"\xba\x01\n\x0f\x41syncDataSource\x12\x31\n\x05local\x18\x01 \x01(\x0b\x32 .envoy.config.core.v3.DataSourceH\x00\x12\x38\n\x06remote\x18\x02 \x01(\x0b\x32&.envoy.config.core.v3.RemoteDataSourceH\x00:(\x9a\xc5\x88\x1e#\n!envoy.api.v2.core.AsyncDataSourceB\x10\n\tspecifier\x12\x03\xf8\x42\x01\"\x9d\x01\n\x0fTransportSocket\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12,\n\x0ctyped_config\x18\x03 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00:(\x9a\xc5\x88\x1e#\n!envoy.api.v2.core.TransportSocketB\r\n\x0b\x63onfig_typeJ\x04\x08\x02\x10\x03R\x06\x63onfig\"\xa5\x01\n\x18RuntimeFractionalPercent\x12\x41\n\rdefault_value\x18\x01 \x01(\x0b\x32 .envoy.type.v3.FractionalPercentB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x13\n\x0bruntime_key\x18\x02 
\x01(\t:1\x9a\xc5\x88\x1e,\n*envoy.api.v2.core.RuntimeFractionalPercent\"I\n\x0c\x43ontrolPlane\x12\x12\n\nidentifier\x18\x01 \x01(\t:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.api.v2.core.ControlPlane*(\n\x0fRoutingPriority\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04HIGH\x10\x01*\x89\x01\n\rRequestMethod\x12\x16\n\x12METHOD_UNSPECIFIED\x10\x00\x12\x07\n\x03GET\x10\x01\x12\x08\n\x04HEAD\x10\x02\x12\x08\n\x04POST\x10\x03\x12\x07\n\x03PUT\x10\x04\x12\n\n\x06\x44\x45LETE\x10\x05\x12\x0b\n\x07\x43ONNECT\x10\x06\x12\x0b\n\x07OPTIONS\x10\x07\x12\t\n\x05TRACE\x10\x08\x12\t\n\x05PATCH\x10\t*>\n\x10TrafficDirection\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07INBOUND\x10\x01\x12\x0c\n\x08OUTBOUND\x10\x02\x42}\n\"io.envoyproxy.envoy.config.core.v3B\tBaseProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + Locality = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Locality").msgclass + BuildVersion = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.BuildVersion").msgclass + Extension = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Extension").msgclass + Node = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Node").msgclass + Metadata = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Metadata").msgclass + RuntimeUInt32 = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RuntimeUInt32").msgclass + RuntimePercent = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RuntimePercent").msgclass + RuntimeDouble = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RuntimeDouble").msgclass + RuntimeFeatureFlag = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RuntimeFeatureFlag").msgclass + KeyValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.KeyValue").msgclass + KeyValuePair = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.KeyValuePair").msgclass + KeyValueAppend = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.KeyValueAppend").msgclass + KeyValueAppend::KeyValueAppendAction = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.KeyValueAppend.KeyValueAppendAction").enummodule + KeyValueMutation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.KeyValueMutation").msgclass + QueryParameter = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.QueryParameter").msgclass + HeaderValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HeaderValue").msgclass + HeaderValueOption = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HeaderValueOption").msgclass + HeaderValueOption::HeaderAppendAction = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HeaderValueOption.HeaderAppendAction").enummodule + HeaderMap = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HeaderMap").msgclass + WatchedDirectory = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.WatchedDirectory").msgclass + DataSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.DataSource").msgclass + RetryPolicy = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RetryPolicy").msgclass + RetryPolicy::RetryPriority = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RetryPolicy.RetryPriority").msgclass + RetryPolicy::RetryHostPredicate = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RetryPolicy.RetryHostPredicate").msgclass + RemoteDataSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RemoteDataSource").msgclass + AsyncDataSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.AsyncDataSource").msgclass + TransportSocket = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.TransportSocket").msgclass + RuntimeFractionalPercent = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RuntimeFractionalPercent").msgclass + ControlPlane = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ControlPlane").msgclass + RoutingPriority = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RoutingPriority").enummodule + RequestMethod = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RequestMethod").enummodule + TrafficDirection = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.TrafficDirection").enummodule + end + end + end +end diff --git a/lib/envoy/config/core/v3/cel_pb.rb b/lib/envoy/config/core/v3/cel_pb.rb new file mode 100644 index 0000000..8e2f8d9 --- /dev/null +++ b/lib/envoy/config/core/v3/cel_pb.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/cel.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" + + +descriptor_data = "\n\x1e\x65nvoy/config/core/v3/cel.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1dudpa/annotations/status.proto\"v\n\x13\x43\x65lExpressionConfig\x12 \n\x18\x65nable_string_conversion\x18\x01 \x01(\x08\x12\x1c\n\x14\x65nable_string_concat\x18\x02 \x01(\x08\x12\x1f\n\x17\x65nable_string_functions\x18\x03 \x01(\x08\x42|\n\"io.envoyproxy.envoy.config.core.v3B\x08\x43\x65lProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + CelExpressionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.CelExpressionConfig").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/config_source_pb.rb b/lib/envoy/config/core/v3/config_source_pb.rb new file mode 100644 index 0000000..c98ca69 --- /dev/null +++ b/lib/envoy/config/core/v3/config_source_pb.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/config_source.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/extension_pb" +require "envoy/config/core/v3/grpc_service_pb" +require "google/protobuf/any_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/wrappers_pb" +require "xds/core/v3/authority_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n(envoy/config/core/v3/config_source.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a$envoy/config/core/v3/extension.proto\x1a\'envoy/config/core/v3/grpc_service.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1bxds/core/v3/authority.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xdc\x05\n\x0f\x41piConfigSource\x12I\n\x08\x61pi_type\x18\x01 \x01(\x0e\x32-.envoy.config.core.v3.ApiConfigSource.ApiTypeB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12I\n\x15transport_api_version\x18\x08 \x01(\x0e\x32 .envoy.config.core.v3.ApiVersionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x15\n\rcluster_names\x18\x02 \x03(\t\x12\x38\n\rgrpc_services\x18\x04 \x03(\x0b\x32!.envoy.config.core.v3.GrpcService\x12\x30\n\rrefresh_delay\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12<\n\x0frequest_timeout\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12\x44\n\x13rate_limit_settings\x18\x06 \x01(\x0b\x32\'.envoy.config.core.v3.RateLimitSettings\x12&\n\x1eset_node_on_first_message_only\x18\x07 \x01(\x08\x12\x45\n\x11\x63onfig_validators\x18\t 
\x03(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\"\x92\x01\n\x07\x41piType\x12\x33\n%DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE\x10\x00\x1a\x08\x08\x01\xa8\xf7\xb4\x8b\x02\x01\x12\x08\n\x04REST\x10\x01\x12\x08\n\x04GRPC\x10\x02\x12\x0e\n\nDELTA_GRPC\x10\x03\x12\x13\n\x0f\x41GGREGATED_GRPC\x10\x05\x12\x19\n\x15\x41GGREGATED_DELTA_GRPC\x10\x06:(\x9a\xc5\x88\x1e#\n!envoy.api.v2.core.ApiConfigSource\"I\n\x16\x41ggregatedConfigSource:/\x9a\xc5\x88\x1e*\n(envoy.api.v2.core.AggregatedConfigSource\"\x88\x01\n\x10SelfConfigSource\x12I\n\x15transport_api_version\x18\x01 \x01(\x0e\x32 .envoy.config.core.v3.ApiVersionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01:)\x9a\xc5\x88\x1e$\n\"envoy.api.v2.core.SelfConfigSource\"\xb2\x01\n\x11RateLimitSettings\x12\x30\n\nmax_tokens\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12?\n\tfill_rate\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.DoubleValueB\x0e\xfa\x42\x0b\x12\t!\x00\x00\x00\x00\x00\x00\x00\x00:*\x9a\xc5\x88\x1e%\n#envoy.api.v2.core.RateLimitSettings\"l\n\x10PathConfigSource\x12\x15\n\x04path\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x41\n\x11watched_directory\x18\x02 \x01(\x0b\x32&.envoy.config.core.v3.WatchedDirectory\"\xa2\x04\n\x0c\x43onfigSource\x12+\n\x0b\x61uthorities\x18\x07 \x03(\x0b\x32\x16.xds.core.v3.Authority\x12\x1b\n\x04path\x18\x01 \x01(\tB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0H\x00\x12\x44\n\x12path_config_source\x18\x08 \x01(\x0b\x32&.envoy.config.core.v3.PathConfigSourceH\x00\x12\x42\n\x11\x61pi_config_source\x18\x02 \x01(\x0b\x32%.envoy.config.core.v3.ApiConfigSourceH\x00\x12;\n\x03\x61\x64s\x18\x03 \x01(\x0b\x32,.envoy.config.core.v3.AggregatedConfigSourceH\x00\x12\x36\n\x04self\x18\x05 \x01(\x0b\x32&.envoy.config.core.v3.SelfConfigSourceH\x00\x12\x38\n\x15initial_fetch_timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12H\n\x14resource_api_version\x18\x06 \x01(\x0e\x32 .envoy.config.core.v3.ApiVersionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01:%\x9a\xc5\x88\x1e 
\n\x1e\x65nvoy.api.v2.core.ConfigSourceB\x1e\n\x17\x63onfig_source_specifier\x12\x03\xf8\x42\x01\"\xd5\x01\n\x15\x45xtensionConfigSource\x12\x43\n\rconfig_source\x18\x01 \x01(\x0b\x32\".envoy.config.core.v3.ConfigSourceB\x08\xfa\x42\x05\xa2\x01\x02\x08\x01\x12,\n\x0e\x64\x65\x66\x61ult_config\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12,\n$apply_default_config_without_warming\x18\x03 \x01(\x08\x12\x1b\n\ttype_urls\x18\x04 \x03(\tB\x08\xfa\x42\x05\x92\x01\x02\x08\x01*3\n\nApiVersion\x12\x08\n\x04\x41UTO\x10\x00\x12\x13\n\x02V2\x10\x01\x1a\x0b\x08\x01\x8a\xf4\x9b\xb3\x05\x03\x33.0\x12\x06\n\x02V3\x10\x02\x42\x85\x01\n\"io.envoyproxy.envoy.config.core.v3B\x11\x43onfigSourceProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + ApiConfigSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ApiConfigSource").msgclass + ApiConfigSource::ApiType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ApiConfigSource.ApiType").enummodule + AggregatedConfigSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.AggregatedConfigSource").msgclass + SelfConfigSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SelfConfigSource").msgclass + RateLimitSettings = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.RateLimitSettings").msgclass + PathConfigSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.PathConfigSource").msgclass + ConfigSource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ConfigSource").msgclass + ExtensionConfigSource = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ExtensionConfigSource").msgclass + ApiVersion = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ApiVersion").enummodule + end + end + end +end diff --git a/lib/envoy/config/core/v3/event_service_config_pb.rb b/lib/envoy/config/core/v3/event_service_config_pb.rb new file mode 100644 index 0000000..44a6841 --- /dev/null +++ b/lib/envoy/config/core/v3/event_service_config_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/config/core/v3/event_service_config.proto + +require "google/protobuf" + +require "envoy/config/core/v3/grpc_service_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n/envoy/config/core/v3/event_service_config.proto\x12\x14\x65nvoy.config.core.v3\x1a\'envoy/config/core/v3/grpc_service.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\x9c\x01\n\x12\x45ventServiceConfig\x12\x39\n\x0cgrpc_service\x18\x01 \x01(\x0b\x32!.envoy.config.core.v3.GrpcServiceH\x00:+\x9a\xc5\x88\x1e&\n$envoy.api.v2.core.EventServiceConfigB\x1e\n\x17\x63onfig_source_specifier\x12\x03\xf8\x42\x01\x42\x8b\x01\n\"io.envoyproxy.envoy.config.core.v3B\x17\x45ventServiceConfigProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + EventServiceConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.EventServiceConfig").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/extension_pb.rb b/lib/envoy/config/core/v3/extension_pb.rb new file mode 100644 index 
0000000..900bbc9 --- /dev/null +++ b/lib/envoy/config/core/v3/extension_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/config/core/v3/extension.proto + +require "google/protobuf" + +require "google/protobuf/any_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n$envoy/config/core/v3/extension.proto\x12\x14\x65nvoy.config.core.v3\x1a\x19google/protobuf/any.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"c\n\x14TypedExtensionConfig\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x34\n\x0ctyped_config\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyB\x08\xfa\x42\x05\xa2\x01\x02\x08\x01\x42\x82\x01\n\"io.envoyproxy.envoy.config.core.v3B\x0e\x45xtensionProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + TypedExtensionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.TypedExtensionConfig").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/grpc_method_list_pb.rb b/lib/envoy/config/core/v3/grpc_method_list_pb.rb new file mode 100644 index 0000000..3c7db2d --- /dev/null +++ b/lib/envoy/config/core/v3/grpc_method_list_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/grpc_method_list.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n+envoy/config/core/v3/grpc_method_list.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xec\x01\n\x0eGrpcMethodList\x12>\n\x08services\x18\x01 \x03(\x0b\x32,.envoy.config.core.v3.GrpcMethodList.Service\x1aq\n\x07Service\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x1e\n\x0cmethod_names\x18\x02 \x03(\tB\x08\xfa\x42\x05\x92\x01\x02\x08\x01:/\x9a\xc5\x88\x1e*\n(envoy.api.v2.core.GrpcMethodList.Service:\'\x9a\xc5\x88\x1e\"\n envoy.api.v2.core.GrpcMethodListB\x87\x01\n\"io.envoyproxy.envoy.config.core.v3B\x13GrpcMethodListProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + GrpcMethodList = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcMethodList").msgclass + GrpcMethodList::Service = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcMethodList.Service").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/grpc_service_pb.rb b/lib/envoy/config/core/v3/grpc_service_pb.rb new file mode 100644 index 0000000..c32ef1f --- /dev/null +++ b/lib/envoy/config/core/v3/grpc_service_pb.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/grpc_service.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "google/protobuf/any_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/empty_pb" +require "google/protobuf/struct_pb" +require "google/protobuf/wrappers_pb" +require "udpa/annotations/sensitive_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\'envoy/config/core/v3/grpc_service.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a udpa/annotations/sensitive.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xad\x1e\n\x0bGrpcService\x12\x41\n\nenvoy_grpc\x18\x01 \x01(\x0b\x32+.envoy.config.core.v3.GrpcService.EnvoyGrpcH\x00\x12\x43\n\x0bgoogle_grpc\x18\x02 \x01(\x0b\x32,.envoy.config.core.v3.GrpcService.GoogleGrpcH\x00\x12*\n\x07timeout\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x10initial_metadata\x18\x05 \x03(\x0b\x32!.envoy.config.core.v3.HeaderValue\x12\x37\n\x0cretry_policy\x18\x06 \x01(\x0b\x32!.envoy.config.core.v3.RetryPolicy\x1a\x97\x02\n\tEnvoyGrpc\x12\x1d\n\x0c\x63luster_name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12$\n\tauthority\x18\x02 \x01(\tB\x11\xfa\x42\x0er\x0c\x10\x00(\x80\x80\x01\xc0\x01\x02\xc8\x01\x00\x12\x37\n\x0cretry_policy\x18\x03 \x01(\x0b\x32!.envoy.config.core.v3.RetryPolicy\x12@\n\x1amax_receive_message_length\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x1a\n\x12skip_envoy_headers\x18\x05 \x01(\x08:.\x9a\xc5\x88\x1e)\n\'envoy.api.v2.core.GrpcService.EnvoyGrpc\x1a\x94\x19\n\nGoogleGrpc\x12\x1b\n\ntarget_uri\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\\\n\x13\x63hannel_credentials\x18\x02 
\x01(\x0b\x32?.envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials\x12\x38\n\x1a\x63hannel_credentials_plugin\x18\t \x03(\x0b\x32\x14.google.protobuf.Any\x12V\n\x10\x63\x61ll_credentials\x18\x03 \x03(\x0b\x32<.envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials\x12\x35\n\x17\x63\x61ll_credentials_plugin\x18\n \x03(\x0b\x32\x14.google.protobuf.Any\x12\x1c\n\x0bstat_prefix\x18\x04 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12 \n\x18\x63redentials_factory_name\x18\x05 \x01(\t\x12\'\n\x06\x63onfig\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x43\n\x1dper_stream_buffer_limit_bytes\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12N\n\x0c\x63hannel_args\x18\x08 \x01(\x0b\x32\x38.envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs\x1a\xfb\x01\n\x0eSslCredentials\x12\x34\n\nroot_certs\x18\x01 \x01(\x0b\x32 .envoy.config.core.v3.DataSource\x12=\n\x0bprivate_key\x18\x02 \x01(\x0b\x32 .envoy.config.core.v3.DataSourceB\x06\xb8\xb7\x8b\xa4\x02\x01\x12\x34\n\ncert_chain\x18\x03 \x01(\x0b\x32 .envoy.config.core.v3.DataSource:>\x9a\xc5\x88\x1e\x39\n7envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials\x1a`\n\x16GoogleLocalCredentials:F\x9a\xc5\x88\x1e\x41\n?envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials\x1a\xe1\x02\n\x12\x43hannelCredentials\x12V\n\x0fssl_credentials\x18\x01 \x01(\x0b\x32;.envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentialsH\x00\x12\x30\n\x0egoogle_default\x18\x02 \x01(\x0b\x32\x16.google.protobuf.EmptyH\x00\x12`\n\x11local_credentials\x18\x03 \x01(\x0b\x32\x43.envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentialsH\x00:B\x9a\xc5\x88\x1e=\n;envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentialsB\x1b\n\x14\x63redential_specifier\x12\x03\xf8\x42\x01\x1a\xb1\x0c\n\x0f\x43\x61llCredentials\x12\x16\n\x0c\x61\x63\x63\x65ss_token\x18\x01 \x01(\tH\x00\x12\x37\n\x15google_compute_engine\x18\x02 \x01(\x0b\x32\x16.google.protobuf.EmptyH\x00\x12\x1e\n\x14google_refresh_token\x18\x03 
\x01(\tH\x00\x12\x85\x01\n\x1aservice_account_jwt_access\x18\x04 \x01(\x0b\x32_.envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentialsH\x00\x12g\n\ngoogle_iam\x18\x05 \x01(\x0b\x32Q.envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentialsH\x00\x12q\n\x0b\x66rom_plugin\x18\x06 \x01(\x0b\x32Z.envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPluginH\x00\x12^\n\x0bsts_service\x18\x07 \x01(\x0b\x32G.envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsServiceH\x00\x1a\xba\x01\n\"ServiceAccountJWTAccessCredentials\x12\x10\n\x08json_key\x18\x01 \x01(\t\x12\x1e\n\x16token_lifetime_seconds\x18\x02 \x01(\x04:b\x9a\xc5\x88\x1e]\n[envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials\x1a\xa5\x01\n\x14GoogleIAMCredentials\x12\x1b\n\x13\x61uthorization_token\x18\x01 \x01(\t\x12\x1a\n\x12\x61uthority_selector\x18\x02 \x01(\t:T\x9a\xc5\x88\x1eO\nMenvoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials\x1a\xd7\x01\n\x1dMetadataCredentialsFromPlugin\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x0ctyped_config\x18\x03 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00:]\x9a\xc5\x88\x1eX\nVenvoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPluginB\r\n\x0b\x63onfig_typeJ\x04\x08\x02\x10\x03R\x06\x63onfig\x1a\xcb\x02\n\nStsService\x12\"\n\x1atoken_exchange_service_uri\x18\x01 \x01(\t\x12\x10\n\x08resource\x18\x02 \x01(\t\x12\x10\n\x08\x61udience\x18\x03 \x01(\t\x12\r\n\x05scope\x18\x04 \x01(\t\x12\x1c\n\x14requested_token_type\x18\x05 \x01(\t\x12#\n\x12subject_token_path\x18\x06 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12#\n\x12subject_token_type\x18\x07 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x18\n\x10\x61\x63tor_token_path\x18\x08 \x01(\t\x12\x18\n\x10\x61\x63tor_token_type\x18\t 
\x01(\t:J\x9a\xc5\x88\x1e\x45\nCenvoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService:?\x9a\xc5\x88\x1e:\n8envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentialsB\x1b\n\x14\x63redential_specifier\x12\x03\xf8\x42\x01\x1a\x9a\x02\n\x0b\x43hannelArgs\x12P\n\x04\x61rgs\x18\x01 \x03(\x0b\x32\x42.envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry\x1aL\n\x05Value\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x13\n\tint_value\x18\x02 \x01(\x03H\x00\x42\x16\n\x0fvalue_specifier\x12\x03\xf8\x42\x01\x1ak\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12M\n\x05value\x18\x02 \x01(\x0b\x32>.envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value:\x02\x38\x01:/\x9a\xc5\x88\x1e*\n(envoy.api.v2.core.GrpcService.GoogleGrpc:$\x9a\xc5\x88\x1e\x1f\n\x1d\x65nvoy.api.v2.core.GrpcServiceB\x17\n\x10target_specifier\x12\x03\xf8\x42\x01J\x04\x08\x04\x10\x05\x42\x84\x01\n\"io.envoyproxy.envoy.config.core.v3B\x10GrpcServiceProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + GrpcService = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService").msgclass + GrpcService::EnvoyGrpc = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.EnvoyGrpc").msgclass + GrpcService::GoogleGrpc = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc").msgclass + GrpcService::GoogleGrpc::SslCredentials = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials").msgclass + GrpcService::GoogleGrpc::GoogleLocalCredentials = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials").msgclass + 
GrpcService::GoogleGrpc::ChannelCredentials = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials").msgclass + GrpcService::GoogleGrpc::CallCredentials = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials").msgclass + GrpcService::GoogleGrpc::CallCredentials::ServiceAccountJWTAccessCredentials = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials").msgclass + GrpcService::GoogleGrpc::CallCredentials::GoogleIAMCredentials = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials").msgclass + GrpcService::GoogleGrpc::CallCredentials::MetadataCredentialsFromPlugin = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin").msgclass + GrpcService::GoogleGrpc::CallCredentials::StsService = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService").msgclass + GrpcService::GoogleGrpc::ChannelArgs = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs").msgclass + GrpcService::GoogleGrpc::ChannelArgs::Value = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/health_check_pb.rb b/lib/envoy/config/core/v3/health_check_pb.rb new file mode 100644 index 0000000..2c08ca7 --- /dev/null +++ b/lib/envoy/config/core/v3/health_check_pb.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/health_check.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/event_service_config_pb" +require "envoy/config/core/v3/extension_pb" +require "envoy/config/core/v3/proxy_protocol_pb" +require "envoy/type/matcher/v3/string_pb" +require "envoy/type/v3/http_pb" +require "envoy/type/v3/range_pb" +require "google/protobuf/any_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/struct_pb" +require "google/protobuf/wrappers_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\'envoy/config/core/v3/health_check.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a/envoy/config/core/v3/event_service_config.proto\x1a$envoy/config/core/v3/extension.proto\x1a)envoy/config/core/v3/proxy_protocol.proto\x1a\"envoy/type/matcher/v3/string.proto\x1a\x18\x65nvoy/type/v3/http.proto\x1a\x19\x65nvoy/type/v3/range.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"V\n\x0fHealthStatusSet\x12\x43\n\x08statuses\x18\x01 \x03(\x0e\x32\".envoy.config.core.v3.HealthStatusB\r\xfa\x42\n\x92\x01\x07\"\x05\x82\x01\x02\x10\x01\"\x93\x1a\n\x0bHealthCheck\x12\x36\n\x07timeout\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\n\xfa\x42\x07\xaa\x01\x04\x08\x01*\x00\x12\x37\n\x08interval\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\n\xfa\x42\x07\xaa\x01\x04\x08\x01*\x00\x12\x31\n\x0einitial_jitter\x18\x14 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0finterval_jitter\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x1f\n\x17interval_jitter_percent\x18\x12 
\x01(\r\x12\x43\n\x13unhealthy_threshold\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x41\n\x11healthy_threshold\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12.\n\x08\x61lt_port\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x34\n\x10reuse_connection\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12N\n\x11http_health_check\x18\x08 \x01(\x0b\x32\x31.envoy.config.core.v3.HealthCheck.HttpHealthCheckH\x00\x12L\n\x10tcp_health_check\x18\t \x01(\x0b\x32\x30.envoy.config.core.v3.HealthCheck.TcpHealthCheckH\x00\x12N\n\x11grpc_health_check\x18\x0b \x01(\x0b\x32\x31.envoy.config.core.v3.HealthCheck.GrpcHealthCheckH\x00\x12R\n\x13\x63ustom_health_check\x18\r \x01(\x0b\x32\x33.envoy.config.core.v3.HealthCheck.CustomHealthCheckH\x00\x12@\n\x13no_traffic_interval\x18\x0c \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12H\n\x1bno_traffic_healthy_interval\x18\x18 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12?\n\x12unhealthy_interval\x18\x0e \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12\x44\n\x17unhealthy_edge_interval\x18\x0f \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12\x42\n\x15healthy_edge_interval\x18\x10 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12#\n\x0e\x65vent_log_path\x18\x11 \x01(\tB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12@\n\x0c\x65vent_logger\x18\x19 \x03(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12?\n\revent_service\x18\x16 \x01(\x0b\x32(.envoy.config.core.v3.EventServiceConfig\x12(\n always_log_health_check_failures\x18\x13 \x01(\x08\x12\'\n\x1f\x61lways_log_health_check_success\x18\x1a \x01(\x08\x12\x41\n\x0btls_options\x18\x15 \x01(\x0b\x32,.envoy.config.core.v3.HealthCheck.TlsOptions\x12@\n\x1ftransport_socket_match_criteria\x18\x17 
\x01(\x0b\x32\x17.google.protobuf.Struct\x1ar\n\x07Payload\x12\x17\n\x04text\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x10\n\x06\x62inary\x18\x02 \x01(\x0cH\x00:,\x9a\xc5\x88\x1e\'\n%envoy.api.v2.core.HealthCheck.PayloadB\x0e\n\x07payload\x12\x03\xf8\x42\x01\x1a\x98\x06\n\x0fHttpHealthCheck\x12\x16\n\x04host\x18\x01 \x01(\tB\x08\xfa\x42\x05r\x03\xc0\x01\x02\x12\x18\n\x04path\x18\x02 \x01(\tB\n\xfa\x42\x07r\x05\x10\x01\xc0\x01\x02\x12\x37\n\x04send\x18\x03 \x01(\x0b\x32).envoy.config.core.v3.HealthCheck.Payload\x12:\n\x07receive\x18\x04 \x03(\x0b\x32).envoy.config.core.v3.HealthCheck.Payload\x12\x43\n\x14response_buffer_size\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.UInt64ValueB\x07\xfa\x42\x04\x32\x02(\x00\x12R\n\x16request_headers_to_add\x18\x06 \x03(\x0b\x32\'.envoy.config.core.v3.HeaderValueOptionB\t\xfa\x42\x06\x92\x01\x03\x10\xe8\x07\x12\x33\n\x19request_headers_to_remove\x18\x08 \x03(\tB\x10\xfa\x42\r\x92\x01\n\"\x08r\x06\xc0\x01\x01\xc8\x01\x00\x12\x34\n\x11\x65xpected_statuses\x18\t \x03(\x0b\x32\x19.envoy.type.v3.Int64Range\x12\x35\n\x12retriable_statuses\x18\x0c \x03(\x0b\x32\x19.envoy.type.v3.Int64Range\x12\x43\n\x11\x63odec_client_type\x18\n \x01(\x0e\x32\x1e.envoy.type.v3.CodecClientTypeB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x42\n\x14service_name_matcher\x18\x0b \x01(\x0b\x32$.envoy.type.matcher.v3.StringMatcher\x12?\n\x06method\x18\r \x01(\x0e\x32#.envoy.config.core.v3.RequestMethodB\n\xfa\x42\x07\x82\x01\x04\x10\x01 \x06:4\x9a\xc5\x88\x1e/\n-envoy.api.v2.core.HealthCheck.HttpHealthCheckJ\x04\x08\x05\x10\x06J\x04\x08\x07\x10\x08R\x0cservice_nameR\tuse_http2\x1a\x84\x02\n\x0eTcpHealthCheck\x12\x37\n\x04send\x18\x01 \x01(\x0b\x32).envoy.config.core.v3.HealthCheck.Payload\x12:\n\x07receive\x18\x02 \x03(\x0b\x32).envoy.config.core.v3.HealthCheck.Payload\x12H\n\x15proxy_protocol_config\x18\x03 
\x01(\x0b\x32).envoy.config.core.v3.ProxyProtocolConfig:3\x9a\xc5\x88\x1e.\n,envoy.api.v2.core.HealthCheck.TcpHealthCheck\x1aV\n\x10RedisHealthCheck\x12\x0b\n\x03key\x18\x01 \x01(\t:5\x9a\xc5\x88\x1e\x30\n.envoy.api.v2.core.HealthCheck.RedisHealthCheck\x1a\xcb\x01\n\x0fGrpcHealthCheck\x12\x14\n\x0cservice_name\x18\x01 \x01(\t\x12\x1e\n\tauthority\x18\x02 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x02\xc8\x01\x00\x12L\n\x10initial_metadata\x18\x03 \x03(\x0b\x32\'.envoy.config.core.v3.HeaderValueOptionB\t\xfa\x42\x06\x92\x01\x03\x10\xe8\x07:4\x9a\xc5\x88\x1e/\n-envoy.api.v2.core.HealthCheck.GrpcHealthCheck\x1a\xad\x01\n\x11\x43ustomHealthCheck\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12,\n\x0ctyped_config\x18\x03 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00:6\x9a\xc5\x88\x1e\x31\n/envoy.api.v2.core.HealthCheck.CustomHealthCheckB\r\n\x0b\x63onfig_typeJ\x04\x08\x02\x10\x03R\x06\x63onfig\x1aU\n\nTlsOptions\x12\x16\n\x0e\x61lpn_protocols\x18\x01 \x03(\t:/\x9a\xc5\x88\x1e*\n(envoy.api.v2.core.HealthCheck.TlsOptions:$\x9a\xc5\x88\x1e\x1f\n\x1d\x65nvoy.api.v2.core.HealthCheckB\x15\n\x0ehealth_checker\x12\x03\xf8\x42\x01J\x04\x08\n\x10\x0b*`\n\x0cHealthStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07HEALTHY\x10\x01\x12\r\n\tUNHEALTHY\x10\x02\x12\x0c\n\x08\x44RAINING\x10\x03\x12\x0b\n\x07TIMEOUT\x10\x04\x12\x0c\n\x08\x44\x45GRADED\x10\x05\x42\x84\x01\n\"io.envoyproxy.envoy.config.core.v3B\x10HealthCheckProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + HealthStatusSet = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthStatusSet").msgclass + HealthCheck = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck").msgclass + HealthCheck::Payload = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.Payload").msgclass + HealthCheck::HttpHealthCheck = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.HttpHealthCheck").msgclass + HealthCheck::TcpHealthCheck = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.TcpHealthCheck").msgclass + HealthCheck::RedisHealthCheck = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.RedisHealthCheck").msgclass + HealthCheck::GrpcHealthCheck = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.GrpcHealthCheck").msgclass + HealthCheck::CustomHealthCheck = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.CustomHealthCheck").msgclass + HealthCheck::TlsOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthCheck.TlsOptions").msgclass + HealthStatus = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HealthStatus").enummodule + end + end + end +end diff --git a/lib/envoy/config/core/v3/http_service_pb.rb b/lib/envoy/config/core/v3/http_service_pb.rb new file mode 100644 index 0000000..d36f8a5 --- /dev/null +++ b/lib/envoy/config/core/v3/http_service_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/http_service.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/http_uri_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\'envoy/config/core/v3/http_service.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a#envoy/config/core/v3/http_uri.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\x92\x01\n\x0bHttpService\x12/\n\x08http_uri\x18\x01 \x01(\x0b\x32\x1d.envoy.config.core.v3.HttpUri\x12R\n\x16request_headers_to_add\x18\x02 \x03(\x0b\x32\'.envoy.config.core.v3.HeaderValueOptionB\t\xfa\x42\x06\x92\x01\x03\x10\xe8\x07\x42\x84\x01\n\"io.envoyproxy.envoy.config.core.v3B\x10HttpServiceProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + HttpService = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HttpService").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/http_uri_pb.rb b/lib/envoy/config/core/v3/http_uri_pb.rb new file mode 100644 index 0000000..d5f1723 --- /dev/null +++ b/lib/envoy/config/core/v3/http_uri_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/http_uri.proto + +require "google/protobuf" + +require "google/protobuf/duration_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n#envoy/config/core/v3/http_uri.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1egoogle/protobuf/duration.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb8\x01\n\x07HttpUri\x12\x14\n\x03uri\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x1a\n\x07\x63luster\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12>\n\x07timeout\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x12\xfa\x42\x0f\xaa\x01\x0c\x08\x01\x1a\x06\x08\x80\x80\x80\x80\x10\x32\x00: \x9a\xc5\x88\x1e\x1b\n\x19\x65nvoy.api.v2.core.HttpUriB\x19\n\x12http_upstream_type\x12\x03\xf8\x42\x01\x42\x80\x01\n\"io.envoyproxy.envoy.config.core.v3B\x0cHttpUriProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + HttpUri = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HttpUri").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/protocol_pb.rb b/lib/envoy/config/core/v3/protocol_pb.rb new file mode 100644 index 0000000..cb79cf8 --- /dev/null +++ b/lib/envoy/config/core/v3/protocol_pb.rb @@ -0,0 +1,51 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/protocol.proto + +require "google/protobuf" + +require "envoy/config/core/v3/extension_pb" +require "envoy/type/matcher/v3/string_pb" +require "envoy/type/v3/percent_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/wrappers_pb" +require "xds/annotations/v3/status_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n#envoy/config/core/v3/protocol.proto\x12\x14\x65nvoy.config.core.v3\x1a$envoy/config/core/v3/extension.proto\x1a\"envoy/type/matcher/v3/string.proto\x1a\x1b\x65nvoy/type/v3/percent.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1fxds/annotations/v3/status.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"A\n\x12TcpProtocolOptions:+\x9a\xc5\x88\x1e&\n$envoy.api.v2.core.TcpProtocolOptions\"\x8d\x01\n\x15QuicKeepAliveSettings\x12/\n\x0cmax_interval\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x43\n\x10initial_interval\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0e\xfa\x42\x0b\xaa\x01\x08\"\x00\x32\x04\x10\xc0\x84=\"\xf8\x08\n\x13QuicProtocolOptions\x12\x45\n\x16max_concurrent_streams\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01\x12N\n\x1ainitial_stream_window_size\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x0c\xfa\x42\t*\x07\x18\x80\x80\x80\x08(\x01\x12R\n\x1einitial_connection_window_size\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x0c\xfa\x42\t*\x07\x18\x80\x80\x80\x0c(\x01\x12W\n&num_timeouts_to_trigger_port_migration\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\t\xfa\x42\x06*\x04\x18\x05(\x00\x12I\n\x14\x63onnection_keepalive\x18\x05 \x01(\x0b\x32+.envoy.config.core.v3.QuicKeepAliveSettings\x12\x1a\n\x12\x63onnection_options\x18\x06 
\x01(\t\x12!\n\x19\x63lient_connection_options\x18\x07 \x01(\t\x12\x43\n\x14idle_network_timeout\x18\x08 \x01(\x0b\x32\x19.google.protobuf.DurationB\n\xfa\x42\x07\xaa\x01\x04\x32\x02\x08\x01\x12\x37\n\x11max_packet_length\x18\t \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12H\n\x14\x63lient_packet_writer\x18\n \x01(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12\x63\n\x14\x63onnection_migration\x18\x0b \x01(\x0b\x32\x45.envoy.config.core.v3.QuicProtocolOptions.ConnectionMigrationSettings\x1a\xe5\x02\n\x1b\x43onnectionMigrationSettings\x12\x85\x01\n\x18migrate_idle_connections\x18\x01 \x01(\x0b\x32\x63.envoy.config.core.v3.QuicProtocolOptions.ConnectionMigrationSettings.MigrateIdleConnectionSettings\x12N\n\x1fmax_time_on_non_default_network\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\n\xfa\x42\x07\xaa\x01\x04\x32\x02\x08\x01\x1an\n\x1dMigrateIdleConnectionSettings\x12M\n\x1emax_idle_time_before_migration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\n\xfa\x42\x07\xaa\x01\x04\x32\x02\x08\x01\"\xb1\x01\n\x1bUpstreamHttpProtocolOptions\x12\x10\n\x08\x61uto_sni\x18\x01 \x01(\x08\x12\x1b\n\x13\x61uto_san_validation\x18\x02 \x01(\x08\x12-\n\x18override_auto_sni_header\x18\x03 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x01\xd0\x01\x01:4\x9a\xc5\x88\x1e/\n-envoy.api.v2.core.UpstreamHttpProtocolOptions\"\xa6\x03\n\x1e\x41lternateProtocolsCacheOptions\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12:\n\x0bmax_entries\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02 \x00\x12J\n\x16key_value_store_config\x18\x03 \x01(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12o\n\x14prepopulated_entries\x18\x04 \x03(\x0b\x32Q.envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry\x12\x1a\n\x12\x63\x61nonical_suffixes\x18\x05 \x03(\t\x1aX\n\x1c\x41lternateProtocolsCacheEntry\x12\x1d\n\x08hostname\x18\x01 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x01\xd0\x01\x01\x12\x19\n\x04port\x18\x02 
\x01(\rB\x0b\xfa\x42\x08*\x06\x10\xff\xff\x03 \x00\"\xf9\x04\n\x13HttpProtocolOptions\x12/\n\x0cidle_timeout\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12:\n\x17max_connection_duration\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12@\n\x11max_headers_count\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01\x12I\n\x17max_response_headers_kb\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\n\xfa\x42\x07*\x05\x18\x80@ \x00\x12\x36\n\x13max_stream_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12o\n\x1fheaders_with_underscores_action\x18\x05 \x01(\x0e\x32\x46.envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction\x12\x41\n\x1bmax_requests_per_connection\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\"N\n\x1cHeadersWithUnderscoresAction\x12\t\n\x05\x41LLOW\x10\x00\x12\x12\n\x0eREJECT_REQUEST\x10\x01\x12\x0f\n\x0b\x44ROP_HEADER\x10\x02:,\x9a\xc5\x88\x1e\'\n%envoy.api.v2.core.HttpProtocolOptions\"\xe9\x07\n\x14Http1ProtocolOptions\x12\x36\n\x12\x61llow_absolute_url\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x16\n\x0e\x61\x63\x63\x65pt_http_10\x18\x02 \x01(\x08\x12 \n\x18\x64\x65\x66\x61ult_host_for_http_10\x18\x03 \x01(\t\x12U\n\x11header_key_format\x18\x04 \x01(\x0b\x32:.envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat\x12\x17\n\x0f\x65nable_trailers\x18\x05 \x01(\x08\x12\x1c\n\x14\x61llow_chunked_length\x18\x06 \x01(\x08\x12Q\n-override_stream_error_on_invalid_http_message\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12 \n\x18send_fully_qualified_url\x18\x08 \x01(\x08\x12\x41\n\x10use_balsa_parser\x18\t \x01(\x0b\x32\x1a.google.protobuf.BoolValueB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12&\n\x14\x61llow_custom_methods\x18\n \x01(\x08\x42\x08\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x12\x44\n\x16ignore_http_11_upgrade\x18\x0b \x03(\x0b\x32$.envoy.type.matcher.v3.StringMatcher\x1a\xfb\x02\n\x0fHeaderKeyFormat\x12g\n\x11proper_case_words\x18\x01 
\x01(\x0b\x32J.envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWordsH\x00\x12H\n\x12stateful_formatter\x18\x08 \x01(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfigH\x00\x1a`\n\x0fProperCaseWords:M\x9a\xc5\x88\x1eH\nFenvoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords:=\x9a\xc5\x88\x1e\x38\n6envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormatB\x14\n\rheader_format\x12\x03\xf8\x42\x01:-\x9a\xc5\x88\x1e(\n&envoy.api.v2.core.Http1ProtocolOptions\"\x86\x02\n\x11KeepaliveSettings\x12\x39\n\x08interval\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\xfa\x42\t\xaa\x01\x06\x32\x04\x10\xc0\x84=\x12:\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0e\xfa\x42\x0b\xaa\x01\x08\x08\x01\x32\x04\x10\xc0\x84=\x12/\n\x0finterval_jitter\x18\x03 \x01(\x0b\x32\x16.envoy.type.v3.Percent\x12I\n\x18\x63onnection_idle_interval\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\xfa\x42\t\xaa\x01\x06\x32\x04\x10\xc0\x84=\"\x81\x0c\n\x14Http2ProtocolOptions\x12\x36\n\x10hpack_table_size\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12K\n\x16max_concurrent_streams\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\r\xfa\x42\n*\x08\x18\xff\xff\xff\xff\x07(\x01\x12Q\n\x1ainitial_stream_window_size\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x0f\xfa\x42\x0c*\n\x18\xff\xff\xff\xff\x07(\xff\xff\x03\x12U\n\x1einitial_connection_window_size\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x0f\xfa\x42\x0c*\n\x18\xff\xff\xff\xff\x07(\xff\xff\x03\x12\x15\n\rallow_connect\x18\x05 \x01(\x08\x12\x16\n\x0e\x61llow_metadata\x18\x06 \x01(\x08\x12\x42\n\x13max_outbound_frames\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01\x12J\n\x1bmax_outbound_control_frames\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01\x12W\n1max_consecutive_inbound_frames_with_empty_payload\x18\t 
\x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12L\n&max_inbound_priority_frames_per_stream\x18\n \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x63\n4max_inbound_window_update_frames_per_data_frame_sent\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01\x12;\n&stream_error_on_invalid_http_messaging\x18\x0c \x01(\x08\x42\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\x12Q\n-override_stream_error_on_invalid_http_message\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12`\n\x1a\x63ustom_settings_parameters\x18\r \x03(\x0b\x32<.envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter\x12\x45\n\x14\x63onnection_keepalive\x18\x0f \x01(\x0b\x32\'.envoy.config.core.v3.KeepaliveSettings\x12?\n\x11use_oghttp2_codec\x18\x10 \x01(\x0b\x32\x1a.google.protobuf.BoolValueB\x08\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x12\x37\n\x11max_metadata_size\x18\x11 \x01(\x0b\x32\x1c.google.protobuf.UInt64Value\x12;\n\x17\x65nable_huffman_encoding\x18\x12 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x1a\xcf\x01\n\x11SettingsParameter\x12\x42\n\nidentifier\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x10\xfa\x42\r*\x06\x18\xff\xff\x03(\x00\x8a\x01\x02\x10\x01\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01:?\x9a\xc5\x88\x1e:\n8envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter:-\x9a\xc5\x88\x1e(\n&envoy.api.v2.core.Http2ProtocolOptions\"\x8f\x01\n\x13GrpcProtocolOptions\x12J\n\x16http2_protocol_options\x18\x01 \x01(\x0b\x32*.envoy.config.core.v3.Http2ProtocolOptions:,\x9a\xc5\x88\x1e\'\n%envoy.api.v2.core.GrpcProtocolOptions\"\xc1\x02\n\x14Http3ProtocolOptions\x12H\n\x15quic_protocol_options\x18\x01 \x01(\x0b\x32).envoy.config.core.v3.QuicProtocolOptions\x12Q\n-override_stream_error_on_invalid_http_message\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12(\n\x16\x61llow_extended_connect\x18\x05 \x01(\x08\x42\x08\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x12\x16\n\x0e\x61llow_metadata\x18\x06 
\x01(\x08\x12\x15\n\rdisable_qpack\x18\x07 \x01(\x08\x12\x33\n+disable_connection_flow_control_for_streams\x18\x08 \x01(\x08\"y\n\x1aSchemeHeaderTransformation\x12\x31\n\x13scheme_to_overwrite\x18\x01 \x01(\tB\x12\xfa\x42\x0fr\rR\x04httpR\x05httpsH\x00\x12\x16\n\x0ematch_upstream\x18\x02 \x01(\x08\x42\x10\n\x0etransformationB\x81\x01\n\"io.envoyproxy.envoy.config.core.v3B\rProtocolProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + TcpProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.TcpProtocolOptions").msgclass + QuicKeepAliveSettings = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.QuicKeepAliveSettings").msgclass + QuicProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.QuicProtocolOptions").msgclass + QuicProtocolOptions::ConnectionMigrationSettings = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.QuicProtocolOptions.ConnectionMigrationSettings").msgclass + QuicProtocolOptions::ConnectionMigrationSettings::MigrateIdleConnectionSettings = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.QuicProtocolOptions.ConnectionMigrationSettings.MigrateIdleConnectionSettings").msgclass + UpstreamHttpProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.UpstreamHttpProtocolOptions").msgclass + AlternateProtocolsCacheOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.AlternateProtocolsCacheOptions").msgclass + AlternateProtocolsCacheOptions::AlternateProtocolsCacheEntry = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry").msgclass + HttpProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HttpProtocolOptions").msgclass + HttpProtocolOptions::HeadersWithUnderscoresAction = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction").enummodule + Http1ProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Http1ProtocolOptions").msgclass + Http1ProtocolOptions::HeaderKeyFormat = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat").msgclass + Http1ProtocolOptions::HeaderKeyFormat::ProperCaseWords = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords").msgclass + KeepaliveSettings = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.KeepaliveSettings").msgclass + Http2ProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Http2ProtocolOptions").msgclass + Http2ProtocolOptions::SettingsParameter = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter").msgclass + GrpcProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.GrpcProtocolOptions").msgclass + Http3ProtocolOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.Http3ProtocolOptions").msgclass + SchemeHeaderTransformation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SchemeHeaderTransformation").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/proxy_protocol_pb.rb b/lib/envoy/config/core/v3/proxy_protocol_pb.rb new file mode 100644 index 
0000000..0e8ab3e --- /dev/null +++ b/lib/envoy/config/core/v3/proxy_protocol_pb.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/config/core/v3/proxy_protocol.proto + +require "google/protobuf" + +require "envoy/config/core/v3/substitution_format_string_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n)envoy/config/core/v3/proxy_protocol.proto\x12\x14\x65nvoy.config.core.v3\x1a\x35\x65nvoy/config/core/v3/substitution_format_string.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\xcc\x01\n\x1cProxyProtocolPassThroughTLVs\x12X\n\nmatch_type\x18\x01 \x01(\x0e\x32\x44.envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType\x12\x1f\n\x08tlv_type\x18\x02 \x03(\rB\r\xfa\x42\n\x92\x01\x07\"\x05*\x03\x10\x80\x02\"1\n\x11PassTLVsMatchType\x12\x0f\n\x0bINCLUDE_ALL\x10\x00\x12\x0b\n\x07INCLUDE\x10\x01\"x\n\x08TlvEntry\x12\x16\n\x04type\x18\x01 \x01(\rB\x08\xfa\x42\x05*\x03\x10\x80\x02\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x45\n\rformat_string\x18\x03 \x01(\x0b\x32..envoy.config.core.v3.SubstitutionFormatString\"\xf7\x01\n\x13ProxyProtocolConfig\x12\x42\n\x07version\x18\x01 \x01(\x0e\x32\x31.envoy.config.core.v3.ProxyProtocolConfig.Version\x12M\n\x11pass_through_tlvs\x18\x02 \x01(\x0b\x32\x32.envoy.config.core.v3.ProxyProtocolPassThroughTLVs\x12\x32\n\nadded_tlvs\x18\x03 \x03(\x0b\x32\x1e.envoy.config.core.v3.TlvEntry\"\x19\n\x07Version\x12\x06\n\x02V1\x10\x00\x12\x06\n\x02V2\x10\x01\"C\n\rPerHostConfig\x12\x32\n\nadded_tlvs\x18\x01 \x03(\x0b\x32\x1e.envoy.config.core.v3.TlvEntryB\x86\x01\n\"io.envoyproxy.envoy.config.core.v3B\x12ProxyProtocolProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module 
Core + module V3 + ProxyProtocolPassThroughTLVs = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ProxyProtocolPassThroughTLVs").msgclass + ProxyProtocolPassThroughTLVs::PassTLVsMatchType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType").enummodule + TlvEntry = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.TlvEntry").msgclass + ProxyProtocolConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ProxyProtocolConfig").msgclass + ProxyProtocolConfig::Version = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.ProxyProtocolConfig.Version").enummodule + PerHostConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.PerHostConfig").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/resolver_pb.rb b/lib/envoy/config/core/v3/resolver_pb.rb new file mode 100644 index 0000000..598dab3 --- /dev/null +++ b/lib/envoy/config/core/v3/resolver_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/resolver.proto + +require "google/protobuf" + +require "envoy/config/core/v3/address_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n#envoy/config/core/v3/resolver.proto\x12\x14\x65nvoy.config.core.v3\x1a\"envoy/config/core/v3/address.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"W\n\x12\x44nsResolverOptions\x12\x1f\n\x17use_tcp_for_dns_lookups\x18\x01 \x01(\x08\x12 \n\x18no_default_search_domain\x18\x02 \x01(\x08\"\x99\x01\n\x13\x44nsResolutionConfig\x12:\n\tresolvers\x18\x01 \x03(\x0b\x32\x1d.envoy.config.core.v3.AddressB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x46\n\x14\x64ns_resolver_options\x18\x02 \x01(\x0b\x32(.envoy.config.core.v3.DnsResolverOptionsB\x81\x01\n\"io.envoyproxy.envoy.config.core.v3B\rResolverProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + DnsResolverOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.DnsResolverOptions").msgclass + DnsResolutionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.DnsResolutionConfig").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/socket_cmsg_headers_pb.rb b/lib/envoy/config/core/v3/socket_cmsg_headers_pb.rb new file mode 100644 index 0000000..db0c012 --- /dev/null +++ b/lib/envoy/config/core/v3/socket_cmsg_headers_pb.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/socket_cmsg_headers.proto + +require "google/protobuf" + +require "google/protobuf/wrappers_pb" +require "udpa/annotations/status_pb" + + +descriptor_data = "\n.envoy/config/core/v3/socket_cmsg_headers.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dudpa/annotations/status.proto\"\x83\x01\n\x11SocketCmsgHeaders\x12+\n\x05level\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12*\n\x04type\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x15\n\rexpected_size\x18\x03 \x01(\rB\x8a\x01\n\"io.envoyproxy.envoy.config.core.v3B\x16SocketCmsgHeadersProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + SocketCmsgHeaders = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketCmsgHeaders").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/socket_option_pb.rb b/lib/envoy/config/core/v3/socket_option_pb.rb new file mode 100644 index 0000000..509a9c4 --- /dev/null +++ b/lib/envoy/config/core/v3/socket_option_pb.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/socket_option.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n(envoy/config/core/v3/socket_option.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xa2\x04\n\x0cSocketOption\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12\r\n\x05level\x18\x02 \x01(\x03\x12\x0c\n\x04name\x18\x03 \x01(\x03\x12\x13\n\tint_value\x18\x04 \x01(\x03H\x00\x12\x13\n\tbuf_value\x18\x05 \x01(\x0cH\x00\x12G\n\x05state\x18\x06 \x01(\x0e\x32..envoy.config.core.v3.SocketOption.SocketStateB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12;\n\x04type\x18\x07 \x01(\x0b\x32-.envoy.config.core.v3.SocketOption.SocketType\x1a\xb2\x01\n\nSocketType\x12\x44\n\x06stream\x18\x01 \x01(\x0b\x32\x34.envoy.config.core.v3.SocketOption.SocketType.Stream\x12H\n\x08\x64\x61tagram\x18\x02 \x01(\x0b\x32\x36.envoy.config.core.v3.SocketOption.SocketType.Datagram\x1a\x08\n\x06Stream\x1a\n\n\x08\x44\x61tagram\"F\n\x0bSocketState\x12\x11\n\rSTATE_PREBIND\x10\x00\x12\x0f\n\x0bSTATE_BOUND\x10\x01\x12\x13\n\x0fSTATE_LISTENING\x10\x02:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.api.v2.core.SocketOptionB\x0c\n\x05value\x12\x03\xf8\x42\x01\"S\n\x15SocketOptionsOverride\x12:\n\x0esocket_options\x18\x01 \x03(\x0b\x32\".envoy.config.core.v3.SocketOptionB\x85\x01\n\"io.envoyproxy.envoy.config.core.v3B\x11SocketOptionProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + SocketOption = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketOption").msgclass + SocketOption::SocketType = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketOption.SocketType").msgclass + SocketOption::SocketType::Stream = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketOption.SocketType.Stream").msgclass + SocketOption::SocketType::Datagram = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketOption.SocketType.Datagram").msgclass + SocketOption::SocketState = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketOption.SocketState").enummodule + SocketOptionsOverride = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SocketOptionsOverride").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/substitution_format_string_pb.rb b/lib/envoy/config/core/v3/substitution_format_string_pb.rb new file mode 100644 index 0000000..08a20b8 --- /dev/null +++ b/lib/envoy/config/core/v3/substitution_format_string_pb.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/core/v3/substitution_format_string.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/extension_pb" +require "google/protobuf/struct_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n5envoy/config/core/v3/substitution_format_string.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a$envoy/config/core/v3/extension.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"9\n\x11JsonFormatOptions\x12$\n\x0fsort_properties\x18\x01 \x01(\x08\x42\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0\"\x8b\x03\n\x18SubstitutionFormatString\x12\"\n\x0btext_format\x18\x01 \x01(\tB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0H\x00\x12\x38\n\x0bjson_format\x18\x02 \x01(\x0b\x32\x17.google.protobuf.StructB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00\x12>\n\x12text_format_source\x18\x05 \x01(\x0b\x32 .envoy.config.core.v3.DataSourceH\x00\x12\x19\n\x11omit_empty_values\x18\x03 \x01(\x08\x12!\n\x0c\x63ontent_type\x18\x04 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x02\xc8\x01\x00\x12>\n\nformatters\x18\x06 \x03(\x0b\x32*.envoy.config.core.v3.TypedExtensionConfig\x12\x44\n\x13json_format_options\x18\x07 \x01(\x0b\x32\'.envoy.config.core.v3.JsonFormatOptionsB\r\n\x06\x66ormat\x12\x03\xf8\x42\x01\x42\x91\x01\n\"io.envoyproxy.envoy.config.core.v3B\x1dSubstitutionFormatStringProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + JsonFormatOptions = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.JsonFormatOptions").msgclass + SubstitutionFormatString = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.SubstitutionFormatString").msgclass + end + end + end +end diff --git a/lib/envoy/config/core/v3/udp_socket_config_pb.rb b/lib/envoy/config/core/v3/udp_socket_config_pb.rb new file mode 100644 index 0000000..b70d0e6 --- /dev/null +++ b/lib/envoy/config/core/v3/udp_socket_config_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/config/core/v3/udp_socket_config.proto + +require "google/protobuf" + +require "google/protobuf/wrappers_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n,envoy/config/core/v3/udp_socket_config.proto\x12\x14\x65nvoy.config.core.v3\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\x8a\x01\n\x0fUdpSocketConfig\x12G\n\x14max_rx_datagram_size\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt64ValueB\x0b\xfa\x42\x08\x32\x06\x10\x80\x80\x04 \x00\x12.\n\nprefer_gro\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.BoolValueB\x88\x01\n\"io.envoyproxy.envoy.config.core.v3B\x14UdpSocketConfigProtoP\x01ZBgithub.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Core + module V3 + UdpSocketConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.core.v3.UdpSocketConfig").msgclass + end + end + end +end diff --git a/lib/envoy/config/endpoint/v3/endpoint_components_pb.rb b/lib/envoy/config/endpoint/v3/endpoint_components_pb.rb new file mode 100644 index 0000000..84a718a --- /dev/null +++ b/lib/envoy/config/endpoint/v3/endpoint_components_pb.rb @@ -0,0 +1,40 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/endpoint/v3/endpoint_components.proto + +require "google/protobuf" + +require "envoy/config/core/v3/address_pb" +require "envoy/config/core/v3/base_pb" +require "envoy/config/core/v3/config_source_pb" +require "envoy/config/core/v3/health_check_pb" +require "google/protobuf/wrappers_pb" +require "xds/core/v3/collection_entry_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n2envoy/config/endpoint/v3/endpoint_components.proto\x12\x18\x65nvoy.config.endpoint.v3\x1a\"envoy/config/core/v3/address.proto\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a(envoy/config/core/v3/config_source.proto\x1a\'envoy/config/core/v3/health_check.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\"xds/core/v3/collection_entry.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb4\x04\n\x08\x45ndpoint\x12.\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x1d.envoy.config.core.v3.Address\x12Q\n\x13health_check_config\x18\x02 \x01(\x0b\x32\x34.envoy.config.endpoint.v3.Endpoint.HealthCheckConfig\x12\x10\n\x08hostname\x18\x03 \x01(\t\x12R\n\x14\x61\x64\x64itional_addresses\x18\x04 \x03(\x0b\x32\x34.envoy.config.endpoint.v3.Endpoint.AdditionalAddress\x1a\xd2\x01\n\x11HealthCheckConfig\x12\x1d\n\nport_value\x18\x01 \x01(\rB\t\xfa\x42\x06*\x04\x18\xff\xff\x03\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12.\n\x07\x61\x64\x64ress\x18\x03 \x01(\x0b\x32\x1d.envoy.config.core.v3.Address\x12#\n\x1b\x64isable_active_health_check\x18\x04 \x01(\x08:7\x9a\xc5\x88\x1e\x32\n0envoy.api.v2.endpoint.Endpoint.HealthCheckConfig\x1a\x43\n\x11\x41\x64\x64itionalAddress\x12.\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x1d.envoy.config.core.v3.Address:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.api.v2.endpoint.Endpoint\"\xcc\x02\n\nLbEndpoint\x12\x36\n\x08\x65ndpoint\x18\x01 
\x01(\x0b\x32\".envoy.config.endpoint.v3.EndpointH\x00\x12\x17\n\rendpoint_name\x18\x05 \x01(\tH\x00\x12\x39\n\rhealth_status\x18\x02 \x01(\x0e\x32\".envoy.config.core.v3.HealthStatus\x12\x30\n\x08metadata\x18\x03 \x01(\x0b\x32\x1e.envoy.config.core.v3.Metadata\x12\x44\n\x15load_balancing_weight\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01:\'\x9a\xc5\x88\x1e\"\n envoy.api.v2.endpoint.LbEndpointB\x11\n\x0fhost_identifier\"E\n\x14LbEndpointCollection\x12-\n\x07\x65ntries\x18\x01 \x01(\x0b\x32\x1c.xds.core.v3.CollectionEntry\"r\n\x19LedsClusterLocalityConfig\x12\x37\n\x0bleds_config\x18\x01 \x01(\x0b\x32\".envoy.config.core.v3.ConfigSource\x12\x1c\n\x14leds_collection_name\x18\x02 \x01(\t\"\xa0\x05\n\x13LocalityLbEndpoints\x12\x30\n\x08locality\x18\x01 \x01(\x0b\x32\x1e.envoy.config.core.v3.Locality\x12\x30\n\x08metadata\x18\t \x01(\x0b\x32\x1e.envoy.config.core.v3.Metadata\x12:\n\x0clb_endpoints\x18\x02 \x03(\x0b\x32$.envoy.config.endpoint.v3.LbEndpoint\x12l\n\x17load_balancer_endpoints\x18\x07 \x01(\x0b\x32<.envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointListB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0H\x00\x12[\n\x1cleds_cluster_locality_config\x18\x08 \x01(\x0b\x32\x33.envoy.config.endpoint.v3.LedsClusterLocalityConfigH\x00\x12\x44\n\x15load_balancing_weight\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02(\x01\x12\x1a\n\x08priority\x18\x05 \x01(\rB\x08\xfa\x42\x05*\x03\x18\x80\x01\x12/\n\tproximity\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x1aL\n\x0eLbEndpointList\x12:\n\x0clb_endpoints\x18\x01 \x03(\x0b\x32$.envoy.config.endpoint.v3.LbEndpoint:0\x9a\xc5\x88\x1e+\n)envoy.api.v2.endpoint.LocalityLbEndpointsB\x0b\n\tlb_configB\x97\x01\n&io.envoyproxy.envoy.config.endpoint.v3B\x17\x45ndpointComponentsProtoP\x01ZJgithub.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3;endpointv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = 
::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Endpoint + module V3 + Endpoint = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.Endpoint").msgclass + Endpoint::HealthCheckConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.Endpoint.HealthCheckConfig").msgclass + Endpoint::AdditionalAddress = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.Endpoint.AdditionalAddress").msgclass + LbEndpoint = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.LbEndpoint").msgclass + LbEndpointCollection = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.LbEndpointCollection").msgclass + LedsClusterLocalityConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.LedsClusterLocalityConfig").msgclass + LocalityLbEndpoints = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.LocalityLbEndpoints").msgclass + LocalityLbEndpoints::LbEndpointList = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.LocalityLbEndpoints.LbEndpointList").msgclass + end + end + end +end diff --git a/lib/envoy/config/endpoint/v3/endpoint_pb.rb b/lib/envoy/config/endpoint/v3/endpoint_pb.rb new file mode 100644 index 0000000..8dca3b0 --- /dev/null +++ b/lib/envoy/config/endpoint/v3/endpoint_pb.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/endpoint/v3/endpoint.proto + +require "google/protobuf" + +require "envoy/config/endpoint/v3/endpoint_components_pb" +require "envoy/type/v3/percent_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/wrappers_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\'envoy/config/endpoint/v3/endpoint.proto\x12\x18\x65nvoy.config.endpoint.v3\x1a\x32\x65nvoy/config/endpoint/v3/endpoint_components.proto\x1a\x1b\x65nvoy/type/v3/percent.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb7\x07\n\x15\x43lusterLoadAssignment\x12\x1d\n\x0c\x63luster_name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12@\n\tendpoints\x18\x02 \x03(\x0b\x32-.envoy.config.endpoint.v3.LocalityLbEndpoints\x12\\\n\x0fnamed_endpoints\x18\x05 \x03(\x0b\x32\x43.envoy.config.endpoint.v3.ClusterLoadAssignment.NamedEndpointsEntry\x12\x46\n\x06policy\x18\x04 \x01(\x0b\x32\x36.envoy.config.endpoint.v3.ClusterLoadAssignment.Policy\x1a\x90\x04\n\x06Policy\x12[\n\x0e\x64rop_overloads\x18\x02 \x03(\x0b\x32\x43.envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload\x12\x46\n\x17overprovisioning_factor\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02 \x00\x12\x41\n\x14\x65ndpoint_stale_after\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationB\x08\xfa\x42\x05\xaa\x01\x02*\x00\x12 \n\x18weighted_priority_health\x18\x06 \x01(\x08\x1a\xa3\x01\n\x0c\x44ropOverload\x12\x19\n\x08\x63\x61tegory\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x39\n\x0f\x64rop_percentage\x18\x02 \x01(\x0b\x32 
.envoy.type.v3.FractionalPercent:=\x9a\xc5\x88\x1e\x38\n6envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload:0\x9a\xc5\x88\x1e+\n)envoy.api.v2.ClusterLoadAssignment.PolicyJ\x04\x08\x01\x10\x02J\x04\x08\x05\x10\x06R\x18\x64isable_overprovisioning\x1aY\n\x13NamedEndpointsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".envoy.config.endpoint.v3.Endpoint:\x02\x38\x01:)\x9a\xc5\x88\x1e$\n\"envoy.api.v2.ClusterLoadAssignmentB\x8d\x01\n&io.envoyproxy.envoy.config.endpoint.v3B\rEndpointProtoP\x01ZJgithub.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3;endpointv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Endpoint + module V3 + ClusterLoadAssignment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.ClusterLoadAssignment").msgclass + ClusterLoadAssignment::Policy = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.ClusterLoadAssignment.Policy").msgclass + ClusterLoadAssignment::Policy::DropOverload = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload").msgclass + end + end + end +end diff --git a/lib/envoy/config/endpoint/v3/load_report_pb.rb b/lib/envoy/config/endpoint/v3/load_report_pb.rb new file mode 100644 index 0000000..561092b --- /dev/null +++ b/lib/envoy/config/endpoint/v3/load_report_pb.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/config/endpoint/v3/load_report.proto + +require "google/protobuf" + +require "envoy/config/core/v3/address_pb" +require "envoy/config/core/v3/base_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/struct_pb" +require "xds/annotations/v3/status_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n*envoy/config/endpoint/v3/load_report.proto\x12\x18\x65nvoy.config.endpoint.v3\x1a\"envoy/config/core/v3/address.proto\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fxds/annotations/v3/status.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb3\x06\n\x15UpstreamLocalityStats\x12\x30\n\x08locality\x18\x01 \x01(\x0b\x32\x1e.envoy.config.core.v3.Locality\x12!\n\x19total_successful_requests\x18\x02 \x01(\x04\x12\"\n\x1atotal_requests_in_progress\x18\x03 \x01(\x04\x12\x1c\n\x14total_error_requests\x18\x04 \x01(\x04\x12\x1d\n\x15total_issued_requests\x18\x08 \x01(\x04\x12*\n\x18total_active_connections\x18\t \x01(\x04\x42\x08\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x12\'\n\x15total_new_connections\x18\n \x01(\x04\x42\x08\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x12(\n\x16total_fail_connections\x18\x0b \x01(\x04\x42\x08\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x12Q\n\x0f\x63pu_utilization\x18\x0c \x01(\x0b\x32\x38.envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats\x12Q\n\x0fmem_utilization\x18\r \x01(\x0b\x32\x38.envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats\x12Y\n\x17\x61pplication_utilization\x18\x0e \x01(\x0b\x32\x38.envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats\x12L\n\x11load_metric_stats\x18\x05 \x03(\x0b\x32\x31.envoy.config.endpoint.v3.EndpointLoadMetricStats\x12P\n\x17upstream_endpoint_stats\x18\x07 \x03(\x0b\x32/.envoy.config.endpoint.v3.UpstreamEndpointStats\x12\x10\n\x08priority\x18\x06 
\x01(\r:2\x9a\xc5\x88\x1e-\n+envoy.api.v2.endpoint.UpstreamLocalityStats\"\xf8\x02\n\x15UpstreamEndpointStats\x12.\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x1d.envoy.config.core.v3.Address\x12)\n\x08metadata\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12!\n\x19total_successful_requests\x18\x02 \x01(\x04\x12\"\n\x1atotal_requests_in_progress\x18\x03 \x01(\x04\x12\x1c\n\x14total_error_requests\x18\x04 \x01(\x04\x12\x1d\n\x15total_issued_requests\x18\x07 \x01(\x04\x12L\n\x11load_metric_stats\x18\x05 \x03(\x0b\x32\x31.envoy.config.endpoint.v3.EndpointLoadMetricStats:2\x9a\xc5\x88\x1e-\n+envoy.api.v2.endpoint.UpstreamEndpointStats\"\xab\x01\n\x17\x45ndpointLoadMetricStats\x12\x13\n\x0bmetric_name\x18\x01 \x01(\t\x12)\n!num_requests_finished_with_metric\x18\x02 \x01(\x04\x12\x1a\n\x12total_metric_value\x18\x03 \x01(\x01:4\x9a\xc5\x88\x1e/\n-envoy.api.v2.endpoint.EndpointLoadMetricStats\"g\n\x1eUnnamedEndpointLoadMetricStats\x12)\n!num_requests_finished_with_metric\x18\x01 \x01(\x04\x12\x1a\n\x12total_metric_value\x18\x02 \x01(\x01\"\xfd\x03\n\x0c\x43lusterStats\x12\x1d\n\x0c\x63luster_name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x1c\n\x14\x63luster_service_name\x18\x06 \x01(\t\x12Z\n\x17upstream_locality_stats\x18\x02 \x03(\x0b\x32/.envoy.config.endpoint.v3.UpstreamLocalityStatsB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x1e\n\x16total_dropped_requests\x18\x03 \x01(\x04\x12P\n\x10\x64ropped_requests\x18\x05 \x03(\x0b\x32\x36.envoy.config.endpoint.v3.ClusterStats.DroppedRequests\x12\x37\n\x14load_report_interval\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x1a~\n\x0f\x44roppedRequests\x12\x19\n\x08\x63\x61tegory\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x15\n\rdropped_count\x18\x02 
\x01(\x04:9\x9a\xc5\x88\x1e\x34\n2envoy.api.v2.endpoint.ClusterStats.DroppedRequests:)\x9a\xc5\x88\x1e$\n\"envoy.api.v2.endpoint.ClusterStatsB\x8f\x01\n&io.envoyproxy.envoy.config.endpoint.v3B\x0fLoadReportProtoP\x01ZJgithub.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3;endpointv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Config + module Endpoint + module V3 + UpstreamLocalityStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.UpstreamLocalityStats").msgclass + UpstreamEndpointStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.UpstreamEndpointStats").msgclass + EndpointLoadMetricStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.EndpointLoadMetricStats").msgclass + UnnamedEndpointLoadMetricStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.UnnamedEndpointLoadMetricStats").msgclass + ClusterStats = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.ClusterStats").msgclass + ClusterStats::DroppedRequests = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.config.endpoint.v3.ClusterStats.DroppedRequests").msgclass + end + end + end +end diff --git a/lib/envoy/service/discovery/v3/ads_pb.rb b/lib/envoy/service/discovery/v3/ads_pb.rb new file mode 100644 index 0000000..39a7e4f --- /dev/null +++ b/lib/envoy/service/discovery/v3/ads_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/service/discovery/v3/ads.proto + +require "google/protobuf" + +require "envoy/service/discovery/v3/discovery_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" + + +descriptor_data = "\n$envoy/service/discovery/v3/ads.proto\x12\x1a\x65nvoy.service.discovery.v3\x1a*envoy/service/discovery/v3/discovery.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\"6\n\x08\x41\x64sDummy:*\x9a\xc5\x88\x1e%\n#envoy.service.discovery.v2.AdsDummy2\xa6\x02\n\x1a\x41ggregatedDiscoveryService\x12~\n\x19StreamAggregatedResources\x12,.envoy.service.discovery.v3.DiscoveryRequest\x1a-.envoy.service.discovery.v3.DiscoveryResponse\"\x00(\x01\x30\x01\x12\x87\x01\n\x18\x44\x65ltaAggregatedResources\x12\x31.envoy.service.discovery.v3.DeltaDiscoveryRequest\x1a\x32.envoy.service.discovery.v3.DeltaDiscoveryResponse\"\x00(\x01\x30\x01\x42\x8d\x01\n(io.envoyproxy.envoy.service.discovery.v3B\x08\x41\x64sProtoP\x01ZMgithub.com/envoyproxy/go-control-plane/envoy/service/discovery/v3;discoveryv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Service + module Discovery + module V3 + AdsDummy = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.AdsDummy").msgclass + end + end + end +end diff --git a/lib/envoy/service/discovery/v3/aggregated_discovery_service.rb b/lib/envoy/service/discovery/v3/aggregated_discovery_service.rb new file mode 100644 index 0000000..aefe521 --- /dev/null +++ b/lib/envoy/service/discovery/v3/aggregated_discovery_service.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2025-2026, by Samuel Williams. 
+ +# Service interface for Envoy Aggregated Discovery Service (ADS) +# This defines the RPC methods for xDS communication + +require "protocol/grpc/interface" +require "envoy/service/discovery/v3/discovery_pb" + +module Envoy + module Service + module Discovery + module V3 + # Interface definition for AggregatedDiscoveryService + # Used with Async::GRPC::Client to make xDS calls + # + # @example Using with Async::GRPC::Client + # require "envoy/service/discovery/v3/aggregated_discovery_service" + # require "async/grpc/client" + # + # endpoint = Async::HTTP::Endpoint.parse("https://xds-control-plane:18000") + # http_client = Async::HTTP::Client.new(endpoint) + # grpc_client = Async::GRPC::Client.new(http_client) + # + # stub = grpc_client.stub( + # Envoy::Service::Discovery::V3::AggregatedDiscoveryService, + # "envoy.service.discovery.v3.AggregatedDiscoveryService" + # ) + # + # # Bidirectional streaming RPC + # stub.stream_aggregated_resources do |input, output| + # request = Envoy::Service::Discovery::V3::DiscoveryRequest.new( + # type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + # resource_names: ["my-cluster"] + # ) + # output.write(request) + # + # input.each do |response| + # # Process DiscoveryResponse + # end + # end + class AggregatedDiscoveryService < Protocol::GRPC::Interface + # StreamAggregatedResources is a bidirectional streaming RPC + # Request: stream of DiscoveryRequest + # Response: stream of DiscoveryResponse + rpc :StreamAggregatedResources, + request_class: DiscoveryRequest, + response_class: DiscoveryResponse, + streaming: :bidirectional + + # DeltaAggregatedResources is a bidirectional streaming RPC for incremental xDS + # Request: stream of DeltaDiscoveryRequest + # Response: stream of DeltaDiscoveryResponse + rpc :DeltaAggregatedResources, + request_class: DeltaDiscoveryRequest, + response_class: DeltaDiscoveryResponse, + streaming: :bidirectional + end + end + end + end +end diff --git 
a/lib/envoy/service/discovery/v3/discovery_pb.rb b/lib/envoy/service/discovery/v3/discovery_pb.rb new file mode 100644 index 0000000..05579dc --- /dev/null +++ b/lib/envoy/service/discovery/v3/discovery_pb.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/service/discovery/v3/discovery.proto + +require "google/protobuf" + +require "envoy/config/core/v3/base_pb" +require "google/protobuf/any_pb" +require "google/protobuf/duration_pb" +require "google/rpc/status_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n*envoy/service/discovery/v3/discovery.proto\x12\x1a\x65nvoy.service.discovery.v3\x1a\x1f\x65nvoy/config/core/v3/base.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb9\x01\n\x0fResourceLocator\x12\x0c\n\x04name\x18\x01 \x01(\t\x12^\n\x12\x64ynamic_parameters\x18\x02 \x03(\x0b\x32\x42.envoy.service.discovery.v3.ResourceLocator.DynamicParametersEntry\x1a\x38\n\x16\x44ynamicParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"|\n\x0cResourceName\x12\x0c\n\x04name\x18\x01 \x01(\t\x12^\n\x1d\x64ynamic_parameter_constraints\x18\x02 \x01(\x0b\x32\x37.envoy.service.discovery.v3.DynamicParameterConstraints\"z\n\rResourceError\x12?\n\rresource_name\x18\x01 \x01(\x0b\x32(.envoy.service.discovery.v3.ResourceName\x12(\n\x0c\x65rror_detail\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xac\x02\n\x10\x44iscoveryRequest\x12\x14\n\x0cversion_info\x18\x01 \x01(\t\x12(\n\x04node\x18\x02 \x01(\x0b\x32\x1a.envoy.config.core.v3.Node\x12\x16\n\x0eresource_names\x18\x03 \x03(\t\x12\x46\n\x11resource_locators\x18\x07 \x03(\x0b\x32+.envoy.service.discovery.v3.ResourceLocator\x12\x10\n\x08type_url\x18\x04 
\x01(\t\x12\x16\n\x0eresponse_nonce\x18\x05 \x01(\t\x12(\n\x0c\x65rror_detail\x18\x06 \x01(\x0b\x32\x12.google.rpc.Status:$\x9a\xc5\x88\x1e\x1f\n\x1d\x65nvoy.api.v2.DiscoveryRequest\"\xa9\x02\n\x11\x44iscoveryResponse\x12\x14\n\x0cversion_info\x18\x01 \x01(\t\x12\'\n\tresources\x18\x02 \x03(\x0b\x32\x14.google.protobuf.Any\x12\x0e\n\x06\x63\x61nary\x18\x03 \x01(\x08\x12\x10\n\x08type_url\x18\x04 \x01(\t\x12\r\n\x05nonce\x18\x05 \x01(\t\x12\x39\n\rcontrol_plane\x18\x06 \x01(\x0b\x32\".envoy.config.core.v3.ControlPlane\x12\x42\n\x0fresource_errors\x18\x07 \x03(\x0b\x32).envoy.service.discovery.v3.ResourceError:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.api.v2.DiscoveryResponse\"\xdf\x04\n\x15\x44\x65ltaDiscoveryRequest\x12(\n\x04node\x18\x01 \x01(\x0b\x32\x1a.envoy.config.core.v3.Node\x12\x10\n\x08type_url\x18\x02 \x01(\t\x12 \n\x18resource_names_subscribe\x18\x03 \x03(\t\x12\"\n\x1aresource_names_unsubscribe\x18\x04 \x03(\t\x12P\n\x1bresource_locators_subscribe\x18\x08 \x03(\x0b\x32+.envoy.service.discovery.v3.ResourceLocator\x12R\n\x1dresource_locators_unsubscribe\x18\t \x03(\x0b\x32+.envoy.service.discovery.v3.ResourceLocator\x12q\n\x19initial_resource_versions\x18\x05 \x03(\x0b\x32N.envoy.service.discovery.v3.DeltaDiscoveryRequest.InitialResourceVersionsEntry\x12\x16\n\x0eresponse_nonce\x18\x06 \x01(\t\x12(\n\x0c\x65rror_detail\x18\x07 \x01(\x0b\x32\x12.google.rpc.Status\x1a>\n\x1cInitialResourceVersionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01:)\x9a\xc5\x88\x1e$\n\"envoy.api.v2.DeltaDiscoveryRequest\"\x9f\x03\n\x16\x44\x65ltaDiscoveryResponse\x12\x1b\n\x13system_version_info\x18\x01 \x01(\t\x12\x37\n\tresources\x18\x02 \x03(\x0b\x32$.envoy.service.discovery.v3.Resource\x12\x10\n\x08type_url\x18\x04 \x01(\t\x12\x19\n\x11removed_resources\x18\x06 \x03(\t\x12H\n\x16removed_resource_names\x18\x08 \x03(\x0b\x32(.envoy.service.discovery.v3.ResourceName\x12\r\n\x05nonce\x18\x05 \x01(\t\x12\x39\n\rcontrol_plane\x18\x07 
\x01(\x0b\x32\".envoy.config.core.v3.ControlPlane\x12\x42\n\x0fresource_errors\x18\t \x03(\x0b\x32).envoy.service.discovery.v3.ResourceError:*\x9a\xc5\x88\x1e%\n#envoy.api.v2.DeltaDiscoveryResponse\"\xb6\x05\n\x1b\x44ynamicParameterConstraints\x12^\n\nconstraint\x18\x01 \x01(\x0b\x32H.envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraintH\x00\x12`\n\x0eor_constraints\x18\x02 \x01(\x0b\x32\x46.envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintListH\x00\x12\x61\n\x0f\x61nd_constraints\x18\x03 \x01(\x0b\x32\x46.envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintListH\x00\x12R\n\x0fnot_constraints\x18\x04 \x01(\x0b\x32\x37.envoy.service.discovery.v3.DynamicParameterConstraintsH\x00\x1a\xb5\x01\n\x10SingleConstraint\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0f\n\x05value\x18\x02 \x01(\tH\x00\x12\x61\n\x06\x65xists\x18\x03 \x01(\x0b\x32O.envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.ExistsH\x00\x1a\x08\n\x06\x45xistsB\x16\n\x0f\x63onstraint_type\x12\x03\xf8\x42\x01\x1a^\n\x0e\x43onstraintList\x12L\n\x0b\x63onstraints\x18\x01 \x03(\x0b\x32\x37.envoy.service.discovery.v3.DynamicParameterConstraintsB\x06\n\x04type\"\x8b\x03\n\x08Resource\x12\x0c\n\x04name\x18\x03 \x01(\t\x12?\n\rresource_name\x18\x08 \x01(\x0b\x32(.envoy.service.discovery.v3.ResourceName\x12\x0f\n\x07\x61liases\x18\x04 \x03(\t\x12\x0f\n\x07version\x18\x01 \x01(\t\x12&\n\x08resource\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12&\n\x03ttl\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12H\n\rcache_control\x18\x07 \x01(\x0b\x32\x31.envoy.service.discovery.v3.Resource.CacheControl\x12\x30\n\x08metadata\x18\t \x01(\x0b\x32\x1e.envoy.config.core.v3.Metadata\x1a$\n\x0c\x43\x61\x63heControl\x12\x14\n\x0c\x64o_not_cache\x18\x01 
\x01(\x08:\x1c\x9a\xc5\x88\x1e\x17\n\x15\x65nvoy.api.v2.ResourceB\x93\x01\n(io.envoyproxy.envoy.service.discovery.v3B\x0e\x44iscoveryProtoP\x01ZMgithub.com/envoyproxy/go-control-plane/envoy/service/discovery/v3;discoveryv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Service + module Discovery + module V3 + ResourceLocator = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.ResourceLocator").msgclass + ResourceName = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.ResourceName").msgclass + ResourceError = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.ResourceError").msgclass + DiscoveryRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DiscoveryRequest").msgclass + DiscoveryResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DiscoveryResponse").msgclass + DeltaDiscoveryRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DeltaDiscoveryRequest").msgclass + DeltaDiscoveryResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DeltaDiscoveryResponse").msgclass + DynamicParameterConstraints = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DynamicParameterConstraints").msgclass + DynamicParameterConstraints::SingleConstraint = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint").msgclass + DynamicParameterConstraints::SingleConstraint::Exists = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DynamicParameterConstraints.SingleConstraint.Exists").msgclass + DynamicParameterConstraints::ConstraintList = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.DynamicParameterConstraints.ConstraintList").msgclass + Resource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.Resource").msgclass + Resource::CacheControl = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.service.discovery.v3.Resource.CacheControl").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/address_pb.rb b/lib/envoy/type/matcher/v3/address_pb.rb new file mode 100644 index 0000000..0613041 --- /dev/null +++ b/lib/envoy/type/matcher/v3/address_pb.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/address.proto + +require "google/protobuf" + +require "xds/core/v3/cidr_pb" +require "udpa/annotations/status_pb" + + +descriptor_data = "\n#envoy/type/matcher/v3/address.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\x16xds/core/v3/cidr.proto\x1a\x1dudpa/annotations/status.proto\"8\n\x0e\x41\x64\x64ressMatcher\x12&\n\x06ranges\x18\x01 \x03(\x0b\x32\x16.xds.core.v3.CidrRangeB\x85\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x0c\x41\x64\x64ressProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + AddressMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.AddressMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/filter_state_pb.rb b/lib/envoy/type/matcher/v3/filter_state_pb.rb new file mode 100644 index 0000000..ef7f349 --- /dev/null +++ b/lib/envoy/type/matcher/v3/filter_state_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/matcher/v3/filter_state.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/address_pb" +require "envoy/type/matcher/v3/string_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n(envoy/type/matcher/v3/filter_state.proto\x12\x15\x65nvoy.type.matcher.v3\x1a#envoy/type/matcher/v3/address.proto\x1a\"envoy/type/matcher/v3/string.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\xb8\x01\n\x12\x46ilterStateMatcher\x12\x14\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12<\n\x0cstring_match\x18\x02 \x01(\x0b\x32$.envoy.type.matcher.v3.StringMatcherH\x00\x12>\n\raddress_match\x18\x03 \x01(\x0b\x32%.envoy.type.matcher.v3.AddressMatcherH\x00\x42\x0e\n\x07matcher\x12\x03\xf8\x42\x01\x42\x89\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x10\x46ilterStateProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + FilterStateMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.FilterStateMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/http_inputs_pb.rb b/lib/envoy/type/matcher/v3/http_inputs_pb.rb new file mode 100644 index 0000000..b466325 --- /dev/null +++ b/lib/envoy/type/matcher/v3/http_inputs_pb.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/matcher/v3/http_inputs.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\'envoy/type/matcher/v3/http_inputs.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"?\n\x1bHttpRequestHeaderMatchInput\x12 \n\x0bheader_name\x18\x01 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x01\xc8\x01\x00\"@\n\x1cHttpRequestTrailerMatchInput\x12 \n\x0bheader_name\x18\x01 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x01\xc8\x01\x00\"@\n\x1cHttpResponseHeaderMatchInput\x12 \n\x0bheader_name\x18\x01 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x01\xc8\x01\x00\"A\n\x1dHttpResponseTrailerMatchInput\x12 \n\x0bheader_name\x18\x01 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x01\xc8\x01\x00\"?\n\x1fHttpRequestQueryParamMatchInput\x12\x1c\n\x0bquery_param\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x42\x88\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x0fHttpInputsProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + HttpRequestHeaderMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpRequestHeaderMatchInput").msgclass + HttpRequestTrailerMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpRequestTrailerMatchInput").msgclass + HttpResponseHeaderMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpResponseHeaderMatchInput").msgclass + HttpResponseTrailerMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpResponseTrailerMatchInput").msgclass + HttpRequestQueryParamMatchInput = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpRequestQueryParamMatchInput").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/metadata_pb.rb b/lib/envoy/type/matcher/v3/metadata_pb.rb new file mode 100644 index 0000000..16a93ea --- /dev/null +++ b/lib/envoy/type/matcher/v3/metadata_pb.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/metadata.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/value_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n$envoy/type/matcher/v3/metadata.proto\x12\x15\x65nvoy.type.matcher.v3\x1a!envoy/type/matcher/v3/value.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xdd\x02\n\x0fMetadataMatcher\x12\x17\n\x06\x66ilter\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12J\n\x04path\x18\x02 \x03(\x0b\x32\x32.envoy.type.matcher.v3.MetadataMatcher.PathSegmentB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12<\n\x05value\x18\x03 \x01(\x0b\x32#.envoy.type.matcher.v3.ValueMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x0e\n\x06invert\x18\x04 \x01(\x08\x1al\n\x0bPathSegment\x12\x16\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00:5\x9a\xc5\x88\x1e\x30\n.envoy.type.matcher.MetadataMatcher.PathSegmentB\x0e\n\x07segment\x12\x03\xf8\x42\x01:)\x9a\xc5\x88\x1e$\n\"envoy.type.matcher.MetadataMatcherB\x86\x01\n#io.envoyproxy.envoy.type.matcher.v3B\rMetadataProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + MetadataMatcher = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.MetadataMatcher").msgclass + MetadataMatcher::PathSegment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.MetadataMatcher.PathSegment").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/node_pb.rb b/lib/envoy/type/matcher/v3/node_pb.rb new file mode 100644 index 0000000..eeb53fc --- /dev/null +++ b/lib/envoy/type/matcher/v3/node_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/node.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/string_pb" +require "envoy/type/matcher/v3/struct_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" + + +descriptor_data = "\n envoy/type/matcher/v3/node.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\"envoy/type/matcher/v3/string.proto\x1a\"envoy/type/matcher/v3/struct.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\"\xa9\x01\n\x0bNodeMatcher\x12\x35\n\x07node_id\x18\x01 \x01(\x0b\x32$.envoy.type.matcher.v3.StringMatcher\x12<\n\x0enode_metadatas\x18\x02 \x03(\x0b\x32$.envoy.type.matcher.v3.StructMatcher:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.type.matcher.NodeMatcherB\x82\x01\n#io.envoyproxy.envoy.type.matcher.v3B\tNodeProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + NodeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.NodeMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/number_pb.rb b/lib/envoy/type/matcher/v3/number_pb.rb new file mode 100644 index 0000000..4fc7fb3 --- /dev/null +++ 
b/lib/envoy/type/matcher/v3/number_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/number.proto + +require "google/protobuf" + +require "envoy/type/v3/range_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"envoy/type/matcher/v3/number.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\x19\x65nvoy/type/v3/range.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\x8c\x01\n\rDoubleMatcher\x12+\n\x05range\x18\x01 \x01(\x0b\x32\x1a.envoy.type.v3.DoubleRangeH\x00\x12\x0f\n\x05\x65xact\x18\x02 \x01(\x01H\x00:\'\x9a\xc5\x88\x1e\"\n envoy.type.matcher.DoubleMatcherB\x14\n\rmatch_pattern\x12\x03\xf8\x42\x01\x42\x84\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x0bNumberProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + DoubleMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.DoubleMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/path_pb.rb b/lib/envoy/type/matcher/v3/path_pb.rb new file mode 100644 index 0000000..36b61ff --- /dev/null +++ b/lib/envoy/type/matcher/v3/path_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/matcher/v3/path.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/string_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n envoy/type/matcher/v3/path.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\"envoy/type/matcher/v3/string.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\x81\x01\n\x0bPathMatcher\x12>\n\x04path\x18\x01 \x01(\x0b\x32$.envoy.type.matcher.v3.StringMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.type.matcher.PathMatcherB\x0b\n\x04rule\x12\x03\xf8\x42\x01\x42\x82\x01\n#io.envoyproxy.envoy.type.matcher.v3B\tPathProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + PathMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.PathMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/regex_pb.rb b/lib/envoy/type/matcher/v3/regex_pb.rb new file mode 100644 index 0000000..5ad3707 --- /dev/null +++ b/lib/envoy/type/matcher/v3/regex_pb.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/matcher/v3/regex.proto + +require "google/protobuf" + +require "google/protobuf/wrappers_pb" +require "envoy/annotations/deprecation_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n!envoy/type/matcher/v3/regex.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\x1egoogle/protobuf/wrappers.proto\x1a#envoy/annotations/deprecation.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xb4\x02\n\x0cRegexMatcher\x12P\n\ngoogle_re2\x18\x01 \x01(\x0b\x32-.envoy.type.matcher.v3.RegexMatcher.GoogleRE2B\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0H\x00\x12\x16\n\x05regex\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x1a\x82\x01\n\tGoogleRE2\x12\x43\n\x10max_program_size\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x0b\x18\x01\x92\xc7\x86\xd8\x04\x03\x33.0:0\x9a\xc5\x88\x1e+\n)envoy.type.matcher.RegexMatcher.GoogleRE2:&\x9a\xc5\x88\x1e!\n\x1f\x65nvoy.type.matcher.RegexMatcherB\r\n\x0b\x65ngine_type\"\xaf\x01\n\x17RegexMatchAndSubstitute\x12>\n\x07pattern\x18\x01 \x01(\x0b\x32#.envoy.type.matcher.v3.RegexMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12!\n\x0csubstitution\x18\x02 \x01(\tB\x0b\xfa\x42\x08r\x06\xc0\x01\x02\xc8\x01\x00:1\x9a\xc5\x88\x1e,\n*envoy.type.matcher.RegexMatchAndSubstituteB\x83\x01\n#io.envoyproxy.envoy.type.matcher.v3B\nRegexProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + RegexMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.RegexMatcher").msgclass + RegexMatcher::GoogleRE2 = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.RegexMatcher.GoogleRE2").msgclass + 
RegexMatchAndSubstitute = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.RegexMatchAndSubstitute").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/status_code_input_pb.rb b/lib/envoy/type/matcher/v3/status_code_input_pb.rb new file mode 100644 index 0000000..a1f5c9d --- /dev/null +++ b/lib/envoy/type/matcher/v3/status_code_input_pb.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/status_code_input.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" + + +descriptor_data = "\n-envoy/type/matcher/v3/status_code_input.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\x1dudpa/annotations/status.proto\"\"\n HttpResponseStatusCodeMatchInput\"\'\n%HttpResponseStatusCodeClassMatchInputB\x8d\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x14StatusCodeInputProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + HttpResponseStatusCodeMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpResponseStatusCodeMatchInput").msgclass + HttpResponseStatusCodeClassMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.HttpResponseStatusCodeClassMatchInput").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/string_pb.rb b/lib/envoy/type/matcher/v3/string_pb.rb new file mode 100644 index 0000000..447a665 --- /dev/null +++ b/lib/envoy/type/matcher/v3/string_pb.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/matcher/v3/string.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/regex_pb" +require "xds/core/v3/extension_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"envoy/type/matcher/v3/string.proto\x12\x15\x65nvoy.type.matcher.v3\x1a!envoy/type/matcher/v3/regex.proto\x1a\x1bxds/core/v3/extension.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xce\x02\n\rStringMatcher\x12\x0f\n\x05\x65xact\x18\x01 \x01(\tH\x00\x12\x19\n\x06prefix\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x19\n\x06suffix\x18\x03 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x43\n\nsafe_regex\x18\x05 \x01(\x0b\x32#.envoy.type.matcher.v3.RegexMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00\x12\x1b\n\x08\x63ontains\x18\x07 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x33\n\x06\x63ustom\x18\x08 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigH\x00\x12\x13\n\x0bignore_case\x18\x06 \x01(\x08:\'\x9a\xc5\x88\x1e\"\n envoy.type.matcher.StringMatcherB\x14\n\rmatch_pattern\x12\x03\xf8\x42\x01J\x04\x08\x04\x10\x05R\x05regex\"\x82\x01\n\x11ListStringMatcher\x12@\n\x08patterns\x18\x01 \x03(\x0b\x32$.envoy.type.matcher.v3.StringMatcherB\x08\xfa\x42\x05\x92\x01\x02\x08\x01:+\x9a\xc5\x88\x1e&\n$envoy.type.matcher.ListStringMatcherB\x84\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x0bStringProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + StringMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.StringMatcher").msgclass + ListStringMatcher = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.ListStringMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/struct_pb.rb b/lib/envoy/type/matcher/v3/struct_pb.rb new file mode 100644 index 0000000..adad5eb --- /dev/null +++ b/lib/envoy/type/matcher/v3/struct_pb.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/struct.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/value_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"envoy/type/matcher/v3/struct.proto\x12\x15\x65nvoy.type.matcher.v3\x1a!envoy/type/matcher/v3/value.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xac\x02\n\rStructMatcher\x12H\n\x04path\x18\x02 \x03(\x0b\x32\x30.envoy.type.matcher.v3.StructMatcher.PathSegmentB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12<\n\x05value\x18\x03 \x01(\x0b\x32#.envoy.type.matcher.v3.ValueMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x1aj\n\x0bPathSegment\x12\x16\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00:3\x9a\xc5\x88\x1e.\n,envoy.type.matcher.StructMatcher.PathSegmentB\x0e\n\x07segment\x12\x03\xf8\x42\x01:\'\x9a\xc5\x88\x1e\"\n envoy.type.matcher.StructMatcherB\x84\x01\n#io.envoyproxy.envoy.type.matcher.v3B\x0bStructProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + StructMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.StructMatcher").msgclass + StructMatcher::PathSegment = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.StructMatcher.PathSegment").msgclass + end + end + end +end diff --git a/lib/envoy/type/matcher/v3/value_pb.rb b/lib/envoy/type/matcher/v3/value_pb.rb new file mode 100644 index 0000000..8cf7354 --- /dev/null +++ b/lib/envoy/type/matcher/v3/value_pb.rb @@ -0,0 +1,31 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/matcher/v3/value.proto + +require "google/protobuf" + +require "envoy/type/matcher/v3/number_pb" +require "envoy/type/matcher/v3/string_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n!envoy/type/matcher/v3/value.proto\x12\x15\x65nvoy.type.matcher.v3\x1a\"envoy/type/matcher/v3/number.proto\x1a\"envoy/type/matcher/v3/string.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xeb\x03\n\x0cValueMatcher\x12\x43\n\nnull_match\x18\x01 \x01(\x0b\x32-.envoy.type.matcher.v3.ValueMatcher.NullMatchH\x00\x12<\n\x0c\x64ouble_match\x18\x02 \x01(\x0b\x32$.envoy.type.matcher.v3.DoubleMatcherH\x00\x12<\n\x0cstring_match\x18\x03 \x01(\x0b\x32$.envoy.type.matcher.v3.StringMatcherH\x00\x12\x14\n\nbool_match\x18\x04 \x01(\x08H\x00\x12\x17\n\rpresent_match\x18\x05 \x01(\x08H\x00\x12\x38\n\nlist_match\x18\x06 \x01(\x0b\x32\".envoy.type.matcher.v3.ListMatcherH\x00\x12\x34\n\x08or_match\x18\x07 \x01(\x0b\x32 .envoy.type.matcher.v3.OrMatcherH\x00\x1a=\n\tNullMatch:0\x9a\xc5\x88\x1e+\n)envoy.type.matcher.ValueMatcher.NullMatch:&\x9a\xc5\x88\x1e!\n\x1f\x65nvoy.type.matcher.ValueMatcherB\x14\n\rmatch_pattern\x12\x03\xf8\x42\x01\"\x81\x01\n\x0bListMatcher\x12\x35\n\x06one_of\x18\x01 \x01(\x0b\x32#.envoy.type.matcher.v3.ValueMatcherH\x00:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.type.matcher.ListMatcherB\x14\n\rmatch_pattern\x12\x03\xf8\x42\x01\"R\n\tOrMatcher\x12\x45\n\x0evalue_matchers\x18\x01 
\x03(\x0b\x32#.envoy.type.matcher.v3.ValueMatcherB\x08\xfa\x42\x05\x92\x01\x02\x08\x02\x42\x83\x01\n#io.envoyproxy.envoy.type.matcher.v3B\nValueProtoP\x01ZFgithub.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module Matcher + module V3 + ValueMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.ValueMatcher").msgclass + ValueMatcher::NullMatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.ValueMatcher.NullMatch").msgclass + ListMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.ListMatcher").msgclass + OrMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.matcher.v3.OrMatcher").msgclass + end + end + end +end diff --git a/lib/envoy/type/metadata/v3/metadata_pb.rb b/lib/envoy/type/metadata/v3/metadata_pb.rb new file mode 100644 index 0000000..615d515 --- /dev/null +++ b/lib/envoy/type/metadata/v3/metadata_pb.rb @@ -0,0 +1,32 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/metadata/v3/metadata.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n%envoy/type/metadata/v3/metadata.proto\x12\x16\x65nvoy.type.metadata.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\x85\x02\n\x0bMetadataKey\x12\x14\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12G\n\x04path\x18\x02 \x03(\x0b\x32/.envoy.type.metadata.v3.MetadataKey.PathSegmentB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x1al\n\x0bPathSegment\x12\x16\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00:5\x9a\xc5\x88\x1e\x30\n.envoy.type.metadata.v2.MetadataKey.PathSegmentB\x0e\n\x07segment\x12\x03\xf8\x42\x01:)\x9a\xc5\x88\x1e$\n\"envoy.type.metadata.v2.MetadataKey\"\xb3\x04\n\x0cMetadataKind\x12?\n\x07request\x18\x01 \x01(\x0b\x32,.envoy.type.metadata.v3.MetadataKind.RequestH\x00\x12;\n\x05route\x18\x02 \x01(\x0b\x32*.envoy.type.metadata.v3.MetadataKind.RouteH\x00\x12?\n\x07\x63luster\x18\x03 \x01(\x0b\x32,.envoy.type.metadata.v3.MetadataKind.ClusterH\x00\x12\x39\n\x04host\x18\x04 \x01(\x0b\x32).envoy.type.metadata.v3.MetadataKind.HostH\x00\x1a=\n\x07Request:2\x9a\xc5\x88\x1e-\n+envoy.type.metadata.v2.MetadataKind.Request\x1a\x39\n\x05Route:0\x9a\xc5\x88\x1e+\n)envoy.type.metadata.v2.MetadataKind.Route\x1a=\n\x07\x43luster:2\x9a\xc5\x88\x1e-\n+envoy.type.metadata.v2.MetadataKind.Cluster\x1a\x37\n\x04Host:/\x9a\xc5\x88\x1e*\n(envoy.type.metadata.v2.MetadataKind.Host:*\x9a\xc5\x88\x1e%\n#envoy.type.metadata.v2.MetadataKindB\x0b\n\x04kind\x12\x03\xf8\x42\x01\x42\x89\x01\n$io.envoyproxy.envoy.type.metadata.v3B\rMetadataProtoP\x01ZHgithub.com/envoyproxy/go-control-plane/envoy/type/metadata/v3;metadatav3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + 
module Metadata + module V3 + MetadataKey = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKey").msgclass + MetadataKey::PathSegment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKey.PathSegment").msgclass + MetadataKind = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKind").msgclass + MetadataKind::Request = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKind.Request").msgclass + MetadataKind::Route = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKind.Route").msgclass + MetadataKind::Cluster = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKind.Cluster").msgclass + MetadataKind::Host = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.metadata.v3.MetadataKind.Host").msgclass + end + end + end +end diff --git a/lib/envoy/type/v3/hash_policy_pb.rb b/lib/envoy/type/v3/hash_policy_pb.rb new file mode 100644 index 0000000..7999bd6 --- /dev/null +++ b/lib/envoy/type/v3/hash_policy_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/v3/hash_policy.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1f\x65nvoy/type/v3/hash_policy.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\x93\x02\n\nHashPolicy\x12\x37\n\tsource_ip\x18\x01 \x01(\x0b\x32\".envoy.type.v3.HashPolicy.SourceIpH\x00\x12=\n\x0c\x66ilter_state\x18\x02 \x01(\x0b\x32%.envoy.type.v3.HashPolicy.FilterStateH\x00\x1a\x31\n\x08SourceIp:%\x9a\xc5\x88\x1e \n\x1e\x65nvoy.type.HashPolicy.SourceIp\x1a#\n\x0b\x46ilterState\x12\x14\n\x03key\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01:\x1c\x9a\xc5\x88\x1e\x17\n\x15\x65nvoy.type.HashPolicyB\x17\n\x10policy_specifier\x12\x03\xf8\x42\x01\x42u\n\x1bio.envoyproxy.envoy.type.v3B\x0fHashPolicyProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + HashPolicy = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.HashPolicy").msgclass + HashPolicy::SourceIp = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.HashPolicy.SourceIp").msgclass + HashPolicy::FilterState = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.HashPolicy.FilterState").msgclass + end + end +end diff --git a/lib/envoy/type/v3/http_pb.rb b/lib/envoy/type/v3/http_pb.rb new file mode 100644 index 0000000..76a0b59 --- /dev/null +++ b/lib/envoy/type/v3/http_pb.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/v3/http.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" + + +descriptor_data = "\n\x18\x65nvoy/type/v3/http.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto*2\n\x0f\x43odecClientType\x12\t\n\x05HTTP1\x10\x00\x12\t\n\x05HTTP2\x10\x01\x12\t\n\x05HTTP3\x10\x02\x42o\n\x1bio.envoyproxy.envoy.type.v3B\tHttpProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + CodecClientType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.CodecClientType").enummodule + end + end +end diff --git a/lib/envoy/type/v3/http_status_pb.rb b/lib/envoy/type/v3/http_status_pb.rb new file mode 100644 index 0000000..57db855 --- /dev/null +++ b/lib/envoy/type/v3/http_status_pb.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/v3/http_status.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1f\x65nvoy/type/v3/http_status.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"_\n\nHttpStatus\x12\x33\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x19.envoy.type.v3.StatusCodeB\n\xfa\x42\x07\x82\x01\x04\x10\x01 \x00:\x1c\x9a\xc5\x88\x1e\x17\n\x15\x65nvoy.type.HttpStatus*\xb5\t\n\nStatusCode\x12\t\n\x05\x45mpty\x10\x00\x12\x0c\n\x08\x43ontinue\x10\x64\x12\x07\n\x02OK\x10\xc8\x01\x12\x0c\n\x07\x43reated\x10\xc9\x01\x12\r\n\x08\x41\x63\x63\x65pted\x10\xca\x01\x12 \n\x1bNonAuthoritativeInformation\x10\xcb\x01\x12\x0e\n\tNoContent\x10\xcc\x01\x12\x11\n\x0cResetContent\x10\xcd\x01\x12\x13\n\x0ePartialContent\x10\xce\x01\x12\x10\n\x0bMultiStatus\x10\xcf\x01\x12\x14\n\x0f\x41lreadyReported\x10\xd0\x01\x12\x0b\n\x06IMUsed\x10\xe2\x01\x12\x14\n\x0fMultipleChoices\x10\xac\x02\x12\x15\n\x10MovedPermanently\x10\xad\x02\x12\n\n\x05\x46ound\x10\xae\x02\x12\r\n\x08SeeOther\x10\xaf\x02\x12\x10\n\x0bNotModified\x10\xb0\x02\x12\r\n\x08UseProxy\x10\xb1\x02\x12\x16\n\x11TemporaryRedirect\x10\xb3\x02\x12\x16\n\x11PermanentRedirect\x10\xb4\x02\x12\x0f\n\nBadRequest\x10\x90\x03\x12\x11\n\x0cUnauthorized\x10\x91\x03\x12\x14\n\x0fPaymentRequired\x10\x92\x03\x12\x0e\n\tForbidden\x10\x93\x03\x12\r\n\x08NotFound\x10\x94\x03\x12\x15\n\x10MethodNotAllowed\x10\x95\x03\x12\x12\n\rNotAcceptable\x10\x96\x03\x12 
\n\x1bProxyAuthenticationRequired\x10\x97\x03\x12\x13\n\x0eRequestTimeout\x10\x98\x03\x12\r\n\x08\x43onflict\x10\x99\x03\x12\t\n\x04Gone\x10\x9a\x03\x12\x13\n\x0eLengthRequired\x10\x9b\x03\x12\x17\n\x12PreconditionFailed\x10\x9c\x03\x12\x14\n\x0fPayloadTooLarge\x10\x9d\x03\x12\x0f\n\nURITooLong\x10\x9e\x03\x12\x19\n\x14UnsupportedMediaType\x10\x9f\x03\x12\x18\n\x13RangeNotSatisfiable\x10\xa0\x03\x12\x16\n\x11\x45xpectationFailed\x10\xa1\x03\x12\x17\n\x12MisdirectedRequest\x10\xa5\x03\x12\x18\n\x13UnprocessableEntity\x10\xa6\x03\x12\x0b\n\x06Locked\x10\xa7\x03\x12\x15\n\x10\x46\x61iledDependency\x10\xa8\x03\x12\x14\n\x0fUpgradeRequired\x10\xaa\x03\x12\x19\n\x14PreconditionRequired\x10\xac\x03\x12\x14\n\x0fTooManyRequests\x10\xad\x03\x12 \n\x1bRequestHeaderFieldsTooLarge\x10\xaf\x03\x12\x18\n\x13InternalServerError\x10\xf4\x03\x12\x13\n\x0eNotImplemented\x10\xf5\x03\x12\x0f\n\nBadGateway\x10\xf6\x03\x12\x17\n\x12ServiceUnavailable\x10\xf7\x03\x12\x13\n\x0eGatewayTimeout\x10\xf8\x03\x12\x1c\n\x17HTTPVersionNotSupported\x10\xf9\x03\x12\x1a\n\x15VariantAlsoNegotiates\x10\xfa\x03\x12\x18\n\x13InsufficientStorage\x10\xfb\x03\x12\x11\n\x0cLoopDetected\x10\xfc\x03\x12\x10\n\x0bNotExtended\x10\xfe\x03\x12\"\n\x1dNetworkAuthenticationRequired\x10\xff\x03\x42u\n\x1bio.envoyproxy.envoy.type.v3B\x0fHttpStatusProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + HttpStatus = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.HttpStatus").msgclass + StatusCode = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.StatusCode").enummodule + end + end +end diff --git a/lib/envoy/type/v3/percent_pb.rb b/lib/envoy/type/v3/percent_pb.rb new file mode 100644 index 0000000..e980667 --- /dev/null +++ b/lib/envoy/type/v3/percent_pb.rb @@ -0,0 
+1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/v3/percent.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1b\x65nvoy/type/v3/percent.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"L\n\x07Percent\x12&\n\x05value\x18\x01 \x01(\x01\x42\x17\xfa\x42\x14\x12\x12\x19\x00\x00\x00\x00\x00\x00Y@)\x00\x00\x00\x00\x00\x00\x00\x00:\x19\x9a\xc5\x88\x1e\x14\n\x12\x65nvoy.type.Percent\"\xdb\x01\n\x11\x46ractionalPercent\x12\x11\n\tnumerator\x18\x01 \x01(\r\x12O\n\x0b\x64\x65nominator\x18\x02 \x01(\x0e\x32\x30.envoy.type.v3.FractionalPercent.DenominatorTypeB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\"=\n\x0f\x44\x65nominatorType\x12\x0b\n\x07HUNDRED\x10\x00\x12\x10\n\x0cTEN_THOUSAND\x10\x01\x12\x0b\n\x07MILLION\x10\x02:#\x9a\xc5\x88\x1e\x1e\n\x1c\x65nvoy.type.FractionalPercentBr\n\x1bio.envoyproxy.envoy.type.v3B\x0cPercentProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + Percent = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.Percent").msgclass + FractionalPercent = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.FractionalPercent").msgclass + FractionalPercent::DenominatorType = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.FractionalPercent.DenominatorType").enummodule + end + end +end diff --git a/lib/envoy/type/v3/range_pb.rb b/lib/envoy/type/v3/range_pb.rb new file mode 100644 index 0000000..0b86923 --- /dev/null +++ b/lib/envoy/type/v3/range_pb.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +# Generated 
by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/v3/range.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" + + +descriptor_data = "\n\x19\x65nvoy/type/v3/range.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\"F\n\nInt64Range\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x03:\x1c\x9a\xc5\x88\x1e\x17\n\x15\x65nvoy.type.Int64Range\"F\n\nInt32Range\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05:\x1c\x9a\xc5\x88\x1e\x17\n\x15\x65nvoy.type.Int32Range\"H\n\x0b\x44oubleRange\x12\r\n\x05start\x18\x01 \x01(\x01\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x01:\x1d\x9a\xc5\x88\x1e\x18\n\x16\x65nvoy.type.DoubleRangeBp\n\x1bio.envoyproxy.envoy.type.v3B\nRangeProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + Int64Range = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.Int64Range").msgclass + Int32Range = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.Int32Range").msgclass + DoubleRange = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.DoubleRange").msgclass + end + end +end diff --git a/lib/envoy/type/v3/ratelimit_strategy_pb.rb b/lib/envoy/type/v3/ratelimit_strategy_pb.rb new file mode 100644 index 0000000..6133e6d --- /dev/null +++ b/lib/envoy/type/v3/ratelimit_strategy_pb.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/v3/ratelimit_strategy.proto + +require "google/protobuf" + +require "envoy/type/v3/ratelimit_unit_pb" +require "envoy/type/v3/token_bucket_pb" +require "xds/annotations/v3/status_pb" +require "udpa/annotations/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n&envoy/type/v3/ratelimit_strategy.proto\x12\renvoy.type.v3\x1a\"envoy/type/v3/ratelimit_unit.proto\x1a envoy/type/v3/token_bucket.proto\x1a\x1fxds/annotations/v3/status.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\x9e\x03\n\x11RateLimitStrategy\x12N\n\x0c\x62lanket_rule\x18\x01 \x01(\x0e\x32,.envoy.type.v3.RateLimitStrategy.BlanketRuleB\x08\xfa\x42\x05\x82\x01\x02\x10\x01H\x00\x12V\n\x16requests_per_time_unit\x18\x02 \x01(\x0b\x32\x34.envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnitH\x00\x12\x32\n\x0ctoken_bucket\x18\x03 \x01(\x0b\x32\x1a.envoy.type.v3.TokenBucketH\x00\x1ap\n\x13RequestsPerTimeUnit\x12\x1e\n\x16requests_per_time_unit\x18\x01 \x01(\x04\x12\x39\n\ttime_unit\x18\x02 \x01(\x0e\x32\x1c.envoy.type.v3.RateLimitUnitB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\"*\n\x0b\x42lanketRule\x12\r\n\tALLOW_ALL\x10\x00\x12\x0c\n\x08\x44\x45NY_ALL\x10\x01\x42\x0f\n\x08strategy\x12\x03\xf8\x42\x01\x42\x84\x01\n\x1bio.envoyproxy.envoy.type.v3B\x16RatelimitStrategyProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + RateLimitStrategy = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.RateLimitStrategy").msgclass + RateLimitStrategy::RequestsPerTimeUnit = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.RateLimitStrategy.RequestsPerTimeUnit").msgclass + RateLimitStrategy::BlanketRule = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.RateLimitStrategy.BlanketRule").enummodule + end + end +end diff --git a/lib/envoy/type/v3/ratelimit_unit_pb.rb b/lib/envoy/type/v3/ratelimit_unit_pb.rb new file mode 100644 index 0000000..1512761 --- /dev/null +++ b/lib/envoy/type/v3/ratelimit_unit_pb.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: envoy/type/v3/ratelimit_unit.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" + + +descriptor_data = "\n\"envoy/type/v3/ratelimit_unit.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto*\\\n\rRateLimitUnit\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SECOND\x10\x01\x12\n\n\x06MINUTE\x10\x02\x12\x08\n\x04HOUR\x10\x03\x12\x07\n\x03\x44\x41Y\x10\x04\x12\t\n\x05MONTH\x10\x05\x12\x08\n\x04YEAR\x10\x06\x42x\n\x1bio.envoyproxy.envoy.type.v3B\x12RatelimitUnitProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + RateLimitUnit = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.RateLimitUnit").enummodule + end + end +end diff --git a/lib/envoy/type/v3/semantic_version_pb.rb b/lib/envoy/type/v3/semantic_version_pb.rb new file mode 100644 index 0000000..3a72307 --- /dev/null +++ b/lib/envoy/type/v3/semantic_version_pb.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/v3/semantic_version.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" + + +descriptor_data = "\n$envoy/type/v3/semantic_version.proto\x12\renvoy.type.v3\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\"o\n\x0fSemanticVersion\x12\x14\n\x0cmajor_number\x18\x01 \x01(\r\x12\x14\n\x0cminor_number\x18\x02 \x01(\r\x12\r\n\x05patch\x18\x03 \x01(\r:!\x9a\xc5\x88\x1e\x1c\n\x1a\x65nvoy.type.SemanticVersionBz\n\x1bio.envoyproxy.envoy.type.v3B\x14SemanticVersionProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + SemanticVersion = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.SemanticVersion").msgclass + end + end +end diff --git a/lib/envoy/type/v3/token_bucket_pb.rb b/lib/envoy/type/v3/token_bucket_pb.rb new file mode 100644 index 0000000..577a147 --- /dev/null +++ b/lib/envoy/type/v3/token_bucket_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: envoy/type/v3/token_bucket.proto + +require "google/protobuf" + +require "google/protobuf/duration_pb" +require "google/protobuf/wrappers_pb" +require "udpa/annotations/status_pb" +require "udpa/annotations/versioning_pb" +require "validate/validate_pb" + + +descriptor_data = "\n envoy/type/v3/token_bucket.proto\x12\renvoy.type.v3\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dudpa/annotations/status.proto\x1a!udpa/annotations/versioning.proto\x1a\x17validate/validate.proto\"\xc7\x01\n\x0bTokenBucket\x12\x1b\n\nmax_tokens\x18\x01 \x01(\rB\x07\xfa\x42\x04*\x02 \x00\x12>\n\x0ftokens_per_fill\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x07\xfa\x42\x04*\x02 \x00\x12<\n\rfill_interval\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\n\xfa\x42\x07\xaa\x01\x04\x08\x01*\x00:\x1d\x9a\xc5\x88\x1e\x18\n\x16\x65nvoy.type.TokenBucketBv\n\x1bio.envoyproxy.envoy.type.v3B\x10TokenBucketProtoP\x01Z;github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3\xba\x80\xc8\xd1\x06\x02\x10\x02\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Envoy + module Type + module V3 + TokenBucket = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("envoy.type.v3.TokenBucket").msgclass + end + end +end diff --git a/lib/google/protobuf/any_pb.rb b/lib/google/protobuf/any_pb.rb new file mode 100644 index 0000000..063b706 --- /dev/null +++ b/lib/google/protobuf/any_pb.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/any.proto + +require "google/protobuf" + + +descriptor_data = "\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42v\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Protobuf + Any = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Any").msgclass + end +end diff --git a/lib/google/protobuf/duration_pb.rb b/lib/google/protobuf/duration_pb.rb new file mode 100644 index 0000000..fdc9b4e --- /dev/null +++ b/lib/google/protobuf/duration_pb.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/duration.proto + +require "google/protobuf" + + +descriptor_data = "\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Protobuf + Duration = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Duration").msgclass + end +end diff --git a/lib/google/protobuf/empty_pb.rb b/lib/google/protobuf/empty_pb.rb new file mode 100644 index 0000000..f3ca6c4 --- /dev/null +++ b/lib/google/protobuf/empty_pb.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/empty.proto + +require "google/protobuf" + + +descriptor_data = "\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Protobuf + Empty = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Empty").msgclass + end +end diff --git a/lib/google/protobuf/struct_pb.rb b/lib/google/protobuf/struct_pb.rb new file mode 100644 index 0000000..c5ba85f --- /dev/null +++ b/lib/google/protobuf/struct_pb.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/struct.proto + +require "google/protobuf" + + +descriptor_data = "\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x84\x01\n\x06Struct\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntry\x1a\x45\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\"\xea\x01\n\x05Value\x12\x30\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x16\n\x0cnumber_value\x18\x02 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x12/\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x30\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x42\x06\n\x04kind\"3\n\tListValue\x12&\n\x06values\x18\x01 
\x03(\x0b\x32\x16.google.protobuf.Value*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Protobuf + Struct = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Struct").msgclass + Value = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Value").msgclass + ListValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.ListValue").msgclass + NullValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.NullValue").enummodule + end +end diff --git a/lib/google/protobuf/timestamp_pb.rb b/lib/google/protobuf/timestamp_pb.rb new file mode 100644 index 0000000..d391f75 --- /dev/null +++ b/lib/google/protobuf/timestamp_pb.rb @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/protobuf/timestamp.proto + +require "google/protobuf" + + +descriptor_data = "\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Protobuf + Timestamp = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Timestamp").msgclass + end +end diff --git a/lib/google/protobuf/wrappers_pb.rb b/lib/google/protobuf/wrappers_pb.rb new file mode 100644 index 0000000..764ddff --- /dev/null +++ b/lib/google/protobuf/wrappers_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/protobuf/wrappers.proto + +require "google/protobuf" + + +descriptor_data = "\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42\x83\x01\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Protobuf + DoubleValue 
= ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.DoubleValue").msgclass + FloatValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.FloatValue").msgclass + Int64Value = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Int64Value").msgclass + UInt64Value = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.UInt64Value").msgclass + Int32Value = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Int32Value").msgclass + UInt32Value = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.UInt32Value").msgclass + BoolValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.BoolValue").msgclass + StringValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.StringValue").msgclass + BytesValue = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.BytesValue").msgclass + end +end diff --git a/lib/google/rpc/status_pb.rb b/lib/google/rpc/status_pb.rb new file mode 100644 index 0000000..08d3465 --- /dev/null +++ b/lib/google/rpc/status_pb.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/rpc/status.proto + +require "google/protobuf" + +require "google/protobuf/any_pb" + + +descriptor_data = "\n\x17google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyBa\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xf8\x01\x01\xa2\x02\x03RPCb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Google + module Rpc + Status = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.rpc.Status").msgclass + end +end diff --git a/lib/udpa/annotations/migrate_pb.rb b/lib/udpa/annotations/migrate_pb.rb new file mode 100644 index 0000000..fbff48e --- /dev/null +++ b/lib/udpa/annotations/migrate_pb.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: udpa/annotations/migrate.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" + + +descriptor_data = "\n\x1eudpa/annotations/migrate.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto\"#\n\x11MigrateAnnotation\x12\x0e\n\x06rename\x18\x01 \x01(\t\"A\n\x16\x46ieldMigrateAnnotation\x12\x0e\n\x06rename\x18\x01 \x01(\t\x12\x17\n\x0foneof_promotion\x18\x02 \x01(\t\"0\n\x15\x46ileMigrateAnnotation\x12\x17\n\x0fmove_to_package\x18\x02 \x01(\t:`\n\x0fmessage_migrate\x12\x1f.google.protobuf.MessageOptions\x18\x8e\xe3\xffQ \x01(\x0b\x32#.udpa.annotations.MigrateAnnotation:a\n\rfield_migrate\x12\x1d.google.protobuf.FieldOptions\x18\x8e\xe3\xffQ \x01(\x0b\x32(.udpa.annotations.FieldMigrateAnnotation:Z\n\x0c\x65num_migrate\x12\x1c.google.protobuf.EnumOptions\x18\x8e\xe3\xffQ \x01(\x0b\x32#.udpa.annotations.MigrateAnnotation:e\n\x12\x65num_value_migrate\x12!.google.protobuf.EnumValueOptions\x18\x8e\xe3\xffQ \x01(\x0b\x32#.udpa.annotations.MigrateAnnotation:^\n\x0c\x66ile_migrate\x12\x1c.google.protobuf.FileOptions\x18\x8e\xe3\xffQ \x01(\x0b\x32\'.udpa.annotations.FileMigrateAnnotationb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Udpa + module Annotations + MigrateAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.MigrateAnnotation").msgclass + FieldMigrateAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.FieldMigrateAnnotation").msgclass + FileMigrateAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.FileMigrateAnnotation").msgclass + end +end diff --git a/lib/udpa/annotations/security_pb.rb b/lib/udpa/annotations/security_pb.rb new file mode 100644 index 0000000..527e45d --- /dev/null +++ b/lib/udpa/annotations/security_pb.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: udpa/annotations/security.proto + +require "google/protobuf" + +require "udpa/annotations/status_pb" +require "google/protobuf/any_pb" +require "google/protobuf/descriptor_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1fudpa/annotations/security.proto\x12\x10udpa.annotations\x1a\x1dudpa/annotations/status.proto\x1a\x19google/protobuf/any.proto\x1a google/protobuf/descriptor.proto\x1a\x17validate/validate.proto\"o\n\x17\x46ieldSecurityAnnotation\x12*\n\"configure_for_untrusted_downstream\x18\x01 \x01(\x08\x12(\n configure_for_untrusted_upstream\x18\x02 \x01(\x08:]\n\x08security\x12\x1d.google.protobuf.FieldOptions\x18\xb1\xf2\xa6\x05 \x01(\x0b\x32).udpa.annotations.FieldSecurityAnnotationB\x08\xba\x80\xc8\xd1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Udpa + module Annotations + FieldSecurityAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.FieldSecurityAnnotation").msgclass + end +end diff --git a/lib/udpa/annotations/sensitive_pb.rb b/lib/udpa/annotations/sensitive_pb.rb new file mode 100644 index 0000000..f460f9a --- /dev/null +++ b/lib/udpa/annotations/sensitive_pb.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: udpa/annotations/sensitive.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" + + +descriptor_data = "\n udpa/annotations/sensitive.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto:3\n\tsensitive\x12\x1d.google.protobuf.FieldOptions\x18\xf7\xb6\xc1$ \x01(\x08\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Udpa + module Annotations + end +end diff --git a/lib/udpa/annotations/status_pb.rb b/lib/udpa/annotations/status_pb.rb new file mode 100644 index 0000000..4297626 --- /dev/null +++ b/lib/udpa/annotations/status_pb.rb @@ -0,0 +1,21 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: udpa/annotations/status.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" + + +descriptor_data = "\n\x1dudpa/annotations/status.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto\"t\n\x10StatusAnnotation\x12\x18\n\x10work_in_progress\x18\x01 \x01(\x08\x12\x46\n\x16package_version_status\x18\x02 \x01(\x0e\x32&.udpa.annotations.PackageVersionStatus*]\n\x14PackageVersionStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x46ROZEN\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12 \n\x1cNEXT_MAJOR_VERSION_CANDIDATE\x10\x03:X\n\x0b\x66ile_status\x12\x1c.google.protobuf.FileOptions\x18\x87\x80\x99j \x01(\x0b\x32\".udpa.annotations.StatusAnnotationb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Udpa + module Annotations + StatusAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.StatusAnnotation").msgclass + PackageVersionStatus = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.PackageVersionStatus").enummodule + end +end diff --git a/lib/udpa/annotations/versioning_pb.rb b/lib/udpa/annotations/versioning_pb.rb new file mode 100644 
index 0000000..bde1292 --- /dev/null +++ b/lib/udpa/annotations/versioning_pb.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: udpa/annotations/versioning.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" + + +descriptor_data = "\n!udpa/annotations/versioning.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto\"5\n\x14VersioningAnnotation\x12\x1d\n\x15previous_message_type\x18\x01 \x01(\t:^\n\nversioning\x12\x1f.google.protobuf.MessageOptions\x18\xd3\x88\xe1\x03 \x01(\x0b\x32&.udpa.annotations.VersioningAnnotationb\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Udpa + module Annotations + VersioningAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("udpa.annotations.VersioningAnnotation").msgclass + end +end diff --git a/lib/validate/validate_pb.rb b/lib/validate/validate_pb.rb new file mode 100644 index 0000000..c28e655 --- /dev/null +++ b/lib/validate/validate_pb.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: validate/validate.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" +require "google/protobuf/duration_pb" +require "google/protobuf/timestamp_pb" + + +descriptor_data = "\n\x17validate/validate.proto\x12\x08validate\x1a google/protobuf/descriptor.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x98\x07\n\nFieldRules\x12\'\n\x07message\x18\x11 \x01(\x0b\x32\x16.validate.MessageRules\x12%\n\x05\x66loat\x18\x01 \x01(\x0b\x32\x14.validate.FloatRulesH\x00\x12\'\n\x06\x64ouble\x18\x02 \x01(\x0b\x32\x15.validate.DoubleRulesH\x00\x12%\n\x05int32\x18\x03 \x01(\x0b\x32\x14.validate.Int32RulesH\x00\x12%\n\x05int64\x18\x04 \x01(\x0b\x32\x14.validate.Int64RulesH\x00\x12\'\n\x06uint32\x18\x05 \x01(\x0b\x32\x15.validate.UInt32RulesH\x00\x12\'\n\x06uint64\x18\x06 \x01(\x0b\x32\x15.validate.UInt64RulesH\x00\x12\'\n\x06sint32\x18\x07 \x01(\x0b\x32\x15.validate.SInt32RulesH\x00\x12\'\n\x06sint64\x18\x08 \x01(\x0b\x32\x15.validate.SInt64RulesH\x00\x12)\n\x07\x66ixed32\x18\t \x01(\x0b\x32\x16.validate.Fixed32RulesH\x00\x12)\n\x07\x66ixed64\x18\n \x01(\x0b\x32\x16.validate.Fixed64RulesH\x00\x12+\n\x08sfixed32\x18\x0b \x01(\x0b\x32\x17.validate.SFixed32RulesH\x00\x12+\n\x08sfixed64\x18\x0c \x01(\x0b\x32\x17.validate.SFixed64RulesH\x00\x12#\n\x04\x62ool\x18\r \x01(\x0b\x32\x13.validate.BoolRulesH\x00\x12\'\n\x06string\x18\x0e \x01(\x0b\x32\x15.validate.StringRulesH\x00\x12%\n\x05\x62ytes\x18\x0f \x01(\x0b\x32\x14.validate.BytesRulesH\x00\x12#\n\x04\x65num\x18\x10 \x01(\x0b\x32\x13.validate.EnumRulesH\x00\x12+\n\x08repeated\x18\x12 \x01(\x0b\x32\x17.validate.RepeatedRulesH\x00\x12!\n\x03map\x18\x13 \x01(\x0b\x32\x12.validate.MapRulesH\x00\x12!\n\x03\x61ny\x18\x14 \x01(\x0b\x32\x12.validate.AnyRulesH\x00\x12+\n\x08\x64uration\x18\x15 \x01(\x0b\x32\x17.validate.DurationRulesH\x00\x12-\n\ttimestamp\x18\x16 \x01(\x0b\x32\x18.validate.TimestampRulesH\x00\x42\x06\n\x04type\"\x7f\n\nFloatRules\x12\r\n\x05\x63onst\x18\x01 
\x01(\x02\x12\n\n\x02lt\x18\x02 \x01(\x02\x12\x0b\n\x03lte\x18\x03 \x01(\x02\x12\n\n\x02gt\x18\x04 \x01(\x02\x12\x0b\n\x03gte\x18\x05 \x01(\x02\x12\n\n\x02in\x18\x06 \x03(\x02\x12\x0e\n\x06not_in\x18\x07 \x03(\x02\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x80\x01\n\x0b\x44oubleRules\x12\r\n\x05\x63onst\x18\x01 \x01(\x01\x12\n\n\x02lt\x18\x02 \x01(\x01\x12\x0b\n\x03lte\x18\x03 \x01(\x01\x12\n\n\x02gt\x18\x04 \x01(\x01\x12\x0b\n\x03gte\x18\x05 \x01(\x01\x12\n\n\x02in\x18\x06 \x03(\x01\x12\x0e\n\x06not_in\x18\x07 \x03(\x01\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x7f\n\nInt32Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x05\x12\n\n\x02lt\x18\x02 \x01(\x05\x12\x0b\n\x03lte\x18\x03 \x01(\x05\x12\n\n\x02gt\x18\x04 \x01(\x05\x12\x0b\n\x03gte\x18\x05 \x01(\x05\x12\n\n\x02in\x18\x06 \x03(\x05\x12\x0e\n\x06not_in\x18\x07 \x03(\x05\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x7f\n\nInt64Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x03\x12\n\n\x02lt\x18\x02 \x01(\x03\x12\x0b\n\x03lte\x18\x03 \x01(\x03\x12\n\n\x02gt\x18\x04 \x01(\x03\x12\x0b\n\x03gte\x18\x05 \x01(\x03\x12\n\n\x02in\x18\x06 \x03(\x03\x12\x0e\n\x06not_in\x18\x07 \x03(\x03\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x80\x01\n\x0bUInt32Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\r\x12\n\n\x02lt\x18\x02 \x01(\r\x12\x0b\n\x03lte\x18\x03 \x01(\r\x12\n\n\x02gt\x18\x04 \x01(\r\x12\x0b\n\x03gte\x18\x05 \x01(\r\x12\n\n\x02in\x18\x06 \x03(\r\x12\x0e\n\x06not_in\x18\x07 \x03(\r\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x80\x01\n\x0bUInt64Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x04\x12\n\n\x02lt\x18\x02 \x01(\x04\x12\x0b\n\x03lte\x18\x03 \x01(\x04\x12\n\n\x02gt\x18\x04 \x01(\x04\x12\x0b\n\x03gte\x18\x05 \x01(\x04\x12\n\n\x02in\x18\x06 \x03(\x04\x12\x0e\n\x06not_in\x18\x07 \x03(\x04\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x80\x01\n\x0bSInt32Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x11\x12\n\n\x02lt\x18\x02 \x01(\x11\x12\x0b\n\x03lte\x18\x03 \x01(\x11\x12\n\n\x02gt\x18\x04 \x01(\x11\x12\x0b\n\x03gte\x18\x05 
\x01(\x11\x12\n\n\x02in\x18\x06 \x03(\x11\x12\x0e\n\x06not_in\x18\x07 \x03(\x11\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x80\x01\n\x0bSInt64Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x12\x12\n\n\x02lt\x18\x02 \x01(\x12\x12\x0b\n\x03lte\x18\x03 \x01(\x12\x12\n\n\x02gt\x18\x04 \x01(\x12\x12\x0b\n\x03gte\x18\x05 \x01(\x12\x12\n\n\x02in\x18\x06 \x03(\x12\x12\x0e\n\x06not_in\x18\x07 \x03(\x12\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x81\x01\n\x0c\x46ixed32Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x07\x12\n\n\x02lt\x18\x02 \x01(\x07\x12\x0b\n\x03lte\x18\x03 \x01(\x07\x12\n\n\x02gt\x18\x04 \x01(\x07\x12\x0b\n\x03gte\x18\x05 \x01(\x07\x12\n\n\x02in\x18\x06 \x03(\x07\x12\x0e\n\x06not_in\x18\x07 \x03(\x07\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x81\x01\n\x0c\x46ixed64Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x06\x12\n\n\x02lt\x18\x02 \x01(\x06\x12\x0b\n\x03lte\x18\x03 \x01(\x06\x12\n\n\x02gt\x18\x04 \x01(\x06\x12\x0b\n\x03gte\x18\x05 \x01(\x06\x12\n\n\x02in\x18\x06 \x03(\x06\x12\x0e\n\x06not_in\x18\x07 \x03(\x06\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x82\x01\n\rSFixed32Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x0f\x12\n\n\x02lt\x18\x02 \x01(\x0f\x12\x0b\n\x03lte\x18\x03 \x01(\x0f\x12\n\n\x02gt\x18\x04 \x01(\x0f\x12\x0b\n\x03gte\x18\x05 \x01(\x0f\x12\n\n\x02in\x18\x06 \x03(\x0f\x12\x0e\n\x06not_in\x18\x07 \x03(\x0f\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x82\x01\n\rSFixed64Rules\x12\r\n\x05\x63onst\x18\x01 \x01(\x10\x12\n\n\x02lt\x18\x02 \x01(\x10\x12\x0b\n\x03lte\x18\x03 \x01(\x10\x12\n\n\x02gt\x18\x04 \x01(\x10\x12\x0b\n\x03gte\x18\x05 \x01(\x10\x12\n\n\x02in\x18\x06 \x03(\x10\x12\x0e\n\x06not_in\x18\x07 \x03(\x10\x12\x14\n\x0cignore_empty\x18\x08 \x01(\x08\"\x1a\n\tBoolRules\x12\r\n\x05\x63onst\x18\x01 \x01(\x08\"\xfd\x03\n\x0bStringRules\x12\r\n\x05\x63onst\x18\x01 \x01(\t\x12\x0b\n\x03len\x18\x13 \x01(\x04\x12\x0f\n\x07min_len\x18\x02 \x01(\x04\x12\x0f\n\x07max_len\x18\x03 \x01(\x04\x12\x11\n\tlen_bytes\x18\x14 \x01(\x04\x12\x11\n\tmin_bytes\x18\x04 
\x01(\x04\x12\x11\n\tmax_bytes\x18\x05 \x01(\x04\x12\x0f\n\x07pattern\x18\x06 \x01(\t\x12\x0e\n\x06prefix\x18\x07 \x01(\t\x12\x0e\n\x06suffix\x18\x08 \x01(\t\x12\x10\n\x08\x63ontains\x18\t \x01(\t\x12\x14\n\x0cnot_contains\x18\x17 \x01(\t\x12\n\n\x02in\x18\n \x03(\t\x12\x0e\n\x06not_in\x18\x0b \x03(\t\x12\x0f\n\x05\x65mail\x18\x0c \x01(\x08H\x00\x12\x12\n\x08hostname\x18\r \x01(\x08H\x00\x12\x0c\n\x02ip\x18\x0e \x01(\x08H\x00\x12\x0e\n\x04ipv4\x18\x0f \x01(\x08H\x00\x12\x0e\n\x04ipv6\x18\x10 \x01(\x08H\x00\x12\r\n\x03uri\x18\x11 \x01(\x08H\x00\x12\x11\n\x07uri_ref\x18\x12 \x01(\x08H\x00\x12\x11\n\x07\x61\x64\x64ress\x18\x15 \x01(\x08H\x00\x12\x0e\n\x04uuid\x18\x16 \x01(\x08H\x00\x12\x30\n\x10well_known_regex\x18\x18 \x01(\x0e\x32\x14.validate.KnownRegexH\x00\x12\x14\n\x06strict\x18\x19 \x01(\x08:\x04true\x12\x14\n\x0cignore_empty\x18\x1a \x01(\x08\x42\x0c\n\nwell_known\"\xfb\x01\n\nBytesRules\x12\r\n\x05\x63onst\x18\x01 \x01(\x0c\x12\x0b\n\x03len\x18\r \x01(\x04\x12\x0f\n\x07min_len\x18\x02 \x01(\x04\x12\x0f\n\x07max_len\x18\x03 \x01(\x04\x12\x0f\n\x07pattern\x18\x04 \x01(\t\x12\x0e\n\x06prefix\x18\x05 \x01(\x0c\x12\x0e\n\x06suffix\x18\x06 \x01(\x0c\x12\x10\n\x08\x63ontains\x18\x07 \x01(\x0c\x12\n\n\x02in\x18\x08 \x03(\x0c\x12\x0e\n\x06not_in\x18\t \x03(\x0c\x12\x0c\n\x02ip\x18\n \x01(\x08H\x00\x12\x0e\n\x04ipv4\x18\x0b \x01(\x08H\x00\x12\x0e\n\x04ipv6\x18\x0c \x01(\x08H\x00\x12\x14\n\x0cignore_empty\x18\x0e \x01(\x08\x42\x0c\n\nwell_known\"L\n\tEnumRules\x12\r\n\x05\x63onst\x18\x01 \x01(\x05\x12\x14\n\x0c\x64\x65\x66ined_only\x18\x02 \x01(\x08\x12\n\n\x02in\x18\x03 \x03(\x05\x12\x0e\n\x06not_in\x18\x04 \x03(\x05\".\n\x0cMessageRules\x12\x0c\n\x04skip\x18\x01 \x01(\x08\x12\x10\n\x08required\x18\x02 \x01(\x08\"\x80\x01\n\rRepeatedRules\x12\x11\n\tmin_items\x18\x01 \x01(\x04\x12\x11\n\tmax_items\x18\x02 \x01(\x04\x12\x0e\n\x06unique\x18\x03 \x01(\x08\x12#\n\x05items\x18\x04 \x01(\x0b\x32\x14.validate.FieldRules\x12\x14\n\x0cignore_empty\x18\x05 
\x01(\x08\"\xa3\x01\n\x08MapRules\x12\x11\n\tmin_pairs\x18\x01 \x01(\x04\x12\x11\n\tmax_pairs\x18\x02 \x01(\x04\x12\x11\n\tno_sparse\x18\x03 \x01(\x08\x12\"\n\x04keys\x18\x04 \x01(\x0b\x32\x14.validate.FieldRules\x12$\n\x06values\x18\x05 \x01(\x0b\x32\x14.validate.FieldRules\x12\x14\n\x0cignore_empty\x18\x06 \x01(\x08\"8\n\x08\x41nyRules\x12\x10\n\x08required\x18\x01 \x01(\x08\x12\n\n\x02in\x18\x02 \x03(\t\x12\x0e\n\x06not_in\x18\x03 \x03(\t\"\xbb\x02\n\rDurationRules\x12\x10\n\x08required\x18\x01 \x01(\x08\x12(\n\x05\x63onst\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12%\n\x02lt\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12&\n\x03lte\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12%\n\x02gt\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12&\n\x03gte\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12%\n\x02in\x18\x07 \x03(\x0b\x32\x19.google.protobuf.Duration\x12)\n\x06not_in\x18\x08 \x03(\x0b\x32\x19.google.protobuf.Duration\"\xba\x02\n\x0eTimestampRules\x12\x10\n\x08required\x18\x01 \x01(\x08\x12)\n\x05\x63onst\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x02lt\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03lte\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x02gt\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\x03gte\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06lt_now\x18\x07 \x01(\x08\x12\x0e\n\x06gt_now\x18\x08 \x01(\x08\x12)\n\x06within\x18\t \x01(\x0b\x32\x19.google.protobuf.Duration*F\n\nKnownRegex\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x14\n\x10HTTP_HEADER_NAME\x10\x01\x12\x15\n\x11HTTP_HEADER_VALUE\x10\x02:2\n\x08\x64isabled\x12\x1f.google.protobuf.MessageOptions\x18\xaf\x08 \x01(\x08:1\n\x07ignored\x12\x1f.google.protobuf.MessageOptions\x18\xb0\x08 \x01(\x08:0\n\x08required\x12\x1d.google.protobuf.OneofOptions\x18\xaf\x08 \x01(\x08:C\n\x05rules\x12\x1d.google.protobuf.FieldOptions\x18\xaf\x08 
\x01(\x0b\x32\x14.validate.FieldRulesBP\n\x1aio.envoyproxy.pgv.validateZ2github.com/envoyproxy/protoc-gen-validate/validate" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Validate + FieldRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.FieldRules").msgclass + FloatRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.FloatRules").msgclass + DoubleRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.DoubleRules").msgclass + Int32Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.Int32Rules").msgclass + Int64Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.Int64Rules").msgclass + UInt32Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.UInt32Rules").msgclass + UInt64Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.UInt64Rules").msgclass + SInt32Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.SInt32Rules").msgclass + SInt64Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.SInt64Rules").msgclass + Fixed32Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.Fixed32Rules").msgclass + Fixed64Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.Fixed64Rules").msgclass + SFixed32Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.SFixed32Rules").msgclass + SFixed64Rules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.SFixed64Rules").msgclass + BoolRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.BoolRules").msgclass + StringRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.StringRules").msgclass + BytesRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.BytesRules").msgclass + EnumRules = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.EnumRules").msgclass + MessageRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.MessageRules").msgclass + RepeatedRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.RepeatedRules").msgclass + MapRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.MapRules").msgclass + AnyRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.AnyRules").msgclass + DurationRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.DurationRules").msgclass + TimestampRules = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.TimestampRules").msgclass + KnownRegex = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("validate.KnownRegex").enummodule +end diff --git a/lib/xds/annotations/v3/status_pb.rb b/lib/xds/annotations/v3/status_pb.rb new file mode 100644 index 0000000..4c06674 --- /dev/null +++ b/lib/xds/annotations/v3/status_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/annotations/v3/status.proto + +require "google/protobuf" + +require "google/protobuf/descriptor_pb" + + +descriptor_data = "\n\x1fxds/annotations/v3/status.proto\x12\x12xds.annotations.v3\x1a google/protobuf/descriptor.proto\"0\n\x14\x46ileStatusAnnotation\x12\x18\n\x10work_in_progress\x18\x01 \x01(\x08\"3\n\x17MessageStatusAnnotation\x12\x18\n\x10work_in_progress\x18\x01 \x01(\x08\"1\n\x15\x46ieldStatusAnnotation\x12\x18\n\x10work_in_progress\x18\x01 \x01(\x08\"v\n\x10StatusAnnotation\x12\x18\n\x10work_in_progress\x18\x01 \x01(\x08\x12H\n\x16package_version_status\x18\x02 \x01(\x0e\x32(.xds.annotations.v3.PackageVersionStatus*]\n\x14PackageVersionStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x46ROZEN\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12 \n\x1cNEXT_MAJOR_VERSION_CANDIDATE\x10\x03:^\n\x0b\x66ile_status\x12\x1c.google.protobuf.FileOptions\x18\xea\xc8\x94l \x01(\x0b\x32(.xds.annotations.v3.FileStatusAnnotation:g\n\x0emessage_status\x12\x1f.google.protobuf.MessageOptions\x18\xea\xc8\x94l \x01(\x0b\x32+.xds.annotations.v3.MessageStatusAnnotation:a\n\x0c\x66ield_status\x12\x1d.google.protobuf.FieldOptions\x18\xea\xc8\x94l \x01(\x0b\x32).xds.annotations.v3.FieldStatusAnnotationB+Z)github.com/cncf/xds/go/xds/annotations/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Annotations + module V3 + FileStatusAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.annotations.v3.FileStatusAnnotation").msgclass + MessageStatusAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.annotations.v3.MessageStatusAnnotation").msgclass + FieldStatusAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.annotations.v3.FieldStatusAnnotation").msgclass + StatusAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.annotations.v3.StatusAnnotation").msgclass + PackageVersionStatus = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.annotations.v3.PackageVersionStatus").enummodule + end + end +end diff --git a/lib/xds/core/v3/authority_pb.rb b/lib/xds/core/v3/authority_pb.rb new file mode 100644 index 0000000..dc02485 --- /dev/null +++ b/lib/xds/core/v3/authority_pb.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: xds/core/v3/authority.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1bxds/core/v3/authority.proto\x12\x0bxds.core.v3\x1a\x1fxds/annotations/v3/status.proto\x1a\x17validate/validate.proto\"\"\n\tAuthority\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x42V\n\x16\x63om.github.xds.core.v3B\x0e\x41uthorityProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + Authority = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.Authority").msgclass + end + end +end diff --git a/lib/xds/core/v3/cidr_pb.rb b/lib/xds/core/v3/cidr_pb.rb new file mode 100644 index 0000000..9b94688 --- /dev/null +++ b/lib/xds/core/v3/cidr_pb.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/core/v3/cidr.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" +require "google/protobuf/wrappers_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x16xds/core/v3/cidr.proto\x12\x0bxds.core.v3\x1a\x1fxds/annotations/v3/status.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17validate/validate.proto\"h\n\tCidrRange\x12\x1f\n\x0e\x61\x64\x64ress_prefix\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12:\n\nprefix_len\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.UInt32ValueB\x08\xfa\x42\x05*\x03\x18\x80\x01\x42V\n\x16\x63om.github.xds.core.v3B\x0e\x43idrRangeProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + CidrRange = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.CidrRange").msgclass + end + end +end diff --git a/lib/xds/core/v3/collection_entry_pb.rb b/lib/xds/core/v3/collection_entry_pb.rb new file mode 100644 index 0000000..c6c21de --- /dev/null +++ b/lib/xds/core/v3/collection_entry_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/core/v3/collection_entry.proto + +require "google/protobuf" + +require "google/protobuf/any_pb" +require "xds/annotations/v3/status_pb" +require "xds/core/v3/resource_locator_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"xds/core/v3/collection_entry.proto\x12\x0bxds.core.v3\x1a\x19google/protobuf/any.proto\x1a\x1fxds/annotations/v3/status.proto\x1a\"xds/core/v3/resource_locator.proto\x1a\x17validate/validate.proto\"\x93\x02\n\x0f\x43ollectionEntry\x12/\n\x07locator\x18\x01 \x01(\x0b\x32\x1c.xds.core.v3.ResourceLocatorH\x00\x12@\n\x0cinline_entry\x18\x02 \x01(\x0b\x32(.xds.core.v3.CollectionEntry.InlineEntryH\x00\x1ar\n\x0bInlineEntry\x12*\n\x04name\x18\x01 \x01(\tB\x1c\xfa\x42\x19r\x17\x32\x15^[0-9a-zA-Z_\\-\\.~:]+$\x12\x0f\n\x07version\x18\x02 \x01(\t\x12&\n\x08resource\x18\x03 \x01(\x0b\x32\x14.google.protobuf.AnyB\x19\n\x12resource_specifier\x12\x03\xf8\x42\x01\x42\\\n\x16\x63om.github.xds.core.v3B\x14\x43ollectionEntryProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + CollectionEntry = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.CollectionEntry").msgclass + CollectionEntry::InlineEntry = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.CollectionEntry.InlineEntry").msgclass + end + end +end diff --git a/lib/xds/core/v3/context_params_pb.rb b/lib/xds/core/v3/context_params_pb.rb new file mode 100644 index 0000000..04b226d --- /dev/null +++ b/lib/xds/core/v3/context_params_pb.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/core/v3/context_params.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" + + +descriptor_data = "\n xds/core/v3/context_params.proto\x12\x0bxds.core.v3\x1a\x1fxds/annotations/v3/status.proto\"v\n\rContextParams\x12\x36\n\x06params\x18\x01 \x03(\x0b\x32&.xds.core.v3.ContextParams.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42Z\n\x16\x63om.github.xds.core.v3B\x12\x43ontextParamsProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + ContextParams = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.ContextParams").msgclass + end + end +end diff --git a/lib/xds/core/v3/extension_pb.rb b/lib/xds/core/v3/extension_pb.rb new file mode 100644 index 0000000..94ba928 --- /dev/null +++ b/lib/xds/core/v3/extension_pb.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/core/v3/extension.proto + +require "google/protobuf" + +require "validate/validate_pb" +require "google/protobuf/any_pb" + + +descriptor_data = "\n\x1bxds/core/v3/extension.proto\x12\x0bxds.core.v3\x1a\x17validate/validate.proto\x1a\x19google/protobuf/any.proto\"c\n\x14TypedExtensionConfig\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x34\n\x0ctyped_config\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyB\x08\xfa\x42\x05\xa2\x01\x02\x08\x01\x42N\n\x16\x63om.github.xds.core.v3B\x0e\x45xtensionProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + TypedExtensionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.TypedExtensionConfig").msgclass + end + end +end diff --git a/lib/xds/core/v3/resource_locator_pb.rb b/lib/xds/core/v3/resource_locator_pb.rb new file mode 100644 index 0000000..1eeb837 --- /dev/null +++ b/lib/xds/core/v3/resource_locator_pb.rb @@ -0,0 +1,26 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/core/v3/resource_locator.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" +require "xds/core/v3/context_params_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\"xds/core/v3/resource_locator.proto\x12\x0bxds.core.v3\x1a\x1fxds/annotations/v3/status.proto\x1a xds/core/v3/context_params.proto\x1a\x17validate/validate.proto\"\xc2\x03\n\x0fResourceLocator\x12=\n\x06scheme\x18\x01 \x01(\x0e\x32#.xds.core.v3.ResourceLocator.SchemeB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\n\n\x02id\x18\x02 \x01(\t\x12\x11\n\tauthority\x18\x03 \x01(\t\x12\x1e\n\rresource_type\x18\x04 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12\x33\n\rexact_context\x18\x05 \x01(\x0b\x32\x1a.xds.core.v3.ContextParamsH\x00\x12:\n\ndirectives\x18\x06 \x03(\x0b\x32&.xds.core.v3.ResourceLocator.Directive\x1a|\n\tDirective\x12+\n\x03\x61lt\x18\x01 \x01(\x0b\x32\x1c.xds.core.v3.ResourceLocatorH\x00\x12\x30\n\x05\x65ntry\x18\x02 \x01(\tB\x1f\xfa\x42\x1cr\x1a\x10\x01\x32\x16^[0-9a-zA-Z_\\-\\./~:]+$H\x00\x42\x10\n\tdirective\x12\x03\xf8\x42\x01\"\'\n\x06Scheme\x12\t\n\x05XDSTP\x10\x00\x12\x08\n\x04HTTP\x10\x01\x12\x08\n\x04\x46ILE\x10\x02\x42\x19\n\x17\x63ontext_param_specifierB\\\n\x16\x63om.github.xds.core.v3B\x14ResourceLocatorProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + ResourceLocator = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.ResourceLocator").msgclass + ResourceLocator::Directive = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.ResourceLocator.Directive").msgclass + ResourceLocator::Scheme = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.ResourceLocator.Scheme").enummodule + end + end +end diff --git a/lib/xds/core/v3/resource_name_pb.rb b/lib/xds/core/v3/resource_name_pb.rb new file mode 
100644 index 0000000..c01d9d2 --- /dev/null +++ b/lib/xds/core/v3/resource_name_pb.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: xds/core/v3/resource_name.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" +require "xds/core/v3/context_params_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1fxds/core/v3/resource_name.proto\x12\x0bxds.core.v3\x1a\x1fxds/annotations/v3/status.proto\x1a xds/core/v3/context_params.proto\x1a\x17validate/validate.proto\"z\n\x0cResourceName\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tauthority\x18\x02 \x01(\t\x12\x1e\n\rresource_type\x18\x03 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x12+\n\x07\x63ontext\x18\x04 \x01(\x0b\x32\x1a.xds.core.v3.ContextParamsBY\n\x16\x63om.github.xds.core.v3B\x11ResourceNameProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + ResourceName = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.ResourceName").msgclass + end + end +end diff --git a/lib/xds/core/v3/resource_pb.rb b/lib/xds/core/v3/resource_pb.rb new file mode 100644 index 0000000..55ffef5 --- /dev/null +++ b/lib/xds/core/v3/resource_pb.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/core/v3/resource.proto + +require "google/protobuf" + +require "google/protobuf/any_pb" +require "xds/annotations/v3/status_pb" +require "xds/core/v3/resource_name_pb" + + +descriptor_data = "\n\x1axds/core/v3/resource.proto\x12\x0bxds.core.v3\x1a\x19google/protobuf/any.proto\x1a\x1fxds/annotations/v3/status.proto\x1a\x1fxds/core/v3/resource_name.proto\"l\n\x08Resource\x12\'\n\x04name\x18\x01 \x01(\x0b\x32\x19.xds.core.v3.ResourceName\x12\x0f\n\x07version\x18\x02 \x01(\t\x12&\n\x08resource\x18\x03 \x01(\x0b\x32\x14.google.protobuf.AnyBU\n\x16\x63om.github.xds.core.v3B\rResourceProtoP\x01Z\"github.com/cncf/xds/go/xds/core/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Core + module V3 + Resource = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.core.v3.Resource").msgclass + end + end +end diff --git a/lib/xds/type/matcher/v3/domain_pb.rb b/lib/xds/type/matcher/v3/domain_pb.rb new file mode 100644 index 0000000..546e3e3 --- /dev/null +++ b/lib/xds/type/matcher/v3/domain_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/type/matcher/v3/domain.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" +require "xds/type/matcher/v3/matcher_pb" +require "validate/validate_pb" + + +descriptor_data = "\n xds/type/matcher/v3/domain.proto\x12\x13xds.type.matcher.v3\x1a\x1fxds/annotations/v3/status.proto\x1a!xds/type/matcher/v3/matcher.proto\x1a\x17validate/validate.proto\"\xc6\x01\n\x11ServerNameMatcher\x12M\n\x0f\x64omain_matchers\x18\x01 \x03(\x0b\x32\x34.xds.type.matcher.v3.ServerNameMatcher.DomainMatcher\x1a\x62\n\rDomainMatcher\x12\x19\n\x07\x64omains\x18\x01 \x03(\tB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x36\n\x08on_match\x18\x02 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatchBn\n\x1e\x63om.github.xds.type.matcher.v3B\x16ServerNameMatcherProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + ServerNameMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.ServerNameMatcher").msgclass + ServerNameMatcher::DomainMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.ServerNameMatcher.DomainMatcher").msgclass + end + end + end +end diff --git a/lib/xds/type/matcher/v3/http_inputs_pb.rb b/lib/xds/type/matcher/v3/http_inputs_pb.rb new file mode 100644 index 0000000..12b35e6 --- /dev/null +++ b/lib/xds/type/matcher/v3/http_inputs_pb.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/type/matcher/v3/http_inputs.proto + +require "google/protobuf" + + +descriptor_data = "\n%xds/type/matcher/v3/http_inputs.proto\x12\x13xds.type.matcher.v3\"\x1d\n\x1bHttpAttributesCelMatchInputB_\n\x1e\x63om.github.xds.type.matcher.v3B\x0fHttpInputsProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + HttpAttributesCelMatchInput = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.HttpAttributesCelMatchInput").msgclass + end + end + end +end diff --git a/lib/xds/type/matcher/v3/ip_pb.rb b/lib/xds/type/matcher/v3/ip_pb.rb new file mode 100644 index 0000000..8e3addc --- /dev/null +++ b/lib/xds/type/matcher/v3/ip_pb.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: xds/type/matcher/v3/ip.proto + +require "google/protobuf" + +require "xds/annotations/v3/status_pb" +require "xds/core/v3/cidr_pb" +require "xds/type/matcher/v3/matcher_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1cxds/type/matcher/v3/ip.proto\x12\x13xds.type.matcher.v3\x1a\x1fxds/annotations/v3/status.proto\x1a\x16xds/core/v3/cidr.proto\x1a!xds/type/matcher/v3/matcher.proto\x1a\x17validate/validate.proto\"\xe2\x01\n\tIPMatcher\x12\x45\n\x0erange_matchers\x18\x01 \x03(\x0b\x32-.xds.type.matcher.v3.IPMatcher.IPRangeMatcher\x1a\x8d\x01\n\x0eIPRangeMatcher\x12\x30\n\x06ranges\x18\x01 \x03(\x0b\x32\x16.xds.core.v3.CidrRangeB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x36\n\x08on_match\x18\x02 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatch\x12\x11\n\texclusive\x18\x03 \x01(\x08\x42\x66\n\x1e\x63om.github.xds.type.matcher.v3B\x0eIPMatcherProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3\xd2\xc6\xa4\xe1\x06\x02\x08\x01\x62\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool 
+pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + IPMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.IPMatcher").msgclass + IPMatcher::IPRangeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.IPMatcher.IPRangeMatcher").msgclass + end + end + end +end diff --git a/lib/xds/type/matcher/v3/matcher_pb.rb b/lib/xds/type/matcher/v3/matcher_pb.rb new file mode 100644 index 0000000..b8a8668 --- /dev/null +++ b/lib/xds/type/matcher/v3/matcher_pb.rb @@ -0,0 +1,34 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: xds/type/matcher/v3/matcher.proto + +require "google/protobuf" + +require "xds/core/v3/extension_pb" +require "xds/type/matcher/v3/string_pb" +require "validate/validate_pb" + + +descriptor_data = "\n!xds/type/matcher/v3/matcher.proto\x12\x13xds.type.matcher.v3\x1a\x1bxds/core/v3/extension.proto\x1a xds/type/matcher/v3/string.proto\x1a\x17validate/validate.proto\"\x96\x0e\n\x07Matcher\x12@\n\x0cmatcher_list\x18\x01 \x01(\x0b\x32(.xds.type.matcher.v3.Matcher.MatcherListH\x00\x12@\n\x0cmatcher_tree\x18\x02 \x01(\x0b\x32(.xds.type.matcher.v3.Matcher.MatcherTreeH\x00\x12\x39\n\x0bon_no_match\x18\x03 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatch\x1a\x97\x01\n\x07OnMatch\x12/\n\x07matcher\x18\x01 \x01(\x0b\x32\x1c.xds.type.matcher.v3.MatcherH\x00\x12\x33\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigH\x00\x12\x15\n\rkeep_matching\x18\x03 \x01(\x08\x42\x0f\n\x08on_match\x12\x03\xf8\x42\x01\x1a\xb9\x07\n\x0bMatcherList\x12Q\n\x08matchers\x18\x01 \x03(\x0b\x32\x35.xds.type.matcher.v3.Matcher.MatcherList.FieldMatcherB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x1a\xb2\x05\n\tPredicate\x12^\n\x10single_predicate\x18\x01 \x01(\x0b\x32\x42.xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicateH\x00\x12V\n\nor_matcher\x18\x02 
\x01(\x0b\x32@.xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateListH\x00\x12W\n\x0b\x61nd_matcher\x18\x03 \x01(\x0b\x32@.xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateListH\x00\x12I\n\x0bnot_matcher\x18\x04 \x01(\x0b\x32\x32.xds.type.matcher.v3.Matcher.MatcherList.PredicateH\x00\x1a\xd3\x01\n\x0fSinglePredicate\x12:\n\x05input\x18\x01 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x39\n\x0bvalue_match\x18\x02 \x01(\x0b\x32\".xds.type.matcher.v3.StringMatcherH\x00\x12\x39\n\x0c\x63ustom_match\x18\x03 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigH\x00\x42\x0e\n\x07matcher\x12\x03\xf8\x42\x01\x1a`\n\rPredicateList\x12O\n\tpredicate\x18\x01 \x03(\x0b\x32\x32.xds.type.matcher.v3.Matcher.MatcherList.PredicateB\x08\xfa\x42\x05\x92\x01\x02\x08\x02\x42\x11\n\nmatch_type\x12\x03\xf8\x42\x01\x1a\xa1\x01\n\x0c\x46ieldMatcher\x12O\n\tpredicate\x18\x01 \x01(\x0b\x32\x32.xds.type.matcher.v3.Matcher.MatcherList.PredicateB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12@\n\x08on_match\x18\x02 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatchB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x1a\xe5\x03\n\x0bMatcherTree\x12:\n\x05input\x18\x01 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12L\n\x0f\x65xact_match_map\x18\x02 \x01(\x0b\x32\x31.xds.type.matcher.v3.Matcher.MatcherTree.MatchMapH\x00\x12M\n\x10prefix_match_map\x18\x03 \x01(\x0b\x32\x31.xds.type.matcher.v3.Matcher.MatcherTree.MatchMapH\x00\x12\x39\n\x0c\x63ustom_match\x18\x04 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigH\x00\x1a\xaf\x01\n\x08MatchMap\x12Q\n\x03map\x18\x01 \x03(\x0b\x32:.xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntryB\x08\xfa\x42\x05\x9a\x01\x02\x08\x01\x1aP\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 
\x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatch:\x02\x38\x01\x42\x10\n\ttree_type\x12\x03\xf8\x42\x01\x42\x0e\n\x0cmatcher_typeB\\\n\x1e\x63om.github.xds.type.matcher.v3B\x0cMatcherProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + Matcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher").msgclass + Matcher::OnMatch = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.OnMatch").msgclass + Matcher::MatcherList = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherList").msgclass + Matcher::MatcherList::Predicate = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherList.Predicate").msgclass + Matcher::MatcherList::Predicate::SinglePredicate = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate").msgclass + Matcher::MatcherList::Predicate::PredicateList = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList").msgclass + Matcher::MatcherList::FieldMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher").msgclass + Matcher::MatcherTree = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherTree").msgclass + Matcher::MatcherTree::MatchMap = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Matcher.MatcherTree.MatchMap").msgclass + end + end + end +end diff --git a/lib/xds/type/matcher/v3/range_pb.rb b/lib/xds/type/matcher/v3/range_pb.rb new file mode 100644 index 0000000..7f43c88 --- /dev/null +++ b/lib/xds/type/matcher/v3/range_pb.rb @@ -0,0 +1,31 @@ +# 
frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: xds/type/matcher/v3/range.proto + +require "google/protobuf" + +require "xds/type/v3/range_pb" +require "xds/type/matcher/v3/matcher_pb" +require "validate/validate_pb" + + +descriptor_data = "\n\x1fxds/type/matcher/v3/range.proto\x12\x13xds.type.matcher.v3\x1a\x17xds/type/v3/range.proto\x1a!xds/type/matcher/v3/matcher.proto\x1a\x17validate/validate.proto\"\xdb\x01\n\x11Int64RangeMatcher\x12K\n\x0erange_matchers\x18\x01 \x03(\x0b\x32\x33.xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher\x1ay\n\x0cRangeMatcher\x12\x31\n\x06ranges\x18\x01 \x03(\x0b\x32\x17.xds.type.v3.Int64RangeB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x36\n\x08on_match\x18\x02 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatch\"\xdb\x01\n\x11Int32RangeMatcher\x12K\n\x0erange_matchers\x18\x01 \x03(\x0b\x32\x33.xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher\x1ay\n\x0cRangeMatcher\x12\x31\n\x06ranges\x18\x01 \x03(\x0b\x32\x17.xds.type.v3.Int32RangeB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x36\n\x08on_match\x18\x02 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatch\"\xde\x01\n\x12\x44oubleRangeMatcher\x12L\n\x0erange_matchers\x18\x01 \x03(\x0b\x32\x34.xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher\x1az\n\x0cRangeMatcher\x12\x32\n\x06ranges\x18\x01 \x03(\x0b\x32\x18.xds.type.v3.DoubleRangeB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x12\x36\n\x08on_match\x18\x02 \x01(\x0b\x32$.xds.type.matcher.v3.Matcher.OnMatchBZ\n\x1e\x63om.github.xds.type.matcher.v3B\nRangeProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + Int64RangeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Int64RangeMatcher").msgclass + Int64RangeMatcher::RangeMatcher = 
::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher").msgclass + Int32RangeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Int32RangeMatcher").msgclass + Int32RangeMatcher::RangeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher").msgclass + DoubleRangeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.DoubleRangeMatcher").msgclass + DoubleRangeMatcher::RangeMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher").msgclass + end + end + end +end diff --git a/lib/xds/type/matcher/v3/regex_pb.rb b/lib/xds/type/matcher/v3/regex_pb.rb new file mode 100644 index 0000000..79f809d --- /dev/null +++ b/lib/xds/type/matcher/v3/regex_pb.rb @@ -0,0 +1,25 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/type/matcher/v3/regex.proto + +require "google/protobuf" + +require "validate/validate_pb" + + +descriptor_data = "\n\x1fxds/type/matcher/v3/regex.proto\x12\x13xds.type.matcher.v3\x1a\x17validate/validate.proto\"\x94\x01\n\x0cRegexMatcher\x12K\n\ngoogle_re2\x18\x01 \x01(\x0b\x32+.xds.type.matcher.v3.RegexMatcher.GoogleRE2B\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00\x12\x16\n\x05regex\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x1a\x0b\n\tGoogleRE2B\x12\n\x0b\x65ngine_type\x12\x03\xf8\x42\x01\x42Z\n\x1e\x63om.github.xds.type.matcher.v3B\nRegexProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + RegexMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.RegexMatcher").msgclass + RegexMatcher::GoogleRE2 = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.RegexMatcher.GoogleRE2").msgclass + end + end + end +end diff --git a/lib/xds/type/matcher/v3/string_pb.rb b/lib/xds/type/matcher/v3/string_pb.rb new file mode 100644 index 0000000..f83eb34 --- /dev/null +++ b/lib/xds/type/matcher/v3/string_pb.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xds/type/matcher/v3/string.proto + +require "google/protobuf" + +require "xds/core/v3/extension_pb" +require "xds/type/matcher/v3/regex_pb" +require "validate/validate_pb" + + +descriptor_data = "\n xds/type/matcher/v3/string.proto\x12\x13xds.type.matcher.v3\x1a\x1bxds/core/v3/extension.proto\x1a\x1fxds/type/matcher/v3/regex.proto\x1a\x17validate/validate.proto\"\x96\x02\n\rStringMatcher\x12\x0f\n\x05\x65xact\x18\x01 \x01(\tH\x00\x12\x19\n\x06prefix\x18\x02 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x19\n\x06suffix\x18\x03 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x41\n\nsafe_regex\x18\x05 \x01(\x0b\x32!.xds.type.matcher.v3.RegexMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00\x12\x1b\n\x08\x63ontains\x18\x07 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01H\x00\x12\x33\n\x06\x63ustom\x18\x08 \x01(\x0b\x32!.xds.core.v3.TypedExtensionConfigH\x00\x12\x13\n\x0bignore_case\x18\x06 \x01(\x08\x42\x14\n\rmatch_pattern\x12\x03\xf8\x42\x01\"S\n\x11ListStringMatcher\x12>\n\x08patterns\x18\x01 \x03(\x0b\x32\".xds.type.matcher.v3.StringMatcherB\x08\xfa\x42\x05\x92\x01\x02\x08\x01\x42[\n\x1e\x63om.github.xds.type.matcher.v3B\x0bStringProtoP\x01Z*github.com/cncf/xds/go/xds/type/matcher/v3b\x06proto3" + +pool = ::Google::Protobuf::DescriptorPool.generated_pool +pool.add_serialized_file(descriptor_data) + +module Xds + module Type + module Matcher + module V3 + StringMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.StringMatcher").msgclass + ListStringMatcher = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("xds.type.matcher.v3.ListStringMatcher").msgclass + end + end + end +end diff --git a/proto/README.md b/proto/README.md new file mode 100644 index 0000000..6939ede --- /dev/null +++ b/proto/README.md @@ -0,0 +1,70 @@ +# Envoy Protobuf Definitions + +This directory contains vendored Envoy protobuf definitions for xDS support. 
+ +## Source + +These files come from [envoyproxy/data-plane-api](https://github.com/envoyproxy/data-plane-api). + +## Contents + +- `envoy/service/discovery/v3/` - Discovery service definitions (ADS, DiscoveryRequest/Response) +- `envoy/config/cluster/v3/` - Cluster definitions (CDS) +- `envoy/config/endpoint/v3/` - Endpoint definitions (EDS) +- `envoy/config/listener/v3/` - Listener definitions (LDS) +- `envoy/config/route/v3/` - Route definitions (RDS) +- `envoy/config/core/v3/` - Core types (Node, Address, etc.) +- `envoy/extensions/transport_sockets/tls/v3/` - TLS/Secret definitions (SDS) +- `google/protobuf/` - Google protobuf well-known types + +## Updating + +To update these files, run: + +```bash +./xds/update_protos.sh +``` + +Or manually: + +```bash +# Clone envoy data-plane-api +git clone --depth 1 https://github.com/envoyproxy/data-plane-api.git /tmp/envoy-api + +# Copy needed files +cp -r /tmp/envoy-api/envoy proto/ +cp -r /tmp/envoy-api/google proto/ + +# Cleanup +rm -rf /tmp/envoy-api +``` + +## Generating Ruby Code + +After updating proto files, generate Ruby classes: + +```bash +bundle exec bake async:grpc:xds:generate_protos +``` + +## Version + +These files are from the latest `main` branch of: +- `envoyproxy/data-plane-api` - Envoy API definitions +- `protocolbuffers/protobuf` - Google protobuf well-known types +- `googleapis/api-common-protos` - Google RPC status + +To lock to a specific version, modify `xds/update_protos.sh` to check out specific tags: + +```bash +cd /tmp/envoy-api +git checkout v1.30.0 # Use specific Envoy version +# Then copy files +``` + +## Note on Dependencies + +Some proto files import `udpa/annotations/*` and `validate/validate.proto`. These are optional annotations used for validation and versioning. 
They won't break compilation if missing, but you may want to include them for full compatibility: + +- `udpa` annotations: https://github.com/cncf/udpa +- `validate` annotations: https://github.com/envoyproxy/protoc-gen-validate diff --git a/proto/envoy/annotations/deprecation.proto b/proto/envoy/annotations/deprecation.proto new file mode 100644 index 0000000..c9a96f1 --- /dev/null +++ b/proto/envoy/annotations/deprecation.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.annotations; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/annotations"; + +import "google/protobuf/descriptor.proto"; + +// [#protodoc-title: Deprecation] +// Adds annotations for deprecated fields and enums to allow tagging proto +// fields as fatal by default and the minor version on which the field was +// deprecated. One Envoy release after deprecation, deprecated fields will be +// disallowed by default, a state which is reversible with +// :ref:`runtime overrides `. + +// Magic number in this file derived from top 28bit of SHA256 digest of +// "envoy.annotation.disallowed_by_default" and "envoy.annotation.deprecated_at_minor_version" +extend google.protobuf.FieldOptions { + bool disallowed_by_default = 189503207; + + // The API major and minor version on which the field was deprecated + // (e.g., "3.5" for major version 3 and minor version 5). + string deprecated_at_minor_version = 157299826; +} + +// Magic number in this file derived from top 28bit of SHA256 digest of +// "envoy.annotation.disallowed_by_default_enum" and +// "envoy.annotation.deprecated_at_minor_version_eum" +extend google.protobuf.EnumValueOptions { + bool disallowed_by_default_enum = 70100853; + + // The API major and minor version on which the enum value was deprecated + // (e.g., "3.5" for major version 3 and minor version 5). 
+ string deprecated_at_minor_version_enum = 181198657; +} diff --git a/proto/envoy/annotations/resource.proto b/proto/envoy/annotations/resource.proto new file mode 100644 index 0000000..3877afc --- /dev/null +++ b/proto/envoy/annotations/resource.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package envoy.annotations; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/annotations"; + +import "google/protobuf/descriptor.proto"; + +// [#protodoc-title: Resource] + +// Magic number in this file derived from top 28bit of SHA256 digest of "envoy.annotation.resource". +extend google.protobuf.ServiceOptions { + ResourceAnnotation resource = 265073217; +} + +message ResourceAnnotation { + // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource + // type. + string type = 1; +} diff --git a/proto/envoy/config/README.md b/proto/envoy/config/README.md new file mode 100644 index 0000000..279bd7c --- /dev/null +++ b/proto/envoy/config/README.md @@ -0,0 +1,3 @@ +Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration. + +Visibility should be constrained to none or `//envoy/config/bootstrap/v2` by default. diff --git a/proto/envoy/config/cluster/v3/BUILD b/proto/envoy/config/cluster/v3/BUILD new file mode 100644 index 0000000..3ed7505 --- /dev/null +++ b/proto/envoy/config/cluster/v3/BUILD @@ -0,0 +1,18 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/config/endpoint/v3:pkg", + "//envoy/type/metadata/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + "@com_github_cncf_xds//xds/core/v3:pkg", + "@com_github_cncf_xds//xds/type/matcher/v3:pkg", + ], +) diff --git a/proto/envoy/config/cluster/v3/circuit_breaker.proto b/proto/envoy/config/cluster/v3/circuit_breaker.proto new file mode 100644 index 0000000..fe798ce --- /dev/null +++ b/proto/envoy/config/cluster/v3/circuit_breaker.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package envoy.config.cluster.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v3"; +option java_outer_classname = "CircuitBreakerProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Circuit breakers] + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. +message CircuitBreakers { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.cluster.CircuitBreakers"; + + // A Thresholds defines CircuitBreaker settings for a + // :ref:`RoutingPriority`. 
+ // [#next-free-field: 9] + message Thresholds { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.cluster.CircuitBreakers.Thresholds"; + + message RetryBudget { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget"; + + // Specifies the limit on concurrent retries as a percentage of the sum of active requests and + // active pending requests. For example, if there are 100 active requests and the + // budget_percent is set to 25, there may be 25 active retries. + // + // This parameter is optional. Defaults to 20%. + type.v3.Percent budget_percent = 1; + + // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the + // number of active retries may never go below this number. + // + // This parameter is optional. Defaults to 3. + google.protobuf.UInt32Value min_retry_concurrency = 2; + } + + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + core.v3.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; + + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_connections = 2; + + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + // This limit is applied as a connection limit for non-HTTP traffic. + google.protobuf.UInt32Value max_pending_requests = 3; + + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + // This limit does not apply to non-HTTP traffic. + google.protobuf.UInt32Value max_requests = 4; + + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. 
+ google.protobuf.UInt32Value max_retries = 5; + + // Specifies a limit on concurrent retries in relation to the number of active requests. This + // parameter is optional. + // + // .. note:: + // + // If this field is set, the retry budget will override any configured retry circuit + // breaker. + RetryBudget retry_budget = 8; + + // If track_remaining is true, then stats will be published that expose + // the number of resources remaining until the circuit breakers open. If + // not specified, the default is false. + // + // .. note:: + // + // If a retry budget is used in lieu of the max_retries circuit breaker, + // the remaining retry resources remaining will not be tracked. + bool track_remaining = 6; + + // The maximum number of connection pools per cluster that Envoy will concurrently support at + // once. If not specified, the default is unlimited. Set this for clusters which create a + // large number of connection pools. See + // :ref:`Circuit Breaking ` for + // more details. + google.protobuf.UInt32Value max_connection_pools = 7; + } + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. + repeated Thresholds thresholds = 1; + + // Optional per-host limits which apply to each individual host in a cluster. + // + // .. note:: + // currently only the :ref:`max_connections + // ` field is supported for per-host limits. + // + // If multiple per-host :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no per-host Thresholds are defined for a given + // :ref:`RoutingPriority`, + // the cluster will not have per-host limits. 
+ repeated Thresholds per_host_thresholds = 2; +} diff --git a/proto/envoy/config/cluster/v3/cluster.proto b/proto/envoy/config/cluster/v3/cluster.proto new file mode 100644 index 0000000..1924090 --- /dev/null +++ b/proto/envoy/config/cluster/v3/cluster.proto @@ -0,0 +1,1407 @@ +syntax = "proto3"; + +package envoy.config.cluster.v3; + +import "envoy/config/cluster/v3/circuit_breaker.proto"; +import "envoy/config/cluster/v3/filter.proto"; +import "envoy/config/cluster/v3/outlier_detection.proto"; +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/health_check.proto"; +import "envoy/config/core/v3/protocol.proto"; +import "envoy/config/core/v3/resolver.proto"; +import "envoy/config/endpoint/v3/endpoint.proto"; +import "envoy/type/metadata/v3/metadata.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/core/v3/collection_entry.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/security.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v3"; +option java_outer_classname = "ClusterProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Cluster configuration] + +// Cluster list collections. Entries are ``Cluster`` resources or references. 
+// [#not-implemented-hide:] +message ClusterCollection { + xds.core.v3.CollectionEntry entries = 1; +} + +// Configuration for a single upstream cluster. +// [#next-free-field: 60] +message Cluster { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; + + // Refer to :ref:`service discovery type ` + // for an explanation on each type. + enum DiscoveryType { + // Refer to the :ref:`static discovery type` + // for an explanation. + STATIC = 0; + + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. + STRICT_DNS = 1; + + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + LOGICAL_DNS = 2; + + // Refer to the :ref:`service discovery type` + // for an explanation. + EDS = 3; + + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + ORIGINAL_DST = 4; + } + + // Refer to :ref:`load balancer type ` architecture + // overview section for information on each type. + enum LbPolicy { + reserved 4; + + reserved "ORIGINAL_DST_LB"; + + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + ROUND_ROBIN = 0; + + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + LEAST_REQUEST = 1; + + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + RING_HASH = 2; + + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + RANDOM = 3; + + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + MAGLEV = 5; + + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + CLUSTER_PROVIDED = 6; + + // Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. 
+ // This has been deprecated in favor of using the :ref:`load_balancing_policy + // ` field without + // setting any value in :ref:`lb_policy`. + LOAD_BALANCING_POLICY_CONFIG = 7; + } + + // When V4_ONLY is selected, the DNS resolver will only perform a lookup for + // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + // only perform a lookup for addresses in the IPv6 family. If AUTO is + // specified, the DNS resolver will first perform a lookup for addresses in + // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // This is semantically equivalent to a non-existent V6_PREFERRED option. + // AUTO is a legacy name that is more opaque than + // necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. + // If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the + // IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback + // target will only get v6 addresses if there were NO v4 addresses to return. + // If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, + // and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for + // upstream connections. Refer to :ref:`Happy Eyeballs Support ` + // for more information. + // For cluster types other than + // :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS`, + // this setting is + // ignored. + // [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] + enum DnsLookupFamily { + AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; + V4_PREFERRED = 3; + ALL = 4; + } + + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. 
+ USE_CONFIGURED_PROTOCOL = 0; + + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } + + // TransportSocketMatch specifies what transport socket config will be used + // when the match conditions are satisfied. + message TransportSocketMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.TransportSocketMatch"; + + // The name of the match, used in stats generation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Optional metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match + // against the values specified in this field. + google.protobuf.Struct match = 2; + + // The configuration of the transport socket. + // [#extension-category: envoy.transport_sockets.upstream] + core.v3.TransportSocket transport_socket = 3; + } + + // Extended cluster type. + message CustomClusterType { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CustomClusterType"; + + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + // [#extension-category: envoy.clusters] + google.protobuf.Any typed_config = 2; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.EdsClusterConfig"; + + // Configuration for the source of EDS updates for this Cluster. + core.v3.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. 
This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. This may be a xdstp:// URL. + string service_name = 2; + } + + // Optionally divide the endpoints in this cluster into subsets defined by + // endpoint metadata and selected by route and weighted cluster metadata. + // [#next-free-field: 9] + message LbSubsetConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.LbSubsetConfig"; + + // If NO_FALLBACK is selected, a result + // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + // any cluster endpoint may be returned (subject to policy, health checks, + // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + enum LbSubsetFallbackPolicy { + NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; + } + + enum LbSubsetMetadataFallbackPolicy { + // No fallback. Route metadata will be used as-is. + METADATA_NO_FALLBACK = 0; + + // A special metadata key ``fallback_list`` will be used to provide variants of metadata to try. + // Value of ``fallback_list`` key has to be a list. Every list element has to be a struct - it will + // be merged with route metadata, overriding keys that appear in both places. + // ``fallback_list`` entries will be used in order until a host is found. + // + // ``fallback_list`` key itself is removed from metadata before subset load balancing is performed. + // + // Example: + // + // for metadata: + // + // .. code-block:: yaml + // + // version: 1.0 + // fallback_list: + // - version: 2.0 + // hardware: c64 + // - hardware: c32 + // - version: 3.0 + // + // at first, metadata: + // + // .. code-block:: json + // + // {"version": "2.0", "hardware": "c64"} + // + // will be used for load balancing. If no host is found, metadata: + // + // .. code-block:: json + // + // {"version": "1.0", "hardware": "c32"} + // + // is next to try. 
If it still results in no host, finally metadata: + // + // .. code-block:: json + // + // {"version": "3.0"} + // + // is used. + FALLBACK_LIST = 1; + } + + // Specifications for subsets. + message LbSubsetSelector { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector"; + + // Allows to override top level fallback policy per selector. + enum LbSubsetSelectorFallbackPolicy { + // If NOT_DEFINED top level config fallback policy is used instead. + NOT_DEFINED = 0; + + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. + NO_FALLBACK = 1; + + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + ANY_ENDPOINT = 2; + + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + DEFAULT_SUBSET = 3; + + // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + // keys reduced to + // :ref:`fallback_keys_subset`. + // It allows for a fallback to a different, less specific selector if some of the keys of + // the selector are considered optional. + KEYS_SUBSET = 4; + } + + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for + // choosing a host, but updating hosts is faster, especially for large numbers of hosts. + // + // If a match is found to a host, that host will be used regardless of priority levels. + // + // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in ``keys`` + // will use only one of the hosts with the given key; no requests will be routed to the others. 
The cluster gauge + // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are + // present in the current configuration. + bool single_host_per_subset = 4; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; + + // Subset of + // :ref:`keys` used by + // :ref:`KEYS_SUBSET` + // fallback policy. + // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + // For any other fallback policy the parameter is not used and should not be set. + // Only values also present in + // :ref:`keys` are allowed, but + // ``fallback_keys_subset`` cannot be equal to ``keys``. + repeated string fallback_keys_subset = 3; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the ``envoy.lb`` + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + + // For each entry, LbEndpoint.Metadata's + // ``envoy.lb`` namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. 
+ repeated LbSubsetSelector subset_selectors = 3; + + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + bool locality_weight_aware = 4; + + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionately affected by the + // subset predicate. + bool scale_locality_weight = 5; + + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. + // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. + bool panic_mode_any = 6; + + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + bool list_as_any = 7; + + // Fallback mechanism that allows to try different route metadata until a host is found. 
+ // If load balancing process, including all its mechanisms (like + // :ref:`fallback_policy`) + // fails to select a host, this policy decides if and how the process is repeated using another metadata. + // + // The value defaults to + // :ref:`METADATA_NO_FALLBACK`. + LbSubsetMetadataFallbackPolicy metadata_fallback_policy = 8 + [(validate.rules).enum = {defined_only: true}]; + } + + // Configuration for :ref:`slow start mode `. + message SlowStartConfig { + // Represents the size of slow start window. + // If set, the newly created host remains in slow start mode starting from its creation time + // for the duration of slow start window. + google.protobuf.Duration slow_start_window = 1; + + // This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + // so that endpoint would get linearly increasing amount of traffic. + // When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + // The value of aggression parameter should be greater than 0.0. + // By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + // + // During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + // ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``, + // where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``. + // + // As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + // Once host exits slow start, time_factor and aggression no longer affect its weight. + core.v3.RuntimeDouble aggression = 2; + + // Configures the minimum percentage of origin weight that avoids too small new weight, + // which may cause endpoints in slow start mode receive no traffic in slow start window. + // If not specified, the default is 10%. 
+ type.v3.Percent min_weight_percent = 3;
+ }
+
+ // Specific configuration for the RoundRobin load balancing policy.
+ message RoundRobinLbConfig {
+ // Configuration for slow start mode.
+ // If this configuration is not set, slow start will not be enabled.
+ SlowStartConfig slow_start_config = 1;
+ }
+
+ // Specific configuration for the LeastRequest load balancing policy.
+ message LeastRequestLbConfig {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.api.v2.Cluster.LeastRequestLbConfig";
+
+ // The number of random healthy hosts from which the host with the fewest active requests will
+ // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
+ google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];
+
+ // The following formula is used to calculate the dynamic weights when hosts have different load
+ // balancing weights:
+
+ //
+ // ``weight = load_balancing_weight / (active_requests + 1)^active_request_bias``
+ //
+ // The larger the active request bias is, the more aggressively active requests will lower the
+ // effective weight when all host weights are not equal.
+ //
+ // ``active_request_bias`` must be greater than or equal to 0.0.
+ //
+ // When ``active_request_bias == 0.0`` the Least Request Load Balancer doesn't consider the number
+ // of active requests at the time it picks a host and behaves like the Round Robin Load
+ // Balancer.
+ //
+ // When ``active_request_bias > 0.0`` the Least Request Load Balancer scales the load balancing
+ // weight by the number of active requests at the time it does a pick.
+ //
+ // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's
+ // host sets changes, e.g., whenever there is a host membership update or a host load balancing
+ // weight change.
+ //
+ // .. note::
+ // This setting only takes effect if all host weights are not equal. 
+ core.v3.RuntimeDouble active_request_bias = 2;
+
+ // Configuration for slow start mode.
+ // If this configuration is not set, slow start will not be enabled.
+ SlowStartConfig slow_start_config = 3;
+ }
+
+ // Specific configuration for the :ref:`RingHash`
+ // load balancing policy.
+ message RingHashLbConfig {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.api.v2.Cluster.RingHashLbConfig";
+
+ // The hash function used to hash hosts onto the ketama ring.
+ enum HashFunction {
+ // Use `xxHash `_, this is the default hash function.
+ XX_HASH = 0;
+
+ // Use `MurmurHash2 `_, this is compatible with
+ // std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
+ // on Linux and not macOS.
+ MURMUR_HASH_2 = 1;
+ }
+
+ reserved 2;
+
+ // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each
+ // provided host) the better the request distribution will reflect the desired weights. Defaults
+ // to 1024 entries, and limited to 8M entries. See also
+ // :ref:`maximum_ring_size`.
+ google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];
+
+ // The hash function used to hash hosts onto the ketama ring. The value defaults to
+ // :ref:`XX_HASH`.
+ HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];
+
+ // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
+ // to further constrain resource use. See also
+ // :ref:`minimum_ring_size`.
+ google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];
+ }
+
+ // Specific configuration for the :ref:`Maglev`
+ // load balancing policy.
+ message MaglevLbConfig {
+ // The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee. 
+ // Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same
+ // upstream as it was before. Increasing the table size reduces the amount of disruption.
+ // The table size must be a prime number limited to 5000011. If it is not specified, the default is 65537.
+ google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}];
+ }
+
+ // Specific configuration for the
+ // :ref:`Original Destination `
+ // load balancing policy.
+ // [#extension: envoy.clusters.original_dst]
+ message OriginalDstLbConfig {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.api.v2.Cluster.OriginalDstLbConfig";
+
+ // When true, a HTTP header can be used to override the original dst address. The default header is
+ // :ref:`x-envoy-original-dst-host `.
+ //
+ // .. attention::
+ //
+ // This header isn't sanitized by default, so enabling this feature allows HTTP clients to
+ // route traffic to arbitrary hosts and/or ports, which may have serious security
+ // consequences.
+ //
+ // .. note::
+ //
+ // If the header appears multiple times only the first value is used.
+ bool use_http_header = 1;
+
+ // The http header to override destination address if :ref:`use_http_header `.
+ // is set to true. If the value is empty, :ref:`x-envoy-original-dst-host ` will be used.
+ string http_header_name = 2;
+
+ // The port to override for the original dst address. This port
+ // will take precedence over filter state and header override ports
+ google.protobuf.UInt32Value upstream_port_override = 3 [(validate.rules).uint32 = {lte: 65535}];
+
+ // The dynamic metadata key to override destination address.
+ // First the request metadata is considered, then the connection one.
+ type.metadata.v3.MetadataKey metadata_key = 4;
+ }
+
+ // Common configuration for all load balancer implementations. 
+ // [#next-free-field: 9] + message CommonLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CommonLbConfig"; + + // Configuration for :ref:`zone aware routing + // `. + message ZoneAwareLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig"; + + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + type.v3.Percent routing_enabled = 1; + + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + google.protobuf.UInt64Value min_cluster_size = 2; + + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + bool fail_traffic_on_panic = 3; + } + + // Configuration for :ref:`locality weighted load balancing + // ` + message LocalityWeightedLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig"; + } + + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + message ConsistentHashingLbConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig"; + + // If set to ``true``, the cluster will use hostname instead of the resolved + // address as the key to consistently hash to an upstream host. 
Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + bool use_hostname_for_hashing = 1; + + // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + // Minimum is 100. + // + // Applies to both Ring Hash and Maglev load balancers. + // + // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + // ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests + // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + // cascading overflow effect when choosing the next host in the ring/table). + // + // If weights are specified on the hosts, they are respected. + // + // This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts + // being probed, so use a higher value if you require better performance. + google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}]; + } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. 
+ type.v3.Percent healthy_panic_threshold = 1; + + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; + + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } + + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // .. note:: + // Merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + google.protobuf.Duration update_merge_window = 4; + + // If set to true, Envoy will :ref:`exclude ` new hosts + // when computing load balancing weights until they have been health checked for the first time. + // This will have no effect unless active health checking is also configured. + bool ignore_new_hosts_until_first_hc = 5; + + // If set to ``true``, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; + + // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + ConsistentHashingLbConfig consistent_hashing_lb_config = 7; + + // This controls what hosts are considered valid when using + // :ref:`host overrides `, which is used by some + // filters to modify the load balancing decision. 
+ // + // If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is + // set with an empty set of statuses then host overrides will be ignored by the load balancing. + core.v3.HealthStatusSet override_host_status = 8; + } + + message RefreshRate { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.RefreshRate"; + + // Specifies the base interval between refreshes. This parameter is required and must be greater + // than zero and less than + // :ref:`max_interval `. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {nanos: 1000000} + }]; + + // Specifies the maximum interval between refreshes. This parameter is optional, but must be + // greater than or equal to the + // :ref:`base_interval ` if set. The default + // is 10 times the :ref:`base_interval `. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; + } + + message PreconnectPolicy { + // Indicates how many streams (rounded up) can be anticipated per-upstream for each + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting + // will only be done if the upstream is healthy and the cluster has traffic. + // + // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + // established, one for the new incoming stream, and one for a presumed follow-up stream. For + // HTTP/2, only one connection would be established by default as one connection can + // serve both the original and presumed follow-up stream. + // + // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + // active streams, there would be 100 connections in use, and 50 connections preconnected. 
+ // This might be a useful value for something like short lived single-use connections,
+ // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection
+ // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP
+ // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more
+ // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue
+ // in case of unexpected disconnects where the connection could not be reused.
+ //
+ // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
+ // as needed to serve streams in flight. This means in steady state if a connection is torn down,
+ // subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection.
+ //
+ // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can
+ // harm latency more than the preconnecting helps.
+ google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1
+ [(validate.rules).double = {lte: 3.0 gte: 1.0}];
+
+ // Indicates how many streams (rounded up) can be anticipated across a cluster for each
+ // stream, useful for low QPS services. This is currently supported for a subset of
+ // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
+ // Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a
+ // cluster, doing best effort predictions of what upstream would be picked next and
+ // pre-establishing a connection.
+ //
+ // Preconnecting will be limited to one preconnect per configured upstream in the cluster and will
+ // only be done if there are healthy upstreams and the cluster has traffic.
+ // + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this + // cluster, one to the second on the assumption there will be a follow-up stream. + // + // If this value is not set, or set explicitly to one, Envoy will fetch as many connections + // as needed to serve streams in flight, so during warm up and in steady state if a connection + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for + // connection establishment. + // + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each + // upstream. + // + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. + google.protobuf.DoubleValue predictive_preconnect_ratio = 2 + [(validate.rules).double = {lte: 3.0 gte: 1.0}]; + } + + reserved 12, 15, 7, 11, 35; + + reserved "hosts", "tls_context", "extension_protocol_options"; + + // Configuration to use different transport sockets for different endpoints. The entry of + // ``envoy.transport_socket_match`` in the :ref:`LbEndpoint.Metadata + // ` is used to match against the + // transport sockets as they appear in the list. If a match is not found, the search continues in + // :ref:`LocalityLbEndpoints.Metadata + // `. The first :ref:`match + // ` is used. For example, with + // the following match + // + // .. code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... 
} # tls socket configuration
+ // - name: "defaultToPlaintext"
+ // match: {}
+ // transport_socket:
+ // name: envoy.transport_sockets.raw_buffer
+ //
+ // Connections to the endpoints whose metadata value under ``envoy.transport_socket_match``
+ // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration.
+ //
+ // If a :ref:`socket match ` with empty match
+ // criteria is provided, that always matches any endpoint. For example, the "defaultToPlaintext"
+ // socket match in case above.
+ //
+ // If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any
+ // ``TransportSocketMatch``, the locality metadata is then checked for a match. Barring any
+ // matches in the endpoint or locality metadata, the socket configuration falls back to using the
+ // ``tls_context`` or ``transport_socket`` specified in this cluster.
+ //
+ // This field allows gradual and flexible transport socket configuration changes.
+ //
+ // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,
+ // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true",
+ // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic
+ // has "acceptPlaintext": "true" metadata information.
+ //
+ // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS
+ // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding
+ // ``TransportSocketMatch`` in this field. Other client Envoys receive CDS without
+ // ``transport_socket_match`` set, and still send plain text traffic to the same cluster.
+ //
+ // This field can be used to specify custom transport socket configurations for health
+ // checks by adding matching key/value pairs in a health check's
+ // :ref:`transport socket match criteria ` field.
+ //
+ // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]
+ repeated TransportSocketMatch transport_socket_matches = 43; + + // Optional matcher that selects a transport socket from + // :ref:`transport_socket_matches `. + // + // This matcher uses the generic xDS matcher framework to select a named transport socket + // based on various inputs available at transport socket selection time. + // + // Supported matching inputs: + // + // * ``endpoint_metadata``: Extract values from the selected endpoint's metadata. + // * ``locality_metadata``: Extract values from the endpoint's locality metadata. + // * ``transport_socket_filter_state``: Extract values from filter state that was explicitly shared from + // downstream to upstream via ``TransportSocketOptions``. This enables flexible + // downstream-connection-based matching, such as: + // + // - Network namespace matching. + // - Custom connection attributes. + // - Any data explicitly passed via filter state. + // + // .. note:: + // Filter state sharing follows the same pattern as tunneling in Envoy. Filters must explicitly + // share data by setting filter state with the appropriate sharing mode. The filter state is + // then accessible via the ``transport_socket_filter_state`` input during transport socket selection. + // + // If this field is set, it takes precedence over legacy metadata-based selection + // performed by :ref:`transport_socket_matches + // ` alone. + // If the matcher does not yield a match, Envoy uses the default transport socket + // configured for the cluster. + // + // When using this field, each entry in + // :ref:`transport_socket_matches ` + // must have a unique ``name``. The matcher outcome is expected to reference one of + // these names. + xds.type.matcher.v3.Matcher transport_socket_matcher = 59; + + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. 
+ // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // An optional alternative to the cluster name to be used for observability. This name is used + // for emitting stats for the cluster and access logging the cluster name. This will appear as + // additional information in configuration dumps of a cluster's current status as + // :ref:`observability_name ` + // and as an additional tag "upstream_cluster.name" while tracing. + // + // .. note:: + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be confused with + // :ref:`Router Filter Header `. + string alt_stat_name = 28 [(udpa.annotations.field_migrate).rename = "observability_name"]; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + // If not set, a default value of 5s will be used. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_upstream = true]; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. 
+ // This field supersedes the ``hosts`` field in the v2 API. + // + // .. attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // + endpoint.v3.ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.v3.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + google.protobuf.UInt32Value max_requests_per_connection = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Optional :ref:`circuit breaking ` for the cluster. + CircuitBreakers circuit_breakers = 10; + + // HTTP protocol options that are applied only to upstream HTTP connections. + // These options apply to all HTTP versions. + // This has been deprecated in favor of + // :ref:`upstream_http_protocol_options ` + // in the :ref:`http_protocol_options ` message. + // upstream_http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Additional options when handling HTTP requests upstream. These options will be applicable to + // both HTTP1 and HTTP2 requests. 
+ // This has been deprecated in favor of + // :ref:`common_http_protocol_options ` + // in the :ref:`http_protocol_options ` message. + // common_http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + core.v3.HttpProtocolOptions common_http_protocol_options = 29 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Additional options when handling HTTP1 requests. + // This has been deprecated in favor of http_protocol_options fields in the + // :ref:`http_protocol_options ` message. + // http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. + core.v3.Http1ProtocolOptions http_protocol_options = 13 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, ``http2_protocol_options`` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + // This has been deprecated in favor of http2_protocol_options fields in the + // :ref:`http_protocol_options ` + // message. http2_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + // See :ref:`upstream_http_protocol_options + // ` + // for example usage. 
+ core.v3.Http2ProtocolOptions http2_protocol_options = 14 [
+ deprecated = true,
+ (udpa.annotations.security).configure_for_untrusted_upstream = true,
+ (envoy.annotations.deprecated_at_minor_version) = "3.0"
+ ];
+
+ // The extension_protocol_options field is used to provide extension-specific protocol options
+ // for upstream connections. The key should match the extension filter name, such as
+ // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
+ // specific options.
+ // [#next-major-version: make this a list of typed extensions.]
+ // [#extension-category: envoy.upstream_options]
+ map<string, google.protobuf.Any> typed_extension_protocol_options = 36;
+
+ // If the DNS refresh rate is specified and the cluster type is either
+ // :ref:`STRICT_DNS`,
+ // or :ref:`LOGICAL_DNS`,
+ // this value is used as the cluster’s DNS refresh
+ // rate. The value configured must be at least 1ms. If this setting is not specified, the
+ // value defaults to 5000ms. For cluster types other than
+ // :ref:`STRICT_DNS`
+ // and :ref:`LOGICAL_DNS`
+ // this setting is ignored.
+ // This field is deprecated in favor of using the :ref:`cluster_type`
+ // extension point and configuring it with :ref:`DnsCluster`.
+ // If :ref:`cluster_type` is configured with
+ // :ref:`DnsCluster`, this field will be ignored.
+ google.protobuf.Duration dns_refresh_rate = 16 [
+ deprecated = true,
+ (validate.rules).duration = {gt {nanos: 1000000}},
+ (envoy.annotations.deprecated_at_minor_version) = "3.0"
+ ];
+
+ // DNS jitter can be optionally specified if the cluster type is either
+ // :ref:`STRICT_DNS`,
+ // or :ref:`LOGICAL_DNS`.
+ // DNS jitter causes the cluster to refresh DNS entries later by a random amount of time to avoid a
+ // stampede of DNS requests. This value sets the upper bound (exclusive) for the random amount.
+ // There will be no jitter if this value is omitted.
For cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + // This field is deprecated in favor of using the :ref:`cluster_type` + // extension point and configuring it with :ref:`DnsCluster`. + // If :ref:`cluster_type` is configured with + // :ref:`DnsCluster`, this field will be ignored. + google.protobuf.Duration dns_jitter = 58 [ + deprecated = true, + (validate.rules).duration = {gte {}}, + (envoy.annotations.deprecated_at_minor_version) = "3.0" + ]; + + // If the DNS failure refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + // other than :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS` this setting is + // ignored. + // This field is deprecated in favor of using the :ref:`cluster_type` + // extension point and configuring it with :ref:`DnsCluster`. + // If :ref:`cluster_type` is configured with + // :ref:`DnsCluster`, this field will be ignored. + RefreshRate dns_failure_refresh_rate = 44 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + // This field is deprecated in favor of using the :ref:`cluster_type` + // extension point and configuring it with :ref:`DnsCluster`. + // If :ref:`cluster_type` is configured with + // :ref:`DnsCluster`, this field will be ignored. + bool respect_dns_ttl = 39 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. 
+ // For logical and strict dns cluster, this field is deprecated in favor of using the + // :ref:`cluster_type` + // extension point and configuring it with :ref:`DnsCluster`. + // If :ref:`cluster_type` is configured with + // :ref:`DnsCluster`, this field will be ignored. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + // This field is deprecated in favor of ``dns_resolution_config`` + // which aggregates all of the DNS resolver configuration in a single message. + repeated core.v3.Address dns_resolvers = 18 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Always use TCP queries instead of UDP queries for DNS lookups. + // This field is deprecated in favor of ``dns_resolution_config`` + // which aggregates all of the DNS resolver configuration in a single message. + bool use_tcp_for_dns_lookups = 45 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // DNS resolution configuration which includes the underlying dns resolver addresses and options. + // This field is deprecated in favor of + // :ref:`typed_dns_resolver_config `. + core.v3.DnsResolutionConfig dns_resolution_config = 53 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + // or any other DNS resolver types and the related parameters. 
+ // For example, an object of + // :ref:`CaresDnsResolverConfig ` + // can be packed into this ``typed_dns_resolver_config``. This configuration replaces the + // :ref:`dns_resolution_config ` + // configuration. + // During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exists, + // when ``typed_dns_resolver_config`` is in place, Envoy will use it and ignore ``dns_resolution_config``. + // When ``typed_dns_resolver_config`` is missing, the default behavior is in place. + // Also note that this field is deprecated for logical dns and strict dns clusters and will be ignored when + // :ref:`cluster_type` is configured with + // :ref:`DnsCluster`. + // [#extension-category: envoy.network.dns_resolver] + core.v3.TypedExtensionConfig typed_dns_resolver_config = 55; + + // Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // or :ref:`Redis Cluster`. + // If true, cluster readiness blocks on warm-up. If false, the cluster will complete + // initialization whether or not warm-up has completed. Defaults to true. + google.protobuf.BoolValue wait_for_warm_on_init = 54; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. 
Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.v3.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH`, + // :ref:`MAGLEV` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Maglev load balancing policy. + MaglevLbConfig maglev_lb_config = 52; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + + // Optional configuration for the RoundRobin load balancing policy. + RoundRobinLbConfig round_robin_lb_config = 56; + } + + // Common configuration for all load balancer implementations. 
+ CommonLbConfig common_lb_config = 27; + + // Optional custom transport socket implementation to use for upstream connections. + // To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + // :ref:`UpstreamTlsContexts ` in the ``typed_config``. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.v3.TransportSocket transport_socket = 24; + + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as ``envoy.filters.http.router``. + core.v3.Metadata metadata = 25; + + // Determines how Envoy selects the protocol used to speak to upstream hosts. + // This has been deprecated in favor of setting explicit protocol selection + // in the :ref:`http_protocol_options + // ` message. + // http_protocol_options can be set via the cluster's + // :ref:`extension_protocol_options`. + ClusterProtocolSelection protocol_selection = 26 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Optional options for upstream connections. + UpstreamConnectionOptions upstream_connection_options = 30; + + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. + // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. 
If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. + bool close_connections_on_host_health_failure = 31; + + // If set to true, Envoy will ignore the health value of a host when processing its removal + // from service discovery. This means that if active health checking is used, Envoy will *not* + // wait for the endpoint to go unhealthy before removing it. + bool ignore_health_on_host_removal = 32; + + // An (optional) network filter chain, listed in the order the filters should be applied. + // The chain will be applied to all outgoing connections that Envoy makes to the upstream + // servers of this cluster. + repeated Filter filters = 40; + + // If this field is set and is supported by the client, it will supersede the value of + // :ref:`lb_policy`. + LoadBalancingPolicy load_balancing_policy = 41; + + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] 
+ core.v3.ConfigSource lrs_server = 42; + + // A list of metric names from :ref:`ORCA load reports ` to propagate to LRS. + // + // If not specified, then ORCA load reports will not be propagated to LRS. + // + // For map fields in the ORCA proto, the string will be of the form ``.``. + // For example, the string ``named_metrics.foo`` will mean to look for the key ``foo`` in the ORCA + // :ref:`named_metrics ` field. + // + // The special map key ``*`` means to report all entries in the map (e.g., ``named_metrics.*`` means to + // report all entries in the ORCA named_metrics field). Note that this should be used only with trusted + // backends. + // + // The metric names in LRS will follow the same semantics as this field. In other words, if this field + // contains ``named_metrics.foo``, then the LRS load report will include the data with that same string + // as the key. + repeated string lrs_report_endpoint_metrics = 57; + + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + // + // .. attention:: + // + // This field has been deprecated in favor of ``timeout_budgets``, part of + // :ref:`track_cluster_stats `. + bool track_timeout_budgets = 47 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Optional customization and configuration of upstream connection pool, and upstream type. + // + // Currently this field only applies for HTTP traffic but is designed for eventual use for custom + // TCP upstreams. 
+ // + // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + // HTTP, using the http connection pool and the codec from ``http2_protocol_options`` + // + // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + // + // The default pool used is the generic connection pool which creates the HTTP upstream for most + // HTTP requests, and the TCP upstream if CONNECT termination is configured. + // + // If users desire custom connection pool or upstream behavior, for example terminating + // CONNECT only if a custom filter indicates it is appropriate, the custom factories + // can be registered and configured here. + // [#extension-category: envoy.upstreams] + core.v3.TypedExtensionConfig upstream_config = 48; + + // Configuration to track optional cluster stats. + TrackClusterStats track_cluster_stats = 49; + + // Preconnect configuration for this cluster. + PreconnectPolicy preconnect_policy = 50; + + // If ``connection_pool_per_downstream_connection`` is true, the cluster will use a separate + // connection pool for every downstream connection + bool connection_pool_per_downstream_connection = 51; +} + +// Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. 
For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. +// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +message LoadBalancingPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy"; + + message Policy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.LoadBalancingPolicy.Policy"; + + reserved 2, 1, 3; + + reserved "config", "name", "typed_config"; + + // [#extension-category: envoy.load_balancing_policies] + core.v3.TypedExtensionConfig typed_extension_config = 4; + } + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. + repeated Policy policies = 1; +} + +message UpstreamConnectionOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.UpstreamConnectionOptions"; + + enum FirstAddressFamilyVersion { + // respect the native ranking of destination ip addresses returned from dns + // resolution + DEFAULT = 0; + + V4 = 1; + + V6 = 2; + } + + message HappyEyeballsConfig { + // Specify the IP address family to attempt connection first in happy + // eyeballs algorithm according to RFC8305#section-4. 
+ FirstAddressFamilyVersion first_address_family_version = 1; + + // Specify the number of addresses of the first_address_family_version being + // attempted for connection before the other address family. + google.protobuf.UInt32Value first_address_family_count = 2 [(validate.rules).uint32 = {gte: 1}]; + } + + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + core.v3.TcpKeepalive tcp_keepalive = 1; + + // If enabled, associates the interface name of the local address with the upstream connection. + // This can be used by extensions during processing of requests. The association mechanism is + // implementation specific. Defaults to false due to performance concerns. + bool set_local_interface_name_on_upstream_connections = 2; + + // Configurations for happy eyeballs algorithm. + // Add configs for first_address_family_version and first_address_family_count + // when sorting destination ip addresses. + HappyEyeballsConfig happy_eyeballs_config = 3; +} + +message TrackClusterStats { + // If timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool timeout_budgets = 1; + + // If request_response_sizes is true, then the :ref:`histograms + // ` tracking header and body sizes + // of requests and responses will be published. Additionally, number of headers in the requests and responses will be tracked. + bool request_response_sizes = 2; + + // If true, some stats will be emitted per-endpoint, similar to the stats in admin ``/clusters`` + // output. + // + // This does not currently output correct stats during a hot-restart. + // + // This is not currently implemented by all stat sinks. 
+ // + // These stats do not honor filtering or tag extraction rules in :ref:`StatsConfig + // ` (but fixed-value tags are supported). Admin + // endpoint filtering is supported. + // + // This may not be used at the same time as + // :ref:`load_stats_config `. + bool per_endpoint_stats = 3; +} diff --git a/proto/envoy/config/cluster/v3/filter.proto b/proto/envoy/config/cluster/v3/filter.proto new file mode 100644 index 0000000..54611ed --- /dev/null +++ b/proto/envoy/config/cluster/v3/filter.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package envoy.config.cluster.v3; + +import "envoy/config/core/v3/config_source.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v3"; +option java_outer_classname = "FilterProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Upstream network filters] +// Upstream network filters apply to the connections to the upstream cluster hosts. + +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter"; + + // The name of the filter configuration. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + // Note that Envoy's :ref:`downstream network + // filters ` are not valid upstream network filters. + // Only one of typed_config or config_discovery can be used. + google.protobuf.Any typed_config = 2; + + // Configuration source specifier for an extension configuration discovery + // service. 
In case of a failure and without the default configuration, the + // listener closes the connections. + // Only one of typed_config or config_discovery can be used. + core.v3.ExtensionConfigSource config_discovery = 3; +} diff --git a/proto/envoy/config/cluster/v3/outlier_detection.proto b/proto/envoy/config/cluster/v3/outlier_detection.proto new file mode 100644 index 0000000..822d81d --- /dev/null +++ b/proto/envoy/config/cluster/v3/outlier_detection.proto @@ -0,0 +1,180 @@ +syntax = "proto3"; + +package envoy.config.cluster.v3; + +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.cluster.v3"; +option java_outer_classname = "OutlierDetectionProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Outlier detection] + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +// [#next-free-field: 26] +message OutlierDetection { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.cluster.OutlierDetection"; + + // The number of consecutive server-side error responses (for HTTP traffic, + // 5xx responses; for TCP traffic, connection failures; for Redis, failure to + // respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_5xx = 1; + + // The time interval between ejection analysis sweeps. This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. 
+ google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; + + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. + // Defaults to 30000ms or 30s. + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; + + // The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% . + // Will eject at least one host regardless of the value if :ref:`always_eject_one_host` is enabled. + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}]; + + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + google.protobuf.UInt32Value success_rate_minimum_hosts = 7; + + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. 
If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. + google.protobuf.UInt32Value success_rate_request_volume = 8; + + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + google.protobuf.UInt32Value success_rate_stdev_factor = 9; + + // The number of consecutive gateway failures (502, 503, 504 status codes) + // before a consecutive gateway failure ejection occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_gateway_failure = 10; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 + [(validate.rules).uint32 = {lte: 100}]; + + // Determines whether to distinguish local origin failures from external errors. If set to true + // the following configuration parameters are taken into account: + // :ref:`consecutive_local_origin_failure`, + // :ref:`enforcing_consecutive_local_origin_failure` + // and + // :ref:`enforcing_local_origin_success_rate`. + // Defaults to false. + bool split_external_local_origin_errors = 12; + + // The number of consecutive locally originated failures before ejection + // occurs. Defaults to 5. Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. 
+ google.protobuf.UInt32Value consecutive_local_origin_failure = 13; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive locally originated failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics for locally originated errors. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. + // Parameter takes effect only when + // :ref:`split_external_local_origin_errors` + // is set to true. + google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15 + [(validate.rules).uint32 = {lte: 100}]; + + // The failure percentage to use when determining failure percentage-based outlier detection. If + // the failure percentage of a given host is greater than or equal to this value, it will be + // ejected. Defaults to 85. + google.protobuf.UInt32Value failure_percentage_threshold = 16 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // failure percentage statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + // + // [#next-major-version: setting this without setting failure_percentage_threshold should be + // invalid in v4.] + google.protobuf.UInt32Value enforcing_failure_percentage = 17 + [(validate.rules).uint32 = {lte: 100}]; + + // The % chance that a host will be actually ejected when an outlier status is detected through + // local-origin failure percentage statistics. 
This setting can be used to disable ejection or to + // ramp it up slowly. Defaults to 0. + google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18 + [(validate.rules).uint32 = {lte: 100}]; + + // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. + // If the total number of hosts in the cluster is less than this value, failure percentage-based + // ejection will not be performed. Defaults to 5. + google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19; + + // The minimum number of total requests that must be collected in one interval (as defined by the + // interval duration above) to perform failure percentage-based ejection for this host. If the + // volume is lower than this setting, failure percentage-based ejection will not be performed for + // this host. Defaults to 50. + google.protobuf.UInt32Value failure_percentage_request_volume = 20; + + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. If not specified, the default value (300000ms or 300s) or + // :ref:`base_ejection_time` value is applied, whatever is larger. + google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; + + // The maximum amount of jitter to add to the ejection time, in order to prevent + // a 'thundering herd' effect where all proxies try to reconnect to host at the same time. + // See :ref:`max_ejection_time_jitter` + // Defaults to 0s. + google.protobuf.Duration max_ejection_time_jitter = 22; + + // If active health checking is enabled and a host is ejected by outlier detection, a successful active health check + // unejects the host by default and considers it as healthy. Unejection also clears all the outlier detection counters. + // To change this default behavior set this config to ``false`` where active health checking will not uneject the host. + // Defaults to true. 
+ google.protobuf.BoolValue successful_active_health_check_uneject_host = 23; + + // Set of host's passive monitors. + // [#not-implemented-hide:] + repeated core.v3.TypedExtensionConfig monitors = 24; + + // If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent`. + // Defaults to false. + google.protobuf.BoolValue always_eject_one_host = 25; +} diff --git a/proto/envoy/config/core/v3/BUILD b/proto/envoy/config/core/v3/BUILD new file mode 100644 index 0000000..15185f7 --- /dev/null +++ b/proto/envoy/config/core/v3/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + "@com_github_cncf_xds//xds/annotations/v3:pkg", + "@com_github_cncf_xds//xds/core/v3:pkg", + ], +) diff --git a/proto/envoy/config/core/v3/address.proto b/proto/envoy/config/core/v3/address.proto new file mode 100644 index 0000000..17a6826 --- /dev/null +++ b/proto/envoy/config/core/v3/address.proto @@ -0,0 +1,214 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/socket_option.proto"; + +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "AddressProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Network addresses] + +message Pipe { + 
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Pipe"; + + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + string path = 1 [(validate.rules).string = {min_len: 1}]; + + // The mode for the Pipe. Not applicable for abstract sockets. + uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; +} + +// The address represents an envoy internal listener. +// [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] +message EnvoyInternalAddress { + oneof address_name_specifier { + option (validate.required) = true; + + // Specifies the :ref:`name ` of the + // internal listener. + string server_listener_name = 1; + } + + // Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a + // single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for + // example, may be set to the final destination IP for the target internal listener. + string endpoint_id = 2; +} + +// [#next-free-field: 8] +message SocketAddress { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; + + enum Protocol { + TCP = 0; + UDP = 1; + } + + Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; + + // The address for this socket. :ref:`Listeners ` will bind + // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` + // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: + // It is possible to distinguish a Listener address via the prefix/suffix matching + // in :ref:`FilterChainMatch `.] When used + // within an upstream :ref:`BindConfig `, the address + // controls the source address of outbound connections. 
For :ref:`clusters + // `, the cluster type determines whether the + // address must be an IP (``STATIC`` or ``EDS`` clusters) or a hostname resolved by DNS + // (``STRICT_DNS`` or ``LOGICAL_DNS`` clusters). Address resolution can be customized + // via :ref:`resolver_name `. + string address = 2 [(validate.rules).string = {min_len: 1}]; + + oneof port_specifier { + option (validate.required) = true; + + uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; + + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + string named_port = 4; + } + + // The name of the custom resolver. This must have been registered with Envoy. If + // this is empty, a context dependent default applies. If the address is a concrete + // IP address, no resolution will occur. If address is a hostname this + // should be set for resolution other than DNS. Specifying a custom resolver with + // ``STRICT_DNS`` or ``LOGICAL_DNS`` will generate an error at runtime. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibility + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; + + // Filepath that specifies the Linux network namespace this socket will be created in (see ``man 7 + // network_namespaces``). If this field is set, Envoy will create the socket in the specified + // network namespace. + // + // .. note:: + // Setting this parameter requires Envoy to run with the ``CAP_NET_ADMIN`` capability. + // + // .. attention:: + // Network namespaces are only configurable on Linux. Otherwise, this field has no effect. 
+ string network_namespace_filepath = 7; +} + +message TcpKeepalive { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TcpKeepalive"; + + // Maximum number of keepalive probes to send without response before deciding + // the connection is dead. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 9.) Setting this to ``0`` disables TCP keepalive. + google.protobuf.UInt32Value keepalive_probes = 1; + + // The number of seconds a connection needs to be idle before keep-alive probes + // start being sent. Default is to use the OS level configuration (unless + // overridden, Linux defaults to 7200s (i.e., 2 hours.) Setting this to ``0`` disables + // TCP keepalive. + google.protobuf.UInt32Value keepalive_time = 2; + + // The number of seconds between keep-alive probes. Default is to use the OS + // level configuration (unless overridden, Linux defaults to 75s.) Setting this to + // ``0`` disables TCP keepalive. + google.protobuf.UInt32Value keepalive_interval = 3; +} + +message ExtraSourceAddress { + // The additional address to bind. + SocketAddress address = 1 [(validate.rules).message = {required: true}]; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the BindConfig. If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. + SocketOptionsOverride socket_options = 2; +} + +// [#next-free-field: 7] +message BindConfig { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig"; + + // The address to bind to when creating a socket. + SocketAddress source_address = 1; + + // Whether to set the ``IP_FREEBIND`` option when creating the socket. 
When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option ``IP_FREEBIND`` is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + google.protobuf.BoolValue freebind = 2; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated SocketOption socket_options = 3; + + // Extra source addresses appended to the address specified in the ``source_address`` + // field. This enables to specify multiple source addresses. + // The source address selection is determined by :ref:`local_address_selector + // `. + repeated ExtraSourceAddress extra_source_addresses = 5; + + // Deprecated by + // :ref:`extra_source_addresses ` + repeated SocketAddress additional_source_addresses = 4 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Custom local address selector to override the default (i.e. + // :ref:`DefaultLocalAddressSelector + // `). + // [#extension-category: envoy.upstream.local_address_selector] + TypedExtensionConfig local_address_selector = 6; +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. +message Address { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Address"; + + oneof address { + option (validate.required) = true; + + SocketAddress socket_address = 1; + + Pipe pipe = 2; + + // Specifies a user-space address handled by :ref:`internal listeners + // `. + EnvoyInternalAddress envoy_internal_address = 3; + } +} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. 
+message CidrRange { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange"; + + // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. + string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; +} diff --git a/proto/envoy/config/core/v3/backoff.proto b/proto/envoy/config/core/v3/backoff.proto new file mode 100644 index 0000000..435b361 --- /dev/null +++ b/proto/envoy/config/core/v3/backoff.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "BackoffProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Backoff strategy] + +// Configuration defining a jittered exponential back off strategy. +message BackoffStrategy { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BackoffStrategy"; + + // The base interval to be used for the next back off computation. It should + // be greater than zero and less than or equal to :ref:`max_interval + // `. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, + // but must be greater than or equal to the :ref:`base_interval + // ` if set. The default + // is 10 times the :ref:`base_interval + // `. 
+ google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; +} diff --git a/proto/envoy/config/core/v3/base.proto b/proto/envoy/config/core/v3/base.proto new file mode 100644 index 0000000..978f365 --- /dev/null +++ b/proto/envoy/config/core/v3/base.proto @@ -0,0 +1,662 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/backoff.proto"; +import "envoy/config/core/v3/http_uri.proto"; +import "envoy/type/v3/percent.proto"; +import "envoy/type/v3/semantic_version.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/core/v3/context_params.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "BaseProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common types] + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. 
+enum RequestMethod { + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; + PATCH = 9; +} + +// Identifies the direction of the traffic relative to the local Envoy. +enum TrafficDirection { + // Default option is unspecified. + UNSPECIFIED = 0; + + // The transport is used for incoming traffic. + INBOUND = 1; + + // The transport is used for outgoing traffic. + OUTBOUND = 2; +} + +// Identifies location of where either Envoy runs or where upstream hosts run. +message Locality { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Locality"; + + // Region this :ref:`zone ` belongs to. + string region = 1; + + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + string zone = 2; + + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + string sub_zone = 3; +} + +// BuildVersion combines SemVer version of extension with free-form build information +// (i.e. 'alpha', 'private-build') as a set of strings. +message BuildVersion { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BuildVersion"; + + // SemVer version of extension. + type.v3.SemanticVersion version = 1; + + // Free-form build information. + // Envoy defines several well known keys in the source/common/version/version.h file + google.protobuf.Struct metadata = 2; +} + +// Version and identification for an Envoy extension. 
+// [#next-free-field: 7] +message Extension { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Extension"; + + // This is the name of the Envoy filter as specified in the Envoy + // configuration, e.g. envoy.filters.http.router, com.acme.widget. + string name = 1; + + // Category of the extension. + // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" + // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from + // acme.com vendor. + // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] + string category = 2; + + // [#not-implemented-hide:] Type descriptor of extension configuration proto. + // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] + // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + string type_descriptor = 3 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // The version is a property of the extension and maintained independently + // of other extensions and the Envoy API. + // This field is not set when extension did not provide version information. + BuildVersion version = 4; + + // Indicates that the extension is present but was disabled via dynamic configuration. + bool disabled = 5; + + // Type URLs of extension configuration protos. + repeated string type_urls = 6; +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. +// [#next-free-field: 13] +message Node { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Node"; + + reserved 5; + + reserved "build_version"; + + // An opaque node identifier for the Envoy node. This also provides the local + // service node name. 
It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. + string id = 1; + + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification + // `, + // :ref:`runtime override directory `, + // :ref:`user agent addition + // `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. + string cluster = 2; + + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + google.protobuf.Struct metadata = 3; + + // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike + // other fields in this message). For example, the xDS client may have a shard identifier that + // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the + // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic + // parameter then appears in this field during future discovery requests. + map<string, xds.core.v3.ContextParams> dynamic_parameters = 12; + + // Locality specifying where the Envoy instance is running. + Locality locality = 4; + + // Free-form string that identifies the entity requesting config. + // E.g. "envoy" or "grpc" + string user_agent_name = 6; + + oneof user_agent_version_type { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + string user_agent_version = 7; + + // Structured version of the entity requesting config. + BuildVersion user_agent_build_version = 8; + } + + // List of extensions and their versions supported by the node. 
  repeated Extension extensions = 9;

  // Client feature support list. These are well known features described
  // in the Envoy API repository for a given major version of an API. Client features
  // use reverse DNS naming scheme, for example ``com.acme.feature``.
  // See :ref:`the list of features ` that xDS client may
  // support.
  repeated string client_features = 10;

  // Known listening ports on the node as a generic hint to the management server
  // for filtering :ref:`listeners ` to be returned. For example,
  // if there is a listener bound to port 80, the list can optionally contain the
  // SocketAddress ``(0.0.0.0,80)``. The field is optional and just a hint.
  repeated Address listening_addresses = 11
      [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
}

// Metadata provides additional inputs to filters based on matched listeners,
// filter chains, routes and endpoints. It is structured as a map, usually from
// filter name (in reverse DNS format) to metadata specific to the filter. Metadata
// key-values for a filter are merged as connection and request handling occurs,
// with later values for the same key overriding earlier values.
//
// An example use of metadata is providing additional values to
// http_connection_manager in the envoy.http_connection_manager.access_log
// namespace.
//
// Another example use of metadata is to provide per-service config info in cluster
// metadata, which may get consumed by multiple filters.
//
// For load balancing, Metadata provides a means to subset cluster endpoints.
// Endpoints have a Metadata object associated and routes contain a Metadata
// object to match against. There are some well defined metadata used today for
// this purpose:
//
// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an
//   endpoint and is also used during header processing
//   (x-envoy-upstream-canary) and for stats purposes.
+// [#next-major-version: move to type/metadata/v2] +message Metadata { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Metadata"; + + // Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` + // namespace is reserved for Envoy's built-in filters. + // If both ``filter_metadata`` and + // :ref:`typed_filter_metadata ` + // fields are present in the metadata with same keys, + // only ``typed_filter_metadata`` field will be parsed. + map filter_metadata = 1 + [(validate.rules).map = {keys {string {min_len: 1}}}]; + + // Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` + // namespace is reserved for Envoy's built-in filters. + // The value is encoded as google.protobuf.Any. + // If both :ref:`filter_metadata ` + // and ``typed_filter_metadata`` fields are present in the metadata with same keys, + // only ``typed_filter_metadata`` field will be parsed. + map typed_filter_metadata = 2 + [(validate.rules).map = {keys {string {min_len: 1}}}]; +} + +// Runtime derived uint32 with a default when not specified. +message RuntimeUInt32 { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeUInt32"; + + // Default value if runtime value is not available. + uint32 default_value = 2; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 3; +} + +// Runtime derived percentage with a default when not specified. +message RuntimePercent { + // Default value if runtime value is not available. + type.v3.Percent default_value = 1; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 2; +} + +// Runtime derived double with a default when not specified. +message RuntimeDouble { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeDouble"; + + // Default value if runtime value is not available. 
  double default_value = 1;

  // Runtime key to get value for comparison. This value is used if defined.
  string runtime_key = 2;
}

// Runtime derived bool with a default when not specified.
message RuntimeFeatureFlag {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.RuntimeFeatureFlag";

  // Default value if runtime value is not available.
  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];

  // Runtime key to get value for comparison. This value is used if defined. The boolean value must
  // be represented via its
  // `canonical JSON encoding `_.
  string runtime_key = 2;
}

// Please use :ref:`KeyValuePair ` instead.
// [#not-implemented-hide:]
message KeyValue {
  // The key of the key/value pair.
  string key = 1 [
    deprecated = true,
    (validate.rules).string = {min_len: 1 max_bytes: 16384},
    (envoy.annotations.deprecated_at_minor_version) = "3.0"
  ];

  // The value of the key/value pair.
  //
  // The ``bytes`` type is used. This means if JSON or YAML is used to represent the
  // configuration, the value must be base64 encoded. This is unfriendly for users in most
  // use scenarios of this message.
  //
  bytes value = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
}

message KeyValuePair {
  // The key of the key/value pair.
  string key = 1 [(validate.rules).string = {min_len: 1 max_bytes: 16384}];

  // The value of the key/value pair.
  google.protobuf.Value value = 2;
}

// Key/value pair plus option to control append behavior. This is used to specify
// key/value pairs that should be appended to a set of existing key/value pairs.
message KeyValueAppend {
  // Describes the supported action types for the key/value pair append action.
  enum KeyValueAppendAction {
    // If the key already exists, this action will result in the following behavior:
    //
    // - Comma-concatenated value if multiple values are not allowed.
    // - New value added to the list of values if multiple values are allowed.
    //
    // If the key doesn't exist then this will add a pair with the specified key and value.
    APPEND_IF_EXISTS_OR_ADD = 0;

    // This action will add the key/value pair if it doesn't already exist. If the
    // key already exists then this will be a no-op.
    ADD_IF_ABSENT = 1;

    // This action will overwrite the specified value by discarding any existing
    // values if the key already exists. If the key doesn't exist then this will add
    // the pair with specified key and value.
    OVERWRITE_IF_EXISTS_OR_ADD = 2;

    // This action will overwrite the specified value by discarding any existing
    // values if the key already exists. If the key doesn't exist then this will
    // be a no-op.
    OVERWRITE_IF_EXISTS = 3;
  }

  // The single key/value pair record to be appended or overridden. This field must be set.
  KeyValuePair record = 3;

  // Key/value pair entry that this option appends or overwrites. This field is deprecated;
  // please use :ref:`record `
  // as a replacement.
  // [#not-implemented-hide:]
  KeyValue entry = 1 [
    deprecated = true,
    (validate.rules).message = {skip: true},
    (envoy.annotations.deprecated_at_minor_version) = "3.0"
  ];

  // Describes the action taken to append/overwrite the given value for an existing
  // key or to only add this key if it's absent.
  KeyValueAppendAction action = 2 [(validate.rules).enum = {defined_only: true}];
}

// Key/value pair to append or remove.
message KeyValueMutation {
  // Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set or
  // the configuration will be rejected.
  KeyValueAppend append = 1;

  // Key to remove.
Only one of ``append`` or ``remove`` can be set or the configuration will be + // rejected. + string remove = 2 [(validate.rules).string = {max_bytes: 16384}]; +} + +// Query parameter name/value pair. +message QueryParameter { + // The key of the query parameter. Case sensitive. + string key = 1 [(validate.rules).string = {min_len: 1}]; + + // The value of the query parameter. + string value = 2; +} + +// Header name/value pair. +message HeaderValue { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue"; + + // Header name. + string key = 1 + [(validate.rules).string = + {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of ``-``. + // Header value is encoded as string. This does not work for non-utf8 characters. + // Only one of ``value`` or ``raw_value`` can be set. + string value = 2 [ + (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "value_type" + ]; + + // Header value is encoded as bytes which can support non-utf8 characters. + // Only one of ``value`` or ``raw_value`` can be set. + bytes raw_value = 3 [ + (validate.rules).bytes = {min_len: 0 max_len: 16384}, + (udpa.annotations.field_migrate).oneof_promotion = "value_type" + ]; +} + +// Header name/value pair plus option to control append behavior. +message HeaderValueOption { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HeaderValueOption"; + + // Describes the supported actions types for header append action. + enum HeaderAppendAction { + // If the header already exists, this action will result in: + // + // - Comma-concatenated for predefined inline headers. 
+ // - Duplicate header added in the ``HeaderMap`` for other headers. + // + // If the header doesn't exist then this will add new header with specified key and value. + APPEND_IF_EXISTS_OR_ADD = 0; + + // This action will add the header if it doesn't already exist. If the header + // already exists then this will be a no-op. + ADD_IF_ABSENT = 1; + + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will add the header + // with specified key and value. + OVERWRITE_IF_EXISTS_OR_ADD = 2; + + // This action will overwrite the specified value by discarding any existing values if + // the header already exists. If the header doesn't exist then this will be no-op. + OVERWRITE_IF_EXISTS = 3; + } + + // Header name/value pair that this option applies to. + HeaderValue header = 1 [(validate.rules).message = {required: true}]; + + // Should the value be appended? If true (default), the value is appended to + // existing values. Otherwise it replaces any existing values. + // This field is deprecated and please use + // :ref:`append_action ` as replacement. + // + // .. note:: + // The :ref:`external authorization service ` and + // :ref:`external processor service ` have + // default value (``false``) for this field. + google.protobuf.BoolValue append = 2 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Describes the action taken to append/overwrite the given value for an existing header + // or to only add this header if it's absent. + // Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD + // `. + HeaderAppendAction append_action = 3 [(validate.rules).enum = {defined_only: true}]; + + // Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped, + // otherwise they are added. + bool keep_empty_value = 4; +} + +// Wrapper for a set of headers. 
+message HeaderMap { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderMap"; + + // A list of header names and their values. + repeated HeaderValue headers = 1; +} + +// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename +// events inside this directory trigger the watch. +message WatchedDirectory { + // Directory path to watch. + string path = 1 [(validate.rules).string = {min_len: 1}]; +} + +// Data source consisting of a file, an inline value, or an environment variable. +// [#next-free-field: 6] +message DataSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource"; + + oneof specifier { + option (validate.required) = true; + + // Local filesystem data source. + string filename = 1 [(validate.rules).string = {min_len: 1}]; + + // Bytes inlined in the configuration. + bytes inline_bytes = 2; + + // String inlined in the configuration. + string inline_string = 3; + + // Environment variable data source. + string environment_variable = 4 [(validate.rules).string = {min_len: 1}]; + } + + // Watched directory that is watched for file changes. If this is set explicitly, the file + // specified in the ``filename`` field will be reloaded when relevant file move events occur. + // + // .. note:: + // This field only makes sense when the ``filename`` field is set. + // + // .. note:: + // Envoy only updates when the file is replaced by a file move, and not when the file is + // edited in place. + // + // .. note:: + // Not all use cases of ``DataSource`` support watching directories. It depends on the + // specific usage of the ``DataSource``. See the documentation of the parent message for + // details. + WatchedDirectory watched_directory = 5; +} + +// The message specifies the retry policy of remote data source when fetching fails. 
+// [#next-free-field: 7] +message RetryPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy"; + + // See :ref:`RetryPriority `. + message RetryPriority { + string name = 1 [(validate.rules).string = {min_len: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 2; + } + } + + // See :ref:`RetryHostPredicate `. + message RetryHostPredicate { + string name = 1 [(validate.rules).string = {min_len: 1}]; + + oneof config_type { + google.protobuf.Any typed_config = 2; + } + } + + // Specifies parameters that control :ref:`retry backoff strategy `. + // This parameter is optional, in which case the default base interval is 1000 milliseconds. The + // default maximum interval is 10 times the base interval. + BackoffStrategy retry_back_off = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; + + // For details, see :ref:`retry_on `. + string retry_on = 3; + + // For details, see :ref:`retry_priority `. + RetryPriority retry_priority = 4; + + // For details, see :ref:`RetryHostPredicate `. + repeated RetryHostPredicate retry_host_predicate = 5; + + // For details, see :ref:`host_selection_retry_max_attempts `. + int64 host_selection_retry_max_attempts = 6; +} + +// The message specifies how to fetch data from remote and how to verify it. +message RemoteDataSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RemoteDataSource"; + + // The HTTP URI to fetch the remote data. + HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; + + // SHA256 string for verifying data. + string sha256 = 2 [(validate.rules).string = {min_len: 1}]; + + // Retry policy for fetching remote data. + RetryPolicy retry_policy = 3; +} + +// Async data source which support async data fetch. 
+message AsyncDataSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.AsyncDataSource"; + + oneof specifier { + option (validate.required) = true; + + // Local async data source. + DataSource local = 1; + + // Remote async data source. + RemoteDataSource remote = 2; + } +} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +message TransportSocket { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TransportSocket"; + + reserved 2; + + reserved "config"; + + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. + oneof config_type { + google.protobuf.Any typed_config = 3; + } +} + +// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not +// specified via a runtime key. +// +// .. note:: +// +// Parsing of the runtime key's data is implemented such that it may be represented as a +// :ref:`FractionalPercent ` proto represented as JSON/YAML +// and may also be represented as an integer with the assumption that the value is an integral +// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse +// as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED. +message RuntimeFractionalPercent { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.RuntimeFractionalPercent"; + + // Default value if the runtime value's for the numerator/denominator keys are not available. 
+ type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; + + // Runtime key for a YAML representation of a FractionalPercent. + string runtime_key = 2; +} + +// Identifies a specific ControlPlane instance that Envoy is connected to. +message ControlPlane { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ControlPlane"; + + // An opaque control plane identifier that uniquely identifies an instance + // of control plane. This can be used to identify which control plane instance, + // the Envoy is connected to. + string identifier = 1; +} diff --git a/proto/envoy/config/core/v3/cel.proto b/proto/envoy/config/core/v3/cel.proto new file mode 100644 index 0000000..940a66d --- /dev/null +++ b/proto/envoy/config/core/v3/cel.proto @@ -0,0 +1,63 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "CelProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: CEL Expression Configuration] + +// CEL expression evaluation configuration. +// These options control the behavior of the Common Expression Language runtime for +// individual CEL expressions. +message CelExpressionConfig { + // Enable string conversion functions for CEL expressions. When enabled, CEL expressions + // can convert values to strings using the ``string()`` function. + // + // .. attention:: + // + // This option is disabled by default to avoid unbounded memory allocation. 
+ // CEL evaluation cost is typically bounded by the expression size, but converting + // arbitrary values (e.g., large messages, lists, or maps) to strings may allocate + // memory proportional to input data size, which can be unbounded and lead to + // memory exhaustion. + bool enable_string_conversion = 1; + + // Enable string concatenation for CEL expressions. When enabled, CEL expressions + // can concatenate strings using the ``+`` operator. + // + // .. attention:: + // + // This option is disabled by default to avoid unbounded memory allocation. + // While CEL normally bounds evaluation by expression size, enabling string + // concatenation allows building outputs whose size depends on input data, + // potentially causing large intermediate allocations and memory exhaustion. + bool enable_string_concat = 2; + + // Enable string manipulation functions for CEL expressions. When enabled, CEL + // expressions can use additional string functions: + // + // * ``replace(old, new)`` - Replaces all occurrences of ``old`` with ``new``. + // * ``split(separator)`` - Splits a string into a list of substrings. + // * ``lowerAscii()`` - Converts ASCII characters to lowercase. + // * ``upperAscii()`` - Converts ASCII characters to uppercase. + // + // .. note:: + // + // Standard CEL string functions like ``contains()``, ``startsWith()``, and + // ``endsWith()`` are always available regardless of this setting. + // + // .. attention:: + // + // This option is disabled by default to avoid unbounded memory allocation. + // Although CEL generally bounds evaluation by expression size, functions such as + // ``replace``, ``split``, ``lowerAscii()``, and ``upperAscii()`` can allocate memory + // proportional to input data size. Under adversarial inputs this can lead to + // unbounded allocations and memory exhaustion. 
+ bool enable_string_functions = 3; +} diff --git a/proto/envoy/config/core/v3/config_source.proto b/proto/envoy/config/core/v3/config_source.proto new file mode 100644 index 0000000..430562a --- /dev/null +++ b/proto/envoy/config/core/v3/config_source.proto @@ -0,0 +1,283 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/grpc_service.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/core/v3/authority.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ConfigSourceProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Configuration sources] + +// xDS API and non-xDS services version. This is used to describe both resource and transport +// protocol versions (in distinct configuration fields). +enum ApiVersion { + // When not specified, we assume v3; it is the only supported version. + AUTO = 0; + + // Use xDS v2 API. This is no longer supported. + V2 = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"]; + + // Use xDS v3 API. + V3 = 2; +} + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. +// [#next-free-field: 10] +message ApiConfigSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ApiConfigSource"; + + // APIs may be fetched via either REST or gRPC. 
  enum ApiType {
    // Ideally this would be 'reserved 0' but one can't reserve the default
    // value. Instead we throw an exception if this is ever used.
    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0
        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];

    // REST-JSON v2 API. The `canonical JSON encoding
    // `_ for
    // the v2 protos is used.
    REST = 1;

    // SotW gRPC service.
    GRPC = 2;

    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}
    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state
    // with every update, the xDS server only sends what has changed since the last update.
    DELTA_GRPC = 3;

    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be
    // multiplexed on a single connection to an ADS endpoint.
    // [#not-implemented-hide:]
    AGGREGATED_GRPC = 5;

    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be
    // multiplexed on a single connection to an ADS endpoint.
    // [#not-implemented-hide:]
    AGGREGATED_DELTA_GRPC = 6;
  }

  // API type (gRPC, REST, delta gRPC)
  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];

  // API version for xDS transport protocol. This describes the xDS gRPC/REST
  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];

  // Cluster names should be used only with REST. If > 1
  // cluster is defined, clusters will be cycled through if any kind of failure
  // occurs.
  //
  // .. note::
  //
  //    The cluster with name ``cluster_name`` must be statically defined and its
  //    type must not be ``EDS``.
  repeated string cluster_names = 2;

  // Multiple gRPC services can be provided for GRPC. If > 1 cluster is defined,
  // services will be cycled through if any kind of failure occurs.
+ repeated GrpcService grpc_services = 4; + + // For REST APIs, the delay between successive polls. + google.protobuf.Duration refresh_delay = 3; + + // For REST APIs, the request timeout. If not set, a default value of 1s will be used. + google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; + + // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + // rate limited. + RateLimitSettings rate_limit_settings = 6; + + // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + bool set_node_on_first_message_only = 7; + + // A list of config validators that will be executed when a new update is + // received from the ApiConfigSource. Note that each validator handles a + // specific xDS service type, and only the validators corresponding to the + // type url (in ``:ref: DiscoveryResponse`` or ``:ref: DeltaDiscoveryResponse``) + // will be invoked. + // If the validator returns false or throws an exception, the config will be rejected by + // the client, and a NACK will be sent. + // [#extension-category: envoy.config.validators] + repeated TypedExtensionConfig config_validators = 9; +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. +message AggregatedConfigSource { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.AggregatedConfigSource"; +} + +// [#not-implemented-hide:] +// Self-referencing config source options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that other data can be obtained from the same server. +message SelfConfigSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; + + // API version for xDS transport protocol. 
This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; +} + +// Rate Limit settings to be applied for discovery requests made by Envoy. +message RateLimitSettings { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.RateLimitSettings"; + + // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a + // default value of 100 will be used. + google.protobuf.UInt32Value max_tokens = 1; + + // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens + // per second will be used. The minimal fill rate is once per year. Lower + // fill rates will be set to once per year. + google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; +} + +// Local filesystem path configuration source. +message PathConfigSource { + // Path on the filesystem to source and watch for configuration updates. + // When sourcing configuration for a :ref:`secret `, + // the certificate and key files are also watched for updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. note:: + // + // If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*. + // This is because in general only moves are atomic. The same method of swapping files as is + // demonstrated in the :ref:`runtime documentation ` can be + // used here also. If ``watched_directory`` is configured, no watch will be placed directly on + // this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of + // this path. This is required in certain deployment scenarios. See below for more information. + string path = 1 [(validate.rules).string = {min_len: 1}]; + + // If configured, this directory will be watched for *moves*. 
When an entry in this directory is + // moved to, the ``path`` will be reloaded. This is required in certain deployment scenarios. + // + // Specifically, if trying to load an xDS resource using a + // `Kubernetes ConfigMap `_, the + // following configuration might be used: + // 1. Store xds.yaml inside a ConfigMap. + // 2. Mount the ConfigMap to ``/config_map/xds`` + // 3. Configure path ``/config_map/xds/xds.yaml`` + // 4. Configure watched directory ``/config_map/xds`` + // + // The above configuration will ensure that Envoy watches the owning directory for moves which is + // required due to how Kubernetes manages ConfigMap symbolic links during atomic updates. + WatchedDirectory watched_directory = 2; +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. +// [#next-free-field: 9] +message ConfigSource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; + + // Authorities that this config source may be used for. An authority specified in a xdstp:// URL + // is resolved to a ``ConfigSource`` prior to configuration fetch. This field provides the + // association between authority name and configuration source. + // [#not-implemented-hide:] + repeated xds.core.v3.Authority authorities = 7; + + oneof config_source_specifier { + option (validate.required) = true; + + // Deprecated in favor of ``path_config_source``. Use that field instead. + string path = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Local filesystem path configuration source. + PathConfigSource path_config_source = 8; + + // API configuration source. + ApiConfigSource api_config_source = 2; + + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. 
+ AggregatedConfigSource ads = 3; + + // [#not-implemented-hide:] + // When set, the client will access the resources from the same server it got the + // ConfigSource from, although not necessarily from the same stream. This is similar to the + // :ref:`ads` field, except that the client may use a + // different stream to the same server. As a result, this field can be used for things + // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) + // LDS to RDS on the same server without requiring the management server to know its name + // or required credentials. + // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since + // this field can implicitly mean to use the same stream in the case where the ConfigSource + // is provided via ADS and the specified data can also be obtained via ADS.] + SelfConfigSource self = 5; + } + + // When this timeout is specified, Envoy will wait no longer than the specified time for first + // config response on this xDS subscription during the :ref:`initialization process + // `. After reaching the timeout, Envoy will move to the next + // initialization phase, even if the first config is not delivered yet. The timer is activated + // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 + // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another + // timeout applies). The default is 15s. + google.protobuf.Duration initial_fetch_timeout = 4; + + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. + ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; +} + +// Configuration source specifier for a late-bound extension configuration. 
The +// parent resource is warmed until all the initial extension configurations are +// received, unless the flag to apply the default configuration is set. +// Subsequent extension updates are atomic on a per-worker basis. Once an +// extension configuration is applied to a request or a connection, it remains +// constant for the duration of processing. If the initial delivery of the +// extension configuration fails, due to a timeout for example, the optional +// default configuration is applied. Without a default configuration, the +// extension is disabled, until an extension configuration is received. The +// behavior of a disabled extension depends on the context. For example, a +// filter chain with a disabled extension filter rejects all incoming streams. +message ExtensionConfigSource { + ConfigSource config_source = 1 [(validate.rules).any = {required: true}]; + + // Optional default configuration to use as the initial configuration if + // there is a failure to receive the initial extension configuration or if + // ``apply_default_config_without_warming`` flag is set. + google.protobuf.Any default_config = 2; + + // Use the default config as the initial configuration without warming and + // waiting for the first discovery response. Requires the default configuration + // to be supplied. + bool apply_default_config_without_warming = 3; + + // A set of permitted extension type URLs for the type encoded inside of the + // :ref:`TypedExtensionConfig `. Extension + // configuration updates are rejected if they do not match any type URL in the set. 
+ repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/proto/envoy/config/core/v3/event_service_config.proto b/proto/envoy/config/core/v3/event_service_config.proto new file mode 100644 index 0000000..68c8df4 --- /dev/null +++ b/proto/envoy/config/core/v3/event_service_config.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/grpc_service.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "EventServiceConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#not-implemented-hide:] +// Configuration of the event reporting service endpoint. +message EventServiceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.EventServiceConfig"; + + oneof config_source_specifier { + option (validate.required) = true; + + // Specifies the gRPC service that hosts the event reporting service. 
+ GrpcService grpc_service = 1; + } +} diff --git a/proto/envoy/config/core/v3/extension.proto b/proto/envoy/config/core/v3/extension.proto new file mode 100644 index 0000000..cacc7b0 --- /dev/null +++ b/proto/envoy/config/core/v3/extension.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ExtensionProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Extension configuration] + +// Message type for extension configuration. +// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. +message TypedExtensionConfig { + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is ``xds.type.v3.TypedStruct`` + // (or, for historical reasons, ``udpa.type.v1.TypedStruct``), the inner type + // URL of ``TypedStruct`` will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. 
+ google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} diff --git a/proto/envoy/config/core/v3/grpc_method_list.proto b/proto/envoy/config/core/v3/grpc_method_list.proto new file mode 100644 index 0000000..8242b42 --- /dev/null +++ b/proto/envoy/config/core/v3/grpc_method_list.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "GrpcMethodListProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: gRPC method list] + +// A list of gRPC methods which can be used as an allowlist, for example. +message GrpcMethodList { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList"; + + message Service { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcMethodList.Service"; + + // The name of the gRPC service. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The names of the gRPC methods in this service. 
+ repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; + } + + repeated Service services = 1; +} diff --git a/proto/envoy/config/core/v3/grpc_service.proto b/proto/envoy/config/core/v3/grpc_service.proto new file mode 100644 index 0000000..9c44006 --- /dev/null +++ b/proto/envoy/config/core/v3/grpc_service.proto @@ -0,0 +1,355 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "GrpcServiceProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: gRPC services] + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +// [#next-free-field: 7] +message GrpcService { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService"; + + // [#next-free-field: 6] + message EnvoyGrpc { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.EnvoyGrpc"; + + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`transport_socket + // `. + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; + + // The ``:authority`` header in the grpc request. If this field is not set, the authority header value will be ``cluster_name``. + // Note that this authority does not override the SNI. 
The SNI is provided by the transport socket of the cluster. + string authority = 2 + [(validate.rules).string = + {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Specifies the retry backoff policy for re-establishing long‑lived xDS gRPC streams. + // + // This field is optional. If ``retry_back_off.max_interval`` is not provided, it will be set to + // ten times the configured ``retry_back_off.base_interval``. + // + // .. note:: + // + // This field is only honored for management‑plane xDS gRPC streams created from + // :ref:`ApiConfigSource ` that use + // ``envoy_grpc``. Data‑plane gRPC clients (for example external authorization or external + // processing filters) must use :ref:`GrpcService.retry_policy + // ` instead. + // + // If not set, xDS gRPC streams default to a base interval of 500ms and a maximum interval of 30s. + RetryPolicy retry_policy = 3; + + // Maximum gRPC message size that is allowed to be received. + // If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error. + // This limit is applied to individual messages in the streaming response and not the total size of streaming response. + // Defaults to 0, which means unlimited. + google.protobuf.UInt32Value max_receive_message_length = 4; + + // This provides gRPC client level control over envoy generated headers. + // If false, the header will be sent but it can be overridden by per stream option. + // If true, the header will be removed and can not be overridden by per stream option. + // Default to false. + bool skip_envoy_headers = 5; + } + + // [#next-free-field: 11] + message GoogleGrpc { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc"; + + // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. 
+ message SslCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials"; + + // PEM encoded server root certificates. + DataSource root_certs = 1; + + // PEM encoded client private key. + DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; + + // PEM encoded client certificate chain. + DataSource cert_chain = 3; + } + + // Local channel credentials. Only UDS is supported for now. + // See https://github.com/grpc/grpc/pull/15909. + message GoogleLocalCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials"; + } + + // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call + // credential types. + message ChannelCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials"; + + oneof credential_specifier { + option (validate.required) = true; + + SslCredentials ssl_credentials = 1; + + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_default = 2; + + GoogleLocalCredentials local_credentials = 3; + } + } + + // [#next-free-field: 8] + message CallCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials"; + + message ServiceAccountJWTAccessCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." 
+ "ServiceAccountJWTAccessCredentials"; + + string json_key = 1; + + uint64 token_lifetime_seconds = 2; + } + + message GoogleIAMCredentials { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; + + string authorization_token = 1; + + string authority_selector = 2; + } + + message MetadataCredentialsFromPlugin { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." + "MetadataCredentialsFromPlugin"; + + reserved 2; + + reserved "config"; + + string name = 1; + + // [#extension-category: envoy.grpc_credentials] + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Security token service configuration that allows Google gRPC to + // fetch security token from an OAuth 2.0 authorization server. + // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and + // https://github.com/grpc/grpc/pull/19587. + // [#next-free-field: 10] + message StsService { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService"; + + // URI of the token exchange service that handles token exchange requests. + // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by + // https://github.com/bufbuild/protoc-gen-validate/issues/303] + string token_exchange_service_uri = 1; + + // Location of the target service or resource where the client + // intends to use the requested security token. + string resource = 2; + + // Logical name of the target service where the client intends to + // use the requested security token. + string audience = 3; + + // The desired scope of the requested security token in the + // context of the service or resource where the token will be used. + string scope = 4; + + // Type of the requested security token. 
+ string requested_token_type = 5; + + // The path of subject token, a security token that represents the + // identity of the party on behalf of whom the request is being made. + string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; + + // Type of the subject token. + string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; + + // The path of actor token, a security token that represents the identity + // of the acting party. The acting party is authorized to use the + // requested security token and act on behalf of the subject. + string actor_token_path = 8; + + // Type of the actor token. + string actor_token_type = 9; + } + + oneof credential_specifier { + option (validate.required) = true; + + // Access token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + string access_token = 1; + + // Google Compute Engine credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 + google.protobuf.Empty google_compute_engine = 2; + + // Google refresh token credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. + string google_refresh_token = 3; + + // Service Account JWT Access credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. + ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; + + // Google IAM credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. + GoogleIAMCredentials google_iam = 5; + + // Custom authenticator credentials. + // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. + // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. + MetadataCredentialsFromPlugin from_plugin = 6; + + // Custom security token service which implements OAuth 2.0 token exchange. 
+ // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 + // See https://github.com/grpc/grpc/pull/19587. + StsService sts_service = 7; + } + } + + // Channel arguments. + message ChannelArgs { + message Value { + // Pointer values are not supported, since they don't make any sense when + // delivered via the API. + oneof value_specifier { + option (validate.required) = true; + + string string_value = 1; + + int64 int_value = 2; + } + } + + // See grpc_types.h GRPC_ARG #defines for keys that work here. + map args = 1; + } + + // The target URI when using the `Google C++ gRPC client + // `_. + string target_uri = 1 [(validate.rules).string = {min_len: 1}]; + + // The channel credentials to use. See `channel credentials + // `_. + // Ignored if ``channel_credentials_plugin`` is set. + ChannelCredentials channel_credentials = 2; + + // A list of channel credentials plugins. + // The data plane will iterate over the list in order and stop at the first credential type + // that it supports. This provides a mechanism for starting to use new credential types that + // are not yet supported by all data planes. + // [#not-implemented-hide:] + repeated google.protobuf.Any channel_credentials_plugin = 9; + + // The call credentials to use. See `channel credentials + // `_. + // Ignored if ``call_credentials_plugin`` is set. + repeated CallCredentials call_credentials = 3; + + // A list of call credentials plugins. All supported plugins will be used. + // Unsupported plugin types will be ignored. + // [#not-implemented-hide:] + repeated google.protobuf.Any call_credentials_plugin = 10; + + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. 
csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; + + // The name of the Google gRPC credentials factory to use. This must have been registered with + // Envoy. If this is empty, a default credentials factory will be used that sets up channel + // credentials based on other configuration parameters. + string credentials_factory_name = 5; + + // Additional configuration for site-specific customizations of the Google + // gRPC library. + google.protobuf.Struct config = 6; + + // How many bytes each stream can buffer internally. + // If not set an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; + + // Custom channels args. + ChannelArgs channel_args = 8; + } + + reserved 4; + + oneof target_specifier { + option (validate.required) = true; + + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc envoy_grpc = 1; + + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc google_grpc = 2; + } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // Additional metadata to include in streams initiated to the GrpcService. This can be used for + // scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + // be injected. For more information, including details on header value syntax, see the + // documentation on :ref:`custom request headers + // `. + repeated HeaderValue initial_metadata = 5; + + // Optional default retry policy for RPCs or streams initiated toward this gRPC service. 
+ // + // If an async stream does not have a retry policy configured in its per‑stream options, this + // policy is used as the default. + // + // .. note:: + // + // This field is only applied by Envoy gRPC (``envoy_grpc``) clients. Google gRPC + // (``google_grpc``) clients currently ignore this field. + // + // If not specified, no default retry policy is applied at the client level and retries only occur + // when explicitly configured in per‑stream options. + RetryPolicy retry_policy = 6; +} diff --git a/proto/envoy/config/core/v3/health_check.proto b/proto/envoy/config/core/v3/health_check.proto new file mode 100644 index 0000000..a4ed6e9 --- /dev/null +++ b/proto/envoy/config/core/v3/health_check.proto @@ -0,0 +1,443 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/event_service_config.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/v3/http.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Health check] +// * Health checking :ref:`architecture overview `. +// * If health checking is configured for a cluster, additional statistics are emitted. They are +// documented :ref:`here `. 
+ +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as ``HEALTHY``. + UNKNOWN = 0; + + // Healthy. + HEALTHY = 1; + + // Unhealthy. + UNHEALTHY = 2; + + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as ``UNHEALTHY``. + DRAINING = 3; + + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // ``UNHEALTHY``. + TIMEOUT = 4; + + // Degraded. + DEGRADED = 5; +} + +message HealthStatusSet { + // An order-independent set of health status. + repeated HealthStatus statuses = 1 + [(validate.rules).repeated = {items {enum {defined_only: true}}}]; +} + +// [#next-free-field: 27] +message HealthCheck { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; + + // Describes the encoding of the payload bytes in the payload. + message Payload { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.Payload"; + + oneof payload { + option (validate.required) = true; + + // Hex encoded payload. E.g., "000000FF". + string text = 1 [(validate.rules).string = {min_len: 1}]; + + // Binary payload. + bytes binary = 2; + } + } + + // [#next-free-field: 15] + message HttpHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.HttpHealthCheck"; + + reserved 5, 7; + + reserved "service_name", "use_http2"; + + // The value of the host header in the HTTP health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The host header can be customized for a specific endpoint by setting the + // :ref:`hostname ` field. + string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE}]; + + // Specifies the HTTP path that will be requested during health checking. For example + // ``/healthcheck``. 
+ string path = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE}]; + + // HTTP specific payload to be sent as the request body during health checking. + // If specified, the method should support a request body (POST, PUT, PATCH, etc.). + Payload send = 3; + + // Specifies a list of HTTP expected responses to match in the first ``response_buffer_size`` bytes of the response body. + // If it is set, both the expected response check and status code determine the health check. + // When checking the response, “fuzzy” matching is performed such that each payload block must be found, + // and in the order specified, but not necessarily contiguous. + // + // .. note:: + // + // It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency. + // The default buffer size is 1024 bytes when it is not set. + repeated Payload receive = 4; + + // Specifies the size of response buffer in bytes that is used to Payload match. + // The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. + google.protobuf.UInt64Value response_buffer_size = 14 [(validate.rules).uint64 = {gte: 0}]; + + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. For more information, including details on header value syntax, see + // the documentation on :ref:`custom request headers + // `. + repeated HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request that is sent to the + // health checked cluster. + repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // Specifies a list of HTTP response statuses considered healthy. 
If provided, replaces default + // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + // semantics of :ref:`Int64Range `. The start and end of each + // range are required. Only statuses in the range [100, 600) are allowed. + repeated type.v3.Int64Range expected_statuses = 9; + + // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + // will count towards the configured :ref:`unhealthy_threshold `, + // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + // :ref:`Int64Range `. The start and end of each range are required. + // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses ` + // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + // be considered a successful health check. By default all responses not in + // :ref:`expected_statuses ` will result in + // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + // non-200 response will result in the host being marked unhealthy. + repeated type.v3.Int64Range retriable_statuses = 12; + + // Use specified application protocol for health checks. + type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; + + // An optional service name parameter which is used to validate the identity of + // the health checked cluster using a :ref:`StringMatcher + // `. See the :ref:`architecture overview + // ` for more information. + type.matcher.v3.StringMatcher service_name_matcher = 11; + + // HTTP Method that will be used for health checking, default is "GET". + // GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported. + // Request body payloads are supported for POST, PUT, PATCH, and OPTIONS methods only. 
+ // CONNECT method is disallowed because it is not appropriate for health check request. + // If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `. + RequestMethod method = 13 [(validate.rules).enum = {defined_only: true not_in: 6}]; + } + + message TcpHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.TcpHealthCheck"; + + // Empty payloads imply a connect-only health check. + Payload send = 1; + + // When checking the response, “fuzzy” matching is performed such that each + // payload block must be found, and in the order specified, but not + // necessarily contiguous. + repeated Payload receive = 2; + + // When setting this value, it tries to attempt health check request with ProxyProtocol. + // When ``send`` is presented, they are sent after preceding ProxyProtocol header. + // Only ProxyProtocol header is sent when ``send`` is not presented. + // It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes + // LOCAL command and doesn't include L3/L4. + ProxyProtocolConfig proxy_protocol_config = 3; + } + + message RedisHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.RedisHealthCheck"; + + // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + string key = 1; + } + + // `grpc.health.v1.Health + // `_-based + // healthcheck. See `gRPC doc `_ + // for details. 
+ message GrpcHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.GrpcHealthCheck"; + + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + string service_name = 1; + + // The value of the :authority header in the gRPC health check request. If + // left empty (default value), the name of the cluster this health check is associated + // with will be used. The authority header can be customized for a specific endpoint by setting + // the :ref:`hostname ` field. + string authority = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Specifies a list of key-value pairs that should be added to the metadata of each GRPC call + // that is sent to the health checked cluster. For more information, including details on header value syntax, + // see the documentation on :ref:`custom request headers + // `. + repeated HeaderValueOption initial_metadata = 3 [(validate.rules).repeated = {max_items: 1000}]; + } + + // Custom health check. + message CustomHealthCheck { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.CustomHealthCheck"; + + reserved 2; + + reserved "config"; + + // The registered name of the custom health checker. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. + // [#extension-category: envoy.health_checkers] + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + // Health checks occur over the transport socket specified for the cluster. This implies that if a + // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. 
+ // + // This allows overriding the cluster TLS settings, just for health check connections. + message TlsOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HealthCheck.TlsOptions"; + + // Specifies the ALPN protocols for health check connections. This is useful if the + // corresponding upstream is using ALPN-based :ref:`FilterChainMatch + // ` along with different protocols for health checks + // versus data connections. If empty, no ALPN protocols will be set on health check connections. + repeated string alpn_protocols = 1; + } + + reserved 10; + + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration = { + required: true + gt {} + }]; + + // An optional jitter amount in milliseconds. If specified, Envoy will start health + // checking after for a random time in ms between 0 and initial_jitter. This only + // applies to the first health check. + google.protobuf.Duration initial_jitter = 20; + + // An optional jitter amount in milliseconds. If specified, during every + // interval Envoy will add interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // An optional jitter amount as a percentage of interval_ms. If specified, + // during every interval Envoy will add ``interval_ms`` * + // ``interval_jitter_percent`` / 100 to the wait time. + // + // If interval_jitter_ms and interval_jitter_percent are both set, both of + // them will be used to increase the wait time. + uint32 interval_jitter_percent = 18; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. 
Note that for ``http`` health checking if a host responds with a code not in + // :ref:`expected_statuses ` + // or :ref:`retriable_statuses `, + // this threshold is ignored and the host is considered immediately unhealthy. + google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; + + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. + google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + + oneof health_checker { + option (validate.required) = true; + + // HTTP health check. + HttpHealthCheck http_health_check = 8; + + // TCP health check. + TcpHealthCheck tcp_health_check = 9; + + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // Custom health check. + CustomHealthCheck custom_health_check = 13; + } + + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. 
+ google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; + + // The "no traffic healthy interval" is a special health check interval that + // is used for hosts that are currently passing active health checking + // (including new hosts) when the cluster has received no traffic. + // + // This is useful for when we want to send frequent health checks with + // ``no_traffic_interval`` but then revert to lower frequency ``no_traffic_healthy_interval`` once + // a host in the cluster is marked as healthy. + // + // Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. + // + // If no_traffic_healthy_interval is not set, it will default to the + // no traffic interval and send that interval regardless of health state. + google.protobuf.Duration no_traffic_healthy_interval = 24 [(validate.rules).duration = {gt {}}]; + + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". + google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; + + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
+ google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; + + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; + + // Specifies the path to the :ref:`health check event log `. + // + // .. attention:: + // This field is deprecated in favor of the extension + // :ref:`event_logger ` and + // :ref:`event_log_path ` + // in the file sink extension. + string event_log_path = 17 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // A list of event log sinks to process the health check event. + // [#extension-category: envoy.health_check.event_sinks] + repeated TypedExtensionConfig event_logger = 25; + + // [#not-implemented-hide:] + // The gRPC service for the health check event service. + // If empty, health check events won't be sent to a remote endpoint. + EventServiceConfig event_service = 22; + + // If set to true, health check failure events will always be logged. If set to false, only the + // initial health check failure event will be logged. + // The default value is false. + bool always_log_health_check_failures = 19; + + // If set to true, health check success events will always be logged. If set to false, only host addition event will be logged + // if it is the first successful health check, or if the healthy threshold is reached. + // The default value is false. + bool always_log_health_check_success = 26; + + // This allows overriding the cluster TLS settings, just for health check connections. 
+  TlsOptions tls_options = 21;
+
+  // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's
+  // :ref:`transport socket matches `.
+  // For example, the following match criteria
+  //
+  // .. code-block:: yaml
+  //
+  //  transport_socket_match_criteria:
+  //    useMTLS: true
+  //
+  // Will match the following :ref:`cluster socket match `
+  //
+  // .. code-block:: yaml
+  //
+  //  transport_socket_matches:
+  //  - name: "useMTLS"
+  //    match:
+  //      useMTLS: true
+  //    transport_socket:
+  //      name: envoy.transport_sockets.tls
+  //      config: { ... } # tls socket configuration
+  //
+  // If this field is set, then for health checks it will supersede an entry of ``envoy.transport_socket`` in the
+  // :ref:`LbEndpoint.Metadata `.
+  // This allows using different transport socket capabilities for health checking versus proxying to the
+  // endpoint.
+  //
+  // If the key/value pairs specified do not match any
+  // :ref:`transport socket matches `,
+  // the cluster's :ref:`transport socket `
+  // will be used for health check socket configuration.
+ google.protobuf.Struct transport_socket_match_criteria = 23; +} diff --git a/proto/envoy/config/core/v3/http_service.proto b/proto/envoy/config/core/v3/http_service.proto new file mode 100644 index 0000000..426994c --- /dev/null +++ b/proto/envoy/config/core/v3/http_service.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/http_uri.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "HttpServiceProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP services] + +// HTTP service configuration. +message HttpService { + // The service's HTTP URI. For example: + // + // .. code-block:: yaml + // + // http_uri: + // uri: https://www.myserviceapi.com/v1/data + // cluster: www.myserviceapi.com|443 + // + HttpUri http_uri = 1; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. 
+ repeated HeaderValueOption request_headers_to_add = 2 + [(validate.rules).repeated = {max_items: 1000}]; +} diff --git a/proto/envoy/config/core/v3/http_uri.proto b/proto/envoy/config/core/v3/http_uri.proto new file mode 100644 index 0000000..bac37c0 --- /dev/null +++ b/proto/envoy/config/core/v3/http_uri.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/duration.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "HttpUriProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP service URI ] + +// Envoy external URI descriptor +message HttpUri { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpUri"; + + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + // + string uri = 1 [(validate.rules).string = {min_len: 1}]; + + // Specify how ``uri`` is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. + oneof http_upstream_type { + option (validate.required) = true; + + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // + string cluster = 2 [(validate.rules).string = {min_len: 1}]; + } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
+ google.protobuf.Duration timeout = 3 [(validate.rules).duration = { + required: true + lt {seconds: 4294967296} + gte {} + }]; +} diff --git a/proto/envoy/config/core/v3/protocol.proto b/proto/envoy/config/core/v3/protocol.proto new file mode 100644 index 0000000..63e189e --- /dev/null +++ b/proto/envoy/config/core/v3/protocol.proto @@ -0,0 +1,807 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/extension.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/annotations/v3/status.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ProtocolProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Protocol options] + +// [#not-implemented-hide:] +message TcpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.TcpProtocolOptions"; +} + +// Config for keepalive probes in a QUIC connection. +// +// .. note:: +// +// QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet +// itself doesn't timeout waiting for a probing response. QUIC has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `. 
+message QuicKeepAliveSettings { + // The max interval for a connection to send keep-alive probing packets (with ``PING`` or ``PATH_RESPONSE``). The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than ``1s`` to avoid throttling the connection or flooding the peer with probes. + // + // If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing. + // + // If zero, disable keepalive probing. + // If absent, use the QUICHE default interval to probe. + google.protobuf.Duration max_interval = 1; + + // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. + // + // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. + // + // If absent, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. + google.protobuf.Duration initial_interval = 2 [(validate.rules).duration = { + lte {} + gte {nanos: 1000000} + }]; +} + +// QUIC protocol options which apply to both downstream and upstream connections. +// [#next-free-field: 12] +message QuicProtocolOptions { + // Config for QUIC connection migration across network interfaces, i.e. cellular to WIFI, upon + // network change events from the platform, i.e. the current network gets + // disconnected, or upon the QUIC detecting a bad connection. After migration, the + // connection may be on a different network other than the default network + // picked by the platform. 
Both iOS and Android will use a default network to interact with the internet, usually prefer unmetered network (WIFI) + // over metered ones (cellular). And users can specify which network to be used as the default. A connection on non-default network is only allowed to + // serve new requests for a certain period of time before being drained, and + // meanwhile, QUIC will try to migrate to the default network if possible. + message ConnectionMigrationSettings { + // Config for options to migrate idle connections which aren't serving any requests. + message MigrateIdleConnectionSettings { + // If idle connections are allowed to be migrated, only migrate the connection + // if it hasn't been idle for longer than this idle period. Otherwise, the + // connection will be closed instead. + // Default to 30s. + google.protobuf.Duration max_idle_time_before_migration = 1 + [(validate.rules).duration = {gte {seconds: 1}}]; + } + + // Config whether and how to migrate idle connections. + // If absent, idle connections will not be migrated but be closed upon + // migration signals. + MigrateIdleConnectionSettings migrate_idle_connections = 1; + + // After migrating to a non-default network interface, the connection will + // only be allowed to stay on that network for up to this period of time before + // being drained unless it migrates to the default network or that network + // gets picked as the default by the device by then. + // Default to 128s. + google.protobuf.Duration max_time_on_non_default_network = 2 + [(validate.rules).duration = {gte {seconds: 1}}]; + } + + // Maximum number of streams that the client can negotiate per connection. ``100`` + // if not specified. + google.protobuf.UInt32Value max_concurrent_streams = 1 [(validate.rules).uint32 = {gte: 1}]; + + // `Initial stream-level flow-control receive window + // `_ size. 
Valid values range from + // ``1`` to ``16777216`` (``2^24``, maximum supported by QUICHE) and defaults to ``16777216`` (``16 * 1024 * 1024``). + // + // .. note:: + // + // ``16384`` (``2^14``) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use + // ``16384`` instead. QUICHE IETF QUIC implementation supports ``1`` byte window. We only support increasing the default + // window size now, so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the stream buffers. + google.protobuf.UInt32Value initial_stream_window_size = 2 + [(validate.rules).uint32 = {lte: 16777216 gte: 1}]; + + // Similar to ``initial_stream_window_size``, but for connection-level + // flow-control. Valid values range from ``1`` to ``25165824`` (``24MB``, maximum supported by QUICHE) and defaults + // to ``25165824`` (``24 * 1024 * 1024``). + // + // .. note:: + // + // ``16384`` (``2^14``) is the minimum window size supported in Google QUIC. We only support increasing the default + // window size now, so it's also the minimum. + // + google.protobuf.UInt32Value initial_connection_window_size = 3 + [(validate.rules).uint32 = {lte: 25165824 gte: 1}]; + + // The number of timeouts that can occur before port migration is triggered for QUIC clients. + // This defaults to ``4``. If set to ``0``, port migration will not occur on path degrading. + // Timeout here refers to QUIC internal path degrading timeout mechanism, such as ``PTO``. + // This has no effect on server sessions. + google.protobuf.UInt32Value num_timeouts_to_trigger_port_migration = 4 + [(validate.rules).uint32 = {lte: 5 gte: 0}]; + + // Probes the peer at the configured interval to solicit traffic, i.e. 
``ACK`` or ``PATH_RESPONSE``, from the peer to push back connection idle timeout. + // If absent, use the default keepalive behavior of which a client connection sends ``PING``s every ``15s``, and a server connection doesn't do anything. + QuicKeepAliveSettings connection_keepalive = 5; + + // A comma-separated list of strings representing QUIC connection options defined in + // `QUICHE `_ and to be sent by upstream connections. + string connection_options = 6; + + // A comma-separated list of strings representing QUIC client connection options defined in + // `QUICHE `_ and to be sent by upstream connections. + string client_connection_options = 7; + + // The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE + // default ``600s`` will be applied. + // For internal corporate network, a long timeout is often fine. + // But for client facing network, ``30s`` is usually a good choice. + // Do not add an upper bound here. A long idle timeout is useful for maintaining warm connections at non-front-line proxy for low QPS services. + google.protobuf.Duration idle_network_timeout = 8 + [(validate.rules).duration = {gte {seconds: 1}}]; + + // Maximum packet length for QUIC connections. It refers to the largest size of a QUIC packet that can be transmitted over the connection. + // If not specified, one of the `default values in QUICHE `_ is used. + google.protobuf.UInt64Value max_packet_length = 9; + + // A customized UDP socket and a QUIC packet writer using the socket for + // client connections. i.e. Mobile uses its own implementation to interact + // with platform socket APIs. + // If not present, the default platform-independent socket and writer will be used. + // [#extension-category: envoy.quic.client_packet_writer] + TypedExtensionConfig client_packet_writer = 10; + + // Enable QUIC `connection migration + // ` + // to a different network interface when the current network is degrading or + // has become bad. 
+ // In order to use a different network interface other than the platform's default one, + // a customized :ref:`client_packet_writer ` needs to be configured to + // create UDP sockets on non-default networks. + // Only takes effect when runtime key ``envoy.reloadable_features.use_migration_in_quiche`` is true. + // If absent, the feature will be disabled. + // [#not-implemented-hide:] + ConnectionMigrationSettings connection_migration = 11; +} + +message UpstreamHttpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.UpstreamHttpProtocolOptions"; + + // Set transport socket `SNI `_ for new + // upstream connections based on the downstream HTTP host/authority header or any other arbitrary + // header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + // + // See :ref:`SNI configuration ` for details on how this + // interacts with other validation options. + bool auto_sni = 1; + + // Automatic validate upstream presented certificate for new upstream connections based on the + // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + // is set, as seen by the :ref:`router filter `. + // This field is intended to be set with ``auto_sni`` field. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + // + // See :ref:`validation configuration ` for how this interacts with + // other validation options. + bool auto_san_validation = 2; + + // An optional alternative to the host/authority header to be used for setting the SNI value. + // It should be a valid downstream HTTP header, as seen by the + // :ref:`router filter `. + // If unset, host/authority header will be used for populating the SNI. 
If the specified header + // is not found or the value is empty, host/authority header will be used instead. + // This field is intended to be set with ``auto_sni`` and/or ``auto_san_validation`` fields. + // If none of these fields are set then setting this would be a no-op. + // Does nothing if a filter before the http router filter sets the corresponding metadata. + string override_auto_sni_header = 3 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; +} + +// Configures the alternate protocols cache which tracks alternate protocols that can be used to +// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for +// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 +// for the "HTTPS" DNS resource record. +// [#next-free-field: 6] +message AlternateProtocolsCacheOptions { + // Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime. + // This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not + // advertised HTTP/3 support. These entries will be overwritten by alt-svc + // response headers or cached values. + // As with regular cached entries, if the origin response would result in clearing an existing + // alternate protocol cache entry, pre-populated entries will also be cleared. + // Adding a cache entry with hostname=foo.com port=123 is the equivalent of getting + // response headers + // alt-svc: h3=:"123"; ma=86400" in a response to a request to foo.com:123 + message AlternateProtocolsCacheEntry { + // The host name for the alternate protocol entry. + string hostname = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; + + // The port for the alternate protocol entry. + uint32 port = 2 [(validate.rules).uint32 = {lt: 65535 gt: 0}]; + } + + // The name of the cache. 
Multiple named caches allow independent alternate protocols cache + // configurations to operate within a single Envoy process using different configurations. All + // alternate protocols cache options with the same name *must* be equal in all fields when + // referenced from different configuration components. Configuration will fail to load if this is + // not the case. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The maximum number of entries that the cache will hold. If not specified defaults to ``1024``. + // + // .. note:: + // + // The implementation is approximate and enforced independently on each worker thread, thus + // it is possible for the maximum entries in the cache to go slightly above the configured + // value depending on timing. This is similar to how other circuit breakers work. + google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; + + // Allows configuring a persistent + // :ref:`key value store ` to flush + // alternate protocols entries to disk. + // This function is currently only supported if concurrency is 1 + // Cached entries will take precedence over pre-populated entries below. + TypedExtensionConfig key_value_store_config = 3; + + // Allows pre-populating the cache with entries, as described above. + repeated AlternateProtocolsCacheEntry prepopulated_entries = 4; + + // Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if + // this list contained the value ``.c.example.com``, then an Alt-Svc entry for ``foo.c.example.com`` + // could be shared with ``bar.c.example.com`` but would not be shared with ``baz.example.com``. On + // the other hand, if the list contained the value ``.example.com`` then all three hosts could share + // Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the + // first listed suffix will be used. 
+ // + // Since lookup in this list is O(n), it is recommended that the number of suffixes be limited. + // [#not-implemented-hide:] + repeated string canonical_suffixes = 5; +} + +// [#next-free-field: 8] +message HttpProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.HttpProtocolOptions"; + + // Action to take when Envoy receives client request with header names containing underscore + // characters. + // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + // characters. + enum HeadersWithUnderscoresAction { + // Allow headers with underscores. This is the default behavior. + ALLOW = 0; + + // Reject client request. HTTP/1 requests are rejected with ``HTTP 400`` status. HTTP/2 requests + // end with the stream reset. The ``httpN.requests_rejected_with_underscores_in_headers`` counter + // is incremented for each rejected request. + REJECT_REQUEST = 1; + + // Drop the client header with name containing underscores. The header is dropped before the filter chain is + // invoked and as such filters will not see dropped headers. The + // ``httpN.dropped_headers_with_underscores`` is incremented for each dropped header. + DROP_HEADER = 2; + } + + // The idle timeout for connections. The idle timeout is defined as the + // period in which there are no active requests. When the + // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 + // downstream connection a drain sequence will occur prior to closing the connection, see + // :ref:`drain_timeout + // `. + // + // .. note:: + // + // Request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + // + // If not specified, this defaults to ``1 hour``. To disable idle timeouts explicitly set this to ``0``. + // + // .. 
warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled for downstream connections according to the value for + // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. + google.protobuf.Duration idle_timeout = 1; + + // The maximum duration of a connection. The duration is defined as a period since a connection + // was established. If not set, there is no max duration. When max_connection_duration is reached, + // the drain sequence will kick-in. The connection will be closed after the drain timeout period + // if there are no active streams. See :ref:`drain_timeout + // `. + google.protobuf.Duration max_connection_duration = 3; + + // The maximum number of headers (request headers if configured on HttpConnectionManager, + // response headers when configured on a cluster). + // If unconfigured, the default maximum number of headers allowed is ``100``. + // The default value for requests can be overridden by setting runtime key ``envoy.reloadable_features.max_request_headers_count``. + // The default value for responses can be overridden by setting runtime key ``envoy.reloadable_features.max_response_headers_count``. + // Downstream requests that exceed this limit will receive a ``HTTP 431`` response for HTTP/1.x and cause a stream + // reset for HTTP/2. + // Upstream responses that exceed this limit will result in a ``HTTP 502`` response. + google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; + + // The maximum size of response headers. + // If unconfigured, the default is ``60 KiB``, except for HTTP/1 response headers which have a default + // of ``80 KiB``. + // The default value can be overridden by setting runtime key ``envoy.reloadable_features.max_response_headers_size_kb``. 
+ // Responses that exceed this limit will result in a ``HTTP 503`` response. + // In Envoy, this setting is only valid when configured on an upstream cluster, not on the + // :ref:`HTTP Connection Manager + // `. + // + // .. note:: + // + // Currently some protocol codecs impose limits on the maximum size of a single header. + // + // * HTTP/2 (when using ``nghttp2``) limits a single header to around ``100kb``. + // * HTTP/3 limits a single header to around ``1024kb``. + // + google.protobuf.UInt32Value max_response_headers_kb = 7 + [(validate.rules).uint32 = {lte: 8192 gt: 0}]; + + // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + // reset independent of any other timeouts. If not specified, this value is not set. + google.protobuf.Duration max_stream_duration = 4; + + // Action to take when a client request with a header name containing underscore characters is received. + // If this setting is not specified, the value defaults to ``ALLOW``. + // + // .. note:: + // + // Upstream responses are not affected by this setting. + // + // .. note:: + // + // This only affects client headers. It does not affect headers added by Envoy filters and does not have any + // impact if added to cluster config. + HeadersWithUnderscoresAction headers_with_underscores_action = 5; + + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to ``1`` will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
+ google.protobuf.UInt32Value max_requests_per_connection = 6; +} + +// [#next-free-field: 12] +message Http1ProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.Http1ProtocolOptions"; + + // [#next-free-field: 9] + message HeaderKeyFormat { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat"; + + message ProperCaseWords { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; + } + + oneof header_format { + option (validate.required) = true; + + // Formats the header by proper casing words: the first character and any character following + // a special character will be capitalized if it's an alpha character. For example, + // ``"content-type"`` becomes ``"Content-Type"``, and ``"foo$b#$are"`` becomes ``"Foo$B#$Are"``. + // + // .. note:: + // + // While this results in most headers following conventional casing, certain headers + // are not covered. For example, the ``"TE"`` header will be formatted as ``"Te"``. + ProperCaseWords proper_case_words = 1; + + // Configuration for stateful formatter extensions that allow using received headers to + // affect the output of encoding headers. E.g., preserving case during proxying. + // [#extension-category: envoy.http.stateful_header_formatters] + TypedExtensionConfig stateful_formatter = 8; + } + } + + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + // ``http_proxy`` environment variable. + google.protobuf.BoolValue allow_absolute_url = 1; + + // Handle incoming HTTP/1.0 and HTTP/0.9 requests. + // This is off by default, and not fully standards compliant. 
There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // ``default_host_for_http_10`` is configured. + bool accept_http_10 = 2; + + // A default host for HTTP/1.0 requests. This is highly suggested if ``accept_http_10`` is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. + // This is a no-op if ``accept_http_10`` is not true. + string default_host_for_http_10 = 3; + + // Describes how the keys for response headers should be formatted. By default, all header keys + // are lower cased. + HeaderKeyFormat header_key_format = 4; + + // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + // + // .. attention:: + // + // This only happens when Envoy is chunk encoding which occurs when: + // - The request is HTTP/1.1. + // - Is neither a ``HEAD`` only request nor a HTTP Upgrade. + // - Not a response to a ``HEAD`` request. + // - The ``Content-Length`` header is not present. + bool enable_trailers = 5; + + // Allows Envoy to process requests/responses with both ``Content-Length`` and ``Transfer-Encoding`` + // headers set. By default such messages are rejected, but if option is enabled - Envoy will + // remove ``Content-Length`` header and process message. + // See `RFC7230, sec. 3.3.3 `_ for details. + // + // .. attention:: + // + // Enabling this option might lead to request smuggling vulnerability, especially if traffic + // is proxied via multiple layers of proxies. + // [#comment:TODO: This field is ignored when the + // :ref:`header validation configuration ` + // is present.] + bool allow_chunked_length = 6; + + // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate + // HTTP/1.1 connections upon receiving an invalid HTTP message. However, + // when this option is true, then Envoy will leave the HTTP/1.1 connection + // open where possible. 
+ // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // `. + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; + + // Allows sending fully qualified URLs when proxying the first line of the + // response. By default, Envoy will only send the path components in the first line. + // If this is true, Envoy will create a fully qualified URI composing scheme + // (inferred if not present), host (from the host/:authority header) and path + // (from first line or :path header). + bool send_fully_qualified_url = 8; + + // [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out. + // If set, force HTTP/1 parser: BalsaParser if true, http-parser if false. + // If unset, HTTP/1 parser is selected based on + // envoy.reloadable_features.http1_use_balsa_parser. + // See issue #21245. + google.protobuf.BoolValue use_balsa_parser = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // [#not-implemented-hide:] Hiding so that field can be removed. + // If true, and BalsaParser is used (either `use_balsa_parser` above is true, + // or `envoy.reloadable_features.http1_use_balsa_parser` is true and + // `use_balsa_parser` is unset), then every non-empty method with only valid + // characters is accepted. Otherwise, methods not on the hard-coded list are + // rejected. + // Once UHV is enabled, this field should be removed, and BalsaParser should + // allow any method. UHV validates the method, rejecting empty string or + // invalid characters, and provides :ref:`restrict_http_methods + // ` + // to reject custom methods. + bool allow_custom_methods = 10 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // Ignore HTTP/1.1 upgrade values matching any of the supplied matchers. + // + // .. note:: + // + // ``h2c`` upgrades are always removed for backwards compatibility, regardless of the + // value in this setting. 
+ repeated type.matcher.v3.StringMatcher ignore_http_11_upgrade = 11; +} + +message KeepaliveSettings { + // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + // If this is zero, interval PINGs will not be sent. + google.protobuf.Duration interval = 1 [(validate.rules).duration = {gte {nanos: 1000000}}]; + + // How long to wait for a response to a keepalive PING. If a response is not received within this + // time period, the connection will be aborted. + // + // .. note:: + // + // In order to prevent the influence of Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on + // the connection, under the assumption that if a frame is received the connection is healthy. + google.protobuf.Duration timeout = 2 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; + + // A random jitter amount as a percentage of interval that will be added to each interval. + // A value of zero means there will be no jitter. + // The default value is ``15%``. + type.v3.Percent interval_jitter = 3; + + // If the connection has been idle for this duration, send a HTTP/2 ping ahead + // of new stream creation, to quickly detect dead connections. + // If this is zero, this type of PING will not be sent. + // If an interval ping is outstanding, a second ping will not be sent as the + // interval ping will determine if the connection is dead. + // + // The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. + google.protobuf.Duration connection_idle_interval = 4 + [(validate.rules).duration = {gte {nanos: 1000000}}]; +} + +// [#next-free-field: 19] +message Http2ProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.Http2ProtocolOptions"; + + // Defines a parameter to be sent in the SETTINGS frame. 
+ // See `RFC7540, sec. 6.5.1 `_ for details. + message SettingsParameter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter"; + + // The 16 bit parameter identifier. + google.protobuf.UInt32Value identifier = 1 [ + (validate.rules).uint32 = {lte: 65535 gte: 0}, + (validate.rules).message = {required: true} + ]; + + // The 32 bit parameter value. + google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; + } + + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from ``0`` to ``4294967295`` (``2^32 - 1``) and defaults to ``4096``. ``0`` effectively disables header + // compression. + google.protobuf.UInt32Value hpack_table_size = 1; + + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from ``1`` to ``2147483647`` (``2^31 - 1``) + // and defaults to ``1024`` for safety and should be sufficient for most use cases. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). + // + // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given + // connection based on upstream settings. Config dumps will reflect the configured upper bound, + // not the per-connection negotiated limits. + google.protobuf.UInt32Value max_concurrent_streams = 2 + [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; + + // `Initial stream-level flow-control window + // `_ size. Valid values range from ``65535`` + // (``2^16 - 1``, HTTP/2 default) to ``2147483647`` (``2^31 - 1``, HTTP/2 maximum) and defaults to + // ``16MiB`` (``16 * 1024 * 1024``). + // + // .. 
note:: + // + // ``65535`` is the initial window size from HTTP/2 spec. We only support increasing the default window size now, + // so it's also the minimum. + // + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. + google.protobuf.UInt32Value initial_stream_window_size = 3 + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; + + // Similar to ``initial_stream_window_size``, but for connection-level flow-control + // window. The default is ``24MiB`` (``24 * 1024 * 1024``). + google.protobuf.UInt32Value initial_connection_window_size = 4 + [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; + + // Allows proxying Websocket and other upgrades over H2 connect. + bool allow_connect = 5; + + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/2 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. + bool allow_metadata = 6; + + // Limit the number of pending outbound downstream frames of all types (frames that are waiting to + // be written into the socket). Exceeding this limit triggers flood mitigation and connection is + // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due + // to flood mitigation. The default limit is ``10000``. + google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of pending outbound downstream frames of types ``PING``, ``SETTINGS`` and ``RST_STREAM``, + // preventing high memory utilization when receiving continuous stream of these frames. Exceeding + // this limit triggers flood mitigation and connection is terminated. 
The + // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood + // mitigation. The default limit is ``1000``. + google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; + + // Limit the number of consecutive inbound frames of types ``HEADERS``, ``CONTINUATION`` and ``DATA`` with an + // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but + // might be a result of a broken HTTP/2 implementation. The ``http2.inbound_empty_frames_flood`` + // stat tracks the number of connections terminated due to flood mitigation. + // Setting this to ``0`` will terminate connection upon receiving first frame with an empty payload + // and no end stream flag. The default limit is ``1``. + google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; + + // Limit the number of inbound ``PRIORITY`` frames allowed per each opened stream. If the number + // of ``PRIORITY`` frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``) + // + // the connection is terminated. For downstream connections the ``opened_streams`` is incremented when + // Envoy receives complete response headers from the upstream server. For upstream connection the + // ``opened_streams`` is incremented when Envoy sends the ``HEADERS`` frame for a new stream. The + // ``http2.inbound_priority_frames_flood`` stat tracks + // the number of connections terminated due to flood mitigation. The default limit is ``100``. + google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; + + // Limit the number of inbound ``WINDOW_UPDATE`` frames allowed per ``DATA`` frame sent. 
If the number + // of ``WINDOW_UPDATE`` frames received over the lifetime of connection exceeds the value calculated + // using this formula:: + // + // ``5 + 2 * (opened_streams + + // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)`` + // + // the connection is terminated. For downstream connections the ``opened_streams`` is incremented when + // Envoy receives complete response headers from the upstream server. For upstream connections the + // ``opened_streams`` is incremented when Envoy sends the ``HEADERS`` frame for a new stream. The + // ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to + // flood mitigation. The default ``max_inbound_window_update_frames_per_data_frame_sent`` value is ``10``. + // Setting this to ``1`` should be enough to support HTTP/2 implementations with basic flow control, + // but more complex implementations that try to estimate available bandwidth require at least ``2``. + google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 + [(validate.rules).uint32 = {gte: 1}]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // iff present. + // + // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + bool stream_error_on_invalid_http_messaging = 12 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. 
However, + // when this option is enabled, only the offending stream is terminated. + // + // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // ` + // + // See `RFC7540, sec. 8.1 `_ for details. + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; + + // [#not-implemented-hide:] + // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: + // + // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by + // Envoy. + // + // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field + // 'allow_connect'. + // + // .. note:: + // + // Custom parameters specified through this field can not also be set in the + // corresponding named parameters: + // + // .. code-block:: text + // + // ID Field Name + // ---------------- + // 0x1 hpack_table_size + // 0x3 max_concurrent_streams + // 0x4 initial_stream_window_size + // + // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies + // between custom parameters with the same identifier will trigger a failure. + // + // See `IANA HTTP/2 Settings + // `_ for + // standardized identifiers. + repeated SettingsParameter custom_settings_parameters = 13; + + // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + // does not respond within the configured timeout, the connection will be aborted. + KeepaliveSettings connection_keepalive = 15; + + // [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. + // If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. + // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. + google.protobuf.BoolValue use_oghttp2_codec = 16 + [(xds.annotations.v3.field_status).work_in_progress = true]; + + // Configure the maximum amount of metadata than can be handled per stream. 
Defaults to ``1 MB``. + google.protobuf.UInt64Value max_metadata_size = 17; + + // Controls whether to encode headers using huffman encoding. + // This can be useful in cases where the cpu spent encoding the headers isn't + // worth the network bandwidth saved e.g. for localhost. + // If unset, uses the data plane's default value. + google.protobuf.BoolValue enable_huffman_encoding = 18; +} + +// [#not-implemented-hide:] +message GrpcProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.core.GrpcProtocolOptions"; + + Http2ProtocolOptions http2_protocol_options = 1; +} + +// A message which allows using HTTP/3. +// [#next-free-field: 9] +message Http3ProtocolOptions { + QuicProtocolOptions quic_protocol_options = 1; + + // Allows invalid HTTP messaging and headers. When this option is disabled (default), then + // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, + // when this option is enabled, only the offending stream is terminated. + // + // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + // `. + google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 2; + + // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + // the header mechanisms from the `HTTP/2 extended connect RFC + // `_ + // and settings `proposed for HTTP/3 + // `_ + // + // .. note:: + // + // HTTP/3 CONNECT is not yet an RFC. + bool allow_extended_connect = 5 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // [#not-implemented-hide:] Hiding until Envoy has full metadata support. + // Still under implementation. DO NOT USE. + // + // Allows sending and receiving HTTP/3 METADATA frames. See [metadata + // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more + // information. + bool allow_metadata = 6; + + // [#not-implemented-hide:] Hiding until Envoy has full HTTP/3 upstream support. 
+ // Still under implementation. DO NOT USE. + // + // Disables QPACK compression related features for HTTP/3 including: + // No huffman encoding, zero dynamic table capacity and no cookie crumbling. + // This can be useful for trading off CPU vs bandwidth when an upstream HTTP/3 connection multiplexes multiple downstream connections. + bool disable_qpack = 7; + + // Disables connection level flow control for HTTP/3 streams. This is useful in situations where the streams share the same connection + // but originate from different end-clients, so that each stream can make progress independently at non-front-line proxies. + bool disable_connection_flow_control_for_streams = 8; +} + +// A message to control transformations to the :scheme header +message SchemeHeaderTransformation { + oneof transformation { + // Overwrite any Scheme header with the contents of this string. + // If set, takes precedence over ``match_upstream``. + string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}]; + } + + // Set the Scheme header to match the upstream transport protocol. For example, should a + // request be sent to the upstream over TLS, the scheme header will be set to ``"https"``. Should the + // request be sent over plaintext, the scheme header will be set to ``"http"``. + // If ``scheme_to_overwrite`` is set, this field is not used. 
+ bool match_upstream = 2; +} diff --git a/proto/envoy/config/core/v3/proxy_protocol.proto b/proto/envoy/config/core/v3/proxy_protocol.proto new file mode 100644 index 0000000..2da5fe5 --- /dev/null +++ b/proto/envoy/config/core/v3/proxy_protocol.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/substitution_format_string.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ProxyProtocolProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Proxy protocol] + +message ProxyProtocolPassThroughTLVs { + enum PassTLVsMatchType { + // Pass all TLVs. + INCLUDE_ALL = 0; + + // Pass specific TLVs defined in tlv_type. + INCLUDE = 1; + } + + // The strategy to pass through TLVs. Default is INCLUDE_ALL. + // If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field. + PassTLVsMatchType match_type = 1; + + // The TLV types that are applied based on match_type. + // TLV type is defined as uint8_t in proxy protocol. See `the spec + // `_ for details. + repeated uint32 tlv_type = 2 [(validate.rules).repeated = {items {uint32 {lt: 256}}}]; +} + +// Represents a single Type-Length-Value (TLV) entry. +message TlvEntry { + // The type of the TLV. Must be a uint8 (0-255) as per the Proxy Protocol v2 specification. + uint32 type = 1 [(validate.rules).uint32 = {lt: 256}]; + + // The static value of the TLV. + // Only one of ``value`` or ``format_string`` may be set. + bytes value = 2; + + // Uses the :ref:`format string ` to dynamically + // populate the TLV value from stream information. 
This allows dynamic values + // such as metadata, filter state, or other stream properties to be included in + // the TLV. + // + // For example: + // + // .. code-block:: yaml + // + // type: 0xF0 + // format_string: + // text_format_source: + // inline_string: "%DYNAMIC_METADATA(envoy.filters.network:key)%" + // + // The formatted string will be used directly as the TLV value. + // Only one of ``value`` or ``format_string`` may be set. + SubstitutionFormatString format_string = 3; +} + +message ProxyProtocolConfig { + enum Version { + // PROXY protocol version 1. Human readable format. + V1 = 0; + + // PROXY protocol version 2. Binary format. + V2 = 1; + } + + // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + Version version = 1; + + // This config controls which TLVs can be passed to upstream if it is Proxy Protocol + // V2 header. If there is no setting for this field, no TLVs will be passed through. + ProxyProtocolPassThroughTLVs pass_through_tlvs = 2; + + // This config allows additional TLVs to be included in the upstream PROXY protocol + // V2 header. Unlike ``pass_through_tlvs``, which passes TLVs from the downstream request, + // ``added_tlvs`` provides an extension mechanism for defining new TLVs that are included + // with the upstream request. These TLVs may not be present in the downstream request and + // can be defined at either the transport socket level or the host level to provide more + // granular control over the TLVs that are included in the upstream request. + // + // Host-level TLVs are specified in the ``metadata.typed_filter_metadata`` field under the + // ``envoy.transport_sockets.proxy_protocol`` namespace. + // + // .. 
literalinclude:: /_configs/repo/proxy_protocol.yaml + // :language: yaml + // :lines: 49-57 + // :linenos: + // :lineno-start: 49 + // :caption: :download:`proxy_protocol.yaml ` + // + // **Precedence behavior**: + // + // - When a TLV is defined at both the host level and the transport socket level, the value + // from the host level configuration takes precedence. This allows users to define default TLVs + // at the transport socket level and override them at the host level. + // - Any TLV defined in the ``pass_through_tlvs`` field will be overridden by either the host-level + // or transport socket-level TLV. + // + // If there are multiple TLVs with the same type, only the TLVs from the highest precedence level + // will be used. + repeated TlvEntry added_tlvs = 3; +} + +message PerHostConfig { + // Enables per-host configuration for Proxy Protocol. + repeated TlvEntry added_tlvs = 1; +} diff --git a/proto/envoy/config/core/v3/resolver.proto b/proto/envoy/config/core/v3/resolver.proto new file mode 100644 index 0000000..f4d103a --- /dev/null +++ b/proto/envoy/config/core/v3/resolver.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/address.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "ResolverProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Resolver] + +// Configuration of DNS resolver option flags which control the behavior of the DNS resolver. +message DnsResolverOptions { + // Use TCP for all DNS queries instead of the default protocol UDP. + bool use_tcp_for_dns_lookups = 1; + + // Do not use the default search domains; only query hostnames as-is or as aliases. 
+ bool no_default_search_domain = 2; +} + +// DNS resolution configuration which includes the underlying dns resolver addresses and options. +message DnsResolutionConfig { + // A list of dns resolver addresses. If specified, the DNS client library will perform resolution + // via the underlying DNS resolvers. Otherwise, the default system resolvers + // (e.g., /etc/resolv.conf) will be used. + repeated Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + DnsResolverOptions dns_resolver_options = 2; +} diff --git a/proto/envoy/config/core/v3/socket_cmsg_headers.proto b/proto/envoy/config/core/v3/socket_cmsg_headers.proto new file mode 100644 index 0000000..cc3e58e --- /dev/null +++ b/proto/envoy/config/core/v3/socket_cmsg_headers.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SocketCmsgHeadersProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Socket CMSG headers] + +// Configuration for socket cmsg headers. +// See `:ref:CMSG `_ for further information. +message SocketCmsgHeaders { + // cmsg level. Default is unset. + google.protobuf.UInt32Value level = 1; + + // cmsg type. Default is unset. + google.protobuf.UInt32Value type = 2; + + // Expected size of cmsg value. Default is zero. 
+ uint32 expected_size = 3; +} diff --git a/proto/envoy/config/core/v3/socket_option.proto b/proto/envoy/config/core/v3/socket_option.proto new file mode 100644 index 0000000..ad73d72 --- /dev/null +++ b/proto/envoy/config/core/v3/socket_option.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SocketOptionProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Socket option] + +// Generic socket option message. This would be used to set socket options that +// might not exist in upstream kernels or precompiled Envoy binaries. +// +// For example: +// +// .. code-block:: json +// +// { +// "description": "support tcp keep alive", +// "state": 0, +// "level": 1, +// "name": 9, +// "int_value": 1, +// } +// +// 1 means SOL_SOCKET and 9 means SO_KEEPALIVE on Linux. +// With the above configuration, `TCP Keep-Alives `_ +// can be enabled in socket with Linux, which can be used in +// :ref:`listener's` or +// :ref:`admin's ` socket_options etc. +// +// It should be noted that the name or level may have different values on different platforms. 
+// [#next-free-field: 8] +message SocketOption { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketOption"; + + enum SocketState { + // Socket options are applied after socket creation but before binding the socket to a port + STATE_PREBIND = 0; + + // Socket options are applied after binding the socket to a port but before calling listen() + STATE_BOUND = 1; + + // Socket options are applied after calling listen() + STATE_LISTENING = 2; + } + + // The `socket type `_ to apply the socket option to. + // Only one field should be set. If multiple fields are set, the precedence order will determine + // the selected one. If none of the fields is set, the socket option will be applied to all socket types. + // + // For example: + // If :ref:`stream ` is set, + // it takes precedence over :ref:`datagram `. + message SocketType { + // The stream socket type. + message Stream { + } + + // The datagram socket type. + message Datagram { + } + + // Apply the socket option to the stream socket type. + Stream stream = 1; + + // Apply the socket option to the datagram socket type. + Datagram datagram = 2; + } + + // An optional name to give this socket option for debugging, etc. + // Uniqueness is not required and no special meaning is assumed. + string description = 1; + + // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + int64 level = 2; + + // The numeric name as passed to setsockopt + int64 name = 3; + + oneof value { + option (validate.required) = true; + + // Because many sockopts take an int value. + int64 int_value = 4; + + // Otherwise it's a byte buffer. + bytes buf_value = 5; + } + + // The state in which the option will be applied. When used in BindConfig + // STATE_PREBIND is currently the only valid value. + SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; + + // Apply the socket option to the specified `socket type `_. 
+ // If not specified, the socket option will be applied to all socket types. + SocketType type = 7; +} + +message SocketOptionsOverride { + repeated SocketOption socket_options = 1; +} diff --git a/proto/envoy/config/core/v3/substitution_format_string.proto b/proto/envoy/config/core/v3/substitution_format_string.proto new file mode 100644 index 0000000..3edbf5f --- /dev/null +++ b/proto/envoy/config/core/v3/substitution_format_string.proto @@ -0,0 +1,136 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; + +import "google/protobuf/struct.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "SubstitutionFormatStringProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Substitution format string] + +// Optional configuration options to be used with json_format. +message JsonFormatOptions { + // The output JSON string properties will be sorted. + // + // .. note:: + // As the properties are always sorted, this option has no effect and is deprecated. + // + bool sort_properties = 1 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; +} + +// Configuration to use multiple :ref:`command operators ` +// to generate a new string in either plain text or JSON format. +// [#next-free-field: 8] +message SubstitutionFormatString { + oneof format { + option (validate.required) = true; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // For example, setting ``text_format`` like below, + // + // .. 
validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // generates plain text similar to: + // + // .. code-block:: text + // + // upstream connect error:503:path=/foo + // + // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. + string text_format = 1 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Specify a format with command operators to form a JSON string. + // Its details is described in :ref:`format dictionary`. + // Values are rendered as strings, numbers, or boolean values as appropriate. + // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + // See the documentation for a specific command operator for details. + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // json_format: + // status: "%RESPONSE_CODE%" + // message: "%LOCAL_REPLY_BODY%" + // + // The following JSON object would be created: + // + // .. code-block:: json + // + // { + // "status": 500, + // "message": "My error message" + // } + // + google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}]; + + // Specify a format with command operators to form a text string. + // Its details is described in :ref:`format string`. + // + // For example, setting ``text_format`` like below, + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // text_format_source: + // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + // + // generates plain text similar to: + // + // .. 
code-block:: text + // + // upstream connect error:503:path=/foo + // + DataSource text_format_source = 5; + } + + // If set to true, when command operators are evaluated to null, + // + // * for ``text_format``, the output of the empty operator is changed from ``-`` to an + // empty string, so that empty values are omitted entirely. + // * for ``json_format`` the keys with null values are omitted in the output structure. + // + // .. note:: + // This option does not work perfectly with ``json_format`` as keys with ``null`` values + // will still be included in the output. See https://github.com/envoyproxy/envoy/issues/37941 + // for more details. + // + bool omit_empty_values = 3; + + // Specify a ``content_type`` field. + // If this field is not set then ``text/plain`` is used for ``text_format`` and + // ``application/json`` is used for ``json_format``. + // + // .. validated-code-block:: yaml + // :type-name: envoy.config.core.v3.SubstitutionFormatString + // + // content_type: "text/html; charset=UTF-8" + // + string content_type = 4 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Specifies a collection of Formatter plugins that can be called from the access log configuration. + // See the formatters extensions documentation for details. + // [#extension-category: envoy.formatter] + repeated TypedExtensionConfig formatters = 6; + + // If json_format is used, the options will be applied to the output JSON string. 
+  JsonFormatOptions json_format_options = 7; +} diff --git a/proto/envoy/config/core/v3/udp_socket_config.proto b/proto/envoy/config/core/v3/udp_socket_config.proto new file mode 100644 index 0000000..ec9f77f --- /dev/null +++ b/proto/envoy/config/core/v3/udp_socket_config.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package envoy.config.core.v3; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.core.v3"; +option java_outer_classname = "UdpSocketConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: UDP socket config] + +// Generic UDP socket configuration. +message UdpSocketConfig { + // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate + // more memory per socket. Received datagrams above this size will be dropped. If not set + // defaults to 1500 bytes. + google.protobuf.UInt64Value max_rx_datagram_size = 1 + [(validate.rules).uint64 = {lt: 65536 gt: 0}]; + + // Configures whether Generic Receive Offload (GRO) + // <https://en.wikipedia.org/wiki/Large_receive_offload>_ is preferred when reading from the + // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. + // This option affects performance but not functionality. If GRO is not supported by the operating + // system, non-GRO receive will be used. + google.protobuf.BoolValue prefer_gro = 2; +} diff --git a/proto/envoy/config/endpoint/v3/BUILD b/proto/envoy/config/endpoint/v3/BUILD new file mode 100644 index 0000000..c379ae0 --- /dev/null +++ b/proto/envoy/config/endpoint/v3/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + "@com_github_cncf_xds//xds/annotations/v3:pkg", + "@com_github_cncf_xds//xds/core/v3:pkg", + ], +) diff --git a/proto/envoy/config/endpoint/v3/endpoint.proto b/proto/envoy/config/endpoint/v3/endpoint.proto new file mode 100644 index 0000000..a149f60 --- /dev/null +++ b/proto/envoy/config/endpoint/v3/endpoint.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package envoy.config.endpoint.v3; + +import "envoy/config/endpoint/v3/endpoint_components.proto"; +import "envoy/type/v3/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; +option java_outer_classname = "EndpointProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3;endpointv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Endpoint configuration] +// Endpoint discovery :ref:`architecture overview ` + +// Each route from RDS will map to a single cluster or traffic split across +// clusters using weights expressed in the RDS WeightedCluster. +// +// With EDS, each cluster is treated independently from a LB perspective, with +// LB taking place between the Localities within a cluster and at a finer +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. 
First, a locality will be selected, +// then an endpoint within that locality will be chosen based on its weight. +// [#next-free-field: 6] +message ClusterLoadAssignment { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment"; + + // Load balancing policy settings. + // [#next-free-field: 7] + message Policy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.ClusterLoadAssignment.Policy"; + + message DropOverload { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload"; + + // Identifier for the policy specifying the drop. + string category = 1 [(validate.rules).string = {min_len: 1}]; + + // Percentage of traffic that should be dropped for the category. + type.v3.FractionalPercent drop_percentage = 2; + } + + reserved 1, 5; + + reserved "disable_overprovisioning"; + + // Action to trim the overall incoming traffic to protect the upstream + // hosts. This action allows protection in case the hosts are unable to + // recover from an outage, or unable to autoscale or unable to handle + // incoming traffic volume for any reason. + // + // At the client each category is applied one after the other to generate + // the 'actual' drop percentage on all outgoing traffic. For example: + // + // .. code-block:: json + // + // { "drop_overloads": [ + // { "category": "throttle", "drop_percentage": 60 } + // { "category": "lb", "drop_percentage": 50 } + // ]} + // + // The actual drop percentages applied to the traffic at the clients will be + // "throttle"_drop = 60% + // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. + // actual_outgoing_load = 20% // remaining after applying all categories. + // + // Envoy supports only one element and will NACK if more than one element is present. + // Other xDS-capable data planes will not necessarily have this limitation. 
+ // + // In Envoy, this ``drop_overloads`` config can be overridden by a runtime key + // "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to + // any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%. + // When both ``drop_overloads`` config and "load_balancing_policy.drop_overload_limit" + // setting are in place, the min of these two wins. + repeated DropOverload drop_overloads = 2; + + // Priority levels and localities are considered overprovisioned with this + // factor (in percentage). This means that we don't consider a priority + // level or locality unhealthy until the fraction of healthy hosts + // multiplied by the overprovisioning factor drops below 100. + // With the default value 140(1.4), Envoy doesn't consider a priority level + // or a locality unhealthy until their percentage of healthy hosts drops + // below 72%. For example: + // + // .. code-block:: json + // + // { "overprovisioning_factor": 100 } + // + // Read more at :ref:`priority levels ` and + // :ref:`localities `. + google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; + + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; + + // If true, use the :ref:`load balancing weight + // ` of healthy and unhealthy + // hosts to determine the health of the priority level. If false, use the number of healthy and unhealthy hosts + // to determine the health of the priority level, or in other words assume each host has a weight of 1 for + // this calculation. + // + // .. note:: + // This is not currently implemented for + // :ref:`locality weighted load balancing `. 
+    bool weighted_priority_health = 6; + } + + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; + + // List of endpoints to load balance to. + repeated LocalityLbEndpoints endpoints = 2; + + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + // [#not-implemented-hide:] + map<string, Endpoint> named_endpoints = 5; + + // Load balancing policy settings. + Policy policy = 4; +} diff --git a/proto/envoy/config/endpoint/v3/endpoint_components.proto b/proto/envoy/config/endpoint/v3/endpoint_components.proto new file mode 100644 index 0000000..eacc555 --- /dev/null +++ b/proto/envoy/config/endpoint/v3/endpoint_components.proto @@ -0,0 +1,229 @@ +syntax = "proto3"; + +package envoy.config.endpoint.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/health_check.proto"; + +import "google/protobuf/wrappers.proto"; + +import "xds/core/v3/collection_entry.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; +option java_outer_classname = "EndpointComponentsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3;endpointv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Endpoints] + +// Upstream host identifier. +message Endpoint { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.Endpoint"; + + // The optional health check configuration. 
+ message HealthCheckConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.endpoint.Endpoint.HealthCheckConfig"; + + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + + // By default, the host header for L7 health checks is controlled by cluster level configuration + // (see: :ref:`host ` and + // :ref:`authority `). Setting this + // to a non-empty value allows overriding the cluster level configuration for a specific + // endpoint. + string hostname = 2; + + // Optional alternative health check host address. + // + // .. attention:: + // + // The form of the health check host address is expected to be a direct IP address. + core.v3.Address address = 3; + + // Optional flag to control if perform active health check for this endpoint. + // Active health check is enabled by default if there is a health checker. + bool disable_active_health_check = 4; + } + + message AdditionalAddress { + // Additional address that is associated with the endpoint. + core.v3.Address address = 1; + } + + // The upstream host address. + // + // .. attention:: + // + // The form of host address depends on the given cluster type. For STATIC or EDS, + // it is expected to be a direct IP address (or something resolvable by the + // specified :ref:`resolver ` + // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, + // and will be resolved via DNS. + core.v3.Address address = 1; + + // The optional health check configuration is used as configuration for the + // health checker to contact the health checked host. + // + // .. 
attention:: + // + // This takes into effect only for upstream clusters with + // :ref:`active health checking ` enabled. + HealthCheckConfig health_check_config = 2; + + // The hostname associated with this endpoint. This hostname is not used for routing or address + // resolution. If provided, it will be associated with the endpoint, and can be used for features + // that require a hostname, like + // :ref:`auto_host_rewrite `. + string hostname = 3; + + // An ordered list of addresses that together with ``address`` comprise the + // list of addresses for an endpoint. The address given in the ``address`` is + // prepended to this list. It is assumed that the list must already be + // sorted by preference order of the addresses. This will only be supported + // for STATIC and EDS clusters. + repeated AdditionalAddress additional_addresses = 4; +} + +// An Endpoint that Envoy can route traffic to. +// [#next-free-field: 6] +message LbEndpoint { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.LbEndpoint"; + + // Upstream host identifier or a named reference. + oneof host_identifier { + Endpoint endpoint = 1; + + // [#not-implemented-hide:] + string endpoint_name = 5; + } + + // Optional health status when known and supplied by EDS server. + core.v3.HealthStatus health_status = 2; + + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as ``envoy.lb``. An example boolean key-value pair + // is ``canary``, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + core.v3.Metadata metadata = 3; + + // The optional load balancing weight of the upstream host; at least 1. 
+ // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, will be treated as 1. The sum + // of the weights of all endpoints in the endpoint's locality must not + // exceed uint32_t maximal value (4294967295). + google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; +} + +// LbEndpoint list collection. Entries are `LbEndpoint` resources or references. +// [#not-implemented-hide:] +message LbEndpointCollection { + xds.core.v3.CollectionEntry entries = 1; +} + +// A configuration for an LEDS collection. +message LedsClusterLocalityConfig { + // Configuration for the source of LEDS updates for a Locality. + core.v3.ConfigSource leds_config = 1; + + // The name of the LbEndpoint collection resource. + // + // If the name ends in ``/*``, it indicates an LbEndpoint glob collection, + // which is supported only in the xDS incremental protocol variants. + // Otherwise, it indicates an LbEndpointCollection list collection. + // + // Envoy currently supports only glob collections. + string leds_collection_name = 2; +} + +// A group of endpoints belonging to a Locality. +// One can have multiple LocalityLbEndpoints for a locality, but only if +// they have different priorities. +// [#next-free-field: 10] +message LocalityLbEndpoints { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.endpoint.LocalityLbEndpoints"; + + // [#not-implemented-hide:] + // A list of endpoints of a specific locality. + message LbEndpointList { + repeated LbEndpoint lb_endpoints = 1; + } + + // Identifies location of where the upstream hosts run. 
+ core.v3.Locality locality = 1; + + // Metadata to provide additional information about the locality endpoints in aggregate. + core.v3.Metadata metadata = 9; + + // The group of endpoints belonging to the locality specified. + // This is ignored if :ref:`leds_cluster_locality_config + // ` is set. + repeated LbEndpoint lb_endpoints = 2; + + oneof lb_config { + // [#not-implemented-hide:] + // Not implemented and deprecated. + LbEndpointList load_balancer_endpoints = 7 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // LEDS Configuration for the current locality. + // If this is set, the :ref:`lb_endpoints + // ` + // field is ignored. + LedsClusterLocalityConfig leds_cluster_locality_config = 8; + } + + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load + // balancing weight for a locality is divided by the sum of the weights of all + // localities at the same priority level to produce the effective percentage + // of traffic for the locality. The sum of the weights of all localities at + // the same priority level must not exceed uint32_t maximal value (4294967295). + // + // Locality weights are only considered when :ref:`locality weighted load + // balancing ` is + // configured. These weights are ignored otherwise. If no weights are + // specified when locality weighted load balancing is enabled, the locality is + // assigned no load. + google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; + + // Optional: the priority for this LocalityLbEndpoints. If unspecified this will + // default to the highest priority (0). + // + // Under usual circumstances, Envoy will only select endpoints for the highest + // priority (0). In the event that enough endpoints for a particular priority are + // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the + // next highest priority group. Read more at :ref:`priority levels `. 
+ // + // Priorities should range from 0 (highest) to N (lowest) without skipping. + uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; + + // Optional: Per locality proximity value which indicates how close this + // locality is from the source locality. This value only provides ordering + // information (lower the value, closer it is to the source locality). + // This will be consumed by load balancing schemes that need proximity order + // to determine where to route the requests. + // [#not-implemented-hide:] + google.protobuf.UInt32Value proximity = 6; +} diff --git a/proto/envoy/config/endpoint/v3/load_report.proto b/proto/envoy/config/endpoint/v3/load_report.proto new file mode 100644 index 0000000..6d12765 --- /dev/null +++ b/proto/envoy/config/endpoint/v3/load_report.proto @@ -0,0 +1,220 @@ +syntax = "proto3"; + +package envoy.config.endpoint.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "xds/annotations/v3/status.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.endpoint.v3"; +option java_outer_classname = "LoadReportProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3;endpointv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Load Report] + +// These are stats Envoy reports to the management server at a frequency defined by +// :ref:`LoadStatsResponse.load_reporting_interval`. +// Stats per upstream region/zone and optionally per subzone. 
+// [#next-free-field: 15] +message UpstreamLocalityStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.endpoint.UpstreamLocalityStats"; + + // Name of zone, region and optionally endpoint group these metrics were + // collected from. Zone and region names could be empty if unknown. + core.v3.Locality locality = 1; + + // The total number of requests successfully completed by the endpoints in the + // locality. + uint64 total_successful_requests = 2; + + // The total number of unfinished requests. A request can be an HTTP request + // or a TCP connection for a TCP connection pool. + uint64 total_requests_in_progress = 3; + + // The total number of requests that failed due to errors at the endpoint, + // aggregated over all endpoints in the locality. + uint64 total_error_requests = 4; + + // The total number of requests that were issued by this Envoy since + // the last report. This information is aggregated over all the + // upstream endpoints in the locality. A request can be an HTTP request + // or a TCP connection for a TCP connection pool. + uint64 total_issued_requests = 8; + + // The total number of connections in an established state at the time of the + // report. This field is aggregated over all the upstream endpoints in the + // locality. + // In Envoy, this information may be based on ``upstream_cx_active metric``. + // [#not-implemented-hide:] + uint64 total_active_connections = 9 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // The total number of connections opened since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on ``upstream_cx_total`` metric + // compared to itself between start and end of an interval, i.e. + // ``upstream_cx_total``(now) - ``upstream_cx_total``(now - + // load_report_interval). 
+ // [#not-implemented-hide:] + uint64 total_new_connections = 10 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // The total number of connection failures since the last report. + // This field is aggregated over all the upstream endpoints in the locality. + // In Envoy, this information may be based on ``upstream_cx_connect_fail`` + // metric compared to itself between start and end of an interval, i.e. + // ``upstream_cx_connect_fail``(now) - ``upstream_cx_connect_fail``(now - + // load_report_interval). + // [#not-implemented-hide:] + uint64 total_fail_connections = 11 [(xds.annotations.v3.field_status).work_in_progress = true]; + + // CPU utilization stats for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + UnnamedEndpointLoadMetricStats cpu_utilization = 12; + + // Memory utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + UnnamedEndpointLoadMetricStats mem_utilization = 13; + + // Blended application-defined utilization for multi-dimensional load balancing. + // This typically comes from endpoint metrics reported via ORCA. + UnnamedEndpointLoadMetricStats application_utilization = 14; + + // Named stats for multi-dimensional load balancing. + // These typically come from endpoint metrics reported via ORCA. + repeated EndpointLoadMetricStats load_metric_stats = 5; + + // Endpoint granularity stats information for this locality. This information + // is populated if the Server requests it by setting + // :ref:`LoadStatsResponse.report_endpoint_granularity`. + repeated UpstreamEndpointStats upstream_endpoint_stats = 7; + + // [#not-implemented-hide:] The priority of the endpoint group these metrics + // were collected from. 
+ uint32 priority = 6; +} + +// [#next-free-field: 8] +message UpstreamEndpointStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.endpoint.UpstreamEndpointStats"; + + // Upstream host address. + core.v3.Address address = 1; + + // Opaque and implementation dependent metadata of the + // endpoint. Envoy will pass this directly to the management server. + google.protobuf.Struct metadata = 6; + + // The total number of requests successfully completed by the endpoints in the + // locality. These include non-5xx responses for HTTP, where errors + // originate at the client and the endpoint responded successfully. For gRPC, + // the grpc-status values are those not covered by total_error_requests below. + uint64 total_successful_requests = 2; + + // The total number of unfinished requests for this endpoint. + uint64 total_requests_in_progress = 3; + + // The total number of requests that failed due to errors at the endpoint. + // For HTTP these are responses with 5xx status codes and for gRPC the + // grpc-status values: + // + // - DeadlineExceeded + // - Unimplemented + // - Internal + // - Unavailable + // - Unknown + // - DataLoss + uint64 total_error_requests = 4; + + // The total number of requests that were issued to this endpoint + // since the last report. A single TCP connection, HTTP or gRPC + // request or stream is counted as one request. + uint64 total_issued_requests = 7; + + // Stats for multi-dimensional load balancing. + repeated EndpointLoadMetricStats load_metric_stats = 5; +} + +message EndpointLoadMetricStats { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.endpoint.EndpointLoadMetricStats"; + + // Name of the metric; may be empty. + string metric_name = 1; + + // Number of calls that finished and included this metric. + uint64 num_requests_finished_with_metric = 2; + + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. 
+ double total_metric_value = 3; +} + +// Same as EndpointLoadMetricStats, except without the metric_name field. +message UnnamedEndpointLoadMetricStats { + // Number of calls that finished and included this metric. + uint64 num_requests_finished_with_metric = 1; + + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + double total_metric_value = 2; +} + +// Per cluster load stats. Envoy reports these stats a management server in a +// :ref:`LoadStatsRequest` +// Next ID: 7 +// [#next-free-field: 7] +message ClusterStats { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.endpoint.ClusterStats"; + + message DroppedRequests { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.endpoint.ClusterStats.DroppedRequests"; + + // Identifier for the policy specifying the drop. + string category = 1 [(validate.rules).string = {min_len: 1}]; + + // Total number of deliberately dropped requests for the category. + uint64 dropped_count = 2; + } + + // The name of the cluster. + string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; + + // The eds_cluster_config service_name of the cluster. + // It's possible that two clusters send the same service_name to EDS, + // in that case, the management server is supposed to do aggregation on the load reports. + string cluster_service_name = 6; + + // Need at least one. + repeated UpstreamLocalityStats upstream_locality_stats = 2 + [(validate.rules).repeated = {min_items: 1}]; + + // Cluster-level stats such as total_successful_requests may be computed by + // summing upstream_locality_stats. In addition, below there are additional + // cluster-wide stats. + // + // The total number of dropped requests. This covers requests + // deliberately dropped by the drop_overload policy and circuit breaking. 
+ uint64 total_dropped_requests = 3; + + // Information about deliberately dropped requests for each category specified + // in the DropOverload policy. + repeated DroppedRequests dropped_requests = 5; + + // Period over which the actual load report occurred. This will be guaranteed to include every + // request reported. Due to system load and delays between the ``LoadStatsRequest`` sent from Envoy + // and the ``LoadStatsResponse`` message sent from the management server, this may be longer than + // the requested load reporting interval in the ``LoadStatsResponse``. + google.protobuf.Duration load_report_interval = 4; +} diff --git a/proto/envoy/config/listener/v3/BUILD b/proto/envoy/config/listener/v3/BUILD new file mode 100644 index 0000000..712a0d8 --- /dev/null +++ b/proto/envoy/config/listener/v3/BUILD @@ -0,0 +1,18 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/accesslog/v3:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + "@com_github_cncf_xds//xds/annotations/v3:pkg", + "@com_github_cncf_xds//xds/core/v3:pkg", + "@com_github_cncf_xds//xds/type/matcher/v3:pkg", + ], +) diff --git a/proto/envoy/config/listener/v3/api_listener.proto b/proto/envoy/config/listener/v3/api_listener.proto new file mode 100644 index 0000000..a3610e6 --- /dev/null +++ b/proto/envoy/config/listener/v3/api_listener.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "ApiListenerProto"; +option java_multiple_files = true; +option go_package = 
"github.com/envoyproxy/go-control-plane/envoy/config/listener/v3;listenerv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: API listener] + +// Describes a type of API listener, which is used in non-proxy clients. The type of API +// exposed to the non-proxy application depends on the type of API listener. +message ApiListener { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.listener.v2.ApiListener"; + + // The type in this field determines the type of API listener. At present, the following + // types are supported: + // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) + // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the + // specific config message for each type of API listener. We could not do this in v2 because + // it would have caused circular dependencies for go protos: lds.proto depends on this file, + // and http_connection_manager.proto depends on rds.proto, which is in the same directory as + // lds.proto, so lds.proto cannot depend on this file.] 
+ google.protobuf.Any api_listener = 1; +} diff --git a/proto/envoy/config/listener/v3/listener.proto b/proto/envoy/config/listener/v3/listener.proto new file mode 100644 index 0000000..54ef2cf --- /dev/null +++ b/proto/envoy/config/listener/v3/listener.proto @@ -0,0 +1,455 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "envoy/config/accesslog/v3/accesslog.proto"; +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/socket_option.proto"; +import "envoy/config/listener/v3/api_listener.proto"; +import "envoy/config/listener/v3/listener_components.proto"; +import "envoy/config/listener/v3/udp_listener_config.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/core/v3/collection_entry.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/security.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3;listenerv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Listener configuration] +// Listener :ref:`configuration overview ` + +// The additional address the listener is listening on. +message AdditionalAddress { + core.v3.Address address = 1; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. If specified, this will override the + // :ref:`socket_options ` + // in the listener. 
If specified with no + // :ref:`socket_options ` + // or an empty list of :ref:`socket_options `, + // it means no socket option will apply. + core.v3.SocketOptionsOverride socket_options = 2; + + // Configures TCP keepalive settings for the additional address. + // If not set, the listener :ref:`tcp_keepalive ` + // configuration is inherited. You can explicitly disable TCP keepalive for the additional address by setting any keepalive field + // (:ref:`keepalive_probes `, + // :ref:`keepalive_time `, or + // :ref:`keepalive_interval `) to ``0``. + core.v3.TcpKeepalive tcp_keepalive = 3; +} + +// Listener list collections. Entries are ``Listener`` resources or references. +// [#not-implemented-hide:] +message ListenerCollection { + repeated xds.core.v3.CollectionEntry entries = 1; +} + +// [#next-free-field: 38] +message Listener { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Listener.DeprecatedV1"; + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated. Use :ref:`Listener.bind_to_port + // ` + google.protobuf.BoolValue bind_to_port = 1; + } + + // Configuration for listener connection balancing. 
+ message ConnectionBalanceConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Listener.ConnectionBalanceConfig"; + + // A connection balancer implementation that does exact balancing. This means that a lock is + // held during balancing so that connection counts are nearly exactly balanced between worker + // threads. This is "nearly" exact in the sense that a connection might close in parallel thus + // making the counts incorrect, but this should be rectified on the next accept. This balancer + // sacrifices accept throughput for accuracy and should be used when there are a small number of + // connections that rarely cycle (e.g., service mesh gRPC egress). + message ExactBalance { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance"; + } + + oneof balance_type { + option (validate.required) = true; + + // If specified, the listener will use the exact connection balancer. + ExactBalance exact_balance = 1; + + // The listener will use the connection balancer according to ``type_url``. If ``type_url`` is invalid, + // Envoy will not attempt to balance active connections between worker threads. + // [#extension-category: envoy.network.connection_balance] + core.v3.TypedExtensionConfig extend_balance = 2; + } + } + + // Configuration for envoy internal listener. All the future internal listener features should be added here. + message InternalListenerConfig { + } + + // Configuration for filter chains discovery. + // [#not-implemented-hide:] + message FcdsConfig { + // Optional name to present to the filter chain discovery service. This may be an arbitrary name with arbitrary + // length. If a name is not provided, the listener's name is used. Refer to :ref:`filter_chains `. + // for details on how listener name is determined if unspecified. In addition, this may be a xdstp:// URL. 
+ string name = 1; + + // Configuration for the source of FCDS updates for this listener. + // .. note:: + // This discovery service only supports ``AGGREGATED_GRPC`` API type. + core.v3.ConfigSource config_source = 2; + } + + reserved 14, 23; + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + // Required unless ``api_listener`` or ``listener_specifier`` is populated. + // + // When the address contains a network namespace filepath (via + // :ref:`network_namespace_filepath `), + // Envoy automatically populates the filter state with key ``envoy.network.network_namespace`` + // when a connection is accepted. This provides read-only access to the network namespace for + // filters, access logs, and other components. + core.v3.Address address = 2; + + // The additional addresses the listener should listen on. The addresses must be unique across all + // listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener, + // all addresses use the same protocol, and multiple internal addresses are not supported. + repeated AdditionalAddress additional_addresses = 33; + + // Optional prefix to use on listener stats. If empty, the stats will be rooted at + // ``listener.
.``. If non-empty, stats will be rooted at + // ``listener..``. + string stat_prefix = 28; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated FilterChain filter_chains = 3; + + // Discover filter chains configurations by external service. Dynamic discovery of filter chains is allowed + // while having statically configured filter chains, however, a filter chain name must be unique within a + // listener. If a discovered filter chain matches a name of an existing filter chain, it is discarded. + // [#not-implemented-hide:] + FcdsConfig fcds_config = 36; + + // :ref:`Matcher API ` resolving the filter chain name from the + // network properties. This matcher is used as a replacement for the filter chain match condition + // :ref:`filter_chain_match + // `. If specified, all + // :ref:`filter_chains ` must have a + // non-empty and unique :ref:`name ` field + // and not specify :ref:`filter_chain_match + // ` field. + // + // .. note:: + // + // Once matched, each connection is permanently bound to its filter chain. + // If the matcher changes but the filter chain remains the same, the + // connections bound to the filter chain are not drained. If, however, the + // filter chain is removed or structurally modified, then the drain for its + // connections is initiated. + xds.type.matcher.v3.Matcher filter_chain_matcher = 32; + + // If a connection is redirected using ``iptables``, the port on which the proxy + // receives it might be different from the original destination address. When this flag is set to + // true, the listener hands off redirected connections to the listener associated with the + // original destination address. 
If there is no listener associated with the original destination + // address, the connection is handled by the listener that receives it. Defaults to false. + google.protobuf.BoolValue use_original_dst = 4; + + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 + [(udpa.annotations.security).configure_for_untrusted_downstream = true]; + + // Listener metadata. + core.v3.Metadata metadata = 6; + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // ` and no + // :ref:`quic_options ` is specified in :ref:`udp_listener_config `. + // QUIC listener filters can be specified when :ref:`quic_options + // ` is + // specified in :ref:`udp_listener_config `. + // They are processed sequentially right before connection creation. And like TCP Listener filters, they can be used to manipulate the connection metadata and socket. 
But the difference is that they can't be used to pause connection creation. + repeated ListenerFilter listener_filters = 9; + + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // ``continue_on_listener_filters_timeout`` is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + google.protobuf.Duration listener_filters_timeout = 15; + + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + bool continue_on_listener_filters_timeout = 17; + + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // ``iptables`` ``TPROXY`` target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using ``TPROXY`` cannot be distinguished from connections redirected using ``TPROXY`` and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the ``CAP_NET_ADMIN`` capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. 
+ google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the ``IP_FREEBIND`` socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option ``IP_FREEBIND`` is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + // It is not allowed to update the socket options for any existing address if + // :ref:`enable_reuse_port ` + // is ``false`` to avoid the conflict when creating new sockets for the listener. + repeated core.v3.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + // Specifies the intended direction of the traffic relative to the local Envoy. + // This property is required on Windows for listeners using the original destination filter, + // see :ref:`Original Destination `. 
+ core.v3.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies UDP + // listener specific configuration. + UdpListenerConfig udp_listener_config = 18; + + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. + // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + ApiListener api_listener = 19; + + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + // + // In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 + // by setting :ref:`use_original_dst ` in X + // and :ref:`bind_to_port ` to false in Y1 and Y2, + // it is recommended to disable the balance config in listener X to avoid the cost of balancing, and + // enable the balance config in Y1 and Y2 to balance the connections among the workers. + ConnectionBalanceConfig connection_balance_config = 20; + + // Deprecated. Use ``enable_reuse_port`` instead. 
+ bool reuse_port = 21 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // When this flag is set to true, listeners set the ``SO_REUSEPORT`` socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. The change of field will be rejected during a listener update when the + // runtime flag ``envoy.reloadable_features.enable_update_listener_socket_options`` is enabled. + // Otherwise, the update of this field will be ignored quietly. + // + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. + // + // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + google.protobuf.BoolValue enable_reuse_port = 29; + + // Configuration for :ref:`access logs ` + // emitted by this listener. + repeated accesslog.v3.AccessLog access_log = 22; + + // The maximum length a tcp listener's pending connections queue can grow to. If no value is + // provided net.core.somaxconn will be used on Linux and 128 otherwise. 
+ google.protobuf.UInt32Value tcp_backlog_size = 24; + + // The maximum number of connections to accept from the kernel per socket + // event. Envoy may decide to close these connections after accepting them + // from the kernel e.g. due to load shedding, or other policies. + // If there are more than max_connections_to_accept_per_socket_event + // connections pending accept, connections over this threshold will be + // accepted in later event loop iterations. + // If no value is provided Envoy will accept all connections pending accept + // from the kernel. + // + // .. note:: + // + // It is recommended to lower this value for better overload management and reduced per-event cost. + // Setting it to 1 is a viable option with no noticeable impact on performance. + google.protobuf.UInt32Value max_connections_to_accept_per_socket_event = 34 + [(validate.rules).uint32 = {gt: 0}]; + + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that set + // :ref:`use_original_dst ` + // to true. Default is true. + google.protobuf.BoolValue bind_to_port = 26; + + // The exclusive listener type and the corresponding config. + oneof listener_specifier { + // Used to represent an internal listener which does not listen on OSI L4 address but can be used by the + // :ref:`envoy cluster ` to create a user space connection to. + // The internal listener acts as a TCP listener. It supports listener filters and network filter chains. + // Upstream clusters refer to the internal listeners by their :ref:`name + // `. :ref:`Address + // ` must not be set on the internal listeners. + // + // There are some limitations that are derived from the implementation. The known limitations include: + // + // * :ref:`ConnectionBalanceConfig ` is not + // allowed because both the cluster connection and the listener connection must be owned by the same dispatcher. 
+ // * :ref:`tcp_backlog_size ` + // * :ref:`freebind ` + // * :ref:`transparent ` + InternalListenerConfig internal_listener = 27; + } + + // Enable MPTCP (multi-path TCP) on this listener. Clients will be allowed to establish + // MPTCP connections. Non-MPTCP clients will fall back to regular TCP. + bool enable_mptcp = 30; + + // Whether the listener should limit connections based upon the value of + // :ref:`global_downstream_max_connections `. + bool ignore_global_conn_limit = 31; + + // Whether the listener bypasses configured overload manager actions. + bool bypass_overload_manager = 35; + + // If set, TCP keepalive settings are configured for the listener address and inherited by + // additional addresses. If not set, TCP keepalive settings are not configured for the + // listener address and additional addresses by default. See :ref:`tcp_keepalive ` + // to explicitly configure TCP keepalive settings for individual additional addresses. + core.v3.TcpKeepalive tcp_keepalive = 37; +} + +// A placeholder proto so that users can explicitly configure the standard +// Listener Manager via the bootstrap's :ref:`listener_manager `. +// [#not-implemented-hide:] +message ListenerManager { +} + +// A placeholder proto so that users can explicitly configure the standard +// Validation Listener Manager via the bootstrap's :ref:`listener_manager `. +// [#not-implemented-hide:] +message ValidationListenerManager { +} + +// A placeholder proto so that users can explicitly configure the API +// Listener Manager via the bootstrap's :ref:`listener_manager `. 
+// [#not-implemented-hide:] +message ApiListenerManager { +} diff --git a/proto/envoy/config/listener/v3/listener_components.proto b/proto/envoy/config/listener/v3/listener_components.proto new file mode 100644 index 0000000..16b4356 --- /dev/null +++ b/proto/envoy/config/listener/v3/listener_components.proto @@ -0,0 +1,353 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "ListenerComponentsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3;listenerv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +// [#next-free-field: 6] +message Filter { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.Filter"; + + reserved 3, 2; + + reserved "config"; + + // The name of the filter configuration. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + oneof config_type { + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + // [#extension-category: envoy.filters.network] + google.protobuf.Any typed_config = 4; + + // Configuration source specifier for an extension configuration discovery + // service. 
In case of a failure and without the default configuration, the + // listener closes the connections. + core.v3.ExtensionConfigSource config_discovery = 5; + } +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Directly connected source IP address (this will only be different from the source IP address +// when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol +// listener filter `). +// 7. Source type (e.g. any, local or external network). +// 8. Source IP address. +// 9. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. 
If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 14] +message FilterChainMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.FilterChainMatch"; + + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + SAME_IP_OR_LOOPBACK = 1; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + repeated core.v3.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // The criteria is satisfied if the directly connected source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the parameter is not + // specified or the list is empty, the directly connected source IP address is ignored. 
+ repeated core.v3.CidrRange direct_source_prefix_ranges = 13; + + // Specifies the connection source IP match type. Can be any, local or external network. + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.v3.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // The value ``*`` is also not supported, and ``server_names`` should be omitted instead. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. + // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. 
+ // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector + // `, + // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + repeated string application_protocols = 10; +} + +// A filter chain wraps a set of match criteria, an optional TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 10] +message FilterChain { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChain"; + + reserved 2, 8; + + reserved "tls_context", "on_demand_configuration"; + + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. 
+ // + // For QUIC listeners, network filters other than HTTP Connection Manager (HCM) + // can be created, but due to differences in the connection implementation compared + // to TCP, the onData() method will never be called. Therefore, network filters + // for QUIC listeners should only expect to do work at the start of a new connection + // (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain. + repeated Filter filters = 3; + + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that the + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + // + // This field is deprecated. Add a + // :ref:`PROXY protocol listener filter ` + // explicitly instead. + google.protobuf.BoolValue use_proxy_proto = 4 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Filter chain metadata. + core.v3.Metadata metadata = 5; + + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + // :ref:`DownstreamTlsContext ` in the ``typed_config``. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + // [#extension-category: envoy.transport_sockets.downstream] + core.v3.TransportSocket transport_socket = 6; + + // If present and nonzero, the amount of time to allow incoming connections to complete any + // transport socket negotiations. If this expires before the transport reports connection + // establishment, the connection is summarily closed. 
+ google.protobuf.Duration transport_socket_connect_timeout = 9; + + // The unique name (or empty) by which this filter chain is known. + // + // .. note:: + // :ref:`filter_chain_matcher + // ` + // requires that filter chains are uniquely named within a listener. + string name = 7; +} + +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// +// Examples: +// +// * Matches if the destination port is 3306. +// +// .. code-block:: yaml +// +// destination_port_range: +// start: 3306 +// end: 3307 +// +// * Matches if the destination port is 3306 or 15000. +// +// .. code-block:: yaml +// +// or_match: +// rules: +// - destination_port_range: +// start: 3306 +// end: 3307 +// - destination_port_range: +// start: 15000 +// end: 15001 +// +// [#next-free-field: 6] +message ListenerFilterChainMatchPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.ListenerFilterChainMatchPredicate"; + + // A set of match configurations used for logical operations. + message MatchSet { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.ListenerFilterChainMatchPredicate.MatchSet"; + + // The list of rules that make up the set. + repeated ListenerFilterChainMatchPredicate rules = 1 + [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + ListenerFilterChainMatchPredicate not_match = 3; + + // The match configuration will always match. 
+ bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. + type.v3.Int32Range destination_port_range = 5; + } +} + +// [#next-free-field: 6] +message ListenerFilter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.ListenerFilter"; + + reserved 2; + + reserved "config"; + + // The name of the filter configuration. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + oneof config_type { + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + // [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] + google.protobuf.Any typed_config = 3; + + // Configuration source specifier for an extension configuration discovery + // service. In case of a failure and without the default configuration, the + // listener closes the connections. + core.v3.ExtensionConfigSource config_discovery = 5; + } + + // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + // See :ref:`ListenerFilterChainMatchPredicate ` + // for further examples. 
+ ListenerFilterChainMatchPredicate filter_disabled = 4; +} diff --git a/proto/envoy/config/listener/v3/quic_config.proto b/proto/envoy/config/listener/v3/quic_config.proto new file mode 100644 index 0000000..c208a58 --- /dev/null +++ b/proto/envoy/config/listener/v3/quic_config.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/protocol.proto"; +import "envoy/config/core/v3/socket_cmsg_headers.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/annotations/v3/status.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "QuicConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3;listenerv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: QUIC listener config] + +// Configuration specific to the UDP QUIC listener. +// [#next-free-field: 15] +message QuicProtocolOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.QuicProtocolOptions"; + + core.v3.QuicProtocolOptions quic_protocol_options = 1; + + // Maximum number of milliseconds that connection will be alive when there is + // no network activity. + // + // If it is less than 1ms, Envoy will use 1ms. 300000ms if not specified. + google.protobuf.Duration idle_timeout = 2; + + // Connection timeout in milliseconds before the crypto handshake is finished. + // + // If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified. + google.protobuf.Duration crypto_handshake_timeout = 3; + + // Runtime flag that controls whether the listener is enabled or not. 
If not specified, defaults + // to enabled. + core.v3.RuntimeFeatureFlag enabled = 4; + + // A multiplier to number of connections which is used to determine how many packets to read per + // event loop. A reasonable number should allow the listener to process enough payload but not + // starve TCP and other UDP sockets and also prevent long event loop duration. + // The default value is 32. This means if there are N QUIC connections, the total number of + // packets to read in each read event will be 32 * N. + // The actual number of packets to read in total by the UDP listener is also + // bound by 6000, regardless of this field or how many connections there are. + google.protobuf.UInt32Value packets_to_read_to_connection_count_ratio = 5 + [(validate.rules).uint32 = {gte: 1}]; + + // Configure which implementation of ``quic::QuicCryptoClientStreamBase`` to be used for this listener. + // If not specified the :ref:`QUICHE default one configured by ` will be used. + // [#extension-category: envoy.quic.server.crypto_stream] + core.v3.TypedExtensionConfig crypto_stream_config = 6; + + // Configure which implementation of ``quic::ProofSource`` to be used for this listener. + // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.proof_source] + core.v3.TypedExtensionConfig proof_source_config = 7; + + // Config which implementation of ``quic::ConnectionIdGeneratorInterface`` to be used for this listener. + // If not specified the :ref:`default one configured by ` will be used. + // [#extension-category: envoy.quic.connection_id_generator] + core.v3.TypedExtensionConfig connection_id_generator_config = 8; + + // Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. 
+ // The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with. + // If not specified, Envoy will not advertise any server's preferred address. + // [#extension-category: envoy.quic.server_preferred_address] + core.v3.TypedExtensionConfig server_preferred_address_config = 9 + [(xds.annotations.v3.field_status).work_in_progress = true]; + + // Configure the server to send transport parameter `disable_active_migration `_. + // Defaults to false (do not send this transport parameter). + google.protobuf.BoolValue send_disable_active_migration = 10; + + // Configure which implementation of ``quic::QuicConnectionDebugVisitor`` to be used for this listener. + // If not specified, no debug visitor will be attached to connections. + // [#extension-category: envoy.quic.connection_debug_visitor] + core.v3.TypedExtensionConfig connection_debug_visitor_config = 11; + + // Configure a type of UDP cmsg to pass to listener filters via QuicReceivedPacket. + // Both level and type must be specified for cmsg to be saved. + // Cmsg may be truncated or omitted if expected size is not set. + // If not specified, no cmsg will be saved to QuicReceivedPacket. + repeated core.v3.SocketCmsgHeaders save_cmsg_config = 12 + [(validate.rules).repeated = {max_items: 1}]; + + // If true, the listener will reject connection-establishing packets at the + // QUIC layer by replying with an empty version negotiation packet to the + // client. + bool reject_new_connections = 13; + + // Maximum number of QUIC sessions to create per event loop. + // If not specified, the default value is 16. + // This is an equivalent of the TCP listener option + // max_connections_to_accept_per_socket_event. 
+ google.protobuf.UInt32Value max_sessions_per_event_loop = 14 [(validate.rules).uint32 = {gt: 0}]; +} diff --git a/proto/envoy/config/listener/v3/udp_listener_config.proto b/proto/envoy/config/listener/v3/udp_listener_config.proto new file mode 100644 index 0000000..4e619b1 --- /dev/null +++ b/proto/envoy/config/listener/v3/udp_listener_config.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.config.listener.v3; + +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/udp_socket_config.proto"; +import "envoy/config/listener/v3/quic_config.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v3"; +option java_outer_classname = "UdpListenerConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3;listenerv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: UDP listener config] +// Listener :ref:`configuration overview ` + +// [#next-free-field: 9] +message UdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.UdpListenerConfig"; + + reserved 1, 2, 3, 4, 6; + + reserved "config"; + + // UDP socket configuration for the listener. The default for + // :ref:`prefer_gro ` is false for + // listener sockets. If receiving a large amount of datagrams from a small number of sources, it + // may be worthwhile to enable this option after performance testing. + core.v3.UdpSocketConfig downstream_socket_config = 5; + + // Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set + // to the default object to enable QUIC without modifying any additional options. + QuicProtocolOptions quic_options = 7; + + // Configuration for the UDP packet writer. 
If empty, HTTP/3 will use GSO if available + // (:ref:`UdpDefaultWriterFactory `) + // or the default kernel sendmsg if not, + // (:ref:`UdpDefaultWriterFactory `) + // and raw UDP will use kernel sendmsg. + // [#extension-category: envoy.udp_packet_writer] + core.v3.TypedExtensionConfig udp_packet_packet_writer_config = 8; +} + +message ActiveRawUdpListenerConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.listener.ActiveRawUdpListenerConfig"; +} diff --git a/proto/envoy/config/route/v3/BUILD b/proto/envoy/config/route/v3/BUILD new file mode 100644 index 0000000..6da6fb8 --- /dev/null +++ b/proto/envoy/config/route/v3/BUILD @@ -0,0 +1,19 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/common/mutation_rules/v3:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "//envoy/type/metadata/v3:pkg", + "//envoy/type/tracing/v3:pkg", + "//envoy/type/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + "@com_github_cncf_xds//xds/type/matcher/v3:pkg", + ], +) diff --git a/proto/envoy/config/route/v3/route.proto b/proto/envoy/config/route/v3/route.proto new file mode 100644 index 0000000..5bd909f --- /dev/null +++ b/proto/envoy/config/route/v3/route.proto @@ -0,0 +1,172 @@ +syntax = "proto3"; + +package envoy.config.route.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/config_source.proto"; +import "envoy/config/route/v3/route_components.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v3"; +option java_outer_classname = "RouteProto"; +option 
java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/route/v3;routev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP route configuration] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// [#next-free-field: 19] +message RouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration"; + + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. + string name = 1; + + // An array of virtual hosts that make up the route table. + repeated VirtualHost virtual_hosts = 2; + + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both ``virtual_hosts`` and ``vhds`` fields will be used when present. ``virtual_hosts`` can be used + // for a base routing table or for infrequently changing virtual hosts. ``vhds`` is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with ``vhds`` derived configuration + // taking precedence. + Vhds vhds = 9; + + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + repeated string internal_only_headers = 3 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. 
Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v3.HeaderValueOption response_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + repeated string response_headers_to_remove = 5 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v3.HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + repeated string request_headers_to_remove = 8 [ + (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} + ]; + + // Headers mutations at all levels are evaluated, if specified. By default, the order is from most + // specific (i.e. route entry level) to least specific (i.e. route configuration level). Later header + // mutations may override earlier mutations. + // This order can be reversed by setting this field to true. In other words, most specific level mutation + // is evaluated last. 
+ // + bool most_specific_header_mutations_wins = 10; + + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). + google.protobuf.BoolValue validate_clusters = 7; + + // The maximum bytes of the response :ref:`direct response body + // ` size. If not specified the default + // is 4096. + // + // .. warning:: + // + // Envoy currently holds the content of :ref:`direct response body + // ` in memory. Be careful setting + // this to be larger than the default 4KB, since the allocated memory for direct response body + // is not subject to data plane buffering controls. + // + google.protobuf.UInt32Value max_direct_response_body_size_bytes = 11; + + // A list of plugins and their configurations which may be used by a + // :ref:`cluster specifier plugin name ` + // within the route. All ``extension.name`` fields in this list must be unique. + repeated ClusterSpecifierPlugin cluster_specifier_plugins = 12; + + // Specify a set of default request mirroring policies which apply to all routes under its virtual hosts. + // Note that policies are not merged, the most specific non-empty one becomes the mirror policies. + repeated RouteAction.RequestMirrorPolicy request_mirror_policies = 13; + + // By default, port in :authority header (if any) is used in host matching. 
+ // With this option enabled, Envoy will ignore the port number in the :authority header (if any) when picking VirtualHost. + // + // .. note:: + // This option will not strip the port number (if any) contained in route config + // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`.domains field. + bool ignore_port_in_host_matching = 14; + + // Normally, virtual host matching is done using the :authority (or + // Host: in HTTP < 2) HTTP header. Setting this will instead, use a + // different HTTP header for this purpose. + string vhost_header = 18; + + // Ignore path-parameters in path-matching. + // Before RFC3986, URI were like(RFC1808): :///;?# + // Envoy by default takes ":path" as ";". + // For users who want to only match path on the "" portion, this option should be true. + bool ignore_path_parameters_in_path_matching = 15; + + // This field can be used to provide RouteConfiguration level per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`Http filter route specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + map typed_per_filter_config = 16; + + // The metadata field can be used to provide additional information + // about the route configuration. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as ``envoy.filters.http.router``. + core.v3.Metadata metadata = 17; +} + +message Vhds { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Vhds"; + + // Configuration source specifier for VHDS. 
+ core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; +} diff --git a/proto/envoy/config/route/v3/route_components.proto b/proto/envoy/config/route/v3/route_components.proto new file mode 100644 index 0000000..4587ef1 --- /dev/null +++ b/proto/envoy/config/route/v3/route_components.proto @@ -0,0 +1,2918 @@ +syntax = "proto3"; + +package envoy.config.route.v3; + +import "envoy/config/common/mutation_rules/v3/mutation_rules.proto"; +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/config/core/v3/proxy_protocol.proto"; +import "envoy/config/core/v3/substitution_format_string.proto"; +import "envoy/type/matcher/v3/filter_state.proto"; +import "envoy/type/matcher/v3/metadata.proto"; +import "envoy/type/matcher/v3/regex.proto"; +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/metadata/v3/metadata.proto"; +import "envoy/type/tracing/v3/custom_tag.proto"; +import "envoy/type/v3/percent.proto"; +import "envoy/type/v3/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/type/matcher/v3/matcher.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v3"; +option java_outer_classname = "RouteComponentsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/route/v3;routev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP route components] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The top level element in the routing configuration is a virtual host. 
Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. +// [#next-free-field: 26] +message VirtualHost { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost"; + + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9, 12; + + reserved "per_filter_config"; + + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // For example, ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. + // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. 
+ // + // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE. + repeated string domains = 2 [(validate.rules).repeated = { + min_items: 1 + items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} + }]; + + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + // Only one of this and ``matcher`` can be specified. + repeated Route routes = 3 [(udpa.annotations.field_migrate).oneof_promotion = "route_selection"]; + + // The match tree to use when resolving route actions for incoming requests. Only one of this and ``routes`` + // can be specified. + xds.type.matcher.v3.Matcher matcher = 21 + [(udpa.annotations.field_migrate).oneof_promotion = "route_selection"]; + + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; + + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + repeated VirtualCluster virtual_clusters = 5; + + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + repeated RateLimit rate_limits = 6; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ repeated core.v3.HeaderValueOption request_headers_to_add = 7 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. + repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v3.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is + // found in the + // :ref:`VirtualHost.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`VirtualHost.typed_per_filter_config` + // to configure the CORS HTTP filter. + CorsPolicy cors = 8 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // This field can be used to provide virtual host level per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`HTTP filter route-specific config ` + // for details. 
+ // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + map typed_per_filter_config = 15; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. + // + // Defaults to ``false``. + // + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + // + // [#next-major-version: rename to include_attempt_count_in_request.] + bool include_request_attempt_count = 14; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the downstream response. Setting this option will cause the router to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the downstream + // will see the attempt count as perceived by the Envoy closest upstream from itself. + // + // Defaults to ``false``. + // + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + bool include_attempt_count_in_response = 19; + + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g., values are not inherited). + RetryPolicy retry_policy = 16; + + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that setting a route level entry + // will take precedence over this config and it'll be treated independently (e.g., values are not + // inherited). :ref:`Retry policy ` should not be + // set if this field is used. 
+ google.protobuf.Any retry_policy_typed_config = 20; + + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g., values are not inherited). + HedgePolicy hedge_policy = 17; + + // Decides whether to include the :ref:`x-envoy-is-timeout-retry ` + // request header in retries initiated by per-try timeouts. + bool include_is_timeout_retry_header = 23; + + // The maximum bytes which will be buffered for retries and shadowing. If set, the bytes actually buffered will be + // the minimum value of this and the listener ``per_connection_buffer_limit_bytes``. + // + // .. attention:: + // + // This field has been deprecated. Please use :ref:`request_body_buffer_limit + // ` instead. + // Only one of ``per_request_buffer_limit_bytes`` and ``request_body_buffer_limit`` could be set. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // The maximum bytes which will be buffered for request bodies to support large request body + // buffering beyond the ``per_connection_buffer_limit_bytes``. + // + // This limit is specifically for the request body buffering and allows buffering larger payloads while maintaining + // flow control. + // + // Buffer limit precedence (from highest to lowest priority): + // + // 1. If ``request_body_buffer_limit`` is set, then ``request_body_buffer_limit`` will be used. + // 2. If :ref:`per_request_buffer_limit_bytes ` + // is set but ``request_body_buffer_limit`` is not, then ``min(per_request_buffer_limit_bytes, per_connection_buffer_limit_bytes)`` + // will be used. + // 3. If neither is set, then ``per_connection_buffer_limit_bytes`` will be used. + // + // For flow control chunk sizes, ``min(per_connection_buffer_limit_bytes, 16KB)`` will be used. 
+ // + // Only one of :ref:`per_request_buffer_limit_bytes ` + // and ``request_body_buffer_limit`` could be set. + google.protobuf.UInt64Value request_body_buffer_limit = 25 + [(validate.rules).message = {required: false}]; + + // Specify a set of default request mirroring policies for every route under this virtual host. + // It takes precedence over the route config mirror policy entirely. + // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + repeated RouteAction.RequestMirrorPolicy request_mirror_policies = 22; + + // The metadata field can be used to provide additional information + // about the virtual host. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as ``envoy.filters.http.router``. + core.v3.Metadata metadata = 24; +} + +// A filter-defined action type. +message FilterAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.FilterAction"; + + google.protobuf.Any action = 1; +} + +// This can be used in route matcher :ref:`VirtualHost.matcher `. +// When the matcher matches, routes will be matched and run. +message RouteList { + // The list of routes that will be matched and run, in order. The first route that matches will be used. + repeated Route routes = 1; +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// [#next-free-field: 21] +message Route { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Route"; + + reserved 6, 8; + + reserved "per_filter_config"; + + // Name for the route. + string name = 14; + + // Route matching parameters. 
+ RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + oneof action { + option (validate.required) = true; + + // Route request to some upstream cluster. + RouteAction route = 2; + + // Return a redirect. + RedirectAction redirect = 3; + + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + + // [#not-implemented-hide:] + // A filter-defined action (e.g., it could dynamically generate the RouteAction). + // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + // implemented] + FilterAction filter_action = 17; + + // [#not-implemented-hide:] + // An action used when the route will generate a response directly, + // without forwarding to an upstream host. This will be used in non-proxy + // xDS clients like the gRPC server. It could also be used in the future + // in Envoy for a filter that directly generates responses for requests. + NonForwardingAction non_forwarding_action = 18; + } + + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as ``envoy.filters.http.router``. + core.v3.Metadata metadata = 4; + + // Decorator for the matched route. + Decorator decorator = 5; + + // This field can be used to provide route specific per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`HTTP filter route-specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + map typed_per_filter_config = 13; + + // Specifies a set of headers that will be added to requests matching this + // route. 
Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v3.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. + repeated core.v3.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { + items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + // + // .. 
attention:: + // + // This field has been deprecated. Please use :ref:`request_body_buffer_limit + // ` instead. + // Only one of ``per_request_buffer_limit_bytes`` and ``request_body_buffer_limit`` may be set. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // The human readable prefix to use when emitting statistics for this endpoint. + // The statistics are rooted at vhost..route.. + // This should be set for highly critical + // endpoints that one wishes to get “per-route” statistics on. + // If not set, endpoint statistics are not generated. + // + // The emitted statistics are the same as those documented for :ref:`virtual clusters `. + // + // .. warning:: + // + // We do not recommend setting up a stat prefix for + // every application endpoint. This is both not easily maintainable and + // statistics use a non-trivial amount of memory (approximately 1KiB per route). + string stat_prefix = 19; + + // The maximum bytes which will be buffered for request bodies to support large request body + // buffering beyond the ``per_connection_buffer_limit_bytes``. + // + // This limit is specifically for the request body buffering and allows buffering larger payloads while maintaining + // flow control. + // + // Buffer limit precedence (from highest to lowest priority): + // + // 1. If ``request_body_buffer_limit`` is set: use ``request_body_buffer_limit`` + // 2. If :ref:`per_request_buffer_limit_bytes ` + // is set but ``request_body_buffer_limit`` is not: use ``min(per_request_buffer_limit_bytes, per_connection_buffer_limit_bytes)`` + // 3. If neither is set: use ``per_connection_buffer_limit_bytes`` + // + // For flow control chunk sizes, use ``min(per_connection_buffer_limit_bytes, 16KB)``. + // + // Only one of :ref:`per_request_buffer_limit_bytes ` + // and ``request_body_buffer_limit`` may be set. 
+ google.protobuf.UInt64Value request_body_buffer_limit = 20; +} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. +// [#next-free-field: 6] +message WeightedCluster { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster"; + + // [#next-free-field: 13] + message ClusterWeight { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.WeightedCluster.ClusterWeight"; + + reserved 7, 8; + + reserved "per_filter_config"; + + // Only one of ``name`` and ``cluster_header`` may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"]; + + // Only one of ``name`` and ``cluster_header`` may be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. 
+ string cluster_header = 12 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier" + ]; + + // The weight of the cluster. This value is relative to the other clusters' + // weights. When a request matches the route, the choice of an upstream cluster + // is determined by its weight. The sum of weights across all + // entries in the clusters array must be greater than 0, and must not exceed + // uint32_t maximal value (4294967295). + google.protobuf.UInt32Value weight = 2; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. Note that this will be merged with what's provided in + // :ref:`RouteAction.metadata_match `, with + // values here taking precedence. The filter name should be specified as ``envoy.lb``. + core.v3.Metadata metadata_match = 3; + + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v3.HeaderValueOption request_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. 
+ repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.v3.HeaderValueOption response_headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { + items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} + }]; + + // This field can be used to provide weighted cluster specific per filter config. The key should match the + // :ref:`filter config name + // `. + // See :ref:`HTTP filter route-specific config ` + // for details. + // [#comment: An entry's value may be wrapped in a + // :ref:`FilterConfig` + // message to specify additional options.] + map typed_per_filter_config = 10; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite_literal = 11 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Specifies the total weight across all clusters. 
The sum of all cluster weights must equal this + // value, if this is greater than 0. + // This field is now deprecated, and the client will use the sum of all + // cluster weights. It is up to the management server to supply the correct weights. + google.protobuf.UInt32Value total_weight = 3 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the ``runtime_key_prefix`` is + // specified, the router will look for weights associated with each upstream + // cluster under the key ``runtime_key_prefix`` + ``.`` + ``cluster[i].name`` where + // ``cluster[i]`` denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. + string runtime_key_prefix = 2; + + oneof random_value_specifier { + // Specifies the header name that is used to look up the random value passed in the request header. + // This is used to ensure consistent cluster picking across multiple proxy levels for weighted traffic. + // If header is not present or invalid, Envoy will fall back to use the internally generated random value. + // This header is expected to be single-valued header as we only want to have one selected value throughout + // the process for the consistency. And the value is a unsigned number between 0 and UINT64_MAX. + string header_name = 4 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // When set to true, the hash policies will be used to generate the random value for weighted cluster selection. + // This could ensure consistent cluster picking across multiple proxy levels for weighted traffic. 
+ google.protobuf.BoolValue use_hash_policy = 5; + } +} + +// Configuration for a cluster specifier plugin. +message ClusterSpecifierPlugin { + // The name of the plugin and its opaque configuration. + // + // [#extension-category: envoy.router.cluster_specifier_plugin] + core.v3.TypedExtensionConfig extension = 1 [(validate.rules).message = {required: true}]; + + // If is_optional is not set or is set to false and the plugin defined by this message is not a + // supported type, the containing resource is NACKed. If is_optional is set to true, the resource + // would not be NACKed for this reason. In this case, routes referencing this plugin's name would + // not be treated as an illegal configuration, but would result in a failure if the route is + // selected. + bool is_optional = 2; +} + +// [#next-free-field: 18] +message RouteMatch { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; + + message GrpcRouteMatchOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions"; + } + + message TlsContextMatchOptions { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteMatch.TlsContextMatchOptions"; + + // If specified, the route will match against whether or not a certificate is presented. + // If not specified, certificate presentation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue presented = 1; + + // If specified, the route will match against whether or not a certificate is validated. + // If not specified, certificate validation status (true or false) will not be considered when route matching. + // + // .. warning:: + // + // Client certificate validation is not currently performed upon TLS session resumption. For + // a resumed TLS session the route will match only when ``validated`` is false, regardless of + // whether the client TLS certificate is valid. 
+ // + // The only known workaround for this issue is to disable TLS session resumption entirely, by + // setting both :ref:`disable_stateless_session_resumption ` + // and :ref:`disable_stateful_session_resumption ` on the DownstreamTlsContext. + google.protobuf.BoolValue validated = 2; + } + + // An extensible message for matching CONNECT or CONNECT-UDP requests. + message ConnectMatcher { + } + + reserved 5, 3; + + reserved "regex"; + + oneof path_specifier { + option (validate.required) = true; + + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the ``:path`` header. + string prefix = 1; + + // If specified, the route is an exact path rule meaning that the path must + // exactly match the ``:path`` header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the ``:path`` header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the ``:path`` header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case-sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + + // If this is used as the matcher, the matcher will only match CONNECT or CONNECT-UDP requests. 
+ // Note that this will not match other Extended CONNECT requests (WebSocket and the like) as + // they are normalized in Envoy as HTTP/1.1 style upgrades. + // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2 and HTTP/3, + // where Extended CONNECT requests may have a path, the path matchers will work if + // there is a path present. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + ConnectMatcher connect_matcher = 12; + + // If specified, the route is a path-separated prefix rule meaning that the + // ``:path`` header (without the query string) must either exactly match the + // ``path_separated_prefix`` or have it as a prefix, followed by ``/`` + // + // For example, ``/api/dev`` would match + // ``/api/dev``, ``/api/dev/``, ``/api/dev/v1``, and ``/api/dev?param=true`` + // but would not match ``/api/developer`` + // + // Expect the value to not contain ``?`` or ``#`` and not to end in ``/`` + string path_separated_prefix = 14 [(validate.rules).string = {pattern: "^[^?#]+[^?#/]$"}]; + + // [#extension-category: envoy.path.match] + core.v3.TypedExtensionConfig path_match_policy = 15; + } + + // Indicates that prefix/path matching should be case-sensitive. The default + // is true. Ignored for safe_regex matching. + google.protobuf.BoolValue case_sensitive = 4; + + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. 
Refer to the :ref:`traffic shifting + // ` docs for additional documentation. + // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + core.v3.RuntimeFractionalPercent runtime_fraction = 9; + + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + repeated HeaderMatcher headers = 6; + + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the ``path`` header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the ``path`` header's + // query string for a match to occur. In the event query parameters are + // repeated, only the first value for each key will be considered. + // + // .. note:: + // + // If query parameters are used to pass request message fields when + // `grpc_json_transcoder `_ + // is used, the transcoded message fields may be different. The query parameters are + // URL-encoded, but the message fields are not. For example, if a query + // parameter is "foo%20bar", the message field will be "foo bar". + repeated QueryParameterMatcher query_parameters = 7; + + // Specifies a set of cookies on which the route should match. 
The router parses the ``Cookie`` + // header and evaluates the named cookie against each matcher. If the number of specified cookie + // matchers is nonzero, they all must match for the route to be selected. + repeated CookieMatcher cookies = 17; + + // If specified, only gRPC requests will be matched. The router will check + // that the ``Content-Type`` header has ``application/grpc`` or one of the various + // ``application/grpc+`` values. + GrpcRouteMatchOptions grpc = 8; + + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + TlsContextMatchOptions tls_context = 11; + + // Specifies a set of dynamic metadata matchers on which the route should match. + // The router will check the dynamic metadata against all the specified dynamic metadata matchers. + // If the number of specified dynamic metadata matchers is nonzero, they all must match the + // dynamic metadata for a match to occur. + repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13; + + // Specifies a set of filter state matchers on which the route should match. + // The router will check the filter state against all the specified filter state matchers. + // If the number of specified filter state matchers is nonzero, they all must match the + // filter state for a match to occur. + repeated type.matcher.v3.FilterStateMatcher filter_state = 16; +} + +// Cors policy configuration. +// +// .. attention:: +// +// This message has been deprecated. Please use +// :ref:`CorsPolicy in filter extension ` +// as as alternative. +// +// [#next-free-field: 14] +message CorsPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy"; + + reserved 1, 8, 7; + + reserved "allow_origin", "allow_origin_regex", "enabled"; + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. 
+ repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; + + // Specifies the content for the ``access-control-allow-methods`` header. + string allow_methods = 2; + + // Specifies the content for the ``access-control-allow-headers`` header. + string allow_headers = 3; + + // Specifies the content for the ``access-control-expose-headers`` header. + string expose_headers = 4; + + // Specifies the content for the ``access-control-max-age`` header. + string max_age = 5; + + // Specifies whether the resource allows credentials. + google.protobuf.BoolValue allow_credentials = 6; + + oneof enabled_specifier { + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. + // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.v3.RuntimeFractionalPercent filter_enabled = 9; + } + + // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + // enforced. + // + // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those + // fields have to explicitly disable the filter in order for this setting to take effect. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's ``Origin`` to determine if it's valid but will not enforce any policies. + core.v3.RuntimeFractionalPercent shadow_enabled = 10; + + // Specify whether allow requests whose target server's IP address is more private than that from + // which the request initiator was fetched. + // + // More details refer to https://developer.chrome.com/blog/private-network-access-preflight. 
+ google.protobuf.BoolValue allow_private_network_access = 12; + + // Specifies if preflight requests not matching the configured allowed origin should be forwarded + // to the upstream. Default is ``true``. + google.protobuf.BoolValue forward_not_matching_preflights = 13; +} + +// [#next-free-field: 46] +message RouteAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; + + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + + // HTTP status code - 500 Internal Server Error. + INTERNAL_SERVER_ERROR = 2; + } + + // Configures :ref:`internal redirect ` behavior. + // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] + enum InternalRedirectAction { + option deprecated = true; + + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is + // useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. This behavior can be + // disabled by setting ``disable_shadow_host_suffix_append`` to ``true``. + // + // .. note:: + // + // Shadowing will not be triggered if the primary cluster does not exist. + // + // .. note:: + // + // Shadowing doesn't support HTTP CONNECT and upgrades. 
+ // [#next-free-field: 9] + message RequestMirrorPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.RequestMirrorPolicy"; + + reserved 2; + + reserved "runtime_key"; + + // Only one of ``cluster`` and ``cluster_header`` can be specified. + // [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"]; + + // Only one of ``cluster`` and ``cluster_header`` can be specified. + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. Only the first value in header is used, + // and no shadow request will happen if the value is not found in headers. Envoy will not wait for + // the shadow cluster to respond before returning the response from the primary cluster. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string cluster_header = 5 [ + (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, + (udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier" + ]; + + // If not specified, all requests to the target cluster will be mirrored. + // + // If specified, this field takes precedence over the ``runtime_key`` field and requests must also + // fall under the percentage of matches indicated by this field. + // + // For some fraction N/D, a random number in the range [0,D) is selected. 
If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the request will be mirrored. + core.v3.RuntimeFractionalPercent runtime_fraction = 3; + + // Specifies whether the trace span for the shadow request should be sampled. If this field is not explicitly set, + // the shadow request will inherit the sampling decision of its parent span. This ensures consistency with the trace + // sampling policy of the original request and prevents oversampling, especially in scenarios where runtime sampling + // is disabled. + google.protobuf.BoolValue trace_sampled = 4; + + // Disables appending the ``-shadow`` suffix to the shadowed ``Host`` header. + // + // Defaults to ``false``. + bool disable_shadow_host_suffix_append = 6; + + // Specifies a list of header mutations that should be applied to each mirrored request. + // Header mutations are applied in the order they are specified. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated common.mutation_rules.v3.HeaderMutation request_headers_mutations = 7 + [(validate.rules).repeated = {max_items: 1000}]; + + // Indicates that during mirroring, the host header will be swapped with this value. + // :ref:`disable_shadow_host_suffix_append + // ` + // is implicitly enabled if this field is set. + string host_rewrite_literal = 8 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. 
+ // [#next-free-field: 7] + message HashPolicy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.HashPolicy"; + + message Header { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.HashPolicy.Header"; + + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // If specified, the request header value will be rewritten and used + // to produce the hash key. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; + } + + // CookieAttribute defines an API for adding additional attributes for a HTTP cookie. + message CookieAttribute { + // The name of the cookie attribute. + string name = 1 + [(validate.rules).string = + {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The optional value of the cookie attribute. + string value = 2 [(validate.rules).string = + {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. 
+ message Cookie { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.HashPolicy.Cookie"; + + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + + // Additional attributes for the cookie. They will be used when generating a new cookie. + repeated CookieAttribute attributes = 4; + } + + message ConnectionProperties { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties"; + + // Hash on source IP address. + bool source_ip = 1; + } + + message QueryParameter { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter"; + + // The name of the URL query parameter that will be used to obtain the hash + // key. If the parameter is not present, no hash will be produced. Query + // parameter names are case-sensitive. If query parameters are repeated, only + // the first value will be considered. + string name = 1 [(validate.rules).string = {min_len: 1}]; + } + + message FilterState { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.HashPolicy.FilterState"; + + // The name of the Object in the per-request filterState, which is an + // Envoy::Hashable object. If there is no data associated with the key, + // or the stored object is not Envoy::Hashable, no hash will be produced. 
+ string key = 1 [(validate.rules).string = {min_len: 1}]; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + + // Query parameter hash policy. + QueryParameter query_parameter = 5; + + // Filter state hash policy. + FilterState filter_state = 6; + } + + // The flag that short-circuits the hash computing. This field provides a + // 'fallback' style of configuration: "if a terminal policy doesn't work, + // fallback to rest of the policy list", it saves time when the terminal + // policy works. + // + // If true, and there is already a hash computed, ignore rest of the + // list of hash polices. + // For example, if the following hash methods are configured: + // + // ========= ======== + // specifier terminal + // ========= ======== + // Header A true + // Header B false + // Header C false + // ========= ======== + // + // The generateHash process ends if policy "header A" generates a hash, as + // it's a terminal policy. + bool terminal = 4; + } + + // Allows enabling and disabling upgrades on a per-route basis. + // This overrides any enabled/disabled upgrade filter chain specified in the + // HttpConnectionManager + // :ref:`upgrade_configs + // ` + // but does not affect any custom filter chain specified there. + message UpgradeConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RouteAction.UpgradeConfig"; + + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT or POST requests, when forwarding request payload as raw TCP. + message ConnectConfig { + // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. 
+ core.v3.ProxyProtocolConfig proxy_protocol_config = 1; + + // If set, the route will also allow forwarding POST payload as raw TCP. + bool allow_post = 2; + } + + // The case-insensitive name of this upgrade, for example, "websocket". + // For each upgrade type present in upgrade_configs, requests with + // Upgrade: [upgrade_type] will be proxied upstream. + string upgrade_type = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Determines if upgrades are available on this route. + // + // Defaults to ``true``. + google.protobuf.BoolValue enabled = 2; + + // Configuration for sending data upstream as a raw data payload. This is used for + // CONNECT requests, when forwarding CONNECT payload as raw TCP. + // Note that CONNECT support is currently considered alpha in Envoy. + // [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + ConnectConfig connect_config = 3; + } + + message MaxStreamDuration { + // Specifies the maximum duration allowed for streams on the route. If not specified, the value + // from the :ref:`max_stream_duration + // ` field in + // :ref:`HttpConnectionManager.common_http_protocol_options + // ` + // is used. If this field is set explicitly to zero, any + // HttpConnectionManager max_stream_duration timeout will be disabled for + // this route. + google.protobuf.Duration max_stream_duration = 1; + + // If present, and the request contains a `grpc-timeout header + // `_, use that value as the + // ``max_stream_duration``, but limit the applied timeout to the maximum value specified here. + // If set to 0, the ``grpc-timeout`` header is used without modification. + google.protobuf.Duration grpc_timeout_header_max = 2; + + // If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by + // subtracting the provided duration from the header. 
This is useful for allowing Envoy to set + // its global timeout to be less than that of the deadline imposed by the calling client, which + // makes it more likely that Envoy will handle the timeout instead of having the call canceled + // by the client. If, after applying the offset, the resulting timeout is zero or negative, + // the stream will timeout immediately. + google.protobuf.Duration grpc_timeout_header_offset = 3; + } + + reserved 12, 18, 19, 16, 22, 21, 10; + + reserved "request_mirror_policy"; + + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1 [(validate.rules).string = {min_len: 1}]; + + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + // ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string cluster_header = 2 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + + // Name of the cluster specifier plugin to use to determine the cluster for requests on this route. + // The cluster specifier plugin name must be defined in the associated + // :ref:`cluster specifier plugins ` + // in the :ref:`name ` field. 
+ string cluster_specifier_plugin = 37; + + // Custom cluster specifier plugin configuration to use to determine the cluster for requests + // on this route. + ClusterSpecifierPlugin inline_cluster_specifier_plugin = 39; + } + + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. The filter name should be specified as ``envoy.lb``. + core.v3.Metadata metadata_match = 4; + + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite `, + // :ref:`path_rewrite_policy `, + // :ref:`path_rewrite `, + // or :ref:`prefix_rewrite ` + // may be specified. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting ``/prefix`` to ``/`` and ``/prefix/etc`` to ``/etc`` cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. 
code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to ``/prefix`` will be stripped to ``/``, while + // requests to ``/prefix/etc`` will be stripped to ``/etc``. + string prefix_rewrite = 5 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during forwarding, portions of the path that match the + // pattern should be rewritten, even allowing the substitution of capture + // groups from the pattern into the new path as specified by the rewrite + // substitution string. This is useful to allow application paths to be + // rewritten in a way that is aware of segments with variable content like + // identifiers. The router filter will place the original path as it was + // before the rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite `, + // :ref:`path_rewrite_policy `, + // :ref:`path_rewrite `, + // or :ref:`prefix_rewrite ` + // may be specified. + // + // Examples using Google's `RE2 `_ engine: + // + // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + // into ``/v1/api/instance/foo``. + // + // * The pattern ``one`` paired with a substitution string of ``two`` would + // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + // + // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + // ``\1two\2`` would replace only the first occurrence of ``one``, + // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + // + // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + // ``/aaa/yyy/bbb``. 
+ type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; + + // [#extension-category: envoy.path.rewrite] + core.v3.TypedExtensionConfig path_rewrite_policy = 41; + + // Rewrites the whole path (without query parameters) with the given path value. + // The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // Only one of :ref:`regex_rewrite `, + // :ref:`path_rewrite_policy `, + // :ref:`path_rewrite `, + // or :ref:`prefix_rewrite ` + // may be specified. + // + // The :ref:`substitution format specifier ` could be applied here. + // For example, with the following config: + // + // .. code-block:: yaml + // + // path_rewrite: "/new_path_prefix%REQ(custom-path-header-name)%" + // + // Would rewrite the path to ``/new_path_prefix/some_value`` given the header + // ``custom-path-header-name: some_value``. If the header is not present, the path will be + // rewritten to ``/new_path_prefix``. + // + // + // If the final output of the path rewrite is empty, then the update will be ignored and the + // original path will be preserved. + string path_rewrite = 45; + + // If one of the host rewrite specifiers is set and the + // :ref:`suppress_envoy_headers + // ` flag is not + // set to true, the router filter will place the original host header value before + // rewriting into the :ref:`x-envoy-original-host + // ` header. + // + // And if the + // :ref:`append_x_forwarded_host ` + // is set to true, the original host value will also be appended to the + // :ref:`config_http_conn_man_headers_x-forwarded-host` header. + // + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. 
+ string host_rewrite_literal = 6 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type ``strict_dns`` or ``logical_dns``, + // or when :ref:`hostname ` + // field is not empty. Setting this to true with other cluster types + // has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; + + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + // + // .. note:: + // + // If the header appears multiple times only the first value is used. + string host_rewrite_header = 29 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Indicates that during forwarding, the host header will be swapped with + // the result of the regex substitution executed on path value with query and fragment removed. + // This is useful for transitioning variable content between path segment and subdomain. + // + // For example with the following config: + // + // .. code-block:: yaml + // + // host_rewrite_path_regex: + // pattern: + // google_re2: {} + // regex: "^/(.+)/.+$" + // substitution: \1 + // + // Would rewrite the host header to ``envoyproxy.io`` given the path ``/envoyproxy.io/some/path``. + type.matcher.v3.RegexMatchAndSubstitute host_rewrite_path_regex = 35; + + // Rewrites the host header with the value of this field. The router filter will + // place the original host header value before rewriting into the :ref:`x-envoy-original-host + // ` header. 
+ // + // The :ref:`substitution format specifier ` could be applied here. + // For example, with the following config: + // + // .. code-block:: yaml + // + // host_rewrite: "prefix-%REQ(custom-host-header-name)%" + // + // Would rewrite the host header to ``prefix-some_value`` given the header + // ``custom-host-header-name: some_value``. If the header is not present, the host header will + // be rewritten to an value of ``prefix-``. + // + // If the final output of the host rewrite is empty, then the update will be ignored and the + // original host header will be preserved. + string host_rewrite = 44; + } + + // If set, then a host rewrite action (one of + // :ref:`host_rewrite_literal `, + // :ref:`auto_host_rewrite `, + // :ref:`host_rewrite_header `, or + // :ref:`host_rewrite_path_regex `) + // causes the original value of the host header, if any, to be appended to the + // :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. + bool append_x_forwarded_host = 38; + + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration timeout = 8; + + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. 
A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + // + // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + // is configured, this timeout is scaled according to the value for + // :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + // + // This timeout may also be used in place of ``flush_timeout`` in very specific cases. See the + // documentation for ``flush_timeout`` for more details. + google.protobuf.Duration idle_timeout = 24; + + // Specifies the codec stream flush timeout for the route. + // + // If not specified, the first preference is the global :ref:`stream_flush_timeout + // `, + // but only if explicitly configured. + // + // If neither the explicit HCM-wide flush timeout nor this route-specific flush timeout is configured, + // the route's stream idle timeout is reused for this timeout. This is for + // backwards compatibility since both behaviors were historically controlled by the one timeout. + // + // If the route also does not have an idle timeout configured, the global :ref:`stream_idle_timeout + // `. used, again + // for backwards compatibility. That timeout defaults to 5 minutes. + // + // A value of 0 via any of the above paths will completely disable the timeout for a given route. 
+ google.protobuf.Duration flush_timeout = 42; + + // Specifies how to send request over TLS early data. + // If absent, allows `safe HTTP requests `_ to be sent on early data. + // [#extension-category: envoy.route.early_data_policy] + core.v3.TypedExtensionConfig early_data_policy = 40; + + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g., policies are not merged, the most internal one becomes the enforced policy). + RetryPolicy retry_policy = 9; + + // [#not-implemented-hide:] + // Specifies the configuration for retry policy extension. Note that if this is set, it'll take + // precedence over the virtual host level retry policy entirely (e.g., policies are not merged, + // the most internal one becomes the enforced policy). :ref:`Retry policy ` + // should not be set if this field is used. + google.protobuf.Any retry_policy_typed_config = 33; + + // Specify a set of route request mirroring policies. + // It takes precedence over the virtual host and route config mirror policy entirely. + // That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + repeated RequestMirrorPolicy request_mirror_policies = 30; + + // Optionally specifies the :ref:`routing priority `. + core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + + // Specifies a set of rate limit configurations that could be applied to the + // route. + repeated RateLimit rate_limits = 13; + + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + // + // .. attention:: + // + // This field is deprecated. 
Please use :ref:`vh_rate_limits ` + google.protobuf.BoolValue include_vh_rate_limits = 14 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. + repeated HashPolicy hash_policy = 15; + + // Indicates that the route has a CORS policy. This field is ignored if related cors policy is + // found in the :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`. + // + // .. attention:: + // + // This option has been deprecated. Please use + // :ref:`Route.typed_per_filter_config` or + // :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + // to configure the CORS HTTP filter. + CorsPolicy cors = 17 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Deprecated by :ref:`grpc_timeout_header_max ` + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. 
If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the ``grpc-timeout`` header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. + // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + // + // .. note:: + // + // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + // precedence over `grpc-timeout header `_, when + // both are present. See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration max_grpc_timeout = 23 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Deprecated by :ref:`grpc_timeout_header_offset `. + // If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + repeated UpgradeConfig upgrade_configs = 25; + + // If present, Envoy will try to follow an upstream redirect response instead of proxying the + // response back to the downstream. 
An upstream redirect response is defined + // by :ref:`redirect_response_codes + // `. + InternalRedirectPolicy internal_redirect_policy = 34; + + InternalRedirectAction internal_redirect_action = 26 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 31 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g., policies are not merged, the most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; + + // Specifies the maximum stream duration for this route. + MaxStreamDuration max_stream_duration = 36; +} + +// HTTP retry :ref:`architecture overview `. 
+// [#next-free-field: 14] +message RetryPolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy"; + + enum ResetHeaderFormat { + SECONDS = 0; + UNIX_TIMESTAMP = 1; + } + + message RetryPriority { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RetryPolicy.RetryPriority"; + + reserved 2; + + reserved "config"; + + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // [#extension-category: envoy.retry_priorities] + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RetryPolicy.RetryHostPredicate"; + + reserved 2; + + reserved "config"; + + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // [#extension-category: envoy.retry_host_predicates] + oneof config_type { + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RetryPolicy.RetryBackOff"; + + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the ``base_interval`` if set. The default is 10 times the + // ``base_interval``. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + + message ResetHeader { + // The name of the reset header. + // + // .. 
note:: + // + // If the header appears multiple times only the first value is used. + string name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The format of the reset header. + ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}]; + } + + // A retry back-off strategy that applies when the upstream server rate limits + // the request. + // + // Given this configuration: + // + // .. code-block:: yaml + // + // rate_limited_retry_back_off: + // reset_headers: + // - name: Retry-After + // format: SECONDS + // - name: X-RateLimit-Reset + // format: UNIX_TIMESTAMP + // max_interval: "300s" + // + // The following algorithm will apply: + // + // 1. If the response contains the header ``Retry-After`` its value must be on + // the form ``120`` (an integer that represents the number of seconds to + // wait before retrying). If so, this value is used as the back-off interval. + // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its + // value must be on the form ``1595320702`` (an integer that represents the + // point in time at which to retry, as a Unix timestamp in seconds). If so, + // the current time is subtracted from this value and the result is used as + // the back-off interval. + // 3. Otherwise, Envoy will use the default + // :ref:`exponential back-off ` + // strategy. + // + // No matter which format is used, if the resulting back-off interval exceeds + // ``max_interval`` it is discarded and the next header in ``reset_headers`` + // is tried. If a request timeout is configured for the route it will further + // limit how long the request will be allowed to run. + // + // To prevent many clients retrying at the same point in time jitter is added + // to the back-off interval, so the resulting interval is decided by taking: + // ``random(interval, interval * 1.5)``. + // + // .. 
attention:: + // + // Configuring ``rate_limited_retry_back_off`` will not by itself cause a request + // to be retried. You will still need to configure the right retry policy to match + // the responses from the upstream server. + message RateLimitedRetryBackOff { + // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) + // to match against the response. Headers are tried in order, and matched case + // insensitive. The first header to be parsed successfully is used. If no headers + // match the default exponential back-off is used instead. + repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Specifies the maximum back off interval that Envoy will allow. If a reset + // header contains an interval longer than this then it will be discarded and + // the next header will be tried. + // + // Defaults to 300 seconds. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + string retry_on = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + google.protobuf.UInt32Value num_retries = 2 + [(udpa.annotations.field_migrate).rename = "max_retries"]; + + // Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + // parameter is optional. The same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. 
+ // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + google.protobuf.Duration per_try_timeout = 3; + + // Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + // parameter is optional and if absent there is no per-try idle timeout. The semantics of the per- + // try idle timeout are similar to the + // :ref:`route idle timeout ` and + // :ref:`stream idle timeout + // ` + // both enforced by the HTTP connection manager. The difference is that this idle timeout + // is enforced by the router for each individual attempt and thus after all previous filters have + // run, as opposed to *before* all previous filters run for the other idle timeouts. This timeout + // is useful in cases in which total request timeout is bounded by a number of retries and a + // :ref:`per_try_timeout `, but + // there is a desire to ensure each try is making incremental progress. Note also that similar + // to :ref:`per_try_timeout `, + // this idle timeout does not start until after both the entire request has been received by the + // router *and* a connection pool connection has been obtained. Unlike + // :ref:`per_try_timeout `, + // the idle timer continues once the response starts streaming back to the downstream client. + // This ensures that response data continues to make progress without using one of the HTTP + // connection manager idle timeouts. + google.protobuf.Duration per_try_idle_timeout = 13; + + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority retry_priority = 4; + + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. 
If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + repeated RetryHostPredicate retry_host_predicate = 5; + + // Retry options predicates that will be applied prior to retrying a request. These predicates + // allow customizing request behavior between retries. + // [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + repeated core.v3.TypedExtensionConfig retry_options_predicates = 12; + + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + int64 host_selection_retry_max_attempts = 6; + + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + repeated uint32 retriable_status_codes = 7; + + // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // ``upstream.base_retry_backoff_ms`` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff retry_back_off = 8; + + // Specifies parameters that control a retry back-off strategy that is used + // when the request is rate limited by the upstream server. The server may + // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to + // provide feedback to the client on how long to wait before retrying. If + // configured, this back-off strategy will be used instead of the + // default exponential back off strategy (configured using ``retry_back_off``) + // whenever a response includes the matching headers. 
+ RateLimitedRetryBackOff rate_limited_retry_back_off = 11; + + // HTTP response headers that trigger a retry if present in the response. A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + repeated HeaderMatcher retriable_headers = 9; + + // HTTP headers which must be present in the request for retries to be attempted. + repeated HeaderMatcher retriable_request_headers = 10; +} + +// HTTP request hedging :ref:`architecture overview `. +message HedgePolicy { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HedgePolicy"; + + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // + // Defaults to 1. + // [#not-implemented-hide:] + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // + // Defaults to 0. + // [#not-implemented-hide:] + type.v3.FractionalPercent additional_request_chance = 2; + + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left. + // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + // + // .. 
note:: + // + // For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // + // Defaults to ``false``. + bool hedge_on_per_try_timeout = 3; +} + +// [#next-free-field: 10] +message RedirectAction { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RedirectAction"; + + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + + // When the scheme redirection take place, the following rules apply: + // 1. If the source URI scheme is ``http`` and the port is explicitly + // set to ``:80``, the port will be removed after the redirection + // 2. If the source URI scheme is ``https`` and the port is explicitly + // set to ``:443``, the port will be removed after the redirection + oneof scheme_rewrite_specifier { + // The scheme portion of the URL will be swapped with "https". + bool https_redirect = 4; + + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + + // The host portion of the URL will be swapped with this value. + string host_redirect = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; + + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + // Please note that query string in path_redirect will override the + // request's query string and will not be stripped. 
+    //
+    // For example, let's say we have the following routes:
+    //
+    // - match: { path: "/old-path-1" }
+    //   redirect: { path_redirect: "/new-path-1" }
+    // - match: { path: "/old-path-2" }
+    //   redirect: { path_redirect: "/new-path-2", strip_query: "true" }
+    // - match: { path: "/old-path-3" }
+    //   redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" }
+    //
+    // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1"
+    // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2"
+    // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1"
+    string path_redirect = 2
+        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
+
+    // Indicates that during redirection, the matched prefix (or path)
+    // should be swapped with this value. This option allows redirect URLs to be dynamically created
+    // based on the request.
+    //
+    // .. attention::
+    //
+    //   Pay attention to the use of trailing slashes as mentioned in
+    //   :ref:`RouteAction's prefix_rewrite `.
+    string prefix_rewrite = 5
+        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
+
+    // Indicates that during redirect, portions of the path that match the
+    // pattern should be rewritten, even allowing the substitution of capture
+    // groups from the pattern into the new path as specified by the rewrite
+    // substitution string. This is useful to allow application paths to be
+    // rewritten in a way that is aware of segments with variable content like
+    // identifiers.
+    //
+    // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:
+    //
+    // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution
+    //   string of ``\2/instance/\1`` would transform ``/service/foo/v1/api``
+    //   into ``/v1/api/instance/foo``.
+ // + // * The pattern ``one`` paired with a substitution string of ``two`` would + // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + // + // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + // ``\1two\2`` would replace only the first occurrence of ``one``, + // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + // + // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + // ``/aaa/yyy/bbb``. + type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 9; + } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. + bool strip_query = 6; +} + +message DirectResponseAction { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.DirectResponseAction"; + + // Specifies the HTTP response status to be returned. + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 200}]; + + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. note:: + // + // Headers can be specified using ``response_headers_to_add`` in the enclosing + // :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or + // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. + core.v3.DataSource body = 2; + + // Specifies a format string for the response body. If present, the contents of + // ``body_format`` will be formatted and used as the response body, where the + // contents of ``body`` (may be empty) will be passed as the variable ``%LOCAL_REPLY_BODY%``. 
+ // If neither are provided, no body is included in the generated response. + core.v3.SubstitutionFormatString body_format = 3; +} + +// [#not-implemented-hide:] +message NonForwardingAction { +} + +message Decorator { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Decorator"; + + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + string operation = 1 [(validate.rules).string = {min_len: 1}]; + + // Whether the decorated details should be propagated to the other party. The default is ``true``. + google.protobuf.BoolValue propagate = 2; +} + +// [#next-free-field: 7] +message Tracing { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Tracing"; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + type.v3.FractionalPercent client_sampling = 1; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.FractionalPercent random_sampling = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). 
This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.v3.FractionalPercent overall_sampling = 3; + + // A list of custom tags with unique tag name to create tags for the active span. + // It will take effect after merging with the :ref:`corresponding configuration + // ` + // configured in the HTTP connection manager. If two tags with the same name are configured + // each in the HTTP connection manager and the route level, the one configured here takes + // priority. + repeated type.tracing.v3.CustomTag custom_tags = 4; + + // The operation name of the span which will be used for tracing. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown specifier values are replaced with the empty string instead of ``-``. + // + // This field will take precedence over and make following settings ineffective: + // + // * :ref:`route decorator `. + // * :ref:`x-envoy-decorator-operation `. + // * :ref:`HCM tracing operation + // `. + string operation = 5; + + // The operation name of the upstream span which will be used for tracing. + // This only takes effect when ``spawn_upstream_span`` is set to true and the upstream + // span is created. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown specifier values are replaced with the empty string instead of ``-``. 
+ // + // This field will take precedence over and make following settings ineffective: + // + // * :ref:`HCM tracing upstream operation + // ` + string upstream_operation = 6; +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect” statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. +message VirtualCluster { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualCluster"; + + reserved 1, 3; + + reserved "pattern", "method"; + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers ``:path`` and ``:method`` can be used to match the request path and + // method, respectively. + repeated HeaderMatcher headers = 4; + + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + string name = 2 [(validate.rules).string = {min_len: 1}]; +} + +// Global rate limiting :ref:`architecture overview `. 
+// Also applies to Local rate limiting :ref:`using descriptors `. +// [#next-free-field: 7] +message RateLimit { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; + + // [#next-free-field: 13] + message Action { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action"; + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action.SourceCluster"; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action.DestinationCluster"; + } + + // The following descriptor entry is appended when a header contains a key that matches the + // ``header_name``: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action.RequestHeaders"; + + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. 
+ string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; + + // Controls the behavior when the specified header is not present in the request. + // + // If set to ``false`` (default): + // + // * Envoy does **NOT** call the rate limiting service for this descriptor. + // * Useful if the header is optional and you prefer to skip rate limiting when it's absent. + // + // If set to ``true``: + // + // * Envoy calls the rate limiting service but omits this descriptor if the header is missing. + // * Useful if you want Envoy to enforce rate limiting even when the header is not present. + // + bool skip_if_absent = 3; + } + + // The following descriptor entry is appended when a query parameter contains a key that matches the + // ``query_parameter_name``: + // + // .. code-block:: cpp + // + // ("", "") + message QueryParameters { + // The name of the query parameter to use for rate limiting. Value of this query parameter is used to populate + // the value of the descriptor entry for the descriptor_key. + string query_parameter_name = 1 [(validate.rules).string = {min_len: 1}]; + + // The key to use when creating the rate limit descriptor entry. This descriptor key will be used to identify the + // rate limit rule in the rate limiting service. + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; + + // Controls the behavior when the specified query parameter is not present in the request. + // + // If set to ``false`` (default): + // + // * Envoy does **NOT** call the rate limiting service for this descriptor. + // * Useful if the query parameter is optional and you prefer to skip rate limiting when it's absent. + // + // If set to ``true``: + // + // * Envoy calls the rate limiting service but omits this descriptor if the query parameter is missing. 
+ // * Useful if you want Envoy to enforce rate limiting even when the query parameter is not present. + // + bool skip_if_absent = 3; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action.RemoteAddress"; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // masked address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("masked_remote_address", "") + message MaskedRemoteAddress { + // Length of prefix mask len for IPv4 (e.g. 0, 32). + // + // Defaults to 32 when unset. + // + // For example, trusted address from x-forwarded-for is ``192.168.1.1``, + // the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); + // if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). + google.protobuf.UInt32Value v4_prefix_mask_len = 1 [(validate.rules).uint32 = {lte: 32}]; + + // Length of prefix mask len for IPv6 (e.g. 0, 128). + // + // Defaults to 128 when unset. + // + // For example, trusted address from x-forwarded-for is ``2001:abcd:ef01:2345:6789:abcd:ef01:234``, + // the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); + // if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). + google.protobuf.UInt32Value v6_prefix_mask_len = 2 [(validate.rules).uint32 = {lte: 128}]; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action.GenericKey"; + + // Descriptor value of entry. 
+ // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown specifier values are replaced with the empty string instead of ``-``. + // + // .. note:: + // + // Formatter parsing is controlled by the runtime feature flag + // ``envoy.reloadable_features.enable_formatter_for_ratelimit_action_descriptor_value`` + // (disabled by default). + // + // When enabled: The format string can contain multiple valid substitution + // fields. If multiple substitution fields are present, their results will be concatenated + // to form the final descriptor value. If it contains no substitution fields, the value + // will be used as is. If the final concatenated result is empty and ``default_value`` is set, + // the ``default_value`` will be used. If ``default_value`` is not set and the result is + // empty, this descriptor will be skipped and not included in the rate limit call. + // + // When disabled (default): The descriptor_value is used as a literal string without any formatter + // parsing or substitution. + // + // For example, ``static_value`` will be used as is since there are no substitution fields. + // ``%REQ(:method)%`` will be replaced with the HTTP method, and + // ``%REQ(:method)%%REQ(:path)%`` will be replaced with the concatenation of the HTTP method and path. + // ``%CEL(request.headers['user-id'])%`` will use CEL to extract the user ID from request headers. + // + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // An optional value to use if the final concatenated ``descriptor_value`` result is empty. + // Only applicable when formatter parsing is enabled by the runtime feature flag + // ``envoy.reloadable_features.enable_formatter_for_ratelimit_action_descriptor_value`` (disabled by default). + string default_value = 3; + + // An optional key to use in the descriptor entry. If not set it defaults + // to 'generic_key' as the descriptor key. 
+ string descriptor_key = 2; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + // [#next-free-field: 6] + message HeaderValueMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch"; + + // Descriptor value of entry. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown specifier values are replaced with the empty string instead of ``-``. + // + // .. note:: + // + // Formatter parsing is controlled by the runtime feature flag + // ``envoy.reloadable_features.enable_formatter_for_ratelimit_action_descriptor_value`` + // (disabled by default). + // + // When enabled: The format string can contain multiple valid substitution + // fields. If multiple substitution fields are present, their results will be concatenated + // to form the final descriptor value. If it contains no substitution fields, the value + // will be used as is. All substitution fields will be evaluated and their results + // concatenated. If the final concatenated result is empty and ``default_value`` is set, + // the ``default_value`` will be used. If ``default_value`` is not set and the result is + // empty, this descriptor will be skipped and not included in the rate limit call. + // + // When disabled (default): The descriptor_value is used as a literal string without any formatter + // parsing or substitution. + // + // For example, ``static_value`` will be used as is since there are no substitution fields. + // ``%REQ(:method)%`` will be replaced with the HTTP method, and + // ``%REQ(:method)%%REQ(:path)%`` will be replaced with the concatenation of the HTTP method and path. + // ``%CEL(request.headers['user-id'])%`` will use CEL to extract the user ID from request headers. 
+ // + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // An optional value to use if the final concatenated ``descriptor_value`` result is empty. + // Only applicable when formatter parsing is enabled by the runtime feature flag + // ``envoy.reloadable_features.enable_formatter_for_ratelimit_action_descriptor_value`` (disabled by default). + string default_value = 5; + + // The key to use in the descriptor entry. + // + // Defaults to ``header_match``. + string descriptor_key = 4; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request's headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; + } + + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action + message DynamicMetaData { + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the dynamic metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if ``metadata_key`` is empty. 
If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + } + + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + // [#next-free-field: 6] + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if ``metadata_key`` is empty. If not set and + // no value is present under the metadata_key then ``skip_if_absent`` is followed to + // skip calling the rate limiting service or skip the descriptor. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + + // Controls the behavior when the specified ``metadata_key`` is empty and ``default_value`` is not set. + // + // If set to ``false`` (default): + // + // * Envoy does **NOT** call the rate limiting service for this descriptor. + // * Useful if the metadata is optional and you prefer to skip rate limiting when it's absent. + // + // If set to ``true``: + // + // * Envoy calls the rate limiting service but omits this descriptor if the ``metadata_key`` is empty and + // ``default_value`` is missing. + // * Useful if you want Envoy to enforce rate limiting even when the metadata is not present. + // + bool skip_if_absent = 5; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. 
code-block:: cpp + // + // ("query_match", "") + // [#next-free-field: 6] + message QueryParameterValueMatch { + // Descriptor value of entry. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown specifier values are replaced with the empty string instead of ``-``. + // + // .. note:: + // + // Formatter parsing is controlled by the runtime feature flag + // ``envoy.reloadable_features.enable_formatter_for_ratelimit_action_descriptor_value`` + // (disabled by default). + // + // When enabled: The format string can contain multiple valid substitution + // fields. If multiple substitution fields are present, their results will be concatenated + // to form the final descriptor value. If it contains no substitution fields, the value + // will be used as is. All substitution fields will be evaluated and their results + // concatenated. If the final concatenated result is empty and ``default_value`` is set, + // the ``default_value`` will be used. If ``default_value`` is not set and the result is + // empty, this descriptor will be skipped and not included in the rate limit call. + // + // When disabled (default): The descriptor_value is used as a literal string without any formatter + // parsing or substitution. + // + // For example, ``static_value`` will be used as is since there are no substitution fields. + // ``%REQ(:method)%`` will be replaced with the HTTP method, and + // ``%REQ(:method)%%REQ(:path)%`` will be replaced with the concatenation of the HTTP method and path. + // ``%CEL(request.headers['user-id'])%`` will use CEL to extract the user ID from request headers. + // + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // An optional value to use if the final concatenated ``descriptor_value`` result is empty. 
+ // Only applicable when formatter parsing is enabled by the runtime feature flag + // ``envoy.reloadable_features.enable_formatter_for_ratelimit_action_descriptor_value`` (disabled by default). + string default_value = 5; + + // The key to use in the descriptor entry. + // + // Defaults to ``query_match``. + string descriptor_key = 4; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of query parameters that the rate limit action should match + // on. The action will check the request's query parameters against all the + // specified query parameters in the config. A match will happen if all the + // query parameters in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated QueryParameterMatcher query_parameters = 3 + [(validate.rules).repeated = {min_items: 1}]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on query parameters. + QueryParameters query_parameters = 12; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + + // Rate limit on dynamic metadata. + // + // .. 
attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData dynamic_metadata = 7 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // Rate limit on metadata. + MetaData metadata = 8; + + // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + // + // :ref:`HTTP matching input functions ` are + // permitted as descriptor extensions. The input functions are only + // looked up if there is no rate limit descriptor extension matching + // the type URL. + // + // [#extension-category: envoy.rate_limit_descriptors] + core.v3.TypedExtensionConfig extension = 9; + + // Rate limit on masked remote address. + MaskedRemoteAddress masked_remote_address = 10; + + // Rate limit on the existence of query parameters. + QueryParameterValueMatch query_parameter_value_match = 11; + } + } + + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } + + message HitsAddend { + // Fixed number of hits to add to the rate limit descriptor. + // + // One of the ``number`` or ``format`` fields should be set but not both. + google.protobuf.UInt64Value number = 1 [(validate.rules).uint64 = {lte: 1000000000}]; + + // Substitution format string to extract the number of hits to add to the rate limit descriptor. 
+  // The same :ref:`format specifier ` as used for
+  // :ref:`HTTP access logging ` applies here.
+  //
+  // .. note::
+  //
+  //   The format string must contain only a single valid substitution field. If the format string
+  //   does not meet the requirement, the configuration will be rejected.
+  //
+  //   The substitution field should generate a non-negative number or a string representation of
+  //   a non-negative number. The value of the non-negative number should be less than or equal
+  //   to 1000000000 like the ``number`` field. If the output of the substitution field does not meet
+  //   the requirement, this will be treated as an error and the current descriptor will be ignored.
+  //
+  //   For example, the ``%BYTES_RECEIVED%`` format string will be replaced with the number of bytes
+  //   received in the request.
+  //
+  //   One of the ``number`` or ``format`` fields should be set but not both.
+    string format = 2 [(validate.rules).string = {prefix: "%" suffix: "%" ignore_empty: true}];
+  }
+
+  // Refers to the stage set in the filter. The rate limit configuration only
+  // applies to filters with the same stage number. The default stage number is
+  // 0.
+  //
+  // .. note::
+  //
+  //   The filter supports a range of 0 - 10 inclusively for stage numbers.
+  //
+  // .. note::
+  //   This is not supported if the rate limit action is configured in the ``typed_per_filter_config`` like
+  //   :ref:`VirtualHost.typed_per_filter_config` or
+  //   :ref:`Route.typed_per_filter_config`, etc.
+  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];
+
+  // The key to be set in runtime to disable this rate limit configuration.
+  //
+  // .. note::
+  //   This is not supported if the rate limit action is configured in the ``typed_per_filter_config`` like
+  //   :ref:`VirtualHost.typed_per_filter_config` or
+  //   :ref:`Route.typed_per_filter_config`, etc.
+  string disable_key = 2;
+
+  // A list of actions that are to be applied for this rate limit configuration. 
+ // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + // + // .. note:: + // This is not supported if the rate limit action is configured in the ``typed_per_filter_config`` like + // :ref:`VirtualHost.typed_per_filter_config` or + // :ref:`Route.typed_per_filter_config`, etc. + Override limit = 4; + + // An optional hits addend to be appended to the descriptor produced by this rate limit + // configuration. + // + // .. note:: + // This is only supported if the rate limit action is configured in the ``typed_per_filter_config`` like + // :ref:`VirtualHost.typed_per_filter_config` or + // :ref:`Route.typed_per_filter_config`, etc. + HitsAddend hits_addend = 5; + + // If true, the rate limit request will be applied when the stream completes. The default value is false. + // This is useful when the rate limit budget needs to reflect the response context that is not available + // on the request path. + // + // For example, let's say the upstream service calculates the usage statistics and returns them in the response body + // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. + // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), + // this can be used to subtract the usage statistics from the rate limit budget. 
+ // + // A rate limit applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this config. + // In other words, the current request won't be blocked when this is true, but the budget will be updated for the subsequent + // requests based on the action with this field set to true. Users should ensure that the rate limit is enforced by the actions + // applied on the request path, i.e. the ones with this field set to false. + // + // Currently, this is only supported by the HTTP global rate filter. + bool apply_on_stream_done = 6; +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 ``Host`` +// header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. +// +// .. attention:: +// +// To route on HTTP method, use the special HTTP/2 ``:method`` header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "string_match": { +// "exact": "POST" +// } +// } +// +// .. attention:: +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +// [#next-free-field: 15] +message HeaderMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HeaderMatcher"; + + reserved 2, 3, 5; + + reserved "regex_match"; + + // Specifies the name of the header in the request. + string name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // Specifies how the header match will be performed to route the request. + oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + // + // .. 
attention:: + // + // This field is deprecated. Please use :ref:`string_match `. + string exact_match = 4 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + // + // .. attention:: + // + // This field is deprecated. Please use :ref:`string_match `. + type.matcher.v3.RegexMatcher safe_regex_match = 11 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, ``somestring``, 10.9, + // ``-1somestring`` + type.v3.Int64Range range_match = 6; + + // If specified as true, header match will be performed based on whether the header is in the + // request. If specified as false, header match will be performed based on whether the header is absent. + bool present_match = 7; + + // If specified, header match will be performed based on the prefix of the header value. + // + // .. note:: + // + // Empty prefix is not allowed. Please use ``present_match`` instead. + // + // .. attention:: + // + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The prefix ``abcd`` matches the value ``abcdxyz``, but not for ``abcxyz``. 
+ string prefix_match = 9 [ + deprecated = true, + (validate.rules).string = {min_len: 1}, + (envoy.annotations.deprecated_at_minor_version) = "3.0" + ]; + + // If specified, header match will be performed based on the suffix of the header value. + // + // .. note:: + // + // Empty suffix is not allowed. Please use ``present_match`` instead. + // + // .. attention:: + // + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The suffix ``abcd`` matches the value ``xyzabcd``, but not for ``xyzbcd``. + string suffix_match = 10 [ + deprecated = true, + (validate.rules).string = {min_len: 1}, + (envoy.annotations.deprecated_at_minor_version) = "3.0" + ]; + + // If specified, header match will be performed based on whether the header value contains + // the given value or not. + // + // .. note:: + // + // Empty contains match is not allowed. Please use ``present_match`` instead. + // + // .. attention:: + // + // This field is deprecated. Please use :ref:`string_match `. + // + // Examples: + // + // * The value ``abcd`` matches the value ``xyzabcdpqr``, but not for ``xyzbcdpqr``. + string contains_match = 12 [ + deprecated = true, + (validate.rules).string = {min_len: 1}, + (envoy.annotations.deprecated_at_minor_version) = "3.0" + ]; + + // If specified, header match will be performed based on the string match of the header value. + type.matcher.v3.StringMatcher string_match = 13; + } + + // If specified, the match result will be inverted before checking. + // + // Defaults to ``false``. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value ``1234``, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. + bool invert_match = 8; + + // If specified, for any header match rule, if the header match rule specified header + // does not exist, this header value will be treated as empty. + // + // Defaults to ``false``. 
+ // + // Examples: + // + // * The header match rule specified header "header1" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to true; The "header1" header is not present. The match rule will + // treat the "header1" as an empty header. The empty header does not match the range, + // so it will match when inverted. + // * The header match rule specified header "header2" to range match of [0, 10], + // :ref:`invert_match ` + // is set to true and :ref:`treat_missing_header_as_empty ` + // is set to false; The "header2" header is not present and the header + // matcher rule for "header2" will be ignored so it will not match. + // * The header match rule specified header "header3" to a string regex match + // ``^$`` which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to true; The "header3" header is not present. + // The match rule will treat the "header3" header as an empty header so it will match. + // * The header match rule specified header "header4" to a string regex match + // ``^$`` which means an empty string, and + // :ref:`treat_missing_header_as_empty ` + // is set to false; The "header4" header is not present. + // The match rule for "header4" will be ignored so it will not match. + bool treat_missing_header_as_empty = 14; +} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. +// [#next-free-field: 7] +message QueryParameterMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.route.QueryParameterMatcher"; + + reserved 3, 4; + + reserved "value", "regex"; + + // Specifies the name of a key that must be present in the requested + // ``path``'s query string. 
+ string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; + + oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. + type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + + // Specifies whether a query parameter should be present. + bool present_match = 6; + } +} + +// Cookie matching inspects individual name/value pairs parsed from the ``Cookie`` header. +message CookieMatcher { + // Specifies the cookie name to evaluate. + string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}]; + + // Match the cookie value using :ref:`StringMatcher + // ` semantics. + type.matcher.v3.StringMatcher string_match = 2 [(validate.rules).message = {required: true}]; + + // Invert the match result. If the cookie is not present, the match result is false, so + // ``invert_match`` will cause the matcher to succeed when the cookie is absent. + bool invert_match = 3; +} + +// HTTP Internal Redirect :ref:`architecture overview `. +// [#next-free-field: 6] +message InternalRedirectPolicy { + // An internal redirect is not handled, unless the number of previous internal redirects that a + // downstream request has encountered is lower than this value. + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 1; + + // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + // only 302 will be treated as internal redirect. + // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. 
+  repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];
+
+  // Specifies a list of predicates that are queried when an upstream response is deemed
+  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject
+  // the redirect, causing the response to be proxied to downstream.
+  // [#extension-category: envoy.internal_redirect_predicates]
+  repeated core.v3.TypedExtensionConfig predicates = 3;
+
+  // Allow internal redirect to follow a target URI with a different scheme than the value of
+  // x-forwarded-proto. The default is ``false``.
+  bool allow_cross_scheme_redirect = 4;
+
+  // Specifies a list of headers, by name, to copy from the internal redirect into the subsequent
+  // request. If a header is specified here but not present in the redirect, it will be cleared in
+  // the subsequent request.
+  repeated string response_headers_to_copy = 5 [(validate.rules).repeated = {
+    unique: true
+    items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
+  }];
+}
+
+// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the
+// map value in
+// :ref:`VirtualHost.typed_per_filter_config`,
+// :ref:`Route.typed_per_filter_config`,
+// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`
+// to add additional flags to the filter.
+message FilterConfig {
+  // The filter config.
+  google.protobuf.Any config = 1;
+
+  // If true, the filter is optional, meaning that if the client does
+  // not support the specified filter, it may ignore the map entry rather
+  // than rejecting the config.
+  bool is_optional = 2;
+
+  // If true, the filter is disabled in the route or virtual host and the ``config`` field is ignored.
+  // See :ref:`route based filter chain `
+  // for more details.
+  //
+  // .. note::
+  //
+  //   This field will take effect when the request arrives and the filter chain is created for the request. 
+ // If initial route is selected for the request and a filter is disabled in the initial route, then + // the filter will not be added to the filter chain. + // And if the request is mutated later and re-match to another route, the disabled filter by the + // initial route will not be added back to the filter chain because the filter chain is already + // created and it is too late to change the chain. + // + bool disabled = 3; +} diff --git a/proto/envoy/config/route/v3/scoped_route.proto b/proto/envoy/config/route/v3/scoped_route.proto new file mode 100644 index 0000000..ff4cc68 --- /dev/null +++ b/proto/envoy/config/route/v3/scoped_route.proto @@ -0,0 +1,133 @@ +syntax = "proto3"; + +package envoy.config.route.v3; + +import "envoy/config/route/v3/route.proto"; + +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.config.route.v3"; +option java_outer_classname = "ScopedRouteProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/route/v3;routev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP scoped routing configuration] +// * Routing :ref:`architecture overview ` + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. +// The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained dynamically +// via RDS (:ref:`route_configuration_name`) +// or specified inline (:ref:`route_configuration`). +// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. 
+// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... +// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: "," +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the ``route-config1`` +// RouteConfiguration being assigned to the HTTP request/stream. +// +// [#next-free-field: 6] +message ScopedRouteConfiguration { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.ScopedRouteConfiguration"; + + // Specifies a key which is matched against the output of the + // :ref:`scope_key_builder` + // specified in the HttpConnectionManager. The matching is done per HTTP + // request and is dependent on the order of the fragments contained in the + // Key. + message Key { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.ScopedRouteConfiguration.Key"; + + message Fragment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.ScopedRouteConfiguration.Key.Fragment"; + + oneof type { + option (validate.required) = true; + + // A string to match against. + string string_key = 1; + } + } + + // The ordered set of fragments to match against. 
The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. + repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // Whether the RouteConfiguration should be loaded on demand. + bool on_demand = 4; + + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated + // with this scope. + string route_configuration_name = 2 + [(udpa.annotations.field_migrate).oneof_promotion = "route_config"]; + + // The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with the scope. + RouteConfiguration route_configuration = 5 + [(udpa.annotations.field_migrate).oneof_promotion = "route_config"]; + + // The key to match against. + Key key = 3 [(validate.rules).message = {required: true}]; +} diff --git a/proto/envoy/extensions/transport_sockets/tls/v3/BUILD b/proto/envoy/extensions/transport_sockets/tls/v3/BUILD new file mode 100644 index 0000000..8a81977 --- /dev/null +++ b/proto/envoy/extensions/transport_sockets/tls/v3/BUILD @@ -0,0 +1,14 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/annotations:pkg", + "//envoy/config/core/v3:pkg", + "//envoy/type/matcher/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + ], +) diff --git a/proto/envoy/extensions/transport_sockets/tls/v3/cert.proto b/proto/envoy/extensions/transport_sockets/tls/v3/cert.proto new file mode 100644 index 0000000..8a5f896 --- /dev/null +++ b/proto/envoy/extensions/transport_sockets/tls/v3/cert.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; +import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "CertProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3;tlsv3"; diff --git a/proto/envoy/extensions/transport_sockets/tls/v3/common.proto b/proto/envoy/extensions/transport_sockets/tls/v3/common.proto new file mode 100644 index 0000000..9bc5fb5 --- /dev/null +++ b/proto/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -0,0 +1,597 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = 
"io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "CommonProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3;tlsv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common TLS configuration] + +// [#next-free-field: 7] +message TlsParameters { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; + + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + enum CompliancePolicy { + // FIPS_202205 configures a TLS connection to use: + // + // * TLS 1.2 or 1.3 + // * For TLS 1.2, only ECDHE_[RSA|ECDSA]_WITH_AES_*_GCM_SHA*. + // * For TLS 1.3, only AES-GCM + // * P-256 or P-384 for key agreement. + // * For server signatures, only ``PKCS#1/PSS`` with ``SHA256/384/512``, or ECDSA + // with P-256 or P-384. + // + // .. attention:: + // + // Please refer to `BoringSSL policies `_ + // for details. + FIPS_202205 = 0; + } + + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for both clients and servers. + // + // TLS protocol versions below TLSv1_2 require setting compatible ciphers with the + // ``cipher_suites`` setting as the default ciphers no longer include compatible ciphers. + // + // .. attention:: + // + // Using TLS protocol versions below TLSv1_2 has serious security considerations and risks. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; + + // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for + // servers. 
+ TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_ + // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). + // + // If not specified, a default list will be used. Defaults are different for server (downstream) and + // client (upstream) TLS configurations. + // Defaults will change over time in response to security considerations; If you care, configure + // it instead of using the default. + // + // In non-FIPS builds, the default server cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In builds using :ref:`BoringSSL FIPS `, the default server cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In non-FIPS builds, the default client cipher list is: + // + // .. code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // + // In builds using :ref:`BoringSSL FIPS `, the default client cipher list is: + // + // .. code-block:: none + // + // ECDHE-ECDSA-AES128-GCM-SHA256 + // ECDHE-RSA-AES128-GCM-SHA256 + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves will be used. + // + // In non-FIPS builds, the default curves are: + // + // .. 
code-block:: none + // + // X25519 + // P-256 + // + // In builds using :ref:`BoringSSL FIPS `, the default curve is: + // + // .. code-block:: none + // + // P-256 + repeated string ecdh_curves = 4; + + // If specified, the TLS connection will only support the specified signature algorithms. + // The list is ordered by preference. + // If not specified, the default signature algorithms defined by BoringSSL will be used. + // + // Default signature algorithms selected by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // ecdsa_secp256r1_sha256 + // rsa_pss_rsae_sha256 + // rsa_pkcs1_sha256 + // ecdsa_secp384r1_sha384 + // rsa_pss_rsae_sha384 + // rsa_pkcs1_sha384 + // rsa_pss_rsae_sha512 + // rsa_pkcs1_sha512 + // rsa_pkcs1_sha1 + // + // Signature algorithms supported by BoringSSL (may be out of date): + // + // .. code-block:: none + // + // rsa_pkcs1_sha256 + // rsa_pkcs1_sha384 + // rsa_pkcs1_sha512 + // ecdsa_secp256r1_sha256 + // ecdsa_secp384r1_sha384 + // ecdsa_secp521r1_sha512 + // rsa_pss_rsae_sha256 + // rsa_pss_rsae_sha384 + // rsa_pss_rsae_sha512 + // ed25519 + // rsa_pkcs1_sha1 + // ecdsa_sha1 + repeated string signature_algorithms = 5; + + // Compliance policies configure various aspects of the TLS based on the given policy. + // The policies are applied last during configuration and may override the other TLS + // parameters, or any previous policy. + repeated CompliancePolicy compliance_policies = 6 [(validate.rules).repeated = {max_items: 1}]; +} + +// BoringSSL private key method configuration. The private key methods are used for external +// (potentially asynchronous) signing and decryption operations. Some use cases for private key +// methods would be TPM support and TLS acceleration. +message PrivateKeyProvider { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.PrivateKeyProvider"; + + reserved 2; + + reserved "config"; + + // Private key method provider name. 
The name must match a
+  // supported private key method provider type.
+  string provider_name = 1 [(validate.rules).string = {min_len: 1}];
+
+  // Private key method provider specific configuration.
+  oneof config_type {
+    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];
+  }
+
+  // If the private key provider isn't available (e.g. the required hardware capability doesn't exist),
+  // Envoy will fall back to the BoringSSL default implementation when the ``fallback`` is true.
+  // The default value is ``false``.
+  bool fallback = 4;
+}
+
+// [#next-free-field: 9]
+message TlsCertificate {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate";
+
+  // The TLS certificate chain.
+  //
+  // If ``certificate_chain`` is a filesystem path, a watch will be added to the
+  // parent directory for any file moves to support rotation. This currently
+  // only applies to dynamic secrets, when the ``TlsCertificate`` is delivered via
+  // SDS.
+  config.core.v3.DataSource certificate_chain = 1;
+
+  // The TLS private key.
+  //
+  // If ``private_key`` is a filesystem path, a watch will be added to the parent
+  // directory for any file moves to support rotation. This currently only
+  // applies to dynamic secrets, when the ``TlsCertificate`` is delivered via SDS.
+  config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];
+
+  // ``Pkcs12`` data containing TLS certificate, chain, and private key.
+  //
+  // If ``pkcs12`` is a filesystem path, the file will be read, but no watch will
+  // be added to the parent directory, since ``pkcs12`` isn't used by SDS.
+  // This field is mutually exclusive with ``certificate_chain``, ``private_key`` and ``private_key_provider``.
+  // This can't be marked as ``oneof`` due to API compatibility reasons. 
Setting + // both :ref:`private_key `, + // :ref:`certificate_chain `, + // or :ref:`private_key_provider ` + // and :ref:`pkcs12 ` + // fields will result in an error. Use :ref:`password + // ` + // to specify the password to unprotect the ``PKCS12`` data, if necessary. + config.core.v3.DataSource pkcs12 = 8 [(udpa.annotations.sensitive) = true]; + + // If specified, updates of file-based ``certificate_chain`` and ``private_key`` + // sources will be triggered by this watch. The certificate/key pair will be + // read together and validated for atomic read consistency (i.e. no + // intervening modification occurred between cert/key read, verified by file + // hash comparisons). This allows explicit control over the path watched, by + // default the parent directories of the filesystem paths in + // ``certificate_chain`` and ``private_key`` are watched if this field is not + // specified. This only applies when a ``TlsCertificate`` is delivered by SDS + // with references to filesystem paths. See the :ref:`SDS key rotation + // ` documentation for further details. + config.core.v3.WatchedDirectory watched_directory = 7; + + // BoringSSL private key method provider. This is an alternative to :ref:`private_key + // ` field. + // When both :ref:`private_key ` and + // :ref:`private_key_provider ` fields are set, + // ``private_key_provider`` takes precedence. + // If ``private_key_provider`` is unavailable and :ref:`fallback + // ` + // is enabled, ``private_key`` will be used. + PrivateKeyProvider private_key_provider = 6; + + // The password to decrypt the TLS private key. If this field is not set, it is assumed that the + // TLS private key is not password encrypted. + config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; + + // The OCSP response to be stapled with this certificate during the handshake. + // The response must be DER-encoded and may only be provided via ``filename`` or + // ``inline_bytes``. 
The response may pertain to only one certificate. + config.core.v3.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated config.core.v3.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.TlsSessionTicketKeys"; + + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. + // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated config.core.v3.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; +} + +// Indicates a certificate to be obtained from a named CertificateProvider plugin instance. 
+// The plugin instances are defined in the client's bootstrap file. +// The plugin allows certificates to be fetched/refreshed over the network asynchronously with +// respect to the TLS handshake. +// [#not-implemented-hide:] +message CertificateProviderPluginInstance { + // Provider instance name. + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1 [(validate.rules).string = {min_len: 1}]; + + // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; +} + +// Matcher for subject alternative names, to match both type and value of the SAN. +message SubjectAltNameMatcher { + // Indicates the choice of GeneralName as defined in section 4.2.1.5 of RFC 5280 to match + // against. + enum SanType { + SAN_TYPE_UNSPECIFIED = 0; + EMAIL = 1; + DNS = 2; + URI = 3; + IP_ADDRESS = 4; + OTHER_NAME = 5; + } + + // Specification of type of SAN. Note that the default enum value is an invalid choice. + SanType san_type = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; + + // Matcher for SAN value. + // + // If the :ref:`san_type ` + // is :ref:`DNS ` + // and the matcher type is :ref:`exact `, DNS wildcards are evaluated + // according to the rules in https://www.rfc-editor.org/rfc/rfc6125#section-6.4.3. + // For example, ``*.example.com`` would match ``test.example.com`` but not ``example.com`` and not + // ``a.b.example.com``. 
+ // + // The string matching for OTHER_NAME SAN values depends on their ASN.1 type: + // + // * OBJECT: Validated against its dotted numeric notation (e.g., "1.2.3.4") + // * BOOLEAN: Validated against strings "true" or "false" + // * INTEGER/ENUMERATED: Validated against a string containing the integer value + // * NULL: Validated against an empty string + // * Other types: Validated directly against the string value + type.matcher.v3.StringMatcher matcher = 2 [(validate.rules).message = {required: true}]; + + // OID Value which is required if OTHER_NAME SAN type is used. + // For example, UPN OID is 1.3.6.1.4.1.311.20.2.3 + // (Reference: http://oid-info.com/get/1.3.6.1.4.1.311.20.2.3). + // + // If set for SAN types other than OTHER_NAME, it will be ignored. + string oid = 3; +} + +// [#next-free-field: 18] +message CertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CertificateValidationContext"; + + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). + ACCEPT_UNTRUSTED = 1; + } + + message SystemRootCerts { + } + + reserved 4, 5; + + reserved "verify_subject_alt_name"; + + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. 
By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_spki + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`match_typed_subject_alt_names + // `) is also + // specified. + // + // It can optionally contain certificate revocation lists, in which case Envoy will verify + // that the presented peer certificate has not been revoked by one of the included CRLs. Note + // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be + // provided for all certificate authorities in that chain. Failure to do so will result in + // verification failure for both revoked and unrevoked certificates from that chain. + // The behavior of requiring all certificates to contain CRLs can be altered by + // setting :ref:`only_verify_leaf_cert_crl ` + // true. If set to true, only the final certificate in the chain undergoes CRL verification. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + // + // If ``trusted_ca`` is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. This currently only + // applies to dynamic secrets, when the ``CertificateValidationContext`` is + // delivered via SDS. + // + // X509_V_FLAG_PARTIAL_CHAIN is set by default, so non-root/intermediate ca certificate in ``trusted_ca`` + // can be treated as trust anchor as well. It allows verification with building valid partial chain instead + // of a full chain. + // + // If ``ca_certificate_provider_instance`` is set, it takes precedence over ``trusted_ca``. + config.core.v3.DataSource trusted_ca = 1 + [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; + + // Certificate provider instance for fetching TLS certificates. + // + // If set, takes precedence over ``trusted_ca``. 
+ // [#not-implemented-hide:] + CertificateProviderPluginInstance ca_certificate_provider_instance = 13 + [(udpa.annotations.field_migrate).oneof_promotion = "ca_cert_source"]; + + // Use system root certs for validation. + // If present, system root certs are used only if neither of the ``trusted_ca`` + // or ``ca_certificate_provider_instance`` fields are set. + // [#not-implemented-hide:] + SystemRootCerts system_root_certs = 17; + + // If specified, updates of a file-based ``trusted_ca`` source will be triggered + // by this watch. This allows explicit control over the path watched, by + // default the parent directory of the filesystem path in ``trusted_ca`` is + // watched if this field is not specified. This only applies when a + // ``CertificateValidationContext`` is delivered by SDS with references to + // filesystem paths. See the :ref:`SDS key rotation ` + // documentation for further details. + config.core.v3.WatchedDirectory watched_directory = 11; + + // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the + // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate + // matches one of the specified values. + // + // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -pubkey + // | openssl pkey -pubin -outform DER + // | openssl dgst -sha256 -binary + // | openssl enc -base64 + // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= + // + // This is the format used in HTTP Public Key Pinning. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + // + // .. 
attention:: + // + // This option is preferred over :ref:`verify_certificate_hash + // `, + // because SPKI is tied to a private key, so it doesn't change when the certificate + // is renewed using the same private key. + repeated string verify_certificate_spki = 3 + [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; + + // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that + // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. + // + // A hex-encoded SHA-256 of the certificate can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 + // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a + // + // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate + // can be generated with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 + // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A + // + // Both of those formats are acceptable. + // + // When both: + // :ref:`verify_certificate_hash + // ` and + // :ref:`verify_certificate_spki + // ` are specified, + // a hash matching value from either of the lists will result in the certificate being accepted. + repeated string verify_certificate_hash = 2 + [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; + + // An optional list of Subject Alternative name matchers. If specified, Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matchers. + // The matching uses "any" semantics, that is to say, the SAN is verified if at least one matcher is + // matched. 
+ // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_typed_subject_alt_names: + // - san_type: DNS + // matcher: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated SubjectAltNameMatcher match_typed_subject_alt_names = 15; + + // This field is deprecated in favor of + // :ref:`match_typed_subject_alt_names + // `. + // Note that if both this field and :ref:`match_typed_subject_alt_names + // ` + // are specified, the former (deprecated field) is ignored. + repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. Note that if a CRL is provided + // for any certificate authority in a trust chain, a CRL must be provided + // for all certificate authorities in that chain. Failure to do so will + // result in verification failure for both revoked and unrevoked certificates + // from that chain. This default behavior can be altered by setting + // :ref:`only_verify_leaf_cert_crl ` to + // true. 
+ // + // If ``crl`` is a filesystem path, a watch will be added to the parent + // directory for any file moves to support rotation. This currently only + // applies to dynamic secrets, when the ``CertificateValidationContext`` is + // delivered via SDS. + config.core.v3.DataSource crl = 7; + + // If specified, Envoy will not reject expired certificates. + bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; + + // The configuration of an extension specific certificate validator. + // If specified, all validation is done by the specified validator, + // and the behavior of all other validation settings is defined by the specified validator (and may be entirely ignored, unused, and unvalidated). + // Refer to the documentation for the specified validator. If you do not want a custom validation algorithm, do not set this field. + // [#extension-category: envoy.tls.cert_validator] + config.core.v3.TypedExtensionConfig custom_validator_config = 12; + + // If this option is set to true, only the certificate at the end of the + // certificate chain will be subject to validation by :ref:`CRL `. + bool only_verify_leaf_cert_crl = 14; + + // Defines maximum depth of a certificate chain accepted in verification, the default limit is 100, though this can be system-dependent. + // This number does not include the leaf but includes the trust anchor, so a depth of 1 allows the leaf and one CA certificate. If a trusted issuer + // appears in the chain, but in a depth larger than configured, the certificate validation will fail. + // This matches the semantics of ``SSL_CTX_set_verify_depth`` in OpenSSL 1.0.x and older versions of BoringSSL. It differs from ``SSL_CTX_set_verify_depth`` + // in OpenSSL 1.1.x and newer versions of BoringSSL in that the trust anchor is included. 
+  // Trusted issuers are specified by setting :ref:`trusted_ca `
+  google.protobuf.UInt32Value max_verify_depth = 16 [(validate.rules).uint32 = {lte: 100}];
+}
diff --git a/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto b/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto
new file mode 100644
index 0000000..94660e2
--- /dev/null
+++ b/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto
@@ -0,0 +1,61 @@
+syntax = "proto3";
+
+package envoy.extensions.transport_sockets.tls.v3;
+
+import "envoy/config/core/v3/base.proto";
+import "envoy/config/core/v3/config_source.proto";
+import "envoy/extensions/transport_sockets/tls/v3/common.proto";
+
+import "udpa/annotations/sensitive.proto";
+import "udpa/annotations/status.proto";
+import "udpa/annotations/versioning.proto";
+import "validate/validate.proto";
+
+option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3";
+option java_outer_classname = "SecretProto";
+option java_multiple_files = true;
+option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3;tlsv3";
+option (udpa.annotations.file_status).package_version_status = ACTIVE;
+
+// [#protodoc-title: Secrets configuration]
+
+message GenericSecret {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret";
+
+  // Secret of generic type and is available to filters. It is expected
+  // that only one of secret and secrets is set.
+  config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true];
+
+  // For cases where multiple associated secrets need to be distributed together. It is expected
+  // that only one of secret and secrets is set.
+  map secrets = 2 [(udpa.annotations.sensitive) = true];
+}
+
+message SdsSecretConfig {
+  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig";
+
+  // Name by which the secret can be uniquely referred to. 
When both name and config are specified, + // then secret can be fetched and/or reloaded via SDS. When only name is specified, then secret + // will be loaded from static resources. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + config.core.v3.ConfigSource sds_config = 2; +} + +// [#next-free-field: 6] +message Secret { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; + + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + + oneof type { + TlsCertificate tls_certificate = 2; + + TlsSessionTicketKeys session_ticket_keys = 3; + + CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; + } +} diff --git a/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto b/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto new file mode 100644 index 0000000..d656c66 --- /dev/null +++ b/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -0,0 +1,366 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/address.proto"; +import "envoy/config/core/v3/extension.proto"; +import "envoy/extensions/transport_sockets/tls/v3/common.proto"; +import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "TlsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3;tlsv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: TLS transport socket] +// [#extension: 
envoy.transport_sockets.tls] +// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. + +// [#next-free-field: 8] +message UpstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.UpstreamTlsContext"; + + // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. To enable verification, configure + // :ref:`trusted_ca`. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2 [(validate.rules).string = {max_bytes: 255}]; + + // If true, replaces the SNI for the connection with the hostname of the upstream host, if + // the hostname is known due to either a DNS cluster type or the + // :ref:`hostname ` is set on + // the host. + // + // See :ref:`SNI configuration ` for details on how this + // interacts with other validation options. + bool auto_host_sni = 6; + + // If true, replaces any Subject Alternative Name (SAN) validations with a validation for a DNS SAN matching + // the SNI value sent. The validation uses the actual requested SNI, regardless of how the SNI is configured. + // + // For common cases where an SNI value is present and the server certificate should include a corresponding SAN, + // this option ensures the SAN is properly validated. + // + // See the :ref:`validation configuration ` for how this interacts with + // other validation options. + bool auto_sni_san_validation = 7; + + // If true, server-initiated TLS renegotiation will be allowed. + // + // .. attention:: + // + // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. + bool allow_renegotiation = 3; + + // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets + // for TLSv1.2 and older) to be stored for session resumption. 
+ // + // Defaults to 1, setting this to 0 disables session resumption. + google.protobuf.UInt32Value max_session_keys = 4; + + // Controls enforcement of the ``keyUsage`` extension in peer certificates. If set to ``true``, the handshake will fail if + // the ``keyUsage`` is incompatible with TLS usage. + // + // .. note:: + // The default value is ``false`` (i.e., enforcement off). It is expected to change to ``true`` in a future release. + // + // The ``ssl.was_key_usage_invalid`` in :ref:`listener metrics ` metric will be incremented + // for configurations that would fail if this option were enabled. + google.protobuf.BoolValue enforce_rsa_key_usage = 5; +} + +// [#next-free-field: 12] +message DownstreamTlsContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.DownstreamTlsContext"; + + enum OcspStaplePolicy { + // OCSP responses are optional. If absent or expired, the certificate is used without stapling. + LENIENT_STAPLING = 0; + + // OCSP responses are optional. If absent, the certificate is used without stapling. If present but expired, + // the certificate is not used for subsequent connections. Connections are rejected if no suitable certificate + // is found. + STRICT_STAPLING = 1; + + // OCSP responses are required. Connections fail if a certificate lacks a valid OCSP response. Expired responses + // prevent certificate use in new connections, and connections are rejected if no suitable certificate is available. + MUST_STAPLE = 2; + } + + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. 
+ TlsSessionTicketKeys session_ticket_keys = 4; + + // Config for fetching TLS session ticket keys via SDS API. + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + + // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS + // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. + // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using + // the keys specified through either :ref:`session_ticket_keys ` + // or :ref:`session_ticket_keys_sds_secret_config `. + // If this config is set to false and no keys are explicitly configured, the TLS server will issue + // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the + // implication that sessions cannot be resumed across hot restarts or on different hosts. + bool disable_stateless_session_resumption = 7; + } + + // If ``true``, the TLS server will not maintain a session cache of TLS sessions. + // + // .. note:: + // This applies only to TLSv1.2 and earlier. + // + bool disable_stateful_session_resumption = 10; + + // Maximum lifetime of TLS sessions. If specified, ``session_timeout`` will change the maximum lifetime + // of the TLS session. + // + // This serves as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. + // Only whole seconds are considered; fractional seconds are ignored. + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; + + // Configuration for handling certificates without an OCSP response or with expired responses. + // + // Defaults to ``LENIENT_STAPLING`` + OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; + + // Multiple certificates are allowed in Downstream transport socket to serve different SNI. 
+ // This option controls the behavior when no matching certificate is found for the received SNI value, + // or no SNI value was sent. If enabled, all certificates will be evaluated for a match for non-SNI criteria + // such as key type and OCSP settings. If disabled, the first provided certificate will be used. + // Defaults to ``false``. See more details in :ref:`Multiple TLS certificates `. + google.protobuf.BoolValue full_scan_certs_on_sni_mismatch = 9; + + // If ``true``, the downstream client's preferred cipher is used during the handshake. If ``false``, Envoy + // uses its preferred cipher. + // + // .. note:: + // This has no effect when using TLSv1_3. + // + bool prefer_client_ciphers = 11; +} + +// TLS key log configuration. +// The key log file format is "format used by NSS for its SSLKEYLOGFILE debugging output" (text taken from openssl man page) +message TlsKeyLog { + // Path to save the TLS key log. + string path = 1 [(validate.rules).string = {min_len: 1}]; + + // Local IP address ranges to filter connections for TLS key logging. If not set, matches any local IP address. + repeated config.core.v3.CidrRange local_address_range = 2; + + // Remote IP address ranges to filter connections for TLS key logging. If not set, matches any remote IP address. + repeated config.core.v3.CidrRange remote_address_range = 3; +} + +// TLS context shared by both client and server TLS contexts. +// [#next-free-field: 17] +message CommonTlsContext { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; + + // Config for the Certificate Provider to fetch certificates. Certificates are fetched/refreshed asynchronously over + // the network relative to the TLS handshake. + // + // DEPRECATED: This message is not currently used, but if we ever do need it, we will want to + // move it out of CommonTlsContext and into common.proto, similar to the existing + // CertificateProviderPluginInstance message. 
+ // + // [#not-implemented-hide:] + message CertificateProvider { + // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify + // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Provider specific config. + // Note: an implementation is expected to dedup multiple instances of the same config + // to maintain a single certificate-provider instance. The sharing can happen, for + // example, among multiple clusters or between the tls_certificate and validation_context + // certificate providers of a cluster. + // This config could be supplied inline or (in future) a named xDS resource. + oneof config { + option (validate.required) = true; + + config.core.v3.TypedExtensionConfig typed_config = 2; + } + } + + // Similar to CertificateProvider above, but allows the provider instances to be configured on + // the client side instead of being sent from the control plane. + // + // DEPRECATED: This message was moved outside of CommonTlsContext + // and now lives in common.proto. + // + // [#not-implemented-hide:] + message CertificateProviderInstance { + // Provider instance name. This name must be defined in the client's configuration (e.g., a + // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config + // field that would be sent in the CertificateProvider message if the config was sent by the + // control plane). If not present, defaults to "default". + // + // Instance names should generally be defined not in terms of the underlying provider + // implementation (e.g., "file_watcher") but rather in terms of the function of the + // certificates (e.g., "foo_deployment_identity"). + string instance_name = 1; + + // Opaque name used to specify certificate instances or types. 
For example, "ROOTCA" to specify + // a root-certificate (validation context) or "example.com" to specify a certificate for a + // particular domain. Not all provider instances will actually use this field, so the value + // defaults to the empty string. + string certificate_name = 2; + } + + message CombinedCertificateValidationContext { + option (udpa.annotations.versioning).previous_message_type = + "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; + + // How to validate peer certificates. + CertificateValidationContext default_validation_context = 1 + [(validate.rules).message = {required: true}]; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + SdsSecretConfig validation_context_sds_secret_config = 2 + [(validate.rules).message = {required: true}]; + + // Certificate provider for fetching CA certs. This will populate the + // ``default_validation_context.trusted_ca`` field. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 3 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Certificate provider instance for fetching CA certs. This will populate the + // ``default_validation_context.trusted_ca`` field. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 4 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + } + + reserved 5; + + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // Only a single TLS certificate is supported in client contexts. In server contexts, + // :ref:`Multiple TLS certificates ` can be associated with the + // same context to allow both RSA and ECDSA certificates and support SNI-based selection. + // + // If ``tls_certificate_provider_instance`` is set, this field is ignored. 
+ // If this field is set, ``tls_certificate_sds_secret_configs`` is ignored. + repeated TlsCertificate tls_certificates = 2; + + // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + // + // The same number and types of certificates as :ref:`tls_certificates ` + // are valid in the certificates fetched through this setting. + // + // If ``tls_certificates`` or ``tls_certificate_provider_instance`` are set, this field + // is ignored. + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6; + + // Certificate provider instance for fetching TLS certs. + // + // If this field is set, ``tls_certificates`` and ``tls_certificate_provider_instance`` + // are ignored. + // [#not-implemented-hide:] + CertificateProviderPluginInstance tls_certificate_provider_instance = 14; + + // Custom TLS certificate selector. + // + // Select TLS certificate based on TLS client hello. + // If empty, defaults to native TLS certificate selection behavior: + // DNS SANs or Subject Common Name in TLS certificates is extracted as server name pattern to match SNI. + // [#extension-category: envoy.tls.certificate_selectors] + config.core.v3.TypedExtensionConfig custom_tls_certificate_selector = 16; + + // Certificate provider for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProvider tls_certificate_certificate_provider = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Certificate provider instance for fetching TLS certificates. + // [#not-implemented-hide:] + CertificateProviderInstance tls_certificate_certificate_provider_instance = 11 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + oneof validation_context_type { + // How to validate peer certificates. 
+ CertificateValidationContext validation_context = 3; + + // Config for fetching validation context via SDS API. Note SDS API allows certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. + SdsSecretConfig validation_context_sds_secret_config = 7; + + // Combines the default ``CertificateValidationContext`` with the SDS-provided dynamic context for certificate + // validation. + // + // When the SDS server returns a dynamic ``CertificateValidationContext``, it is merged + // with the default context using ``Message::MergeFrom()``. The merging rules are as follows: + // + // * **Singular Fields:** Dynamic fields override the default singular fields. + // * **Repeated Fields:** Dynamic repeated fields are concatenated with the default repeated fields. + // * **Boolean Fields:** Boolean fields are combined using a logical OR operation. + // + // The resulting ``CertificateValidationContext`` is used to perform certificate validation. + CombinedCertificateValidationContext combined_validation_context = 8; + + // Certificate provider for fetching validation context. + // [#not-implemented-hide:] + CertificateProvider validation_context_certificate_provider = 10 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + + // Certificate provider instance for fetching validation context. + // [#not-implemented-hide:] + CertificateProviderInstance validation_context_certificate_provider_instance = 12 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + } + + // Supplies the list of ALPN protocols that the listener should expose. In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type + // ` + // parameter in the HTTP connection manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. 
+ // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; + + // Custom TLS handshaker. If empty, defaults to native TLS handshaking + // behavior. + config.core.v3.TypedExtensionConfig custom_handshaker = 13; + + // TLS key log configuration + TlsKeyLog key_log = 15; +} diff --git a/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto b/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto new file mode 100644 index 0000000..73592f8 --- /dev/null +++ b/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package envoy.extensions.transport_sockets.tls.v3; + +import "envoy/config/core/v3/base.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; +option java_outer_classname = "TlsSpiffeValidatorConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3;tlsv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: SPIFFE Certificate Validator] +// [#extension: envoy.tls.cert_validator.spiffe] + +// Configuration specific to the `SPIFFE `_ certificate validator. +// +// Example: +// +// .. 
validated-code-block:: yaml
+// :type-name: envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext
+//
+// custom_validator_config:
+// name: envoy.tls.cert_validator.spiffe
+// typed_config:
+// "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig
+// trust_domains:
+// - name: foo.com
+// trust_bundle:
+// filename: "foo.pem"
+// - name: envoy.com
+// trust_bundle:
+// filename: "envoy.pem"
+//
+// In this example, a presented peer certificate whose SAN matches ``spiffe://foo.com/**`` is validated against
+// the "foo.pem" x.509 certificate. All the trust bundles are isolated from each other, so no trust domain can mint
+// an SVID belonging to another trust domain. That means, in this example, an SVID signed by ``envoy.com``'s CA with ``spiffe://foo.com/**``
+// SAN would be rejected since Envoy selects the trust bundle according to the presented SAN before validating the certificate.
+//
+// Note that SPIFFE validator inherits and uses the following options from :ref:`CertificateValidationContext `.
+//
+// - :ref:`allow_expired_certificate ` to allow expired certificates.
+// - :ref:`match_typed_subject_alt_names ` to match **URI** SAN of certificates. Unlike the default validator, SPIFFE validator only matches **URI** SAN (which corresponds to the SVID in SPIFFE terminology) and ignores other SAN types.
+//
+message SPIFFECertValidatorConfig {
+ message TrustDomain {
+ // Name of the trust domain, ``example.com``, ``foo.bar.gov`` for example.
+ // Note that this must *not* have "spiffe://" prefix.
+ string name = 1 [(validate.rules).string = {min_len: 1}];
+
+ // Specify a data source holding x.509 trust bundle used for validating incoming SVID(s) in this trust domain.
+ config.core.v3.DataSource trust_bundle = 2;
+ }
+
+ // This field specifies trust domains used for validating incoming X.509-SVID(s).
+ repeated TrustDomain trust_domains = 1 [(validate.rules).repeated = {min_items: 1}]; + + // This field specifies all trust bundles as a single DataSource. If both + // trust_bundles and trust_domains are specified, trust_bundles will + // take precedence. Currently assumes file will be a SPIFFE Trust Bundle Map. + // If DataSource is a file, dynamic file watching will be enabled, + // and updates to the specified file will trigger a refresh of the trust_bundles. + config.core.v3.DataSource trust_bundles = 2; +} diff --git a/proto/envoy/service/README.md b/proto/envoy/service/README.md new file mode 100644 index 0000000..831b740 --- /dev/null +++ b/proto/envoy/service/README.md @@ -0,0 +1,3 @@ +Protocol buffer definitions for gRPC and REST services. + +Visibility should be constrained to none (default). diff --git a/proto/envoy/service/discovery/v3/BUILD b/proto/envoy/service/discovery/v3/BUILD new file mode 100644 index 0000000..79668d2 --- /dev/null +++ b/proto/envoy/service/discovery/v3/BUILD @@ -0,0 +1,13 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + has_services = True, + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_xds//udpa/annotations:pkg", + ], +) diff --git a/proto/envoy/service/discovery/v3/ads.proto b/proto/envoy/service/discovery/v3/ads.proto new file mode 100644 index 0000000..50f2af4 --- /dev/null +++ b/proto/envoy/service/discovery/v3/ads.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package envoy.service.discovery.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.service.discovery.v3"; +option java_outer_classname = "AdsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3;discoveryv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Aggregated Discovery Service (ADS)] + +// Discovery services for endpoints, clusters, routes, +// and listeners are retained in the package `envoy.api.v2` for backwards +// compatibility with existing management servers. New development in discovery +// services should proceed in the package `envoy.service.discovery.v2`. + +// See https://github.com/envoyproxy/envoy-api#apis for a description of the role of +// ADS and how it is intended to be used by a management server. ADS requests +// have the same structure as their singleton xDS counterparts, but can +// multiplex many resource types on a single stream. The type_url in the +// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover +// the multiplexed singleton APIs at the Envoy instance and management server. +service AggregatedDiscoveryService { + // This is a gRPC-only API. 
+ rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) + returns (stream DeltaDiscoveryResponse) { + } +} + +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message AdsDummy { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.discovery.v2.AdsDummy"; +} diff --git a/proto/envoy/service/discovery/v3/discovery.proto b/proto/envoy/service/discovery/v3/discovery.proto new file mode 100644 index 0000000..e1ce827 --- /dev/null +++ b/proto/envoy/service/discovery/v3/discovery.proto @@ -0,0 +1,443 @@ +syntax = "proto3"; + +package envoy.service.discovery.v3; + +import "envoy/config/core/v3/base.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/rpc/status.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.discovery.v3"; +option java_outer_classname = "DiscoveryProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3;discoveryv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common discovery API components] + +// Specifies a resource to be subscribed to. +message ResourceLocator { + // The resource name to subscribe to. + string name = 1; + + // A set of dynamic parameters used to match against the dynamic parameter + // constraints on the resource. This allows clients to select between + // multiple variants of the same resource. + map dynamic_parameters = 2; +} + +// Specifies a concrete resource name. +message ResourceName { + // The name of the resource. 
+ string name = 1; + + // Dynamic parameter constraints associated with this resource. To be used by client-side caches + // (including xDS proxies) when matching subscribed resource locators. + DynamicParameterConstraints dynamic_parameter_constraints = 2; +} + +// [#not-implemented-hide:] +// An error associated with a specific resource name, returned to the +// client by the server. +message ResourceError { + // The name of the resource. + ResourceName resource_name = 1; + + // The error reported for the resource. + google.rpc.Status error_detail = 2; +} + +// A DiscoveryRequest requests a set of versioned resources of the same type for +// a given Envoy node on some API. +// [#next-free-field: 8] +message DiscoveryRequest { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryRequest"; + + // The ``version_info`` provided in the request messages will be the ``version_info`` + // received with the most recent successfully processed response or empty on + // the first request. It is expected that no new request is sent after a + // response is received until the Envoy instance is ready to ACK/NACK the new + // configuration. ACK/NACK takes place by returning the new API config version + // as applied or the previous API config version respectively. Each ``type_url`` + // (see below) has an independent version associated with it. + string version_info = 1; + + // The node making the request. + config.core.v3.Node node = 2; + + // List of resources to subscribe to, e.g. list of cluster names or a route + // configuration name. If this is empty, all resources for the API are + // returned. LDS/CDS may have empty ``resource_names``, which will cause all + // resources for the Envoy instance to be returned. The LDS and CDS responses + // will then imply a number of resources that need to be fetched via EDS/RDS, + // which will be explicitly enumerated in ``resource_names``. 
+ repeated string resource_names = 3; + + // [#not-implemented-hide:] + // Alternative to ``resource_names`` field that allows specifying dynamic + // parameters along with each resource name. Clients that populate this + // field must be able to handle responses from the server where resources + // are wrapped in a Resource message. + // + // .. note:: + // It is legal for a request to have some resources listed + // in ``resource_names`` and others in ``resource_locators``. + // + repeated ResourceLocator resource_locators = 7; + + // Type of the resource that is being requested, e.g. + // ``type.googleapis.com/envoy.api.v2.ClusterLoadAssignment``. This is implicit + // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is + // required for ADS. + string type_url = 4; + + // nonce corresponding to ``DiscoveryResponse`` being ACK/NACKed. See above + // discussion on ``version_info`` and the ``DiscoveryResponse`` nonce comment. This + // may be empty only if: + // + // * This is a non-persistent-stream xDS such as HTTP, or + // * The client has not yet accepted an update in this xDS stream (unlike + // delta, where it is populated only for new explicit ACKs). + // + string response_nonce = 5; + + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The ``message`` field in ``error_details`` provides the Envoy + // internal exception related to the failure. It is only intended for consumption during manual + // debugging, the string provided is not guaranteed to be stable across Envoy versions. + google.rpc.Status error_detail = 6; +} + +// [#next-free-field: 8] +message DiscoveryResponse { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryResponse"; + + // The version of the response data. + string version_info = 1; + + // The response resources. These resources are typed and depend on the API being called. 
+ repeated google.protobuf.Any resources = 2; + + // [#not-implemented-hide:] + // Canary is used to support two Envoy command line flags: + // + // * ``--terminate-on-canary-transition-failure``. When set, Envoy is able to + // terminate if it detects that configuration is stuck at canary. Consider + // this example sequence of updates: + // + // * Management server applies a canary config successfully. + // * Management server rolls back to a production config. + // * Envoy rejects the new production config. + // + // Since there is no sensible way to continue receiving configuration + // updates, Envoy will then terminate and apply production config from a + // clean slate. + // + // * ``--dry-run-canary``. When set, a canary response will never be applied, only + // validated via a dry run. + // + bool canary = 3; + + // Type URL for resources. Identifies the xDS API when muxing over ADS. + // Must be consistent with the ``type_url`` in the 'resources' repeated Any (if non-empty). + string type_url = 4; + + // For gRPC based subscriptions, the nonce provides a way to explicitly ack a + // specific ``DiscoveryResponse`` in a following ``DiscoveryRequest``. Additional + // messages may have been sent by Envoy to the management server for the + // previous version on the stream prior to this ``DiscoveryResponse``, that were + // unprocessed at response send time. The nonce allows the management server + // to ignore any further ``DiscoveryRequests`` for the previous version until a + // ``DiscoveryRequest`` bearing the nonce. The nonce is optional and is not + // required for non-stream based xDS implementations. + string nonce = 5; + + // The control plane instance that sent the response. + config.core.v3.ControlPlane control_plane = 6; + + // [#not-implemented-hide:] + // Errors associated with specific resources. 
Clients are expected to
+ // remember the most recent error for a given resource across responses;
+ // the error condition is not considered to be cleared until a response is
+ // received that contains the resource in the 'resources' field.
+ repeated ResourceError resource_errors = 7;
+}
+
+// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC
+// endpoint for Delta xDS.
+//
+// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full
+// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a
+// diff to the state of an xDS client.
+// In Delta xDS there are per-resource versions, which allow tracking state at
+// the resource granularity.
+// An xDS Delta session is always in the context of a gRPC bidirectional
+// stream. This allows the xDS server to keep track of the state of xDS clients
+// connected to it.
+//
+// In Delta xDS the nonce field is required and used to pair
+// ``DeltaDiscoveryResponse`` to a ``DeltaDiscoveryRequest`` ACK or NACK.
+// Optionally, a response message level ``system_version_info`` is present for
+// debugging purposes only.
+//
+// ``DeltaDiscoveryRequest`` plays two independent roles. Any ``DeltaDiscoveryRequest``
+// can be either or both of:
+//
+// * Informing the server of what resources the client has gained/lost interest in
+// (using ``resource_names_subscribe`` and ``resource_names_unsubscribe``), or
+// * (N)ACKing an earlier resource update from the server (using ``response_nonce``,
+// with presence of ``error_detail`` making it a NACK).
+//
+// Additionally, the first message (for a given ``type_url``) of a reconnected gRPC stream
+// has a third role: informing the server of the resources (and their versions)
+// that the client already possesses, using the ``initial_resource_versions`` field.
+// +// As with state-of-the-world, when multiple resource types are multiplexed (ADS), +// all requests/acknowledgments/updates are logically walled off by ``type_url``: +// a Cluster ACK exists in a completely separate world from a prior Route NACK. +// In particular, ``initial_resource_versions`` being sent at the "start" of every +// gRPC stream actually entails a message for each ``type_url``, each with its own +// ``initial_resource_versions``. +// [#next-free-field: 10] +message DeltaDiscoveryRequest { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; + + // The node making the request. + config.core.v3.Node node = 1; + + // Type of the resource that is being requested, e.g. + // ``type.googleapis.com/envoy.api.v2.ClusterLoadAssignment``. This does not need to be set if + // resources are only referenced via ``xds_resource_subscribe`` and + // ``xds_resources_unsubscribe``. + string type_url = 2; + + // DeltaDiscoveryRequests allow the client to add or remove individual + // resources to the set of tracked resources in the context of a stream. + // All resource names in the ``resource_names_subscribe`` list are added to the + // set of tracked resources and all resource names in the ``resource_names_unsubscribe`` + // list are removed from the set of tracked resources. + // + // *Unlike* state-of-the-world xDS, an empty ``resource_names_subscribe`` or + // ``resource_names_unsubscribe`` list simply means that no resources are to be + // added or removed to the resource list. + // *Like* state-of-the-world xDS, the server must send updates for all tracked + // resources, but can also send updates for resources the client has not subscribed to. + // + // .. note:: + // The server must respond with all resources listed in ``resource_names_subscribe``, + // even if it believes the client has the most recent version of them. 
The reason: + // the client may have dropped them, but then regained interest before it had a chance + // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. + // + // These two fields can be set in any ``DeltaDiscoveryRequest``, including ACKs + // and ``initial_resource_versions``. + // + // A list of Resource names to add to the list of tracked resources. + repeated string resource_names_subscribe = 3; + + // A list of Resource names to remove from the list of tracked resources. + repeated string resource_names_unsubscribe = 4; + + // [#not-implemented-hide:] + // Alternative to ``resource_names_subscribe`` field that allows specifying dynamic parameters + // along with each resource name. + // + // .. note:: + // It is legal for a request to have some resources listed + // in ``resource_names_subscribe`` and others in ``resource_locators_subscribe``. + // + repeated ResourceLocator resource_locators_subscribe = 8; + + // [#not-implemented-hide:] + // Alternative to ``resource_names_unsubscribe`` field that allows specifying dynamic parameters + // along with each resource name. + // + // .. note:: + // It is legal for a request to have some resources listed + // in ``resource_names_unsubscribe`` and others in ``resource_locators_unsubscribe``. + // + repeated ResourceLocator resource_locators_unsubscribe = 9; + + // Informs the server of the versions of the resources the xDS client knows of, to enable the + // client to continue the same logical xDS session even in the face of gRPC stream reconnection. + // It will not be populated: + // + // * In the very first stream of a session, since the client will not yet have any resources. + // * In any message after the first in a stream (for a given ``type_url``), since the server will + // already be correctly tracking the client's state. + // + // (In ADS, the first message ``of each type_url`` of a reconnected stream populates this map.) 
+ // The map's keys are names of xDS resources known to the xDS client.
+ // The map's values are opaque resource versions.
+ map<string, string> initial_resource_versions = 5;
+
+ // When the ``DeltaDiscoveryRequest`` is an ACK or NACK message in response
+ // to a previous ``DeltaDiscoveryResponse``, the ``response_nonce`` must be the
+ // nonce in the ``DeltaDiscoveryResponse``.
+ // Otherwise (unlike in ``DiscoveryRequest``) ``response_nonce`` must be omitted.
+ string response_nonce = 6;
+
+ // This is populated when the previous :ref:`DiscoveryResponse `
+ // failed to update configuration. The ``message`` field in ``error_details``
+ // provides the Envoy internal exception related to the failure.
+ google.rpc.Status error_detail = 7;
+}
+
+// [#next-free-field: 10]
+message DeltaDiscoveryResponse {
+ option (udpa.annotations.versioning).previous_message_type =
+ "envoy.api.v2.DeltaDiscoveryResponse";
+
+ // The version of the response data (used for debugging).
+ string system_version_info = 1;
+
+ // The response resources. These are typed resources, whose types must match
+ // the ``type_url`` field.
+ repeated Resource resources = 2;
+
+ // field id 3 IS available!
+
+ // Type URL for resources. Identifies the xDS API when muxing over ADS.
+ // Must be consistent with the ``type_url`` in the Any within 'resources' if 'resources' is non-empty.
+ string type_url = 4;
+
+ // Resource names of resources that have been deleted and to be removed from the xDS client.
+ // Removed resources for missing resources can be ignored.
+ repeated string removed_resources = 6;
+
+ // Alternative to ``removed_resources`` that allows specifying which variant of
+ // a resource is being removed. This variant must be used for any resource
+ // for which dynamic parameter constraints were sent to the client.
+ repeated ResourceName removed_resource_names = 8;
+
+ // The nonce provides a way for ``DeltaDiscoveryRequests`` to uniquely
+ // reference a ``DeltaDiscoveryResponse`` when (N)ACKing.
The nonce is required. + string nonce = 5; + + // [#not-implemented-hide:] + // The control plane instance that sent the response. + config.core.v3.ControlPlane control_plane = 7; + + // [#not-implemented-hide:] + // Errors associated with specific resources. + // + // .. note:: + // A resource in this field with a status of NOT_FOUND should be treated the same as + // a resource listed in the ``removed_resources`` or ``removed_resource_names`` fields. + // + repeated ResourceError resource_errors = 9; +} + +// A set of dynamic parameter constraints associated with a variant of an individual xDS resource. +// These constraints determine whether the resource matches a subscription based on the set of +// dynamic parameters in the subscription, as specified in the +// :ref:`ResourceLocator.dynamic_parameters ` +// field. This allows xDS implementations (clients, servers, and caching proxies) to determine +// which variant of a resource is appropriate for a given client. +message DynamicParameterConstraints { + // A single constraint for a given key. + message SingleConstraint { + message Exists { + } + + // The key to match against. + string key = 1; + + oneof constraint_type { + option (validate.required) = true; + + // Matches this exact value. + string value = 2; + + // Key is present (matches any value except for the key being absent). + // This allows setting a default constraint for clients that do + // not send a key at all, while there may be other clients that need + // special configuration based on that key. + Exists exists = 3; + } + } + + message ConstraintList { + repeated DynamicParameterConstraints constraints = 1; + } + + oneof type { + // A single constraint to evaluate. + SingleConstraint constraint = 1; + + // A list of constraints that match if any one constraint in the list + // matches. + ConstraintList or_constraints = 2; + + // A list of constraints that must all match. 
+ ConstraintList and_constraints = 3; + + // The inverse (NOT) of a set of constraints. + DynamicParameterConstraints not_constraints = 4; + } +} + +// [#next-free-field: 10] +message Resource { + option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; + + // Cache control properties for the resource. + // [#not-implemented-hide:] + message CacheControl { + // If true, xDS proxies may not cache this resource. + // + // .. note:: + // This does not apply to clients other than xDS proxies, which must cache resources + // for their own use, regardless of the value of this field. + // + bool do_not_cache = 1; + } + + // The resource's name, to distinguish it from others of the same type of resource. + // Only one of ``name`` or ``resource_name`` may be set. + string name = 3; + + // Alternative to the ``name`` field, to be used when the server supports + // multiple variants of the named resource that are differentiated by + // dynamic parameter constraints. + // Only one of ``name`` or ``resource_name`` may be set. + ResourceName resource_name = 8; + + // The aliases are a list of other names that this resource can go by. + repeated string aliases = 4; + + // The resource level version. It allows xDS to track the state of individual + // resources. + string version = 1; + + // The resource being tracked. + google.protobuf.Any resource = 2; + + // Time-to-live value for the resource. For each resource, a timer is started. The timer is + // reset each time the resource is received with a new TTL. If the resource is received with + // no TTL set, the timer is removed for the resource. Upon expiration of the timer, the + // configuration for the resource will be removed. + // + // The TTL can be refreshed or changed by sending a response that doesn't change the resource + // version. In this case the ``resource`` field does not need to be populated, which allows for + // light-weight "heartbeat" updates to keep a resource with a TTL alive. 
+ // + // The TTL feature is meant to support configurations that should be removed in the event of + // a management server failure. For example, the feature may be used for fault injection + // testing where the fault injection should be terminated in the event that Envoy loses contact + // with the management server. + google.protobuf.Duration ttl = 6; + + // Cache control properties for the resource. + // [#not-implemented-hide:] + CacheControl cache_control = 7; + + // The Metadata field can be used to provide additional information for the resource. + // E.g. the trace data for debugging. + config.core.v3.Metadata metadata = 9; +} diff --git a/proto/envoy/type/BUILD b/proto/envoy/type/BUILD new file mode 100644 index 0000000..29ebf07 --- /dev/null +++ b/proto/envoy/type/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_xds//udpa/annotations:pkg"], +) diff --git a/proto/envoy/type/hash_policy.proto b/proto/envoy/type/hash_policy.proto new file mode 100644 index 0000000..f022f09 --- /dev/null +++ b/proto/envoy/type/hash_policy.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.type; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "HashPolicyProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Hash Policy] + +// Specifies the hash policy +message HashPolicy { + // The source IP will be used to compute the hash used by hash-based load balancing + // algorithms. 
+ message SourceIp { + } + + oneof policy_specifier { + option (validate.required) = true; + + SourceIp source_ip = 1; + } +} diff --git a/proto/envoy/type/http.proto b/proto/envoy/type/http.proto new file mode 100644 index 0000000..51768f1 --- /dev/null +++ b/proto/envoy/type/http.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.type; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "HttpProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: HTTP] + +enum CodecClientType { + HTTP1 = 0; + + HTTP2 = 1; + + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + HTTP3 = 2; +} diff --git a/proto/envoy/type/http_status.proto b/proto/envoy/type/http_status.proto new file mode 100644 index 0000000..0d22234 --- /dev/null +++ b/proto/envoy/type/http_status.proto @@ -0,0 +1,140 @@ +syntax = "proto3"; + +package envoy.type; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "HttpStatusProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: HTTP status codes] + +// HTTP response codes supported in Envoy. +// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml +enum StatusCode { + // Empty - This code not part of the HTTP status code specification, but it is needed for proto + // `enum` type. 
+ Empty = 0; + + Continue = 100; + + OK = 200; + + Created = 201; + + Accepted = 202; + + NonAuthoritativeInformation = 203; + + NoContent = 204; + + ResetContent = 205; + + PartialContent = 206; + + MultiStatus = 207; + + AlreadyReported = 208; + + IMUsed = 226; + + MultipleChoices = 300; + + MovedPermanently = 301; + + Found = 302; + + SeeOther = 303; + + NotModified = 304; + + UseProxy = 305; + + TemporaryRedirect = 307; + + PermanentRedirect = 308; + + BadRequest = 400; + + Unauthorized = 401; + + PaymentRequired = 402; + + Forbidden = 403; + + NotFound = 404; + + MethodNotAllowed = 405; + + NotAcceptable = 406; + + ProxyAuthenticationRequired = 407; + + RequestTimeout = 408; + + Conflict = 409; + + Gone = 410; + + LengthRequired = 411; + + PreconditionFailed = 412; + + PayloadTooLarge = 413; + + URITooLong = 414; + + UnsupportedMediaType = 415; + + RangeNotSatisfiable = 416; + + ExpectationFailed = 417; + + MisdirectedRequest = 421; + + UnprocessableEntity = 422; + + Locked = 423; + + FailedDependency = 424; + + UpgradeRequired = 426; + + PreconditionRequired = 428; + + TooManyRequests = 429; + + RequestHeaderFieldsTooLarge = 431; + + InternalServerError = 500; + + NotImplemented = 501; + + BadGateway = 502; + + ServiceUnavailable = 503; + + GatewayTimeout = 504; + + HTTPVersionNotSupported = 505; + + VariantAlsoNegotiates = 506; + + InsufficientStorage = 507; + + LoopDetected = 508; + + NotExtended = 510; + + NetworkAuthenticationRequired = 511; +} + +// HTTP status. +message HttpStatus { + // Supplies HTTP response code. 
+ StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; +} diff --git a/proto/envoy/type/matcher/v3/address.proto b/proto/envoy/type/matcher/v3/address.proto new file mode 100644 index 0000000..8a03a53 --- /dev/null +++ b/proto/envoy/type/matcher/v3/address.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "xds/core/v3/cidr.proto"; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "AddressProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Address Matcher] + +// Match an IP against a repeated CIDR range. This matcher is intended to be +// used in other matchers, for example in the filter state matcher to match a +// filter state object as an IP. +message AddressMatcher { + repeated xds.core.v3.CidrRange ranges = 1; +} diff --git a/proto/envoy/type/matcher/v3/filter_state.proto b/proto/envoy/type/matcher/v3/filter_state.proto new file mode 100644 index 0000000..8c38a51 --- /dev/null +++ b/proto/envoy/type/matcher/v3/filter_state.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/address.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "FilterStateProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Filter state matcher] + +// FilterStateMatcher provides a general interface for matching the filter state objects. 
+message FilterStateMatcher { + // The filter state key to retrieve the object. + string key = 1 [(validate.rules).string = {min_len: 1}]; + + oneof matcher { + option (validate.required) = true; + + // Matches the filter state object as a string value. + StringMatcher string_match = 2; + + // Matches the filter state object as a ip Instance. + AddressMatcher address_match = 3; + } +} diff --git a/proto/envoy/type/matcher/v3/http_inputs.proto b/proto/envoy/type/matcher/v3/http_inputs.proto new file mode 100644 index 0000000..c90199e --- /dev/null +++ b/proto/envoy/type/matcher/v3/http_inputs.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "HttpInputsProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common HTTP inputs] + +// Match input indicates that matching should be done on a specific request header. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_headers] +message HttpRequestHeaderMatchInput { + // The request header to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicates that matching should be done on a specific request trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. 
if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.request_trailers] +message HttpRequestTrailerMatchInput { + // The request trailer to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicating that matching should be done on a specific response header. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the response contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_headers] +message HttpResponseHeaderMatchInput { + // The response header to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicates that matching should be done on a specific response trailer. +// The resulting input string will be all headers for the given key joined by a comma, +// e.g. if the request contains two 'foo' headers with value 'bar' and 'baz', the input +// string will be 'bar,baz'. +// [#comment:TODO(snowp): Link to unified matching docs.] +// [#extension: envoy.matching.inputs.response_trailers] +message HttpResponseTrailerMatchInput { + // The response trailer to match on. + string header_name = 1 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; +} + +// Match input indicates that matching should be done on a specific query parameter. +// The resulting input string will be the first query parameter for the value +// 'query_param'. +// [#extension: envoy.matching.inputs.query_params] +message HttpRequestQueryParamMatchInput { + // The query parameter to match on. 
+ string query_param = 1 [(validate.rules).string = {min_len: 1}]; +} diff --git a/proto/envoy/type/matcher/v3/metadata.proto b/proto/envoy/type/matcher/v3/metadata.proto new file mode 100644 index 0000000..30abde9 --- /dev/null +++ b/proto/envoy/type/matcher/v3/metadata.proto @@ -0,0 +1,110 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Metadata matcher] + +// ``MetadataMatcher`` provides a general interface to check if a given value is matched in +// :ref:`Metadata `. It uses ``filter`` and ``path`` to retrieve the value +// from the ``Metadata`` and then check if it's matched to the specified value. +// +// For example, for the following ``Metadata``: +// +// .. code-block:: yaml +// +// filter_metadata: +// envoy.filters.http.rbac: +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following ``MetadataMatcher`` is matched as the path ``[a, b, c]`` will retrieve a string value ``pro`` +// from the ``Metadata`` which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following ``MetadataMatcher`` is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. 
code-block:: yaml +// +// filter: envoy.filters.http.rbac +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of ``MetadataMatcher`` is specifying additional metadata in ``envoy.filters.http.rbac`` to +// enforce access control based on dynamic metadata in a request. See :ref:`Permission +// ` and :ref:`Principal +// `. + +// [#next-major-version: MetadataMatcher should use StructMatcher] +message MetadataMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.MetadataMatcher"; + + // Specifies the segment in a path to retrieve value from ``Metadata``. + // + // .. note:: + // Currently it's not supported to retrieve a value from a list in ``Metadata``. This means that + // if the segment key refers to a list, it has to be the last segment in a path. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.MetadataMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a ``Struct``. + string key = 1 [(validate.rules).string = {min_len: 1}]; + } + } + + // The filter name to retrieve the ``Struct`` from the ``Metadata``. + string filter = 1 [(validate.rules).string = {min_len: 1}]; + + // The path to retrieve the ``Value`` from the ``Struct``. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The ``MetadataMatcher`` is matched if the value retrieved by path is matched to this value. + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; + + // If true, the match result will be inverted. 
+ bool invert = 4; +} diff --git a/proto/envoy/type/matcher/v3/node.proto b/proto/envoy/type/matcher/v3/node.proto new file mode 100644 index 0000000..baa92fb --- /dev/null +++ b/proto/envoy/type/matcher/v3/node.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/string.proto"; +import "envoy/type/matcher/v3/struct.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "NodeProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Node matcher] + +// Specifies the way to match a Node. +// The match follows AND semantics. +message NodeMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.NodeMatcher"; + + // Specifies match criteria on the node id. + StringMatcher node_id = 1; + + // Specifies match criteria on the node metadata. 
+ repeated StructMatcher node_metadatas = 2; +} diff --git a/proto/envoy/type/matcher/v3/number.proto b/proto/envoy/type/matcher/v3/number.proto new file mode 100644 index 0000000..99681c9 --- /dev/null +++ b/proto/envoy/type/matcher/v3/number.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/v3/range.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "NumberProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Number matcher] + +// Specifies the way to match a double value. +message DoubleMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.DoubleMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, the input double value must be in the range specified here. + // Note: The range is using half-open interval semantics [start, end). + type.v3.DoubleRange range = 1; + + // If specified, the input double value must be equal to the value specified here. 
+ double exact = 2; + } +} diff --git a/proto/envoy/type/matcher/v3/path.proto b/proto/envoy/type/matcher/v3/path.proto new file mode 100644 index 0000000..46b758e --- /dev/null +++ b/proto/envoy/type/matcher/v3/path.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "PathProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Path matcher] + +// Specifies the way to match a path on HTTP request. +message PathMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.PathMatcher"; + + oneof rule { + option (validate.required) = true; + + // The ``path`` must match the URL path portion of the :path header. The query and fragment + // string (if present) are removed in the URL path portion. + // For example, the path ``/data`` will match the ``:path`` header ``/data#fragment?param=value``. 
+ StringMatcher path = 1 [(validate.rules).message = {required: true}]; + } +} diff --git a/proto/envoy/type/matcher/v3/regex.proto b/proto/envoy/type/matcher/v3/regex.proto new file mode 100644 index 0000000..10b3970 --- /dev/null +++ b/proto/envoy/type/matcher/v3/regex.proto @@ -0,0 +1,97 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Regex matcher] + +// A regex matcher designed for safety when used with untrusted input. +message RegexMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher"; + + // Google's `RE2 `_ regex engine. The regex string must adhere to + // the documented `syntax `_. The engine is designed + // to complete execution in linear time as well as limit the amount of memory used. + // + // Envoy supports program size checking via runtime. The runtime keys ``re2.max_program_size.error_level`` + // and ``re2.max_program_size.warn_level`` can be set to integers as the maximum program size or + // complexity that a compiled regex can have before an exception is thrown or a warning is + // logged, respectively. ``re2.max_program_size.error_level`` defaults to 100, and + // ``re2.max_program_size.warn_level`` has no default if unset (will not check/log a warning). 
+ // + // Envoy emits two stats for tracking the program size of regexes: the histogram ``re2.program_size``, + // which records the program size, and the counter ``re2.exceeded_warn_level``, which is incremented + // each time the program size exceeds the warn level threshold. + message GoogleRE2 { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.RegexMatcher.GoogleRE2"; + + // This field controls the RE2 "program size" which is a rough estimate of how complex a + // compiled regex is to evaluate. A regex that has a program size greater than the configured + // value will fail to compile. In this case, the configured max program size can be increased + // or the regex can be simplified. If not specified, the default is 100. + // + // This field is deprecated; regexp validation should be performed on the management server + // instead of being done by each individual client. + // + // .. note:: + // + // Although this field is deprecated, the program size will still be checked against the + // global ``re2.max_program_size.error_level`` runtime value. + // + google.protobuf.UInt32Value max_program_size = 1 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + } + + oneof engine_type { + // Google's RE2 regex engine. + GoogleRE2 google_re2 = 1 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + } + + // The regex match string. The string must be supported by the configured engine. The regex is matched + // against the full string, not as a partial match. + string regex = 2 [(validate.rules).string = {min_len: 1}]; +} + +// Describes how to match a string and then produce a new string using a regular +// expression and a substitution string. 
+message RegexMatchAndSubstitute { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.RegexMatchAndSubstitute"; + + // The regular expression used to find portions of a string (hereafter called + // the "subject string") that should be replaced. When a new string is + // produced during the substitution operation, the new string is initially + // the same as the subject string, but then all matches in the subject string + // are replaced by the substitution string. If replacing all matches isn't + // desired, regular expression anchors can be used to ensure a single match, + // so as to replace just one occurrence of a pattern. Capture groups can be + // used in the pattern to extract portions of the subject string, and then + // referenced in the substitution string. + RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; + + // The string that should be substituted into matching portions of the + // subject string during a substitution operation to produce a new string. + // Capture groups in the pattern can be referenced in the substitution + // string. Note, however, that the syntax for referring to capture groups is + // defined by the chosen regular expression engine. Google's `RE2 + // `_ regular expression engine uses a + // backslash followed by the capture group number to denote a numbered + // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers + // to capture group 2. 
+ string substitution = 2 + [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; +} diff --git a/proto/envoy/type/matcher/v3/status_code_input.proto b/proto/envoy/type/matcher/v3/status_code_input.proto new file mode 100644 index 0000000..2242aea --- /dev/null +++ b/proto/envoy/type/matcher/v3/status_code_input.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "StatusCodeInputProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Common HTTP Inputs] + +// Match input indicates that matching should be done on the response status +// code. +message HttpResponseStatusCodeMatchInput { +} + +// Match input indicates that the matching should be done on the class of the +// response status code. For eg: 1xx, 2xx, 3xx, 4xx or 5xx. 
+message HttpResponseStatusCodeClassMatchInput { +} diff --git a/proto/envoy/type/matcher/v3/string.proto b/proto/envoy/type/matcher/v3/string.proto new file mode 100644 index 0000000..56d3956 --- /dev/null +++ b/proto/envoy/type/matcher/v3/string.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/regex.proto"; + +import "xds/core/v3/extension.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: String matcher] + +// Specifies the way to match a string. +// [#next-free-field: 9] +message StringMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; + + reserved 4; + + reserved "regex"; + + oneof match_pattern { + option (validate.required) = true; + + // The input string must match exactly the string specified here. + // + // Examples: + // + // * ``abc`` only matches the value ``abc``. + string exact = 1; + + // The input string must have the prefix specified here. + // + // .. note:: + // + // Empty prefix match is not allowed, please use ``safe_regex`` instead. + // + // Examples: + // + // * ``abc`` matches the value ``abc.xyz`` + string prefix = 2 [(validate.rules).string = {min_len: 1}]; + + // The input string must have the suffix specified here. + // + // .. note:: + // + // Empty suffix match is not allowed, please use ``safe_regex`` instead. 
+ // + // Examples: + // + // * ``abc`` matches the value ``xyz.abc`` + string suffix = 3 [(validate.rules).string = {min_len: 1}]; + + // The input string must match the regular expression specified here. + RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; + + // The input string must have the substring specified here. + // + // .. note:: + // + // Empty contains match is not allowed, please use ``safe_regex`` instead. + // + // Examples: + // + // * ``abc`` matches the value ``xyz.abc.def`` + string contains = 7 [(validate.rules).string = {min_len: 1}]; + + // Use an extension as the matcher type. + // [#extension-category: envoy.string_matcher] + xds.core.v3.TypedExtensionConfig custom = 8; + } + + // If ``true``, indicates the exact/prefix/suffix/contains matching should be case insensitive. This + // has no effect for the ``safe_regex`` match. + // For example, the matcher ``data`` will match both input string ``Data`` and ``data`` if this option + // is set to ``true``. + bool ignore_case = 6; +} + +// Specifies a list of ways to match a string. 
+message ListStringMatcher { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.ListStringMatcher"; + + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/proto/envoy/type/matcher/v3/struct.proto b/proto/envoy/type/matcher/v3/struct.proto new file mode 100644 index 0000000..1b96334 --- /dev/null +++ b/proto/envoy/type/matcher/v3/struct.proto @@ -0,0 +1,91 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/value.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Struct matcher] + +// StructMatcher provides a general interface to check if a given value is matched in +// google.protobuf.Struct. It uses ``path`` to retrieve the value +// from the struct and then check if it's matched to the specified value. +// +// For example, for the following Struct: +// +// .. code-block:: yaml +// +// fields: +// a: +// struct_value: +// fields: +// b: +// struct_value: +// fields: +// c: +// string_value: pro +// t: +// list_value: +// values: +// - string_value: m +// - string_value: n +// +// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" +// from the Metadata which is matched to the specified prefix match. +// +// .. code-block:: yaml +// +// path: +// - key: a +// - key: b +// - key: c +// value: +// string_match: +// prefix: pr +// +// The following StructMatcher is matched as the code will match one of the string values in the +// list at the path [a, t]. +// +// .. 
code-block:: yaml +// +// path: +// - key: a +// - key: t +// value: +// list_match: +// one_of: +// string_match: +// exact: m +// +// An example use of StructMatcher is to match metadata in envoy.v*.core.Node. +message StructMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StructMatcher"; + + // Specifies the segment in a path to retrieve value from Struct. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.StructMatcher.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_len: 1}]; + } + } + + // The path to retrieve the Value from the Struct. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The StructMatcher is matched if the value retrieved by path is matched to this value. + ValueMatcher value = 3 [(validate.rules).message = {required: true}]; +} diff --git a/proto/envoy/type/matcher/v3/value.proto b/proto/envoy/type/matcher/v3/value.proto new file mode 100644 index 0000000..8d65c45 --- /dev/null +++ b/proto/envoy/type/matcher/v3/value.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; + +package envoy.type.matcher.v3; + +import "envoy/type/matcher/v3/number.proto"; +import "envoy/type/matcher/v3/string.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.matcher.v3"; +option java_outer_classname = "ValueProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Value matcher] + +// Specifies the way to match a Protobuf::Value. Primitive values and ListValue are supported. 
+// StructValue is not supported and is always not matched. +// [#next-free-field: 8] +message ValueMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ValueMatcher"; + + // NullMatch is an empty message to specify a null value. + message NullMatch { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.matcher.ValueMatcher.NullMatch"; + } + + // Specifies how to match a value. + oneof match_pattern { + option (validate.required) = true; + + // If specified, a match occurs if and only if the target value is a NullValue. + NullMatch null_match = 1; + + // If specified, a match occurs if and only if the target value is a double value and is + // matched to this field. + DoubleMatcher double_match = 2; + + // If specified, a match occurs if and only if the target value is a string value and is + // matched to this field. + StringMatcher string_match = 3; + + // If specified, a match occurs if and only if the target value is a bool value and is equal + // to this field. + bool bool_match = 4; + + // If specified, value match will be performed based on whether the path is referring to a + // valid primitive value in the metadata. If the path is referring to a non-primitive value, + // the result is always not matched. + bool present_match = 5; + + // If specified, a match occurs if and only if the target value is a list value and + // is matched to this field. + ListMatcher list_match = 6; + + // If specified, a match occurs if and only if any of the alternatives in the match accept the value. + OrMatcher or_match = 7; + } +} + +// Specifies the way to match a list value. +message ListMatcher { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ListMatcher"; + + oneof match_pattern { + option (validate.required) = true; + + // If specified, at least one of the values in the list must match the value specified. 
+ ValueMatcher one_of = 1; + } +} + +// Specifies a list of alternatives for the match. +message OrMatcher { + repeated ValueMatcher value_matchers = 1 [(validate.rules).repeated = {min_items: 2}]; +} diff --git a/proto/envoy/type/metadata/v3/metadata.proto b/proto/envoy/type/metadata/v3/metadata.proto new file mode 100644 index 0000000..d131635 --- /dev/null +++ b/proto/envoy/type/metadata/v3/metadata.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.type.metadata.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.metadata.v3"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3;metadatav3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Metadata] + +// MetadataKey provides a way to retrieve values from +// :ref:`Metadata ` using a ``key`` and a ``path``. +// +// For example, consider the following Metadata: +// +// .. code-block:: yaml +// +// filter_metadata: +// envoy.xxx: +// prop: +// foo: bar +// xyz: +// hello: envoy +// +// The following MetadataKey would retrieve the string value "bar" from the Metadata: +// +// .. code-block:: yaml +// +// key: envoy.xxx +// path: +// - key: prop +// - key: foo +// +message MetadataKey { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKey"; + + // Specifies a segment in a path for retrieving values from Metadata. + // Currently, only key-based segments (field names) are supported. + message PathSegment { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.metadata.v2.MetadataKey.PathSegment"; + + oneof segment { + option (validate.required) = true; + + // If specified, use this key to retrieve the value in a Struct. 
+ string key = 1 [(validate.rules).string = {min_len: 1}]; + } + } + + // The key name of the Metadata from which to retrieve the Struct. + // This typically represents a builtin subsystem or custom extension. + string key = 1 [(validate.rules).string = {min_len: 1}]; + + // The path used to retrieve a specific Value from the Struct. + // This can be either a prefix or a full path, depending on the use case. + // For example, ``[prop, xyz]`` would retrieve a struct or ``[prop, foo]`` would retrieve a string + // in the example above. + // + // .. note:: + // Since only key-type segments are supported, a path cannot specify a list + // unless the list is the last segment. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// Describes different types of metadata sources. +message MetadataKind { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.metadata.v2.MetadataKind"; + + // Represents dynamic metadata associated with the request. + message Request { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.metadata.v2.MetadataKind.Request"; + } + + // Represents metadata from :ref:`the route`. + message Route { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.metadata.v2.MetadataKind.Route"; + } + + // Represents metadata from :ref:`the upstream cluster`. + message Cluster { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.metadata.v2.MetadataKind.Cluster"; + } + + // Represents metadata from :ref:`the upstream + // host`. + message Host { + option (udpa.annotations.versioning).previous_message_type = + "envoy.type.metadata.v2.MetadataKind.Host"; + } + + oneof kind { + option (validate.required) = true; + + // Request kind of metadata. + Request request = 1; + + // Route kind of metadata. + Route route = 2; + + // Cluster kind of metadata. + Cluster cluster = 3; + + // Host kind of metadata. 
+ Host host = 4; + } +} diff --git a/proto/envoy/type/percent.proto b/proto/envoy/type/percent.proto new file mode 100644 index 0000000..6457e2a --- /dev/null +++ b/proto/envoy/type/percent.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.type; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "PercentProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Percent] + +// Identifies a percentage, in the range [0.0, 100.0]. +message Percent { + double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; +} + +// A fractional percentage is used in cases in which for performance reasons performing floating +// point to integer conversions during randomness calculations is undesirable. The message includes +// both a numerator and denominator that together determine the final fractional value. +// +// * **Example**: 1/100 = 1%. +// * **Example**: 3/10000 = 0.03%. +message FractionalPercent { + // Fraction percentages support several fixed denominator values. + enum DenominatorType { + // 100. + // + // **Example**: 1/100 = 1%. + HUNDRED = 0; + + // 10,000. + // + // **Example**: 1/10000 = 0.01%. + TEN_THOUSAND = 1; + + // 1,000,000. + // + // **Example**: 1/1000000 = 0.0001%. + MILLION = 2; + } + + // Specifies the numerator. Defaults to 0. + uint32 numerator = 1; + + // Specifies the denominator. If the denominator specified is less than the numerator, the final + // fractional percentage is capped at 1 (100%). 
+ DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/proto/envoy/type/range.proto b/proto/envoy/type/range.proto new file mode 100644 index 0000000..9e66e6f --- /dev/null +++ b/proto/envoy/type/range.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package envoy.type; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "RangeProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Range] + +// Specifies the int64 start and end of the range using half-open interval semantics [start, +// end). +message Int64Range { + // start of the range (inclusive) + int64 start = 1; + + // end of the range (exclusive) + int64 end = 2; +} + +// Specifies the int32 start and end of the range using half-open interval semantics [start, +// end). +message Int32Range { + // start of the range (inclusive) + int32 start = 1; + + // end of the range (exclusive) + int32 end = 2; +} + +// Specifies the double start and end of the range using half-open interval semantics [start, +// end). 
+message DoubleRange { + // start of the range (inclusive) + double start = 1; + + // end of the range (exclusive) + double end = 2; +} diff --git a/proto/envoy/type/semantic_version.proto b/proto/envoy/type/semantic_version.proto new file mode 100644 index 0000000..f6a508c --- /dev/null +++ b/proto/envoy/type/semantic_version.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.type; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "SemanticVersionProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Semantic Version] + +// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate +// expected behaviors and APIs, the patch version field is used only +// for security fixes and can be generally ignored. +message SemanticVersion { + uint32 major_number = 1; + + uint32 minor_number = 2; + + uint32 patch = 3; +} diff --git a/proto/envoy/type/token_bucket.proto b/proto/envoy/type/token_bucket.proto new file mode 100644 index 0000000..7419ebc --- /dev/null +++ b/proto/envoy/type/token_bucket.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.type; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "TokenBucketProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Token bucket] + +// Configures a token bucket, typically used for rate limiting. +message TokenBucket { + // The maximum tokens that the bucket can hold. 
This is also the number of tokens that the bucket + // initially contains. + uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; + + // The number of tokens added to the bucket during each fill interval. If not specified, defaults + // to a single token. + google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; + + // The fill interval that tokens are added to the bucket. During each fill interval + // `tokens_per_fill` are added to the bucket. The bucket will never contain more than + // `max_tokens` tokens. + google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { + required: true + gt {} + }]; +} diff --git a/proto/envoy/type/v3/BUILD b/proto/envoy/type/v3/BUILD new file mode 100644 index 0000000..d49202b --- /dev/null +++ b/proto/envoy/type/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "@com_github_cncf_xds//udpa/annotations:pkg", + "@com_github_cncf_xds//xds/annotations/v3:pkg", + ], +) diff --git a/proto/envoy/type/v3/hash_policy.proto b/proto/envoy/type/v3/hash_policy.proto new file mode 100644 index 0000000..69452ca --- /dev/null +++ b/proto/envoy/type/v3/hash_policy.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "HashPolicyProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Hash Policy] + +// Specifies the hash policy +message HashPolicy { + option (udpa.annotations.versioning).previous_message_type = 
"envoy.type.HashPolicy"; + + // The source IP will be used to compute the hash used by hash-based load balancing + // algorithms. + message SourceIp { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy.SourceIp"; + } + + // An Object in the :ref:`filterState ` will be used + // to compute the hash used by hash-based load balancing algorithms. + message FilterState { + // The name of the Object in the filterState, which is an Envoy::Hashable object. If there is no + // data associated with the key, or the stored object is not Envoy::Hashable, no hash will be + // produced. + string key = 1 [(validate.rules).string = {min_len: 1}]; + } + + oneof policy_specifier { + option (validate.required) = true; + + SourceIp source_ip = 1; + + FilterState filter_state = 2; + } +} diff --git a/proto/envoy/type/v3/http.proto b/proto/envoy/type/v3/http.proto new file mode 100644 index 0000000..a1a5a04 --- /dev/null +++ b/proto/envoy/type/v3/http.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "HttpProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP] + +enum CodecClientType { + HTTP1 = 0; + + HTTP2 = 1; + + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. 
+ HTTP3 = 2; +} diff --git a/proto/envoy/type/v3/http_status.proto b/proto/envoy/type/v3/http_status.proto new file mode 100644 index 0000000..40d697b --- /dev/null +++ b/proto/envoy/type/v3/http_status.proto @@ -0,0 +1,199 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "HttpStatusProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: HTTP status codes] + +// HTTP response codes supported in Envoy. +// For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml +enum StatusCode { + // Empty - This code not part of the HTTP status code specification, but it is needed for proto + // `enum` type. + Empty = 0; + + // Continue - ``100`` status code. + Continue = 100; + + // OK - ``200`` status code. + OK = 200; + + // Created - ``201`` status code. + Created = 201; + + // Accepted - ``202`` status code. + Accepted = 202; + + // NonAuthoritativeInformation - ``203`` status code. + NonAuthoritativeInformation = 203; + + // NoContent - ``204`` status code. + NoContent = 204; + + // ResetContent - ``205`` status code. + ResetContent = 205; + + // PartialContent - ``206`` status code. + PartialContent = 206; + + // MultiStatus - ``207`` status code. + MultiStatus = 207; + + // AlreadyReported - ``208`` status code. + AlreadyReported = 208; + + // IMUsed - ``226`` status code. + IMUsed = 226; + + // MultipleChoices - ``300`` status code. + MultipleChoices = 300; + + // MovedPermanently - ``301`` status code. + MovedPermanently = 301; + + // Found - ``302`` status code. + Found = 302; + + // SeeOther - ``303`` status code. 
+ SeeOther = 303; + + // NotModified - ``304`` status code. + NotModified = 304; + + // UseProxy - ``305`` status code. + UseProxy = 305; + + // TemporaryRedirect - ``307`` status code. + TemporaryRedirect = 307; + + // PermanentRedirect - ``308`` status code. + PermanentRedirect = 308; + + // BadRequest - ``400`` status code. + BadRequest = 400; + + // Unauthorized - ``401`` status code. + Unauthorized = 401; + + // PaymentRequired - ``402`` status code. + PaymentRequired = 402; + + // Forbidden - ``403`` status code. + Forbidden = 403; + + // NotFound - ``404`` status code. + NotFound = 404; + + // MethodNotAllowed - ``405`` status code. + MethodNotAllowed = 405; + + // NotAcceptable - ``406`` status code. + NotAcceptable = 406; + + // ProxyAuthenticationRequired - ``407`` status code. + ProxyAuthenticationRequired = 407; + + // RequestTimeout - ``408`` status code. + RequestTimeout = 408; + + // Conflict - ``409`` status code. + Conflict = 409; + + // Gone - ``410`` status code. + Gone = 410; + + // LengthRequired - ``411`` status code. + LengthRequired = 411; + + // PreconditionFailed - ``412`` status code. + PreconditionFailed = 412; + + // PayloadTooLarge - ``413`` status code. + PayloadTooLarge = 413; + + // URITooLong - ``414`` status code. + URITooLong = 414; + + // UnsupportedMediaType - ``415`` status code. + UnsupportedMediaType = 415; + + // RangeNotSatisfiable - ``416`` status code. + RangeNotSatisfiable = 416; + + // ExpectationFailed - ``417`` status code. + ExpectationFailed = 417; + + // MisdirectedRequest - ``421`` status code. + MisdirectedRequest = 421; + + // UnprocessableEntity - ``422`` status code. + UnprocessableEntity = 422; + + // Locked - ``423`` status code. + Locked = 423; + + // FailedDependency - ``424`` status code. + FailedDependency = 424; + + // UpgradeRequired - ``426`` status code. + UpgradeRequired = 426; + + // PreconditionRequired - ``428`` status code. 
+ PreconditionRequired = 428; + + // TooManyRequests - ``429`` status code. + TooManyRequests = 429; + + // RequestHeaderFieldsTooLarge - ``431`` status code. + RequestHeaderFieldsTooLarge = 431; + + // InternalServerError - ``500`` status code. + InternalServerError = 500; + + // NotImplemented - ``501`` status code. + NotImplemented = 501; + + // BadGateway - ``502`` status code. + BadGateway = 502; + + // ServiceUnavailable - ``503`` status code. + ServiceUnavailable = 503; + + // GatewayTimeout - ``504`` status code. + GatewayTimeout = 504; + + // HTTPVersionNotSupported - ``505`` status code. + HTTPVersionNotSupported = 505; + + // VariantAlsoNegotiates - ``506`` status code. + VariantAlsoNegotiates = 506; + + // InsufficientStorage - ``507`` status code. + InsufficientStorage = 507; + + // LoopDetected - ``508`` status code. + LoopDetected = 508; + + // NotExtended - ``510`` status code. + NotExtended = 510; + + // NetworkAuthenticationRequired - ``511`` status code. + NetworkAuthenticationRequired = 511; +} + +// HTTP status. +message HttpStatus { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.HttpStatus"; + + // Supplies HTTP response code. 
+ StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; +} diff --git a/proto/envoy/type/v3/percent.proto b/proto/envoy/type/v3/percent.proto new file mode 100644 index 0000000..e041ecd --- /dev/null +++ b/proto/envoy/type/v3/percent.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "PercentProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Percent] + +// Identifies a percentage, in the range [0.0, 100.0]. +message Percent { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.Percent"; + + double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; +} + +// A fractional percentage is used in cases in which for performance reasons performing floating +// point to integer conversions during randomness calculations is undesirable. The message includes +// both a numerator and denominator that together determine the final fractional value. +// +// * **Example**: 1/100 = 1%. +// * **Example**: 3/10000 = 0.03%. +message FractionalPercent { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.FractionalPercent"; + + // Fraction percentages support several fixed denominator values. + enum DenominatorType { + // 100. + // + // **Example**: 1/100 = 1%. + HUNDRED = 0; + + // 10,000. + // + // **Example**: 1/10000 = 0.01%. + TEN_THOUSAND = 1; + + // 1,000,000. + // + // **Example**: 1/1000000 = 0.0001%. + MILLION = 2; + } + + // Specifies the numerator. Defaults to 0. + uint32 numerator = 1; + + // Specifies the denominator. 
If the denominator specified is less than the numerator, the final + // fractional percentage is capped at 1 (100%). + DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/proto/envoy/type/v3/range.proto b/proto/envoy/type/v3/range.proto new file mode 100644 index 0000000..3b1af81 --- /dev/null +++ b/proto/envoy/type/v3/range.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RangeProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Range] + +// Specifies the int64 start and end of the range using half-open interval semantics [start, +// end). +message Int64Range { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int64Range"; + + // start of the range (inclusive) + int64 start = 1; + + // end of the range (exclusive) + int64 end = 2; +} + +// Specifies the int32 start and end of the range using half-open interval semantics [start, +// end). +message Int32Range { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int32Range"; + + // start of the range (inclusive) + int32 start = 1; + + // end of the range (exclusive) + int32 end = 2; +} + +// Specifies the double start and end of the range using half-open interval semantics [start, +// end). 
+message DoubleRange { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.DoubleRange"; + + // start of the range (inclusive) + double start = 1; + + // end of the range (exclusive) + double end = 2; +} diff --git a/proto/envoy/type/v3/ratelimit_strategy.proto b/proto/envoy/type/v3/ratelimit_strategy.proto new file mode 100644 index 0000000..a86da55 --- /dev/null +++ b/proto/envoy/type/v3/ratelimit_strategy.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "envoy/type/v3/ratelimit_unit.proto"; +import "envoy/type/v3/token_bucket.proto"; + +import "xds/annotations/v3/status.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitStrategyProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Rate Limit Strategies] + +message RateLimitStrategy { + // Choose between allow all and deny all. + enum BlanketRule { + ALLOW_ALL = 0; + DENY_ALL = 1; + } + + // Best-effort limit of the number of requests per time unit. + // + // Allows to specify the desired requests per second (RPS, QPS), requests per minute (QPM, RPM), + // etc., without specifying a rate limiting algorithm implementation. + // + // ``RequestsPerTimeUnit`` strategy does not demand any specific rate limiting algorithm to be + // used (in contrast to the :ref:`TokenBucket `, + // for example). It implies that the implementation details of rate limiting algorithm are + // irrelevant as long as the configured number of "requests per time unit" is achieved. 
+ // + // Note that the ``TokenBucket`` is still a valid implementation of the ``RequestsPerTimeUnit`` + // strategy, and may be chosen to enforce the rate limit. However, there's no guarantee it will be + // the ``TokenBucket`` in particular, and not the Leaky Bucket, the Sliding Window, or any other + // rate limiting algorithm that fulfills the requirements. + message RequestsPerTimeUnit { + // The desired number of requests per :ref:`time_unit + // ` to allow. + // If set to ``0``, deny all (equivalent to ``BlanketRule.DENY_ALL``). + // + // .. note:: + // Note that the algorithm implementation determines the course of action for the requests + // over the limit. As long as the ``requests_per_time_unit`` converges on the desired value, + // it's allowed to treat this field as a soft-limit: allow bursts, redistribute the allowance + // over time, etc. + // + uint64 requests_per_time_unit = 1; + + // The unit of time. Ignored when :ref:`requests_per_time_unit + // ` + // is ``0`` (deny all). + RateLimitUnit time_unit = 2 [(validate.rules).enum = {defined_only: true}]; + } + + oneof strategy { + option (validate.required) = true; + + // Allow or Deny the requests. + // If unset, allow all. + BlanketRule blanket_rule = 1 [(validate.rules).enum = {defined_only: true}]; + + // Best-effort limit of the number of requests per time unit, f.e. requests per second. + // Does not prescribe any specific rate limiting algorithm, see :ref:`RequestsPerTimeUnit + // ` for details. + RequestsPerTimeUnit requests_per_time_unit = 2; + + // Limit the requests by consuming tokens from the Token Bucket. + // Allow the same number of requests as the number of tokens available in + // the token bucket. 
+ TokenBucket token_bucket = 3; + } +} diff --git a/proto/envoy/type/v3/ratelimit_unit.proto b/proto/envoy/type/v3/ratelimit_unit.proto new file mode 100644 index 0000000..1a96497 --- /dev/null +++ b/proto/envoy/type/v3/ratelimit_unit.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "RatelimitUnitProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Ratelimit Time Unit] + +// Identifies the unit of of time for rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; + + // The time unit representing a month. + MONTH = 5; + + // The time unit representing a year. + YEAR = 6; +} diff --git a/proto/envoy/type/v3/semantic_version.proto b/proto/envoy/type/v3/semantic_version.proto new file mode 100644 index 0000000..e032b4c --- /dev/null +++ b/proto/envoy/type/v3/semantic_version.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "SemanticVersionProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Semantic version] + +// Envoy uses SemVer (https://semver.org/). 
Major/minor versions indicate +// expected behaviors and APIs, the patch version field is used only +// for security fixes and can be generally ignored. +message SemanticVersion { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.SemanticVersion"; + + uint32 major_number = 1; + + uint32 minor_number = 2; + + uint32 patch = 3; +} diff --git a/proto/envoy/type/v3/token_bucket.proto b/proto/envoy/type/v3/token_bucket.proto new file mode 100644 index 0000000..157a271 --- /dev/null +++ b/proto/envoy/type/v3/token_bucket.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package envoy.type.v3; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.v3"; +option java_outer_classname = "TokenBucketProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/v3;typev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Token bucket] + +// Configures a token bucket, typically used for rate limiting. +message TokenBucket { + option (udpa.annotations.versioning).previous_message_type = "envoy.type.TokenBucket"; + + // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket + // initially contains. + uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; + + // The number of tokens added to the bucket during each fill interval. If not specified, defaults + // to a single token. + google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; + + // The fill interval that tokens are added to the bucket. During each fill interval + // ``tokens_per_fill`` are added to the bucket. The bucket will never contain more than + // ``max_tokens`` tokens. 
+ google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { + required: true + gt {} + }]; +} diff --git a/proto/google/protobuf/any.proto b/proto/google/protobuf/any.proto new file mode 100644 index 0000000..eff44e5 --- /dev/null +++ b/proto/google/protobuf/any.proto @@ -0,0 +1,162 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/known/anypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". 
+// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) 
+ // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/proto/google/protobuf/duration.proto b/proto/google/protobuf/duration.proto new file mode 100644 index 0000000..41f40c2 --- /dev/null +++ b/proto/google/protobuf/duration.proto @@ -0,0 +1,115 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/durationpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ int32 nanos = 2; +} diff --git a/proto/google/protobuf/empty.proto b/proto/google/protobuf/empty.proto new file mode 100644 index 0000000..b87c89d --- /dev/null +++ b/proto/google/protobuf/empty.proto @@ -0,0 +1,51 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/known/emptypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +message Empty {} diff --git a/proto/google/protobuf/struct.proto b/proto/google/protobuf/struct.proto new file mode 100644 index 0000000..1bf0c1a --- /dev/null +++ b/proto/google/protobuf/struct.proto @@ -0,0 +1,95 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/protobuf/types/known/structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants. Absence of any variant indicates an error.
+// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/proto/google/protobuf/timestamp.proto b/proto/google/protobuf/timestamp.proto new file mode 100644 index 0000000..6bc1efc --- /dev/null +++ b/proto/google/protobuf/timestamp.proto @@ -0,0 +1,145 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/timestamppb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A ProtoJSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a ProtoJSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +// ) to obtain a formatter capable of generating timestamps in this format. +// +message Timestamp { + // Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must + // be between -62135596800 and 253402300799 inclusive (which corresponds to + // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z). + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. This field is + // the nanosecond portion of the duration, not an alternative to seconds. + // Negative second values with fractions must still have non-negative nanos + // values that count forward in time. 
Must be between 0 and 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/proto/google/protobuf/wrappers.proto b/proto/google/protobuf/wrappers.proto new file mode 100644 index 0000000..e583e7c --- /dev/null +++ b/proto/google/protobuf/wrappers.proto @@ -0,0 +1,157 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Wrappers for primitive (non-message) types. These types were needed +// for legacy reasons and are not recommended for use in new APIs. +// +// Historically these wrappers were useful to have presence on proto3 primitive +// fields, but proto3 syntax has been updated to support the `optional` keyword. +// Using that keyword is now the strongly preferred way to add presence to +// proto3 primitive fields. +// +// A secondary usecase was to embed primitives in the `google.protobuf.Any` +// type: it is now recommended that you embed your value in your own wrapper +// message which can be specifically documented. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. 
+// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. +message BytesValue { + // The bytes value. 
+ bytes value = 1; +} diff --git a/proto/google/rpc/status.proto b/proto/google/rpc/status.proto new file mode 100644 index 0000000..3b1f7a9 --- /dev/null +++ b/proto/google/rpc/status.proto @@ -0,0 +1,47 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. 
+ string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} diff --git a/proto/udpa/annotations/migrate.proto b/proto/udpa/annotations/migrate.proto new file mode 100644 index 0000000..1c42a64 --- /dev/null +++ b/proto/udpa/annotations/migrate.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "google/protobuf/descriptor.proto"; + +// Magic number in this file derived from top 28bit of SHA256 digest of +// "udpa.annotation.migrate". + +extend google.protobuf.MessageOptions { + MigrateAnnotation message_migrate = 171962766; +} + +extend google.protobuf.FieldOptions { + FieldMigrateAnnotation field_migrate = 171962766; +} + +extend google.protobuf.EnumOptions { + MigrateAnnotation enum_migrate = 171962766; +} + +extend google.protobuf.EnumValueOptions { + MigrateAnnotation enum_value_migrate = 171962766; +} + +extend google.protobuf.FileOptions { + FileMigrateAnnotation file_migrate = 171962766; +} + +message MigrateAnnotation { + // Rename the message/enum/enum value in next version. + string rename = 1; +} + +message FieldMigrateAnnotation { + // Rename the field in next version. + string rename = 1; + + // Add the field to a named oneof in next version. If this already exists, the + // field will join its siblings under the oneof, otherwise a new oneof will be + // created with the given name. + string oneof_promotion = 2; +} + +message FileMigrateAnnotation { + // Move all types in the file to another package, this implies changing proto + // file path. 
+ string move_to_package = 2; +} diff --git a/proto/udpa/annotations/security.proto b/proto/udpa/annotations/security.proto new file mode 100644 index 0000000..7191fe3 --- /dev/null +++ b/proto/udpa/annotations/security.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "udpa/annotations/status.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/descriptor.proto"; + +import "validate/validate.proto"; + +// All annotations in this file are experimental and subject to change. Their +// only consumer today is the Envoy APIs and SecuritAnnotationValidator protoc +// plugin in this repository. +option (udpa.annotations.file_status).work_in_progress = true; + +extend google.protobuf.FieldOptions { + // Magic number is the 28 most significant bits in the sha256sum of + // "udpa.annotations.security". + FieldSecurityAnnotation security = 11122993; +} + +// These annotations indicate metadata for the purpose of understanding the +// security significance of fields. +message FieldSecurityAnnotation { + // Field should be set in the presence of untrusted downstreams. + bool configure_for_untrusted_downstream = 1; + + // Field should be set in the presence of untrusted upstreams. + bool configure_for_untrusted_upstream = 2; +} diff --git a/proto/udpa/annotations/sensitive.proto b/proto/udpa/annotations/sensitive.proto new file mode 100644 index 0000000..8dc921f --- /dev/null +++ b/proto/udpa/annotations/sensitive.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.FieldOptions { + // Magic number is the 28 most significant bits in the sha256sum of "udpa.annotations.sensitive". + // When set to true, `sensitive` indicates that this field contains sensitive data, such as + // personally identifiable information, passwords, or private keys, and should be redacted for + // display by tools aware of this annotation. 
Note that that this has no effect on standard + // Protobuf functions such as `TextFormat::PrintToString`. + bool sensitive = 76569463; +} diff --git a/proto/udpa/annotations/status.proto b/proto/udpa/annotations/status.proto new file mode 100644 index 0000000..9832ffd --- /dev/null +++ b/proto/udpa/annotations/status.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "google/protobuf/descriptor.proto"; + +// Magic number in this file derived from top 28bit of SHA256 digest of +// "udpa.annotation.status". +extend google.protobuf.FileOptions { + StatusAnnotation file_status = 222707719; +} + +enum PackageVersionStatus { + // Unknown package version status. + UNKNOWN = 0; + + // This version of the package is frozen. + FROZEN = 1; + + // This version of the package is the active development version. + ACTIVE = 2; + + // This version of the package is the candidate for the next major version. It + // is typically machine generated from the active development version. + NEXT_MAJOR_VERSION_CANDIDATE = 3; +} + +message StatusAnnotation { + // The entity is work-in-progress and subject to breaking changes. + bool work_in_progress = 1; + + // The entity belongs to a package with the given version status. + PackageVersionStatus package_version_status = 2; +} diff --git a/proto/udpa/annotations/versioning.proto b/proto/udpa/annotations/versioning.proto new file mode 100644 index 0000000..16f6dc3 --- /dev/null +++ b/proto/udpa/annotations/versioning.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.MessageOptions { + // Magic number derived from 0x78 ('x') 0x44 ('D') 0x53 ('S') + VersioningAnnotation versioning = 7881811; +} + +message VersioningAnnotation { + // Track the previous message type. E.g. this message might be + // udpa.foo.v3alpha.Foo and it was previously udpa.bar.v2.Bar. This + // information is consumed by UDPA via proto descriptors. 
+ string previous_message_type = 1; +} diff --git a/proto/validate/validate.proto b/proto/validate/validate.proto new file mode 100644 index 0000000..5aa9653 --- /dev/null +++ b/proto/validate/validate.proto @@ -0,0 +1,862 @@ +syntax = "proto2"; +package validate; + +option go_package = "github.com/envoyproxy/protoc-gen-validate/validate"; +option java_package = "io.envoyproxy.pgv.validate"; + +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +// Validation rules applied at the message level +extend google.protobuf.MessageOptions { + // Disabled nullifies any validation rules for this message, including any + // message fields associated with it that do support validation. + optional bool disabled = 1071; + // Ignore skips generation of validation methods for this message. + optional bool ignored = 1072; +} + +// Validation rules applied at the oneof level +extend google.protobuf.OneofOptions { + // Required ensures that exactly one the field options in a oneof is set; + // validation fails if no fields in the oneof are set. + optional bool required = 1071; +} + +// Validation rules applied at the field level +extend google.protobuf.FieldOptions { + // Rules specify the validations to be performed on this field. By default, + // no validation is performed against a field. + optional FieldRules rules = 1071; +} + +// FieldRules encapsulates the rules for each type of field. Depending on the +// field, the correct set should be used to ensure proper validations. 
+message FieldRules { + optional MessageRules message = 17; + oneof type { + // Scalar Field Types + FloatRules float = 1; + DoubleRules double = 2; + Int32Rules int32 = 3; + Int64Rules int64 = 4; + UInt32Rules uint32 = 5; + UInt64Rules uint64 = 6; + SInt32Rules sint32 = 7; + SInt64Rules sint64 = 8; + Fixed32Rules fixed32 = 9; + Fixed64Rules fixed64 = 10; + SFixed32Rules sfixed32 = 11; + SFixed64Rules sfixed64 = 12; + BoolRules bool = 13; + StringRules string = 14; + BytesRules bytes = 15; + + // Complex Field Types + EnumRules enum = 16; + RepeatedRules repeated = 18; + MapRules map = 19; + + // Well-Known Field Types + AnyRules any = 20; + DurationRules duration = 21; + TimestampRules timestamp = 22; + } +} + +// FloatRules describes the constraints applied to `float` values +message FloatRules { + // Const specifies that this field must be exactly the specified value + optional float const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional float lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional float lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional float gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional float gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated float in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated float not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// DoubleRules describes the constraints applied to `double` values +message DoubleRules { + // Const specifies that this field must be exactly the specified value + optional double const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional double lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional double lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional double gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional double gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated double in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated double not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Int32Rules describes the constraints applied to `int32` values +message Int32Rules { + // Const specifies that this field must be exactly the specified value + optional int32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional int32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional int32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional int32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional int32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated int32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Int64Rules describes the constraints applied to `int64` values +message Int64Rules { + // Const specifies that this field must be exactly the specified value + optional int64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional int64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional int64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional int64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional int64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated int64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// UInt32Rules describes the constraints applied to `uint32` values +message UInt32Rules { + // Const specifies that this field must be exactly the specified value + optional uint32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional uint32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional uint32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional uint32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional uint32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated uint32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated uint32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// UInt64Rules describes the constraints applied to `uint64` values +message UInt64Rules { + // Const specifies that this field must be exactly the specified value + optional uint64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional uint64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional uint64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional uint64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional uint64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated uint64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated uint64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SInt32Rules describes the constraints applied to `sint32` values +message SInt32Rules { + // Const specifies that this field must be exactly the specified value + optional sint32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sint32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sint32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sint32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional sint32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sint32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sint32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SInt64Rules describes the constraints applied to `sint64` values +message SInt64Rules { + // Const specifies that this field must be exactly the specified value + optional sint64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sint64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sint64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sint64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional sint64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sint64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sint64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Fixed32Rules describes the constraints applied to `fixed32` values +message Fixed32Rules { + // Const specifies that this field must be exactly the specified value + optional fixed32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional fixed32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional fixed32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional fixed32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional fixed32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated fixed32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated fixed32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// Fixed64Rules describes the constraints applied to `fixed64` values +message Fixed64Rules { + // Const specifies that this field must be exactly the specified value + optional fixed64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional fixed64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional fixed64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional fixed64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional fixed64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated fixed64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated fixed64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SFixed32Rules describes the constraints applied to `sfixed32` values +message SFixed32Rules { + // Const specifies that this field must be exactly the specified value + optional sfixed32 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sfixed32 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sfixed32 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sfixed32 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional sfixed32 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sfixed32 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sfixed32 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// SFixed64Rules describes the constraints applied to `sfixed64` values +message SFixed64Rules { + // Const specifies that this field must be exactly the specified value + optional sfixed64 const = 1; + + // Lt specifies that this field must be less than the specified value, + // exclusive + optional sfixed64 lt = 2; + + // Lte specifies that this field must be less than or equal to the + // specified value, inclusive + optional sfixed64 lte = 3; + + // Gt specifies that this field must be greater than the specified value, + // exclusive. If the value of Gt is larger than a specified Lt or Lte, the + // range is reversed. + optional sfixed64 gt = 4; + + // Gte specifies that this field must be greater than or equal to the + // specified value, inclusive. If the value of Gte is larger than a + // specified Lt or Lte, the range is reversed. 
+ optional sfixed64 gte = 5; + + // In specifies that this field must be equal to one of the specified + // values + repeated sfixed64 in = 6; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated sfixed64 not_in = 7; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 8; +} + +// BoolRules describes the constraints applied to `bool` values +message BoolRules { + // Const specifies that this field must be exactly the specified value + optional bool const = 1; +} + +// StringRules describe the constraints applied to `string` values +message StringRules { + // Const specifies that this field must be exactly the specified value + optional string const = 1; + + // Len specifies that this field must be the specified number of + // characters (Unicode code points). Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 len = 19; + + // MinLen specifies that this field must be the specified number of + // characters (Unicode code points) at a minimum. Note that the number of + // characters may differ from the number of bytes in the string. + optional uint64 min_len = 2; + + // MaxLen specifies that this field must be the specified number of + // characters (Unicode code points) at a maximum. Note that the number of + // characters may differ from the number of bytes in the string. 
+ optional uint64 max_len = 3; + + // LenBytes specifies that this field must be the specified number of bytes + optional uint64 len_bytes = 20; + + // MinBytes specifies that this field must be the specified number of bytes + // at a minimum + optional uint64 min_bytes = 4; + + // MaxBytes specifies that this field must be the specified number of bytes + // at a maximum + optional uint64 max_bytes = 5; + + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + optional string pattern = 6; + + // Prefix specifies that this field must have the specified substring at + // the beginning of the string. + optional string prefix = 7; + + // Suffix specifies that this field must have the specified substring at + // the end of the string. + optional string suffix = 8; + + // Contains specifies that this field must have the specified substring + // anywhere in the string. + optional string contains = 9; + + // NotContains specifies that this field cannot have the specified substring + // anywhere in the string. + optional string not_contains = 23; + + // In specifies that this field must be equal to one of the specified + // values + repeated string in = 10; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated string not_in = 11; + + // WellKnown rules provide advanced constraints against common string + // patterns + oneof well_known { + // Email specifies that the field must be a valid email address as + // defined by RFC 5322 + bool email = 12; + + // Hostname specifies that the field must be a valid hostname as + // defined by RFC 1034. This constraint does not support + // internationalized domain names (IDNs). + bool hostname = 13; + + // Ip specifies that the field must be a valid IP (v4 or v6) address. + // Valid IPv6 addresses should not include surrounding square brackets. 
+ bool ip = 14; + + // Ipv4 specifies that the field must be a valid IPv4 address. + bool ipv4 = 15; + + // Ipv6 specifies that the field must be a valid IPv6 address. Valid + // IPv6 addresses should not include surrounding square brackets. + bool ipv6 = 16; + + // Uri specifies that the field must be a valid, absolute URI as defined + // by RFC 3986 + bool uri = 17; + + // UriRef specifies that the field must be a valid URI as defined by RFC + // 3986 and may be relative or absolute. + bool uri_ref = 18; + + // Address specifies that the field must be either a valid hostname as + // defined by RFC 1034 (which does not support internationalized domain + // names or IDNs), or it can be a valid IP (v4 or v6). + bool address = 21; + + // Uuid specifies that the field must be a valid UUID as defined by + // RFC 4122 + bool uuid = 22; + + // WellKnownRegex specifies a common well known pattern defined as a regex. + KnownRegex well_known_regex = 24; + } + + // This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable + // strict header validation. + // By default, this is true, and HTTP header validations are RFC-compliant. + // Setting to false will enable a looser validations that only disallows + // \r\n\0 characters, which can be used to bypass header matching rules. + optional bool strict = 25 [default = true]; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 26; +} + +// WellKnownRegex contain some well-known patterns. +enum KnownRegex { + UNKNOWN = 0; + + // HTTP header name as defined by RFC 7230. + HTTP_HEADER_NAME = 1; + + // HTTP header value as defined by RFC 7230. 
+ HTTP_HEADER_VALUE = 2; +} + +// BytesRules describe the constraints applied to `bytes` values +message BytesRules { + // Const specifies that this field must be exactly the specified value + optional bytes const = 1; + + // Len specifies that this field must be the specified number of bytes + optional uint64 len = 13; + + // MinLen specifies that this field must be the specified number of bytes + // at a minimum + optional uint64 min_len = 2; + + // MaxLen specifies that this field must be the specified number of bytes + // at a maximum + optional uint64 max_len = 3; + + // Pattern specifies that this field must match against the specified + // regular expression (RE2 syntax). The included expression should elide + // any delimiters. + optional string pattern = 4; + + // Prefix specifies that this field must have the specified bytes at the + // beginning of the string. + optional bytes prefix = 5; + + // Suffix specifies that this field must have the specified bytes at the + // end of the string. + optional bytes suffix = 6; + + // Contains specifies that this field must have the specified bytes + // anywhere in the string. 
+ optional bytes contains = 7; + + // In specifies that this field must be equal to one of the specified + // values + repeated bytes in = 8; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated bytes not_in = 9; + + // WellKnown rules provide advanced constraints against common byte + // patterns + oneof well_known { + // Ip specifies that the field must be a valid IP (v4 or v6) address in + // byte format + bool ip = 10; + + // Ipv4 specifies that the field must be a valid IPv4 address in byte + // format + bool ipv4 = 11; + + // Ipv6 specifies that the field must be a valid IPv6 address in byte + // format + bool ipv6 = 12; + } + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 14; +} + +// EnumRules describe the constraints applied to enum values +message EnumRules { + // Const specifies that this field must be exactly the specified value + optional int32 const = 1; + + // DefinedOnly specifies that this field must be only one of the defined + // values for this enum, failing on any undefined value. + optional bool defined_only = 2; + + // In specifies that this field must be equal to one of the specified + // values + repeated int32 in = 3; + + // NotIn specifies that this field cannot be equal to one of the specified + // values + repeated int32 not_in = 4; +} + +// MessageRules describe the constraints applied to embedded message values. +// For message-type fields, validation is performed recursively. 
+message MessageRules { + // Skip specifies that the validation rules of this field should not be + // evaluated + optional bool skip = 1; + + // Required specifies that this field must be set + optional bool required = 2; +} + +// RepeatedRules describe the constraints applied to `repeated` values +message RepeatedRules { + // MinItems specifies that this field must have the specified number of + // items at a minimum + optional uint64 min_items = 1; + + // MaxItems specifies that this field must have the specified number of + // items at a maximum + optional uint64 max_items = 2; + + // Unique specifies that all elements in this field must be unique. This + // constraint is only applicable to scalar and enum types (messages are not + // supported). + optional bool unique = 3; + + // Items specifies the constraints to be applied to each item in the field. + // Repeated message fields will still execute validation against each item + // unless skip is specified here. + optional FieldRules items = 4; + + // IgnoreEmpty specifies that the validation rules of this field should be + // evaluated only if the field is not empty + optional bool ignore_empty = 5; +} + +// MapRules describe the constraints applied to `map` values +message MapRules { + // MinPairs specifies that this field must have the specified number of + // KVs at a minimum + optional uint64 min_pairs = 1; + + // MaxPairs specifies that this field must have the specified number of + // KVs at a maximum + optional uint64 max_pairs = 2; + + // NoSparse specifies values in this field cannot be unset. This only + // applies to map's with message value types. + optional bool no_sparse = 3; + + // Keys specifies the constraints to be applied to each key in the field. + optional FieldRules keys = 4; + + // Values specifies the constraints to be applied to the value of each key + // in the field. Message values will still have their validations evaluated + // unless skip is specified here. 
+  optional FieldRules values = 5;
+
+  // IgnoreEmpty specifies that the validation rules of this field should be
+  // evaluated only if the field is not empty
+  optional bool ignore_empty = 6;
+}
+
+// AnyRules describe constraints applied exclusively to the
+// `google.protobuf.Any` well-known type
+message AnyRules {
+  // Required specifies that this field must be set
+  optional bool required = 1;
+
+  // In specifies that this field's `type_url` must be equal to one of the
+  // specified values.
+  repeated string in = 2;
+
+  // NotIn specifies that this field's `type_url` must not be equal to any of
+  // the specified values.
+  repeated string not_in = 3;
+}
+
+// DurationRules describe the constraints applied exclusively to the
+// `google.protobuf.Duration` well-known type
+message DurationRules {
+  // Required specifies that this field must be set
+  optional bool required = 1;
+
+  // Const specifies that this field must be exactly the specified value
+  optional google.protobuf.Duration const = 2;
+
+  // Lt specifies that this field must be less than the specified value,
+  // exclusive
+  optional google.protobuf.Duration lt = 3;
+
+  // Lte specifies that this field must be less than or equal to the
+  // specified value, inclusive
+  optional google.protobuf.Duration lte = 4;
+
+  // Gt specifies that this field must be greater than the specified value,
+  // exclusive
+  optional google.protobuf.Duration gt = 5;
+
+  // Gte specifies that this field must be greater than the specified value,
+  // inclusive
+  optional google.protobuf.Duration gte = 6;
+
+  // In specifies that this field must be equal to one of the specified
+  // values
+  repeated google.protobuf.Duration in = 7;
+
+  // NotIn specifies that this field cannot be equal to one of the specified
+  // values
+  repeated google.protobuf.Duration not_in = 8;
+}
+
+// TimestampRules describe the constraints applied exclusively to the
+// `google.protobuf.Timestamp` well-known type
+message TimestampRules {
+  // Required
 specifies that this field must be set
+  optional bool required = 1;
+
+  // Const specifies that this field must be exactly the specified value
+  optional google.protobuf.Timestamp const = 2;
+
+  // Lt specifies that this field must be less than the specified value,
+  // exclusive
+  optional google.protobuf.Timestamp lt = 3;
+
+  // Lte specifies that this field must be less than or equal to the
+  // specified value, inclusive
+  optional google.protobuf.Timestamp lte = 4;
+
+  // Gt specifies that this field must be greater than the specified value,
+  // exclusive
+  optional google.protobuf.Timestamp gt = 5;
+
+  // Gte specifies that this field must be greater than the specified value,
+  // inclusive
+  optional google.protobuf.Timestamp gte = 6;
+
+  // LtNow specifies that this must be less than the current time. LtNow
+  // can only be used with the Within rule.
+  optional bool lt_now = 7;
+
+  // GtNow specifies that this must be greater than the current time. GtNow
+  // can only be used with the Within rule.
+  optional bool gt_now = 8;
+
+  // Within specifies that this field must be within this duration of the
+  // current time. This constraint can be used alone or with the LtNow and
+  // GtNow rules.
+  optional google.protobuf.Duration within = 9;
+}
diff --git a/proto/xds/annotations/v3/migrate.proto b/proto/xds/annotations/v3/migrate.proto
new file mode 100644
index 0000000..1385927
--- /dev/null
+++ b/proto/xds/annotations/v3/migrate.proto
@@ -0,0 +1,46 @@
+syntax = "proto3";
+
+package xds.annotations.v3;
+
+import "google/protobuf/descriptor.proto";
+
+option go_package = "github.com/cncf/xds/go/xds/annotations/v3";
+
+// Magic number in this file derived from top 28bit of SHA256 digest of
+// "xds.annotations.v3.migrate".
+extend google.protobuf.MessageOptions {
+  MigrateAnnotation message_migrate = 112948430;
+}
+extend google.protobuf.FieldOptions {
+  FieldMigrateAnnotation field_migrate = 112948430;
+}
+extend google.protobuf.EnumOptions {
+  MigrateAnnotation enum_migrate = 112948430;
+}
+extend google.protobuf.EnumValueOptions {
+  MigrateAnnotation enum_value_migrate = 112948430;
+}
+extend google.protobuf.FileOptions {
+  FileMigrateAnnotation file_migrate = 112948430;
+}
+
+message MigrateAnnotation {
+  // Rename the message/enum/enum value in next version.
+  string rename = 1;
+}
+
+message FieldMigrateAnnotation {
+  // Rename the field in next version.
+  string rename = 1;
+
+  // Add the field to a named oneof in next version. If this already exists, the
+  // field will join its siblings under the oneof, otherwise a new oneof will be
+  // created with the given name.
+  string oneof_promotion = 2;
+}
+
+message FileMigrateAnnotation {
+  // Move all types in the file to another package, this implies changing proto
+  // file path.
+  string move_to_package = 2;
+}
diff --git a/proto/xds/annotations/v3/security.proto b/proto/xds/annotations/v3/security.proto
new file mode 100644
index 0000000..f1f9f40
--- /dev/null
+++ b/proto/xds/annotations/v3/security.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package xds.annotations.v3;
+
+import "xds/annotations/v3/status.proto";
+
+import "google/protobuf/descriptor.proto";
+
+option go_package = "github.com/cncf/xds/go/xds/annotations/v3";
+
+// All annotations in this file are experimental and subject to change. Their
+// only consumer today is the Envoy APIs and SecurityAnnotationValidator protoc
+// plugin in this repository.
+option (xds.annotations.v3.file_status).work_in_progress = true;
+
+extend google.protobuf.FieldOptions {
+  // Magic number is the 28 most significant bits in the sha256sum of
+  // "xds.annotations.v3.security".
+  FieldSecurityAnnotation security = 99044135;
+}
+
+// These annotations indicate metadata for the purpose of understanding the
+// security significance of fields.
+message FieldSecurityAnnotation {
+  // Field should be set in the presence of untrusted downstreams.
+  bool configure_for_untrusted_downstream = 1;
+
+  // Field should be set in the presence of untrusted upstreams.
+  bool configure_for_untrusted_upstream = 2;
+}
diff --git a/proto/xds/annotations/v3/sensitive.proto b/proto/xds/annotations/v3/sensitive.proto
new file mode 100644
index 0000000..e2cc0b7
--- /dev/null
+++ b/proto/xds/annotations/v3/sensitive.proto
@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+package xds.annotations.v3;
+
+import "google/protobuf/descriptor.proto";
+
+option go_package = "github.com/cncf/xds/go/xds/annotations/v3";
+
+extend google.protobuf.FieldOptions {
+  // Magic number is the 28 most significant bits in the sha256sum of "xds.annotations.v3.sensitive".
+  // When set to true, `sensitive` indicates that this field contains sensitive data, such as
+  // personally identifiable information, passwords, or private keys, and should be redacted for
+  // display by tools aware of this annotation. Note that this has no effect on standard
+  // Protobuf functions such as `TextFormat::PrintToString`.
+  bool sensitive = 61008053;
+}
diff --git a/proto/xds/annotations/v3/status.proto b/proto/xds/annotations/v3/status.proto
new file mode 100644
index 0000000..367e784
--- /dev/null
+++ b/proto/xds/annotations/v3/status.proto
@@ -0,0 +1,59 @@
+syntax = "proto3";
+
+package xds.annotations.v3;
+
+import "google/protobuf/descriptor.proto";
+
+option go_package = "github.com/cncf/xds/go/xds/annotations/v3";
+
+// Magic number in this file derived from top 28bit of SHA256 digest of
+// "xds.annotations.v3.status".
+extend google.protobuf.FileOptions { + FileStatusAnnotation file_status = 226829418; +} + +extend google.protobuf.MessageOptions { + MessageStatusAnnotation message_status = 226829418; +} + +extend google.protobuf.FieldOptions { + FieldStatusAnnotation field_status = 226829418; +} + +message FileStatusAnnotation { + // The entity is work-in-progress and subject to breaking changes. + bool work_in_progress = 1; +} + +message MessageStatusAnnotation { + // The entity is work-in-progress and subject to breaking changes. + bool work_in_progress = 1; +} + +message FieldStatusAnnotation { + // The entity is work-in-progress and subject to breaking changes. + bool work_in_progress = 1; +} + +enum PackageVersionStatus { + // Unknown package version status. + UNKNOWN = 0; + + // This version of the package is frozen. + FROZEN = 1; + + // This version of the package is the active development version. + ACTIVE = 2; + + // This version of the package is the candidate for the next major version. It + // is typically machine generated from the active development version. + NEXT_MAJOR_VERSION_CANDIDATE = 3; +} + +message StatusAnnotation { + // The entity is work-in-progress and subject to breaking changes. + bool work_in_progress = 1; + + // The entity belongs to a package with the given version status. + PackageVersionStatus package_version_status = 2; +} diff --git a/proto/xds/annotations/v3/versioning.proto b/proto/xds/annotations/v3/versioning.proto new file mode 100644 index 0000000..b6440f1 --- /dev/null +++ b/proto/xds/annotations/v3/versioning.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package xds.annotations.v3; + +import "google/protobuf/descriptor.proto"; + +option go_package = "github.com/cncf/xds/go/xds/annotations/v3"; + +extend google.protobuf.MessageOptions { + // Magic number is the 28 most significant bits in the sha256sum of + // "xds.annotations.v3.versioning". 
+ VersioningAnnotation versioning = 92389011; +} + +message VersioningAnnotation { + // Track the previous message type. E.g. this message might be + // xds.foo.v3alpha.Foo and it was previously xds.bar.v2.Bar. This + // information is consumed by UDPA via proto descriptors. + string previous_message_type = 1; +} diff --git a/proto/xds/core/v3/authority.proto b/proto/xds/core/v3/authority.proto new file mode 100644 index 0000000..d666c38 --- /dev/null +++ b/proto/xds/core/v3/authority.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package xds.core.v3; + +import "xds/annotations/v3/status.proto"; + +import "validate/validate.proto"; + +option java_outer_classname = "AuthorityProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.core.v3"; +option go_package = "github.com/cncf/xds/go/xds/core/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// xDS authority information. +message Authority { + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // .. space reserved for additional authority addressing information, e.g. for + // resource signing, items such as CA trust chain, cert pinning may be added. +} diff --git a/proto/xds/core/v3/cidr.proto b/proto/xds/core/v3/cidr.proto new file mode 100644 index 0000000..c40dab2 --- /dev/null +++ b/proto/xds/core/v3/cidr.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package xds.core.v3; + +import "xds/annotations/v3/status.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +option java_outer_classname = "CidrRangeProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.core.v3"; +option go_package = "github.com/cncf/xds/go/xds/core/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. +message CidrRange { + // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. 
+ string address_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Length of prefix, e.g. 0, 32. Defaults to 0 when unset. + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; +} diff --git a/proto/xds/core/v3/collection_entry.proto b/proto/xds/core/v3/collection_entry.proto new file mode 100644 index 0000000..c844d61 --- /dev/null +++ b/proto/xds/core/v3/collection_entry.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package xds.core.v3; + +import "google/protobuf/any.proto"; + +import "xds/annotations/v3/status.proto"; +import "xds/core/v3/resource_locator.proto"; + +import "validate/validate.proto"; + +option java_outer_classname = "CollectionEntryProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.core.v3"; +option go_package = "github.com/cncf/xds/go/xds/core/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// xDS collection resource wrapper. This encapsulates a xDS resource when +// appearing inside a list collection resource. List collection resources are +// regular Resource messages of type: +// +// .. code-block:: proto +// +// message Collection { +// repeated CollectionEntry resources = 1; +// } +// +message CollectionEntry { + // Inlined resource entry. + message InlineEntry { + // Optional name to describe the inlined resource. Resource names must match + // ``[a-zA-Z0-9_-\./]+`` (TODO(htuch): turn this into a PGV constraint once + // finalized, probably should be a RFC3986 pchar). This name allows + // reference via the #entry directive in ResourceLocator. + string name = 1 [(validate.rules).string.pattern = "^[0-9a-zA-Z_\\-\\.~:]+$"]; + + // The resource's logical version. It is illegal to have the same named xDS + // resource name at a given version with different resource payloads. + string version = 2; + + // The resource payload, including type URL. 
+    google.protobuf.Any resource = 3;
+  }
+
+  oneof resource_specifier {
+    option (validate.required) = true;
+
+    // A resource locator describing how the member resource is to be located.
+    ResourceLocator locator = 1;
+
+    // The resource is inlined in the list collection.
+    InlineEntry inline_entry = 2;
+  }
+}
diff --git a/proto/xds/core/v3/context_params.proto b/proto/xds/core/v3/context_params.proto
new file mode 100644
index 0000000..a42c7a8
--- /dev/null
+++ b/proto/xds/core/v3/context_params.proto
@@ -0,0 +1,23 @@
+syntax = "proto3";
+
+package xds.core.v3;
+
+import "xds/annotations/v3/status.proto";
+
+option java_outer_classname = "ContextParamsProto";
+option java_multiple_files = true;
+option java_package = "com.github.xds.core.v3";
+option go_package = "github.com/cncf/xds/go/xds/core/v3";
+
+option (xds.annotations.v3.file_status).work_in_progress = true;
+
+// Additional parameters that can be used to select resource variants. These include any
+// global context parameters, per-resource type client feature capabilities and per-resource
+// type functional attributes. All per-resource type attributes will be `xds.resource.`
+// prefixed and some of these are documented below:
+//
+// `xds.resource.listening_address`: The value is "IP:port" (e.g. "10.1.1.3:8080") which is
+// the listening address of a Listener. Used in a Listener resource query.
+message ContextParams {
+  map<string, string> params = 1;
+}
diff --git a/proto/xds/core/v3/extension.proto b/proto/xds/core/v3/extension.proto
new file mode 100644
index 0000000..dd489eb
--- /dev/null
+++ b/proto/xds/core/v3/extension.proto
@@ -0,0 +1,26 @@
+syntax = "proto3";
+
+package xds.core.v3;
+
+option java_outer_classname = "ExtensionProto";
+option java_multiple_files = true;
+option java_package = "com.github.xds.core.v3";
+option go_package = "github.com/cncf/xds/go/xds/core/v3";
+
+import "validate/validate.proto";
+import "google/protobuf/any.proto";
+
+// Message type for extension configuration.
+message TypedExtensionConfig { + // The name of an extension. This is not used to select the extension, instead + // it serves the role of an opaque identifier. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // The typed config for the extension. The type URL will be used to identify + // the extension. In the case that the type URL is *xds.type.v3.TypedStruct* + // (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type + // URL of *TypedStruct* will be utilized. See the + // :ref:`extension configuration overview + // ` for further details. + google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}]; +} diff --git a/proto/xds/core/v3/resource.proto b/proto/xds/core/v3/resource.proto new file mode 100644 index 0000000..dc06279 --- /dev/null +++ b/proto/xds/core/v3/resource.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package xds.core.v3; + +import "google/protobuf/any.proto"; + +import "xds/annotations/v3/status.proto"; +import "xds/core/v3/resource_name.proto"; + +option java_outer_classname = "ResourceProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.core.v3"; +option go_package = "github.com/cncf/xds/go/xds/core/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// xDS resource wrapper. This encapsulates a xDS resource when appearing in an +// xDS transport discovery response or when accessed as a filesystem object. +message Resource { + // Resource name. This may be omitted for filesystem resources. + ResourceName name = 1; + + // The resource's logical version. It is illegal to have the same named xDS + // resource name at a given version with different resource payloads. + string version = 2; + + // The resource payload, including type URL. 
+ google.protobuf.Any resource = 3; +} diff --git a/proto/xds/core/v3/resource_locator.proto b/proto/xds/core/v3/resource_locator.proto new file mode 100644 index 0000000..9b40d52 --- /dev/null +++ b/proto/xds/core/v3/resource_locator.proto @@ -0,0 +1,118 @@ +syntax = "proto3"; + +package xds.core.v3; + +import "xds/annotations/v3/status.proto"; +import "xds/core/v3/context_params.proto"; + +import "validate/validate.proto"; + +option java_outer_classname = "ResourceLocatorProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.core.v3"; +option go_package = "github.com/cncf/xds/go/xds/core/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// xDS resource locators identify a xDS resource name and instruct the +// data-plane load balancer on how the resource may be located. +// +// Resource locators have a canonical xdstp:// URI representation: +// +// xdstp://{authority}/{type_url}/{id}?{context_params}{#directive,*} +// +// where context_params take the form of URI query parameters. +// +// Resource locators have a similar canonical http:// URI representation: +// +// http://{authority}/{type_url}/{id}?{context_params}{#directive,*} +// +// Resource locators also have a simplified file:// URI representation: +// +// file:///{id}{#directive,*} +// +message ResourceLocator { + enum Scheme { + XDSTP = 0; + HTTP = 1; + FILE = 2; + } + + // URI scheme. + Scheme scheme = 1 [(validate.rules).enum = {defined_only: true}]; + + // Opaque identifier for the resource. Any '/' will not be escaped during URI + // encoding and will form part of the URI path. This may end + // with ‘*’ for glob collection references. + string id = 2; + + // Logical authority for resource (not necessarily transport network address). + // Authorities are opaque in the xDS API, data-plane load balancers will map + // them to concrete network transports such as an xDS management server, e.g. + // via envoy.config.core.v3.ConfigSource. 
+ string authority = 3; + + // Fully qualified resource type (as in type URL without types.googleapis.com/ + // prefix). + string resource_type = 4 [(validate.rules).string = {min_len: 1}]; + + oneof context_param_specifier { + // Additional parameters that can be used to select resource variants. + // Matches must be exact, i.e. all context parameters must match exactly and + // there must be no additional context parameters set on the matched + // resource. + ContextParams exact_context = 5; + + // .. space reserved for future potential matchers, e.g. CEL expressions. + } + + // Directives provide information to data-plane load balancers on how xDS + // resource names are to be interpreted and potentially further resolved. For + // example, they may provide alternative resource locators for when primary + // resolution fails. Directives are not part of resource names and do not + // appear in a xDS transport discovery request. + // + // When encoding to URIs, directives take the form: + // + // = + // + // For example, we can have alt=xdstp://foo/bar or entry=some%20thing. Each + // directive value type may have its own string encoding, in the case of + // ResourceLocator there is a recursive URI encoding. + // + // Percent encoding applies to the URI encoding of the directive value. + // Multiple directives are comma-separated, so the reserved characters that + // require percent encoding in a directive value are [',', '#', '[', ']', + // '%']. These are the RFC3986 fragment reserved characters with the addition + // of the xDS scheme specific ','. See + // https://tools.ietf.org/html/rfc3986#page-49 for further details on URI ABNF + // and reserved characters. + message Directive { + oneof directive { + option (validate.required) = true; + + // An alternative resource locator for fallback if the resource is + // unavailable. 
For example, take the resource locator: + // + // xdstp://foo/some-type/some-route-table#alt=xdstp://bar/some-type/another-route-table + // + // If the data-plane load balancer is unable to reach `foo` to fetch the + // resource, it will fallback to `bar`. Alternative resources do not need + // to have equivalent content, but they should be functional substitutes. + ResourceLocator alt = 1; + + // List collections support inlining of resources via the entry field in + // Resource. These inlined Resource objects may have an optional name + // field specified. When specified, the entry directive allows + // ResourceLocator to directly reference these inlined resources, e.g. + // xdstp://.../foo#entry=bar. + string entry = 2 [(validate.rules).string = {min_len: 1, pattern: "^[0-9a-zA-Z_\\-\\./~:]+$"}]; + } + } + + // A list of directives that appear in the xDS resource locator #fragment. + // + // When encoding to URI form, directives are percent encoded with comma + // separation. + repeated Directive directives = 6; +} diff --git a/proto/xds/core/v3/resource_name.proto b/proto/xds/core/v3/resource_name.proto new file mode 100644 index 0000000..0f3d997 --- /dev/null +++ b/proto/xds/core/v3/resource_name.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package xds.core.v3; + +import "xds/annotations/v3/status.proto"; +import "xds/core/v3/context_params.proto"; + +import "validate/validate.proto"; + +option java_outer_classname = "ResourceNameProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.core.v3"; +option go_package = "github.com/cncf/xds/go/xds/core/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// xDS resource name. This has a canonical xdstp:// URI representation: +// +// xdstp://{authority}/{type_url}/{id}?{context_params} +// +// where context_params take the form of URI query parameters. +// +// A xDS resource name fully identifies a network resource for transport +// purposes. 
xDS resource names in this form appear only in discovery +// request/response messages used with the xDS transport. +message ResourceName { + // Opaque identifier for the resource. Any '/' will not be escaped during URI + // encoding and will form part of the URI path. + string id = 1; + + // Logical authority for resource (not necessarily transport network address). + // Authorities are opaque in the xDS API, data-plane load balancers will map + // them to concrete network transports such as an xDS management server. + string authority = 2; + + // Fully qualified resource type (as in type URL without types.googleapis.com/ + // prefix). + string resource_type = 3 [(validate.rules).string = {min_len: 1}]; + + // Additional parameters that can be used to select resource variants. + ContextParams context = 4; +} diff --git a/proto/xds/type/matcher/v3/cel.proto b/proto/xds/type/matcher/v3/cel.proto new file mode 100644 index 0000000..a45af95 --- /dev/null +++ b/proto/xds/type/matcher/v3/cel.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/type/v3/cel.proto"; +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "CelProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +// [#protodoc-title: Common Expression Language (CEL) matchers] + +// Performs a match by evaluating a `Common Expression Language +// `_ (CEL) expression against the standardized set of +// :ref:`HTTP attributes ` specified via ``HttpAttributesCelMatchInput``. +// +// .. attention:: +// +// The match is ``true``, iff the result of the evaluation is a bool AND true. +// In all other cases, the match is ``false``, including but not limited to: non-bool types, +// ``false``, ``null``, ``int(1)``, etc. +// In case CEL expression raises an error, the result of the evaluation is interpreted "no match". 
+// +// Refer to :ref:`Unified Matcher API ` documentation +// for usage details. +// +// [#comment: envoy.matching.matchers.cel_matcher] +message CelMatcher { + // Either parsed or checked representation of the CEL program. + type.v3.CelExpression expr_match = 1 [(validate.rules).message = {required: true}]; + + // Free-form description of the CEL AST, e.g. the original expression text, to be + // used for debugging assistance. + string description = 2; +} diff --git a/proto/xds/type/matcher/v3/domain.proto b/proto/xds/type/matcher/v3/domain.proto new file mode 100644 index 0000000..06f11d7 --- /dev/null +++ b/proto/xds/type/matcher/v3/domain.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/annotations/v3/status.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "ServerNameMatcherProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Server name matcher] + +// Matches a fully qualified server name against a set of domain +// names with optional wildcards. +message ServerNameMatcher { + // Specifies a set of exact and wildcard domains and a match action. The + // wildcard symbol ``*`` must appear at most once as the left-most part of + // the domain on a dot border. The wildcard matches one or more non-empty + // domain parts. + message DomainMatcher { + // A non-empty set of domain names with optional wildcards, e.g. + // ``www.example.com``, ``*.com``, or ``*``. + repeated string domains = 1 [ (validate.rules).repeated = {min_items : 1} ]; + + // Match action to apply when the server name matches any of the domain + // names in the matcher. + Matcher.OnMatch on_match = 2; + } + + // Match a server name by multiple domain matchers. 
Each domain, exact or + // wildcard, must appear at most once across all the domain matchers. + // + // The server name will be matched against all wildcard domains starting from + // the longest suffix, i.e. ``www.example.com`` input will be first matched + // against ``www.example.com``, then ``*.example.com``, then ``*.com``, then + // ``*``, until the associated matcher action accepts the input. Note that + // wildcards must be on a dot border, and values like ``*w.example.com`` are + // invalid. + repeated DomainMatcher domain_matchers = 1; +} diff --git a/proto/xds/type/matcher/v3/http_inputs.proto b/proto/xds/type/matcher/v3/http_inputs.proto new file mode 100644 index 0000000..5709d64 --- /dev/null +++ b/proto/xds/type/matcher/v3/http_inputs.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "HttpInputsProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +// [#protodoc-title: Common HTTP Inputs] + +// Specifies that matching should be performed on the set of :ref:`HTTP attributes +// `. +// +// The attributes will be exposed via `Common Expression Language +// `_ runtime to associated CEL matcher. +// +// Refer to :ref:`Unified Matcher API ` documentation +// for usage details. 
+// +// [#comment: envoy.matching.inputs.cel_data_input] +message HttpAttributesCelMatchInput { +} diff --git a/proto/xds/type/matcher/v3/ip.proto b/proto/xds/type/matcher/v3/ip.proto new file mode 100644 index 0000000..ad3ab06 --- /dev/null +++ b/proto/xds/type/matcher/v3/ip.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/annotations/v3/status.proto"; +import "xds/core/v3/cidr.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "IPMatcherProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: IP matcher] + +// Matches a specific IP address against a set of possibly overlapping subnets using a trie. +message IPMatcher { + // Specifies a list of IP address ranges and a match action. + message IPRangeMatcher { + // A non-empty set of CIDR ranges. + repeated core.v3.CidrRange ranges = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Match action to apply when the IP address is within one of the CIDR ranges. + Matcher.OnMatch on_match = 2; + + // Indicates whether this match option should be considered if there is a + // more specific matcher. Exclusive matchers are not selected whenever a + // more specific matcher exists (e.g. matcher with a longer prefix) even + // when the more specific matcher fails its nested match condition. + // Non-exclusive matchers are considered if the more specific matcher + // exists but its nested match condition does not entirely match. + // Non-exclusive matchers are selected in the order of their specificity + // first (longest prefix first), then the order of declaration next. 
+    //
+    // For example, consider two range matchers: an exclusive matcher *X* on
+    // ``0.0.0.0/0`` and a matcher *Y* on ``192.0.0.0/2`` with a nested match
+    // condition *Z*. For the input IP ``192.168.0.1`` matcher *Y* is the most
+    // specific. If its nested match condition *Z* does not accept the input,
+    // then the less specific matcher *X* does not apply either despite the
+    // input being within the range, because matcher *X* is exclusive.
+    //
+    // The opposite is true if matcher *X* is not marked as exclusive. In that
+    // case matcher *X* always matches whenever matcher *Y* rejects the input.
+    bool exclusive = 3;
+  }
+
+  // Match IP address by CIDR ranges.
+  repeated IPRangeMatcher range_matchers = 1;
+}
diff --git a/proto/xds/type/matcher/v3/matcher.proto b/proto/xds/type/matcher/v3/matcher.proto
new file mode 100644
index 0000000..cc03ff6
--- /dev/null
+++ b/proto/xds/type/matcher/v3/matcher.proto
@@ -0,0 +1,144 @@
+syntax = "proto3";
+
+package xds.type.matcher.v3;
+
+import "xds/core/v3/extension.proto";
+import "xds/type/matcher/v3/string.proto";
+
+import "validate/validate.proto";
+
+option java_package = "com.github.xds.type.matcher.v3";
+option java_outer_classname = "MatcherProto";
+option java_multiple_files = true;
+option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3";
+
+// [#protodoc-title: Unified Matcher API]
+
+// A matcher, which may traverse a matching tree in order to result in a match action.
+// During matching, the tree will be traversed until a match is found, or if no match
+// is found the action specified by the most specific on_no_match will be evaluated.
+// As an on_no_match might result in another matching tree being evaluated, this process
+// might repeat several times until the final OnMatch (or no match) is decided.
+message Matcher {
+  // What to do if a match is successful.
+  message OnMatch {
+    oneof on_match {
+      option (validate.required) = true;
+
+      // Nested matcher to evaluate.
+ // If the nested matcher does not match and does not specify + // on_no_match, then this matcher is considered not to have + // matched, even if a predicate at this level or above returned + // true. + Matcher matcher = 1; + + // Protocol-specific action to take. + core.v3.TypedExtensionConfig action = 2; + } + + // If true and the Matcher matches, the action will be taken but the caller + // will behave as if the Matcher did not match. A subsequent matcher or + // on_no_match action will be used instead. + // This field is not supported in all contexts in which the matcher API is + // used. If this field is set in a context in which it's not supported, + // the resource will be rejected. + bool keep_matching = 3; + } + + // A linear list of field matchers. + // The field matchers are evaluated in order, and the first match + // wins. + message MatcherList { + // Predicate to determine if a match is successful. + message Predicate { + // Predicate for a single input field. + message SinglePredicate { + // Protocol-specific specification of input field to match on. + // [#extension-category: envoy.matching.common_inputs] + core.v3.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}]; + + oneof matcher { + option (validate.required) = true; + + // Built-in string matcher. + type.matcher.v3.StringMatcher value_match = 2; + + // Extension for custom matching logic. + // [#extension-category: envoy.matching.input_matchers] + core.v3.TypedExtensionConfig custom_match = 3; + } + } + + // A list of two or more matchers. Used to allow using a list within a oneof. + message PredicateList { + repeated Predicate predicate = 1 [(validate.rules).repeated = {min_items: 2}]; + } + + oneof match_type { + option (validate.required) = true; + + // A single predicate to evaluate. + SinglePredicate single_predicate = 1; + + // A list of predicates to be OR-ed together. + PredicateList or_matcher = 2; + + // A list of predicates to be AND-ed together. 
+        PredicateList and_matcher = 3;
+
+        // The invert of a predicate
+        Predicate not_matcher = 4;
+      }
+    }
+
+    // An individual matcher.
+    message FieldMatcher {
+      // Determines if the match succeeds.
+      Predicate predicate = 1 [(validate.rules).message = {required: true}];
+
+      // What to do if the match succeeds.
+      OnMatch on_match = 2 [(validate.rules).message = {required: true}];
+    }
+
+    // A list of matchers. First match wins.
+    repeated FieldMatcher matchers = 1 [(validate.rules).repeated = {min_items: 1}];
+  }
+
+  message MatcherTree {
+    // A map of configured matchers. Used to allow using a map within a oneof.
+    message MatchMap {
+      map<string, OnMatch> map = 1 [(validate.rules).map = {min_pairs: 1}];
+    }
+
+    // Protocol-specific specification of input field to match on.
+    core.v3.TypedExtensionConfig input = 1 [(validate.rules).message = {required: true}];
+
+    // Exact or prefix match maps in which to look up the input value.
+    // If the lookup succeeds, the match is considered successful, and
+    // the corresponding OnMatch is used.
+    oneof tree_type {
+      option (validate.required) = true;
+
+      MatchMap exact_match_map = 2;
+
+      // Longest matching prefix wins.
+      MatchMap prefix_match_map = 3;
+
+      // Extension for custom matching logic.
+      core.v3.TypedExtensionConfig custom_match = 4;
+    }
+  }
+
+  oneof matcher_type {
+    // A linear list of matchers to evaluate.
+    MatcherList matcher_list = 1;
+
+    // A match tree to evaluate.
+    MatcherTree matcher_tree = 2;
+  }
+
+  // Optional OnMatch to use if no matcher above matched (e.g., if there are no matchers specified
+  // above, or if none of the matches specified above succeeded).
+  // If no matcher above matched and this field is not populated, the match will be considered unsuccessful.
+ OnMatch on_no_match = 3; +} diff --git a/proto/xds/type/matcher/v3/range.proto b/proto/xds/type/matcher/v3/range.proto new file mode 100644 index 0000000..5834009 --- /dev/null +++ b/proto/xds/type/matcher/v3/range.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/type/v3/range.proto"; +import "xds/type/matcher/v3/matcher.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "RangeProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +// [#protodoc-title: Range matcher] + +// Specifies a set of ranges for matching an int64 number and the associated +// match actions. +message Int64RangeMatcher { + // Specifies a list of number ranges and a match action. + message RangeMatcher { + // A non-empty set of int64 ranges. + repeated xds.type.v3.Int64Range ranges = 1 + [(validate.rules).repeated = { min_items: 1 }]; + + // Match action to apply when the input number is within one of the ranges. + Matcher.OnMatch on_match = 2; + } + + // Match a number by a list of number ranges. If multiple ranges contain the + // input number, then the first action in this list is taken. + repeated RangeMatcher range_matchers = 1; +} + +// Specifies a set of ranges for matching an int32 number and the associated +// match actions. +message Int32RangeMatcher { + // Specifies a list of number ranges and a match action. + message RangeMatcher { + // A non-empty set of int32 ranges. + repeated xds.type.v3.Int32Range ranges = 1 + [(validate.rules).repeated = { min_items: 1 }]; + + // Match action to apply when the input number is within one of the ranges. + Matcher.OnMatch on_match = 2; + } + + // Match a number by a list of number ranges. If multiple ranges contain the + // input number, then the first action in this list is taken. 
+ repeated RangeMatcher range_matchers = 1; +} + +// Specifies a set of ranges for matching a double number and the associated +// match actions. +message DoubleRangeMatcher { + // Specifies a list of number ranges and a match action. + message RangeMatcher { + // A non-empty set of double ranges. + repeated xds.type.v3.DoubleRange ranges = 1 + [(validate.rules).repeated = { min_items: 1 }]; + + // Match action to apply when the input number is within one of the ranges. + Matcher.OnMatch on_match = 2; + } + + // Match a number by a list of number ranges. If multiple ranges contain the + // input number, then the first action in this list is taken. + repeated RangeMatcher range_matchers = 1; +} diff --git a/proto/xds/type/matcher/v3/regex.proto b/proto/xds/type/matcher/v3/regex.proto new file mode 100644 index 0000000..3ff4ca9 --- /dev/null +++ b/proto/xds/type/matcher/v3/regex.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +// [#protodoc-title: Regex matcher] + +// A regex matcher designed for safety when used with untrusted input. +message RegexMatcher { + // Google's `RE2 `_ regex engine. The regex + // string must adhere to the documented `syntax + // `_. The engine is designed to + // complete execution in linear time as well as limit the amount of memory + // used. + // + // Envoy supports program size checking via runtime. The runtime keys + // `re2.max_program_size.error_level` and `re2.max_program_size.warn_level` + // can be set to integers as the maximum program size or complexity that a + // compiled regex can have before an exception is thrown or a warning is + // logged, respectively. 
`re2.max_program_size.error_level` defaults to 100, + // and `re2.max_program_size.warn_level` has no default if unset (will not + // check/log a warning). + // + // Envoy emits two stats for tracking the program size of regexes: the + // histogram `re2.program_size`, which records the program size, and the + // counter `re2.exceeded_warn_level`, which is incremented each time the + // program size exceeds the warn level threshold. + message GoogleRE2 {} + + oneof engine_type { + option (validate.required) = true; + + // Google's RE2 regex engine. + GoogleRE2 google_re2 = 1 [ (validate.rules).message = {required : true} ]; + } + + // The regex match string. The string must be supported by the configured + // engine. + string regex = 2 [ (validate.rules).string = {min_len : 1} ]; +} diff --git a/proto/xds/type/matcher/v3/string.proto b/proto/xds/type/matcher/v3/string.proto new file mode 100644 index 0000000..e58cb41 --- /dev/null +++ b/proto/xds/type/matcher/v3/string.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package xds.type.matcher.v3; + +import "xds/core/v3/extension.proto"; +import "xds/type/matcher/v3/regex.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.matcher.v3"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; + +// [#protodoc-title: String matcher] + +// Specifies the way to match a string. +// [#next-free-field: 9] +message StringMatcher { + oneof match_pattern { + option (validate.required) = true; + + // The input string must match exactly the string specified here. + // + // Examples: + // + // * *abc* only matches the value *abc*. + string exact = 1; + + // The input string must have the prefix specified here. + // Note: empty prefix is not allowed, please use regex instead. 
+    //
+    // Examples:
+    //
+    // * *abc* matches the value *abc.xyz*
+    string prefix = 2 [(validate.rules).string = {min_len: 1}];
+
+    // The input string must have the suffix specified here.
+    // Note: empty suffix is not allowed, please use regex instead.
+    //
+    // Examples:
+    //
+    // * *abc* matches the value *xyz.abc*
+    string suffix = 3 [(validate.rules).string = {min_len: 1}];
+
+    // The input string must match the regular expression specified here.
+    RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}];
+
+    // The input string must have the substring specified here.
+    // Note: empty contains match is not allowed, please use regex instead.
+    //
+    // Examples:
+    //
+    // * *abc* matches the value *xyz.abc.def*
+    string contains = 7 [(validate.rules).string = {min_len: 1}];
+
+    // Use an extension as the matcher type.
+    // [#extension-category: envoy.string_matcher]
+    xds.core.v3.TypedExtensionConfig custom = 8;
+  }
+
+  // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no
+  // effect for the safe_regex match.
+  // For example, the matcher *data* will match both input string *Data* and *data* if set to true.
+  bool ignore_case = 6;
+}
+
+// Specifies a list of ways to match a string.
+message ListStringMatcher { + repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; +} diff --git a/proto/xds/type/v3/cel.proto b/proto/xds/type/v3/cel.proto new file mode 100644 index 0000000..0439904 --- /dev/null +++ b/proto/xds/type/v3/cel.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; + +package xds.type.v3; + +import "google/api/expr/v1alpha1/checked.proto"; +import "google/api/expr/v1alpha1/syntax.proto"; +import "cel/expr/checked.proto"; +import "cel/expr/syntax.proto"; +import "google/protobuf/wrappers.proto"; + +import "xds/annotations/v3/status.proto"; + +import "validate/validate.proto"; + +option java_package = "com.github.xds.type.v3"; +option java_outer_classname = "CelProto"; +option java_multiple_files = true; +option go_package = "github.com/cncf/xds/go/xds/type/v3"; + +option (xds.annotations.v3.file_status).work_in_progress = true; + +// [#protodoc-title: Common Expression Language (CEL)] + +// Either parsed or checked representation of the `Common Expression Language +// `_ (CEL) program. +message CelExpression { + oneof expr_specifier { + // Parsed expression in abstract syntax tree (AST) form. + // + // Deprecated -- use ``cel_expr_parsed`` field instead. + // If ``cel_expr_parsed`` or ``cel_expr_checked`` is set, this field is not used. + google.api.expr.v1alpha1.ParsedExpr parsed_expr = 1 [deprecated = true]; + + // Parsed expression in abstract syntax tree (AST) form that has been successfully type checked. + // + // Deprecated -- use ``cel_expr_checked`` field instead. + // If ``cel_expr_parsed`` or ``cel_expr_checked`` is set, this field is not used. + google.api.expr.v1alpha1.CheckedExpr checked_expr = 2 [deprecated = true]; + } + + // Parsed expression in abstract syntax tree (AST) form. + // + // If ``cel_expr_checked`` is set, this field is not used. + cel.expr.ParsedExpr cel_expr_parsed = 3; + + // Parsed expression in abstract syntax tree (AST) form that has been successfully type checked. 
+  //
+  // If set, takes precedence over ``cel_expr_parsed``.
+  cel.expr.CheckedExpr cel_expr_checked = 4;
+
+  // Unparsed expression in string form. For example, ``request.headers['x-env'] == 'prod'`` will
+  // get ``x-env`` header value and compare it with ``prod``.
+  // Check the `Common Expression Language <https://github.com/google/cel-spec>`_ for more details.
+  //
+  // If set, takes precedence over ``cel_expr_parsed`` and ``cel_expr_checked``.
+  string cel_expr_string = 5;
+}
+
+// Extracts a string by evaluating a `Common Expression Language
+// <https://github.com/google/cel-spec>`_ (CEL) expression against the standardized set of
+// :ref:`HTTP attributes <arch_overview_attributes>`.
+//
+// .. attention::
+//
+//   Besides CEL evaluation raising an error explicitly, CEL program returning a type other than
+//   the ``string``, or not returning anything, are considered an error as well.
+//
+// [#comment:TODO(sergiitk): When implemented, add the extension tag.]
+message CelExtractString {
+  // The CEL expression used to extract a string from the CEL environment.
+  // The expression must evaluate to a ``string``.
+  CelExpression expr_extract = 1 [(validate.rules).message = {required: true}];
+
+  // If CEL expression evaluates to an error, this value will be returned to the caller.
+  // If not set, the error is propagated to the caller.
+  google.protobuf.StringValue default_value = 2;
+}
diff --git a/proto/xds/type/v3/range.proto b/proto/xds/type/v3/range.proto
new file mode 100644
index 0000000..8fc97ab
--- /dev/null
+++ b/proto/xds/type/v3/range.proto
@@ -0,0 +1,40 @@
+syntax = "proto3";
+
+package xds.type.v3;
+
+option java_package = "com.github.xds.type.v3";
+option java_outer_classname = "RangeProto";
+option java_multiple_files = true;
+option go_package = "github.com/cncf/xds/go/xds/type/v3";
+
+// [#protodoc-title: Number range]
+
+// Specifies the int64 start and end of the range using half-open interval
+// semantics [start, end).
+message Int64Range { + // start of the range (inclusive) + int64 start = 1; + + // end of the range (exclusive) + int64 end = 2; +} + +// Specifies the int32 start and end of the range using half-open interval +// semantics [start, end). +message Int32Range { + // start of the range (inclusive) + int32 start = 1; + + // end of the range (exclusive) + int32 end = 2; +} + +// Specifies the double start and end of the range using half-open interval +// semantics [start, end). +message DoubleRange { + // start of the range (inclusive) + double start = 1; + + // end of the range (exclusive) + double end = 2; +} diff --git a/proto/xds/type/v3/typed_struct.proto b/proto/xds/type/v3/typed_struct.proto new file mode 100644 index 0000000..40d0a8d --- /dev/null +++ b/proto/xds/type/v3/typed_struct.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package xds.type.v3; + +option java_outer_classname = "TypedStructProto"; +option java_multiple_files = true; +option java_package = "com.github.xds.type.v3"; +option go_package = "github.com/cncf/xds/go/xds/type/v3"; + +import "google/protobuf/struct.proto"; + +// A TypedStruct contains an arbitrary JSON serialized protocol buffer message with a URL that +// describes the type of the serialized message. This is very similar to google.protobuf.Any, +// instead of having protocol buffer binary, this employs google.protobuf.Struct as value. +// +// This message is intended to be embedded inside Any, so it shouldn't be directly referred +// from other UDPA messages. +// +// When packing an opaque extension config, packing the expected type into Any is preferred +// wherever possible for its efficiency. TypedStruct should be used only if a proto descriptor +// is not available, for example if: +// +// - A control plane sends opaque message that is originally from external source in human readable +// format such as JSON or YAML. 
+// - The control plane doesn't have the knowledge of the protocol buffer schema hence it cannot
+//   serialize the message in protocol buffer binary format.
+// - The DPLB doesn't have the knowledge of the protocol buffer schema its plugin or extension
+//   uses. This has to be indicated in the DPLB capability negotiation.
+//
+// When a DPLB receives a TypedStruct in Any, it should:
+// - Check if the type_url of the TypedStruct matches the type the extension expects.
+// - Convert value to the type described in type_url and perform validation.
+//
+// TODO(lizan): Figure out how TypedStruct should be used with DPLB extensions that doesn't link
+// protobuf descriptor with DPLB itself, (e.g. gRPC LB Plugin, Envoy WASM extensions).
+message TypedStruct {
+  // A URL that uniquely identifies the type of the serialized protocol buffer message.
+  // This has same semantics and format described in google.protobuf.Any:
+  // https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto
+  string type_url = 1;
+
+  // A JSON representation of the above specified type.
+  google.protobuf.Struct value = 2;
+}
diff --git a/spanner_integration.md b/spanner_integration.md
deleted file mode 100644
index 73cbbbb..0000000
--- a/spanner_integration.md
+++ /dev/null
@@ -1,309 +0,0 @@
-# Integrating async-grpc with Google Cloud Spanner
-
-## Current State Analysis
-
-### How ruby-spanner Uses gRPC
-
-Looking at `google-cloud-spanner/lib/google/cloud/spanner/service.rb`:
-
-```ruby
-def channel
-  require "grpc"
-  GRPC::Core::Channel.new host, chan_args, chan_creds
-end
-
-def chan_creds
-  return credentials if insecure?
- require "grpc" - GRPC::Core::ChannelCredentials.new.compose \ - GRPC::Core::CallCredentials.new credentials.client.updater_proc -end - -def service - return mocked_service if mocked_service - @service ||= - V1::Spanner::Client.new do |config| - config.credentials = channel # <-- Passes gRPC channel - config.quota_project = @quota_project - config.timeout = timeout if timeout - config.endpoint = host if host - config.universe_domain = @universe_domain - config.lib_name = lib_name_with_prefix - config.lib_version = Google::Cloud::Spanner::VERSION - config.metadata = { "google-cloud-resource-prefix" => "projects/#{@project}" } - end -end -``` - -### Key Dependencies - -1. **gRPC C Extension** (`grpc` gem) - - `GRPC::Core::Channel` - connection management - - `GRPC::Core::ChannelCredentials` - TLS/SSL setup - - `GRPC::Core::CallCredentials` - per-call auth (OAuth2 tokens) - -2. **Generated Client Stubs** (from `google-cloud-spanner-v1` gem) - - `Google::Cloud::Spanner::V1::Spanner::Client` - - Generated by `protoc` with `grpc` plugin - - Expects a gRPC channel as credentials - -## Replacement Strategy - -### Option 1: Drop-In Channel Replacement (Most Feasible) - -Create an adapter that implements the `GRPC::Core::Channel` interface: - -```ruby -module Async - module GRPC - # Adapter that makes Async::GRPC::Client compatible with - # Google's generated gRPC client stubs - class ChannelAdapter - def initialize(endpoint, channel_args = {}, channel_creds = nil) - @endpoint = endpoint - @client = Async::GRPC::Client.new(endpoint) - @channel_creds = channel_creds - end - - # Called by generated stubs to make RPC calls - # Must implement the gRPC::Core::Channel interface - def request_response(path, request, marshal, unmarshal, deadline: nil, metadata: {}) - # Parse service/method from path - # path format: "/google.spanner.v1.Spanner/ExecuteStreamingSql" - parts = path.split("/").last(2) - service = parts[0].split(".").last - method = parts[1] - - # Add auth metadata - if 
@channel_creds - auth_metadata = @channel_creds.call(method) - metadata.merge!(auth_metadata) - end - - # Marshal request - request_data = marshal.call(request) - - # Make the call - response_data = Async do - @client.unary( - service, - method, - request_data, - metadata: metadata, - timeout: deadline - ) - end.wait - - # Unmarshal response - unmarshal.call(response_data) - end - - # For server-streaming RPCs - def request_stream(path, request, marshal, unmarshal, deadline: nil, metadata: {}) - # Similar but returns enumerator - Enumerator.new do |yielder| - Async do - @client.server_streaming do |response_data| - yielder << unmarshal.call(response_data) - end - end.wait - end - end - - # For client-streaming RPCs - def stream_request(path, marshal, unmarshal, deadline: nil, metadata: {}) - # Returns [input_stream, output_future] - end - - # For bidirectional streaming RPCs - def stream_stream(path, marshal, unmarshal, deadline: nil, metadata: {}) - # Returns [input_stream, output_enumerator] - end - - def close - @client.close - end - end - end -end -``` - -### Option 2: Regenerate Client Stubs (More Invasive) - -Instead of using Google's generated stubs, regenerate them with our own generator: - -```bash -# Generate Spanner service stubs using protocol-grpc -bake protocol:grpc:generate google/spanner/v1/spanner.proto - -# This would generate: -# - lib/google/spanner/v1/spanner_grpc.rb (client stubs) -# - Compatible with Async::GRPC::Client -``` - -Then modify `service.rb`: - -```ruby -require "google/spanner/v1/spanner_grpc" # Our generated stubs - -def service - @service ||= begin - endpoint = Async::HTTP::Endpoint.parse("https://#{host}") - client = Async::GRPC::Client.new(endpoint) - - Google::Spanner::V1::SpannerClient.new(client) # Our stub - end -end -``` - -### Option 3: Monkey-Patch Mock Interface (Testing/Development Only) - -Use Spanner's built-in mocking capability: - -```ruby -# In service.rb -attr_accessor :mocked_service # Already exists! 
- -# Our replacement -class AsyncGRPCSpannerService - def initialize(client) - @client = client - end - - # Implement all Spanner RPC methods - def execute_streaming_sql(request, options = {}) - # Call via async-grpc - end - - def begin_transaction(request, options = {}) - # ... - end - - # ... implement all 20+ RPC methods -end - -# Usage -service = Google::Cloud::Spanner::Service.new -service.mocked_service = AsyncGRPCSpannerService.new(async_grpc_client) -``` - -## Required Interface - -To make this work, `Async::GRPC::Client` would need to support: - -### 1. Raw Binary Messages - -```ruby -# Currently: pass protobuf objects -client.unary(service, method, request_object, response_class: MyReply) - -# Need to support: pass raw binary -client.unary_binary(service, method, request_binary) # => response_binary -``` - -### 2. Marshal/Unmarshal Callbacks - -```ruby -client.unary( - service, - method, - request, - marshal: ->(obj){obj.to_proto}, - unmarshal: ->(data){MyReply.decode(data)} -) -``` - -### 3. Compatible Metadata/Headers - -Google's stubs expect specific metadata format (OAuth2 tokens, quota project, etc.) - -## Recommendation - -**Option 1 (Channel Adapter) is most feasible** because: - -1. ✅ No need to regenerate all Google API stubs -2. ✅ Works with existing `google-cloud-spanner-v1` gem -3. ✅ Minimal changes to Spanner gem -4. ✅ Can be done incrementally (test with one RPC at a time) -5. ✅ Falls back to standard gRPC if issues arise - -## Implementation Plan - -### Phase 1: Proof of Concept - -1. Implement `Async::GRPC::ChannelAdapter` -2. Test with simple unary RPC (e.g., `CreateSession`) -3. Verify it works end-to-end - -### Phase 2: Full Interface - -1. Implement all four RPC types (unary, client streaming, server streaming, bidirectional) -2. Handle auth metadata properly -3. Support deadlines and cancellation - -### Phase 3: Production Ready - -1. Handle all gRPC edge cases -2. Proper error mapping -3. Connection pooling -4. 
Performance testing - -## Challenges - -### 1. Generated Stub Format - -Google's generated stubs use Gapic (Google API Client) framework, which has its own conventions. We'd need to understand: -- Exact method signatures expected -- How streaming responses are yielded -- Error handling patterns - -### 2. Authentication - -Google Cloud uses: -- OAuth2 access tokens (refreshed automatically) -- Per-RPC credentials (added as metadata) -- Service account key files - -Our adapter must support this auth flow. - -### 3. Retry Logic - -Google's client has sophisticated retry logic: -- Exponential backoff -- Per-method retry policies -- Idempotency detection - -We'd need to preserve this behavior. - -### 4. Observability - -Google's clients have built-in: -- OpenTelemetry tracing -- Metrics/logging -- Quota tracking - -## Next Steps - -1. **Investigate Gapic internals**: Look at how `google-cloud-spanner-v1` generated code works -2. **Find hook points**: Identify where we can inject our channel -3. **Build minimal adapter**: Implement just enough to make one RPC work -4. **Benchmark**: Compare performance async-grpc vs standard gRPC - -## Benefits if Successful - -- ✅ Pure Ruby implementation (no C extension) -- ✅ Async-first design (better concurrency) -- ✅ Easier debugging (no C stack traces) -- ✅ Potentially better resource usage -- ✅ Could work on platforms where C extensions are problematic (e.g., JRuby, TruffleRuby) - -## Risks - -- ❌ Incomplete gRPC protocol implementation -- ❌ Performance might be worse than C extension -- ❌ Maintenance burden (keep up with gRPC spec changes) -- ❌ Edge cases we haven't thought of - - - - diff --git a/xds.md b/xds.md deleted file mode 100644 index 5948937..0000000 --- a/xds.md +++ /dev/null @@ -1,1458 +0,0 @@ -# xDS Support for Async::GRPC - -This document outlines the design and implementation of xDS (Discovery Service) support for `async-grpc`, enabling dynamic service discovery and configuration for gRPC clients. 
The design follows patterns established in `async-redis` (SentinelClient and ClusterClient) for service discovery and load balancing. - -## Overview - -xDS is a set of discovery APIs originally created for Envoy proxy and now adopted as a standard for dynamic configuration in gRPC and other systems. It provides a unified mechanism for service mesh control planes to configure data planes. - -### What is xDS? - -xDS consists of multiple discovery service APIs: - -- **LDS** (Listener Discovery Service) - Defines what ports/protocols to listen on -- **RDS** (Route Discovery Service) - Defines how requests are routed -- **CDS** (Cluster Discovery Service) - Defines logical upstream services -- **EDS** (Endpoint Discovery Service) - Defines actual IP:port backends -- **SDS** (Secret Discovery Service) - Distributes certificates and keys -- **TDS** (Transport Discovery Service) - Configures transport sockets -- **ECDS** (Extension Config Discovery Service) - Distributes extension configurations - -### Why xDS for gRPC? - -1. **Dynamic Service Discovery** - Discover backends without hardcoded addresses -2. **Load Balancing** - Intelligent client-side load balancing with health checking -3. **Traffic Management** - Sophisticated routing, retries, timeouts -4. **Security** - Dynamic certificate distribution and mTLS configuration -5. **Observability** - Standardized metrics and tracing integration -6. **Service Mesh Integration** - Compatible with Istio, Linkerd, etc. - -## Architecture - -### Design Pattern: Wrapper Client (Like SentinelClient/ClusterClient) - -Following the pattern from `async-redis`, xDS support is implemented as a **wrapper client** that handles discovery and load balancing, rather than modifying the base `Async::GRPC::Client` class. 
- -**Key Principles:** -- `XDS::Client` wraps `Async::GRPC::Client` instances -- Implements `Protocol::HTTP::Middleware` interface (same as `Async::GRPC::Client`) -- Lazy endpoint resolution (resolved on first use) -- Client caching per endpoint (reuse connections) -- Error handling with cache invalidation and retry - -### Component Structure - -``` -Async::GRPC::XDS -├── Client # Main wrapper client (like SentinelClient) -├── Context # Manages xDS state and subscriptions -├── DiscoveryClient # xDS API client (ADS or individual xDS APIs) -├── ResourceCache # Caches discovered resources -├── LoadBalancer # Client-side load balancing -├── HealthChecker # Endpoint health checking -└── Resources # Resource data models - ├── Listener - ├── RouteConfiguration - ├── Cluster - ├── ClusterLoadAssignment - └── Secret -``` - -## Core Components - -### 1. `Async::GRPC::XDS::Client` - -The main wrapper client that handles xDS discovery and load balancing. Similar to `SentinelClient` and `ClusterClient` in async-redis. 
- -```ruby -module Async - module GRPC - module XDS - # Wrapper client for xDS-enabled gRPC connections - # Follows the same pattern as Async::Redis::SentinelClient and ClusterClient - class Client < Protocol::HTTP::Middleware - # Raised when xDS configuration cannot be loaded - class ConfigurationError < StandardError - end - - # Raised when no endpoints are available - class NoEndpointsError < StandardError - end - - # Raised when cluster configuration cannot be reloaded - class ReloadError < StandardError - end - - # Create a new xDS client - # @parameter service_name [String] Target service name (e.g., "myservice") - # @parameter bootstrap [Hash, String, nil] Bootstrap config (hash, file path, or nil for default) - # @parameter headers [Protocol::HTTP::Headers] Default headers - # @parameter options [Hash] Additional options passed to underlying clients - def initialize(service_name, bootstrap: nil, headers: Protocol::HTTP::Headers.new, **options) - @service_name = service_name - @bootstrap = load_bootstrap(bootstrap) - @headers = headers - @options = options - - @context = Context.new(@bootstrap) - @load_balancer = nil - @clients = {} # Cache clients per endpoint (like ClusterClient caches node.client) - @mutex = Mutex.new - end - - # Resolve endpoints lazily (like SentinelClient.resolve_address) - # @returns [Array] Available endpoints - def resolve_endpoints - @mutex.synchronize do - unless @load_balancer - # Discover cluster via CDS - cluster = @context.discover_cluster(@service_name) - - # Discover endpoints via EDS - endpoints = @context.discover_endpoints(cluster) - - # Create load balancer - @load_balancer = LoadBalancer.new(@context, cluster, endpoints) - end - - @load_balancer.healthy_endpoints - end - end - - # Get a client for making calls (like ClusterClient.client_for) - # Resolves endpoints lazily and picks one via load balancer - # @returns [Async::GRPC::Client] gRPC client for selected endpoint - def client_for_call - endpoints = 
resolve_endpoints - raise NoEndpointsError, "No endpoints available for #{@service_name}" if endpoints.empty? - - # Pick endpoint via load balancer - endpoint = @load_balancer.pick - raise NoEndpointsError, "No healthy endpoints available" unless endpoint - - # Cache client per endpoint (like ClusterClient caches node.client) - @clients[endpoint] ||= begin - http_client = Async::HTTP::Client.new(endpoint) - Async::GRPC::Client.new(http_client, headers: @headers) - end - end - - # Implement Protocol::HTTP::Middleware interface - # This allows XDS::Client to be used anywhere Async::GRPC::Client is used - # @parameter request [Protocol::HTTP::Request] The HTTP request - # @returns [Protocol::HTTP::Response] The HTTP response - def call(request, attempts: 3) - # Get client for this call (load balanced) - client = client_for_call - - begin - client.call(request) - rescue Protocol::GRPC::Error => error - # Handle endpoint changes (like ClusterClient handles MOVED/ASK) - if error.status_code == Protocol::GRPC::Status::UNAVAILABLE - Console.warn(self, error) - - # Invalidate cache, reload configuration - invalidate_cache! 
- - attempts -= 1 - retry if attempts > 0 - end - - raise - rescue => error - # Network errors might indicate endpoint failure - Console.warn(self, error) - - # Invalidate this specific endpoint - invalidate_endpoint(client) - - attempts -= 1 - retry if attempts > 0 - - raise - end - end - - # Create a stub for the given interface - # Delegates to underlying client (maintains Async::GRPC::Client interface) - # @parameter interface_class [Class] Interface class (subclass of Protocol::GRPC::Interface) - # @parameter service_name [String] Service name (e.g., "hello.Greeter") - # @returns [Async::GRPC::Stub] Stub object with methods for each RPC - def stub(interface_class, service_name) - # Use a client to create stub (will be load balanced per call) - client = client_for_call - client.stub(interface_class, service_name) - end - - # Close xDS client and all connections - def close - @clients.each_value(&:close) - @clients.clear - @context.close - @load_balancer&.close - end - - private - - def load_bootstrap(bootstrap) - case bootstrap - when Hash - bootstrap - when String - load_bootstrap_file(bootstrap) - when nil - load_default_bootstrap - else - raise ArgumentError, "Invalid bootstrap: #{bootstrap.inspect}" - end - end - - def load_bootstrap_file(path) - raise ConfigurationError, "Bootstrap file not found: #{path}" unless File.exist?(path) - - require "json" - JSON.parse(File.read(path)) - rescue JSON::ParserError => error - raise ConfigurationError, "Invalid bootstrap JSON: #{error.message}" - end - - def load_default_bootstrap - # Try environment variable first - if path = ENV["GRPC_XDS_BOOTSTRAP"] - return load_bootstrap_file(path) - end - - # Try default location - default_path = File.expand_path("~/.config/grpc/bootstrap.json") - if File.exist?(default_path) - return load_bootstrap_file(default_path) - end - - raise ConfigurationError, "No bootstrap configuration found" - end - - def invalidate_cache! 
- @mutex.synchronize do - @clients.each_value(&:close) - @clients.clear - @load_balancer = nil - end - end - - def invalidate_endpoint(client) - @mutex.synchronize do - @clients.delete_if { |endpoint, cached_client| cached_client == client } - client.close - end - end - end - end - end -end -``` - -### 2. `Async::GRPC::XDS::Context` - -Manages xDS subscriptions and maintains discovered resource state. Similar to how `ClusterClient` manages cluster configuration. - -```ruby -module Async - module GRPC - module XDS - # Manages xDS subscriptions and maintains discovered resource state - class Context - # Initialize xDS context - # @parameter bootstrap [Hash] Bootstrap configuration - def initialize(bootstrap) - @bootstrap = bootstrap - @discovery_client = DiscoveryClient.new(bootstrap["xds_servers"].first) - @cache = ResourceCache.new - @subscriptions = {} # Track active subscriptions - @mutex = Mutex.new - end - - # Discover cluster for service (like ClusterClient.reload_cluster!) - # @parameter service_name [String] Service to discover - # @returns [Resources::Cluster] Cluster configuration - def discover_cluster(service_name) - @mutex.synchronize do - # Check cache first - if cluster = @cache.get_cluster(service_name) - return cluster - end - - # Subscribe to CDS if not already subscribed - unless @subscriptions[:cds] - @subscriptions[:cds] = subscribe_cds(service_name) - end - - # Wait for cluster to be discovered - # In practice, this might need async waiting - cluster = @cache.get_cluster(service_name) - raise ReloadError, "Failed to discover cluster: #{service_name}" unless cluster - - cluster - end - end - - # Discover endpoints for cluster (like ClusterClient discovers nodes) - # @parameter cluster [Resources::Cluster] Cluster configuration - # @returns [Array] Discovered endpoints - def discover_endpoints(cluster) - @mutex.synchronize do - # Check cache first - if endpoints = @cache.get_endpoints(cluster.name) - return endpoints - end - - # Subscribe to EDS 
if not already subscribed - unless @subscriptions[:"eds_#{cluster.name}"] - @subscriptions[:"eds_#{cluster.name}"] = subscribe_eds(cluster.name) - end - - # Wait for endpoints to be discovered - endpoints = @cache.get_endpoints(cluster.name) - raise ReloadError, "Failed to discover endpoints for cluster: #{cluster.name}" unless endpoints - - endpoints - end - end - - # Subscribe to CDS (Cluster Discovery Service) - # @parameter service_name [String] Service name - # @returns [Async::Task] Subscription task - def subscribe_cds(service_name) - @discovery_client.subscribe( - DiscoveryClient::CLUSTER_TYPE, - [service_name] - ) do |resources| - resources.each do |resource| - cluster = Resources::Cluster.new(resource) - @cache.update_cluster(cluster) - end - end - end - - # Subscribe to EDS (Endpoint Discovery Service) - # @parameter cluster_name [String] Cluster name - # @returns [Async::Task] Subscription task - def subscribe_eds(cluster_name) - @discovery_client.subscribe( - DiscoveryClient::ENDPOINT_TYPE, - [cluster_name] - ) do |resources| - resources.each do |resource| - assignment = Resources::ClusterLoadAssignment.new(resource) - endpoints = assignment.endpoints.map do |ep| - Async::HTTP::Endpoint.parse(ep.uri) - end - @cache.update_endpoints(cluster_name, endpoints) - end - end - end - - # Close all subscriptions - def close - @subscriptions.each_value(&:stop) - @subscriptions.clear - @discovery_client.close - end - end - end - end -end -``` - -### 3. `Async::GRPC::XDS::DiscoveryClient` - -Communicates with xDS control plane using ADS (Aggregated Discovery Service). 
- -```ruby -module Async - module GRPC - module XDS - # Client for xDS APIs (ADS or individual APIs) - class DiscoveryClient - # xDS API type URLs - LISTENER_TYPE = "type.googleapis.com/envoy.config.listener.v3.Listener" - ROUTE_TYPE = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" - CLUSTER_TYPE = "type.googleapis.com/envoy.config.cluster.v3.Cluster" - ENDPOINT_TYPE = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" - SECRET_TYPE = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret" - - # Initialize xDS discovery client - # @parameter server_config [Hash] xDS server configuration from bootstrap - def initialize(server_config) - @server_uri = server_config["server_uri"] - @channel_creds = build_credentials(server_config) - @node = build_node_info - @streams = {} - @versions = {} # Track version_info per type - @nonces = {} # Track nonces per type - @mutex = Mutex.new - end - - # Subscribe to resource type using ADS - # (Aggregated Discovery Service - single stream for all types) - # @parameter type_url [String] Resource type URL - # @parameter resource_names [Array] Resources to subscribe to - # @yields [Array] Updated resources - # @returns [Async::Task] Subscription task - def subscribe(type_url, resource_names, &block) - stream = get_or_create_stream - - request = build_discovery_request( - type_url: type_url, - resource_names: resource_names, - version_info: @versions[type_url] || "", - nonce: @nonces[type_url] || "" - ) - - stream.write(request) - - # Process responses asynchronously - Async do |task| - begin - stream.each do |response| - process_response(response, type_url, &block) - end - rescue => error - Console.error(self, error) - # Stream closed, will reconnect on next subscription - @mutex.synchronize do - @streams.delete(:ads) - end - raise - end - end - end - - # Close xDS discovery client - def close - @streams.each_value(&:close) - @streams.clear - end - - private - - def get_or_create_stream - 
@mutex.synchronize do - @streams[:ads] ||= create_ads_stream - end - end - - def create_ads_stream - # Create bidirectional streaming RPC to ADS - endpoint = Async::HTTP::Endpoint.parse(@server_uri) - http_client = Async::HTTP::Client.new(endpoint) - grpc_client = Async::GRPC::Client.new(http_client) - - # Use envoy.service.discovery.v3.AggregatedDiscoveryService - # This would require the Envoy protobuf definitions - interface = AggregatedDiscoveryServiceInterface.new( - "envoy.service.discovery.v3.AggregatedDiscoveryService" - ) - stub = grpc_client.stub(interface, "envoy.service.discovery.v3") - - # Create bidirectional stream - stub.stream_aggregated_resources - end - - def build_discovery_request(type_url:, resource_names:, version_info:, nonce:) - # Build DiscoveryRequest protobuf - # This requires Envoy protobuf definitions - Envoy::Service::Discovery::V3::DiscoveryRequest.new( - version_info: version_info, - node: @node, - resource_names: resource_names, - type_url: type_url, - response_nonce: nonce - ) - end - - def build_node_info - # Build node identification for xDS server - Envoy::Config::Core::V3::Node.new( - id: generate_node_id, - cluster: ENV["XDS_CLUSTER"] || "default", - metadata: build_metadata, - locality: build_locality - ) - end - - def process_response(response, type_url, &block) - @mutex.synchronize do - # Update version and nonce - @versions[type_url] = response.version_info - @nonces[type_url] = response.nonce - - # Deserialize resources - resources = response.resources.map do |resource| - # Deserialize Any protobuf to specific type - deserialize_resource(resource, type_url) - end - - # Yield to subscribers - block.call(resources) if block_given? 
- end - end - - def deserialize_resource(resource, type_url) - # Deserialize protobuf Any to specific message type - # This requires Envoy protobuf definitions - case type_url - when CLUSTER_TYPE - Envoy::Config::Cluster::V3::Cluster.decode(resource.value) - when ENDPOINT_TYPE - Envoy::Config::Endpoint::V3::ClusterLoadAssignment.decode(resource.value) - # ... other types - end - end - - def generate_node_id - # Generate unique node ID - "#{Socket.gethostname}-#{Process.pid}-#{SecureRandom.hex(4)}" - end - - def build_metadata - # Build node metadata - {} - end - - def build_locality - # Build locality information - nil - end - - def build_credentials(server_config) - # Build channel credentials from config - # Support Google Default Credentials, mTLS, etc. - nil - end - end - end - end -end -``` - -### 4. `Async::GRPC::XDS::LoadBalancer` - -Client-side load balancing with health checking. Similar to how `ClusterClient` selects nodes. - -```ruby -module Async - module GRPC - module XDS - # Client-side load balancing with health checking - class LoadBalancer - # Load balancing policies - ROUND_ROBIN = :round_robin - LEAST_REQUEST = :least_request - RANDOM = :random - RING_HASH = :ring_hash - MAGLEV = :maglev - - # Initialize load balancer - # @parameter context [Context] xDS context - # @parameter cluster [Resources::Cluster] Cluster configuration - # @parameter endpoints [Array] Initial endpoints - def initialize(context, cluster, endpoints) - @context = context - @cluster = cluster - @endpoints = endpoints - @policy = parse_policy(cluster.lb_policy) - @health_status = {} # Track health per endpoint - @health_checker = HealthChecker.new(cluster.health_checks) - @current_index = 0 - @in_flight_requests = {} # Track in-flight requests per endpoint - - # Subscribe to endpoint updates - watch_endpoints - - # Start health checking - start_health_checks - end - - # Get healthy endpoints - # @returns [Array] Healthy endpoints - def healthy_endpoints - @endpoints.select { 
|ep| healthy?(ep) } - end - - # Pick next endpoint using load balancing policy - # @returns [Async::HTTP::Endpoint, nil] Selected endpoint - def pick - healthy = healthy_endpoints - return nil if healthy.empty? - - case @policy - when ROUND_ROBIN - pick_round_robin(healthy) - when LEAST_REQUEST - pick_least_request(healthy) - when RANDOM - pick_random(healthy) - when RING_HASH - pick_ring_hash(healthy) - when MAGLEV - pick_maglev(healthy) - else - healthy.first - end - end - - # Update endpoints from EDS - # @parameter endpoints [Array] New endpoints - def update_endpoints(endpoints) - @endpoints = endpoints - @health_checker.update_endpoints(endpoints) - end - - # Close load balancer - def close - @health_checker.close - end - - private - - def healthy?(endpoint) - @health_status[endpoint] != :unhealthy - end - - def pick_round_robin(endpoints) - @current_index = (@current_index + 1) % endpoints.size - endpoints[@current_index] - end - - def pick_least_request(endpoints) - # Track in-flight requests and pick endpoint with fewest - endpoints.min_by { |ep| @in_flight_requests[ep] || 0 } - end - - def pick_random(endpoints) - endpoints.sample - end - - def pick_ring_hash(endpoints) - # Consistent hashing implementation - # Would need request context to hash - endpoints.first # Placeholder - end - - def pick_maglev(endpoints) - # Maglev hashing implementation - endpoints.first # Placeholder - end - - def parse_policy(lb_policy) - # Parse cluster LB policy to our constants - case lb_policy - when :ROUND_ROBIN then ROUND_ROBIN - when :LEAST_REQUEST then LEAST_REQUEST - when :RANDOM then RANDOM - when :RING_HASH then RING_HASH - when :MAGLEV then MAGLEV - else ROUND_ROBIN - end - end - - def watch_endpoints - # Subscribe to endpoint updates - @context.subscribe_eds(@cluster.name) do |endpoints| - update_endpoints(endpoints) - end - end - - def start_health_checks - return unless @cluster.health_checks.any? 
- - Async do |task| - loop do - @endpoints.each do |endpoint| - @health_status[endpoint] = @health_checker.check(endpoint) - end - - # Sleep for health check interval - interval = @cluster.health_checks.first&.interval || 30 - task.sleep(interval) - end - end - end - end - end - end -end -``` - -### 5. `Async::GRPC::XDS::HealthChecker` - -Health checking for endpoints. Runs as async tasks. - -```ruby -module Async - module GRPC - module XDS - # Endpoint health checking - class HealthChecker - # Initialize health checker - # @parameter health_checks [Array] Health check configurations from cluster - def initialize(health_checks) - @health_checks = health_checks - @endpoints = [] - @tasks = {} # Track health check tasks per endpoint - end - - # Update endpoints to check - # @parameter endpoints [Array] Endpoints to check - def update_endpoints(endpoints) - # Stop checking removed endpoints - removed = @endpoints - endpoints - removed.each do |endpoint| - @tasks[endpoint]&.stop - @tasks.delete(endpoint) - end - - # Start checking new endpoints - added = endpoints - @endpoints - added.each do |endpoint| - start_checking(endpoint) - end - - @endpoints = endpoints - end - - # Check health of endpoint - # @parameter endpoint [Async::HTTP::Endpoint] Endpoint to check - # @returns [Symbol] :healthy, :unhealthy, or :unknown - def check(endpoint) - # Use cached health status if available - # Otherwise perform check - perform_check(endpoint) - end - - # Close health checker - def close - @tasks.each_value(&:stop) - @tasks.clear - end - - private - - def start_checking(endpoint) - @tasks[endpoint] = Async do |task| - loop do - perform_check(endpoint) - - interval = @health_checks.first&.interval || 30 - task.sleep(interval) - end - end - end - - def perform_check(endpoint) - health_check = @health_checks.first - return :unknown unless health_check - - case health_check.type - when :HTTP - check_http_health(endpoint, health_check) - when :gRPC - check_grpc_health(endpoint, 
health_check) - else - :unknown - end - end - - def check_http_health(endpoint, health_check) - # Perform HTTP health check - # Use Async::HTTP::Client to make health check request - :healthy # Placeholder - end - - def check_grpc_health(endpoint, health_check) - # Perform gRPC health check - # Use Async::GRPC::Client to call grpc.health.v1.Health service - :healthy # Placeholder - end - end - end - end -end -``` - -### 6. Resource Data Models - -```ruby -module Async - module GRPC - module XDS - module Resources - # Represents a discovered cluster - class Cluster - attr_reader :name, :type, :lb_policy, :health_checks, :circuit_breakers - - def initialize(proto) - @name = proto.name - @type = proto.type - @lb_policy = proto.lb_policy - @health_checks = proto.health_checks - @circuit_breakers = proto.circuit_breakers - end - - def eds_cluster? - @type == :EDS - end - end - - # Represents endpoint assignment - class ClusterLoadAssignment - attr_reader :cluster_name, :endpoints - - def initialize(proto) - @cluster_name = proto.cluster_name - @endpoints = proto.endpoints.flat_map do |locality_endpoints| - locality_endpoints.lb_endpoints.map { |lb_ep| Endpoint.new(lb_ep) } - end - end - end - - # Represents a single endpoint - class Endpoint - attr_reader :address, :port, :health_status, :metadata - - def initialize(lb_endpoint) - socket_address = lb_endpoint.endpoint.address.socket_address - @address = socket_address.address - @port = socket_address.port_value - @health_status = lb_endpoint.health_status - @metadata = lb_endpoint.metadata - end - - def healthy? - @health_status == :HEALTHY || @health_status == :UNKNOWN - end - - def uri - "https://#{@address}:#{@port}" - end - end - end - end - end -end -``` - -### 7. `Async::GRPC::XDS::ResourceCache` - -Caches discovered resources. 
- -```ruby -module Async - module GRPC - module XDS - # Caches discovered xDS resources - class ResourceCache - def initialize - @clusters = {} - @endpoints = {} - @mutex = Mutex.new - end - - def get_cluster(name) - @mutex.synchronize { @clusters[name] } - end - - def update_cluster(cluster) - @mutex.synchronize { @clusters[cluster.name] = cluster } - end - - def get_endpoints(cluster_name) - @mutex.synchronize { @endpoints[cluster_name] } - end - - def update_endpoints(cluster_name, endpoints) - @mutex.synchronize { @endpoints[cluster_name] = endpoints } - end - end - end - end -end -``` - -## Bootstrap Configuration - -xDS clients require a bootstrap configuration that specifies control plane details: - -```json -{ - "xds_servers": [ - { - "server_uri": "xds.example.com:443", - "channel_creds": [ - { - "type": "google_default" - } - ], - "server_features": ["xds_v3"] - } - ], - "node": { - "id": "async-grpc-client-001", - "cluster": "production", - "locality": { - "zone": "us-central1-a" - }, - "metadata": { - "TRAFFICDIRECTOR_GCP_PROJECT_NUMBER": "123456789" - } - }, - "certificate_providers": { - "default": { - "plugin_name": "file_watcher", - "config": { - "certificate_file": "/path/to/cert.pem", - "private_key_file": "/path/to/key.pem", - "ca_certificate_file": "/path/to/ca.pem", - "refresh_interval": "600s" - } - } - } -} -``` - -Bootstrap can be loaded from: -1. Explicit parameter to `XDS::Client.new` -2. Environment variable `GRPC_XDS_BOOTSTRAP` -3. 
Default file location `~/.config/grpc/bootstrap.json` - -## Usage Examples - -### Basic Service Discovery - -```ruby -require "async/grpc" -require "async/grpc/xds" - -# Create xDS client (like SentinelClient) -xds_client = Async::GRPC::XDS::Client.new( - "myservice", - bootstrap: "/path/to/bootstrap.json" -) - -# Use it exactly like Async::GRPC::Client -Async do - stub = xds_client.stub(MyServiceInterface, "myservice") - - # Make calls - automatically load balanced across discovered endpoints - response = stub.my_method(request) - puts response.message -ensure - xds_client.close -end -``` - -### With Default Bootstrap - -```ruby -# Uses GRPC_XDS_BOOTSTRAP env var or ~/.config/grpc/bootstrap.json -xds_client = Async::GRPC::XDS::Client.new("myservice") - -Async do - # Use client normally - xds_client.stub(MyServiceInterface, "myservice") do |stub| - response = stub.say_hello(request) - end -ensure - xds_client.close -end -``` - -### Manual Endpoint Resolution - -```ruby -xds_client = Async::GRPC::XDS::Client.new("myservice") - -# Get all healthy endpoints -endpoints = xds_client.resolve_endpoints - -endpoints.each do |endpoint| - puts "Available backend: #{endpoint.authority}" -end - -# Use load balancer directly -lb = xds_client.instance_variable_get(:@load_balancer) - -10.times do - backend = lb.pick - puts "Selected: #{backend.authority}" -end -``` - -### Error Handling - -```ruby -xds_client = Async::GRPC::XDS::Client.new("myservice") - -Async do - begin - stub = xds_client.stub(MyServiceInterface, "myservice") - response = stub.my_method(request) - rescue Async::GRPC::XDS::NoEndpointsError => error - puts "No endpoints available: #{error.message}" - # Fallback to static endpoint or retry later - rescue Async::GRPC::XDS::ConfigurationError => error - puts "Configuration error: #{error.message}" - # Check bootstrap configuration - end -ensure - xds_client.close -end -``` - -## Integration with Existing Code - -Since `XDS::Client` implements 
`Protocol::HTTP::Middleware` (same as `Async::GRPC::Client`), it can be used as a drop-in replacement: - -```ruby -# Works with any code expecting Async::GRPC::Client interface -def make_call(client) - client.stub(MyServiceInterface, "myservice") do |stub| - stub.say_hello(request) - end -end - -# Can use either regular client or xDS client -regular_client = Async::GRPC::Client.open(endpoint) -xds_client = Async::GRPC::XDS::Client.new("myservice") - -make_call(regular_client) # Works -make_call(xds_client) # Also works! -``` - -## Implementation Phases - -### Phase 1: Core Infrastructure -- [ ] Bootstrap configuration loading -- [ ] Basic `XDS::Client` wrapper implementation -- [ ] `XDS::Context` for state management -- [ ] `XDS::ResourceCache` for discovered resources -- [ ] Basic endpoint resolution - -### Phase 2: Discovery Services -- [ ] `XDS::DiscoveryClient` with ADS support -- [ ] CDS (Cluster Discovery) implementation -- [ ] EDS (Endpoint Discovery) implementation -- [ ] Resource subscription and updates -- [ ] Version tracking and ACK/NACK - -### Phase 3: Load Balancing -- [ ] `XDS::LoadBalancer` base implementation -- [ ] Round-robin policy -- [ ] Least-request policy -- [ ] Random policy -- [ ] Ring-hash/consistent hashing -- [ ] Maglev policy - -### Phase 4: Health Checking -- [ ] `XDS::HealthChecker` implementation -- [ ] HTTP health checks -- [ ] gRPC health checks -- [ ] Health status aggregation -- [ ] Active/passive health checking - -### Phase 5: Advanced Features -- [ ] LDS (Listener Discovery) for servers -- [ ] RDS (Route Discovery) for routing -- [ ] SDS (Secret Discovery) for mTLS -- [ ] Circuit breakers -- [ ] Retry policies -- [ ] Timeout configuration -- [ ] Rate limiting - -### Phase 6: Integration & Testing -- [ ] Integration tests with mock xDS server -- [ ] Error handling and recovery tests -- [ ] Load balancing distribution tests -- [ ] Health check integration tests -- [ ] Performance benchmarks - -## Standards and Specifications - 
-### xDS Protocol Specifications -- [xDS REST and gRPC protocol](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol) -- [Universal Data Plane API (UDPA)](https://github.com/cncf/xds) -- [gRFC A27: xDS-Based Global Load Balancing](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md) -- [gRFC A28: xDS Traffic Splitting and Routing](https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md) - -### Protobuf Definitions -- [envoy.config.listener.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/listener/v3/listener.proto) -- [envoy.config.route.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route.proto) -- [envoy.config.cluster.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto) -- [envoy.config.endpoint.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/endpoint/v3/endpoint.proto) -- [envoy.service.discovery.v3](https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/discovery/v3/discovery.proto) - -### Compatible Systems -- **Google Cloud Traffic Director** - Managed xDS control plane -- **Istio** - Service mesh with xDS control plane -- **Linkerd** - Service mesh with xDS support -- **Consul Connect** - Service mesh with xDS API -- **Envoy Proxy** - Reference xDS implementation - -## Testing Strategy - -### Unit Tests -- Bootstrap configuration loading and validation -- Resource deserialization -- Load balancing algorithms -- Health checking logic -- Cache invalidation - -### Integration Tests with Docker Compose - -Following the pattern from `async-redis`, integration tests use Docker Compose to spin up a complete xDS test environment with: -- xDS control plane (using go-control-plane or Envoy) -- Multiple backend gRPC servers -- Health check services - -#### Docker Compose Setup - -Create `xds/docker-compose.yaml`: - -```yaml -services: - # xDS control plane (using go-control-plane test server) - 
xds-control-plane: - image: envoyproxy/go-control-plane:latest - command: > - /go-control-plane - -alsologtostderr - -v 2 - -mode xds - -server_type ADS - -port 18000 - ports: - - "18000:18000" - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "18000"] - interval: 1s - timeout: 3s - retries: 30 - - # Backend gRPC server 1 - backend-1: - build: - context: . - dockerfile: xds/Dockerfile.backend - environment: - - PORT=50051 - - SERVICE_NAME=myservice - ports: - - "50051:50051" - depends_on: - xds-control-plane: - condition: service_healthy - - # Backend gRPC server 2 - backend-2: - build: - context: . - dockerfile: xds/Dockerfile.backend - environment: - - PORT=50052 - - SERVICE_NAME=myservice - ports: - - "50052:50052" - depends_on: - xds-control-plane: - condition: service_healthy - - # Backend gRPC server 3 - backend-3: - build: - context: . - dockerfile: xds/Dockerfile.backend - environment: - - PORT=50053 - - SERVICE_NAME=myservice - ports: - - "50053:50053" - depends_on: - xds-control-plane: - condition: service_healthy - - # Test runner - tests: - image: ruby:${RUBY_VERSION:-latest} - volumes: - - ../:/code - working_dir: /code - command: bash -c "bundle install && bundle exec sus xds/test" - environment: - - COVERAGE=${COVERAGE} - - XDS_SERVER_URI=xds-control-plane:18000 - depends_on: - - xds-control-plane - - backend-1 - - backend-2 - - backend-3 -``` - -#### Test Structure - -Create `xds/test/async/grpc/xds/client.rb`: - -```ruby -# frozen_string_literal: true - -# Released under the MIT License. -# Copyright, 2025-2026, by Samuel Williams. 
- -require "async/grpc/xds/client" -require "sus/fixtures/async" -require "async/http/endpoint" - -describe Async::GRPC::XDS::Client do - include Sus::Fixtures::Async::ReactorContext - - let(:xds_server_uri) {ENV["XDS_SERVER_URI"] || "xds-control-plane:18000"} - let(:service_name) {"myservice"} - - let(:bootstrap) { - { - "xds_servers" => [ - { - "server_uri" => xds_server_uri, - "channel_creds" => [{"type" => "insecure"}] - } - ], - "node" => { - "id" => "test-client-#{Process.pid}", - "cluster" => "test" - } - } - } - - let(:client) {subject.new(service_name, bootstrap: bootstrap)} - - it "can resolve endpoints" do - endpoints = client.resolve_endpoints - - expect(endpoints).not_to be_empty - expect(endpoints.size).to be >= 1 - end - - it "can make RPC calls through xDS" do - stub = client.stub(MyServiceInterface, service_name) - - request = MyService::HelloRequest.new(name: "test") - response = stub.say_hello(request) - - expect(response).to be_a(MyService::HelloReply) - expect(response.message).to match(/test/) - end - - it "load balances across multiple endpoints" do - # Make multiple calls and verify they hit different backends - endpoints_used = Set.new - - 10.times do - stub = client.stub(MyServiceInterface, service_name) - request = MyService::HelloRequest.new(name: "test") - response = stub.say_hello(request) - - # Extract backend info from response metadata or headers - endpoints_used << extract_backend(response) - end - - # Should use multiple backends (depending on LB policy) - expect(endpoints_used.size).to be > 1 - end - - it "handles endpoint failures gracefully" do - # Start with healthy endpoints - endpoints = client.resolve_endpoints - expect(endpoints).not_to be_empty - - # Simulate endpoint failure (stop one backend) - # xDS should update and remove failed endpoint - - # Wait for xDS update - sleep 5 - - # Should still be able to make calls (using remaining endpoints) - stub = client.stub(MyServiceInterface, service_name) - request = 
MyService::HelloRequest.new(name: "test") - response = stub.say_hello(request) - - expect(response).to be_a(MyService::HelloReply) - end - - it "reloads configuration on errors" do - # Make initial call - stub = client.stub(MyServiceInterface, service_name) - request = MyService::HelloRequest.new(name: "test") - response = stub.say_hello(request) - expect(response).to be_a(MyService::HelloReply) - - # Invalidate cache (simulate endpoint change) - client.instance_variable_get(:@load_balancer)&.update_endpoints([]) - - # Should reload and work again - response = stub.say_hello(request) - expect(response).to be_a(MyService::HelloReply) - end - - private - - def extract_backend(response) - # Extract backend identifier from response - # This depends on your test service implementation - response.metadata["backend-id"] || "unknown" - end -end -``` - -#### Running Integration Tests - -```bash -# Start docker compose environment -cd xds -docker compose up -d - -# Wait for services to be ready -docker compose ps - -# Run tests -docker compose run --rm tests - -# Or run locally (if services are accessible) -bundle exec sus xds/test - -# Cleanup -docker compose down -``` - -#### Mock xDS Control Plane - -For simpler testing, use a mock xDS server: - -```ruby -# xds/test/mock_xds_server.rb -module Async - module GRPC - module XDS - module Test - # Simple mock xDS server for testing - class MockControlPlane - def initialize - @clusters = {} - @endpoints = {} - end - - def add_cluster(name, config) - @clusters[name] = config - end - - def add_endpoints(cluster_name, endpoints) - @endpoints[cluster_name] = endpoints - end - - # Implement ADS server interface - def stream_aggregated_resources(requests) - # Yield DiscoveryResponse messages - end - end - end - end - end -end -``` - -### System Tests -- Integration with Google Cloud Traffic Director -- Integration with Istio -- Multi-endpoint failover scenarios -- Load balancing distribution -- Health check integration - -## Security 
Considerations - -### Authentication -- Support for Google Default Credentials -- Support for mTLS with SDS -- Support for OAuth2 tokens -- Channel credential configuration - -### Authorization -- RBAC integration via xDS -- Resource filtering by permissions -- Secure communication with control plane - -### Certificate Management -- Dynamic certificate rotation via SDS -- Certificate validation -- CRL/OCSP checking -- Certificate provider plugins - -## Performance Considerations - -### Resource Caching -- Cache discovered resources locally -- Version-based cache invalidation -- Memory-efficient resource storage - -### Connection Pooling -- Reuse HTTP/2 connections to backends -- Connection pool per endpoint -- Idle connection cleanup - -### Async Operations -- Non-blocking xDS subscriptions -- Async health checks -- Parallel endpoint discovery - -## Open Questions - -1. **Incremental vs. State-of-the-World** - Which xDS update mode to use? - - Incremental allows selective updates - - State-of-the-world is simpler but more bandwidth - - **Recommendation**: Start with state-of-the-world, add incremental later - -2. **Control Plane Failover** - How to handle control plane unavailability? - - Cache last known good configuration - - Fall back to static configuration - - Multiple control plane endpoints - - **Recommendation**: Cache last known config, support multiple endpoints - -3. **Protobuf Dependencies** - How to handle Envoy protos? - - Bundle pre-generated Ruby protos - - Generate from .proto files at build time - - Separate gem for Envoy proto definitions - - **Recommendation**: Separate gem (`envoy-protos-ruby`) for proto definitions - -4. **Backwards Compatibility** - How to maintain compatibility? - - Make xDS optional dependency - - Graceful degradation without xDS - - Clear migration path from static to dynamic - - **Recommendation**: Optional dependency, wrapper pattern maintains compatibility - -5. **Server-Side xDS** - Priority for server features? 
- - LDS for dynamic listener configuration - - RDS for advanced routing - - Integration with existing `Dispatcher` - - **Recommendation**: Focus on client-side first, server-side later - -## Related Work - -- [grpc-go xDS implementation](https://github.com/grpc/grpc-go/tree/master/xds) -- [grpc-java xDS implementation](https://github.com/grpc/grpc-java/tree/master/xds) -- [Envoy data plane implementation](https://github.com/envoyproxy/envoy) -- [go-control-plane](https://github.com/envoyproxy/go-control-plane) - Reference control plane - -## References - -- [gRPC xDS Documentation](https://grpc.io/docs/guides/xds/) -- [Envoy xDS Documentation](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/operations/dynamic_configuration) -- [Traffic Director Documentation](https://cloud.google.com/traffic-director/docs) -- [CNCF xDS API Working Group](https://github.com/cncf/xds) diff --git a/xds/Dockerfile.backend b/xds/Dockerfile.backend index ec44aee..e9db597 100644 --- a/xds/Dockerfile.backend +++ b/xds/Dockerfile.backend @@ -7,8 +7,10 @@ RUN apt-get update && apt-get install -y \ build-essential \ && rm -rf /var/lib/apt/lists/* -# Copy gemfiles -COPY gems.rb gems.locked ./ +# Copy gemfiles, gemspec, and minimal lib files needed for gemspec +# Note: gems.locked is gitignored, so we don't require it - bundle install will resolve deps +COPY gems.rb *.gemspec ./ +COPY lib/async/grpc/version.rb lib/async/grpc/version.rb RUN bundle install # Copy application code diff --git a/xds/Dockerfile.control-plane b/xds/Dockerfile.control-plane new file mode 100644 index 0000000..6054b25 --- /dev/null +++ b/xds/Dockerfile.control-plane @@ -0,0 +1,23 @@ +FROM golang:1.21-alpine AS builder + +# Install build dependencies +RUN apk add --no-cache git + +# Copy test server source +WORKDIR /build +COPY test_server.go go.mod ./ + +# Download dependencies and build +RUN go mod tidy && \ + go mod download && \ + go build -o /xds-test-server test_server.go + +FROM alpine:latest + 
+RUN apk add --no-cache netcat-openbsd ca-certificates + +COPY --from=builder /xds-test-server /xds-test-server + +EXPOSE 18000 + +CMD ["/xds-test-server", "-port", "18000"] diff --git a/xds/backend_server.rb b/xds/backend_server.rb index 954edb1..c126c64 100644 --- a/xds/backend_server.rb +++ b/xds/backend_server.rb @@ -7,22 +7,24 @@ require "async" require "async/http/server" require "async/http/endpoint" -require "protocol/grpc/middleware" +require "async/grpc/dispatcher" +require "async/grpc/service" require_relative "../fixtures/async/grpc/test_interface" -class TestBackendService - def initialize(backend_id) +class TestBackendService < Async::GRPC::Service + def initialize(interface_class, service_name, backend_id) + super(interface_class, service_name) @backend_id = backend_id end def unary_call(input, output, call) request = input.read - # Include backend ID in response metadata - call.set_metadata("backend-id", @backend_id) + # Include backend ID in response metadata (trailers) + call.response.headers["backend-id"] = @backend_id response = Protocol::GRPC::Fixtures::TestMessage.new( - value: "Response from #{@backend_id}: #{request.value}" + value: "Response from #{@backend_id}: #{request&.value || 'no value'}" ) output.write(response) @@ -31,7 +33,7 @@ def unary_call(input, output, call) def say_hello(input, output, call) request = input.read - call.set_metadata("backend-id", @backend_id) + call.response.headers["backend-id"] = @backend_id response = Protocol::GRPC::Fixtures::TestMessage.new( value: "Hello from #{@backend_id}, #{request.value}!" 
@@ -46,21 +48,21 @@ def say_hello(input, output, call) service_name = ENV["SERVICE_NAME"] || "test.Service" Async do - # Create gRPC middleware - grpc = Protocol::GRPC::Middleware.new - service = TestBackendService.new(backend_id) - grpc.register(service_name, service) + # Create gRPC dispatcher + dispatcher = Async::GRPC::Dispatcher.new + service = TestBackendService.new(Async::GRPC::Fixtures::TestInterface, service_name, backend_id) + dispatcher.register(service) - # Create endpoint + # Create endpoint (http for h2c - gRPC without TLS in docker) endpoint = Async::HTTP::Endpoint.parse( - "https://0.0.0.0:#{port}", + "http://0.0.0.0:#{port}", protocol: Async::HTTP::Protocol::HTTP2 ) # Start server - server = Async::HTTP::Server.new(grpc, endpoint) + server = Async::HTTP::Server.new(dispatcher, endpoint) - Console.logger.info(self){"Starting backend server #{backend_id} on port #{port}"} + Console.info{"Starting backend server #{backend_id} on port #{port}"} server.run end diff --git a/xds/docker-compose.yaml b/xds/docker-compose.yaml index 67f7a67..2d704fe 100644 --- a/xds/docker-compose.yaml +++ b/xds/docker-compose.yaml @@ -1,17 +1,14 @@ services: - # xDS control plane (using go-control-plane test server) + # xDS control plane (using custom test server built with go-control-plane) # This provides a simple xDS server for testing xds-control-plane: - image: envoyproxy/go-control-plane:latest - command: > - /go-control-plane - -alsologtostderr - -v 2 - -mode xds - -server_type ADS - -port 18000 + build: + context: . + dockerfile: Dockerfile.control-plane ports: - "18000:18000" + environment: + - UPSTREAM=backend-1:50051,backend-2:50052,backend-3:50053 healthcheck: test: ["CMD", "nc", "-z", "localhost", "18000"] interval: 1s @@ -23,6 +20,10 @@ services: build: context: .. 
dockerfile: xds/Dockerfile.backend + volumes: + - ../:/code + working_dir: /code + command: bundle exec ruby xds/backend_server.rb environment: - PORT=50051 - SERVICE_NAME=myservice @@ -38,6 +39,10 @@ services: build: context: .. dockerfile: xds/Dockerfile.backend + volumes: + - ../:/code + working_dir: /code + command: bundle exec ruby xds/backend_server.rb environment: - PORT=50052 - SERVICE_NAME=myservice @@ -53,6 +58,10 @@ services: build: context: .. dockerfile: xds/Dockerfile.backend + volumes: + - ../:/code + working_dir: /code + command: bundle exec ruby xds/backend_server.rb environment: - PORT=50053 - SERVICE_NAME=myservice @@ -71,8 +80,10 @@ services: working_dir: /code command: bash -c "bundle install && bundle exec sus xds/test" environment: + - CONSOLE_OUTPUT=XTerm - COVERAGE=${COVERAGE} - XDS_SERVER_URI=xds-control-plane:18000 + - XDS_ENDPOINT_SCHEME=http depends_on: - xds-control-plane - backend-1 diff --git a/xds/go.mod b/xds/go.mod new file mode 100644 index 0000000..f1b8557 --- /dev/null +++ b/xds/go.mod @@ -0,0 +1,8 @@ +module xds-test-server + +go 1.21 + +require ( + github.com/envoyproxy/go-control-plane v0.12.0 + google.golang.org/grpc v1.60.0 +) diff --git a/xds/readme.md b/xds/readme.md index 0e362c9..75a0ae2 100644 --- a/xds/readme.md +++ b/xds/readme.md @@ -93,6 +93,29 @@ Check network connectivity: docker compose exec tests ping xds-control-plane ``` -### Port conflicts +## Protobuf Setup -If ports are already in use, modify `docker-compose.yaml` to use different ports. +The xDS implementation uses Envoy protobuf definitions. Protos come from [envoyproxy/envoy](https://github.com/envoyproxy/envoy) (`api/`) or [envoyproxy/data-plane-api](https://github.com/envoyproxy/data-plane-api). Use **xDS v3** (v2 is deprecated). + +### Required protobuf files + +- `envoy/service/discovery/v3/discovery.proto` - DiscoveryRequest/Response. +- `envoy/service/discovery/v3/ads.proto` - AggregatedDiscoveryService. 
+- `envoy/config/cluster/v3/cluster.proto` - Cluster (CDS). +- `envoy/config/endpoint/v3/endpoint.proto` - ClusterLoadAssignment (EDS). +- `envoy/config/core/v3/base.proto` - Node, Locality, etc. +- `google/protobuf/any.proto` - For Any type in DiscoveryResponse. + +### Generating Ruby code + +```bash +protoc --ruby_out=lib \ + --proto_path=vendor/envoy-api \ + envoy/service/discovery/v3/discovery.proto \ + envoy/service/discovery/v3/ads.proto \ + envoy/config/cluster/v3/cluster.proto \ + envoy/config/endpoint/v3/endpoint.proto \ + envoy/config/core/v3/base.proto +``` + +Lock the Envoy API version (submodule tag or commit) for compatibility. diff --git a/xds/test/async/grpc/xds/client.rb b/xds/test/async/grpc/xds/client.rb index 0538def..430f3e1 100644 --- a/xds/test/async/grpc/xds/client.rb +++ b/xds/test/async/grpc/xds/client.rb @@ -4,8 +4,11 @@ # Copyright, 2025-2026, by Samuel Williams. require "async/grpc/xds/client" +require "async/grpc/xds/ads_stream" +require "async/grpc/service" require "sus/fixtures/async" require "async/http/endpoint" +require_relative "../../../../../fixtures/async/grpc/test_interface" require "set" describe Async::GRPC::XDS::Client do @@ -14,34 +17,65 @@ let(:xds_server_uri) {ENV["XDS_SERVER_URI"] || "xds-control-plane:18000"} let(:service_name) {"myservice"} - let(:bootstrap) { + let(:bootstrap) do { - "xds_servers" => [ + xds_servers: [ { - "server_uri" => xds_server_uri, - "channel_creds" => [{"type" => "insecure"}] + server_uri: xds_server_uri, + channel_creds: [{type: "insecure"}] } ], - "node" => { - "id" => "test-client-#{Process.pid}", - "cluster" => "test" + node: { + id: "test-client-#{Process.pid}", + cluster: "test" } } - } + end let(:client) {subject.new(service_name, bootstrap: bootstrap)} + it "can stream updates" do + skip "Requires xDS control plane (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] + + received = [] + delegate = Object.new + delegate.define_singleton_method(:discovery_response){|response, _stream| 
received << response} + + endpoint = Async::HTTP::Endpoint.parse( + "http://#{xds_server_uri}", + protocol: Async::HTTP::Protocol::HTTP2 + ) + http_client = Async::HTTP::Client.new(endpoint) + grpc_client = Async::GRPC::Client.new(http_client) + node = Envoy::Config::Core::V3::Node.new(id: "test-#{Process.pid}", cluster: "test") + + initial = Envoy::Service::Discovery::V3::DiscoveryRequest.new( + type_url: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + resource_names: [service_name], + node: node + ) + stream = Async::GRPC::XDS::ADSStream.new(grpc_client, node, delegate: delegate) + + stream_task = Async{stream.run(initial: initial)} + deadline = Time.now + 10 + while received.empty? && Time.now < deadline + sleep(0.1) + end + stream_task.stop + + expect(received.size).to be >= 1 + end + it "can resolve endpoints" do + skip "Requires docker compose environment (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] + endpoints = client.resolve_endpoints - expect(endpoints).not_to be_empty expect(endpoints.size).to be >= 1 end it "can make RPC calls through xDS" do - # This test requires a working xDS control plane and backend servers - # Skip if not running in docker compose - skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] + skip "Requires docker compose environment (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) @@ -49,76 +83,59 @@ response = stub.unary_call(request) expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) - expect(response.value).to match(/test/) + expect(response.value).to be(:include?, "test") end it "load balances across multiple endpoints" do - skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] + skip "Requires docker compose environment (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] - # Make multiple calls and verify they hit different backends + stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) 
endpoints_used = Set.new 10.times do - stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") response = stub.unary_call(request) - - # Extract backend info from response metadata - # This would need to be implemented based on how metadata is returned endpoints_used << response.value end - # Should use multiple backends (depending on LB policy) - # Note: This depends on load balancing policy expect(endpoints_used.size).to be >= 1 end - it "handles endpoint failures gracefully" do - skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] - - # Start with healthy endpoints - endpoints = client.resolve_endpoints - expect(endpoints).not_to be_empty - - # Make initial call - stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) - request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") - response = stub.unary_call(request) - expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) - - # Note: Testing actual endpoint failure would require stopping a backend - # This is better done as a separate integration test + it "handles bootstrap configuration errors" do + expect{subject.new(service_name, bootstrap: {invalid: "config" })}.to raise_exception(Async::GRPC::XDS::Client::ConfigurationError) end - it "reloads configuration on errors" do - skip "Requires docker compose environment" unless ENV["XDS_SERVER_URI"] - - # Make initial call - stub = client.stub(Async::GRPC::Fixtures::TestInterface, service_name) - request = Protocol::GRPC::Fixtures::TestMessage.new(value: "test") - response = stub.unary_call(request) - expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) + it "handles no endpoints available" do + skip "Requires docker compose environment (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] - # Invalidate cache (simulate endpoint change) - client.instance_variable_get(:@load_balancer)&.update_endpoints([]) + invalid_client = 
subject.new("nonexistent-service", bootstrap: bootstrap) - # Should reload and work again - response = stub.unary_call(request) - expect(response).to be_a(Protocol::GRPC::Fixtures::TestMessage) + expect{invalid_client.resolve_endpoints}.to raise_exception(Async::GRPC::XDS::Client::NoEndpointsError) end - it "handles bootstrap configuration errors" do - expect { - subject.new(service_name, bootstrap: {invalid: "config"}) - }.to raise_error(Async::GRPC::XDS::Client::ConfigurationError) + it "evicts resolved promises to prevent unbounded growth" do + skip "Requires docker compose environment (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] + + xds_client = subject.new(service_name, bootstrap: bootstrap) + xds_client.resolve_endpoints + + context = xds_client.instance_variable_get(:@context) + # Resolved promises are evicted immediately; hashes stay bounded + expect(context.instance_variable_get(:@cluster_promises)).to be(:empty?) + expect(context.instance_variable_get(:@endpoint_promises)).to be(:empty?) end - it "handles no endpoints available" do - # Create client with invalid service name - invalid_client = subject.new("nonexistent-service", bootstrap: bootstrap) + it "clears promise caches on close to prevent memory growth" do + skip "Requires docker compose environment (XDS_SERVER_URI)" unless ENV["XDS_SERVER_URI"] + + xds_client = subject.new(service_name, bootstrap: bootstrap) + xds_client.resolve_endpoints + + context = xds_client.instance_variable_get(:@context) + xds_client.close - expect { - invalid_client.resolve_endpoints - }.to raise_error(Async::GRPC::XDS::Client::NoEndpointsError) + # Close clears any remaining promises (e.g. unresolved for nonexistent services) + expect(context.instance_variable_get(:@cluster_promises)).to be(:empty?) + expect(context.instance_variable_get(:@endpoint_promises)).to be(:empty?) 
end end diff --git a/xds/test_server.go b/xds/test_server.go new file mode 100644 index 0000000..938be1f --- /dev/null +++ b/xds/test_server.go @@ -0,0 +1,178 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + + discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +var ( + port = flag.Int("port", 18000, "xDS server port") + upstream = flag.String("upstream", "backend-1:50051,backend-2:50052,backend-3:50053", "Comma-separated list of upstream endpoints") +) + +// Custom hash function that accepts any node ID for testing +type anyNodeHash struct{} + +func (h *anyNodeHash) ID(node *corev3.Node) string { + return "any" +} + +func main() { + flag.Parse() + + ctx := context.Background() + + // Create snapshot with cluster and endpoints + snapshot, err := createSnapshot(*upstream) + if err != nil { + log.Fatalf("Failed to create snapshot: %v", err) + } + + // For testing, accept any node ID by using a custom hash that always returns the same key + // This allows any client to connect and get the same snapshot + snapshotCache := cache.NewSnapshotCache(false, &anyNodeHash{}, nil) + if err := snapshotCache.SetSnapshot(ctx, "any", snapshot); err != nil { + log.Fatalf("Failed to set snapshot: %v", err) + } + log.Printf("Set snapshot for any node ID") + + // Create callbacks for logging + callbacks := serverv3.CallbackFuncs{ + StreamOpenFunc: func(ctx context.Context, streamID 
int64, typeURL string) error { + log.Printf("Stream opened: streamID=%d, typeURL=%s", streamID, typeURL) + return nil + }, + StreamRequestFunc: func(streamID int64, request *discovery.DiscoveryRequest) error { + log.Printf("Stream request: streamID=%d, typeURL=%s, resource_names=%v", streamID, request.TypeUrl, request.ResourceNames) + return nil + }, + } + + // Create xDS server with callbacks + srv := serverv3.NewServer(ctx, snapshotCache, callbacks) + + // Start gRPC server + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("Failed to listen: %v", err) + } + + // Create gRPC server with insecure credentials (for testing without TLS) + grpcServer := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) + discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, srv) + + log.Printf("xDS test server listening on :%d", *port) + log.Printf("Serving cluster 'myservice' with endpoints: %s", *upstream) + + if err := grpcServer.Serve(lis); err != nil { + log.Fatalf("Failed to serve: %v", err) + } +} + +func createSnapshot(upstreams string) (*cache.Snapshot, error) { + // Parse upstream endpoints + endpoints := parseEndpoints(upstreams) + + // Create cluster + cluster := &clusterv3.Cluster{ + Name: "myservice", + ClusterDiscoveryType: &clusterv3.Cluster_Type{Type: clusterv3.Cluster_EDS}, + LbPolicy: clusterv3.Cluster_ROUND_ROBIN, + EdsClusterConfig: &clusterv3.Cluster_EdsClusterConfig{ + ServiceName: "myservice", + EdsConfig: &corev3.ConfigSource{ + ConfigSourceSpecifier: &corev3.ConfigSource_Ads{}, + }, + }, + } + + // Create endpoint assignment + lbEndpoints := make([]*endpointv3.LbEndpoint, 0, len(endpoints)) + for _, ep := range endpoints { + lbEndpoints = append(lbEndpoints, &endpointv3.LbEndpoint{ + HostIdentifier: &endpointv3.LbEndpoint_Endpoint{ + Endpoint: &endpointv3.Endpoint{ + Address: &corev3.Address{ + Address: &corev3.Address_SocketAddress{ + SocketAddress: &corev3.SocketAddress{ + Protocol: 
corev3.SocketAddress_TCP, + Address: ep.host, + PortSpecifier: &corev3.SocketAddress_PortValue{ + PortValue: ep.port, + }, + }, + }, + }, + }, + }, + HealthStatus: corev3.HealthStatus_HEALTHY, + }) + } + + endpointAssignment := &endpointv3.ClusterLoadAssignment{ + ClusterName: "myservice", + Endpoints: []*endpointv3.LocalityLbEndpoints{ + { + LbEndpoints: lbEndpoints, + }, + }, + } + + // Create snapshot + // types.Resource is proto.Message, which Cluster and ClusterLoadAssignment implement + return cache.NewSnapshot( + "1", // version + map[resource.Type][]types.Resource{ + resource.ClusterType: {cluster}, + resource.EndpointType: {endpointAssignment}, + }, + ) +} + +type endpoint struct { + host string + port uint32 +} + +func parseEndpoints(upstreams string) []endpoint { + var endpoints []endpoint + parts := splitComma(upstreams) + for _, part := range parts { + host, port := parseHostPort(part) + endpoints = append(endpoints, endpoint{host: host, port: port}) + } + return endpoints +} + +func splitComma(s string) []string { + return strings.Split(s, ",") +} + +func parseHostPort(addr string) (string, uint32) { + parts := strings.Split(addr, ":") + if len(parts) == 2 { + var port uint32 + fmt.Sscanf(parts[1], "%d", &port) + if port > 0 { + return parts[0], port + } + } + return addr, 50051 +} diff --git a/xds/update_protos.sh b/xds/update_protos.sh new file mode 100755 index 0000000..129375b --- /dev/null +++ b/xds/update_protos.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Update Envoy protobuf definitions +# This script clones the envoy data-plane-api and copies only the needed .proto files + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +PROTO_DIR="$PROJECT_ROOT/proto" +TEMP_DIR="/tmp/envoy-api-$$" + +echo "Cloning envoyproxy/data-plane-api..." 
+ +# Clone with sparse checkout to only get what we need +git clone --depth 1 --filter=blob:none --sparse https://github.com/envoyproxy/data-plane-api.git "$TEMP_DIR" + +cd "$TEMP_DIR" + +echo "Setting up sparse checkout..." + +# First get envoy config files +git sparse-checkout set \ + envoy/config/cluster/v3 \ + envoy/config/endpoint/v3 \ + envoy/config/listener/v3 \ + envoy/config/route/v3 \ + envoy/config/core/v3 \ + envoy/extensions/transport_sockets/tls/v3 + +# Copy envoy config files +cp -r envoy "$PROTO_DIR/" + +# Now get discovery service and google protobuf files +git sparse-checkout set \ + envoy/service/discovery/v3 \ + google/protobuf + +# Copy discovery service +cp -r envoy/service "$PROTO_DIR/envoy/" + +# Copy google protobuf (if exists in repo) +if [ -d "google" ]; then + cp -r google "$PROTO_DIR/" +fi + +# Get Google protobuf well-known types from protobuf repo +echo "Fetching Google protobuf well-known types..." +git clone --depth 1 https://github.com/protocolbuffers/protobuf.git /tmp/protobuf-$$ +mkdir -p "$PROTO_DIR/google/protobuf" +cp /tmp/protobuf-$$/src/google/protobuf/{any,duration,timestamp,struct,empty,wrappers}.proto "$PROTO_DIR/google/protobuf/" 2>/dev/null || true +rm -rf /tmp/protobuf-$$ + +# Get google/rpc/status.proto from api-common-protos +echo "Fetching google/rpc/status.proto..." +git clone --depth 1 https://github.com/googleapis/api-common-protos.git /tmp/api-common-$$ +mkdir -p "$PROTO_DIR/google/rpc" +cp /tmp/api-common-$$/google/rpc/status.proto "$PROTO_DIR/google/rpc/" 2>/dev/null || true +rm -rf /tmp/api-common-$$ + +# Get envoy/type/v3 and xds/core/v3 from data-plane-api +echo "Fetching envoy/type/v3 and xds/core/v3..." 
+git clone --depth 1 --filter=blob:none --sparse https://github.com/envoyproxy/data-plane-api.git /tmp/envoy-types-$$ +cd /tmp/envoy-types-$$ +git sparse-checkout set envoy/type/v3 envoy/type/matcher/v3 envoy/type/metadata/v3 xds/core/v3 +mkdir -p "$PROTO_DIR/envoy/type/v3" "$PROTO_DIR/envoy/type/matcher/v3" "$PROTO_DIR/envoy/type/metadata/v3" "$PROTO_DIR/xds/core/v3" +cp -r envoy/type "$PROTO_DIR/envoy/" 2>/dev/null || true +cp -r xds/core "$PROTO_DIR/xds/" 2>/dev/null || true +rm -rf /tmp/envoy-types-$$ + +# Get xds/type/matcher/v3 and xds/core/v3 from cncf/xds repo +echo "Fetching xds/type/matcher/v3 and xds/core/v3..." +git clone --depth 1 https://github.com/cncf/xds.git /tmp/xds-types-$$ +mkdir -p "$PROTO_DIR/xds/type/matcher/v3" "$PROTO_DIR/xds/core/v3" +find /tmp/xds-types-$$/xds/type/matcher/v3 -name "*.proto" ! -name "*cel*" -exec cp {} "$PROTO_DIR/xds/type/matcher/v3/" \; +find /tmp/xds-types-$$/xds/core/v3 -name "*.proto" -exec cp {} "$PROTO_DIR/xds/core/v3/" \; +rm -rf /tmp/xds-types-$$ + +# Get udpa annotations +echo "Fetching udpa annotations..." +git clone --depth 1 https://github.com/cncf/udpa.git /tmp/udpa-$$ +mkdir -p "$PROTO_DIR/udpa/annotations" +cp /tmp/udpa-$$/udpa/annotations/*.proto "$PROTO_DIR/udpa/annotations/" 2>/dev/null || true +rm -rf /tmp/udpa-$$ + +# Get validate annotations +echo "Fetching validate annotations..." +git clone --depth 1 https://github.com/envoyproxy/protoc-gen-validate.git /tmp/validate-$$ +mkdir -p "$PROTO_DIR/validate" +cp /tmp/validate-$$/validate/validate.proto "$PROTO_DIR/validate/" 2>/dev/null || true +rm -rf /tmp/validate-$$ + +# Get envoy annotations +echo "Fetching envoy annotations..." 
+git clone --depth 1 --filter=blob:none --sparse https://github.com/envoyproxy/envoy.git /tmp/envoy-annotations-$$
+cd /tmp/envoy-annotations-$$
+git sparse-checkout set api/envoy/annotations
+mkdir -p "$PROTO_DIR/envoy/annotations"
+cp api/envoy/annotations/*.proto "$PROTO_DIR/envoy/annotations/" 2>/dev/null || true
+rm -rf /tmp/envoy-annotations-$$
+
+cd "$TEMP_DIR" && echo "Copying .proto files to $PROTO_DIR..."
+
+# Create directories
+mkdir -p "$PROTO_DIR/envoy/service/discovery/v3"
+mkdir -p "$PROTO_DIR/envoy/config/cluster/v3"
+mkdir -p "$PROTO_DIR/envoy/config/endpoint/v3"
+mkdir -p "$PROTO_DIR/envoy/config/listener/v3"
+mkdir -p "$PROTO_DIR/envoy/config/route/v3"
+mkdir -p "$PROTO_DIR/envoy/config/core/v3"
+mkdir -p "$PROTO_DIR/envoy/extensions/transport_sockets/tls/v3"
+mkdir -p "$PROTO_DIR/google/protobuf"
+
+# Copy files
+[ -d envoy ] && cp -r envoy "$PROTO_DIR/" || true
+[ -d google ] && cp -r google "$PROTO_DIR/" || true
+
+# Cleanup
+rm -rf "$TEMP_DIR"
+
+echo "Done! Proto files updated in $PROTO_DIR"
+echo ""
+echo "To generate Ruby code, run:"
+echo " bundle exec bake async:grpc:xds:generate_protos"

From b4c682ee6cd59a6b6ecd15b9a39b14492cfee0fc Mon Sep 17 00:00:00 2001
From: Samuel Williams
Date: Wed, 11 Mar 2026 11:04:14 +1300
Subject: [PATCH 4/6] Try to fix docker tests.
--- .github/workflows/test-xds.yaml | 5 +++++ xds/Dockerfile.backend | 4 ++-- xds/readme.md | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-xds.yaml b/.github/workflows/test-xds.yaml index 784188b..50485cd 100644 --- a/.github/workflows/test-xds.yaml +++ b/.github/workflows/test-xds.yaml @@ -29,6 +29,11 @@ jobs: steps: - uses: actions/checkout@v6 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{matrix.ruby}} + bundler-cache: true + - name: Run tests timeout-minutes: 15 env: diff --git a/xds/Dockerfile.backend b/xds/Dockerfile.backend index e9db597..62301e6 100644 --- a/xds/Dockerfile.backend +++ b/xds/Dockerfile.backend @@ -8,8 +8,8 @@ RUN apt-get update && apt-get install -y \ && rm -rf /var/lib/apt/lists/* # Copy gemfiles, gemspec, and minimal lib files needed for gemspec -# Note: gems.locked is gitignored, so we don't require it - bundle install will resolve deps -COPY gems.rb *.gemspec ./ +# Run `bundle install` locally first to generate gems.locked for reproducible builds +COPY gems.rb gems.locked *.gemspec ./ COPY lib/async/grpc/version.rb lib/async/grpc/version.rb RUN bundle install diff --git a/xds/readme.md b/xds/readme.md index 75a0ae2..bf69aab 100644 --- a/xds/readme.md +++ b/xds/readme.md @@ -15,6 +15,7 @@ The Docker Compose setup includes: ```bash cd xds +bundle install # generates gems.locked for Docker build docker compose up -d ``` From de071905dfce0e45f9fcc206635c865b66516360 Mon Sep 17 00:00:00 2001 From: Samuel Williams Date: Wed, 11 Mar 2026 11:44:05 +1300 Subject: [PATCH 5/6] Update logging. 
---
 lib/async/grpc/xds/discovery_client.rb | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/async/grpc/xds/discovery_client.rb b/lib/async/grpc/xds/discovery_client.rb
index a275bc7..c01d98d 100644
--- a/lib/async/grpc/xds/discovery_client.rb
+++ b/lib/async/grpc/xds/discovery_client.rb
@@ -223,7 +223,7 @@ def build_initial_requests
 def process_response(response, stream)
 type_url = response.type_url
 
- Console.info(self){"Processing response for type_url: #{type_url}"}
+ Console.debug(self, "Processing response:", type_url: type_url)
 
 callback = nil
 resources = nil
@@ -232,7 +232,7 @@ def process_response(response, stream)
 @mutex.synchronize do
 subscription = @subscriptions[type_url]
 unless subscription
- Console.warn(self){"No subscription found for type_url: #{type_url}"}
+ Console.warn(self, "No subscription found!", type_url: type_url)
 return
 end
@@ -254,7 +254,7 @@ def process_response(response, stream)
 if callback
 callback.call(resources)
 else
- Console.warn(self){"No callback found for type_url: #{type_url}"}
+ Console.warn(self, "No callback found!", type_url: type_url)
 end
 
 # Send ACK (acknowledge receipt)

From f2aea6322ebdfb7013ceb35d627c31a26c420ef4 Mon Sep 17 00:00:00 2001
From: Samuel Williams
Date: Wed, 11 Mar 2026 11:55:51 +1300
Subject: [PATCH 6/6] More clean up.
--- lib/async/grpc/xds/discovery_client.rb | 46 +++++++++++--------------- 1 file changed, 19 insertions(+), 27 deletions(-) diff --git a/lib/async/grpc/xds/discovery_client.rb b/lib/async/grpc/xds/discovery_client.rb index c01d98d..716445a 100644 --- a/lib/async/grpc/xds/discovery_client.rb +++ b/lib/async/grpc/xds/discovery_client.rb @@ -114,13 +114,15 @@ def ensure_stream_running raise rescue => error Console.error(self, error) + @mutex.synchronize do @grpc_client&.close @grpc_client = nil @ads_stream = nil @stream_ready_promise = Async::Promise.new end - task.sleep(backoff) + + sleep(backoff) backoff = [backoff * 2, 60].min end end @@ -129,33 +131,23 @@ def ensure_stream_running end def create_and_run_ads_stream(task) - begin - # Create gRPC client - server_uri = @server_uri - unless server_uri.match?(/^https?:\/\//) - use_insecure = @channel_creds&.any?{|cred| cred[:type] == "insecure"} - scheme = use_insecure ? "http" : "https" - server_uri = "#{scheme}://#{server_uri}" - end - Console.info(self){"Connecting to xDS server: #{server_uri}"} - endpoint = Async::HTTP::Endpoint.parse(server_uri, protocol: Async::HTTP::Protocol::HTTP2) - http_client = Async::HTTP::Client.new(endpoint) - grpc_client = Async::GRPC::Client.new(http_client) - - @mutex.synchronize{@grpc_client = grpc_client} - - # ADSStream owns the stream; we act as delegate receiving discovery_response events - ads_stream = ADSStream.new(grpc_client, @node, delegate: self) - ads_stream.run(initial: build_initial_requests) - rescue => error - Console.error(self, "Failed to create ADS stream: #{error.message}") - @mutex.synchronize do - @grpc_client&.close - @grpc_client = nil - @ads_stream = nil - end - raise + # Create gRPC client + server_uri = @server_uri + unless server_uri.match?(/^https?:\/\//) + use_insecure = @channel_creds&.any?{|cred| cred[:type] == "insecure"} + scheme = use_insecure ? 
"http" : "https" + server_uri = "#{scheme}://#{server_uri}" end + Console.debug(self, "Connecting to xDS server:", server_uri: server_uri) + endpoint = Async::HTTP::Endpoint.parse(server_uri, protocol: Async::HTTP::Protocol::HTTP2) + http_client = Async::HTTP::Client.new(endpoint) + grpc_client = Async::GRPC::Client.new(http_client) + + @mutex.synchronize{@grpc_client = grpc_client} + + # ADSStream owns the stream; we act as delegate receiving discovery_response events + ads_stream = ADSStream.new(grpc_client, @node, delegate: self) + ads_stream.run(initial: build_initial_requests) end # ADSStream::Delegate interface - must be public for ADSStream to call