From ca56aa30d498c489180bf1969ea727a40e58937b Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 14 Jan 2020 14:12:57 -0800 Subject: [PATCH 01/86] Start 1.28.0 development cycle --- android-interop-testing/app/build.gradle | 14 +++++++------- android/build.gradle | 6 +++--- build.gradle | 2 +- .../src/test/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- .../testLite/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/testLite/golden/TestService.java.txt | 2 +- core/src/main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/android/clientcache/app/build.gradle | 10 +++++----- examples/android/helloworld/app/build.gradle | 8 ++++---- examples/android/routeguide/app/build.gradle | 8 ++++---- examples/android/strictmode/app/build.gradle | 8 ++++---- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 ++-- .../android/helloworld/app/build.gradle | 8 ++++---- examples/example-kotlin/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 ++-- examples/pom.xml | 4 ++-- 21 files changed, 48 insertions(+), 48 deletions(-) diff --git a/android-interop-testing/app/build.gradle b/android-interop-testing/app/build.gradle index 63ce33190bd..394f5319a7d 100644 --- a/android-interop-testing/app/build.gradle +++ b/android-interop-testing/app/build.gradle @@ -41,7 +41,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -72,12 +72,12 @@ dependencies { implementation 'junit:junit:4.12' // You need to build grpc-java to obtain the grpc libraries below. 
- implementation 'io.grpc:grpc-auth:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-census:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-testing:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-auth:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-census:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-testing:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION androidTestImplementation 'androidx.test:rules:1.1.0-alpha1' androidTestImplementation 'androidx.test:runner:1.1.0-alpha1' diff --git a/android/build.gradle b/android/build.gradle index 50ffee7ec81..3234ec0612b 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'com.android.library' group = "io.grpc" -version = "1.27.0-SNAPSHOT" // CURRENT_GRPC_VERSION +version = "1.28.0-SNAPSHOT" // CURRENT_GRPC_VERSION description = 'gRPC: Android' buildscript { @@ -47,9 +47,9 @@ dependencies { errorprone 'com.google.errorprone:error_prone_core:2.3.4' errorproneJavac 'com.google.errorprone:javac:9+181-r4173-1' - implementation 'io.grpc:grpc-core:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-core:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - testImplementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION testImplementation 'junit:junit:4.12' testImplementation 
'org.robolectric:robolectric:3.7.1' testImplementation 'com.google.truth:truth:1.0' diff --git a/build.gradle b/build.gradle index c94b8d58c1b..259d1403d47 100644 --- a/build.gradle +++ b/build.gradle @@ -18,7 +18,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.27.0-SNAPSHOT" // CURRENT_GRPC_VERSION + version = "1.28.0-SNAPSHOT" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index 3db06475b9a..452fbb6aea9 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.27.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @java.lang.Deprecated public final class TestDeprecatedServiceGrpc { diff --git a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index 5e0fa0c3080..79618564a48 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.27.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") public final class TestServiceGrpc { diff --git a/compiler/src/testLite/golden/TestDeprecatedService.java.txt b/compiler/src/testLite/golden/TestDeprecatedService.java.txt index 819e4ab0661..d5566f0ee5f 100644 --- a/compiler/src/testLite/golden/TestDeprecatedService.java.txt +++ 
b/compiler/src/testLite/golden/TestDeprecatedService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.27.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") @java.lang.Deprecated public final class TestDeprecatedServiceGrpc { diff --git a/compiler/src/testLite/golden/TestService.java.txt b/compiler/src/testLite/golden/TestService.java.txt index 280cae39022..e238eecd955 100644 --- a/compiler/src/testLite/golden/TestService.java.txt +++ b/compiler/src/testLite/golden/TestService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.27.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", comments = "Source: grpc/testing/compiler/test.proto") public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index 3459a99a739..c8b71119446 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -195,7 +195,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - private static final String IMPLEMENTATION_VERSION = "1.27.0-SNAPSHOT"; // CURRENT_GRPC_VERSION + private static final String IMPLEMENTATION_VERSION = "1.28.0-SNAPSHOT"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. 
diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index d1743ccbe88..c7dcbb4742e 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -30,7 +30,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -50,12 +50,12 @@ dependencies { implementation 'com.android.support:appcompat-v7:27.0.2' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' testImplementation 'junit:junit:4.12' testImplementation 'com.google.truth:truth:1.0' - testImplementation 'io.grpc:grpc-testing:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index ed64e56e34d..0c6e7d93b9e 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -29,7 +29,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 
'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -49,8 +49,8 @@ dependencies { implementation 'com.android.support:appcompat-v7:27.0.2' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index 6746656a4c0..871c06db7ff 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -28,7 +28,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -48,8 +48,8 @@ dependencies { implementation 'com.android.support:appcompat-v7:27.0.2' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index 116faf2d16c..c364f747f57 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -29,7 +29,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -49,8 +49,8 @@ dependencies { implementation 'com.android.support:appcompat-v7:28.0.0' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' } diff --git a/examples/build.gradle b/examples/build.gradle index 0bdc279c48e..56a80523fc7 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -22,7 +22,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.11.0' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index aee0e7f1963..57b8047153d 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -22,7 +22,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protocVersion = '3.11.0' dependencies { diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index 65d402790e7..053be337fac 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -23,7 +23,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION def protobufVersion = '3.11.0' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index 5704add95e1..bf3498e1b1a 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.27.0-SNAPSHOT + 1.28.0-SNAPSHOT example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.27.0-SNAPSHOT + 1.28.0-SNAPSHOT 3.11.0 1.7 diff --git a/examples/example-kotlin/android/helloworld/app/build.gradle b/examples/example-kotlin/android/helloworld/app/build.gradle index 91ae914f76b..bc258da7954 100644 --- a/examples/example-kotlin/android/helloworld/app/build.gradle +++ b/examples/example-kotlin/android/helloworld/app/build.gradle @@ -51,7 +51,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -73,9 +73,9 @@ dependencies { implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION } repositories { mavenCentral() } diff --git a/examples/example-kotlin/build.gradle b/examples/example-kotlin/build.gradle index 983c384311d..d0395b66aac 100644 --- a/examples/example-kotlin/build.gradle +++ b/examples/example-kotlin/build.gradle @@ -25,7 +25,7 @@ repositories { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION dependencies { def kotlinVersion = plugins.findPlugin("org.jetbrains.kotlin.jvm").kotlinPluginVersion diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 875f96112e9..62940bb6096 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -23,7 +23,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.27.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION def nettyTcNativeVersion = '2.0.26.Final' def protocVersion = '3.11.0' diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index 917c33c4f08..9fdd9255882 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.27.0-SNAPSHOT + 1.28.0-SNAPSHOT example-tls https://github.com/grpc/grpc-java UTF-8 - 1.27.0-SNAPSHOT + 1.28.0-SNAPSHOT 3.11.0 2.0.25.Final diff --git a/examples/pom.xml b/examples/pom.xml index 2de20a9e450..bf7b1e534ff 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.27.0-SNAPSHOT + 1.28.0-SNAPSHOT examples https://github.com/grpc/grpc-java UTF-8 - 1.27.0-SNAPSHOT + 1.28.0-SNAPSHOT 3.11.0 3.11.0 From cf89b4b53d1400fdfe33fef8bfa69f0f77233f61 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 14 Jan 2020 17:06:11 -0800 Subject: [PATCH 02/86] examples: Bump Gradle to 5.6.2 The rest of the build went to Gradle 5.6.2 in 3c3a823a, but that commit failed to update the examples. This brings the two back in sync. 
--- examples/gradle/wrapper/gradle-wrapper.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/gradle/wrapper/gradle-wrapper.properties b/examples/gradle/wrapper/gradle-wrapper.properties index f4d7b2bf616..7c4388a9216 100644 --- a/examples/gradle/wrapper/gradle-wrapper.properties +++ b/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,5 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.6.2-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists From 1b5d61f18f8e8b19642adae548f59cc441319eb9 Mon Sep 17 00:00:00 2001 From: sanjaypujare Date: Wed, 15 Jan 2020 11:10:19 -0800 Subject: [PATCH 03/86] xds: mark Sds-Ssl threadfactory daemon to allow apps to exit (#6608) --- .../io/grpc/xds/sds/ClientSslContextProviderFactory.java | 6 +++++- .../io/grpc/xds/sds/ServerSslContextProviderFactory.java | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java b/xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java index 4492d5cb7c7..dfb5ba63f09 100644 --- a/xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java +++ b/xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java @@ -19,6 +19,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; import io.grpc.xds.Bootstrapper; import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; @@ -45,7 +46,10 @@ public SslContextProvider createSslContextProvider( return SdsSslContextProvider.getProviderForClient( upstreamTlsContext, 
Bootstrapper.getInstance().readBootstrap().getNode(), - Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setNameFormat("client-sds-sslcontext-provider-%d") + .setDaemon(true) + .build()), /* channelExecutor= */ null); } catch (IOException ioe) { throw new RuntimeException(ioe); diff --git a/xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java b/xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java index a36d0d1f250..e5d69c74bc7 100644 --- a/xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java +++ b/xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java @@ -19,6 +19,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.grpc.xds.Bootstrapper; import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; @@ -46,7 +47,10 @@ public SslContextProvider createSslContextProvider( return SdsSslContextProvider.getProviderForServer( downstreamTlsContext, Bootstrapper.getInstance().readBootstrap().getNode(), - Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(new ThreadFactoryBuilder() + .setNameFormat("server-sds-sslcontext-provider-%d") + .setDaemon(true) + .build()), /* channelExecutor= */ null); } catch (IOException ioe) { throw new RuntimeException(ioe); From cb4a7fb2de6d2e5aa58e2db097d7fd0b69afef27 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Wed, 15 Jan 2020 12:33:57 -0800 Subject: [PATCH 04/86] alts: do not exclude opencensus-api from google-oauth2's transitive dependencies (#6607) --- alts/build.gradle | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/alts/build.gradle b/alts/build.gradle index 9d8741d5236..b5342770288 100644 --- a/alts/build.gradle +++ 
b/alts/build.gradle @@ -24,7 +24,8 @@ dependencies { compile (libraries.google_auth_oauth2_http) { // prefer our own versions instead of google-auth-oauth2-http's dependency exclude group: 'com.google.guava', module: 'guava' - exclude group: 'io.opencensus', module: 'opencensus-api' + // we'll always be more up-to-date + exclude group: 'io.grpc', module: 'grpc-context' } compileOnly libraries.javax_annotation runtime project(':grpc-grpclb') From b8474d61c966f60cc0bad4e5abaca351e90afc9d Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Thu, 16 Jan 2020 11:53:37 -0800 Subject: [PATCH 05/86] netty: fix a race for channelz at server transport creation A race condition was reported by user in #6601: `ServerImpl.start()` calls `NettyServer.start()` while holding `ServerImpl.lock`. `NettyServer.start()` awaits a submitted runnable in eventloop. However, this pending runnable may never be executed because the eventloop might be executing some other task, like `ServerListenerImpl.transportCreated()`, that is trying to acquire `ServerImpl.lock` causing a deadlock. This PR resolves the particular issue reported in #6601 for server with a single port, but `NettyServer` (https://github.com/grpc/grpc-java/blob/v1.26.0/netty/src/main/java/io/grpc/netty/NettyServer.java#L251) and `ServerImpl` (https://github.com/grpc/grpc-java/blob/v1.26.0/core/src/main/java/io/grpc/internal/ServerImpl.java#L184) in general still have the same potential risk of deadlock, which need further fix. 
--- .../main/java/io/grpc/netty/NettyServer.java | 23 ++++------ .../java/io/grpc/netty/NettyServerTest.java | 46 +++++++++++++------ 2 files changed, 40 insertions(+), 29 deletions(-) diff --git a/netty/src/main/java/io/grpc/netty/NettyServer.java b/netty/src/main/java/io/grpc/netty/NettyServer.java index 5c400198817..478687b6abc 100644 --- a/netty/src/main/java/io/grpc/netty/NettyServer.java +++ b/netty/src/main/java/io/grpc/netty/NettyServer.java @@ -56,7 +56,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; import java.util.logging.Level; import java.util.logging.Logger; @@ -93,9 +92,8 @@ class NettyServer implements InternalServer, InternalWithLogId { private final List streamTracerFactories; private final TransportTracer.Factory transportTracerFactory; private final InternalChannelz channelz; - // Only modified in event loop but safe to read any time. Set at startup and unset at shutdown. - private final AtomicReference> listenSocketStats = - new AtomicReference<>(); + // Only modified in event loop but safe to read any time. 
+ private volatile InternalInstrumented listenSocketStats; NettyServer( SocketAddress address, ChannelFactory channelFactory, @@ -149,7 +147,7 @@ public SocketAddress getListenSocketAddress() { @Override public InternalInstrumented getListenSocketStats() { - return listenSocketStats.get(); + return listenSocketStats; } @Override @@ -251,19 +249,13 @@ public void operationComplete(ChannelFuture future) throws Exception { throw new IOException("Failed to bind", future.cause()); } channel = future.channel(); - Future channelzFuture = channel.eventLoop().submit(new Runnable() { + channel.eventLoop().execute(new Runnable() { @Override public void run() { - InternalInstrumented listenSocket = new ListenSocket(channel); - listenSocketStats.set(listenSocket); - channelz.addListenSocket(listenSocket); + listenSocketStats = new ListenSocket(channel); + channelz.addListenSocket(listenSocketStats); } }); - try { - channelzFuture.await(); - } catch (InterruptedException ex) { - throw new RuntimeException("Interrupted while registering listen socket to channelz", ex); - } } @Override @@ -278,7 +270,8 @@ public void operationComplete(ChannelFuture future) throws Exception { if (!future.isSuccess()) { log.log(Level.WARNING, "Error shutting down server", future.cause()); } - InternalInstrumented stats = listenSocketStats.getAndSet(null); + InternalInstrumented stats = listenSocketStats; + listenSocketStats = null; if (stats != null) { channelz.removeListenSocket(stats); } diff --git a/netty/src/test/java/io/grpc/netty/NettyServerTest.java b/netty/src/test/java/io/grpc/netty/NettyServerTest.java index b873785e4ff..141cb9972b9 100644 --- a/netty/src/test/java/io/grpc/netty/NettyServerTest.java +++ b/netty/src/test/java/io/grpc/netty/NettyServerTest.java @@ -29,16 +29,19 @@ import io.grpc.InternalInstrumented; import io.grpc.Metadata; import io.grpc.ServerStreamTracer; +import io.grpc.internal.FixedObjectPool; import io.grpc.internal.ServerListener; import 
io.grpc.internal.ServerStream; import io.grpc.internal.ServerTransport; import io.grpc.internal.ServerTransportListener; -import io.grpc.internal.SharedResourcePool; import io.grpc.internal.TransportTracer; import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelOption; +import io.netty.channel.ReflectiveChannelFactory; import io.netty.channel.WriteBufferWaterMark; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.util.AsciiString; import java.net.InetSocketAddress; import java.net.Socket; @@ -48,6 +51,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.junit.After; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -55,6 +59,13 @@ @RunWith(JUnit4.class) public class NettyServerTest { private final InternalChannelz channelz = new InternalChannelz(); + private final NioEventLoopGroup eventLoop = new NioEventLoopGroup(1); + + @After + public void tearDown() throws Exception { + eventLoop.shutdownGracefully(0, 0, TimeUnit.SECONDS); + eventLoop.awaitTermination(5, TimeUnit.SECONDS); + } @Test public void startStop() throws Exception { @@ -79,10 +90,10 @@ class TestProtocolNegotiator implements ProtocolNegotiator { TestProtocolNegotiator protocolNegotiator = new TestProtocolNegotiator(); NettyServer ns = new NettyServer( addr, - Utils.DEFAULT_SERVER_CHANNEL_FACTORY, + new ReflectiveChannelFactory<>(NioServerSocketChannel.class), new HashMap, Object>(), - SharedResourcePool.forResource(Utils.DEFAULT_BOSS_EVENT_LOOP_GROUP), - SharedResourcePool.forResource(Utils.DEFAULT_WORKER_EVENT_LOOP_GROUP), + new FixedObjectPool<>(eventLoop), + new FixedObjectPool<>(eventLoop), protocolNegotiator, Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -119,14 +130,14 @@ public void serverShutdown() { } @Test - public 
void getPort_notStarted() throws Exception { + public void getPort_notStarted() { InetSocketAddress addr = new InetSocketAddress(0); NettyServer ns = new NettyServer( addr, - Utils.DEFAULT_SERVER_CHANNEL_FACTORY, + new ReflectiveChannelFactory<>(NioServerSocketChannel.class), new HashMap, Object>(), - SharedResourcePool.forResource(Utils.DEFAULT_BOSS_EVENT_LOOP_GROUP), - SharedResourcePool.forResource(Utils.DEFAULT_WORKER_EVENT_LOOP_GROUP), + new FixedObjectPool<>(eventLoop), + new FixedObjectPool<>(eventLoop), ProtocolNegotiators.plaintext(), Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -161,10 +172,10 @@ public void childChannelOptions() throws Exception { InetSocketAddress addr = new InetSocketAddress(0); NettyServer ns = new NettyServer( addr, - Utils.DEFAULT_SERVER_CHANNEL_FACTORY, + new ReflectiveChannelFactory<>(NioServerSocketChannel.class), channelOptions, - SharedResourcePool.forResource(Utils.DEFAULT_BOSS_EVENT_LOOP_GROUP), - SharedResourcePool.forResource(Utils.DEFAULT_WORKER_EVENT_LOOP_GROUP), + new FixedObjectPool<>(eventLoop), + new FixedObjectPool<>(eventLoop), ProtocolNegotiators.plaintext(), Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -211,10 +222,10 @@ public void channelzListenSocket() throws Exception { InetSocketAddress addr = new InetSocketAddress(0); NettyServer ns = new NettyServer( addr, - Utils.DEFAULT_SERVER_CHANNEL_FACTORY, + new ReflectiveChannelFactory<>(NioServerSocketChannel.class), new HashMap, Object>(), - SharedResourcePool.forResource(Utils.DEFAULT_BOSS_EVENT_LOOP_GROUP), - SharedResourcePool.forResource(Utils.DEFAULT_WORKER_EVENT_LOOP_GROUP), + new FixedObjectPool<>(eventLoop), + new FixedObjectPool<>(eventLoop), ProtocolNegotiators.plaintext(), Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -239,8 +250,15 @@ public void serverShutdown() { shutdownCompleted.set(null); } }); + assertThat(((InetSocketAddress) ns.getListenSocketAddress()).getPort()).isGreaterThan(0); + // 
SocketStats won't be available until the event loop task of adding SocketStats created by + // ns.start() complete. So submit a noop task and await until it's drained. + eventLoop.submit(new Runnable() { + @Override + public void run() {} + }).await(5, TimeUnit.SECONDS); InternalInstrumented listenSocket = ns.getListenSocketStats(); assertSame(listenSocket, channelz.getSocket(id(listenSocket))); From 57bc1910e4b23220e6a4b1f0f9326e5347e2ec41 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 17 Jan 2020 14:50:20 -0800 Subject: [PATCH 06/86] xds: incorporate initial fetch timeout as resource not found for xDS protocol (#6570) Use timeout to conclude resource not exist in xDS protocol. RDS and EDS protocols are quasi-incremental, each response may not include all the requested resources that present on server side. The way to conclude a requested resource not exist is to use a timeout. In Envoy, this timeout is defined as initial fetch timeout, which is set up at the time client starts subscribing to some resource and disarmed at the time client receives update for that resource. In gRPC's implementation, we set this timeout to be constant 15 seconds, instead of getting its value from ConfigSource proto message. Initial fetch timeout was initially considered to be not required for LDS and CDS. But gRPC is trying to avoid the temporary inconsistency in the case of racing request/response. After resource fetch timers are fired, some resources are known to be absent for sure. XdsClient manages its knowledge for resources that are known to be present or absent with caches. 
--- .../main/java/io/grpc/xds/XdsClientImpl.java | 306 +++++++- .../java/io/grpc/xds/XdsClientImplTest.java | 662 +++++++++++++++++- .../java/io/grpc/xds/XdsNameResolverTest.java | 3 + 3 files changed, 924 insertions(+), 47 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 7869599c81b..70fcd0451e1 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -69,6 +69,10 @@ final class XdsClientImpl extends XdsClient { private static final Logger logger = Logger.getLogger(XdsClientImpl.class.getName()); + // Longest time to wait, since the subscription to some resource, for concluding its absence. + @VisibleForTesting + static final int INITIAL_RESOURCE_FETCH_TIMEOUT_SEC = 15; + @VisibleForTesting static final String ADS_TYPE_URL_LDS = "type.googleapis.com/envoy.api.v2.Listener"; @VisibleForTesting @@ -96,12 +100,18 @@ final class XdsClientImpl extends XdsClient { // of whole Cluster messages to reduce memory usage. private final Map clusterNamesToClusterUpdates = new HashMap<>(); + // Cached CDS resources that are known to be absent. + private final Set absentCdsResources = new HashSet<>(); + // Cached data for EDS responses, keyed by cluster names. // CDS responses indicate absence of clusters and EDS responses indicate presence of clusters. // Optimization: cache EndpointUpdate, which contains only information needed by gRPC, instead // of whole ClusterLoadAssignment messages to reduce memory usage. private final Map clusterNamesToEndpointUpdates = new HashMap<>(); + // Cached EDS resources that are known to be absent. + private final Set absentEdsResources = new HashSet<>(); + // Cluster watchers waiting for cluster information updates. Multiple cluster watchers // can watch on information for the same cluster. 
private final Map> clusterWatchers = new HashMap<>(); @@ -113,6 +123,23 @@ final class XdsClientImpl extends XdsClient { // Load reporting clients, with each responsible for reporting loads of a single cluster. private final Map lrsClients = new HashMap<>(); + // Resource fetch timers are used to conclude absence of resources. Each timer is activated when + // subscription for the resource starts and disarmed on first update for the resource. + + // Timers for concluding CDS resources not found. + private final Map cdsRespTimers = new HashMap<>(); + + // Timers for concluding EDS resources not found. + private final Map edsRespTimers = new HashMap<>(); + + // Timer for concluding the currently requesting LDS resource not found. + @Nullable + private ScheduledHandle ldsRespTimer; + + // Timer for concluding the currently requesting RDS resource not found. + @Nullable + private ScheduledHandle rdsRespTimer; + @Nullable private AdsStream adsStream; @Nullable @@ -157,6 +184,7 @@ void shutdown() { if (adsStream != null) { adsStream.close(Status.CANCELLED.withDescription("shutdown").asException()); } + cleanUpResources(); for (LoadReportClientImpl lrsClient : lrsClients.values()) { lrsClient.stopLoadReporting(); } @@ -165,6 +193,33 @@ void shutdown() { } } + /** + * Purge cache for resources and cancel resource fetch timers. 
+ */ + private void cleanUpResources() { + clusterNamesToClusterUpdates.clear(); + absentCdsResources.clear(); + clusterNamesToEndpointUpdates.clear(); + absentEdsResources.clear(); + + if (ldsRespTimer != null) { + ldsRespTimer.cancel(); + ldsRespTimer = null; + } + if (rdsRespTimer != null) { + rdsRespTimer.cancel(); + rdsRespTimer = null; + } + for (ScheduledHandle handle : cdsRespTimers.values()) { + handle.cancel(); + } + cdsRespTimers.clear(); + for (ScheduledHandle handle : edsRespTimers.values()) { + handle.cancel(); + } + edsRespTimers.clear(); + } + @Override void watchConfigData(String hostName, int port, ConfigWatcher watcher) { checkState(configWatcher == null, "ConfigWatcher is already registered"); @@ -183,6 +238,11 @@ void watchConfigData(String hostName, int port, ConfigWatcher watcher) { startRpcStream(); } adsStream.sendXdsRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName)); + ldsRespTimer = + syncContext + .schedule( + new LdsResourceFetchTimeoutTask(ldsResourceName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); } @Override @@ -201,18 +261,33 @@ void watchClusterData(String clusterName, ClusterWatcher watcher) { } watchers.add(watcher); // If local cache contains cluster information to be watched, notify the watcher immediately. + if (absentCdsResources.contains(clusterName)) { + watcher.onError( + Status.NOT_FOUND + .withDescription( + "Cluster resource [" + clusterName + "] not found.")); + return; + } if (clusterNamesToClusterUpdates.containsKey(clusterName)) { watcher.onClusterChanged(clusterNamesToClusterUpdates.get(clusterName)); - } - if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { - // Currently in retry backoff. return; } + if (needRequest) { + if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { + // Currently in retry backoff. 
+ return; + } if (adsStream == null) { startRpcStream(); } adsStream.sendXdsRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet()); + ScheduledHandle timeoutHandle = + syncContext + .schedule( + new CdsResourceFetchTimeoutTask(clusterName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); + cdsRespTimers.put(clusterName, timeoutHandle); } } @@ -228,6 +303,15 @@ void cancelClusterDataWatch(String clusterName, ClusterWatcher watcher) { if (watchers.isEmpty()) { logger.log(Level.FINE, "Stop watching cluster {0}", clusterName); clusterWatchers.remove(clusterName); + // Remove the corresponding CDS entry. + absentCdsResources.remove(clusterName); + clusterNamesToClusterUpdates.remove(clusterName); + // Cancel and delete response timer waiting for the corresponding resource. + if (cdsRespTimers.containsKey(clusterName)) { + cdsRespTimers.get(clusterName).cancel(); + cdsRespTimers.remove(clusterName); + } + // If unsubscribe the last resource, do NOT send a CDS request for an empty resource list. // This is a workaround for CDS protocol resource unsubscribe. if (clusterWatchers.isEmpty()) { @@ -262,18 +346,33 @@ void watchEndpointData(String clusterName, EndpointWatcher watcher) { watchers.add(watcher); // If local cache contains endpoint information for the cluster to be watched, notify // the watcher immediately. + if (absentEdsResources.contains(clusterName)) { + watcher.onError( + Status.NOT_FOUND + .withDescription( + "Endpoint resource for cluster [" + clusterName + "] not found.")); + return; + } if (clusterNamesToEndpointUpdates.containsKey(clusterName)) { watcher.onEndpointChanged(clusterNamesToEndpointUpdates.get(clusterName)); - } - if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { - // Currently in retry backoff. return; } + if (needRequest) { + if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { + // Currently in retry backoff. 
+ return; + } if (adsStream == null) { startRpcStream(); } adsStream.sendXdsRequest(ADS_TYPE_URL_EDS, endpointWatchers.keySet()); + ScheduledHandle timeoutHandle = + syncContext + .schedule( + new EdsResourceFetchTimeoutTask(clusterName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); + edsRespTimers.put(clusterName, timeoutHandle); } } @@ -290,7 +389,13 @@ void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) { logger.log(Level.FINE, "Stop watching endpoints in cluster {0}", clusterName); endpointWatchers.remove(clusterName); // Remove the corresponding EDS cache entry. + absentEdsResources.remove(clusterName); clusterNamesToEndpointUpdates.remove(clusterName); + // Cancel and delete response timer waiting for the corresponding resource. + if (edsRespTimers.containsKey(clusterName)) { + edsRespTimers.get(clusterName).cancel(); + edsRespTimers.remove(clusterName); + } // No longer interested in this cluster, send an updated EDS request to unsubscribe // this resource. if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { @@ -428,6 +533,12 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { adsStream.sendAckRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), ldsResponse.getVersionInfo()); + if (clusterName != null || rdsRouteConfigName != null) { + if (ldsRespTimer != null) { + ldsRespTimer.cancel(); + ldsRespTimer = null; + } + } if (clusterName != null) { // Found clusterName in the in-lined RouteConfiguration. ConfigUpdate configUpdate = ConfigUpdate.newBuilder().setClusterName(clusterName).build(); @@ -436,12 +547,24 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { // Send an RDS request if the resource to request has changed. if (!rdsRouteConfigName.equals(adsStream.rdsResourceName)) { adsStream.sendXdsRequest(ADS_TYPE_URL_RDS, ImmutableList.of(rdsRouteConfigName)); + // Cancel the timer for fetching the previous RDS resource. 
+ if (rdsRespTimer != null) { + rdsRespTimer.cancel(); + } + rdsRespTimer = + syncContext + .schedule( + new RdsResourceFetchTimeoutTask(rdsRouteConfigName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); } } else { - // The requested Listener does not exist. - configWatcher.onError( - Status.NOT_FOUND.withDescription( - "Listener for requested resource [" + ldsResourceName + "] does not exist")); + // The requested Listener does not present in this LDS response. + if (ldsRespTimer == null) { + configWatcher.onError( + Status.NOT_FOUND.withDescription( + "Listener resource for listener [" + ldsResourceName + "] does not exist")); + } + } } @@ -489,11 +612,13 @@ private void handleRdsResponse(DiscoveryResponse rdsResponse) { // Notify the ConfigWatcher if this RDS response contains the most recently requested // RDS resource. if (clusterName != null) { + if (rdsRespTimer != null) { + rdsRespTimer.cancel(); + rdsRespTimer = null; + } ConfigUpdate configUpdate = ConfigUpdate.newBuilder().setClusterName(clusterName).build(); configWatcher.onConfigChanged(configUpdate); } - // Do not notify an error to the ConfigWatcher. RDS protocol is incremental, not receiving - // requested RouteConfiguration in this response does not imply absence. } /** @@ -633,24 +758,55 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { cdsResponse.getVersionInfo()); // Update local CDS cache with data in this response. + absentCdsResources.removeAll(clusterUpdates.keySet()); + for (String clusterName : clusterNamesToClusterUpdates.keySet()) { + if (!clusterUpdates.containsKey(clusterName)) { + // Some previously existing resource no longer exists. + absentCdsResources.add(clusterName); + } + } clusterNamesToClusterUpdates.clear(); clusterNamesToClusterUpdates.putAll(clusterUpdates); // Remove EDS cache entries for ClusterLoadAssignments not referenced by this CDS response. 
+ for (String clusterName : clusterNamesToEndpointUpdates.keySet()) { + if (!edsServices.contains(clusterName)) { + absentEdsResources.add(clusterName); + // Notify EDS resource removal to watchers. + if (endpointWatchers.containsKey(clusterName)) { + Set watchers = endpointWatchers.get(clusterName); + for (EndpointWatcher watcher : watchers) { + watcher.onError( + Status.NOT_FOUND + .withDescription( + "Endpoint resource for cluster [" + clusterName + "] is deleted.")); + } + } + } + } clusterNamesToEndpointUpdates.keySet().retainAll(edsServices); - // Notify watchers if clusters interested in present. Otherwise, notify with an error. + for (String clusterName : clusterUpdates.keySet()) { + if (cdsRespTimers.containsKey(clusterName)) { + cdsRespTimers.get(clusterName).cancel(); + cdsRespTimers.remove(clusterName); + } + } + + // Notify watchers if clusters interested in present in this CDS response. for (Map.Entry> entry : clusterWatchers.entrySet()) { - if (clusterUpdates.containsKey(entry.getKey())) { - ClusterUpdate clusterUpdate = clusterUpdates.get(entry.getKey()); + String clusterName = entry.getKey(); + if (clusterUpdates.containsKey(clusterName)) { + ClusterUpdate clusterUpdate = clusterUpdates.get(clusterName); for (ClusterWatcher watcher : entry.getValue()) { watcher.onClusterChanged(clusterUpdate); } - } else { + } else if (!cdsRespTimers.containsKey(clusterName)) { + // Update for previously present resource being removed. for (ClusterWatcher watcher : entry.getValue()) { watcher.onError( - Status.NOT_FOUND.withDescription( - "Requested cluster [" + entry.getKey() + "] does not exist")); + Status.NOT_FOUND + .withDescription("Cluster resource [" + clusterName + "] not found.")); } } } @@ -752,11 +908,20 @@ private void handleEdsResponse(DiscoveryResponse edsResponse) { // Update local EDS cache by inserting updated endpoint information. 
clusterNamesToEndpointUpdates.putAll(endpointUpdates); + absentEdsResources.removeAll(endpointUpdates.keySet()); // Notify watchers waiting for updates of endpoint information received in this EDS response. for (Map.Entry entry : endpointUpdates.entrySet()) { - for (EndpointWatcher watcher : endpointWatchers.get(entry.getKey())) { - watcher.onEndpointChanged(entry.getValue()); + String clusterName = entry.getKey(); + // Cancel and delete response timeout timer. + if (edsRespTimers.containsKey(clusterName)) { + edsRespTimers.get(clusterName).cancel(); + edsRespTimers.remove(clusterName); + } + if (endpointWatchers.containsKey(clusterName)) { + for (EndpointWatcher watcher : endpointWatchers.get(clusterName)) { + watcher.onEndpointChanged(entry.getValue()); + } } } } @@ -768,12 +933,33 @@ public void run() { startRpcStream(); if (configWatcher != null) { adsStream.sendXdsRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName)); + ldsRespTimer = + syncContext + .schedule( + new LdsResourceFetchTimeoutTask(ldsResourceName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); } if (!clusterWatchers.isEmpty()) { adsStream.sendXdsRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet()); + for (String clusterName : clusterWatchers.keySet()) { + ScheduledHandle timeoutHandle = + syncContext + .schedule( + new CdsResourceFetchTimeoutTask(clusterName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); + cdsRespTimers.put(clusterName, timeoutHandle); + } } if (!endpointWatchers.isEmpty()) { adsStream.sendXdsRequest(ADS_TYPE_URL_EDS, endpointWatchers.keySet()); + for (String clusterName : endpointWatchers.keySet()) { + ScheduledHandle timeoutHandle = + syncContext + .schedule( + new EdsResourceFetchTimeoutTask(clusterName), + INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); + edsRespTimers.put(clusterName, timeoutHandle); + } } } } @@ -892,6 +1078,7 @@ private void handleStreamClosed(Status error) { logger.log(Level.FINE, 
error.getDescription(), error.getCause()); closed = true; cleanUp(); + cleanUpResources(); if (responseReceived || retryBackoffPolicy == null) { // Reset the backoff sequence if had received a response, or backoff sequence // has never been initialized. @@ -1042,6 +1229,85 @@ private void sendNackRequest(String typeUrl, Collection resourceNames, } } + private abstract static class ResourceFetchTimeoutTask implements Runnable { + protected final String resourceName; + + ResourceFetchTimeoutTask(String resourceName) { + this.resourceName = resourceName; + } + } + + @VisibleForTesting + final class LdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { + + LdsResourceFetchTimeoutTask(String resourceName) { + super(resourceName); + } + + @Override + public void run() { + ldsRespTimer = null; + configWatcher.onError( + Status.NOT_FOUND + .withDescription("Listener resource for listener [" + resourceName + "] not found.")); + } + } + + @VisibleForTesting + final class RdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { + + RdsResourceFetchTimeoutTask(String resourceName) { + super(resourceName); + } + + @Override + public void run() { + rdsRespTimer = null; + configWatcher.onError(Status.NOT_FOUND + .withDescription( + "RouteConfiguration resource for route [" + resourceName + "] not found.")); + } + } + + @VisibleForTesting + final class CdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { + + CdsResourceFetchTimeoutTask(String resourceName) { + super(resourceName); + } + + @Override + public void run() { + cdsRespTimers.remove(resourceName); + absentCdsResources.add(resourceName); + for (ClusterWatcher wat : clusterWatchers.get(resourceName)) { + wat.onError( + Status.NOT_FOUND + .withDescription("Cluster resource [" + resourceName + "] not found.")); + } + } + } + + @VisibleForTesting + final class EdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { + + EdsResourceFetchTimeoutTask(String resourceName) { + super(resourceName); + 
} + + @Override + public void run() { + edsRespTimers.remove(resourceName); + absentEdsResources.add(resourceName); + for (EndpointWatcher wat : endpointWatchers.get(resourceName)) { + wat.onError( + Status.NOT_FOUND + .withDescription( + "Endpoint resource for cluster [" + resourceName + "] not found.")); + } + } + } + /** * Returns {@code true} iff {@code hostName} matches the domain name {@code pattern} with * case-insensitive. diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index d35e6b24e1a..f322bfed75c 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -75,6 +75,8 @@ import io.grpc.inprocess.InProcessServerBuilder; import io.grpc.internal.BackoffPolicy; import io.grpc.internal.FakeClock; +import io.grpc.internal.FakeClock.ScheduledTask; +import io.grpc.internal.FakeClock.TaskFilter; import io.grpc.stub.StreamObserver; import io.grpc.testing.GrpcCleanupRule; import io.grpc.xds.Bootstrapper.ChannelCreds; @@ -96,6 +98,7 @@ import java.util.List; import java.util.Queue; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.junit.After; @@ -129,6 +132,42 @@ public boolean shouldAccept(Runnable command) { } }; + private static final FakeClock.TaskFilter LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER = + new TaskFilter() { + @Override + public boolean shouldAccept(Runnable command) { + return command.toString() + .contains(XdsClientImpl.LdsResourceFetchTimeoutTask.class.getSimpleName()); + } + }; + + private static final FakeClock.TaskFilter RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER = + new TaskFilter() { + @Override + public boolean shouldAccept(Runnable command) { + return command.toString() + .contains(XdsClientImpl.RdsResourceFetchTimeoutTask.class.getSimpleName()); + } + }; + + private static final 
FakeClock.TaskFilter CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER = + new TaskFilter() { + @Override + public boolean shouldAccept(Runnable command) { + return command.toString() + .contains(XdsClientImpl.CdsResourceFetchTimeoutTask.class.getSimpleName()); + } + }; + + private static final FakeClock.TaskFilter EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER = + new FakeClock.TaskFilter() { + @Override + public boolean shouldAccept(Runnable command) { + return command.toString() + .contains(XdsClientImpl.EdsResourceFetchTimeoutTask.class.getSimpleName()); + } + }; + @Rule public final GrpcCleanupRule cleanupRule = new GrpcCleanupRule(); @@ -273,7 +312,7 @@ public void tearDown() { /** * Client receives an LDS response that does not contain a Listener for the requested resource. * The LDS response is ACKed. - * The config watcher is notified with an error. + * The config watcher is notified with an error after its response timer expires. */ @Test public void ldsResponseWithoutMatchingResource() { @@ -286,6 +325,8 @@ public void ldsResponseWithoutMatchingResource() { .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", XdsClientImpl.ADS_TYPE_URL_LDS, ""))); + assertThat(fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + List listeners = ImmutableList.of( Any.pack(buildListener("bar.googleapis.com", Any.pack(HttpConnectionManager.newBuilder() @@ -314,14 +355,14 @@ public void ldsResponseWithoutMatchingResource() { .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); + verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); + verify(configWatcher, never()).onError(any(Status.class)); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); ArgumentCaptor errorStatusCaptor = ArgumentCaptor.forClass(null); verify(configWatcher).onError(errorStatusCaptor.capture()); Status error = errorStatusCaptor.getValue(); 
assertThat(error.getCode()).isEqualTo(Code.NOT_FOUND); - assertThat(error.getDescription()) - .isEqualTo("Listener for requested resource [foo.googleapis.com:8080] does not exist"); - - verifyNoMoreInteractions(requestObserver); + assertThat(fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } /** @@ -329,7 +370,7 @@ public void ldsResponseWithoutMatchingResource() { * that listener. But the RouteConfiguration message is invalid as it does not contain any * VirtualHost with domains matching the requested hostname. * The LDS response is NACKed, as if the XdsClient has not received this response. - * The config watcher is NOT notified with an error. + * The config watcher is notified with an error after its response timer expires.. */ @Test public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { @@ -341,6 +382,7 @@ public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", XdsClientImpl.ADS_TYPE_URL_LDS, ""))); + assertThat(fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); RouteConfiguration routeConfig = buildRouteConfiguration( @@ -366,7 +408,13 @@ public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); verify(configWatcher, never()).onError(any(Status.class)); - verifyNoMoreInteractions(requestObserver); + + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + ArgumentCaptor errorStatusCaptor = ArgumentCaptor.forClass(null); + verify(configWatcher).onError(errorStatusCaptor.capture()); + Status error = errorStatusCaptor.getValue(); + assertThat(error.getCode()).isEqualTo(Code.NOT_FOUND); + assertThat(fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } /** @@ -385,6 +433,10 @@ public void resolveVirtualHostInLdsResponse() { verify(requestObserver) 
.onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", XdsClientImpl.ADS_TYPE_URL_LDS, ""))); + ScheduledTask ldsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(ldsRespTimer.isCancelled()).isFalse(); List listeners = ImmutableList.of( Any.pack(buildListener("bar.googleapis.com", @@ -422,6 +474,8 @@ public void resolveVirtualHostInLdsResponse() { buildDiscoveryResponse("0", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"); responseObserver.onNext(response); + assertThat(ldsRespTimer.isCancelled()).isTrue(); + // Client sends an ACK request. verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", @@ -438,8 +492,8 @@ public void resolveVirtualHostInLdsResponse() { * Client receives an RDS response (after a previous LDS request-response) that does not contain a * RouteConfiguration for the requested resource while each received RouteConfiguration is valid. * The RDS response is ACKed. - * The config watcher is NOT notified with an error (RDS protocol is incremental, responses - * not containing requested resources does not indicate absence). + * After the resource fetch timeout expires, watcher waiting for the resource is notified + * with a resource not found error. */ @Test public void rdsResponseWithoutMatchingResource() { @@ -477,6 +531,8 @@ public void rdsResponseWithoutMatchingResource() { .onNext(eq(buildDiscoveryRequest(NODE, "", "route-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_RDS, ""))); + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + // Management server should only sends RouteConfiguration messages with at least one // VirtualHost with domains matching requested hostname. Otherwise, it is invalid data. 
List routeConfigs = ImmutableList.of( @@ -502,6 +558,11 @@ public void rdsResponseWithoutMatchingResource() { verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); verify(configWatcher, never()).onError(any(Status.class)); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(configWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } /** @@ -533,6 +594,8 @@ public void resolveVirtualHostInRdsResponse() { // Client sends an ACK LDS request and an RDS request for "route-foo.googleapis.com". (Omitted) + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + // Management server should only sends RouteConfiguration messages with at least one // VirtualHost with domains matching requested hostname. Otherwise, it is invalid data. List routeConfigs = ImmutableList.of( @@ -553,6 +616,8 @@ public void resolveVirtualHostInRdsResponse() { response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); + // Client sent an ACK RDS request. verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "0", "route-foo.googleapis.com", @@ -568,7 +633,7 @@ public void resolveVirtualHostInRdsResponse() { * RouteConfiguration message for the requested resource. But the RouteConfiguration message * is invalid as it does not contain any VirtualHost with domains matching the requested * hostname. - * The LDS response is NACKed, as if the XdsClient has not received this response. + * The RDS response is NACKed, as if the XdsClient has not received this response. * The config watcher is NOT notified with an error. 
*/ @Test @@ -595,6 +660,8 @@ public void failToFindVirtualHostInRdsResponse() { // Client sends an ACK LDS request and an RDS request for "route-foo.googleapis.com". (Omitted) + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( @@ -622,6 +689,11 @@ public void failToFindVirtualHostInRdsResponse() { verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); verify(configWatcher, never()).onError(any(Status.class)); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(configWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } /** @@ -629,7 +701,7 @@ public void failToFindVirtualHostInRdsResponse() { * RouteConfiguration message for the requested resource. But the RouteConfiguration message * is invalid as the VirtualHost with domains matching the requested hostname contains invalid * data, its RouteAction message is absent. - * The LDS response is NACKed, as if the XdsClient has not received this response. + * The RDS response is NACKed, as if the XdsClient has not received this response. * The config watcher is NOT notified with an error. */ @Test @@ -656,6 +728,8 @@ public void matchingVirtualHostDoesNotContainRouteAction() { // Client sends an ACK LDS request and an RDS request for "route-foo.googleapis.com". (Omitted) + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + // A VirtualHost with a Route that contains only redirect configuration. 
VirtualHost virtualHost = VirtualHost.newBuilder() @@ -684,6 +758,11 @@ public void matchingVirtualHostDoesNotContainRouteAction() { verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); verify(configWatcher, never()).onError(any(Status.class)); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(configWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + assertThat(fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } /** @@ -835,6 +914,16 @@ public void notifyUpdatedResources() { verify(configWatcher, times(4)).onConfigChanged(configUpdateCaptor.capture()); assertThat(configUpdateCaptor.getValue().getClusterName()) .isEqualTo("an-updated-cluster.googleapis.com"); + + // Management server sends back an LDS response indicating all Listener resources are removed. + response = + buildDiscoveryResponse("3", ImmutableList.of(), + XdsClientImpl.ADS_TYPE_URL_LDS, "0003"); + responseObserver.onNext(response); + + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(configWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); } // TODO(chengyuanzhang): tests for timeout waiting for responses for incremental @@ -885,6 +974,13 @@ public void waitRdsResponsesForRequestedResource() { .onNext(eq(buildDiscoveryRequest(NODE, "", "route-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_RDS, ""))); + ScheduledTask rdsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(rdsRespTimer.isCancelled()).isFalse(); + + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC - 2, TimeUnit.SECONDS); + // Management server sends back an RDS response that does not contain RouteConfiguration // for the requested 
resource. List routeConfigs = ImmutableList.of( @@ -904,6 +1000,9 @@ public void waitRdsResponsesForRequestedResource() { // Client waits for future RDS responses silently. verifyNoMoreInteractions(configWatcher); + assertThat(rdsRespTimer.isCancelled()).isFalse(); + + fakeClock.forwardTime(1, TimeUnit.SECONDS); // Management server sends back another RDS response containing the RouteConfiguration // for the requested resource. @@ -929,6 +1028,7 @@ public void waitRdsResponsesForRequestedResource() { verify(configWatcher).onConfigChanged(configUpdateCaptor.capture()); assertThat(configUpdateCaptor.getValue().getClusterName()) .isEqualTo("another-cluster.googleapis.com"); + assertThat(rdsRespTimer.isCancelled()).isTrue(); } /** @@ -1011,14 +1111,91 @@ public void routeConfigurationRemovedNotifiedToWatcher() { verify(configWatcher).onError(errorStatusCaptor.capture()); Status error = errorStatusCaptor.getValue(); assertThat(error.getCode()).isEqualTo(Code.NOT_FOUND); - assertThat(error.getDescription()) - .isEqualTo("Listener for requested resource [foo.googleapis.com:8080] does not exist"); + } + + /** + * Management server sends another LDS response for updating the RDS resource to be requested + * while client is currently requesting for a previously given RDS resource name. + */ + @Test + public void updateRdsRequestResourceWhileInitialResourceFetchInProgress() { + xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + StreamObserver responseObserver = responseObservers.poll(); + StreamObserver requestObserver = requestObservers.poll(); + + // Management sends back an LDS response telling client to do RDS. + Rds rdsConfig = + Rds.newBuilder() + // Must set to use ADS. 
+ .setConfigSource( + ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance())) + .setRouteConfigName("route-foo.googleapis.com") + .build(); + + List listeners = ImmutableList.of( + Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) + ); + DiscoveryResponse response = + buildDiscoveryResponse("0", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"); + responseObserver.onNext(response); + + // Client sends an (first) RDS request. + verify(requestObserver) + .onNext(eq(buildDiscoveryRequest(NODE, "", "route-foo.googleapis.com", + XdsClientImpl.ADS_TYPE_URL_RDS, ""))); + + ScheduledTask rdsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(rdsRespTimer.isCancelled()).isFalse(); + + // Management sends back another LDS response updating the Listener information to use + // another resource name for doing RDS. + rdsConfig = + Rds.newBuilder() + // Must set to use ADS. + .setConfigSource( + ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance())) + .setRouteConfigName("route-bar.googleapis.com") + .build(); + + listeners = ImmutableList.of( + Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) + ); + response = buildDiscoveryResponse("1", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0001"); + responseObserver.onNext(response); + + // Client sent a new RDS request with updated resource name. 
+ verify(requestObserver) + .onNext(eq(buildDiscoveryRequest(NODE, "", "route-bar.googleapis.com", + XdsClientImpl.ADS_TYPE_URL_RDS, ""))); + + assertThat(rdsRespTimer.isCancelled()).isTrue(); + rdsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(rdsRespTimer.isCancelled()).isFalse(); + + // Management server sends back an RDS response containing RouteConfiguration requested. + List routeConfigs = ImmutableList.of( + Any.pack( + buildRouteConfiguration( + "route-bar.googleapis.com", + ImmutableList.of( + buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + "cluster.googleapis.com"))))); + response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); + responseObserver.onNext(response); + + assertThat(rdsRespTimer.isCancelled()).isTrue(); } /** * Client receives an CDS response that does not contain a Cluster for the requested resource * while each received Cluster is valid. The CDS response is ACKed. Cluster watchers are notified - * with an error for resource not found. + * with an error for resource not found after initial resource fetch timeout has expired. */ @Test public void cdsResponseWithoutMatchingResource() { @@ -1030,6 +1207,7 @@ public void cdsResponseWithoutMatchingResource() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_CDS, ""))); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); // Management server sends back a CDS response without Cluster for the requested resource. 
List clusters = ImmutableList.of( @@ -1043,13 +1221,14 @@ public void cdsResponseWithoutMatchingResource() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "0", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_CDS, "0000"))); + verify(clusterWatcher, never()).onClusterChanged(any(ClusterUpdate.class)); + verify(clusterWatcher, never()).onError(any(Status.class)); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); ArgumentCaptor errorStatusCaptor = ArgumentCaptor.forClass(null); verify(clusterWatcher).onError(errorStatusCaptor.capture()); Status error = errorStatusCaptor.getValue(); assertThat(error.getCode()).isEqualTo(Code.NOT_FOUND); - assertThat(error.getDescription()) - .isEqualTo("Requested cluster [cluster-foo.googleapis.com] does not exist"); } /** @@ -1066,6 +1245,9 @@ public void cdsResponseWithMatchingResource() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_CDS, ""))); + ScheduledTask cdsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); // Management server sends back a CDS response without Cluster for the requested resource. 
List clusters = ImmutableList.of( @@ -1080,6 +1262,7 @@ public void cdsResponseWithMatchingResource() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "0", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_CDS, "0000"))); + assertThat(cdsRespTimer.isCancelled()).isTrue(); ArgumentCaptor clusterUpdateCaptor = ArgumentCaptor.forClass(null); verify(clusterWatcher).onClusterChanged(clusterUpdateCaptor.capture()); @@ -1165,6 +1348,7 @@ public void multipleClusterWatchers() { new DiscoveryRequestMatcher("", ImmutableList.of("cluster-foo.googleapis.com", "cluster-bar.googleapis.com"), XdsClientImpl.ADS_TYPE_URL_CDS, ""))); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(2); // Management server sends back a CDS response contains Cluster for only one of // requested cluster. @@ -1174,6 +1358,7 @@ public void multipleClusterWatchers() { buildDiscoveryResponse("0", clusters, XdsClientImpl.ADS_TYPE_URL_CDS, "0000"); responseObserver.onNext(response); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); // Client sent an ACK CDS request. verify(requestObserver) .onNext( @@ -1203,13 +1388,16 @@ public void multipleClusterWatchers() { assertThat(clusterUpdate2.getLbPolicy()).isEqualTo("round_robin"); assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(false); - // The other watcher gets an error notification for cluster not found. + verify(watcher3, never()).onClusterChanged(any(ClusterUpdate.class)); + verify(watcher3, never()).onError(any(Status.class)); + + // The other watcher gets an error notification for cluster not found after its timer expired. 
+ fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); ArgumentCaptor errorStatusCaptor = ArgumentCaptor.forClass(null); verify(watcher3).onError(errorStatusCaptor.capture()); Status error = errorStatusCaptor.getValue(); assertThat(error.getCode()).isEqualTo(Code.NOT_FOUND); - assertThat(error.getDescription()) - .isEqualTo("Requested cluster [cluster-bar.googleapis.com] does not exist"); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); // Management server sends back another CDS response contains Clusters for all // requested clusters. @@ -1279,6 +1467,7 @@ public void watchClusterAlreadyBeingWatched() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_CDS, ""))); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); // Management server sends back an CDS response with Cluster for the requested // cluster. @@ -1301,6 +1490,7 @@ public void watchClusterAlreadyBeingWatched() { .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name assertThat(clusterUpdate1.getLbPolicy()).isEqualTo("round_robin"); assertThat(clusterUpdate1.isEnableLrs()).isEqualTo(false); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); // Another cluster watcher interested in the same cluster is added. ClusterWatcher watcher2 = mock(ClusterWatcher.class); @@ -1318,10 +1508,14 @@ public void watchClusterAlreadyBeingWatched() { assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(false); verifyNoMoreInteractions(requestObserver); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } + /** + * Basic operations of adding/canceling cluster data watchers. 
+   */
   @Test
-  public void addRemoveClusterWatchersFreely() {
+  public void addRemoveClusterWatchers() {
     ClusterWatcher watcher1 = mock(ClusterWatcher.class);
     xdsClient.watchClusterData("cluster-foo.googleapis.com", watcher1);
 
@@ -1447,6 +1641,7 @@ public void addRemoveClusterWatchersFreely() {
     // A new cluster watcher is added to watch cluster foo again.
     ClusterWatcher watcher3 = mock(ClusterWatcher.class);
     xdsClient.watchClusterData("cluster-foo.googleapis.com", watcher3);
+    verify(watcher3, never()).onClusterChanged(any(ClusterUpdate.class));
 
     // A CDS request is sent to indicate subscription of "cluster-foo.googleapis.com" only.
     verify(requestObserver)
@@ -1482,12 +1677,124 @@ public void addRemoveClusterWatchersFreely() {
           XdsClientImpl.ADS_TYPE_URL_CDS, "0003")));
   }
 
+  @Test
+  public void addRemoveClusterWatcherWhileInitialResourceFetchInProgress() {
+    ClusterWatcher watcher1 = mock(ClusterWatcher.class);
+    xdsClient.watchClusterData("cluster-foo.googleapis.com", watcher1);
+
+    // Streaming RPC starts after a first watcher is added.
+    StreamObserver<DiscoveryRequest> requestObserver = requestObservers.poll();
+
+    // Client sends a CDS request to management server.
+    verify(requestObserver)
+        .onNext(
+            argThat(
+                new DiscoveryRequestMatcher("", "cluster-foo.googleapis.com",
+                    XdsClientImpl.ADS_TYPE_URL_CDS, "")));
+    assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1);
+
+    fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC - 1, TimeUnit.SECONDS);
+
+    ClusterWatcher watcher2 = mock(ClusterWatcher.class);
+    ClusterWatcher watcher3 = mock(ClusterWatcher.class);
+    ClusterWatcher watcher4 = mock(ClusterWatcher.class);
+    xdsClient.watchClusterData("cluster-foo.googleapis.com", watcher2);
+    xdsClient.watchClusterData("cluster-bar.googleapis.com", watcher3);
+    xdsClient.watchClusterData("cluster-bar.googleapis.com", watcher4);
+
+    // Client sends a new CDS request for updating the latest resource subscription.
+ verify(requestObserver) + .onNext( + argThat( + new DiscoveryRequestMatcher("", + ImmutableList.of("cluster-foo.googleapis.com", "cluster-bar.googleapis.com"), + XdsClientImpl.ADS_TYPE_URL_CDS, ""))); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(2); + + fakeClock.forwardTime(1, TimeUnit.SECONDS); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + + // CDS resource "cluster-foo.googleapis.com" is known to be absent. + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(watcher1).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + verify(watcher2).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + + // The absence result is known immediately. + ClusterWatcher watcher5 = mock(ClusterWatcher.class); + xdsClient.watchClusterData("cluster-foo.googleapis.com", watcher5); + + verify(watcher5).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + ScheduledTask timeoutTask = Iterables.getOnlyElement(fakeClock.getPendingTasks()); + + // Cancel watchers while discovery for resource "cluster-bar.googleapis.com" is still + // in progress. + xdsClient.cancelClusterDataWatch("cluster-bar.googleapis.com", watcher3); + assertThat(timeoutTask.isCancelled()).isFalse(); + xdsClient.cancelClusterDataWatch("cluster-bar.googleapis.com", watcher4); + + // Client sends a CDS request for resource subscription update (Omitted). 
+ + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); + assertThat(timeoutTask.isCancelled()).isTrue(); + + verifyZeroInteractions(watcher3, watcher4); + } + + @Test + public void cdsUpdateForClusterBeingRemoved() { + xdsClient.watchClusterData("cluster-foo.googleapis.com", clusterWatcher); + StreamObserver responseObserver = responseObservers.poll(); + StreamObserver requestObserver = requestObservers.poll(); + + verify(requestObserver) + .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", + XdsClientImpl.ADS_TYPE_URL_CDS, ""))); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + + // Management server sends back a CDS response containing requested resource. + List clusters = ImmutableList.of( + Any.pack(buildCluster("cluster-foo.googleapis.com", null, true))); + DiscoveryResponse response = + buildDiscoveryResponse("0", clusters, XdsClientImpl.ADS_TYPE_URL_CDS, "0000"); + responseObserver.onNext(response); + + // Client sent an ACK CDS request (Omitted). + + ArgumentCaptor clusterUpdateCaptor = ArgumentCaptor.forClass(null); + verify(clusterWatcher).onClusterChanged(clusterUpdateCaptor.capture()); + ClusterUpdate clusterUpdate = clusterUpdateCaptor.getValue(); + assertThat(clusterUpdate.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); + assertThat(clusterUpdate.getEdsServiceName()) + .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate.getLbPolicy()).isEqualTo("round_robin"); + assertThat(clusterUpdate.isEnableLrs()).isEqualTo(true); + assertThat(clusterUpdate.getLrsServerName()).isEqualTo(""); + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); + + // No cluster is available. 
+ response = + buildDiscoveryResponse("1", ImmutableList.of(), + XdsClientImpl.ADS_TYPE_URL_CDS, "0001"); + responseObserver.onNext(response); + + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(clusterWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + } + /** * Client receives an EDS response that does not contain a ClusterLoadAssignment for the * requested resource while each received ClusterLoadAssignment is valid. * The EDS response is ACKed. - * Endpoint watchers are NOT notified with an error (EDS protocol is incremental, responses - * not containing requested resources does not indicate absence). + * After the resource fetch timeout expires, watchers waiting for the resource is notified + * with a resource not found error. */ @Test public void edsResponseWithoutMatchingResource() { @@ -1499,6 +1806,7 @@ public void edsResponseWithoutMatchingResource() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_EDS, ""))); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); // Management server sends back an EDS response without ClusterLoadAssignment for the requested // cluster. 
@@ -1528,7 +1836,13 @@ public void edsResponseWithoutMatchingResource() { .onNext(eq(buildDiscoveryRequest(NODE, "0", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_EDS, "0000"))); - verifyZeroInteractions(endpointWatcher); + verify(endpointWatcher, never()).onEndpointChanged(any(EndpointUpdate.class)); + verify(endpointWatcher, never()).onError(any(Status.class)); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(endpointWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } /** @@ -1545,6 +1859,10 @@ public void edsResponseWithMatchingResource() { verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_EDS, ""))); + ScheduledTask edsRespTimeoutTask = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(edsRespTimeoutTask.isCancelled()).isFalse(); // Management server sends back an EDS response with ClusterLoadAssignment for the requested // cluster. @@ -1575,6 +1893,8 @@ public void edsResponseWithMatchingResource() { XdsClientImpl.ADS_TYPE_URL_EDS, "0000"); responseObserver.onNext(response); + assertThat(edsRespTimeoutTask.isCancelled()).isTrue(); + // Client sent an ACK EDS request. verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "0", "cluster-foo.googleapis.com", @@ -1622,6 +1942,8 @@ public void multipleEndpointWatchers() { ImmutableList.of("cluster-foo.googleapis.com", "cluster-bar.googleapis.com"), XdsClientImpl.ADS_TYPE_URL_EDS, ""))); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(2); + // Management server sends back an EDS response contains ClusterLoadAssignment for only one of // requested cluster. 
List clusterLoadAssignments = ImmutableList.of( @@ -1638,6 +1960,8 @@ public void multipleEndpointWatchers() { XdsClientImpl.ADS_TYPE_URL_EDS, "0000"); responseObserver.onNext(response); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + // Client sent an ACK EDS request. verify(requestObserver) .onNext( @@ -1727,6 +2051,8 @@ public void watchEndpointsForClusterAlreadyBeingWatched() { .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster-foo.googleapis.com", XdsClientImpl.ADS_TYPE_URL_EDS, ""))); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + // Management server sends back an EDS response containing ClusterLoadAssignments for // some cluster not requested. List clusterLoadAssignments = ImmutableList.of( @@ -1743,6 +2069,8 @@ public void watchEndpointsForClusterAlreadyBeingWatched() { XdsClientImpl.ADS_TYPE_URL_EDS, "0000"); responseObserver.onNext(response); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); + // Client sent an ACK EDS request. verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "0", "cluster-foo.googleapis.com", @@ -1781,10 +2109,14 @@ public void watchEndpointsForClusterAlreadyBeingWatched() { 2, true)), 1, 0)); verifyNoMoreInteractions(requestObserver); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); } + /** + * Basic operations of adding/canceling endpoint data watchers. 
+ */ @Test - public void addRemoveEndpointWatchersFreely() { + public void addRemoveEndpointWatchers() { EndpointWatcher watcher1 = mock(EndpointWatcher.class); xdsClient.watchEndpointData("cluster-foo.googleapis.com", watcher1); @@ -1985,8 +2317,133 @@ public void addRemoveEndpointWatchersFreely() { XdsClientImpl.ADS_TYPE_URL_EDS, "0003"))); } + @Test + public void addRemoveEndpointWatcherWhileInitialResourceFetchInProgress() { + EndpointWatcher watcher1 = mock(EndpointWatcher.class); + xdsClient.watchEndpointData("cluster-foo.googleapis.com", watcher1); + + // Streaming RPC starts after a first watcher is added. + StreamObserver requestObserver = requestObservers.poll(); + + // Client sends an EDS request to management server. + verify(requestObserver) + .onNext( + argThat( + new DiscoveryRequestMatcher("", "cluster-foo.googleapis.com", + XdsClientImpl.ADS_TYPE_URL_EDS, ""))); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC - 1, TimeUnit.SECONDS); + + EndpointWatcher watcher2 = mock(EndpointWatcher.class); + EndpointWatcher watcher3 = mock(EndpointWatcher.class); + EndpointWatcher watcher4 = mock(EndpointWatcher.class); + xdsClient.watchEndpointData("cluster-foo.googleapis.com", watcher2); + xdsClient.watchEndpointData("cluster-bar.googleapis.com", watcher3); + xdsClient.watchEndpointData("cluster-bar.googleapis.com", watcher4); + + // Client sends a new EDS request for updating the latest resource subscription. 
+ verify(requestObserver) + .onNext( + argThat( + new DiscoveryRequestMatcher("", + ImmutableList.of("cluster-foo.googleapis.com", "cluster-bar.googleapis.com"), + XdsClientImpl.ADS_TYPE_URL_EDS, ""))); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(2); + + fakeClock.forwardTime(1, TimeUnit.SECONDS); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + + // EDS resource "cluster-foo.googleapis.com" is known to be absent. + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(watcher1).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + verify(watcher2).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + + // The absence result is known immediately. + EndpointWatcher watcher5 = mock(EndpointWatcher.class); + xdsClient.watchEndpointData("cluster-foo.googleapis.com", watcher5); + + verify(watcher5).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + ScheduledTask timeoutTask = Iterables.getOnlyElement(fakeClock.getPendingTasks()); + + // Cancel watchers while discovery for resource "cluster-bar.googleapis.com" is still + // in progress. + xdsClient.cancelEndpointDataWatch("cluster-bar.googleapis.com", watcher3); + assertThat(timeoutTask.isCancelled()).isFalse(); + xdsClient.cancelEndpointDataWatch("cluster-bar.googleapis.com", watcher4); + + // Client sends an EDS request for resource subscription update (Omitted). 
+ + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); + assertThat(timeoutTask.isCancelled()).isTrue(); + + verifyZeroInteractions(watcher3, watcher4); + } + + @Test + public void cdsUpdateForEdsServiceNameChange() { + xdsClient.watchClusterData("cluster-foo.googleapis.com", clusterWatcher); + StreamObserver responseObserver = responseObservers.poll(); + + // Management server sends back a CDS response containing requested resource. + List clusters = ImmutableList.of( + Any.pack(buildCluster("cluster-foo.googleapis.com", "cluster-foo:service-bar", false))); + DiscoveryResponse response = + buildDiscoveryResponse("0", clusters, XdsClientImpl.ADS_TYPE_URL_CDS, "0000"); + responseObserver.onNext(response); + + xdsClient.watchEndpointData("cluster-foo:service-bar", endpointWatcher); + + // Management server sends back an EDS response for resource "cluster-foo:service-bar". 
+    List<Any> clusterLoadAssignments = ImmutableList.of(
+        Any.pack(buildClusterLoadAssignment("cluster-foo:service-bar",
+            ImmutableList.of(
+                buildLocalityLbEndpoints("region1", "zone1", "subzone1",
+                    ImmutableList.of(
+                        buildLbEndpoint("192.168.0.1", 8080, HealthStatus.HEALTHY, 2)),
+                    1, 0)),
+            ImmutableList.of())));
+    response =
+        buildDiscoveryResponse("0", clusterLoadAssignments,
+            XdsClientImpl.ADS_TYPE_URL_EDS, "0000");
+    responseObserver.onNext(response);
+
+    ArgumentCaptor<EndpointUpdate> endpointUpdateCaptor = ArgumentCaptor.forClass(null);
+    verify(endpointWatcher).onEndpointChanged(endpointUpdateCaptor.capture());
+    EndpointUpdate endpointUpdate = endpointUpdateCaptor.getValue();
+    assertThat(endpointUpdate.getClusterName()).isEqualTo("cluster-foo:service-bar");
+    assertThat(endpointUpdate.getDropPolicies()).isEmpty();
+    assertThat(endpointUpdate.getLocalityLbEndpointsMap())
+        .containsExactly(
+            new Locality("region1", "zone1", "subzone1"),
+            new LocalityLbEndpoints(
+                ImmutableList.of(
+                    new LbEndpoint("192.168.0.1", 8080,
+                        2, true)), 1, 0));
+
+    // Management server sends another CDS response for removing cluster service
+    // "cluster-foo:service-bar" with replacement of "cluster-foo:service-blade".
+    clusters = ImmutableList.of(
+        Any.pack(buildCluster("cluster-foo.googleapis.com", "cluster-foo:service-blade", false)));
+    response =
+        buildDiscoveryResponse("1", clusters, XdsClientImpl.ADS_TYPE_URL_CDS, "0001");
+    responseObserver.onNext(response);
+
+    // Watcher gets a notification for endpoint resource "cluster-foo:service-bar" being deleted.
+    ArgumentCaptor<Status> statusCaptor = ArgumentCaptor.forClass(null);
+    verify(endpointWatcher).onError(statusCaptor.capture());
+    assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND);
+  }
+
   /**
-   * RPC stream closed and retry during the period of first tiem resolving service config
+   * RPC stream closed and retry during the period of first time resolving service config
    * (LDS/RDS only).
*/ @Test @@ -2462,6 +2919,159 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { backoffPolicy2); } + @Test + public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { + InOrder inOrder = + Mockito.inOrder(mockedDiscoveryService, backoffPolicyProvider, backoffPolicy1, + backoffPolicy2); + xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + + ArgumentCaptor> responseObserverCaptor = + ArgumentCaptor.forClass(null); + inOrder.verify(mockedDiscoveryService) + .streamAggregatedResources(responseObserverCaptor.capture()); + StreamObserver responseObserver = + responseObserverCaptor.getValue(); // same as responseObservers.poll() + + // Management server sends back an LDS response telling client to do RDS. + Rds rdsConfig = + Rds.newBuilder() + // Must set to use ADS. + .setConfigSource( + ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance())) + .setRouteConfigName("route-foo.googleapis.com") + .build(); + + List listeners = ImmutableList.of( + Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) + ); + DiscoveryResponse response = + buildDiscoveryResponse("0", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"); + responseObserver.onNext(response); + + // Client sent an RDS request for resource "route-foo.googleapis.com" (Omitted). + + ScheduledTask rdsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(rdsRespTimer.isCancelled()).isFalse(); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC - 1, TimeUnit.SECONDS); + + // RPC stream is broken while the initial fetch for the resource is not complete. + responseObserver.onError(Status.UNAVAILABLE.asException()); + assertThat(rdsRespTimer.isCancelled()).isTrue(); + + // Reset backoff and retry immediately. 
+ inOrder.verify(backoffPolicyProvider).get(); + fakeClock.runDueTasks(); + inOrder.verify(mockedDiscoveryService) + .streamAggregatedResources(responseObserverCaptor.capture()); + responseObserver = responseObserverCaptor.getValue(); + StreamObserver requestObserver = requestObservers.poll(); + + ScheduledTask ldsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(ldsRespTimer.getDelay(TimeUnit.SECONDS)) + .isEqualTo(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC); + + // Client resumed requests and management server sends back LDS resources again. + verify(requestObserver).onNext( + eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + XdsClientImpl.ADS_TYPE_URL_LDS, ""))); + responseObserver.onNext(response); + + // Client sent an RDS request for resource "route-foo.googleapis.com" (Omitted). + + assertThat(ldsRespTimer.isCancelled()).isTrue(); + rdsRespTimer = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(RDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(rdsRespTimer.getDelay(TimeUnit.SECONDS)) + .isEqualTo(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC); + + // Management server sends back an RDS response containing the RouteConfiguration + // for the requested resource. + List routeConfigs = ImmutableList.of( + Any.pack( + buildRouteConfiguration( + "route-foo.googleapis.com", + ImmutableList.of( + buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + "cluster-foo.googleapis.com"))))); + response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); + responseObserver.onNext(response); + + assertThat(rdsRespTimer.isCancelled()).isTrue(); + + // Resets RPC stream again. + responseObserver.onError(Status.UNAVAILABLE.asException()); + // Reset backoff and retry immediately. 
+ inOrder.verify(backoffPolicyProvider).get(); + fakeClock.runDueTasks(); + inOrder.verify(mockedDiscoveryService) + .streamAggregatedResources(responseObserverCaptor.capture()); + responseObserver = responseObserverCaptor.getValue(); + + // Client/server resumed LDS/RDS request/response (Omitted). + + // Start watching cluster data. + xdsClient.watchClusterData("cluster-foo.googleapis.com", clusterWatcher); + ScheduledTask cdsRespTimeoutTask = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(cdsRespTimeoutTask.isCancelled()).isFalse(); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC - 1, TimeUnit.SECONDS); + + // RPC stream is broken while the initial fetch for the resource is not complete. + responseObserver.onError(Status.UNAVAILABLE.asException()); + assertThat(cdsRespTimeoutTask.isCancelled()).isTrue(); + inOrder.verify(backoffPolicy2).nextBackoffNanos(); + assertThat(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER)).hasSize(1); + + // Retry after backoff. + fakeClock.forwardNanos(20L); + inOrder.verify(mockedDiscoveryService) + .streamAggregatedResources(responseObserverCaptor.capture()); + responseObserver = responseObserverCaptor.getValue(); + + // Timer is rescheduled as the client restarts the resource fetch. + cdsRespTimeoutTask = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(cdsRespTimeoutTask.isCancelled()).isFalse(); + assertThat(cdsRespTimeoutTask.getDelay(TimeUnit.SECONDS)) + .isEqualTo(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC); + + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); + verify(clusterWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); + + // Start watching endpoint data. 
+ xdsClient.watchEndpointData("cluster-foo.googleapis.com", endpointWatcher); + ScheduledTask edsTimeoutTask = + Iterables.getOnlyElement( + fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)); + assertThat(edsTimeoutTask.getDelay(TimeUnit.SECONDS)) + .isEqualTo(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC); + + // RPC stream is broken again. + responseObserver.onError(Status.UNAVAILABLE.asException()); + + assertThat(edsTimeoutTask.isCancelled()).isTrue(); + inOrder.verify(backoffPolicy2).nextBackoffNanos(); + assertThat(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER)).hasSize(1); + + fakeClock.forwardNanos(200L); + inOrder.verify(mockedDiscoveryService) + .streamAggregatedResources(responseObserverCaptor.capture()); + + assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + assertThat(fakeClock.getPendingTasks(EDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); + } + /** * Tests sending a streaming LRS RPC for each cluster to report loads for. 
*/ @@ -2563,8 +3173,6 @@ public void matchHostName_postfixWildCard() { assertThat(XdsClientImpl.matchHostName("foo-bar", pattern)).isTrue(); } - - private static LoadStatsRequest buildInitialLoadStatsRequest(String clusterName) { return LoadStatsRequest.newBuilder() diff --git a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java index 9740e56459b..ad80f79bc6d 100644 --- a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java @@ -60,6 +60,7 @@ import java.util.List; import java.util.Map; import java.util.Queue; +import java.util.concurrent.TimeUnit; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -280,6 +281,7 @@ public void resolve_ResourceNotFound() { responseObserver.onNext( buildLdsResponseForCluster("0", "bar.googleapis.com", 80, clusterName, "0000")); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); ArgumentCaptor resolutionResultCaptor = ArgumentCaptor.forClass(null); verify(mockListener).onResult(resolutionResultCaptor.capture()); ResolutionResult result = resolutionResultCaptor.getValue(); @@ -347,6 +349,7 @@ public void resolve_resourceNewlyAdded() { buildLdsResponseForCluster("0", "bar.googleapis.com", 80, "cluster-bar.googleapis.com", "0000")); + fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); ArgumentCaptor resolutionResultCaptor = ArgumentCaptor.forClass(null); verify(mockListener).onResult(resolutionResultCaptor.capture()); ResolutionResult result = resolutionResultCaptor.getValue(); From e7d7c5bde4caf606f02b2575d5f62e80ad5135b0 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 17 Jan 2020 15:50:49 -0800 Subject: [PATCH 07/86] examples: Add hostname example This is placed in its own directory since it depends on grpc-services. 
--- .travis.yml | 2 + RELEASING.md | 2 + buildscripts/kokoro/unix.sh | 4 + examples/example-hostname/BUILD.bazel | 50 ++++++++ examples/example-hostname/README.md | 60 +++++++++ examples/example-hostname/build.gradle | 54 ++++++++ examples/example-hostname/pom.xml | 118 ++++++++++++++++++ examples/example-hostname/settings.gradle | 1 + .../examples/hostname/HostnameGreeter.java | 60 +++++++++ .../examples/hostname/HostnameServer.java | 85 +++++++++++++ .../main/proto/helloworld/helloworld.proto | 37 ++++++ .../hostname/HostnameGreeterTest.java | 70 +++++++++++ services/BUILD.bazel | 24 ++++ 13 files changed, 567 insertions(+) create mode 100644 examples/example-hostname/BUILD.bazel create mode 100644 examples/example-hostname/README.md create mode 100644 examples/example-hostname/build.gradle create mode 100644 examples/example-hostname/pom.xml create mode 100644 examples/example-hostname/settings.gradle create mode 100644 examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameGreeter.java create mode 100644 examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameServer.java create mode 100644 examples/example-hostname/src/main/proto/helloworld/helloworld.proto create mode 100644 examples/example-hostname/src/test/java/io/grpc/examples/hostname/HostnameGreeterTest.java diff --git a/.travis.yml b/.travis.yml index 0dc75902eba..53f1d01f8ca 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,6 +25,8 @@ install: - pushd examples && ./gradlew build && popd - pushd examples && mvn verify && popd - pushd examples/example-alts && ../gradlew build && popd + - pushd examples/example-hostname && ../gradlew build && popd + - pushd examples/example-hostname && mvn verify && popd - pushd examples/example-tls && ../gradlew clean build && popd - pushd examples/example-kotlin && ../gradlew build && popd diff --git a/RELEASING.md b/RELEASING.md index c18b117ec4b..e476ab7e5b2 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -45,6 +45,8 @@ $ 
VERSION_FILES=( examples/example-alts/build.gradle examples/example-gauth/build.gradle examples/example-gauth/pom.xml + examples/example-hostname/build.gradle + examples/example-hostname/pom.xml examples/example-kotlin/build.gradle examples/example-kotlin/android/helloworld/app/build.gradle examples/example-tls/build.gradle diff --git a/buildscripts/kokoro/unix.sh b/buildscripts/kokoro/unix.sh index 343b1ae2c09..ec265009994 100755 --- a/buildscripts/kokoro/unix.sh +++ b/buildscripts/kokoro/unix.sh @@ -64,6 +64,10 @@ if [[ -z "${SKIP_TESTS:-}" ]]; then # --batch-mode reduces log spam mvn clean verify --batch-mode popd + pushd examples/example-hostname + ../gradlew build $GRADLE_FLAGS + mvn verify --batch-mode + popd pushd examples/example-tls mvn clean verify --batch-mode popd diff --git a/examples/example-hostname/BUILD.bazel b/examples/example-hostname/BUILD.bazel new file mode 100644 index 00000000000..8b76f790983 --- /dev/null +++ b/examples/example-hostname/BUILD.bazel @@ -0,0 +1,50 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_grpc_grpc_java//:java_grpc_library.bzl", "java_grpc_library") + +proto_library( + name = "helloworld_proto", + srcs = ["src/main/proto/helloworld/helloworld.proto"], +) + +java_proto_library( + name = "helloworld_java_proto", + deps = [":helloworld_proto"], +) + +java_grpc_library( + name = "helloworld_java_grpc", + srcs = [":helloworld_proto"], + deps = [":helloworld_java_proto"], +) + +java_library( + name = "hostname_greeter", + testonly = 1, + srcs = [ + "src/main/java/io/grpc/examples/hostname/HostnameGreeter.java", + ], + deps = [ + ":helloworld_java_grpc", + ":helloworld_java_proto", + "@io_grpc_grpc_java//stub", + ], +) + +java_binary( + name = "hostname-server", + testonly = 1, + srcs = [ + "src/main/java/io/grpc/examples/hostname/HostnameServer.java", + ], + main_class = "io.grpc.examples.hostname.HostnameServer", + runtime_deps = [ + "@io_grpc_grpc_java//netty", + ], + deps = [ + ":hostname_greeter", + 
"@io_grpc_grpc_java//api", + "@io_grpc_grpc_java//services:health", + "@io_grpc_grpc_java//services:reflection", + "@io_grpc_grpc_proto//:health_java_proto", + ], +) diff --git a/examples/example-hostname/README.md b/examples/example-hostname/README.md new file mode 100644 index 00000000000..9d47c12445f --- /dev/null +++ b/examples/example-hostname/README.md @@ -0,0 +1,60 @@ +gRPC Hostname Example +===================== + +The hostname example is a Hello World server whose response includes its +hostname. It also supports health and reflection services. This makes it a good +server to test infrastructure, like load balancing. + +The example requires grpc-java to already be built. You are strongly encouraged +to check out a git release tag, since there will already be a build of grpc +available. Otherwise you must follow [COMPILING](../../COMPILING.md). + +### Build the example + +1. Build the hello-world example client. See [the examples README](../README.md) + +2. Build this server. From the `grpc-java/examples/examples-hostname` directory: +``` +$ ../gradlew installDist +``` + +This creates the script `build/install/hostname-server/bin/hostname-server` that +runs the example. + +To run the hostname example, run: + +``` +$ ./build/install/hostname/bin/hostname-server +``` + +And in a different terminal window run the hello-world client: + +``` +$ ../build/install/examples/bin/hello-world-client +``` + +### Maven + +If you prefer to use Maven: +1. Build the hello-world example client. See [the examples README](../README.md) + +2. Run in this directory: +``` +$ mvn verify +$ # Run the server (from the examples-hostname directory) +$ mvn exec:java -Dexec.mainClass=io.grpc.examples.hostname.HostnameServer +$ # In another terminal run the client (from the examples directory) +$ cd .. 
+$ mvn exec:java -Dexec.mainClass=io.grpc.examples.helloworld.HelloWorldClient +``` + +### Bazel + +If you prefer to use Bazel, run from the `grpc-java/examples` directory: +``` +$ bazel build :hello-world-client example-hostname:hostname-server +$ # Run the server +$ ./bazel-bin/example-hostname/hostname-server +$ # In another terminal run the client +$ ./bazel-bin/hello-world-client +``` diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle new file mode 100644 index 00000000000..5a23ee13def --- /dev/null +++ b/examples/example-hostname/build.gradle @@ -0,0 +1,54 @@ +plugins { + id 'application' // Provide convenience executables for trying out the examples. + id 'java' + + id "com.google.protobuf" version "0.8.10" +} + +repositories { + maven { // The google mirror is less flaky than mavenCentral() + url "https://maven-central.storage-download.googleapis.com/repos/central/data/" } + mavenCentral() + mavenLocal() +} + +sourceCompatibility = 1.7 +targetCompatibility = 1.7 + +// IMPORTANT: You probably want the non-SNAPSHOT version of gRPC. Make sure you +// are looking at a tagged version of the example and not "master"! + +// Feel free to delete the comment at the next line. It is just for safely +// updating the version in our release process. 
+def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def protobufVersion = '3.11.0' + +dependencies { + implementation "io.grpc:grpc-protobuf:${grpcVersion}" + implementation "io.grpc:grpc-stub:${grpcVersion}" + implementation "io.grpc:grpc-services:${grpcVersion}" + compileOnly "javax.annotation:javax.annotation-api:1.2" + runtimeOnly "io.grpc:grpc-netty-shaded:${grpcVersion}" + + testImplementation 'junit:junit:4.12' + testImplementation "io.grpc:grpc-testing:${grpcVersion}" +} + +protobuf { + protoc { + artifact = "com.google.protobuf:protoc:${protobufVersion}" + } + plugins { + grpc { + artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}" + } + } + generateProtoTasks { + all()*.plugins { + grpc {} + } + } +} + +applicationName = 'hostname-server' +mainClassName = 'io.grpc.examples.hostname.HostnameServer' diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml new file mode 100644 index 00000000000..63b8b158bc1 --- /dev/null +++ b/examples/example-hostname/pom.xml @@ -0,0 +1,118 @@ + + 4.0.0 + io.grpc + example-hostname + jar + + 1.28.0-SNAPSHOT + example-hostname + https://github.com/grpc/grpc-java + + + UTF-8 + 1.28.0-SNAPSHOT + 3.11.0 + + 1.7 + 1.7 + + + + + + io.grpc + grpc-bom + ${grpc.version} + pom + import + + + + + + + io.grpc + grpc-protobuf + + + io.grpc + grpc-stub + + + io.grpc + grpc-services + + + javax.annotation + javax.annotation-api + 1.2 + provided + + + io.grpc + grpc-netty-shaded + runtime + + + junit + junit + 4.12 + test + + + io.grpc + grpc-testing + test + + + + + + + kr.motd.maven + os-maven-plugin + 1.6.2 + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + 1.4.1 + + + enforce + + 
enforce + + + + + + + + + + + + diff --git a/examples/example-hostname/settings.gradle b/examples/example-hostname/settings.gradle new file mode 100644 index 00000000000..aa159eb0946 --- /dev/null +++ b/examples/example-hostname/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'hostname' diff --git a/examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameGreeter.java b/examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameGreeter.java new file mode 100644 index 00000000000..b15ade5684d --- /dev/null +++ b/examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameGreeter.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.examples.hostname; + +import io.grpc.examples.helloworld.GreeterGrpc; +import io.grpc.examples.helloworld.HelloReply; +import io.grpc.examples.helloworld.HelloRequest; +import io.grpc.stub.StreamObserver; +import java.io.IOException; +import java.net.InetAddress; +import java.util.Random; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** Greeter implementation which replies identifying itself with its hostname. 
*/ +public final class HostnameGreeter extends GreeterGrpc.GreeterImplBase { + private static final Logger logger = Logger.getLogger(HostnameGreeter.class.getName()); + + private final String serverName; + + public HostnameGreeter(String serverName) { + if (serverName == null) { + serverName = determineHostname(); + } + this.serverName = serverName; + } + + @Override + public void sayHello(HelloRequest req, StreamObserver responseObserver) { + HelloReply reply = HelloReply.newBuilder() + .setMessage("Hello " + req.getName() + ", from " + serverName) + .build(); + responseObserver.onNext(reply); + responseObserver.onCompleted(); + } + + private static String determineHostname() { + try { + return InetAddress.getLocalHost().getHostName(); + } catch (IOException ex) { + logger.log(Level.INFO, "Failed to determine hostname. Will generate one", ex); + } + // Strange. Well, let's make an identifier for ourselves. + return "generated-" + new Random().nextInt(); + } +} diff --git a/examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameServer.java b/examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameServer.java new file mode 100644 index 00000000000..a6f2175914e --- /dev/null +++ b/examples/example-hostname/src/main/java/io/grpc/examples/hostname/HostnameServer.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.grpc.examples.hostname; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import io.grpc.health.v1.HealthCheckResponse.ServingStatus; +import io.grpc.protobuf.services.ProtoReflectionService; +import io.grpc.services.HealthStatusManager; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * A server that hosts HostnameGreeter, plus infrastructure services like health and reflection. + * + *

This server is intended to be a general purpose "dummy" server. + */ +public final class HostnameServer { + public static void main(String[] args) throws IOException, InterruptedException { + int port = 50051; + String hostname = null; + if (args.length >= 1) { + try { + port = Integer.parseInt(args[0]); + } catch (NumberFormatException ex) { + System.err.println("Usage: [port [hostname]]"); + System.err.println(""); + System.err.println(" port The listen port. Defaults to " + port); + System.err.println(" hostname The name clients will see in greet responses. "); + System.err.println(" Defaults to the machine's hostname"); + System.exit(1); + } + } + if (args.length >= 2) { + hostname = args[1]; + } + HealthStatusManager health = new HealthStatusManager(); + final Server server = ServerBuilder.forPort(port) + .addService(new HostnameGreeter(hostname)) + .addService(ProtoReflectionService.newInstance()) + .addService(health.getHealthService()) + .build() + .start(); + System.out.println("Listening on port " + port); + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + // Start graceful shutdown + server.shutdown(); + try { + // Wait for RPCs to complete processing + if (!server.awaitTermination(30, TimeUnit.SECONDS)) { + // That was plenty of time. Let's cancel the remaining RPCs + server.shutdownNow(); + // shutdownNow isn't instantaneous, so give a bit of time to clean resources up + // gracefully. Normally this will be well under a second. + server.awaitTermination(5, TimeUnit.SECONDS); + } + } catch (InterruptedException ex) { + server.shutdownNow(); + } + } + }); + // This would normally be tied to the service's dependencies. For example, if HostnameGreeter + // used a Channel to contact a required service, then when 'channel.getState() == + // TRANSIENT_FAILURE' we'd want to set NOT_SERVING. But HostnameGreeter has no dependencies, so + // hard-coding SERVING is appropriate. 
+ health.setStatus("", ServingStatus.SERVING); + server.awaitTermination(); + } +} diff --git a/examples/example-hostname/src/main/proto/helloworld/helloworld.proto b/examples/example-hostname/src/main/proto/helloworld/helloworld.proto new file mode 100644 index 00000000000..c60d9416f1f --- /dev/null +++ b/examples/example-hostname/src/main/proto/helloworld/helloworld.proto @@ -0,0 +1,37 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.helloworld"; +option java_outer_classname = "HelloWorldProto"; +option objc_class_prefix = "HLW"; + +package helloworld; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. 
+message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/examples/example-hostname/src/test/java/io/grpc/examples/hostname/HostnameGreeterTest.java b/examples/example-hostname/src/test/java/io/grpc/examples/hostname/HostnameGreeterTest.java new file mode 100644 index 00000000000..5420678d036 --- /dev/null +++ b/examples/example-hostname/src/test/java/io/grpc/examples/hostname/HostnameGreeterTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.examples.hostname; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import io.grpc.examples.helloworld.GreeterGrpc; +import io.grpc.examples.helloworld.HelloRequest; +import io.grpc.examples.helloworld.HelloReply; +import io.grpc.examples.hostname.HostnameGreeter; +import io.grpc.inprocess.InProcessChannelBuilder; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.testing.GrpcCleanupRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Unit tests for {@link HostnameGreeter}. + * + *

This is very similar to HelloWorldServerTest, so see it for more descriptions. + */ +@RunWith(JUnit4.class) +public class HostnameGreeterTest { + @Rule + public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule(); + + private GreeterGrpc.GreeterBlockingStub blockingStub = + GreeterGrpc.newBlockingStub(grpcCleanup.register( + InProcessChannelBuilder.forName("hostname").directExecutor().build())); + + @Test + public void sayHello_fixedHostname() throws Exception { + grpcCleanup.register( + InProcessServerBuilder.forName("hostname") + .directExecutor().addService(new HostnameGreeter("me")).build().start()); + + HelloReply reply = + blockingStub.sayHello(HelloRequest.newBuilder().setName("you").build()); + assertEquals("Hello you, from me", reply.getMessage()); + } + + @Test + public void sayHello_dynamicHostname() throws Exception { + grpcCleanup.register( + InProcessServerBuilder.forName("hostname") + .directExecutor().addService(new HostnameGreeter(null)).build().start()); + + // Just verifing the service doesn't crash + HelloReply reply = + blockingStub.sayHello(HelloRequest.newBuilder().setName("anonymous").build()); + assertTrue(reply.getMessage(), reply.getMessage().startsWith("Hello anonymous, from ")); + } +} diff --git a/services/BUILD.bazel b/services/BUILD.bazel index a5386abd0f9..5b31c974a2f 100644 --- a/services/BUILD.bazel +++ b/services/BUILD.bazel @@ -64,6 +64,23 @@ java_library( ], ) +java_library( + name = "health", + srcs = [ + "src/main/java/io/grpc/services/HealthServiceImpl.java", + "src/main/java/io/grpc/services/HealthStatusManager.java", + ], + deps = [ + ":_health_java_grpc", + "//api", + "//context", + "//stub", + "@com_google_code_findbugs_jsr305//jar", + "@com_google_guava_guava//jar", + "@io_grpc_grpc_proto//:health_java_proto", + ], +) + # These shouldn't be here, but this is better than having # a circular dependency on grpc-proto and 
grpc-java. @@ -80,3 +97,10 @@ java_grpc_library( visibility = ["//visibility:private"], deps = ["@io_grpc_grpc_proto//:channelz_java_proto"], ) + +java_grpc_library( + name = "_health_java_grpc", + srcs = ["@io_grpc_grpc_proto//:health_proto"], + visibility = ["//visibility:private"], + deps = ["@io_grpc_grpc_proto//:health_java_proto"], +) From 74cde7e8b4d4b8e59d6b8383b1557dddbada9f67 Mon Sep 17 00:00:00 2001 From: Kun Zhang Date: Tue, 21 Jan 2020 11:37:02 -0800 Subject: [PATCH 08/86] netty: add an internal option to disable native buffer (#6619) This is needed for internal rollout where the native memory usage from netty makes task more prone to exceeding memory limits. --- .../netty/InternalNettyServerBuilder.java | 4 + .../io/grpc/netty/NettyClientTransport.java | 2 +- .../main/java/io/grpc/netty/NettyServer.java | 7 +- .../io/grpc/netty/NettyServerBuilder.java | 10 ++- netty/src/main/java/io/grpc/netty/Utils.java | 74 +++++++++++-------- .../grpc/netty/NettyClientTransportTest.java | 2 +- .../java/io/grpc/netty/NettyServerTest.java | 4 + 7 files changed, 67 insertions(+), 36 deletions(-) diff --git a/netty/src/main/java/io/grpc/netty/InternalNettyServerBuilder.java b/netty/src/main/java/io/grpc/netty/InternalNettyServerBuilder.java index a4e4e103b5b..e89593364b3 100644 --- a/netty/src/main/java/io/grpc/netty/InternalNettyServerBuilder.java +++ b/netty/src/main/java/io/grpc/netty/InternalNettyServerBuilder.java @@ -43,6 +43,10 @@ public static void setTracingEnabled(NettyServerBuilder builder, boolean value) builder.setTracingEnabled(value); } + public static void setForceHeapBuffer(NettyServerBuilder builder, boolean value) { + builder.setForceHeapBuffer(value); + } + /** * Sets {@link io.grpc.Channel} and {@link io.netty.channel.EventLoopGroup}s to Nio. 
A major * benefit over using existing setters is gRPC will manage the life cycle of {@link diff --git a/netty/src/main/java/io/grpc/netty/NettyClientTransport.java b/netty/src/main/java/io/grpc/netty/NettyClientTransport.java index 753c1e4842b..81831ac918c 100644 --- a/netty/src/main/java/io/grpc/netty/NettyClientTransport.java +++ b/netty/src/main/java/io/grpc/netty/NettyClientTransport.java @@ -226,7 +226,7 @@ public Runnable start(Listener transportListener) { ChannelHandler negotiationHandler = negotiator.newHandler(handler); Bootstrap b = new Bootstrap(); - b.option(ALLOCATOR, Utils.getByteBufAllocator()); + b.option(ALLOCATOR, Utils.getByteBufAllocator(false)); b.attr(LOGGER_KEY, channelLogger); b.group(eventLoop); b.channelFactory(channelFactory); diff --git a/netty/src/main/java/io/grpc/netty/NettyServer.java b/netty/src/main/java/io/grpc/netty/NettyServer.java index 478687b6abc..e50d2a57e9c 100644 --- a/netty/src/main/java/io/grpc/netty/NettyServer.java +++ b/netty/src/main/java/io/grpc/netty/NettyServer.java @@ -73,6 +73,7 @@ class NettyServer implements InternalServer, InternalWithLogId { private final int maxStreamsPerConnection; private final ObjectPool bossGroupPool; private final ObjectPool workerGroupPool; + private final boolean forceHeapBuffer; private EventLoopGroup bossGroup; private EventLoopGroup workerGroup; private ServerListener listener; @@ -100,6 +101,7 @@ class NettyServer implements InternalServer, InternalWithLogId { Map, ?> channelOptions, ObjectPool bossGroupPool, ObjectPool workerGroupPool, + boolean forceHeapBuffer, ProtocolNegotiator protocolNegotiator, List streamTracerFactories, TransportTracer.Factory transportTracerFactory, @@ -115,6 +117,7 @@ class NettyServer implements InternalServer, InternalWithLogId { this.channelOptions = new HashMap, Object>(channelOptions); this.bossGroupPool = checkNotNull(bossGroupPool, "bossGroupPool"); this.workerGroupPool = checkNotNull(workerGroupPool, "workerGroupPool"); + this.forceHeapBuffer 
= forceHeapBuffer; this.bossGroup = bossGroupPool.getObject(); this.workerGroup = workerGroupPool.getObject(); this.protocolNegotiator = checkNotNull(protocolNegotiator, "protocolNegotiator"); @@ -155,8 +158,8 @@ public void start(ServerListener serverListener) throws IOException { listener = checkNotNull(serverListener, "serverListener"); ServerBootstrap b = new ServerBootstrap(); - b.option(ALLOCATOR, Utils.getByteBufAllocator()); - b.childOption(ALLOCATOR, Utils.getByteBufAllocator()); + b.option(ALLOCATOR, Utils.getByteBufAllocator(forceHeapBuffer)); + b.childOption(ALLOCATOR, Utils.getByteBufAllocator(forceHeapBuffer)); b.group(bossGroup, workerGroup); b.channelFactory(channelFactory); // For non-socket based channel, the option will be ignored. diff --git a/netty/src/main/java/io/grpc/netty/NettyServerBuilder.java b/netty/src/main/java/io/grpc/netty/NettyServerBuilder.java index 287200a20c1..1f48a331a3f 100644 --- a/netty/src/main/java/io/grpc/netty/NettyServerBuilder.java +++ b/netty/src/main/java/io/grpc/netty/NettyServerBuilder.java @@ -89,6 +89,7 @@ public final class NettyServerBuilder extends AbstractServerImplBuilder workerEventLoopGroupPool = DEFAULT_WORKER_EVENT_LOOP_GROUP_POOL; + private boolean forceHeapBuffer; private SslContext sslContext; private ProtocolNegotiator protocolNegotiator; private int maxConcurrentCallsPerConnection = Integer.MAX_VALUE; @@ -268,6 +269,13 @@ NettyServerBuilder workerEventLoopGroupPool( return this; } + /** + * Force using heap buffer when custom allocator is enabled. + */ + void setForceHeapBuffer(boolean value) { + forceHeapBuffer = value; + } + /** * Sets the TLS context to use for encryption. Providing a context enables encryption. It must * have been configured with {@link GrpcSslContexts}, but options could have been overridden. 
@@ -542,7 +550,7 @@ protected List buildTransportServers( for (SocketAddress listenAddress : listenAddresses) { NettyServer transportServer = new NettyServer( listenAddress, channelFactory, channelOptions, bossEventLoopGroupPool, - workerEventLoopGroupPool, negotiator, streamTracerFactories, + workerEventLoopGroupPool, forceHeapBuffer, negotiator, streamTracerFactories, getTransportTracerFactory(), maxConcurrentCallsPerConnection, flowControlWindow, maxMessageSize, maxHeaderListSize, keepAliveTimeInNanos, keepAliveTimeoutInNanos, maxConnectionIdleInNanos, maxConnectionAgeInNanos, maxConnectionAgeGraceInNanos, diff --git a/netty/src/main/java/io/grpc/netty/Utils.java b/netty/src/main/java/io/grpc/netty/Utils.java index 2e9c5c512d8..7a3cc760339 100644 --- a/netty/src/main/java/io/grpc/netty/Utils.java +++ b/netty/src/main/java/io/grpc/netty/Utils.java @@ -87,35 +87,13 @@ class Utils { public static final Resource DEFAULT_WORKER_EVENT_LOOP_GROUP; // This class is initialized on first use, thus provides delayed allocator creation. - private static final class ByteBufAllocatorHolder { - private static final ByteBufAllocator allocator; - - static { - if (Boolean.parseBoolean( - System.getProperty("io.grpc.netty.useCustomAllocator", "true"))) { - int maxOrder; - if (System.getProperty("io.netty.allocator.maxOrder") == null) { - // See the implementation of PooledByteBufAllocator. DEFAULT_MAX_ORDER in there is - // 11, which makes chunk size to be 8192 << 11 = 16 MiB. We want the chunk size to be - // 2MiB, thus reducing the maxOrder to 8. 
- maxOrder = 8; - } else { - maxOrder = PooledByteBufAllocator.defaultMaxOrder(); - } - allocator = new PooledByteBufAllocator( - PooledByteBufAllocator.defaultPreferDirect(), - PooledByteBufAllocator.defaultNumHeapArena(), - PooledByteBufAllocator.defaultNumDirectArena(), - PooledByteBufAllocator.defaultPageSize(), - maxOrder, - PooledByteBufAllocator.defaultTinyCacheSize(), - PooledByteBufAllocator.defaultSmallCacheSize(), - PooledByteBufAllocator.defaultNormalCacheSize(), - PooledByteBufAllocator.defaultUseCacheForAllThreads()); - } else { - allocator = ByteBufAllocator.DEFAULT; - } - } + private static final class ByteBufAllocatorPreferDirectHolder { + private static final ByteBufAllocator allocator = createByteBufAllocator(true); + } + + // This class is initialized on first use, thus provides delayed allocator creation. + private static final class ByteBufAllocatorPreferHeapHolder { + private static final ByteBufAllocator allocator = createByteBufAllocator(false); } public static final ChannelFactory DEFAULT_SERVER_CHANNEL_FACTORY; @@ -144,8 +122,42 @@ private static final class ByteBufAllocatorHolder { } } - public static ByteBufAllocator getByteBufAllocator() { - return ByteBufAllocatorHolder.allocator; + public static ByteBufAllocator getByteBufAllocator(boolean forceHeapBuffer) { + if (Boolean.parseBoolean( + System.getProperty("io.grpc.netty.useCustomAllocator", "true"))) { + if (forceHeapBuffer || !PooledByteBufAllocator.defaultPreferDirect()) { + return ByteBufAllocatorPreferHeapHolder.allocator; + } else { + return ByteBufAllocatorPreferDirectHolder.allocator; + } + } else { + return ByteBufAllocator.DEFAULT; + } + } + + private static ByteBufAllocator createByteBufAllocator(boolean preferDirect) { + int maxOrder; + if (System.getProperty("io.netty.allocator.maxOrder") == null) { + // See the implementation of PooledByteBufAllocator. DEFAULT_MAX_ORDER in there is + // 11, which makes chunk size to be 8192 << 11 = 16 MiB. 
We want the chunk size to be + // 2MiB, thus reducing the maxOrder to 8. + maxOrder = 8; + } else { + maxOrder = PooledByteBufAllocator.defaultMaxOrder(); + } + return new PooledByteBufAllocator( + preferDirect, + PooledByteBufAllocator.defaultNumHeapArena(), + // Assuming neither gRPC nor netty are using allocator.directBuffer() to request + // specifically for direct buffers, which is true as I just checked, setting arenas to 0 + // will make sure no direct buffer is ever created. + preferDirect ? PooledByteBufAllocator.defaultNumDirectArena() : 0, + PooledByteBufAllocator.defaultPageSize(), + maxOrder, + PooledByteBufAllocator.defaultTinyCacheSize(), + PooledByteBufAllocator.defaultSmallCacheSize(), + PooledByteBufAllocator.defaultNormalCacheSize(), + PooledByteBufAllocator.defaultUseCacheForAllThreads()); } public static Metadata convertHeaders(Http2Headers http2Headers) { diff --git a/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java b/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java index f773aafd6df..07979ab11ad 100644 --- a/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java +++ b/netty/src/test/java/io/grpc/netty/NettyClientTransportTest.java @@ -769,7 +769,7 @@ private void startServer(int maxStreamsPerConnection, int maxHeaderListSize) thr TestUtils.testServerAddress(new InetSocketAddress(0)), new ReflectiveChannelFactory<>(NioServerSocketChannel.class), new HashMap, Object>(), - new FixedObjectPool<>(group), new FixedObjectPool<>(group), negotiator, + new FixedObjectPool<>(group), new FixedObjectPool<>(group), false, negotiator, Collections.emptyList(), TransportTracer.getDefaultFactory(), maxStreamsPerConnection, diff --git a/netty/src/test/java/io/grpc/netty/NettyServerTest.java b/netty/src/test/java/io/grpc/netty/NettyServerTest.java index 141cb9972b9..a5b8e8ecf1a 100644 --- a/netty/src/test/java/io/grpc/netty/NettyServerTest.java +++ b/netty/src/test/java/io/grpc/netty/NettyServerTest.java @@ -94,6 +94,7 @@ 
class TestProtocolNegotiator implements ProtocolNegotiator { new HashMap, Object>(), new FixedObjectPool<>(eventLoop), new FixedObjectPool<>(eventLoop), + false, protocolNegotiator, Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -138,6 +139,7 @@ public void getPort_notStarted() { new HashMap, Object>(), new FixedObjectPool<>(eventLoop), new FixedObjectPool<>(eventLoop), + false, ProtocolNegotiators.plaintext(), Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -176,6 +178,7 @@ public void childChannelOptions() throws Exception { channelOptions, new FixedObjectPool<>(eventLoop), new FixedObjectPool<>(eventLoop), + false, ProtocolNegotiators.plaintext(), Collections.emptyList(), TransportTracer.getDefaultFactory(), @@ -226,6 +229,7 @@ public void channelzListenSocket() throws Exception { new HashMap, Object>(), new FixedObjectPool<>(eventLoop), new FixedObjectPool<>(eventLoop), + false, ProtocolNegotiators.plaintext(), Collections.emptyList(), TransportTracer.getDefaultFactory(), From 42f2e7f733f48b3bf21d28eb7484af077ddd3c4d Mon Sep 17 00:00:00 2001 From: Eric Gribkoff Date: Tue, 21 Jan 2020 14:38:10 -0800 Subject: [PATCH 09/86] core: change catch blocks for Android <19 (#6611) Caught by lint warning during import --- .../AbstractManagedChannelImplBuilder.java | 22 ++++++++++++++----- .../internal/AbstractServerImplBuilder.java | 22 ++++++++++++++----- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java b/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java index 93883da921f..507b5d185fe 100644 --- a/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java +++ b/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java @@ -576,8 +576,14 @@ final List getEffectiveInterceptors() { recordStartedRpcs, recordFinishedRpcs, recordRealTimeMetrics); - } catch (ClassNotFoundException | 
NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + } catch (ClassNotFoundException e) { + // Replace these separate catch statements with multicatch when Android min-API >= 19 + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (NoSuchMethodException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (IllegalAccessException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (InvocationTargetException e) { log.log(Level.FINE, "Unable to apply census stats", e); } if (statsInterceptor != null) { @@ -595,9 +601,15 @@ final List getEffectiveInterceptors() { Method getClientInterceptroMethod = censusTracingAccessor.getDeclaredMethod("getClientInterceptor"); tracingInterceptor = (ClientInterceptor) getClientInterceptroMethod.invoke(null); - } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { - log.log(Level.FINE, "Unable to apply census tracing", e); + } catch (ClassNotFoundException e) { + // Replace these separate catch statements with multicatch when Android min-API >= 19 + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (NoSuchMethodException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (IllegalAccessException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (InvocationTargetException e) { + log.log(Level.FINE, "Unable to apply census stats", e); } if (tracingInterceptor != null) { effectiveInterceptors.add(0, tracingInterceptor); diff --git a/core/src/main/java/io/grpc/internal/AbstractServerImplBuilder.java b/core/src/main/java/io/grpc/internal/AbstractServerImplBuilder.java index ff151d1a6d0..6928434df3c 100644 --- a/core/src/main/java/io/grpc/internal/AbstractServerImplBuilder.java +++ b/core/src/main/java/io/grpc/internal/AbstractServerImplBuilder.java @@ -253,8 +253,14 @@ final List getTracerFactories() { recordStartedRpcs, 
recordFinishedRpcs, recordRealTimeMetrics); - } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + } catch (ClassNotFoundException e) { + // Replace these separate catch statements with multicatch when Android min-API >= 19 + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (NoSuchMethodException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (IllegalAccessException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (InvocationTargetException e) { log.log(Level.FINE, "Unable to apply census stats", e); } if (censusStatsTracerFactory != null) { @@ -270,9 +276,15 @@ final List getTracerFactories() { censusTracingAccessor.getDeclaredMethod("getServerStreamTracerFactory"); tracingStreamTracerFactory = (ServerStreamTracer.Factory) getServerStreamTracerFactoryMethod.invoke(null); - } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { - log.log(Level.FINE, "Unable to apply census tracing", e); + } catch (ClassNotFoundException e) { + // Replace these separate catch statements with multicatch when Android min-API >= 19 + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (NoSuchMethodException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (IllegalAccessException e) { + log.log(Level.FINE, "Unable to apply census stats", e); + } catch (InvocationTargetException e) { + log.log(Level.FINE, "Unable to apply census stats", e); } if (tracingStreamTracerFactory != null) { tracerFactories.add(tracingStreamTracerFactory); From 1f1ccf2641d35fdc5910b2780885ac694355f504 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Tue, 21 Jan 2020 17:03:37 -0800 Subject: [PATCH 10/86] xds: enable keepalive for XDS channel XDS clients will use a keepalive time of 5 minutes. 
--- xds/src/main/java/io/grpc/xds/XdsClient.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClient.java b/xds/src/main/java/io/grpc/xds/XdsClient.java index a19fd301c27..85d15f5bca9 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClient.java +++ b/xds/src/main/java/io/grpc/xds/XdsClient.java @@ -40,6 +40,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.TimeUnit; import javax.annotation.Nullable; /** @@ -491,19 +492,22 @@ ManagedChannel createChannel(List servers) { ServerInfo serverInfo = servers.get(0); String serverUri = serverInfo.getServerUri(); List channelCredsList = serverInfo.getChannelCredentials(); - ManagedChannel ch = null; + ManagedChannelBuilder channelBuilder = null; // Use the first supported channel credentials configuration. // Currently, only "google_default" is supported. for (ChannelCreds creds : channelCredsList) { if (creds.getType().equals("google_default")) { - ch = GoogleDefaultChannelBuilder.forTarget(serverUri).build(); + channelBuilder = GoogleDefaultChannelBuilder.forTarget(serverUri); break; } } - if (ch == null) { - ch = ManagedChannelBuilder.forTarget(serverUri).build(); + if (channelBuilder == null) { + channelBuilder = ManagedChannelBuilder.forTarget(serverUri); } - return ch; + + return channelBuilder + .keepAliveTime(5, TimeUnit.MINUTES) + .build(); } }; From 9cf45e960b41773e1a2c8f303acd4dbdd4dfb2fd Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 22 Jan 2020 11:07:18 -0800 Subject: [PATCH 11/86] xds: Truth Lint fixes --- xds/src/test/java/io/grpc/xds/XdsClientImplTest.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index f322bfed75c..7395a364cb3 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ 
b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -1294,8 +1294,8 @@ public void cdsResponseWithMatchingResource() { assertThat(clusterUpdate.getEdsServiceName()) .isEqualTo("eds-cluster-foo.googleapis.com"); assertThat(clusterUpdate.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate.getLrsServerName()).isEqualTo(""); + assertThat(clusterUpdate.isEnableLrs()).isTrue(); + assertThat(clusterUpdate.getLrsServerName()).isEmpty(); } /** @@ -1446,7 +1446,7 @@ public void multipleClusterWatchers() { .isEqualTo("eds-cluster-bar.googleapis.com"); assertThat(clusterUpdate3.getLbPolicy()).isEqualTo("round_robin"); assertThat(clusterUpdate3.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate3.getLrsServerName()).isEqualTo(""); + assertThat(clusterUpdate3.getLrsServerName()).isEmpty(); } /** @@ -1596,7 +1596,7 @@ public void addRemoveClusterWatchers() { .isEqualTo("eds-cluster-bar.googleapis.com"); assertThat(clusterUpdate2.getLbPolicy()).isEqualTo("round_robin"); assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate2.getLrsServerName()).isEqualTo(""); + assertThat(clusterUpdate2.getLrsServerName()).isEmpty(); // Cancel one of the watcher. 
xdsClient.cancelClusterDataWatch("cluster-foo.googleapis.com", watcher1); @@ -1667,7 +1667,7 @@ public void addRemoveClusterWatchers() { .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name assertThat(clusterUpdate3.getLbPolicy()).isEqualTo("round_robin"); assertThat(clusterUpdate3.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate2.getLrsServerName()).isEqualTo(""); + assertThat(clusterUpdate2.getLrsServerName()).isEmpty(); verifyNoMoreInteractions(watcher1, watcher2); @@ -1775,7 +1775,7 @@ public void cdsUpdateForClusterBeingRemoved() { .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name assertThat(clusterUpdate.getLbPolicy()).isEqualTo("round_robin"); assertThat(clusterUpdate.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate.getLrsServerName()).isEqualTo(""); + assertThat(clusterUpdate.getLrsServerName()).isEmpty(); assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); // No cluster is available. From ee661d45ebea7470e0486a0f4aa1332ef8b36e14 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Wed, 22 Jan 2020 14:27:06 -0800 Subject: [PATCH 12/86] xds: notify all watchers when RPC stream is closed by server (#6629) --- .../main/java/io/grpc/xds/XdsClientImpl.java | 13 ++++++++ .../java/io/grpc/xds/XdsClientImplTest.java | 33 ++++++++++++++++--- 2 files changed, 41 insertions(+), 5 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 70fcd0451e1..20bdf069885 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -1077,6 +1077,19 @@ private void handleStreamClosed(Status error) { } logger.log(Level.FINE, error.getDescription(), error.getCause()); closed = true; + if (configWatcher != null) { + configWatcher.onError(error); + } + for (Set watchers : clusterWatchers.values()) { + for (ClusterWatcher watcher : watchers) { + 
watcher.onError(error); + } + } + for (Set watchers : endpointWatchers.values()) { + for (EndpointWatcher watcher : watchers) { + watcher.onError(error); + } + } cleanUp(); cleanUpResources(); if (responseReceived || retryBackoffPolicy == null) { diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index 7395a364cb3..a9c139eaaed 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -2618,6 +2618,7 @@ public void streamClosedAndRetry() { StreamObserver requestObserver = requestObservers.poll(); waitUntilConfigResolved(responseObserver); + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); // Start watching cluster information. xdsClient.watchClusterData("cluster.googleapis.com", clusterWatcher); @@ -2637,6 +2638,12 @@ public void streamClosedAndRetry() { // Management server closes the RPC stream with an error. responseObserver.onError(Status.UNKNOWN.asException()); + verify(configWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNKNOWN); + verify(clusterWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNKNOWN); + verify(endpointWatcher).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNKNOWN); // Resets backoff and retry immediately. inOrder.verify(backoffPolicyProvider).get(); @@ -2659,6 +2666,12 @@ public void streamClosedAndRetry() { // Management server becomes unreachable. 
responseObserver.onError(Status.UNAVAILABLE.asException()); + verify(configWatcher, times(2)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); + verify(clusterWatcher, times(2)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); + verify(endpointWatcher, times(2)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); inOrder.verify(backoffPolicy1).nextBackoffNanos(); assertThat(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER)).hasSize(1); @@ -2682,6 +2695,12 @@ public void streamClosedAndRetry() { // Management server is still not reachable. responseObserver.onError(Status.UNAVAILABLE.asException()); + verify(configWatcher, times(3)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); + verify(clusterWatcher, times(3)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); + verify(endpointWatcher, times(3)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); inOrder.verify(backoffPolicy1).nextBackoffNanos(); assertThat(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER)).hasSize(1); @@ -2714,6 +2733,9 @@ public void streamClosedAndRetry() { // Management server closes the RPC stream. responseObserver.onCompleted(); + verify(configWatcher, times(4)).onError(any(Status.class)); + verify(clusterWatcher, times(4)).onError(any(Status.class)); + verify(endpointWatcher, times(4)).onError(any(Status.class)); // Resets backoff and retry immediately inOrder.verify(backoffPolicyProvider).get(); @@ -2735,6 +2757,12 @@ public void streamClosedAndRetry() { // Management server becomes unreachable again. 
responseObserver.onError(Status.UNAVAILABLE.asException()); + verify(configWatcher, times(5)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); + verify(clusterWatcher, times(5)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); + verify(endpointWatcher, times(5)).onError(statusCaptor.capture()); + assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); inOrder.verify(backoffPolicy2).nextBackoffNanos(); assertThat(fakeClock.getPendingTasks(RPC_RETRY_TASK_FILTER)).hasSize(1); @@ -3044,11 +3072,6 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { assertThat(cdsRespTimeoutTask.getDelay(TimeUnit.SECONDS)) .isEqualTo(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC); - fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); - ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(null); - verify(clusterWatcher).onError(statusCaptor.capture()); - assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.NOT_FOUND); - // Start watching endpoint data. xdsClient.watchEndpointData("cluster-foo.googleapis.com", endpointWatcher); ScheduledTask edsTimeoutTask = From a223263134c2bd4bf97b943bc52a0a128bc67268 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Wed, 22 Jan 2020 15:38:38 -0800 Subject: [PATCH 13/86] xds: better error handling to avoid RPC hangup This change will fail application RPC immediately if XdsClient encounters any error instead of retrying or getting to fallback silently. There could be optimization if the channel is currently READY while XdsClient stream just closed due to connection error, in which case we could still be using the current available subchannels while retrying, but this requires the LB knows the semantics of error status from the XdsClient. This optimization is not worth the effort for now. 
--- xds/src/main/java/io/grpc/xds/LookasideLb.java | 1 + xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java | 5 +++++ xds/src/test/java/io/grpc/xds/LookasideLbTest.java | 3 +++ xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java | 5 +++++ 4 files changed, 14 insertions(+) diff --git a/xds/src/main/java/io/grpc/xds/LookasideLb.java b/xds/src/main/java/io/grpc/xds/LookasideLb.java index 7c1c0e4837c..68c834f0f69 100644 --- a/xds/src/main/java/io/grpc/xds/LookasideLb.java +++ b/xds/src/main/java/io/grpc/xds/LookasideLb.java @@ -502,6 +502,7 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { @Override public void onError(Status error) { channelLogger.log(ChannelLogLevel.ERROR, "EDS load balancer received an error: {0}", error); + lookasideLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); endpointUpdateCallback.onError(); } } diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java index fdbfdf5149c..564b085fdbb 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java @@ -28,6 +28,7 @@ import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.util.ForwardingLoadBalancerHelper; import io.grpc.xds.LookasideLb.EndpointUpdateCallback; +import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; import java.util.concurrent.TimeUnit; import javax.annotation.CheckForNull; import javax.annotation.Nullable; @@ -118,6 +119,10 @@ class EnterFallbackTask implements Runnable { @Override public void run() { + helper.updateBalancingState( + ConnectivityState.TRANSIENT_FAILURE, + new ErrorPicker(Status.UNAVAILABLE.withDescription( + "Channel is not ready when timeout for entering fallback mode happens"))); useFallbackPolicy(); } } diff --git a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java index fd907d341c7..7baf2ce7a71 100644 --- 
a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java +++ b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java @@ -787,8 +787,11 @@ public void handleLocalityAssignmentUpdates() { public void verifyRpcErrorPropagation() { lookasideLb.handleResolvedAddresses(defaultResolvedAddress); + verify(helper, never()).updateBalancingState( + eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verify(edsUpdateCallback, never()).onError(); serverResponseWriter.onError(new RuntimeException()); + verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verify(edsUpdateCallback).onError(); } diff --git a/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java b/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java index db8b82e2ffc..694f5e5707c 100644 --- a/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java +++ b/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java @@ -19,6 +19,9 @@ import static com.google.common.truth.Truth.assertThat; import static io.grpc.ConnectivityState.CONNECTING; import static io.grpc.ConnectivityState.READY; +import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -144,7 +147,9 @@ public void timeoutAtStartup_expectUseFallback_thenBackendReady_expectExitFallba fakeClock.forwardTime(9, TimeUnit.SECONDS); edsUpdateCallback.onWorking(); verifyNotInFallbackMode(); + fakeClock.forwardTime(1, TimeUnit.SECONDS); + verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verifyInFallbackMode(); SubchannelPicker subchannelPicker = mock(SubchannelPicker.class); From b1209b7ed5c8c840243d5406dec5772404cc6e27 Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Wed, 22 Jan 2020 16:52:43 -0800 Subject: [PATCH 14/86] core: fix AutoConfiguredLBFactory populates wrong type 
(#6600) --- .../AutoConfiguredLoadBalancerFactory2.java | 8 +++--- ...utoConfiguredLoadBalancerFactoryTest2.java | 10 +++---- .../internal/ManagedChannelImplTest2.java | 4 +-- .../ServiceConfigErrorHandlingTest.java | 26 ++++++------------- 4 files changed, 19 insertions(+), 29 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java index 0d2cecf32cf..d463293659d 100644 --- a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java +++ b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java @@ -149,7 +149,7 @@ Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { ChannelLogLevel.INFO, "Load balancer changed from {0} to {1}", old.getClass().getSimpleName(), delegate.getClass().getSimpleName()); } - ConfigOrError lbConfig = selection.config; + Object lbConfig = selection.config; if (lbConfig != null) { helper.getChannelLogger().log( ChannelLogLevel.DEBUG, "Load-balancing config: {0}", selection.config); @@ -337,7 +337,7 @@ ConfigOrError parseLoadBalancerPolicy(Map serviceConfig, ChannelLogge return parsedLbPolicyConfig; } return ConfigOrError.fromConfig( - new PolicySelection(provider, serviceConfig, parsedLbPolicyConfig)); + new PolicySelection(provider, serviceConfig, parsedLbPolicyConfig.getConfig())); } } return ConfigOrError.fromError( @@ -364,12 +364,12 @@ private PolicyException(String msg) { static final class PolicySelection { final LoadBalancerProvider provider; @Nullable final Map rawConfig; - @Nullable final ConfigOrError config; + @Nullable final Object config; PolicySelection( LoadBalancerProvider provider, @Nullable Map rawConfig, - @Nullable ConfigOrError config) { + @Nullable Object config) { this.provider = checkNotNull(provider, "provider"); this.rawConfig = rawConfig; this.config = config; diff --git 
a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java index 0692beb8877..c60c6eb4397 100644 --- a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java +++ b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java @@ -386,7 +386,7 @@ public void handleResolvedAddressGroups_delegateAcceptsEmptyAddressList() verify(testLbBalancer2).handleResolvedAddresses(resultCaptor.capture()); assertThat(resultCaptor.getValue().getAddresses()).isEmpty(); assertThat(resultCaptor.getValue().getLoadBalancingPolicyConfig()) - .isEqualTo(nextParsedConfigOrError2.get()); + .isEqualTo(nextParsedConfigOrError2.get().getConfig()); assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) .isEqualTo(rawServiceConfig); } @@ -687,7 +687,7 @@ public ScheduledExecutorService getScheduledExecutorService() { verify(channelLogger).log( eq(ChannelLogLevel.DEBUG), eq("Load-balancing config: {0}"), - eq(testLbParsedConfig)); + eq(testLbParsedConfig.getConfig())); verifyNoMoreInteractions(channelLogger); testLbParsedConfig = ConfigOrError.fromConfig("bar"); @@ -703,7 +703,7 @@ public ScheduledExecutorService getScheduledExecutorService() { verify(channelLogger).log( eq(ChannelLogLevel.DEBUG), eq("Load-balancing config: {0}"), - eq(testLbParsedConfig)); + eq(testLbParsedConfig.getConfig())); verifyNoMoreInteractions(channelLogger); servers = Collections.singletonList(new EquivalentAddressGroup( @@ -815,7 +815,7 @@ public void parseLoadBalancerConfig_firstValidSecondInvalidPolicy() throws Excep ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); assertThat(parsed).isNotNull(); assertThat(parsed.getConfig()).isNotNull(); - assertThat(((PolicySelection) parsed.getConfig()).config.getConfig()).isNotNull(); + assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull(); } @Test @@ 
-827,7 +827,7 @@ public void parseLoadBalancerConfig_someProvidesAreNotAvailable() throws Excepti ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); assertThat(parsed).isNotNull(); assertThat(parsed.getConfig()).isNotNull(); - assertThat(((PolicySelection) parsed.getConfig()).config.getConfig()).isNotNull(); + assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull(); verify(channelLogger).log( eq(ChannelLogLevel.DEBUG), eq("{0} specified by Service Config are not available"), diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java index 38d6cde120d..bde0d08caa4 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java +++ b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java @@ -1006,7 +1006,7 @@ public void nameResolverReturnsEmptySubLists_optionallyAllowed() throws Exceptio new PolicySelection( mockLoadBalancerProvider, parseConfig(rawLbConfig), - ConfigOrError.fromConfig(new Object()))); + new Object())); nameResolverFactory.nextConfigOrError.set(ConfigOrError.fromConfig(parsedServiceConfig)); nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); channelBuilder.nameResolverFactory(nameResolverFactory); @@ -3486,7 +3486,7 @@ protected ClientTransportFactory buildTransportFactory() { Object fakeLbConfig = new Object(); PolicySelection lbConfigs = new PolicySelection( - mockLoadBalancerProvider, rawServiceConfig, ConfigOrError.fromConfig(fakeLbConfig)); + mockLoadBalancerProvider, rawServiceConfig, fakeLbConfig); mockLoadBalancerProvider.parseLoadBalancingPolicyConfig(rawServiceConfig); ManagedChannelServiceConfig2 managedChannelServiceConfig = createManagedChannelServiceConfig(rawServiceConfig, lbConfigs); diff --git a/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java b/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java index 
4b89ae889e5..24cbd589a28 100644 --- a/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java +++ b/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java @@ -153,8 +153,6 @@ public ConfigOrError parseLoadBalancingPolicyConfig( private Executor blockingExecutor; private ChannelBuilder channelBuilder; - - private void createChannel(ClientInterceptor... interceptors) { checkState(channel == null); @@ -270,7 +268,7 @@ public void emptyAddresses_validConfig_2ndResolution_lbNeedsAddress() throws Exc verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).containsExactly(addressGroup); - assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("12"); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("12"); assertThat(resolvedAddresses.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) .isEqualTo(rawServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); @@ -315,7 +313,7 @@ public void emptyAddresses_validConfig_lbDoesNotNeedAddress() throws Exception { ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).isEmpty(); - assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("val");; + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("val");; verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); assertThat(channel.getState(false)).isNotEqualTo(ConnectivityState.TRANSIENT_FAILURE); @@ -340,7 +338,7 @@ public void validConfig_lbDoesNotNeedAddress() throws Exception { verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).containsExactly(addressGroup); - 
assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("foo"); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("foo"); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); assertThat(channel.getState(false)).isNotEqualTo(ConnectivityState.TRANSIENT_FAILURE); @@ -362,7 +360,7 @@ public void noConfig_noDefaultConfig() { verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).containsExactly(addressGroup); - assertThat(getLbPolicyConfig(resolvedAddresses)).isNull(); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isNull(); assertThat(resolvedAddresses.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) .isEmpty(); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); @@ -390,7 +388,7 @@ public void noConfig_usingDefaultConfig() throws Exception { verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).containsExactly(addressGroup); - assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("foo"); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("foo"); assertThat(resolvedAddresses.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) .isEqualTo(defaultServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); @@ -438,7 +436,7 @@ public void invalidConfig_withDefaultConfig() throws Exception { ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).containsExactly(addressGroup); - assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("mate"); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("mate"); 
assertThat(resolvedAddresses.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) .isEqualTo(defaultServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); @@ -465,7 +463,7 @@ public void invalidConfig_2ndResolution() throws Exception { verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); ResolvedAddresses resolvedAddresses = resultCaptor.getValue(); assertThat(resolvedAddresses.getAddresses()).containsExactly(addressGroup); - assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("1st raw config"); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("1st raw config"); assertThat(resolvedAddresses.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) .isEqualTo(rawServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); @@ -481,7 +479,7 @@ public void invalidConfig_2ndResolution() throws Exception { verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); ResolvedAddresses newResolvedAddress = resultCaptor.getValue(); // should use previous service config because new service config is invalid. - assertThat(getLbPolicyConfig(resolvedAddresses)).isEqualTo("1st raw config"); + assertThat(resolvedAddresses.getLoadBalancingPolicyConfig()).isEqualTo("1st raw config"); assertThat(newResolvedAddress.getAttributes()).isNotEqualTo(Attributes.EMPTY); assertThat(newResolvedAddress.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) .isEqualTo(rawServiceConfig); @@ -661,12 +659,4 @@ public void shutdown() {} private static Map parseJson(String json) throws Exception { return (Map) JsonParser.parse(json); } - - @Nullable - private static Object getLbPolicyConfig(ResolvedAddresses resolvedAddresses) { - ConfigOrError loadBalancingPolicyConfig = - (ConfigOrError) resolvedAddresses.getLoadBalancingPolicyConfig(); - return - loadBalancingPolicyConfig == null ? 
null : loadBalancingPolicyConfig.getConfig(); - } } From 27f12a90f32d04869d4993c7a0ceaf35bf5b6012 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Wed, 22 Jan 2020 17:05:18 -0800 Subject: [PATCH 15/86] xds: refactor LookasideLb for EDS-only usecase and integrate load stats report - Replace XdsComms2 with XdsClientImpl - Enable/Disable load report stats with `XdsClient` APIs. Testing strategy: - Use real XdsClientImp for EDS-only because the balancer creates the XdsClient by itself. The state of the XdsClientImpl will be the actual state as real. - Use mock XdsClient for non EDS-only case because the XdsClient in resolved addresses attributes is supposed to be a stateful XdsClient with some pre-existing CDS state, so creating a brand new real XdsClientImp in test can not simulate the same state. In this case only verify interaction with XdsClient APIs. - Use a `LocalityStoreFactory` to verify interaction with `LocalityStore` APIs. However, this can not cover any interaction with the `Helper` and `LoadStatsStore` inputs of `LocalityStoreFactory.newLocalityStore(Helper, LoadBalancerRegistry, LoadStatsStore)`, so some basic non-exhaustive tests are added to cover the gap. The testing strategy is imperfect but is a trade-off considering load stats report is very hard to test here and LocalityStore/real balancing behavior is too much to be exhaustively tested in `LookasideLb`. 
--- .../io/grpc/xds/LoadReportClientImpl.java | 32 - .../main/java/io/grpc/xds/LookasideLb.java | 291 ++-- xds/src/main/java/io/grpc/xds/XdsComms2.java | 298 ----- .../java/io/grpc/xds/LookasideLbTest.java | 1168 ++++++++--------- .../test/java/io/grpc/xds/XdsCommsTest.java | 594 --------- 5 files changed, 690 insertions(+), 1693 deletions(-) delete mode 100644 xds/src/main/java/io/grpc/xds/XdsComms2.java delete mode 100644 xds/src/test/java/io/grpc/xds/XdsCommsTest.java diff --git a/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java b/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java index 8fd5028913d..57c4c971d5e 100644 --- a/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java @@ -342,36 +342,4 @@ private void cleanUp() { } } } - - /** - * Factory class for creating {@link LoadReportClient} instances. - */ - // TODO(chengyuanzhang): eliminate this factory after migrating EDS load balancer to - // use XdsClient. 
- abstract static class LoadReportClientFactory { - - private static final LoadReportClientFactory DEFAULT_INSTANCE = - new LoadReportClientFactory() { - @Override - LoadReportClient createLoadReportClient( - ManagedChannel channel, - String clusterName, - Node node, - SynchronizationContext syncContext, - ScheduledExecutorService timeService, - BackoffPolicy.Provider backoffPolicyProvider, - Supplier stopwatchSupplier) { - return new LoadReportClientImpl(channel, clusterName, node, syncContext, timeService, - backoffPolicyProvider, stopwatchSupplier); - } - }; - - static LoadReportClientFactory getInstance() { - return DEFAULT_INSTANCE; - } - - abstract LoadReportClient createLoadReportClient(ManagedChannel channel, String clusterName, - Node node, SynchronizationContext syncContext, ScheduledExecutorService timeService, - BackoffPolicy.Provider backoffPolicyProvider, Supplier stopwatchSupplier); - } } diff --git a/xds/src/main/java/io/grpc/xds/LookasideLb.java b/xds/src/main/java/io/grpc/xds/LookasideLb.java index 68c834f0f69..57013e32425 100644 --- a/xds/src/main/java/io/grpc/xds/LookasideLb.java +++ b/xds/src/main/java/io/grpc/xds/LookasideLb.java @@ -18,7 +18,6 @@ import static com.google.common.base.Preconditions.checkNotNull; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static java.util.logging.Level.FINEST; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; @@ -29,34 +28,29 @@ import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerRegistry; -import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; -import io.grpc.alts.GoogleDefaultChannelBuilder; import io.grpc.internal.ExponentialBackoffPolicy; import io.grpc.internal.GrpcUtil; import io.grpc.internal.ObjectPool; import io.grpc.util.GracefulSwitchLoadBalancer; import io.grpc.xds.Bootstrapper.BootstrapInfo; 
-import io.grpc.xds.Bootstrapper.ChannelCreds; import io.grpc.xds.Bootstrapper.ServerInfo; import io.grpc.xds.EnvoyProtoData.DropOverload; import io.grpc.xds.EnvoyProtoData.Locality; import io.grpc.xds.EnvoyProtoData.LocalityLbEndpoints; -import io.grpc.xds.LoadReportClient.LoadReportCallback; -import io.grpc.xds.LoadReportClientImpl.LoadReportClientFactory; import io.grpc.xds.LocalityStore.LocalityStoreFactory; import io.grpc.xds.XdsClient.EndpointUpdate; import io.grpc.xds.XdsClient.EndpointWatcher; import io.grpc.xds.XdsClient.RefCountedXdsClientObjectPool; +import io.grpc.xds.XdsClient.XdsChannelFactory; import io.grpc.xds.XdsClient.XdsClientFactory; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.logging.Logger; import javax.annotation.Nullable; /** Lookaside load balancer that handles EDS config. */ @@ -67,9 +61,11 @@ final class LookasideLb extends LoadBalancer { private final GracefulSwitchLoadBalancer switchingLoadBalancer; private final LoadBalancerRegistry lbRegistry; private final LocalityStoreFactory localityStoreFactory; - private final LoadReportClientFactory loadReportClientFactory; private final Bootstrapper bootstrapper; + private final XdsChannelFactory channelFactory; private final Helper lookasideLbHelper; + // Cache for load stats stores for each service in cluster keyed by cluster service names. + private final Map loadStatsStoreMap = new HashMap<>(); // Most recent XdsConfig. @Nullable @@ -80,11 +76,11 @@ final class LookasideLb extends LoadBalancer { @Nullable private ObjectPool xdsClientPool; @Nullable - XdsClient xdsClient; - // Only for EDS-only case. - // TODO(zdapeng): Stop using it once XdsClientImpl is used. 
+ private XdsClient xdsClient; @Nullable - ManagedChannel channel; + private LoadReportClient loadReportClient; + @Nullable + private String clusterName; LookasideLb(Helper lookasideLbHelper, EndpointUpdateCallback endpointUpdateCallback) { this( @@ -92,8 +88,8 @@ final class LookasideLb extends LoadBalancer { checkNotNull(endpointUpdateCallback, "endpointUpdateCallback"), LoadBalancerRegistry.getDefaultRegistry(), LocalityStoreFactory.getInstance(), - LoadReportClientFactory.getInstance(), - Bootstrapper.getInstance()); + Bootstrapper.getInstance(), + XdsChannelFactory.getInstance()); } @VisibleForTesting @@ -102,16 +98,16 @@ final class LookasideLb extends LoadBalancer { EndpointUpdateCallback endpointUpdateCallback, LoadBalancerRegistry lbRegistry, LocalityStoreFactory localityStoreFactory, - LoadReportClientFactory loadReportClientFactory, - Bootstrapper bootstrapper) { + Bootstrapper bootstrapper, + XdsChannelFactory channelFactory) { this.lookasideLbHelper = lookasideLbHelper; this.channelLogger = lookasideLbHelper.getChannelLogger(); this.endpointUpdateCallback = endpointUpdateCallback; this.lbRegistry = lbRegistry; - this.switchingLoadBalancer = new GracefulSwitchLoadBalancer(lookasideLbHelper); this.localityStoreFactory = localityStoreFactory; - this.loadReportClientFactory = loadReportClientFactory; + this.switchingLoadBalancer = new GracefulSwitchLoadBalancer(lookasideLbHelper); this.bootstrapper = bootstrapper; + this.channelFactory = channelFactory; } @Override @@ -131,7 +127,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { } newXdsConfig = (XdsConfig) lbConfig; } else { - // In the future, in all cases xdsConfig can be gotten directly by + // In the future, in all cases xdsConfig can be obtained directly by // resolvedAddresses.getLoadBalancingPolicyConfig(). 
Map newRawLbConfig = attributes.get(ATTR_LOAD_BALANCING_CONFIG); if (newRawLbConfig == null) { @@ -157,10 +153,10 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { if (xdsClientPool == null) { // Init xdsClientPool and xdsClient. // There are two usecases: - // 1. The EDS-only: + // 1. EDS-only: // The name resolver resolves a ResolvedAddresses with an XdsConfig. Use the bootstrap // information to create a channel. - // 2. Non EDS-only usecase: + // 2. Non EDS-only: // XDS_CLIENT_POOL attribute is available from ResolvedAddresses either from // XdsNameResolver or CDS policy. // @@ -179,7 +175,8 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { return; } - List serverList = bootstrapInfo.getServers(); + final List serverList = bootstrapInfo.getServers(); + final Node node = bootstrapInfo.getNode(); if (serverList.isEmpty()) { lookasideLbHelper.updateBalancingState( TRANSIENT_FAILURE, @@ -188,40 +185,59 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { .withDescription("No traffic director provided by bootstrap"))); return; } - // Currently we only support using the first server from bootstrap. - ServerInfo serverInfo = serverList.get(0); - channel = initLbChannel( - lookasideLbHelper, serverInfo.getServerUri(), - serverInfo.getChannelCredentials()); - xdsClientPool = new RefCountedXdsClientObjectPool(new XdsClientFactory() { + XdsClientFactory xdsClientFactory = new XdsClientFactory() { @Override XdsClient createXdsClient() { - // TODO(zdapeng): Replace XdsComms2 with XdsClientImpl. 
- return new XdsComms2( - channel, lookasideLbHelper, new ExponentialBackoffPolicy.Provider(), - GrpcUtil.STOPWATCH_SUPPLIER, bootstrapInfo.getNode()); + return + new XdsClientImpl( + serverList, + channelFactory, + node, + lookasideLbHelper.getSynchronizationContext(), + lookasideLbHelper.getScheduledExecutorService(), + new ExponentialBackoffPolicy.Provider(), + GrpcUtil.STOPWATCH_SUPPLIER); } - }); + }; + xdsClientPool = new RefCountedXdsClientObjectPool(xdsClientFactory); } xdsClient = xdsClientPool.getObject(); } + // The edsServiceName field is null in legacy gRPC client with EDS: use target authority for + // querying endpoints, but in the future we expect this to be explicitly given by EDS config. + // We assume if edsServiceName is null, it will always be null in later resolver updates; + // and if edsServiceName is not null, it will always be not null. + String clusterServiceName = newXdsConfig.edsServiceName; + if (clusterServiceName == null) { + clusterServiceName = lookasideLbHelper.getAuthority(); + } + if (clusterName == null) { + // TODO(zdapeng): Use the correct cluster name. Currently load reporting will be broken if + // edsServiceName is changed because we are using edsServiceName for the cluster name. + clusterName = clusterServiceName; + } + + boolean shouldReportStats = newXdsConfig.lrsServerName != null; + if (shouldReportStats && !isReportingStats()) { + // Start load reporting. This may be a restarting after previously stopping the load + // reporting, so need to re-add all the pre-existing loadStatsStores to the new + // loadReportClient. + loadReportClient = xdsClient.reportClientStats(clusterName, newXdsConfig.lrsServerName); + for (Map.Entry entry : loadStatsStoreMap.entrySet()) { + loadReportClient.addLoadStatsStore(entry.getKey(), entry.getValue()); + } + } + if (!shouldReportStats && isReportingStats()) { + cancelClientStatsReport(); + } + // Note: childPolicy change will be handled in LocalityStore, to be implemented. 
// If edsServiceName in XdsConfig is changed, do a graceful switch. if (xdsConfig == null || !Objects.equals(newXdsConfig.edsServiceName, xdsConfig.edsServiceName)) { - String edsServiceName = newXdsConfig.edsServiceName; - - // The edsServiceName field is null in legacy gRPC client with EDS: use target authority for - // querying endpoints, but in the future we expect this to be explicitly given by EDS config. - // We assume if edsServiceName is null, it will always be null in later resolver updates; - // and if edsServiceName is not null, it will always be not null. - if (edsServiceName == null) { - edsServiceName = lookasideLbHelper.getAuthority(); - } - LoadBalancer.Factory clusterEndpointsLoadBalancerFactory = - new ClusterEndpointsBalancerFactory(edsServiceName); + new ClusterEndpointsBalancerFactory(clusterServiceName); switchingLoadBalancer.switchTo(clusterEndpointsLoadBalancerFactory); } resolvedAddresses = resolvedAddresses.toBuilder() @@ -229,22 +245,16 @@ XdsClient createXdsClient() { .setLoadBalancingPolicyConfig(newXdsConfig) .build(); switchingLoadBalancer.handleResolvedAddresses(resolvedAddresses); - this.xdsConfig = newXdsConfig; - // TODO(zdapeng): If lrsServerName in XdsConfig is changed, call xdsClient.reportClientStats() - // and/or xdsClient.cancelClientStatsReport(). + this.xdsConfig = newXdsConfig; } @Override public void handleNameResolutionError(Status error) { channelLogger.log(ChannelLogLevel.ERROR, "Name resolution error: {0}", error); - // Go into TRANSIENT_FAILURE if we have not yet received any endpoint update. Otherwise, - // we keep running with the data we had previously. - if (endpointWatcher == null) { - lookasideLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); - } else { - switchingLoadBalancer.handleNameResolutionError(error); - } + // This will go into TRANSIENT_FAILURE if we have not yet received any endpoint update and + // otherwise keep running with the data we had previously. 
+ switchingLoadBalancer.handleNameResolutionError(error); } @Override @@ -256,63 +266,40 @@ public boolean canHandleEmptyAddressListFromNameResolution() { public void shutdown() { channelLogger.log(ChannelLogLevel.DEBUG, "EDS load balancer is shutting down"); switchingLoadBalancer.shutdown(); - if (xdsClientPool != null) { - xdsClientPool.returnObject(xdsClient); + if (isReportingStats()) { + cancelClientStatsReport(); + } + if (xdsClient != null) { + xdsClient = xdsClientPool.returnObject(xdsClient); } } - private static ManagedChannel initLbChannel( - Helper helper, - String serverUri, - List channelCredsList) { - ManagedChannel channel = null; - try { - channel = helper.createResolvingOobChannel(serverUri); - } catch (UnsupportedOperationException uoe) { - // Temporary solution until createResolvingOobChannel is implemented. - // FIXME (https://github.com/grpc/grpc-java/issues/5495) - Logger logger = Logger.getLogger(LookasideLb.class.getName()); - if (logger.isLoggable(FINEST)) { - logger.log( - FINEST, - "createResolvingOobChannel() not supported by the helper: " + helper, - uoe); - logger.log(FINEST, "creating oob channel for target {0}", serverUri); - } + /** Whether the client stats for the cluster is currently reported to the traffic director. */ + private boolean isReportingStats() { + return loadReportClient != null; + } - // Use the first supported channel credentials configuration. - // Currently, only "google_default" is supported. - for (ChannelCreds creds : channelCredsList) { - if (creds.getType().equals("google_default")) { - channel = GoogleDefaultChannelBuilder.forTarget(serverUri).build(); - break; - } - } - if (channel == null) { - channel = ManagedChannelBuilder.forTarget(serverUri).build(); - } - } - return channel; + /** Stops to report client stats for the cluster. 
*/ + private void cancelClientStatsReport() { + xdsClient.cancelClientStatsReport(clusterName); + loadReportClient = null; } /** - * A load balancer factory that provides a load balancer for a given cluster. + * A load balancer factory that provides a load balancer for a given cluster service. */ private final class ClusterEndpointsBalancerFactory extends LoadBalancer.Factory { - final String edsServiceName; - @Nullable - final String oldEdsServiceName; + final String clusterServiceName; @Nullable - final EndpointWatcher oldEndpointWatcher; + final String oldClusterServiceName; - ClusterEndpointsBalancerFactory(String edsServiceName) { - this.edsServiceName = edsServiceName; + ClusterEndpointsBalancerFactory(String clusterServiceName) { + this.clusterServiceName = clusterServiceName; if (xdsConfig != null) { - oldEdsServiceName = xdsConfig.edsServiceName; + oldClusterServiceName = xdsConfig.edsServiceName; } else { - oldEdsServiceName = null; + oldClusterServiceName = null; } - oldEndpointWatcher = endpointWatcher; } @Override @@ -326,12 +313,12 @@ public boolean equals(Object o) { return false; } ClusterEndpointsBalancerFactory that = (ClusterEndpointsBalancerFactory) o; - return edsServiceName.equals(that.edsServiceName); + return clusterServiceName.equals(that.clusterServiceName); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), edsServiceName); + return Objects.hash(super.hashCode(), clusterServiceName); } /** @@ -339,25 +326,35 @@ public int hashCode() { */ final class ClusterEndpointsBalancer extends LoadBalancer { final Helper helper; - - // All fields become non-null once handleResolvedAddresses() successfully. - // All fields are assigned at most once. 
- @Nullable - LocalityStore localityStore; - @Nullable - LoadReportClient lrsClient; - @Nullable - EndpointWatcherImpl endpointWatcher; + final EndpointWatcherImpl endpointWatcher; + final LocalityStore localityStore; ClusterEndpointsBalancer(Helper helper) { this.helper = helper; + + LoadStatsStore loadStatsStore = new LoadStatsStoreImpl(); + loadStatsStoreMap.put(clusterServiceName, loadStatsStore); + if (isReportingStats()) { + loadReportClient.addLoadStatsStore(clusterServiceName, loadStatsStore); + } + localityStore = localityStoreFactory.newLocalityStore(helper, lbRegistry, loadStatsStore); + + endpointWatcher = new EndpointWatcherImpl(localityStore); + xdsClient.watchEndpointData(clusterServiceName, endpointWatcher); + if (LookasideLb.this.endpointWatcher != null) { + xdsClient.cancelEndpointDataWatch( + oldClusterServiceName, LookasideLb.this.endpointWatcher); + } + LookasideLb.this.endpointWatcher = endpointWatcher; } + // TODO(zddapeng): In handleResolvedAddresses() handle child policy change if any. + @Override public void handleNameResolutionError(Status error) { // Go into TRANSIENT_FAILURE if we have not yet received any endpoint update. Otherwise, // we keep running with the data we had previously. - if (endpointWatcher == null || !endpointWatcher.firstEndpointUpdateReceived) { + if (!endpointWatcher.firstEndpointUpdateReceived) { helper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); } } @@ -367,70 +364,14 @@ public boolean canHandleEmptyAddressListFromNameResolution() { return true; } - @Override - public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - if (endpointWatcher != null) { - // TODO(zddapeng): Handle child policy changed if any. 
- return; - } - - LoadStatsStore loadStatsStore = new LoadStatsStoreImpl(); - localityStore = localityStoreFactory.newLocalityStore(helper, lbRegistry, loadStatsStore); - LoadReportCallback lrsCallback = - new LoadReportCallback() { - @Override - public void onReportResponse(long reportIntervalNano) { - localityStore.updateOobMetricsReportInterval(reportIntervalNano); - } - }; - - // TODO(zdapeng): Use XdsClient to do Lrs directly. - // For now create an LRS Client. - if (channel != null) { - lrsClient = - loadReportClientFactory.createLoadReportClient( - channel, - helper.getAuthority(), - Node.getDefaultInstance(), - helper.getSynchronizationContext(), - helper.getScheduledExecutorService(), - new ExponentialBackoffPolicy.Provider(), - GrpcUtil.STOPWATCH_SUPPLIER); - lrsClient.addLoadStatsStore(edsServiceName, loadStatsStore); - } else { - lrsClient = new LoadReportClient() { - @Override - public void startLoadReporting(LoadReportCallback callback) {} - - @Override - public void stopLoadReporting() {} - - @Override - public void addLoadStatsStore( - String clusterServiceName, LoadStatsStore loadStatsStore) { - } - - @Override - public void removeLoadStatsStore(String clusterServiceName) { - } - }; - } - - endpointWatcher = new EndpointWatcherImpl(lrsClient, lrsCallback, localityStore); - xdsClient.watchEndpointData(edsServiceName, endpointWatcher); - if (oldEndpointWatcher != null && oldEdsServiceName != null) { - xdsClient.cancelEndpointDataWatch(oldEdsServiceName, oldEndpointWatcher); - } - LookasideLb.this.endpointWatcher = endpointWatcher; - } - @Override public void shutdown() { - if (endpointWatcher != null) { - lrsClient.stopLoadReporting(); - localityStore.reset(); - xdsClient.cancelEndpointDataWatch(edsServiceName, endpointWatcher); + loadStatsStoreMap.remove(clusterServiceName); + if (isReportingStats()) { + loadReportClient.removeLoadStatsStore(clusterServiceName); } + localityStore.reset(); + xdsClient.cancelEndpointDataWatch(clusterServiceName, 
endpointWatcher); } } } @@ -449,15 +390,10 @@ interface EndpointUpdateCallback { private final class EndpointWatcherImpl implements EndpointWatcher { - final LoadReportClient lrsClient; - final LoadReportCallback lrsCallback; final LocalityStore localityStore; boolean firstEndpointUpdateReceived; - EndpointWatcherImpl( - LoadReportClient lrsClient, LoadReportCallback lrsCallback, LocalityStore localityStore) { - this.lrsClient = lrsClient; - this.lrsCallback = lrsCallback; + EndpointWatcherImpl(LocalityStore localityStore) { this.localityStore = localityStore; } @@ -471,7 +407,6 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { if (!firstEndpointUpdateReceived) { firstEndpointUpdateReceived = true; endpointUpdateCallback.onWorking(); - lrsClient.startLoadReporting(lrsCallback); } List dropOverloads = endpointUpdate.getDropPolicies(); diff --git a/xds/src/main/java/io/grpc/xds/XdsComms2.java b/xds/src/main/java/io/grpc/xds/XdsComms2.java deleted file mode 100644 index ef90698e1ef..00000000000 --- a/xds/src/main/java/io/grpc/xds/XdsComms2.java +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Copyright 2019 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.grpc.xds; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Stopwatch; -import com.google.common.base.Supplier; -import com.google.protobuf.InvalidProtocolBufferException; -import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment; -import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload; -import io.envoyproxy.envoy.api.v2.DiscoveryRequest; -import io.envoyproxy.envoy.api.v2.DiscoveryResponse; -import io.envoyproxy.envoy.api.v2.core.Node; -import io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints; -import io.envoyproxy.envoy.service.discovery.v2.AggregatedDiscoveryServiceGrpc; -import io.grpc.ChannelLogger.ChannelLogLevel; -import io.grpc.LoadBalancer.Helper; -import io.grpc.ManagedChannel; -import io.grpc.Status; -import io.grpc.SynchronizationContext.ScheduledHandle; -import io.grpc.internal.BackoffPolicy; -import io.grpc.stub.StreamObserver; -import java.util.concurrent.TimeUnit; -import javax.annotation.CheckForNull; - -/** - * ADS client implementation. - */ -// TODO(zdapeng): This is a temporary and easy refactor of XdsComms, will be replaced by XdsClient. -// Tests are deferred in XdsClientTest, otherwise it's just a refactor of XdsCommsTest. -final class XdsComms2 extends XdsClient { - private final ManagedChannel channel; - private final Helper helper; - private final BackoffPolicy.Provider backoffPolicyProvider; - private final Supplier stopwatchSupplier; - // Metadata to be included in every xDS request. 
- private final Node node; - - @CheckForNull - private ScheduledHandle adsRpcRetryTimer; - - // never null - private BackoffPolicy adsRpcRetryPolicy; - // never null - private AdsStream adsStream; - - private final class AdsStream { - static final String EDS_TYPE_URL = - "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"; - - final XdsClient.EndpointWatcher endpointWatcher; - final StreamObserver xdsRequestWriter; - final Stopwatch retryStopwatch = stopwatchSupplier.get().start(); - - final StreamObserver xdsResponseReader = - new StreamObserver() { - // Must be accessed in SynchronizationContext - boolean firstEdsResponseReceived; - - @Override - public void onNext(final DiscoveryResponse value) { - - class HandleResponseRunnable implements Runnable { - - @Override - public void run() { - String typeUrl = value.getTypeUrl(); - if (EDS_TYPE_URL.equals(typeUrl)) { - // Assuming standard mode. - - ClusterLoadAssignment clusterLoadAssignment; - try { - // maybe better to run this deserialization task out of syncContext? 
- clusterLoadAssignment = - value.getResources(0).unpack(ClusterLoadAssignment.class); - } catch (InvalidProtocolBufferException | RuntimeException e) { - cancelRpc("Received invalid EDS response", e); - endpointWatcher.onError(Status.fromThrowable(e)); - scheduleRetry(); - return; - } - - helper.getChannelLogger().log( - ChannelLogLevel.DEBUG, - "Received an EDS response: {0}", clusterLoadAssignment); - firstEdsResponseReceived = true; - - // Converts clusterLoadAssignment data to EndpointUpdate - EndpointUpdate.Builder endpointUpdateBuilder = EndpointUpdate.newBuilder(); - endpointUpdateBuilder.setClusterName(clusterLoadAssignment.getClusterName()); - for (DropOverload dropOverload : - clusterLoadAssignment.getPolicy().getDropOverloadsList()) { - endpointUpdateBuilder.addDropPolicy( - EnvoyProtoData.DropOverload.fromEnvoyProtoDropOverload(dropOverload)); - } - for (LocalityLbEndpoints localityLbEndpoints : - clusterLoadAssignment.getEndpointsList()) { - endpointUpdateBuilder.addLocalityLbEndpoints( - EnvoyProtoData.Locality.fromEnvoyProtoLocality( - localityLbEndpoints.getLocality()), - EnvoyProtoData.LocalityLbEndpoints.fromEnvoyProtoLocalityLbEndpoints( - localityLbEndpoints)); - - } - endpointWatcher.onEndpointChanged(endpointUpdateBuilder.build()); - } - } - } - - helper.getSynchronizationContext().execute(new HandleResponseRunnable()); - } - - @Override - public void onError(final Throwable t) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - closed = true; - if (cancelled) { - return; - } - endpointWatcher.onError(Status.fromThrowable(t)); - scheduleRetry(); - } - }); - } - - @Override - public void onCompleted() { - onError(Status.UNAVAILABLE.withDescription("Server closed the ADS streaming RPC") - .asException()); - } - - // run in SynchronizationContext - void scheduleRetry() { - if (channel.isShutdown()) { - return; - } - - checkState( - cancelled || closed, - "Scheduling retry while the stream is 
neither cancelled nor closed"); - - checkState( - adsRpcRetryTimer == null, "Scheduling retry while a retry is already pending"); - - class AdsRpcRetryTask implements Runnable { - @Override - public void run() { - adsRpcRetryTimer = null; - refreshAdsStream(); - } - } - - if (firstEdsResponseReceived) { - // Reset the backoff sequence if balancer has sent the initial response - adsRpcRetryPolicy = backoffPolicyProvider.get(); - // Retry immediately - helper.getSynchronizationContext().execute(new AdsRpcRetryTask()); - return; - } - - adsRpcRetryTimer = helper.getSynchronizationContext().schedule( - new AdsRpcRetryTask(), - adsRpcRetryPolicy.nextBackoffNanos() - retryStopwatch.elapsed(TimeUnit.NANOSECONDS), - TimeUnit.NANOSECONDS, - helper.getScheduledExecutorService()); - } - }; - - boolean cancelled; - boolean closed; - - AdsStream(XdsClient.EndpointWatcher endpointWatcher) { - this.endpointWatcher = endpointWatcher; - this.xdsRequestWriter = AggregatedDiscoveryServiceGrpc.newStub(channel).withWaitForReady() - .streamAggregatedResources(xdsResponseReader); - - checkState(adsRpcRetryTimer == null, "Creating AdsStream while retry is pending"); - // Assuming standard mode, and send EDS request only - DiscoveryRequest edsRequest = - DiscoveryRequest.newBuilder() - .setNode(node) - .setTypeUrl(EDS_TYPE_URL) - // In the future, the right resource name can be obtained from CDS response. - .addResourceNames(helper.getAuthority()).build(); - helper.getChannelLogger().log(ChannelLogLevel.DEBUG, "Sending EDS request {0}", edsRequest); - xdsRequestWriter.onNext(edsRequest); - } - - AdsStream(AdsStream adsStream) { - this(adsStream.endpointWatcher); - } - - // run in SynchronizationContext - void cancelRpc(String message, Throwable cause) { - if (cancelled) { - return; - } - cancelled = true; - xdsRequestWriter.onError( - Status.CANCELLED.withDescription(message).withCause(cause).asRuntimeException()); - } - } - - /** - * Starts a new ADS streaming RPC. 
- */ - XdsComms2( - ManagedChannel channel, Helper helper, - BackoffPolicy.Provider backoffPolicyProvider, Supplier stopwatchSupplier, - Node node) { - this.channel = checkNotNull(channel, "channel"); - this.helper = checkNotNull(helper, "helper"); - this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatchSupplier"); - this.node = node; - this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); - this.adsRpcRetryPolicy = backoffPolicyProvider.get(); - } - - // run in SynchronizationContext - void refreshAdsStream() { - checkState(!channel.isShutdown(), "channel is alreday shutdown"); - - if (adsStream.closed || adsStream.cancelled) { - cancelRetryTimer(); - adsStream = new AdsStream(adsStream); - } - } - - @Override - void watchEndpointData(String clusterName, EndpointWatcher watcher) { - if (adsStream == null) { - adsStream = new AdsStream(watcher); - } - } - - @Override - void shutdown() { - if (adsStream != null) { - adsStream.cancelRpc("shutdown", null); - } - cancelRetryTimer(); - channel.shutdown(); - } - - // run in SynchronizationContext - private void cancelRetryTimer() { - if (adsRpcRetryTimer != null) { - adsRpcRetryTimer.cancel(); - adsRpcRetryTimer = null; - } - } - - /** - * Converts ClusterLoadAssignment data to {@link EndpointUpdate}. All the needed data, that is - * clusterName, localityLbEndpointsMap and dropPolicies, is extracted from ClusterLoadAssignment, - * and all other data is ignored. 
- */ - @VisibleForTesting - static EndpointUpdate getEndpointUpdatefromClusterAssignment( - ClusterLoadAssignment clusterLoadAssignment) { - EndpointUpdate.Builder endpointUpdateBuilder = EndpointUpdate.newBuilder(); - endpointUpdateBuilder.setClusterName(clusterLoadAssignment.getClusterName()); - for (DropOverload dropOverload : - clusterLoadAssignment.getPolicy().getDropOverloadsList()) { - endpointUpdateBuilder.addDropPolicy( - EnvoyProtoData.DropOverload.fromEnvoyProtoDropOverload(dropOverload)); - } - for (LocalityLbEndpoints localityLbEndpoints : clusterLoadAssignment.getEndpointsList()) { - endpointUpdateBuilder.addLocalityLbEndpoints( - EnvoyProtoData.Locality.fromEnvoyProtoLocality( - localityLbEndpoints.getLocality()), - EnvoyProtoData.LocalityLbEndpoints.fromEnvoyProtoLocalityLbEndpoints( - localityLbEndpoints)); - - } - return endpointUpdateBuilder.build(); - } -} diff --git a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java index 7baf2ce7a71..2c1ce73d982 100644 --- a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java +++ b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java @@ -17,47 +17,46 @@ package io.grpc.xds; import static com.google.common.truth.Truth.assertThat; +import static io.envoyproxy.envoy.api.v2.core.HealthStatus.HEALTHY; import static io.grpc.ConnectivityState.CONNECTING; import static io.grpc.ConnectivityState.READY; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.LoadBalancer.ATTR_LOAD_BALANCING_CONFIG; +import static io.grpc.xds.XdsClientTestHelper.buildClusterLoadAssignment; +import static io.grpc.xds.XdsClientTestHelper.buildDiscoveryResponse; +import static io.grpc.xds.XdsClientTestHelper.buildDropOverload; +import static io.grpc.xds.XdsClientTestHelper.buildLbEndpoint; +import static io.grpc.xds.XdsClientTestHelper.buildLocalityLbEndpoints; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; 
-import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import com.google.common.base.Stopwatch; -import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.protobuf.Any; -import com.google.protobuf.UInt32Value; import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment; -import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment.Policy; +import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload; import io.envoyproxy.envoy.api.v2.DiscoveryRequest; import io.envoyproxy.envoy.api.v2.DiscoveryResponse; -import io.envoyproxy.envoy.api.v2.core.Address; import io.envoyproxy.envoy.api.v2.core.Node; -import io.envoyproxy.envoy.api.v2.core.SocketAddress; -import io.envoyproxy.envoy.api.v2.endpoint.Endpoint; import io.envoyproxy.envoy.api.v2.endpoint.LbEndpoint; +import io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints; import io.envoyproxy.envoy.service.discovery.v2.AggregatedDiscoveryServiceGrpc.AggregatedDiscoveryServiceImplBase; -import io.envoyproxy.envoy.type.FractionalPercent; -import io.envoyproxy.envoy.type.FractionalPercent.DenominatorType; import io.grpc.Attributes; import io.grpc.ChannelLogger; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; +import io.grpc.LoadBalancer.PickResult; +import io.grpc.LoadBalancer.PickSubchannelArgs; import io.grpc.LoadBalancer.ResolvedAddresses; +import io.grpc.LoadBalancer.Subchannel; import 
io.grpc.LoadBalancer.SubchannelPicker; import io.grpc.LoadBalancerProvider; import io.grpc.LoadBalancerRegistry; @@ -68,39 +67,37 @@ import io.grpc.inprocess.InProcessServerBuilder; import io.grpc.internal.BackoffPolicy; import io.grpc.internal.FakeClock; -import io.grpc.internal.JsonParser; import io.grpc.internal.ObjectPool; -import io.grpc.internal.testing.StreamRecorder; import io.grpc.stub.StreamObserver; import io.grpc.testing.GrpcCleanupRule; import io.grpc.xds.Bootstrapper.BootstrapInfo; import io.grpc.xds.Bootstrapper.ChannelCreds; import io.grpc.xds.Bootstrapper.ServerInfo; -import io.grpc.xds.EnvoyProtoData.DropOverload; -import io.grpc.xds.EnvoyProtoData.Locality; -import io.grpc.xds.EnvoyProtoData.LocalityLbEndpoints; -import io.grpc.xds.LoadReportClient.LoadReportCallback; -import io.grpc.xds.LoadReportClientImpl.LoadReportClientFactory; import io.grpc.xds.LocalityStore.LocalityStoreFactory; import io.grpc.xds.LookasideLb.EndpointUpdateCallback; import io.grpc.xds.XdsClient.EndpointUpdate; -import io.grpc.xds.XdsClient.EndpointWatcher; -import io.grpc.xds.XdsClient.RefCountedXdsClientObjectPool; -import io.grpc.xds.XdsClient.XdsClientFactory; +import io.grpc.xds.XdsClient.XdsChannelFactory; +import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; +import java.net.InetSocketAddress; import java.util.ArrayDeque; -import java.util.Deque; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; import org.mockito.ArgumentCaptor; -import 
org.mockito.ArgumentMatchers; +import org.mockito.ArgumentMatcher; import org.mockito.Captor; -import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; @@ -108,12 +105,10 @@ /** * Tests for {@link LookasideLb}. */ -@RunWith(JUnit4.class) -// TODO(creamsoup) use parsed service config -@SuppressWarnings("deprecation") +@RunWith(Parameterized.class) public class LookasideLbTest { - private static final String SERVICE_AUTHORITY = "test authority"; + private static final String SERVICE_AUTHORITY = "test.authority.example.com"; @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); @@ -127,18 +122,23 @@ public void uncaughtException(Thread t, Throwable e) { throw new AssertionError(e); } }); - private final StreamRecorder streamRecorder = StreamRecorder.create(); - - private final DiscoveryResponse edsResponse = - DiscoveryResponse.newBuilder() - .addResources(Any.pack(ClusterLoadAssignment.getDefaultInstance())) - .setTypeUrl("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment") - .build(); - private final Deque helpers = new ArrayDeque<>(); - private final Deque localityStores = new ArrayDeque<>(); - private final Deque loadReportClients = new ArrayDeque<>(); private final FakeClock fakeClock = new FakeClock(); + private final LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry(); + + // Child helpers keyed by locality names. + private final Map childHelpers = new HashMap<>(); + // Child balancers keyed by locality names. 
+ private final Map childBalancers = new HashMap<>(); + private final XdsChannelFactory channelFactory = new XdsChannelFactory() { + @Override + ManagedChannel createChannel(List servers) { + assertThat(Iterables.getOnlyElement(servers).getServerUri()) + .isEqualTo("trafficdirector.googleapis.com"); + return channel; + } + }; + @Mock private Helper helper; @Mock @@ -146,92 +146,36 @@ public void uncaughtException(Thread t, Throwable e) { @Mock private Bootstrapper bootstrapper; @Captor - private ArgumentCaptor> - localityEndpointsMappingCaptor; + ArgumentCaptor connectivityStateCaptor; + @Captor + ArgumentCaptor pickerCaptor; - private ManagedChannel channel; - private ManagedChannel channel2; - private StreamObserver serverResponseWriter; private LoadBalancer lookasideLb; - private ResolvedAddresses defaultResolvedAddress; + // Simulating a CDS to EDS flow, otherwise EDS only. + @Parameter + public boolean isFullFlow; + private ManagedChannel channel; + // Response observer on server side. 
+ private StreamObserver responseObserver; + @Nullable + private FakeXdsClientPool xdsClientPoolFromResolveAddresses; + private LocalityStoreFactory localityStoreFactory = LocalityStoreFactory.getInstance(); + private int versionIno; + private int nonce; + + @Parameters + public static Collection isFullFlow() { + return ImmutableList.of(false, true ); + } @Before public void setUp() throws Exception { - AggregatedDiscoveryServiceImplBase serviceImpl = new AggregatedDiscoveryServiceImplBase() { - @Override - public StreamObserver streamAggregatedResources( - final StreamObserver responseObserver) { - serverResponseWriter = responseObserver; - - return new StreamObserver() { - - @Override - public void onNext(DiscoveryRequest value) { - streamRecorder.onNext(value); - } - - @Override - public void onError(Throwable t) { - streamRecorder.onError(t); - } - - @Override - public void onCompleted() { - streamRecorder.onCompleted(); - responseObserver.onCompleted(); - } - }; - } - }; - - String serverName = InProcessServerBuilder.generateName(); - cleanupRule.register( - InProcessServerBuilder - .forName(serverName) - .directExecutor() - .addService(serviceImpl) - .build() - .start()); - channel = cleanupRule.register( - InProcessChannelBuilder - .forName(serverName) - .directExecutor() - .build()); - channel2 = cleanupRule.register( - InProcessChannelBuilder - .forName(serverName) - .directExecutor() - .build()); - doReturn(SERVICE_AUTHORITY).when(helper).getAuthority(); doReturn(syncContext).when(helper).getSynchronizationContext(); doReturn(mock(ChannelLogger.class)).when(helper).getChannelLogger(); - doReturn(channel, channel2).when(helper).createResolvingOobChannel(anyString()); doReturn(fakeClock.getScheduledExecutorService()).when(helper).getScheduledExecutorService(); - LocalityStoreFactory localityStoreFactory = new LocalityStoreFactory() { - @Override - public LocalityStore newLocalityStore( - Helper helper, LoadBalancerRegistry lbRegistry, LoadStatsStore 
loadStatsStore) { - helpers.add(helper); - LocalityStore localityStore = mock(LocalityStore.class); - localityStores.add(localityStore); - return localityStore; - } - }; - - LoadReportClientFactory loadReportClientFactory = new LoadReportClientFactory() { - @Override - LoadReportClient createLoadReportClient(ManagedChannel channel, String clusterName, - Node node, SynchronizationContext syncContext, ScheduledExecutorService timeService, - BackoffPolicy.Provider backoffPolicyProvider, Supplier stopwatchSupplier) { - LoadReportClient loadReportClient = mock(LoadReportClient.class); - loadReportClients.add(loadReportClient); - return loadReportClient; - } - }; - - LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry(); + // Register a fake round robin balancer provider. lbRegistry.register(new LoadBalancerProvider() { @Override public boolean isAvailable() { @@ -245,80 +189,97 @@ public int getPriority() { @Override public String getPolicyName() { - return "supported1"; + return "round_robin"; } @Override public LoadBalancer newLoadBalancer(Helper helper) { - return mock(LoadBalancer.class); + String localityName = helper.getAuthority(); + childHelpers.put(localityName, helper); + LoadBalancer balancer = mock(LoadBalancer.class); + childBalancers.put(localityName, balancer); + return balancer; } }); - List serverList = + AggregatedDiscoveryServiceImplBase serviceImpl = new AggregatedDiscoveryServiceImplBase() { + @Override + public StreamObserver streamAggregatedResources( + final StreamObserver responseObserver) { + LookasideLbTest.this.responseObserver = responseObserver; + @SuppressWarnings("unchecked") + StreamObserver requestObserver = mock(StreamObserver.class); + return requestObserver; + } + }; + String serverName = InProcessServerBuilder.generateName(); + cleanupRule.register( + InProcessServerBuilder + .forName(serverName) + .directExecutor() + .addService(serviceImpl) + .build() + .start()); + channel = cleanupRule.register( + InProcessChannelBuilder 
+ .forName(serverName) + .directExecutor() + .build()); + final List serverList = ImmutableList.of( new ServerInfo("trafficdirector.googleapis.com", ImmutableList.of())); BootstrapInfo bootstrapInfo = new BootstrapInfo(serverList, Node.getDefaultInstance()); doReturn(bootstrapInfo).when(bootstrapper).readBootstrap(); - lookasideLb = new LookasideLb( - helper, edsUpdateCallback, lbRegistry, localityStoreFactory, loadReportClientFactory, - bootstrapper); + if (isFullFlow) { + xdsClientPoolFromResolveAddresses = new FakeXdsClientPool( + new XdsClientImpl( + serverList, channelFactory, Node.getDefaultInstance(), syncContext, + fakeClock.getScheduledExecutorService(), mock(BackoffPolicy.Provider.class), + fakeClock.getStopwatchSupplier())); + } - String lbConfigRaw11 = "{}"; - @SuppressWarnings("unchecked") - Map lbConfig11 = (Map) JsonParser.parse(lbConfigRaw11); - defaultResolvedAddress = ResolvedAddresses.newBuilder() - .setAddresses(ImmutableList.of()) - .setAttributes(Attributes.newBuilder().set(ATTR_LOAD_BALANCING_CONFIG, lbConfig11).build()) - .build(); + lookasideLb = new LookasideLb( + helper, edsUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, channelFactory); } - @Test - public void canHandleEmptyAddressListFromNameResolution() { - assertThat(lookasideLb.canHandleEmptyAddressListFromNameResolution()).isTrue(); - } + @After + public void tearDown() { + lookasideLb.shutdown(); - @Test - public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() throws Exception { - XdsClientFactory xdsClientFactory = new XdsClientFactory() { - @Override - XdsClient createXdsClient() { - return mock(XdsClient.class); - } - }; - ObjectPool xdsClientPool = new RefCountedXdsClientObjectPool(xdsClientFactory); - XdsClient xdsClientFromResolver = xdsClientPool.getObject(); - - String lbConfigRaw = - "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName1'}" - .replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) 
JsonParser.parse(lbConfigRaw); - ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(ImmutableList.of()) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); + for (LoadBalancer childBalancer : childBalancers.values()) { + verify(childBalancer).shutdown(); + } - lookasideLb.handleResolvedAddresses(resolvedAddresses); + if (isFullFlow) { + assertThat(xdsClientPoolFromResolveAddresses.timesGetObjectCalled) + .isEqualTo(xdsClientPoolFromResolveAddresses.timesReturnObjectCalled); - assertThat(helpers).hasSize(1); - assertThat(localityStores).hasSize(1); - ArgumentCaptor endpointWatcherCaptor = - ArgumentCaptor.forClass(EndpointWatcher.class); - verify(xdsClientFromResolver).watchEndpointData( - eq("edsServiceName1"), endpointWatcherCaptor.capture()); - EndpointWatcher endpointWatcher = endpointWatcherCaptor.getValue(); + // Just for cleaning up the test. + xdsClientPoolFromResolveAddresses.xdsClient.shutdown(); + } + + assertThat(channel.isShutdown()).isTrue(); + } + + @Test + public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() { + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); // handleResolutionError() before receiving any endpoint update. lookasideLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); // Endpoint update received. 
- endpointWatcher.onEndpointChanged( - EndpointUpdate.newBuilder().setClusterName("edsServiceName1").build()); + ClusterLoadAssignment clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of(buildDropOverload("throttle", 1000))); + receiveEndpointUpdate(clusterLoadAssignment); // handleResolutionError() after receiving endpoint update. lookasideLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); @@ -327,488 +288,513 @@ XdsClient createXdsClient() { eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); } - @SuppressWarnings("unchecked") @Test - public void handleEdsServiceNameChangeInXdsConfig_swtichGracefully() - throws Exception { - assertThat(helpers).isEmpty(); - assertThat(localityStores).isEmpty(); - assertThat(loadReportClients).isEmpty(); - - List eags = ImmutableList.of(); - XdsClientFactory xdsClientFactory = new XdsClientFactory() { + public void handleEdsServiceNameChangeInXdsConfig() { + assertThat(childHelpers).isEmpty(); + + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + ClusterLoadAssignment clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + assertThat(childHelpers).hasSize(1); + Helper childHelper1 = childHelpers.get("subzone1"); + LoadBalancer childBalancer1 = childBalancers.get("subzone1"); + verify(childBalancer1).handleResolvedAddresses( + argThat(RoundRobinBackendsMatcher.builder().addHostAndPort("192.168.0.1", 8080).build())); + + childHelper1.updateBalancingState(CONNECTING, mock(SubchannelPicker.class)); + 
assertLatestConnectivityState(CONNECTING); + + // Change edsServicename to edsServiceName2. + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName2", null)); + // The old balancer was not READY, so it will be shutdown immediately. + verify(childBalancer1).shutdown(); + + clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName2", + ImmutableList.of( + buildLocalityLbEndpoints("region2", "zone2", "subzone2", + ImmutableList.of( + buildLbEndpoint("192.168.0.2", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + assertThat(childHelpers).hasSize(2); + Helper childHelper2 = childHelpers.get("subzone2"); + LoadBalancer childBalancer2 = childBalancers.get("subzone2"); + verify(childBalancer2).handleResolvedAddresses( + argThat(RoundRobinBackendsMatcher.builder().addHostAndPort("192.168.0.2", 8080).build())); + + final Subchannel subchannel2 = mock(Subchannel.class); + SubchannelPicker picker2 = new SubchannelPicker() { @Override - XdsClient createXdsClient() { - return mock(XdsClient.class); + public PickResult pickSubchannel(PickSubchannelArgs args) { + return PickResult.withSubchannel(subchannel2); } }; - ObjectPool xdsClientPool = new RefCountedXdsClientObjectPool(xdsClientFactory); - XdsClient xdsClientFromResolver = xdsClientPool.getObject(); - - String lbConfigRaw = - "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName1'}" - .replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); - ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); - lookasideLb.handleResolvedAddresses(resolvedAddresses); - - assertThat(helpers).hasSize(1); - assertThat(localityStores).hasSize(1); - Helper helper1 = helpers.peekLast(); - 
LocalityStore localityStore1 = localityStores.peekLast(); - - SubchannelPicker picker1 = mock(SubchannelPicker.class); - helper1.updateBalancingState(CONNECTING, picker1); - verify(helper).updateBalancingState(CONNECTING, picker1); - - // Change edsServicename to edsServiceName2. - lbConfigRaw = "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName2'}" - .replace("'", "\""); - lbConfig = (Map) JsonParser.parse(lbConfigRaw); - resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); - lookasideLb.handleResolvedAddresses(resolvedAddresses); - assertThat(helpers).hasSize(2); - assertThat(localityStores).hasSize(2); - Helper helper2 = helpers.peekLast(); - LocalityStore localityStore2 = localityStores.peekLast(); - SubchannelPicker picker2 = mock(SubchannelPicker.class); - helper2.updateBalancingState(CONNECTING, picker2); - verify(helper).updateBalancingState(CONNECTING, picker2); - verify(localityStore1).reset(); - helper2.updateBalancingState(READY, picker2); - verify(helper).updateBalancingState(READY, picker2); + childHelper2.updateBalancingState(READY, picker2); + assertLatestSubchannelPicker(subchannel2); // Change edsServiceName to edsServiceName3. 
- lbConfigRaw = "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName3'}" - .replace("'", "\""); - lbConfig = (Map) JsonParser.parse(lbConfigRaw); - resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); - lookasideLb.handleResolvedAddresses(resolvedAddresses); - - assertThat(helpers).hasSize(3); - assertThat(localityStores).hasSize(3); - Helper helper3 = helpers.peekLast(); - LocalityStore localityStore3 = localityStores.peekLast(); - - SubchannelPicker picker3 = mock(SubchannelPicker.class); - helper3.updateBalancingState(CONNECTING, picker3); - verify(helper, never()).updateBalancingState(CONNECTING, picker3); - verify(localityStore2, never()).reset(); - picker2 = mock(SubchannelPicker.class); - helper2.updateBalancingState(CONNECTING, picker2); + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName3", null)); + clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName3", + ImmutableList.of( + buildLocalityLbEndpoints("region3", "zone3", "subzone3", + ImmutableList.of( + buildLbEndpoint("192.168.0.3", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + + assertThat(childHelpers).hasSize(3); + Helper childHelper3 = childHelpers.get("subzone3"); + LoadBalancer childBalancer3 = childBalancers.get("subzone3"); + + childHelper3.updateBalancingState(CONNECTING, mock(SubchannelPicker.class)); + // The new balancer is not READY while the old one is still READY. + verify(childBalancer2, never()).shutdown(); + assertLatestSubchannelPicker(subchannel2); + + childHelper2.updateBalancingState(CONNECTING, mock(SubchannelPicker.class)); // The old balancer becomes not READY, so the new balancer will update picker immediately. 
- verify(helper).updateBalancingState(CONNECTING, picker3); - verify(localityStore2).reset(); + verify(childBalancer2).shutdown(); + assertLatestConnectivityState(CONNECTING); // Change edsServiceName to edsServiceName4. - lbConfigRaw = "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName4'}" - .replace("'", "\""); - lbConfig = (Map) JsonParser.parse(lbConfigRaw); - resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); - lookasideLb.handleResolvedAddresses(resolvedAddresses); - - assertThat(helpers).hasSize(4); - assertThat(localityStores).hasSize(4); - Helper helper4 = helpers.peekLast(); - LocalityStore localityStore4 = localityStores.peekLast(); - verify(localityStore3).reset(); - SubchannelPicker picker4 = mock(SubchannelPicker.class); - helper4.updateBalancingState(READY, picker4); - verify(helper).updateBalancingState(READY, picker4); + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName4", null)); + verify(childBalancer3).shutdown(); + + clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName4", + ImmutableList.of( + buildLocalityLbEndpoints("region4", "zone4", "subzone4", + ImmutableList.of( + buildLbEndpoint("192.168.0.4", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + + assertThat(childHelpers).hasSize(4); + Helper childHelper4 = childHelpers.get("subzone4"); + LoadBalancer childBalancer4 = childBalancers.get("subzone4"); + + final Subchannel subchannel4 = mock(Subchannel.class); + SubchannelPicker picker4 = new SubchannelPicker() { + @Override + public PickResult pickSubchannel(PickSubchannelArgs args) { + return PickResult.withSubchannel(subchannel4); + } + }; + childHelper4.updateBalancingState(READY, picker4); + assertLatestSubchannelPicker(subchannel4); // Change 
edsServiceName to edsServiceName5. - lbConfigRaw = "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName5'}" - .replace("'", "\""); - lbConfig = (Map) JsonParser.parse(lbConfigRaw); - resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); - lookasideLb.handleResolvedAddresses(resolvedAddresses); - - assertThat(helpers).hasSize(5); - assertThat(localityStores).hasSize(5); - - Helper helper5 = helpers.peekLast(); - LocalityStore localityStore5 = localityStores.peekLast(); - SubchannelPicker picker5 = mock(SubchannelPicker.class); - helper5.updateBalancingState(CONNECTING, picker5); + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName5", null)); + clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName5", + ImmutableList.of( + buildLocalityLbEndpoints("region5", "zone5", "subzone5", + ImmutableList.of( + buildLbEndpoint("192.168.0.5", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + + assertThat(childHelpers).hasSize(5); + Helper childHelper5 = childHelpers.get("subzone5"); + LoadBalancer childBalancer5 = childBalancers.get("subzone5"); + childHelper5.updateBalancingState(CONNECTING, mock(SubchannelPicker.class)); // The old balancer was READY, so the new balancer will gracefully switch and not update // non-READY picker. 
- verify(helper, never()).updateBalancingState(any(ConnectivityState.class), eq(picker5)); - verify(localityStore4, never()).reset(); - - helper5.updateBalancingState(READY, picker5); - verify(helper).updateBalancingState(READY, picker5); - verify(localityStore4).reset(); + verify(childBalancer4, never()).shutdown(); + assertLatestSubchannelPicker(subchannel4); - verify(localityStore5, never()).reset(); - lookasideLb.shutdown(); - verify(localityStore5).reset(); - - xdsClientPool.returnObject(xdsClientFromResolver); - } - - @Test - public void handleResolvedAddress_withBootstrap() throws Exception { - List serverList = - ImmutableList.of( - new ServerInfo("trafficdirector.googleapis.com", ImmutableList.of())); - BootstrapInfo bootstrapInfo = new BootstrapInfo(serverList, Node.getDefaultInstance()); - doReturn(bootstrapInfo).when(bootstrapper).readBootstrap(); - - String lbConfigRaw = - "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName1'}" - .replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); - ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(ImmutableList.of()) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .build()) - .build(); - - verify(helper, never()).createResolvingOobChannel(anyString()); - lookasideLb.handleResolvedAddresses(resolvedAddresses); - verify(helper).createResolvingOobChannel("trafficdirector.googleapis.com"); - - assertThat(helpers).hasSize(1); - assertThat(localityStores).hasSize(1); - Helper helper1 = helpers.peekLast(); - LocalityStore localityStore1 = localityStores.peekLast(); - SubchannelPicker picker = mock(SubchannelPicker.class); - helper1.updateBalancingState(READY, picker); - verify(helper).updateBalancingState(READY, picker); - - lookasideLb.shutdown(); - verify(localityStore1).reset(); - } - - @Test - public void handleResolvedAddress_withxdsClientPoolAttributes() throws 
Exception { - XdsClientFactory xdsClientFactory = new XdsClientFactory() { + final Subchannel subchannel5 = mock(Subchannel.class); + SubchannelPicker picker5 = new SubchannelPicker() { @Override - XdsClient createXdsClient() { - return mock(XdsClient.class); + public PickResult pickSubchannel(PickSubchannelArgs args) { + return PickResult.withSubchannel(subchannel5); } }; - ObjectPool xdsClientPool = new RefCountedXdsClientObjectPool(xdsClientFactory); - XdsClient xdsClientFromResolver = xdsClientPool.getObject(); - - String lbConfigRaw = - "{'childPolicy' : [{'supported1' : {}}], 'edsServiceName' : 'edsServiceName1'}" - .replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); - ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(ImmutableList.of()) - .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .build(); - - lookasideLb.handleResolvedAddresses(resolvedAddresses); - - assertThat(helpers).hasSize(1); - assertThat(localityStores).hasSize(1); - ArgumentCaptor endpointWatcherCaptor = - ArgumentCaptor.forClass(EndpointWatcher.class); - verify(xdsClientFromResolver).watchEndpointData( - eq("edsServiceName1"), endpointWatcherCaptor.capture()); - EndpointWatcher endpointWatcher = endpointWatcherCaptor.getValue(); - - Helper helper1 = helpers.peekLast(); - SubchannelPicker picker = mock(SubchannelPicker.class); - helper1.updateBalancingState(READY, picker); - verify(helper).updateBalancingState(READY, picker); - - // Mimic resolver shutdown - xdsClientPool.returnObject(xdsClientFromResolver); - verify(xdsClientFromResolver, never()).shutdown(); - lookasideLb.shutdown(); - verify(xdsClientFromResolver).cancelEndpointDataWatch("edsServiceName1", endpointWatcher); - verify(xdsClientFromResolver).shutdown(); + childHelper5.updateBalancingState(READY, picker5); + 
verify(childBalancer4).shutdown(); + assertLatestSubchannelPicker(subchannel5); + verify(childBalancer5, never()).shutdown(); } @Test - public void firstAndSecondEdsResponseReceived() { - lookasideLb.handleResolvedAddresses(defaultResolvedAddress); + public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); verify(edsUpdateCallback, never()).onWorking(); - LoadReportClient loadReportClient = Iterables.getOnlyElement(loadReportClients); - verify(loadReportClient, never()).startLoadReporting(any(LoadReportCallback.class)); // first EDS response - serverResponseWriter.onNext(edsResponse); + ClusterLoadAssignment clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + verify(edsUpdateCallback).onWorking(); - ArgumentCaptor loadReportCallbackCaptor = - ArgumentCaptor.forClass(LoadReportCallback.class); - verify(loadReportClient).startLoadReporting(loadReportCallbackCaptor.capture()); - LoadReportCallback loadReportCallback = loadReportCallbackCaptor.getValue(); // second EDS response - serverResponseWriter.onNext(edsResponse); + clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2), + buildLbEndpoint("192.168.0.2", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); verify(edsUpdateCallback, times(1)).onWorking(); - verify(loadReportClient, times(1)).startLoadReporting(any(LoadReportCallback.class)); - - LocalityStore localityStore = Iterables.getOnlyElement(localityStores); - verify(localityStore, 
never()).updateOobMetricsReportInterval(anyLong()); - loadReportCallback.onReportResponse(1234); - verify(localityStore).updateOobMetricsReportInterval(1234); - verify(edsUpdateCallback, never()).onError(); - - lookasideLb.shutdown(); } @Test - public void handleDropUpdates() { - lookasideLb.handleResolvedAddresses(defaultResolvedAddress); - - LocalityStore localityStore = Iterables.getOnlyElement(localityStores); - verify(localityStore, never()).updateDropPercentage( - ArgumentMatchers.>any()); - - serverResponseWriter.onNext(edsResponse); - verify(localityStore).updateDropPercentage(eq(ImmutableList.of())); - - ClusterLoadAssignment clusterLoadAssignment = ClusterLoadAssignment.newBuilder() - .setPolicy(Policy.newBuilder() - .addDropOverloads(Policy.DropOverload.newBuilder() - .setCategory("cat_1").setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.HUNDRED) - .setNumerator(3) - .build()) - .build()) - - .addDropOverloads(Policy.DropOverload.newBuilder() - .setCategory("cat_2").setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.TEN_THOUSAND) - .setNumerator(45) - .build()) - .build()) - .addDropOverloads(Policy.DropOverload.newBuilder() - .setCategory("cat_3").setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.MILLION) - .setNumerator(6789) - .build()) - .build()) - .build()) - .build(); - serverResponseWriter.onNext( - DiscoveryResponse.newBuilder() - .addResources(Any.pack(clusterLoadAssignment)) - .setTypeUrl("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment") - .build()); + public void handleAllDropUpdates_pickersAreDropped() { + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + + ClusterLoadAssignment clusterLoadAssignment = buildClusterLoadAssignment( + "edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 
0)), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); verify(edsUpdateCallback, never()).onAllDrop(); - verify(localityStore).updateDropPercentage(ImmutableList.of( - new DropOverload("cat_1", 300_00), - new DropOverload("cat_2", 45_00), - new DropOverload("cat_3", 6789))); - - - clusterLoadAssignment = ClusterLoadAssignment.newBuilder() - .setPolicy(Policy.newBuilder() - .addDropOverloads(Policy.DropOverload.newBuilder() - .setCategory("cat_1").setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.HUNDRED) - .setNumerator(3) - .build()) - .build()) - .addDropOverloads(Policy.DropOverload.newBuilder() - .setCategory("cat_2").setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.HUNDRED) - .setNumerator(101) - .build()) - .build()) - .addDropOverloads(Policy.DropOverload.newBuilder() - .setCategory("cat_3").setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.HUNDRED) - .setNumerator(23) - .build()) - .build()) - .build()) - .build(); - serverResponseWriter.onNext( - DiscoveryResponse.newBuilder() - .addResources(Any.pack(clusterLoadAssignment)) - .setTypeUrl("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment") - .build()); + assertThat(childBalancers).hasSize(1); + verify(childBalancers.get("subzone1")).handleResolvedAddresses( + argThat(RoundRobinBackendsMatcher.builder().addHostAndPort("192.168.0.1", 8080).build())); + assertThat(childHelpers).hasSize(1); + Helper childHelper = childHelpers.get("subzone1"); + + final Subchannel subchannel = mock(Subchannel.class); + SubchannelPicker picker = new SubchannelPicker() { + @Override + public PickResult pickSubchannel(PickSubchannelArgs args) { + return PickResult.withSubchannel(subchannel); + } + }; + childHelper.updateBalancingState(READY, picker); + assertLatestSubchannelPicker(subchannel); + + clusterLoadAssignment = buildClusterLoadAssignment( + "edsServiceName1", + ImmutableList.of( + 
buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of( + buildDropOverload("cat_1", 3), + buildDropOverload("cat_2", 1_000_001), + buildDropOverload("cat_3", 4))); + receiveEndpointUpdate(clusterLoadAssignment); verify(edsUpdateCallback).onAllDrop(); - verify(localityStore).updateDropPercentage(ImmutableList.of( - new DropOverload("cat_1", 300_00), - new DropOverload("cat_2", 100_00_00))); + verify(helper, atLeastOnce()).updateBalancingState(eq(READY), pickerCaptor.capture()); + SubchannelPicker pickerExpectedDropAll = pickerCaptor.getValue(); + assertThat(pickerExpectedDropAll.pickSubchannel(mock(PickSubchannelArgs.class)).isDrop()) + .isTrue(); verify(edsUpdateCallback, never()).onError(); + } - lookasideLb.shutdown(); + @Test + public void handleLocalityAssignmentUpdates_pickersUpdatedFromChildBalancer() { + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + + LbEndpoint endpoint11 = buildLbEndpoint("addr11.example.com", 8011, HEALTHY, 11); + LbEndpoint endpoint12 = buildLbEndpoint("addr12.example.com", 8012, HEALTHY, 12); + LocalityLbEndpoints localityLbEndpoints1 = buildLocalityLbEndpoints( + "region1", "zone1", "subzone1", + ImmutableList.of(endpoint11, endpoint12), + 1, + 0); + + LbEndpoint endpoint21 = buildLbEndpoint("addr21.example.com", 8021, HEALTHY, 21); + LbEndpoint endpoint22 = buildLbEndpoint("addr22.example.com", 8022, HEALTHY, 22); + LocalityLbEndpoints localityLbEndpoints2 = buildLocalityLbEndpoints( + "region2", "zone2", "subzone2", + ImmutableList.of(endpoint21, endpoint22), + 2, + 0); + + LbEndpoint endpoint31 = buildLbEndpoint("addr31.example.com", 8031, HEALTHY, 31); + LocalityLbEndpoints localityLbEndpoints3 = buildLocalityLbEndpoints( + "region3", "zone3", "subzone3", + ImmutableList.of(endpoint31), + 3, + 0); + + ClusterLoadAssignment clusterLoadAssignment = buildClusterLoadAssignment( + "edsServiceName1", 
+ ImmutableList.of(localityLbEndpoints1, localityLbEndpoints2, localityLbEndpoints3), + ImmutableList.of()); + receiveEndpointUpdate(clusterLoadAssignment); + + assertThat(childBalancers).hasSize(3); + verify(childBalancers.get("subzone1")).handleResolvedAddresses( + argThat(RoundRobinBackendsMatcher.builder() + .addHostAndPort("addr11.example.com", 8011) + .addHostAndPort("addr12.example.com", 8012) + .build())); + verify(childBalancers.get("subzone2")).handleResolvedAddresses( + argThat(RoundRobinBackendsMatcher.builder() + .addHostAndPort("addr21.example.com", 8021) + .addHostAndPort("addr22.example.com", 8022) + .build())); + verify(childBalancers.get("subzone3")).handleResolvedAddresses( + argThat(RoundRobinBackendsMatcher.builder() + .addHostAndPort("addr31.example.com", 8031) + .build())); + assertThat(childHelpers).hasSize(3); + Helper childHelper2 = childHelpers.get("subzone2"); + final Subchannel subchannel = mock(Subchannel.class); + SubchannelPicker picker = new SubchannelPicker() { + @Override + public PickResult pickSubchannel(PickSubchannelArgs args) { + return PickResult.withSubchannel(subchannel); + } + }; + verify(helper, never()).updateBalancingState(eq(READY), any(SubchannelPicker.class)); + childHelper2.updateBalancingState(READY, picker); + assertLatestSubchannelPicker(subchannel); + + verify(edsUpdateCallback, never()).onError(); } + // Uses a fake LocalityStoreFactory that creates a mock LocalityStore, and verifies interaction + // between the EDS balancer and LocalityStore. 
@Test - public void handleLocalityAssignmentUpdates() { - lookasideLb.handleResolvedAddresses(defaultResolvedAddress); - - io.envoyproxy.envoy.api.v2.core.Locality localityProto1 = - io.envoyproxy.envoy.api.v2.core.Locality - .newBuilder() - .setRegion("region1") - .setZone("zone1") - .setSubZone("subzone1") - .build(); - LbEndpoint endpoint11 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr11").setPortValue(11)))) - .setLoadBalancingWeight(UInt32Value.of(11)) - .build(); - LbEndpoint endpoint12 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr12").setPortValue(12)))) - .setLoadBalancingWeight(UInt32Value.of(12)) - .build(); - io.envoyproxy.envoy.api.v2.core.Locality localityProto2 = - io.envoyproxy.envoy.api.v2.core.Locality - .newBuilder() - .setRegion("region2") - .setZone("zone2") - .setSubZone("subzone2") - .build(); - LbEndpoint endpoint21 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr21").setPortValue(21)))) - .setLoadBalancingWeight(UInt32Value.of(21)) - .build(); - LbEndpoint endpoint22 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr22").setPortValue(22)))) - .setLoadBalancingWeight(UInt32Value.of(22)) - .build(); - io.envoyproxy.envoy.api.v2.core.Locality localityProto3 = - io.envoyproxy.envoy.api.v2.core.Locality - .newBuilder() - .setRegion("region3") - .setZone("zone3") - .setSubZone("subzone3") - .build(); - LbEndpoint endpoint3 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - 
.setAddress("addr31").setPortValue(31)))) - .setLoadBalancingWeight(UInt32Value.of(31)) - .build(); - ClusterLoadAssignment clusterLoadAssignment = ClusterLoadAssignment.newBuilder() - .addEndpoints(io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints.newBuilder() - .setLocality(localityProto1) - .addLbEndpoints(endpoint11) - .addLbEndpoints(endpoint12) - .setLoadBalancingWeight(UInt32Value.of(1))) - .addEndpoints(io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints.newBuilder() - .setLocality(localityProto2) - .addLbEndpoints(endpoint21) - .addLbEndpoints(endpoint22) - .setLoadBalancingWeight(UInt32Value.of(2))) - .addEndpoints(io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints.newBuilder() - .setLocality(localityProto3) - .addLbEndpoints(endpoint3) - .setLoadBalancingWeight(UInt32Value.of(0))) - .build(); - serverResponseWriter.onNext( - DiscoveryResponse.newBuilder() - .addResources(Any.pack(clusterLoadAssignment)) - .setTypeUrl("type.googleapis.com/envoy.api.v2.ClusterLoadAssignment") - .build()); + public void handleEndpointUpdates_delegateUpdatesToLocalityStore() { + final ArrayDeque localityStores = new ArrayDeque<>(); + localityStoreFactory = new LocalityStoreFactory() { + @Override + LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, + LoadStatsStore loadStatsStore) { + // Note that this test approach can not verify anything about how localityStore will use the + // helper in the arguments to delegate updates from localityStore to the EDS balancer, and + // can not verify anything about how loadStatsStore updates localities and drop information. + // To cover the gap, some non-exhaustive tests like + // handleAllDropUpdates_pickersAreDropped() and + // handleLocalityAssignmentUpdates_pickersUpdatedFromChildBalancer()are added to verify some + // very basic behaviors. 
+ LocalityStore localityStore = mock(LocalityStore.class); + localityStores.add(localityStore); + return localityStore; + } + }; + lookasideLb = new LookasideLb( + helper, edsUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, channelFactory); - Locality locality1 = Locality.fromEnvoyProtoLocality(localityProto1); - LocalityLbEndpoints localityInfo1 = new LocalityLbEndpoints( + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + assertThat(localityStores).hasSize(1); + LocalityStore localityStore = localityStores.peekLast(); + + ClusterLoadAssignment clusterLoadAssignment = buildClusterLoadAssignment( + "edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 0)), ImmutableList.of( - EnvoyProtoData.LbEndpoint.fromEnvoyProtoLbEndpoint(endpoint11), - EnvoyProtoData.LbEndpoint.fromEnvoyProtoLbEndpoint(endpoint12)), - 1, 0); - LocalityLbEndpoints localityInfo2 = new LocalityLbEndpoints( + buildDropOverload("cat_1", 3), + buildDropOverload("cat_2", 456))); + receiveEndpointUpdate(clusterLoadAssignment); + EndpointUpdate endpointUpdate = getEndpointUpdateFromClusterAssignment(clusterLoadAssignment); + verify(localityStore).updateDropPercentage(endpointUpdate.getDropPolicies()); + verify(localityStore).updateLocalityStore(endpointUpdate.getLocalityLbEndpointsMap()); + + clusterLoadAssignment = buildClusterLoadAssignment( + "edsServiceName1", ImmutableList.of( - EnvoyProtoData.LbEndpoint.fromEnvoyProtoLbEndpoint(endpoint21), - EnvoyProtoData.LbEndpoint.fromEnvoyProtoLbEndpoint(endpoint22)), - 2, 0); - Locality locality2 = Locality.fromEnvoyProtoLocality(localityProto2); - - LocalityStore localityStore = Iterables.getOnlyElement(localityStores); - InOrder inOrder = inOrder(localityStore); - inOrder.verify(localityStore).updateDropPercentage(ImmutableList.of()); - 
inOrder.verify(localityStore).updateLocalityStore(localityEndpointsMappingCaptor.capture()); - assertThat(localityEndpointsMappingCaptor.getValue()).containsExactly( - locality1, localityInfo1, locality2, localityInfo2).inOrder(); + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2), + buildLbEndpoint("192.168.0.1", 8088, HEALTHY, 2)), + 1, 0)), + ImmutableList.of( + buildDropOverload("cat_1", 3), + buildDropOverload("cat_3", 4))); + receiveEndpointUpdate(clusterLoadAssignment); - verify(edsUpdateCallback, never()).onError(); + endpointUpdate = getEndpointUpdateFromClusterAssignment(clusterLoadAssignment); + verify(localityStore).updateDropPercentage(endpointUpdate.getDropPolicies()); + verify(localityStore).updateLocalityStore(endpointUpdate.getLocalityLbEndpointsMap()); - lookasideLb.shutdown(); + // Change cluster name. + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName2", null)); + assertThat(localityStores).hasSize(2); + localityStore = localityStores.peekLast(); + + clusterLoadAssignment = buildClusterLoadAssignment( + "edsServiceName2", + ImmutableList.of( + buildLocalityLbEndpoints("region2", "zone2", "subzone2", + ImmutableList.of( + buildLbEndpoint("192.168.0.2", 8080, HEALTHY, 2), + buildLbEndpoint("192.168.0.2", 8088, HEALTHY, 2)), + 1, 0)), + ImmutableList.of( + buildDropOverload("cat_1", 3), + buildDropOverload("cat_3", 4))); + receiveEndpointUpdate(clusterLoadAssignment); + endpointUpdate = getEndpointUpdateFromClusterAssignment(clusterLoadAssignment); + verify(localityStore).updateDropPercentage(endpointUpdate.getDropPolicies()); + verify(localityStore).updateLocalityStore(endpointUpdate.getLocalityLbEndpointsMap()); } @Test - public void verifyRpcErrorPropagation() { - lookasideLb.handleResolvedAddresses(defaultResolvedAddress); + public void verifyErrorPropagation() { + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); 
verify(helper, never()).updateBalancingState( eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verify(edsUpdateCallback, never()).onError(); - serverResponseWriter.onError(new RuntimeException()); + // Forwarding 20 seconds so that the xds client will deem EDS resource not available. + fakeClock.forwardTime(20, TimeUnit.SECONDS); verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verify(edsUpdateCallback).onError(); } - @Test - public void shutdown() { - lookasideLb.handleResolvedAddresses(defaultResolvedAddress); + /** + * Converts ClusterLoadAssignment data to {@link EndpointUpdate}. All the needed data, that is + * clusterName, localityLbEndpointsMap and dropPolicies, is extracted from ClusterLoadAssignment, + * and all other data is ignored. + */ + private static EndpointUpdate getEndpointUpdateFromClusterAssignment( + ClusterLoadAssignment clusterLoadAssignment) { + EndpointUpdate.Builder endpointUpdateBuilder = EndpointUpdate.newBuilder(); + endpointUpdateBuilder.setClusterName(clusterLoadAssignment.getClusterName()); + for (DropOverload dropOverload : clusterLoadAssignment.getPolicy().getDropOverloadsList()) { + endpointUpdateBuilder.addDropPolicy( + EnvoyProtoData.DropOverload.fromEnvoyProtoDropOverload(dropOverload)); + } + for (LocalityLbEndpoints localityLbEndpoints : clusterLoadAssignment.getEndpointsList()) { + endpointUpdateBuilder.addLocalityLbEndpoints( + EnvoyProtoData.Locality.fromEnvoyProtoLocality( + localityLbEndpoints.getLocality()), + EnvoyProtoData.LocalityLbEndpoints.fromEnvoyProtoLocalityLbEndpoints( + localityLbEndpoints)); + } + return endpointUpdateBuilder.build(); + } - LocalityStore localityStore = Iterables.getOnlyElement(localityStores); - LoadReportClient loadReportClient = Iterables.getOnlyElement(loadReportClients); - verify(localityStore, never()).reset(); - verify(loadReportClient, never()).stopLoadReporting(); - assertThat(channel.isShutdown()).isFalse(); + private void 
deliverResolvedAddresses(XdsConfig xdsConfig) { + ResolvedAddresses.Builder resolvedAddressBuilder = ResolvedAddresses.newBuilder() + .setAddresses(ImmutableList.of()) + .setLoadBalancingPolicyConfig(xdsConfig); + if (isFullFlow) { + resolvedAddressBuilder.setAttributes( + Attributes.newBuilder().set(XdsAttributes.XDS_CLIENT_POOL, + xdsClientPoolFromResolveAddresses).build()); + } + lookasideLb.handleResolvedAddresses(resolvedAddressBuilder.build()); + } - lookasideLb.shutdown(); + private void receiveEndpointUpdate(ClusterLoadAssignment clusterLoadAssignment) { + responseObserver.onNext( + buildDiscoveryResponse( + String.valueOf(versionIno++), + ImmutableList.of(Any.pack(clusterLoadAssignment)), + XdsClientImpl.ADS_TYPE_URL_EDS, + String.valueOf(nonce++))); + } - verify(localityStore).reset(); - verify(loadReportClient).stopLoadReporting(); - assertThat(channel.isShutdown()).isTrue(); + private void assertLatestConnectivityState(ConnectivityState expectedState) { + verify(helper, atLeastOnce()).updateBalancingState( + connectivityStateCaptor.capture(), pickerCaptor.capture()); + assertThat(connectivityStateCaptor.getValue()).isEqualTo(expectedState); + } + + private void assertLatestSubchannelPicker(Subchannel expectedSubchannelToPick) { + assertLatestConnectivityState(READY); + assertThat( + pickerCaptor.getValue().pickSubchannel(mock(PickSubchannelArgs.class)).getSubchannel()) + .isEqualTo(expectedSubchannelToPick); + } + + /** + * Matcher of ResolvedAddresses for round robin load balancer based on the set of backends. 
+ */ + private static final class RoundRobinBackendsMatcher + implements ArgumentMatcher { + + final List socketAddresses; + + RoundRobinBackendsMatcher(List socketAddresses) { + this.socketAddresses = socketAddresses; + } + + @Override + public boolean matches(ResolvedAddresses argument) { + List backends = new ArrayList<>(); + for (EquivalentAddressGroup eag : argument.getAddresses()) { + backends.add(Iterables.getOnlyElement(eag.getAddresses())); + } + return socketAddresses.equals(backends); + } + + static Builder builder() { + return new Builder(); + } + + static final class Builder { + final List socketAddresses = new ArrayList<>(); + + Builder addHostAndPort(String host, int port) { + socketAddresses.add(new InetSocketAddress(host, port)); + return this; + } + + RoundRobinBackendsMatcher build() { + return new RoundRobinBackendsMatcher(socketAddresses); + } + } + } + + /** + * A fake ObjectPool of XdsClient that keeps track of invocation times of getObject() and + * returnObject(). + */ + private static final class FakeXdsClientPool implements ObjectPool { + final XdsClient xdsClient; + int timesGetObjectCalled; + int timesReturnObjectCalled; + + FakeXdsClientPool(XdsClient xdsClient) { + this.xdsClient = xdsClient; + } + + @Override + public synchronized XdsClient getObject() { + timesGetObjectCalled++; + return xdsClient; + } + + @Override + public synchronized XdsClient returnObject(Object object) { + timesReturnObjectCalled++; + assertThat(timesReturnObjectCalled).isAtMost(timesGetObjectCalled); + return null; + } } } diff --git a/xds/src/test/java/io/grpc/xds/XdsCommsTest.java b/xds/src/test/java/io/grpc/xds/XdsCommsTest.java deleted file mode 100644 index 5d0828b9a43..00000000000 --- a/xds/src/test/java/io/grpc/xds/XdsCommsTest.java +++ /dev/null @@ -1,594 +0,0 @@ -/* - * Copyright 2019 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.grpc.xds; - -import static com.google.common.truth.Truth.assertThat; -import static io.grpc.xds.XdsComms2.getEndpointUpdatefromClusterAssignment; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; - -import com.google.protobuf.Any; -import com.google.protobuf.UInt32Value; -import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment; -import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment.Policy; -import io.envoyproxy.envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload; -import io.envoyproxy.envoy.api.v2.DiscoveryRequest; -import io.envoyproxy.envoy.api.v2.DiscoveryResponse; -import io.envoyproxy.envoy.api.v2.core.Address; -import io.envoyproxy.envoy.api.v2.core.Locality; -import io.envoyproxy.envoy.api.v2.core.Node; -import io.envoyproxy.envoy.api.v2.core.SocketAddress; -import io.envoyproxy.envoy.api.v2.endpoint.Endpoint; -import io.envoyproxy.envoy.api.v2.endpoint.LbEndpoint; -import io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints; -import 
io.envoyproxy.envoy.service.discovery.v2.AggregatedDiscoveryServiceGrpc.AggregatedDiscoveryServiceImplBase; -import io.envoyproxy.envoy.type.FractionalPercent; -import io.envoyproxy.envoy.type.FractionalPercent.DenominatorType; -import io.grpc.ChannelLogger; -import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.Helper; -import io.grpc.LoadBalancerProvider; -import io.grpc.LoadBalancerRegistry; -import io.grpc.ManagedChannel; -import io.grpc.Status; -import io.grpc.SynchronizationContext; -import io.grpc.inprocess.InProcessChannelBuilder; -import io.grpc.inprocess.InProcessServerBuilder; -import io.grpc.internal.BackoffPolicy; -import io.grpc.internal.FakeClock; -import io.grpc.internal.testing.StreamRecorder; -import io.grpc.stub.StreamObserver; -import io.grpc.testing.GrpcCleanupRule; -import io.grpc.xds.XdsClient.EndpointUpdate; -import io.grpc.xds.XdsClient.EndpointWatcher; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -/** - * Unit tests for {@link XdsComms2}. 
- */ -@RunWith(JUnit4.class) -public class XdsCommsTest { - private static final String EDS_TYPE_URL = - "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"; - private static final FakeClock.TaskFilter LB_RPC_RETRY_TASK_FILTER = - new FakeClock.TaskFilter() { - @Override - public boolean shouldAccept(Runnable command) { - return command.toString().contains("AdsRpcRetryTask"); - } - }; - - @Rule - public final GrpcCleanupRule cleanupRule = new GrpcCleanupRule(); - @Mock - private Helper helper; - @Mock - private EndpointWatcher endpointWatcher; - @Mock - private BackoffPolicy.Provider backoffPolicyProvider; - @Mock - private BackoffPolicy backoffPolicy1; - @Mock - private BackoffPolicy backoffPolicy2; - - private final FakeClock fakeClock = new FakeClock(); - private final SynchronizationContext syncContext = new SynchronizationContext( - new Thread.UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - throw new AssertionError(e); - } - }); - private final LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry(); - - private StreamRecorder streamRecorder; - private StreamObserver responseWriter; - - private ManagedChannel channel; - private XdsComms2 xdsComms; - - @Before - public void setUp() throws Exception { - MockitoAnnotations.initMocks(this); - - String serverName = InProcessServerBuilder.generateName(); - - AggregatedDiscoveryServiceImplBase serviceImpl = new AggregatedDiscoveryServiceImplBase() { - @Override - public StreamObserver streamAggregatedResources( - final StreamObserver responseObserver) { - responseWriter = responseObserver; - streamRecorder = StreamRecorder.create(); - - return new StreamObserver() { - - @Override - public void onNext(DiscoveryRequest value) { - streamRecorder.onNext(value); - } - - @Override - public void onError(Throwable t) { - streamRecorder.onError(t); - } - - @Override - public void onCompleted() { - streamRecorder.onCompleted(); - } - }; - } - }; - - 
cleanupRule.register( - InProcessServerBuilder - .forName(serverName) - .addService(serviceImpl) - .directExecutor() - .build() - .start()); - channel = - cleanupRule.register(InProcessChannelBuilder.forName(serverName).directExecutor().build()); - doReturn("fake_authority").when(helper).getAuthority(); - doReturn(syncContext).when(helper).getSynchronizationContext(); - doReturn(fakeClock.getScheduledExecutorService()).when(helper).getScheduledExecutorService(); - doReturn(mock(ChannelLogger.class)).when(helper).getChannelLogger(); - lbRegistry.register(new LoadBalancerProvider() { - @Override - public boolean isAvailable() { - return true; - } - - @Override - public int getPriority() { - return 0; - } - - @Override - public String getPolicyName() { - return "round_robin"; - } - - @Override - public LoadBalancer newLoadBalancer(Helper helper) { - return null; - } - }); - doReturn(backoffPolicy1, backoffPolicy2).when(backoffPolicyProvider).get(); - doReturn(10L, 100L, 1000L).when(backoffPolicy1).nextBackoffNanos(); - doReturn(20L, 200L).when(backoffPolicy2).nextBackoffNanos(); - xdsComms = new XdsComms2( - channel, helper, backoffPolicyProvider, - fakeClock.getStopwatchSupplier(), Node.getDefaultInstance()); - xdsComms.watchEndpointData("", endpointWatcher); - } - - @After - public void tearDown() { - xdsComms.shutdown(); - } - - @Test - public void shutdownLbRpc_verifyChannelShutdown() throws Exception { - xdsComms.shutdown(); - assertTrue(streamRecorder.awaitCompletion(1, TimeUnit.SECONDS)); - assertEquals(Status.Code.CANCELLED, Status.fromThrowable(streamRecorder.getError()).getCode()); - assertTrue(channel.isShutdown()); - } - - @Test - public void cancel() throws Exception { - xdsComms.shutdown(); - assertTrue(streamRecorder.awaitCompletion(1, TimeUnit.SECONDS)); - assertEquals(Status.Code.CANCELLED, Status.fromThrowable(streamRecorder.getError()).getCode()); - } - - @Test - public void handleEdsResponse() { - assertThat(streamRecorder.getValues()).hasSize(1); 
- DiscoveryRequest request = streamRecorder.getValues().get(0); - assertThat(request.getTypeUrl()).isEqualTo(EDS_TYPE_URL); - assertThat(request.getResourceNamesList()).hasSize(1); - - Locality localityProto1 = Locality.newBuilder() - .setRegion("region1").setZone("zone1").setSubZone("subzone1").build(); - LbEndpoint endpoint11 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr11").setPortValue(11)))) - .setLoadBalancingWeight(UInt32Value.of(11)) - .build(); - LbEndpoint endpoint12 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr12").setPortValue(12)))) - .setLoadBalancingWeight(UInt32Value.of(12)) - .build(); - Locality localityProto2 = Locality.newBuilder() - .setRegion("region2").setZone("zone2").setSubZone("subzone2").build(); - LbEndpoint endpoint21 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr21").setPortValue(21)))) - .setLoadBalancingWeight(UInt32Value.of(21)) - .build(); - LbEndpoint endpoint22 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr22").setPortValue(22)))) - .setLoadBalancingWeight(UInt32Value.of(22)) - .build(); - Locality localityProto3 = Locality.newBuilder() - .setRegion("region3").setZone("zone3").setSubZone("subzone3").build(); - LbEndpoint endpoint3 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr31").setPortValue(31)))) - .setLoadBalancingWeight(UInt32Value.of(31)) - .build(); - ClusterLoadAssignment clusterLoadAssignment = ClusterLoadAssignment.newBuilder() - 
.addEndpoints(LocalityLbEndpoints.newBuilder() - .setLocality(localityProto1) - .addLbEndpoints(endpoint11) - .addLbEndpoints(endpoint12) - .setLoadBalancingWeight(UInt32Value.of(1))) - .addEndpoints(LocalityLbEndpoints.newBuilder() - .setLocality(localityProto2) - .addLbEndpoints(endpoint21) - .addLbEndpoints(endpoint22) - .setLoadBalancingWeight(UInt32Value.of(2))) - .addEndpoints(LocalityLbEndpoints.newBuilder() - .setLocality(localityProto3) - .addLbEndpoints(endpoint3) - .setLoadBalancingWeight(UInt32Value.of(0))) - .build(); - DiscoveryResponse edsResponse = DiscoveryResponse.newBuilder() - .addResources(Any.pack(clusterLoadAssignment)) - .setTypeUrl(EDS_TYPE_URL) - .build(); - responseWriter.onNext(edsResponse); - - verify(endpointWatcher).onEndpointChanged( - getEndpointUpdatefromClusterAssignment(clusterLoadAssignment)); - - ClusterLoadAssignment clusterLoadAssignment2 = ClusterLoadAssignment.newBuilder() - .addEndpoints(LocalityLbEndpoints.newBuilder() - .setLocality(localityProto2) - .addLbEndpoints(endpoint21) - .addLbEndpoints(endpoint22) - .setLoadBalancingWeight(UInt32Value.of(2))) - .addEndpoints(LocalityLbEndpoints.newBuilder() - .setLocality(localityProto1) - .addLbEndpoints(endpoint11) - .addLbEndpoints(endpoint12) - .setLoadBalancingWeight(UInt32Value.of(3))) - .build(); - edsResponse = DiscoveryResponse.newBuilder() - .addResources(Any.pack(clusterLoadAssignment2)) - .setTypeUrl(EDS_TYPE_URL) - .build(); - responseWriter.onNext(edsResponse); - - verify(endpointWatcher).onEndpointChanged( - getEndpointUpdatefromClusterAssignment(clusterLoadAssignment2)); - verifyNoMoreInteractions(endpointWatcher); - } - - @Test - public void serverOnCompleteShouldFailClient() { - responseWriter.onCompleted(); - - ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(Status.class); - verify(endpointWatcher).onError(statusCaptor.capture()); - assertThat(statusCaptor.getValue().getCode()).isEqualTo(Status.Code.UNAVAILABLE); - 
verifyNoMoreInteractions(endpointWatcher); - } - - /** - * The 1st ADS RPC receives invalid response. Verify retry is scheduled. - * Verify the 2nd RPC (retry) starts after backoff. - * - *

The 2nd RPC fails with response observer onError() without receiving initial response. - * Verify retry is scheduled. Verify the 3rd PRC starts after backoff. - * - *

The 3rd PRC receives invalid initial response. Verify retry is scheduled. - * Verify the 4th PRC starts after backoff. - * - *

The 4th RPC receives valid initial response and then fails with response observer - * onError(). Verify retry is scheduled. Verify the backoff is reset. Verify the 5th PRC starts - * immediately. - * - *

The 5th RPC fails with response observer onError() without receiving initial response. - * Verify retry is scheduled. Verify the 6th PRC starts after backoff. - * - *

The 6th RPC fails with response observer onError() without receiving initial response. - * Verify retry is scheduled. Call {@link XdsComms2#shutdown()} ()}, verify retry timer is - * cancelled. - */ - @Test - public void adsRpcRetry() { - StreamRecorder currentStreamRecorder = streamRecorder; - assertThat(currentStreamRecorder.getValues()).hasSize(1); - InOrder inOrder = - inOrder(backoffPolicyProvider, backoffPolicy1, backoffPolicy2, endpointWatcher); - inOrder.verify(backoffPolicyProvider).get(); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - - DiscoveryResponse invalidResponse = - DiscoveryResponse.newBuilder().setTypeUrl(EDS_TYPE_URL).build(); - // The 1st ADS RPC receives invalid response - responseWriter.onNext(invalidResponse); - inOrder.verify(endpointWatcher).onError(any(Status.class)); - assertThat(currentStreamRecorder.getError()).isNotNull(); - - // Will start backoff sequence 1 (10ns) - inOrder.verify(backoffPolicy1).nextBackoffNanos(); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - - // Fast-forward to a moment before the retry - fakeClock.forwardNanos(9); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertSame(streamRecorder, currentStreamRecorder); - - // Then time for retry - fakeClock.forwardNanos(1); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertNotSame(currentStreamRecorder, streamRecorder); - currentStreamRecorder = streamRecorder; - assertThat(currentStreamRecorder.getValues()).hasSize(1); - - // Fail the retry after spending 4ns - fakeClock.forwardNanos(4); - // The 2nd RPC fails with response observer onError() without receiving initial response - responseWriter.onError(new Exception("fake error")); - inOrder.verify(endpointWatcher).onError(any(Status.class)); - - // Will start backoff sequence 2 (100ns) - inOrder.verify(backoffPolicy1).nextBackoffNanos(); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); 
- // Fast-forward to a moment before the retry, the time spent in the last try is deducted. - fakeClock.forwardNanos(100 - 4 - 1); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertSame(streamRecorder, currentStreamRecorder); - - // Then time for retry - fakeClock.forwardNanos(1); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertNotSame(currentStreamRecorder, streamRecorder); - currentStreamRecorder = streamRecorder; - assertThat(currentStreamRecorder.getValues()).hasSize(1); - assertThat(currentStreamRecorder.getError()).isNull(); - - // Fail the retry after spending 5ns - fakeClock.forwardNanos(5); - // The 3rd PRC receives invalid initial response. - responseWriter.onNext(invalidResponse); - inOrder.verify(endpointWatcher).onError(any(Status.class)); - assertThat(currentStreamRecorder.getError()).isNotNull(); - - // Will start backoff sequence 3 (1000ns) - inOrder.verify(backoffPolicy1).nextBackoffNanos(); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - - // Fast-forward to a moment before the retry, the time spent in the last try is deducted. 
- fakeClock.forwardNanos(1000 - 5 - 1); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertSame(streamRecorder, currentStreamRecorder); - - // Then time for retry - fakeClock.forwardNanos(1); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertNotSame(currentStreamRecorder, streamRecorder); - currentStreamRecorder = streamRecorder; - assertThat(currentStreamRecorder.getValues()).hasSize(1); - assertThat(currentStreamRecorder.getError()).isNull(); - - // The 4th RPC receives valid initial response - fakeClock.forwardNanos(6); - Locality localityProto1 = Locality.newBuilder() - .setRegion("region1").setZone("zone1").setSubZone("subzone1").build(); - LbEndpoint endpoint11 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr11").setPortValue(11)))) - .setLoadBalancingWeight(UInt32Value.of(11)) - .build(); - DiscoveryResponse validEdsResponse = DiscoveryResponse.newBuilder() - .addResources(Any.pack(ClusterLoadAssignment.newBuilder() - .addEndpoints(LocalityLbEndpoints.newBuilder() - .setLocality(localityProto1) - .addLbEndpoints(endpoint11) - .setLoadBalancingWeight(UInt32Value.of(1))) - .build())) - .setTypeUrl(EDS_TYPE_URL) - .build(); - responseWriter.onNext(validEdsResponse); - - inOrder.verify(backoffPolicyProvider, never()).get(); - inOrder.verify(backoffPolicy2, never()).nextBackoffNanos(); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - - // The 4th RPC then fails with response observer onError() - fakeClock.forwardNanos(7); - responseWriter.onError(new Exception("fake error")); - - // Will reset the retry sequence and retry immediately, because balancer has responded. 
- inOrder.verify(backoffPolicyProvider).get(); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertNotSame(currentStreamRecorder, streamRecorder); - currentStreamRecorder = streamRecorder; - assertThat(currentStreamRecorder.getValues()).hasSize(1); - assertThat(currentStreamRecorder.getError()).isNull(); - - // The 5th RPC fails with response observer onError() without receiving initial response - fakeClock.forwardNanos(8); - responseWriter.onError(new Exception("fake error")); - inOrder.verify(endpointWatcher).onError(any(Status.class)); - - // Will start backoff sequence 1 (20ns) - inOrder.verify(backoffPolicy2).nextBackoffNanos(); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - // Fast-forward to a moment before the retry, the time spent in the last try is deducted. - fakeClock.forwardNanos(20 - 8 - 1); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertSame(streamRecorder, currentStreamRecorder); - - // Then time for retry - fakeClock.forwardNanos(1); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - assertNotSame(currentStreamRecorder, streamRecorder); - currentStreamRecorder = streamRecorder; - assertThat(currentStreamRecorder.getValues()).hasSize(1); - assertThat(currentStreamRecorder.getError()).isNull(); - - // Wrapping up - verify(backoffPolicyProvider, times(2)).get(); - verify(backoffPolicy1, times(3)).nextBackoffNanos(); // for 2nd, 3rd, 4th RPC - verify(backoffPolicy2, times(1)).nextBackoffNanos(); // for 6th RPC - - // The 6th RPC fails with response observer onError() without receiving initial response - responseWriter.onError(new Exception("fake error")); - inOrder.verify(endpointWatcher).onError(any(Status.class)); - - // Retry is scheduled - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - - // Shutdown cancels retry - xdsComms.shutdown(); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - } - - 
@Test - public void refreshAdsStreamCancelsExistingRetry() { - responseWriter.onError(new Exception("fake error")); - verify(endpointWatcher).onError(any(Status.class)); - assertEquals(1, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - - xdsComms.refreshAdsStream(); - assertEquals(0, fakeClock.numPendingTasks(LB_RPC_RETRY_TASK_FILTER)); - } - - @Test - public void convertClusterLoadAssignmentToEndpointUpdate() { - Locality localityProto1 = Locality.newBuilder() - .setRegion("region1").setZone("zone1").setSubZone("subzone1").build(); - LbEndpoint endpoint11 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr11").setPortValue(11)))) - .setLoadBalancingWeight(UInt32Value.of(11)) - .build(); - LbEndpoint endpoint12 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr12").setPortValue(12)))) - .setLoadBalancingWeight(UInt32Value.of(12)) - .build(); - Locality localityProto2 = Locality.newBuilder() - .setRegion("region2").setZone("zone2").setSubZone("subzone2").build(); - LbEndpoint endpoint21 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr21").setPortValue(21)))) - .setLoadBalancingWeight(UInt32Value.of(21)) - .build(); - LbEndpoint endpoint22 = LbEndpoint.newBuilder() - .setEndpoint(Endpoint.newBuilder() - .setAddress(Address.newBuilder() - .setSocketAddress(SocketAddress.newBuilder() - .setAddress("addr22").setPortValue(22)))) - .setLoadBalancingWeight(UInt32Value.of(22)) - .build(); - LocalityLbEndpoints localityLbEndpointsProto1 = LocalityLbEndpoints.newBuilder() - .setLocality(localityProto1) - .setPriority(1) - .addLbEndpoints(endpoint11) - .addLbEndpoints(endpoint12) - .setLoadBalancingWeight(UInt32Value.of(1)) - 
.build(); - LocalityLbEndpoints localityLbEndpointsProto2 = LocalityLbEndpoints.newBuilder() - .setLocality(localityProto2) - .addLbEndpoints(endpoint21) - .addLbEndpoints(endpoint22) - .setLoadBalancingWeight(UInt32Value.of(2)) - .build(); - DropOverload dropOverloadProto1 = DropOverload.newBuilder() - .setCategory("cat1") - .setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.TEN_THOUSAND).setNumerator(123)) - .build(); - DropOverload dropOverloadProto2 = DropOverload.newBuilder() - .setCategory("cat2") - .setDropPercentage(FractionalPercent.newBuilder() - .setDenominator(DenominatorType.TEN_THOUSAND).setNumerator(456)) - .build(); - ClusterLoadAssignment clusterLoadAssignment = ClusterLoadAssignment.newBuilder() - .setClusterName("cluster1") - .addEndpoints(localityLbEndpointsProto1) - .addEndpoints(localityLbEndpointsProto2) - .setPolicy(Policy.newBuilder() - .addDropOverloads(dropOverloadProto1) - .addDropOverloads(dropOverloadProto2)) - .build(); - - EndpointUpdate endpointUpdate = getEndpointUpdatefromClusterAssignment(clusterLoadAssignment); - - assertThat(endpointUpdate.getClusterName()).isEqualTo("cluster1"); - Map localityLbEndpointsMap = - endpointUpdate.getLocalityLbEndpointsMap(); - assertThat(localityLbEndpointsMap).containsExactly( - EnvoyProtoData.Locality.fromEnvoyProtoLocality(localityProto1), - EnvoyProtoData.LocalityLbEndpoints.fromEnvoyProtoLocalityLbEndpoints( - localityLbEndpointsProto1), - EnvoyProtoData.Locality.fromEnvoyProtoLocality(localityProto2), - EnvoyProtoData.LocalityLbEndpoints.fromEnvoyProtoLocalityLbEndpoints( - localityLbEndpointsProto2)); - assertThat(endpointUpdate.getDropPolicies()).containsExactly( - EnvoyProtoData.DropOverload.fromEnvoyProtoDropOverload(dropOverloadProto1), - EnvoyProtoData.DropOverload.fromEnvoyProtoDropOverload(dropOverloadProto2)); - } -} From 5b7f5b8c3bde36e1b107da3d8385c597bb193a8c Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Wed, 22 Jan 2020 13:28:43 -0800 
Subject: [PATCH 16/86] examples: Allow passing target and simplify lifecycle The target can be provided on the command line to avoid needing to recompile the example just to change where the server is located. We use a target instead of addresses as that is the approach we have wanted to encourage for a while since it allows choosing alternative name resolvers. We typically encourage injecting Channels, not ManagedChannels, which has the added benefit of simplifying the example. Less indirection makes for a better example. Swapping to target string could be done to examples-tls and examples-gauth as well, but it would be much more invasive to the tls example and the gauth example would need proper testing after the change. --- .../examples/helloworld/HelloWorldClient.java | 61 +++++++++++-------- .../examples/routeguide/RouteGuideClient.java | 30 ++++----- .../routeguide/RouteGuideClientTest.java | 10 +-- 3 files changed, 55 insertions(+), 46 deletions(-) diff --git a/examples/src/main/java/io/grpc/examples/helloworld/HelloWorldClient.java b/examples/src/main/java/io/grpc/examples/helloworld/HelloWorldClient.java index 322c0f25f72..d00bca1e216 100644 --- a/examples/src/main/java/io/grpc/examples/helloworld/HelloWorldClient.java +++ b/examples/src/main/java/io/grpc/examples/helloworld/HelloWorldClient.java @@ -16,6 +16,7 @@ package io.grpc.examples.helloworld; +import io.grpc.Channel; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.StatusRuntimeException; @@ -29,26 +30,15 @@ public class HelloWorldClient { private static final Logger logger = Logger.getLogger(HelloWorldClient.class.getName()); - private final ManagedChannel channel; private final GreeterGrpc.GreeterBlockingStub blockingStub; - /** Construct client connecting to HelloWorld server at {@code host:port}. */ - public HelloWorldClient(String host, int port) { - this(ManagedChannelBuilder.forAddress(host, port) - // Channels are secure by default (via SSL/TLS). 
For the example we disable TLS to avoid - // needing certificates. - .usePlaintext() - .build()); - } - /** Construct client for accessing HelloWorld server using the existing channel. */ - HelloWorldClient(ManagedChannel channel) { - this.channel = channel; - blockingStub = GreeterGrpc.newBlockingStub(channel); - } + public HelloWorldClient(Channel channel) { + // 'channel' here is a Channel, not a ManagedChannel, so it is not this code's responsibility to + // shut it down. - public void shutdown() throws InterruptedException { - channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + // Passing Channels to code makes code easier to test and makes it easier to reuse Channels. + blockingStub = GreeterGrpc.newBlockingStub(channel); } /** Say hello to server. */ @@ -67,20 +57,43 @@ public void greet(String name) { /** * Greet server. If provided, the first element of {@code args} is the name to use in the - * greeting. + * greeting. The second argument is the target server. */ public static void main(String[] args) throws Exception { + String user = "world"; // Access a service running on the local machine on port 50051 - HelloWorldClient client = new HelloWorldClient("localhost", 50051); - try { - String user = "world"; - // Use the arg as the name to greet if provided - if (args.length > 0) { - user = args[0]; + String target = "localhost:50051"; + // Allow passing in the user and target strings as command line arguments + if (args.length > 0) { + if ("--help".equals(args[0])) { + System.err.println("Usage: [name [target]]"); + System.err.println(""); + System.err.println(" name The name you wish to be greeted by. Defaults to " + user); + System.err.println(" target The server to connect to. Defaults to " + target); + System.exit(1); } + user = args[0]; + } + if (args.length > 1) { + target = args[1]; + } + + // Create a communication channel to the server, known as a Channel. Channels are thread-safe + // and reusable. 
It is common to create channels at the beginning of your application and reuse + // them until the application shuts down. + ManagedChannel channel = ManagedChannelBuilder.forTarget(target) + // Channels are secure by default (via SSL/TLS). For the example we disable TLS to avoid + // needing certificates. + .usePlaintext() + .build(); + try { + HelloWorldClient client = new HelloWorldClient(channel); client.greet(user); } finally { - client.shutdown(); + // ManagedChannels use resources like threads and TCP connections. To prevent leaking these + // resources the channel should be shut down when it will no longer be used. If it may be used + // again leave it running. + channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS); } } } diff --git a/examples/src/main/java/io/grpc/examples/routeguide/RouteGuideClient.java b/examples/src/main/java/io/grpc/examples/routeguide/RouteGuideClient.java index 6e22ba52861..b3f46b48bf9 100644 --- a/examples/src/main/java/io/grpc/examples/routeguide/RouteGuideClient.java +++ b/examples/src/main/java/io/grpc/examples/routeguide/RouteGuideClient.java @@ -18,6 +18,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.Message; +import io.grpc.Channel; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.Status; @@ -40,29 +41,18 @@ public class RouteGuideClient { private static final Logger logger = Logger.getLogger(RouteGuideClient.class.getName()); - private final ManagedChannel channel; private final RouteGuideBlockingStub blockingStub; private final RouteGuideStub asyncStub; private Random random = new Random(); private TestHelper testHelper; - /** Construct client for accessing RouteGuide server at {@code host:port}. */ - public RouteGuideClient(String host, int port) { - this(ManagedChannelBuilder.forAddress(host, port).usePlaintext()); - } - /** Construct client for accessing RouteGuide server using the existing channel. 
*/ - public RouteGuideClient(ManagedChannelBuilder channelBuilder) { - channel = channelBuilder.build(); + public RouteGuideClient(Channel channel) { blockingStub = RouteGuideGrpc.newBlockingStub(channel); asyncStub = RouteGuideGrpc.newStub(channel); } - public void shutdown() throws InterruptedException { - channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); - } - /** * Blocking unary call example. Calls getFeature and prints the response. */ @@ -250,6 +240,17 @@ public void onCompleted() { /** Issues several different requests and then exits. */ public static void main(String[] args) throws InterruptedException { + String target = "localhost:8980"; + if (args.length > 0) { + if ("--help".equals(args[0])) { + System.err.println("Usage: [target]"); + System.err.println(""); + System.err.println(" target The server to connect to. Defaults to " + target); + System.exit(1); + } + target = args[0]; + } + List features; try { features = RouteGuideUtil.parseFeatures(RouteGuideUtil.getDefaultFeaturesFile()); @@ -258,8 +259,9 @@ public static void main(String[] args) throws InterruptedException { return; } - RouteGuideClient client = new RouteGuideClient("localhost", 8980); + ManagedChannel channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build(); try { + RouteGuideClient client = new RouteGuideClient(channel); // Looking for a valid feature client.getFeature(409146138, -746188906); @@ -279,7 +281,7 @@ public static void main(String[] args) throws InterruptedException { client.warning("routeChat can not finish within 1 minutes"); } } finally { - client.shutdown(); + channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS); } } diff --git a/examples/src/test/java/io/grpc/examples/routeguide/RouteGuideClientTest.java b/examples/src/test/java/io/grpc/examples/routeguide/RouteGuideClientTest.java index fbe1ae20b1d..27fe5b70c9b 100644 --- a/examples/src/test/java/io/grpc/examples/routeguide/RouteGuideClientTest.java +++ 
b/examples/src/test/java/io/grpc/examples/routeguide/RouteGuideClientTest.java @@ -40,7 +40,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -96,16 +95,11 @@ public void setUp() throws Exception { // Use a mutable service registry for later registering the service impl for each test case. grpcCleanup.register(InProcessServerBuilder.forName(serverName) .fallbackHandlerRegistry(serviceRegistry).directExecutor().build().start()); - client = - new RouteGuideClient(InProcessChannelBuilder.forName(serverName).directExecutor()); + client = new RouteGuideClient(grpcCleanup.register( + InProcessChannelBuilder.forName(serverName).directExecutor().build())); client.setTestHelper(testHelper); } - @After - public void tearDown() throws Exception { - client.shutdown(); - } - /** * Example for testing blocking unary call. 
*/ From 751faa6faa3fbccf46af74c6597218438f39c2ca Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Thu, 23 Jan 2020 17:59:27 -0800 Subject: [PATCH 17/86] core: promote ServiceConfigErrorHandling (#6633) --- .../AbstractManagedChannelImplBuilder.java | 30 - .../AutoConfiguredLoadBalancerFactory.java | 213 +- .../AutoConfiguredLoadBalancerFactory2.java | 447 -- .../io/grpc/internal/ManagedChannelImpl.java | 225 +- .../io/grpc/internal/ManagedChannelImpl2.java | 2007 -------- .../internal/ManagedChannelServiceConfig.java | 47 +- .../ManagedChannelServiceConfig2.java | 322 -- .../internal/ServiceConfigInterceptor.java | 26 +- .../internal/ServiceConfigInterceptor2.java | 199 - ...AbstractManagedChannelImplBuilderTest.java | 14 - ...AutoConfiguredLoadBalancerFactoryTest.java | 465 +- ...utoConfiguredLoadBalancerFactoryTest2.java | 981 ---- .../io/grpc/internal/HedgingPolicyTest.java | 30 +- .../ManagedChannelImplIdlenessTest.java | 11 +- .../ManagedChannelImplIdlenessTest2.java | 560 --- .../grpc/internal/ManagedChannelImplTest.java | 257 +- .../internal/ManagedChannelImplTest2.java | 4263 ----------------- .../io/grpc/internal/RetryPolicyTest.java | 30 +- .../ServiceConfigErrorHandlingTest.java | 8 +- .../ServiceConfigInterceptorTest.java | 105 +- .../ServiceConfigInterceptorTest2.java | 493 -- 21 files changed, 897 insertions(+), 9836 deletions(-) delete mode 100644 core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java delete mode 100644 core/src/main/java/io/grpc/internal/ManagedChannelImpl2.java delete mode 100644 core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig2.java delete mode 100644 core/src/main/java/io/grpc/internal/ServiceConfigInterceptor2.java delete mode 100644 core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java delete mode 100644 core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest2.java delete mode 100644 core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java delete 
mode 100644 core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest2.java diff --git a/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java b/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java index 507b5d185fe..6bc54d62d05 100644 --- a/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java +++ b/core/src/main/java/io/grpc/internal/AbstractManagedChannelImplBuilder.java @@ -99,13 +99,6 @@ public static ManagedChannelBuilder forTarget(String target) { private static final long DEFAULT_RETRY_BUFFER_SIZE_IN_BYTES = 1L << 24; // 16M private static final long DEFAULT_PER_RPC_BUFFER_LIMIT_IN_BYTES = 1L << 20; // 1M - @VisibleForTesting - static final String ENABLE_SERVICE_CONFIG_ERROR_HANDLING_PROPERTY = - "io.grpc.internal.ManagedChannelImpl.enableServiceConfigErrorHandling"; - private static final boolean DEFAULT_ENABLE_SERVICE_CONFIG_ERROR_HANDLING = - Boolean.parseBoolean( - System.getProperty(ENABLE_SERVICE_CONFIG_ERROR_HANDLING_PROPERTY, "false")); - ObjectPool executorPool = DEFAULT_EXECUTOR_POOL; ObjectPool offloadExecutorPool = DEFAULT_EXECUTOR_POOL; @@ -165,8 +158,6 @@ public static ManagedChannelBuilder forTarget(String target) { @Nullable ProxyDetector proxyDetector; - boolean enableServiceConfigErrorHandling = DEFAULT_ENABLE_SERVICE_CONFIG_ERROR_HANDLING; - /** * Sets the maximum message size allowed for a single gRPC frame. If an inbound messages * larger than this limit is received it will not be processed and the RPC will fail with @@ -457,16 +448,6 @@ public T disableServiceConfigLookUp() { return thisT(); } - /** - * Enables service config error handling implemented in {@link ManagedChannelImpl2}. By default, - * it is disabled unless system property {@link #ENABLE_SERVICE_CONFIG_ERROR_HANDLING_PROPERTY} is - * set to {@code "true"}. 
- */ - protected T enableServiceConfigErrorHandling() { - this.enableServiceConfigErrorHandling = true; - return thisT(); - } - /** * Disable or enable stats features. Enabled by default. * @@ -527,17 +508,6 @@ protected String checkAuthority(String authority) { @Override public ManagedChannel build() { - if (this.enableServiceConfigErrorHandling) { - return new ManagedChannelOrphanWrapper(new ManagedChannelImpl2( - this, - buildTransportFactory(), - // TODO(carl-mastrangelo): Allow clients to pass this in - new ExponentialBackoffPolicy.Provider(), - SharedResourcePool.forResource(GrpcUtil.SHARED_CHANNEL_EXECUTOR), - GrpcUtil.STOPWATCH_SUPPLIER, - getEffectiveInterceptors(), - TimeProvider.SYSTEM_TIME_PROVIDER)); - } return new ManagedChannelOrphanWrapper(new ManagedChannelImpl( this, buildTransportFactory(), diff --git a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java index ac926693f82..263a20d4a8d 100644 --- a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java +++ b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java @@ -20,7 +20,10 @@ import static io.grpc.LoadBalancer.ATTR_LOAD_BALANCING_CONFIG; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; import io.grpc.Attributes; +import io.grpc.ChannelLogger; import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.ConnectivityState; import io.grpc.ConnectivityStateInfo; @@ -39,13 +42,13 @@ import io.grpc.internal.ServiceConfigUtil.LbConfig; import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.logging.Logger; import javax.annotation.Nullable; -@SuppressWarnings("deprecation") // migrate to AutoConfiguredLoadBalancerFactory2 is required +// TODO(creamsoup) fully deprecate 
LoadBalancer.ATTR_LOAD_BALANCING_CONFIG +@SuppressWarnings("deprecation") public final class AutoConfiguredLoadBalancerFactory { private static final Logger logger = Logger.getLogger(AutoConfiguredLoadBalancerFactory.class.getName()); @@ -107,8 +110,10 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { } /** - * Returns non-OK status if resolvedAddresses is rejected and should be considered as a - * name-resolution error. + * Returns non-OK status if resolvedAddresses is empty and delegate lb requires address ({@link + * LoadBalancer#canHandleEmptyAddressListFromNameResolution()} returns {@code false}). {@code + * AutoConfiguredLoadBalancer} doesn't expose {@code + * canHandleEmptyAddressListFromNameResolution} because it depends on the delegated LB. */ Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { List servers = resolvedAddresses.getAddresses(); @@ -116,12 +121,14 @@ Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { if (attributes.get(ATTR_LOAD_BALANCING_CONFIG) != null) { throw new IllegalArgumentException( "Unexpected ATTR_LOAD_BALANCING_CONFIG from upstream: " - + attributes.get(ATTR_LOAD_BALANCING_CONFIG)); + + attributes.get(ATTR_LOAD_BALANCING_CONFIG)); } - Map configMap = attributes.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); - PolicySelection selection; + PolicySelection policySelection = + (PolicySelection) resolvedAddresses.getLoadBalancingPolicyConfig(); + ResolvedPolicySelection resolvedSelection; + try { - selection = decideLoadBalancerProvider(servers, configMap); + resolvedSelection = resolveLoadBalancerProvider(servers, policySelection); } catch (PolicyException e) { Status s = Status.INTERNAL.withDescription(e.getMessage()); helper.updateBalancingState(ConnectivityState.TRANSIENT_FAILURE, new FailingPicker(s)); @@ -130,6 +137,7 @@ Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { delegate = new NoopLoadBalancer(); return Status.OK; } + 
PolicySelection selection = resolvedSelection.policySelection; if (delegateProvider == null || !selection.provider.getPolicyName().equals(delegateProvider.getPolicyName())) { @@ -142,24 +150,25 @@ Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { ChannelLogLevel.INFO, "Load balancer changed from {0} to {1}", old.getClass().getSimpleName(), delegate.getClass().getSimpleName()); } - - if (selection.config != null) { + Object lbConfig = selection.config; + if (lbConfig != null) { helper.getChannelLogger().log( ChannelLogLevel.DEBUG, "Load-balancing config: {0}", selection.config); attributes = - attributes.toBuilder().set(ATTR_LOAD_BALANCING_CONFIG, selection.config).build(); + attributes.toBuilder().set(ATTR_LOAD_BALANCING_CONFIG, selection.rawConfig).build(); } LoadBalancer delegate = getDelegate(); - if (selection.serverList.isEmpty() + if (resolvedSelection.serverList.isEmpty() && !delegate.canHandleEmptyAddressListFromNameResolution()) { return Status.UNAVAILABLE.withDescription( "NameResolver returned no usable address. addrs=" + servers + ", attrs=" + attributes); } else { delegate.handleResolvedAddresses( ResolvedAddresses.newBuilder() - .setAddresses(selection.serverList) + .setAddresses(resolvedSelection.serverList) .setAttributes(attributes) + .setLoadBalancingPolicyConfig(lbConfig) .build()); return Status.OK; } @@ -199,24 +208,17 @@ LoadBalancerProvider getDelegateProvider() { } /** - * Picks a load balancer based on given criteria. In order of preference: - * - *

    - *
  1. User provided lb on the channel. This is a degenerate case and not handled here. - * This options is deprecated.
  2. - *
  3. Policy from "loadBalancingConfig" if present. This is not covered here.
  4. - *
  5. "grpclb" if any gRPC LB balancer addresses are present
  6. - *
  7. The policy from "loadBalancingPolicy" if present
  8. - *
  9. "pick_first" if the service config choice does not specify
  10. - *
+ * Resolves a load balancer based on given criteria. If policySelection is {@code null} and + * given servers contains any gRPC LB addresses, it will fall back to "grpclb". If no gRPC LB + * addresses are not present, it will fall back to {@link #defaultPolicy}. * * @param servers The list of servers reported - * @param config the service config object - * @return the new load balancer factory, never null + * @param policySelection the selected policy from raw service config + * @return the resolved policy selection */ @VisibleForTesting - PolicySelection decideLoadBalancerProvider( - List servers, @Nullable Map config) + ResolvedPolicySelection resolveLoadBalancerProvider( + List servers, @Nullable PolicySelection policySelection) throws PolicyException { // Check for balancer addresses boolean haveBalancerAddress = false; @@ -229,36 +231,10 @@ PolicySelection decideLoadBalancerProvider( } } - List lbConfigs = null; - if (config != null) { - List> rawLbConfigs = - ServiceConfigUtil.getLoadBalancingConfigsFromServiceConfig(config); - lbConfigs = ServiceConfigUtil.unwrapLoadBalancingConfigList(rawLbConfigs); - } - if (lbConfigs != null && !lbConfigs.isEmpty()) { - LinkedHashSet policiesTried = new LinkedHashSet<>(); - for (LbConfig lbConfig : lbConfigs) { - String policy = lbConfig.getPolicyName(); - LoadBalancerProvider provider = registry.getProvider(policy); - if (provider == null) { - policiesTried.add(policy); - } else { - if (!policiesTried.isEmpty()) { - // Before returning, log all previously tried policies - helper.getChannelLogger().log( - ChannelLogLevel.DEBUG, - "{0} specified by Service Config are not available", policiesTried); - } - return new PolicySelection( - provider, - policy.equals(GRPCLB_POLICY_NAME) ? 
servers : backendAddrs, - lbConfig.getRawConfigValue()); - } - } - if (!haveBalancerAddress) { - throw new PolicyException( - "None of " + policiesTried + " specified by Service Config are available."); - } + if (policySelection != null) { + String policyName = policySelection.provider.getPolicyName(); + return new ResolvedPolicySelection( + policySelection, policyName.equals(GRPCLB_POLICY_NAME) ? servers : backendAddrs); } if (haveBalancerAddress) { @@ -278,20 +254,29 @@ PolicySelection decideLoadBalancerProvider( helper.getChannelLogger().log(ChannelLogLevel.ERROR, errorMsg); logger.warning(errorMsg); } - return new PolicySelection( - getProviderOrThrow( - "round_robin", "received balancer addresses but grpclb runtime is missing"), - backendAddrs, null); + return new ResolvedPolicySelection( + new PolicySelection( + getProviderOrThrow( + "round_robin", "received balancer addresses but grpclb runtime is missing"), + /* rawConfig = */ null, + /* config= */ null), + backendAddrs); } - return new PolicySelection(grpclbProvider, servers, null); + return new ResolvedPolicySelection( + new PolicySelection( + grpclbProvider, /* rawConfig= */ null, /* config= */ null), servers); } // No balancer address this time. If balancer address shows up later, we want to make sure // the warning is logged one more time. roundRobinDueToGrpclbDepMissing = false; // No config nor balancer address. Use default. - return new PolicySelection( - getProviderOrThrow(defaultPolicy, "using default policy"), servers, null); + return new ResolvedPolicySelection( + new PolicySelection( + getProviderOrThrow(defaultPolicy, "using default policy"), + /* rawConfig= */ null, + /* config= */ null), + servers); } } @@ -306,13 +291,27 @@ private LoadBalancerProvider getProviderOrThrow(String policy, String choiceReas } /** - * Unlike a normal {@link LoadBalancer.Factory}, this accepts a full service config rather than + * Parses first available LoadBalancer policy from service config. 
Available LoadBalancer should + * be registered to {@link LoadBalancerRegistry}. If the first available LoadBalancer policy is + * invalid, it doesn't fall-back to next available policy, instead it returns error. This also + * means, it ignores LoadBalancer policies after the first available one even if any of them are + * invalid. + * + *

Order of policy preference: + * + *

    + *
  1. Policy from "loadBalancingConfig" if present
  2. + *
  3. The policy from deprecated "loadBalancingPolicy" if present
  4. + *
+ *

+ * + *

Unlike a normal {@link LoadBalancer.Factory}, this accepts a full service config rather than * the LoadBalancingConfig. * - * @return null if no selection could be made. + * @return the parsed {@link PolicySelection}, or {@code null} if no selection could be made. */ @Nullable - ConfigOrError selectLoadBalancerPolicy(Map serviceConfig) { + ConfigOrError parseLoadBalancerPolicy(Map serviceConfig, ChannelLogger channelLogger) { try { List loadBalancerConfigs = null; if (serviceConfig != null) { @@ -328,10 +327,18 @@ ConfigOrError selectLoadBalancerPolicy(Map serviceConfig) { if (provider == null) { policiesTried.add(policy); } else { - return ConfigOrError.fromConfig(new PolicySelection( - provider, - /* serverList= */ null, - lbConfig.getRawConfigValue())); + if (!policiesTried.isEmpty()) { + channelLogger.log( + ChannelLogLevel.DEBUG, + "{0} specified by Service Config are not available", policiesTried); + } + ConfigOrError parsedLbPolicyConfig = + provider.parseLoadBalancingPolicyConfig(lbConfig.getRawConfigValue()); + if (parsedLbPolicyConfig.getError() != null) { + return parsedLbPolicyConfig; + } + return ConfigOrError.fromConfig( + new PolicySelection(provider, serviceConfig, parsedLbPolicyConfig.getConfig())); } } return ConfigOrError.fromError( @@ -357,24 +364,64 @@ private PolicyException(String msg) { @VisibleForTesting static final class PolicySelection { final LoadBalancerProvider provider; - @Nullable final List serverList; - // TODO(carl-mastrangelo): make this the non-raw service config object. 
- @Nullable final Map config; + @Nullable final Map rawConfig; + @Nullable final Object config; PolicySelection( - LoadBalancerProvider provider, List serverList, - @Nullable Map config) { + LoadBalancerProvider provider, + @Nullable Map rawConfig, + @Nullable Object config) { this.provider = checkNotNull(provider, "provider"); - this.serverList = Collections.unmodifiableList(checkNotNull(serverList, "serverList")); + this.rawConfig = rawConfig; this.config = config; } - PolicySelection( - LoadBalancerProvider provider, - @Nullable Map config) { - this.provider = checkNotNull(provider, "provider"); - this.serverList = null; - this.config = config; + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PolicySelection that = (PolicySelection) o; + return Objects.equal(provider, that.provider) + && Objects.equal(rawConfig, that.rawConfig) + && Objects.equal(config, that.config); + } + + @Override + public int hashCode() { + return Objects.hashCode(provider, rawConfig, config); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("provider", provider) + .add("rawConfig", rawConfig) + .add("config", config) + .toString(); + } + } + + @VisibleForTesting + static final class ResolvedPolicySelection { + final PolicySelection policySelection; + final List serverList; + + ResolvedPolicySelection( + PolicySelection policySelection, List serverList) { + this.policySelection = checkNotNull(policySelection, "policySelection"); + this.serverList = Collections.unmodifiableList(checkNotNull(serverList, "serverList")); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("policySelection", policySelection) + .add("serverList", serverList) + .toString(); } } diff --git a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java 
b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java deleted file mode 100644 index d463293659d..00000000000 --- a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory2.java +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Copyright 2018 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.grpc.internal; - -import static com.google.common.base.Preconditions.checkNotNull; -import static io.grpc.LoadBalancer.ATTR_LOAD_BALANCING_CONFIG; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.MoreObjects; -import com.google.common.base.Objects; -import io.grpc.Attributes; -import io.grpc.ChannelLogger; -import io.grpc.ChannelLogger.ChannelLogLevel; -import io.grpc.ConnectivityState; -import io.grpc.ConnectivityStateInfo; -import io.grpc.EquivalentAddressGroup; -import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.Helper; -import io.grpc.LoadBalancer.PickResult; -import io.grpc.LoadBalancer.PickSubchannelArgs; -import io.grpc.LoadBalancer.ResolvedAddresses; -import io.grpc.LoadBalancer.Subchannel; -import io.grpc.LoadBalancer.SubchannelPicker; -import io.grpc.LoadBalancerProvider; -import io.grpc.LoadBalancerRegistry; -import io.grpc.NameResolver.ConfigOrError; -import io.grpc.Status; -import io.grpc.internal.ServiceConfigUtil.LbConfig; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import 
java.util.Map; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -@SuppressWarnings("deprecation") // after migrated to 2, we can deprecate it -public final class AutoConfiguredLoadBalancerFactory2 { - private static final Logger logger = - Logger.getLogger(AutoConfiguredLoadBalancerFactory2.class.getName()); - private static final String GRPCLB_POLICY_NAME = "grpclb"; - - private final LoadBalancerRegistry registry; - private final String defaultPolicy; - - public AutoConfiguredLoadBalancerFactory2(String defaultPolicy) { - this(LoadBalancerRegistry.getDefaultRegistry(), defaultPolicy); - } - - @VisibleForTesting - AutoConfiguredLoadBalancerFactory2(LoadBalancerRegistry registry, String defaultPolicy) { - this.registry = checkNotNull(registry, "registry"); - this.defaultPolicy = checkNotNull(defaultPolicy, "defaultPolicy"); - } - - public AutoConfiguredLoadBalancer newLoadBalancer(Helper helper) { - return new AutoConfiguredLoadBalancer(helper); - } - - private static final class NoopLoadBalancer extends LoadBalancer { - - @Override - @Deprecated - public void handleResolvedAddressGroups(List s, Attributes a) {} - - @Override - public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) {} - - @Override - public void handleNameResolutionError(Status error) {} - - @Override - public void shutdown() {} - } - - @VisibleForTesting - public final class AutoConfiguredLoadBalancer { - private final Helper helper; - private LoadBalancer delegate; - private LoadBalancerProvider delegateProvider; - private boolean roundRobinDueToGrpclbDepMissing; - - AutoConfiguredLoadBalancer(Helper helper) { - this.helper = helper; - delegateProvider = registry.getProvider(defaultPolicy); - if (delegateProvider == null) { - throw new IllegalStateException("Could not find policy '" + defaultPolicy - + "'. 
Make sure its implementation is either registered to LoadBalancerRegistry or" - + " included in META-INF/services/io.grpc.LoadBalancerProvider from your jar files."); - } - delegate = delegateProvider.newLoadBalancer(helper); - } - - public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - tryHandleResolvedAddresses(resolvedAddresses); - } - - /** - * Returns non-OK status if resolvedAddresses is empty and delegate lb requires address ({@link - * LoadBalancer#canHandleEmptyAddressListFromNameResolution()} returns {@code false}). {@code - * AutoConfiguredLoadBalancer} doesn't expose {@code - * canHandleEmptyAddressListFromNameResolution} because it depends on the delegated LB. - */ - Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - List servers = resolvedAddresses.getAddresses(); - Attributes attributes = resolvedAddresses.getAttributes(); - if (attributes.get(ATTR_LOAD_BALANCING_CONFIG) != null) { - throw new IllegalArgumentException( - "Unexpected ATTR_LOAD_BALANCING_CONFIG from upstream: " - + attributes.get(ATTR_LOAD_BALANCING_CONFIG)); - } - PolicySelection policySelection = - (PolicySelection) resolvedAddresses.getLoadBalancingPolicyConfig(); - ResolvedPolicySelection resolvedSelection; - - try { - resolvedSelection = resolveLoadBalancerProvider(servers, policySelection); - } catch (PolicyException e) { - Status s = Status.INTERNAL.withDescription(e.getMessage()); - helper.updateBalancingState(ConnectivityState.TRANSIENT_FAILURE, new FailingPicker(s)); - delegate.shutdown(); - delegateProvider = null; - delegate = new NoopLoadBalancer(); - return Status.OK; - } - PolicySelection selection = resolvedSelection.policySelection; - - if (delegateProvider == null - || !selection.provider.getPolicyName().equals(delegateProvider.getPolicyName())) { - helper.updateBalancingState(ConnectivityState.CONNECTING, new EmptyPicker()); - delegate.shutdown(); - delegateProvider = selection.provider; - LoadBalancer old = delegate; - 
delegate = delegateProvider.newLoadBalancer(helper); - helper.getChannelLogger().log( - ChannelLogLevel.INFO, "Load balancer changed from {0} to {1}", - old.getClass().getSimpleName(), delegate.getClass().getSimpleName()); - } - Object lbConfig = selection.config; - if (lbConfig != null) { - helper.getChannelLogger().log( - ChannelLogLevel.DEBUG, "Load-balancing config: {0}", selection.config); - attributes = - attributes.toBuilder().set(ATTR_LOAD_BALANCING_CONFIG, selection.rawConfig).build(); - } - - LoadBalancer delegate = getDelegate(); - if (resolvedSelection.serverList.isEmpty() - && !delegate.canHandleEmptyAddressListFromNameResolution()) { - return Status.UNAVAILABLE.withDescription( - "NameResolver returned no usable address. addrs=" + servers + ", attrs=" + attributes); - } else { - delegate.handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(resolvedSelection.serverList) - .setAttributes(attributes) - .setLoadBalancingPolicyConfig(lbConfig) - .build()); - return Status.OK; - } - } - - void handleNameResolutionError(Status error) { - getDelegate().handleNameResolutionError(error); - } - - @Deprecated - void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateInfo) { - getDelegate().handleSubchannelState(subchannel, stateInfo); - } - - void requestConnection() { - getDelegate().requestConnection(); - } - - void shutdown() { - delegate.shutdown(); - delegate = null; - } - - @VisibleForTesting - public LoadBalancer getDelegate() { - return delegate; - } - - @VisibleForTesting - void setDelegate(LoadBalancer lb) { - delegate = lb; - } - - @VisibleForTesting - LoadBalancerProvider getDelegateProvider() { - return delegateProvider; - } - - /** - * Resolves a load balancer based on given criteria. If policySelection is {@code null} and - * given servers contains any gRPC LB addresses, it will fall back to "grpclb". If no gRPC LB - * addresses are not present, it will fall back to {@link #defaultPolicy}. 
- * - * @param servers The list of servers reported - * @param policySelection the selected policy from raw service config - * @return the resolved policy selection - */ - @VisibleForTesting - ResolvedPolicySelection resolveLoadBalancerProvider( - List servers, @Nullable PolicySelection policySelection) - throws PolicyException { - // Check for balancer addresses - boolean haveBalancerAddress = false; - List backendAddrs = new ArrayList<>(); - for (EquivalentAddressGroup s : servers) { - if (s.getAttributes().get(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY) != null) { - haveBalancerAddress = true; - } else { - backendAddrs.add(s); - } - } - - if (policySelection != null) { - String policyName = policySelection.provider.getPolicyName(); - return new ResolvedPolicySelection( - policySelection, policyName.equals(GRPCLB_POLICY_NAME) ? servers : backendAddrs); - } - - if (haveBalancerAddress) { - // This is a special case where the existence of balancer address in the resolved address - // selects "grpclb" policy if the service config couldn't select a policy - LoadBalancerProvider grpclbProvider = registry.getProvider(GRPCLB_POLICY_NAME); - if (grpclbProvider == null) { - if (backendAddrs.isEmpty()) { - throw new PolicyException( - "Received ONLY balancer addresses but grpclb runtime is missing"); - } - if (!roundRobinDueToGrpclbDepMissing) { - // We don't log the warning every time we have an update. - roundRobinDueToGrpclbDepMissing = true; - String errorMsg = "Found balancer addresses but grpclb runtime is missing." - + " Will use round_robin. 
Please include grpc-grpclb in your runtime dependencies."; - helper.getChannelLogger().log(ChannelLogLevel.ERROR, errorMsg); - logger.warning(errorMsg); - } - return new ResolvedPolicySelection( - new PolicySelection( - getProviderOrThrow( - "round_robin", "received balancer addresses but grpclb runtime is missing"), - /* rawConfig = */ null, - /* config= */ null), - backendAddrs); - } - return new ResolvedPolicySelection( - new PolicySelection( - grpclbProvider, /* rawConfig= */ null, /* config= */ null), servers); - } - // No balancer address this time. If balancer address shows up later, we want to make sure - // the warning is logged one more time. - roundRobinDueToGrpclbDepMissing = false; - - // No config nor balancer address. Use default. - return new ResolvedPolicySelection( - new PolicySelection( - getProviderOrThrow(defaultPolicy, "using default policy"), - /* rawConfig= */ null, - /* config= */ null), - servers); - } - } - - private LoadBalancerProvider getProviderOrThrow(String policy, String choiceReason) - throws PolicyException { - LoadBalancerProvider provider = registry.getProvider(policy); - if (provider == null) { - throw new PolicyException( - "Trying to load '" + policy + "' because " + choiceReason + ", but it's unavailable"); - } - return provider; - } - - /** - * Parses first available LoadBalancer policy from service config. Available LoadBalancer should - * be registered to {@link LoadBalancerRegistry}. If the first available LoadBalancer policy is - * invalid, it doesn't fall-back to next available policy, instead it returns error. This also - * means, it ignores LoadBalancer policies after the first available one even if any of them are - * invalid. - * - *

Order of policy preference: - * - *

    - *
  1. Policy from "loadBalancingConfig" if present
  2. - *
  3. The policy from deprecated "loadBalancingPolicy" if present
  4. - *
- *

- * - *

Unlike a normal {@link LoadBalancer.Factory}, this accepts a full service config rather than - * the LoadBalancingConfig. - * - * @return the parsed {@link PolicySelection}, or {@code null} if no selection could be made. - */ - @Nullable - ConfigOrError parseLoadBalancerPolicy(Map serviceConfig, ChannelLogger channelLogger) { - try { - List loadBalancerConfigs = null; - if (serviceConfig != null) { - List> rawLbConfigs = - ServiceConfigUtil.getLoadBalancingConfigsFromServiceConfig(serviceConfig); - loadBalancerConfigs = ServiceConfigUtil.unwrapLoadBalancingConfigList(rawLbConfigs); - } - if (loadBalancerConfigs != null && !loadBalancerConfigs.isEmpty()) { - List policiesTried = new ArrayList<>(); - for (LbConfig lbConfig : loadBalancerConfigs) { - String policy = lbConfig.getPolicyName(); - LoadBalancerProvider provider = registry.getProvider(policy); - if (provider == null) { - policiesTried.add(policy); - } else { - if (!policiesTried.isEmpty()) { - channelLogger.log( - ChannelLogLevel.DEBUG, - "{0} specified by Service Config are not available", policiesTried); - } - ConfigOrError parsedLbPolicyConfig = - provider.parseLoadBalancingPolicyConfig(lbConfig.getRawConfigValue()); - if (parsedLbPolicyConfig.getError() != null) { - return parsedLbPolicyConfig; - } - return ConfigOrError.fromConfig( - new PolicySelection(provider, serviceConfig, parsedLbPolicyConfig.getConfig())); - } - } - return ConfigOrError.fromError( - Status.UNKNOWN.withDescription( - "None of " + policiesTried + " specified by Service Config are available.")); - } - return null; - } catch (RuntimeException e) { - return ConfigOrError.fromError( - Status.UNKNOWN.withDescription("can't parse load balancer configuration").withCause(e)); - } - } - - @VisibleForTesting - static final class PolicyException extends Exception { - private static final long serialVersionUID = 1L; - - private PolicyException(String msg) { - super(msg); - } - } - - @VisibleForTesting - static final class PolicySelection { - 
final LoadBalancerProvider provider; - @Nullable final Map rawConfig; - @Nullable final Object config; - - PolicySelection( - LoadBalancerProvider provider, - @Nullable Map rawConfig, - @Nullable Object config) { - this.provider = checkNotNull(provider, "provider"); - this.rawConfig = rawConfig; - this.config = config; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - PolicySelection that = (PolicySelection) o; - return Objects.equal(provider, that.provider) - && Objects.equal(rawConfig, that.rawConfig) - && Objects.equal(config, that.config); - } - - @Override - public int hashCode() { - return Objects.hashCode(provider, rawConfig, config); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("provider", provider) - .add("rawConfig", rawConfig) - .add("config", config) - .toString(); - } - } - - @VisibleForTesting - static final class ResolvedPolicySelection { - final PolicySelection policySelection; - final List serverList; - - ResolvedPolicySelection( - PolicySelection policySelection, List serverList) { - this.policySelection = checkNotNull(policySelection, "policySelection"); - this.serverList = Collections.unmodifiableList(checkNotNull(serverList, "serverList")); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("policySelection", policySelection) - .add("serverList", serverList) - .toString(); - } - } - - private static final class EmptyPicker extends SubchannelPicker { - - @Override - public PickResult pickSubchannel(PickSubchannelArgs args) { - return PickResult.withNoResult(); - } - } - - private static final class FailingPicker extends SubchannelPicker { - private final Status failure; - - FailingPicker(Status failure) { - this.failure = failure; - } - - @Override - public PickResult pickSubchannel(PickSubchannelArgs args) { - return 
PickResult.withError(failure); - } - } -} diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java index 86def9eb0b1..062acc3a597 100644 --- a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java +++ b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java @@ -27,6 +27,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; import com.google.common.base.Stopwatch; import com.google.common.base.Supplier; import com.google.common.util.concurrent.ListenableFuture; @@ -96,7 +97,6 @@ import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Pattern; -import javax.annotation.CheckForNull; import javax.annotation.Nullable; import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; @@ -130,6 +130,11 @@ final class ManagedChannelImpl extends ManagedChannel implements static final Status SUBCHANNEL_SHUTDOWN_STATUS = Status.UNAVAILABLE.withDescription("Subchannel shutdown invoked"); + private static final ServiceConfigHolder EMPTY_SERVICE_CONFIG = + new ServiceConfigHolder( + Collections.emptyMap(), + ManagedChannelServiceConfig.empty()); + private final InternalLogId logId; private final String target; private final NameResolverRegistry nameResolverRegistry; @@ -243,28 +248,22 @@ public void uncaughtException(Thread t, Throwable e) { private final ChannelTracer channelTracer; private final ChannelLogger channelLogger; private final InternalChannelz channelz; + // Must be mutated and read from syncContext - @CheckForNull - private Boolean haveBackends; // a flag for doing channel tracing when flipped + // a flag for doing channel tracing when flipped + private ResolutionState lastResolutionState = ResolutionState.NO_RESOLUTION; // Must be mutated and read from constructor or syncContext - // TODO(notcarl): check this value when error in 
service config resolution - @Nullable - private Map lastServiceConfig; // used for channel tracing when value changed + // used for channel tracing when value changed + private ServiceConfigHolder lastServiceConfig = EMPTY_SERVICE_CONFIG; @Nullable - private final Map defaultServiceConfig; + private final ServiceConfigHolder defaultServiceConfig; // Must be mutated and read from constructor or syncContext - // See service config error handling spec for reference. - // TODO(notcarl): check this value when error in service config resolution - @SuppressWarnings("UnusedVariable") - private boolean waitingForServiceConfig = true; + private boolean serviceConfigUpdated = false; private final boolean lookUpServiceConfig; // One instance per channel. private final ChannelBufferMeter channelBufferUsed = new ChannelBufferMeter(); - @Nullable - private Throttle throttle; - private final long perRpcBufferLimit; private final long channelBufferLimit; @@ -504,6 +503,7 @@ public ClientStream newRetriableStream( final Metadata headers, final Context context) { checkState(retryEnabled, "retry should be enabled"); + final Throttle throttle = lastServiceConfig.managedChannelServiceConfig.getRetryThrottling(); final class RetryStream extends RetriableStream { RetryStream() { super( @@ -582,18 +582,20 @@ ClientStream newSubstream(ClientStreamTracer.Factory tracerFactory, Metadata new new ExecutorHolder( checkNotNull(builder.offloadExecutorPool, "offloadExecutorPool")); this.nameResolverRegistry = builder.nameResolverRegistry; + ScParser serviceConfigParser = + new ScParser( + retryEnabled, + builder.maxRetryAttempts, + builder.maxHedgedAttempts, + loadBalancerFactory, + channelLogger); this.nameResolverArgs = NameResolver.Args.newBuilder() .setDefaultPort(builder.getDefaultPort()) .setProxyDetector(proxyDetector) .setSynchronizationContext(syncContext) .setScheduledExecutorService(scheduledExecutor) - .setServiceConfigParser( - new ScParser( - retryEnabled, - builder.maxRetryAttempts, 
- builder.maxHedgedAttempts, - loadBalancerFactory)) + .setServiceConfigParser(serviceConfigParser) .setChannelLogger(channelLogger) .setOffloadExecutor( // Avoid creating the offloadExecutor until it is first used @@ -610,10 +612,23 @@ public void execute(Runnable command) { this.delayedTransport = new DelayedClientTransport(this.executor, this.syncContext); this.delayedTransport.start(delayedTransportListener); this.backoffPolicyProvider = backoffPolicyProvider; - serviceConfigInterceptor = new ServiceConfigInterceptor( - retryEnabled, builder.maxRetryAttempts, builder.maxHedgedAttempts); - this.defaultServiceConfig = builder.defaultServiceConfig; - this.lastServiceConfig = defaultServiceConfig; + + serviceConfigInterceptor = new ServiceConfigInterceptor(retryEnabled); + if (builder.defaultServiceConfig != null) { + ConfigOrError parsedDefaultServiceConfig = + serviceConfigParser.parseServiceConfig(builder.defaultServiceConfig); + checkState( + parsedDefaultServiceConfig.getError() == null, + "Default config is invalid: %s", + parsedDefaultServiceConfig.getError()); + this.defaultServiceConfig = + new ServiceConfigHolder( + builder.defaultServiceConfig, + (ManagedChannelServiceConfig) parsedDefaultServiceConfig.getConfig()); + this.lastServiceConfig = this.defaultServiceConfig; + } else { + this.defaultServiceConfig = null; + } this.lookUpServiceConfig = builder.lookUpServiceConfig; Channel channel = new RealChannel(nameResolver.getServiceAuthority()); channel = ClientInterceptors.intercept(channel, serviceConfigInterceptor); @@ -667,11 +682,8 @@ public CallTracer create() { // May only be called in constructor or syncContext private void handleServiceConfigUpdate() { - waitingForServiceConfig = false; - serviceConfigInterceptor.handleUpdate(lastServiceConfig); - if (retryEnabled) { - throttle = ServiceConfigUtil.getThrottlePolicy(lastServiceConfig); - } + serviceConfigUpdated = true; + 
serviceConfigInterceptor.handleUpdate(lastServiceConfig.managedChannelServiceConfig); } @VisibleForTesting @@ -1309,48 +1321,73 @@ public void run() { Attributes attrs = resolutionResult.getAttributes(); channelLogger.log( ChannelLogLevel.DEBUG, "Resolved address: {0}, config={1}", servers, attrs); + ResolutionState lastResolutionStateCopy = lastResolutionState; - if (haveBackends == null || !haveBackends) { + if (lastResolutionState != ResolutionState.SUCCESS) { channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); - haveBackends = true; + lastResolutionState = ResolutionState.SUCCESS; } nameResolverBackoffPolicy = null; + ConfigOrError configOrError = resolutionResult.getServiceConfig(); + ServiceConfigHolder validServiceConfig = null; + Status serviceConfigError = null; + if (configOrError != null) { + Map rawServiceConfig = + resolutionResult.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); + validServiceConfig = configOrError.getConfig() == null + ? null + : new ServiceConfigHolder( + rawServiceConfig, (ManagedChannelServiceConfig) configOrError.getConfig()); + serviceConfigError = configOrError.getError(); + } - // Assuming no error in config resolution for now. - final Map serviceConfig = - attrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); - Map effectiveServiceConfig; + ServiceConfigHolder effectiveServiceConfig; if (!lookUpServiceConfig) { - if (serviceConfig != null) { + if (validServiceConfig != null) { channelLogger.log( ChannelLogLevel.INFO, "Service config from name resolver discarded by channel settings"); } - effectiveServiceConfig = defaultServiceConfig; + effectiveServiceConfig = + defaultServiceConfig == null ? 
EMPTY_SERVICE_CONFIG : defaultServiceConfig; + attrs = attrs.toBuilder().discard(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG).build(); } else { // Try to use config if returned from name resolver // Otherwise, try to use the default config if available - if (serviceConfig != null) { - effectiveServiceConfig = serviceConfig; - } else { + if (validServiceConfig != null) { + effectiveServiceConfig = validServiceConfig; + } else if (defaultServiceConfig != null) { effectiveServiceConfig = defaultServiceConfig; - if (defaultServiceConfig != null) { + channelLogger.log( + ChannelLogLevel.INFO, + "Received no service config, using default service config"); + } else if (serviceConfigError != null) { + if (!serviceConfigUpdated) { + // First DNS lookup has invalid service config, and cannot fall back to default channelLogger.log( ChannelLogLevel.INFO, - "Received no service config, using default service config"); + "Fallback to error due to invalid first service config without default config"); + onError(configOrError.getError()); + return; + } else { + effectiveServiceConfig = lastServiceConfig; } + } else { + effectiveServiceConfig = EMPTY_SERVICE_CONFIG; } - - // FIXME(notcarl): reference equality is not right (although not harmful) right now. - // Name resolver should return the same config if txt record is the same - if (effectiveServiceConfig != lastServiceConfig) { - channelLogger.log(ChannelLogLevel.INFO, - "Service config changed{0}", effectiveServiceConfig == null ? " to null" : ""); + if (!effectiveServiceConfig.equals(lastServiceConfig)) { + channelLogger.log( + ChannelLogLevel.INFO, + "Service config changed{0}", + effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); lastServiceConfig = effectiveServiceConfig; } try { + // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS + // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. 
But, + // lbNeedAddress is not deterministic handleServiceConfigUpdate(); } catch (RuntimeException re) { logger.log( @@ -1363,18 +1400,31 @@ public void run() { // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. if (NameResolverListener.this.helper == ManagedChannelImpl.this.lbHelper) { Attributes effectiveAttrs = attrs; - if (effectiveServiceConfig != serviceConfig) { + if (effectiveServiceConfig != validServiceConfig) { effectiveAttrs = attrs.toBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, effectiveServiceConfig) + .set( + GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, + effectiveServiceConfig.rawServiceConfig) .build(); } + Status handleResult = helper.lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(effectiveAttrs) - .build()); + .setAddresses(servers) + .setAttributes(effectiveAttrs) + .setLoadBalancingPolicyConfig( + effectiveServiceConfig.managedChannelServiceConfig.getLoadBalancingConfig()) + .build()); + if (!handleResult.isOk()) { - handleErrorInSyncContext(handleResult.augmentDescription(resolver + " was used")); + if (servers.isEmpty() && lastResolutionStateCopy == ResolutionState.SUCCESS) { + // lb doesn't expose that it needs address or not, because for some LB it is not + // deterministic. Assuming lb needs address if LB returns error when the address is + // empty and it is not the first resolution. + scheduleExponentialBackOffInSyncContext(); + } else { + handleErrorInSyncContext(handleResult.augmentDescription(resolver + " was used")); + } } } } @@ -1399,15 +1449,21 @@ public void run() { private void handleErrorInSyncContext(Status error) { logger.log(Level.WARNING, "[{0}] Failed to resolve name. 
status={1}", new Object[] {getLogId(), error}); - if (haveBackends == null || haveBackends) { + if (lastResolutionState != ResolutionState.ERROR) { channelLogger.log(ChannelLogLevel.WARNING, "Failed to resolve name: {0}", error); - haveBackends = false; + lastResolutionState = ResolutionState.ERROR; } // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. if (NameResolverListener.this.helper != ManagedChannelImpl.this.lbHelper) { return; } + helper.lb.handleNameResolutionError(error); + + scheduleExponentialBackOffInSyncContext(); + } + + private void scheduleExponentialBackOffInSyncContext() { if (scheduledNameResolverRefresh != null && scheduledNameResolverRefresh.isPending()) { // The name resolver may invoke onError multiple times, but we only want to // schedule one backoff attempt @@ -1845,17 +1901,20 @@ static final class ScParser extends NameResolver.ServiceConfigParser { private final int maxRetryAttemptsLimit; private final int maxHedgedAttemptsLimit; private final AutoConfiguredLoadBalancerFactory autoLoadBalancerFactory; + private final ChannelLogger channelLogger; ScParser( boolean retryEnabled, int maxRetryAttemptsLimit, int maxHedgedAttemptsLimit, - AutoConfiguredLoadBalancerFactory autoLoadBalancerFactory) { + AutoConfiguredLoadBalancerFactory autoLoadBalancerFactory, + ChannelLogger channelLogger) { this.retryEnabled = retryEnabled; this.maxRetryAttemptsLimit = maxRetryAttemptsLimit; this.maxHedgedAttemptsLimit = maxHedgedAttemptsLimit; this.autoLoadBalancerFactory = checkNotNull(autoLoadBalancerFactory, "autoLoadBalancerFactory"); + this.channelLogger = checkNotNull(channelLogger, "channelLogger"); } @Override @@ -1863,7 +1922,7 @@ public ConfigOrError parseServiceConfig(Map rawServiceConfig) { try { Object loadBalancingPolicySelection; ConfigOrError choiceFromLoadBalancer = - autoLoadBalancerFactory.selectLoadBalancerPolicy(rawServiceConfig); + autoLoadBalancerFactory.parseLoadBalancerPolicy(rawServiceConfig, 
channelLogger); if (choiceFromLoadBalancer == null) { loadBalancingPolicySelection = null; } else if (choiceFromLoadBalancer.getError() != null) { @@ -1895,4 +1954,54 @@ private void logWarningIfNotInSyncContext(String method) { + "See https://github.com/grpc/grpc-java/issues/5015 for more details", e); } } + + /** + * A ResolutionState indicates the status of last name resolution. + */ + enum ResolutionState { + NO_RESOLUTION, + SUCCESS, + ERROR + } + + // TODO(creamsoup) remove this class when AutoConfiguredLoadBalancerFactory doesn't require raw + // service config. + private static final class ServiceConfigHolder { + Map rawServiceConfig; + ManagedChannelServiceConfig managedChannelServiceConfig; + + ServiceConfigHolder( + Map rawServiceConfig, ManagedChannelServiceConfig managedChannelServiceConfig) { + this.rawServiceConfig = checkNotNull(rawServiceConfig, "rawServiceConfig"); + this.managedChannelServiceConfig = + checkNotNull(managedChannelServiceConfig, "managedChannelServiceConfig"); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ServiceConfigHolder that = (ServiceConfigHolder) o; + return Objects.equal(rawServiceConfig, that.rawServiceConfig) + && Objects + .equal(managedChannelServiceConfig, that.managedChannelServiceConfig); + } + + @Override + public int hashCode() { + return Objects.hashCode(rawServiceConfig, managedChannelServiceConfig); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("rawServiceConfig", rawServiceConfig) + .add("managedChannelServiceConfig", managedChannelServiceConfig) + .toString(); + } + } } diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelImpl2.java b/core/src/main/java/io/grpc/internal/ManagedChannelImpl2.java deleted file mode 100644 index d2444f1713a..00000000000 --- 
a/core/src/main/java/io/grpc/internal/ManagedChannelImpl2.java +++ /dev/null @@ -1,2007 +0,0 @@ -/* - * Copyright 2016 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.grpc.internal; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static io.grpc.ConnectivityState.IDLE; -import static io.grpc.ConnectivityState.SHUTDOWN; -import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.internal.ServiceConfigInterceptor2.HEDGING_POLICY_KEY; -import static io.grpc.internal.ServiceConfigInterceptor2.RETRY_POLICY_KEY; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.MoreObjects; -import com.google.common.base.Objects; -import com.google.common.base.Stopwatch; -import com.google.common.base.Supplier; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import io.grpc.Attributes; -import io.grpc.CallOptions; -import io.grpc.Channel; -import io.grpc.ChannelLogger; -import io.grpc.ChannelLogger.ChannelLogLevel; -import io.grpc.ClientCall; -import io.grpc.ClientInterceptor; -import io.grpc.ClientInterceptors; -import io.grpc.ClientStreamTracer; -import io.grpc.CompressorRegistry; -import io.grpc.ConnectivityState; -import 
io.grpc.ConnectivityStateInfo; -import io.grpc.Context; -import io.grpc.DecompressorRegistry; -import io.grpc.EquivalentAddressGroup; -import io.grpc.InternalChannelz; -import io.grpc.InternalChannelz.ChannelStats; -import io.grpc.InternalChannelz.ChannelTrace; -import io.grpc.InternalInstrumented; -import io.grpc.InternalLogId; -import io.grpc.InternalWithLogId; -import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.CreateSubchannelArgs; -import io.grpc.LoadBalancer.PickResult; -import io.grpc.LoadBalancer.PickSubchannelArgs; -import io.grpc.LoadBalancer.ResolvedAddresses; -import io.grpc.LoadBalancer.SubchannelPicker; -import io.grpc.LoadBalancer.SubchannelStateListener; -import io.grpc.ManagedChannel; -import io.grpc.Metadata; -import io.grpc.MethodDescriptor; -import io.grpc.NameResolver; -import io.grpc.NameResolver.ConfigOrError; -import io.grpc.NameResolver.ResolutionResult; -import io.grpc.NameResolverRegistry; -import io.grpc.ProxyDetector; -import io.grpc.Status; -import io.grpc.SynchronizationContext; -import io.grpc.SynchronizationContext.ScheduledHandle; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory2.AutoConfiguredLoadBalancer; -import io.grpc.internal.ClientCallImpl.ClientTransportProvider; -import io.grpc.internal.RetriableStream.ChannelBufferMeter; -import io.grpc.internal.RetriableStream.Throttle; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; 
-import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.regex.Pattern; -import javax.annotation.Nullable; -import javax.annotation.concurrent.GuardedBy; -import javax.annotation.concurrent.ThreadSafe; - -/** A communication channel for making outgoing RPCs. */ -@ThreadSafe -final class ManagedChannelImpl2 extends ManagedChannel implements - InternalInstrumented { - static final Logger logger = Logger.getLogger(ManagedChannelImpl2.class.getName()); - - // Matching this pattern means the target string is a URI target or at least intended to be one. - // A URI target must be an absolute hierarchical URI. - // From RFC 2396: scheme = alpha *( alpha | digit | "+" | "-" | "." ) - @VisibleForTesting - static final Pattern URI_PATTERN = Pattern.compile("[a-zA-Z][a-zA-Z0-9+.-]*:/.*"); - - static final long IDLE_TIMEOUT_MILLIS_DISABLE = -1; - - @VisibleForTesting - static final long SUBCHANNEL_SHUTDOWN_DELAY_SECONDS = 5; - - @VisibleForTesting - static final Status SHUTDOWN_NOW_STATUS = - Status.UNAVAILABLE.withDescription("Channel shutdownNow invoked"); - - @VisibleForTesting - static final Status SHUTDOWN_STATUS = - Status.UNAVAILABLE.withDescription("Channel shutdown invoked"); - - @VisibleForTesting - static final Status SUBCHANNEL_SHUTDOWN_STATUS = - Status.UNAVAILABLE.withDescription("Subchannel shutdown invoked"); - - private static final ServiceConfigHolder EMPTY_SERVICE_CONFIG = - new ServiceConfigHolder( - Collections.emptyMap(), - ManagedChannelServiceConfig2.empty()); - - private final InternalLogId logId; - private final String target; - private final NameResolverRegistry nameResolverRegistry; - private final NameResolver.Factory nameResolverFactory; - private final NameResolver.Args nameResolverArgs; - private final AutoConfiguredLoadBalancerFactory2 loadBalancerFactory; - private final ClientTransportFactory transportFactory; - private final RestrictedScheduledExecutor 
scheduledExecutor; - private final Executor executor; - private final ObjectPool executorPool; - private final ObjectPool balancerRpcExecutorPool; - private final ExecutorHolder balancerRpcExecutorHolder; - private final ExecutorHolder offloadExecutorHolder; - private final TimeProvider timeProvider; - private final int maxTraceEvents; - - @VisibleForTesting - final SynchronizationContext syncContext = new SynchronizationContext( - new Thread.UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - logger.log( - Level.SEVERE, - "[" + getLogId() + "] Uncaught exception in the SynchronizationContext. Panic!", - e); - panic(e); - } - }); - - private boolean fullStreamDecompression; - - private final DecompressorRegistry decompressorRegistry; - private final CompressorRegistry compressorRegistry; - - private final Supplier stopwatchSupplier; - /** The timout before entering idle mode. */ - private final long idleTimeoutMillis; - - private final ConnectivityStateManager channelStateManager = new ConnectivityStateManager(); - - private final ServiceConfigInterceptor2 serviceConfigInterceptor; - - private final BackoffPolicy.Provider backoffPolicyProvider; - - /** - * We delegate to this channel, so that we can have interceptors as necessary. If there aren't - * any interceptors and the {@link io.grpc.BinaryLog} is {@code null} then this will just be a - * {@link RealChannel}. - */ - private final Channel interceptorChannel; - @Nullable private final String userAgent; - - // Only null after channel is terminated. Must be assigned from the syncContext. - private NameResolver nameResolver; - - // Must be accessed from the syncContext. - private boolean nameResolverStarted; - - // null when channel is in idle mode. Must be assigned from syncContext. - @Nullable - private LbHelperImpl lbHelper; - - // Must ONLY be assigned from updateSubchannelPicker(), which is called from syncContext. - // null if channel is in idle mode. 
- @Nullable - private volatile SubchannelPicker subchannelPicker; - - // Must be accessed from the syncContext - private boolean panicMode; - - // Must be mutated from syncContext - // If any monitoring hook to be added later needs to get a snapshot of this Set, we could - // switch to a ConcurrentHashMap. - private final Set subchannels = new HashSet<>(16, .75f); - - // Must be mutated from syncContext - private final Set oobChannels = new HashSet<>(1, .75f); - - // reprocess() must be run from syncContext - private final DelayedClientTransport delayedTransport; - private final UncommittedRetriableStreamsRegistry uncommittedRetriableStreamsRegistry - = new UncommittedRetriableStreamsRegistry(); - - // Shutdown states. - // - // Channel's shutdown process: - // 1. shutdown(): stop accepting new calls from applications - // 1a shutdown <- true - // 1b subchannelPicker <- null - // 1c delayedTransport.shutdown() - // 2. delayedTransport terminated: stop stream-creation functionality - // 2a terminating <- true - // 2b loadBalancer.shutdown() - // * LoadBalancer will shutdown subchannels and OOB channels - // 2c loadBalancer <- null - // 2d nameResolver.shutdown() - // 2e nameResolver <- null - // 3. 
All subchannels and OOB channels terminated: Channel considered terminated - - private final AtomicBoolean shutdown = new AtomicBoolean(false); - // Must only be mutated and read from syncContext - private boolean shutdownNowed; - // Must only be mutated from syncContext - private volatile boolean terminating; - // Must be mutated from syncContext - private volatile boolean terminated; - private final CountDownLatch terminatedLatch = new CountDownLatch(1); - - private final CallTracer.Factory callTracerFactory; - private final CallTracer channelCallTracer; - private final ChannelTracer channelTracer; - private final ChannelLogger channelLogger; - private final InternalChannelz channelz; - - // Must be mutated and read from syncContext - // a flag for doing channel tracing when flipped - private ResolutionState lastResolutionState = ResolutionState.NO_RESOLUTION; - // Must be mutated and read from constructor or syncContext - // used for channel tracing when value changed - private ServiceConfigHolder lastServiceConfig = EMPTY_SERVICE_CONFIG; - @Nullable - private final ServiceConfigHolder defaultServiceConfig; - // Must be mutated and read from constructor or syncContext - private boolean serviceConfigUpdated = false; - private final boolean lookUpServiceConfig; - - // One instance per channel. - private final ChannelBufferMeter channelBufferUsed = new ChannelBufferMeter(); - - private final long perRpcBufferLimit; - private final long channelBufferLimit; - - // Temporary false flag that can skip the retry code path. 
- private final boolean retryEnabled; - - // Called from syncContext - private final ManagedClientTransport.Listener delayedTransportListener = - new DelayedTransportListener(); - - // Must be called from syncContext - private void maybeShutdownNowSubchannels() { - if (shutdownNowed) { - for (InternalSubchannel subchannel : subchannels) { - subchannel.shutdownNow(SHUTDOWN_NOW_STATUS); - } - for (OobChannel oobChannel : oobChannels) { - oobChannel.getInternalSubchannel().shutdownNow(SHUTDOWN_NOW_STATUS); - } - } - } - - // Must be accessed from syncContext - @VisibleForTesting - final InUseStateAggregator inUseStateAggregator = new IdleModeStateAggregator(); - - @Override - public ListenableFuture getStats() { - final SettableFuture ret = SettableFuture.create(); - final class StatsFetcher implements Runnable { - @Override - public void run() { - ChannelStats.Builder builder = new InternalChannelz.ChannelStats.Builder(); - channelCallTracer.updateBuilder(builder); - channelTracer.updateBuilder(builder); - builder.setTarget(target).setState(channelStateManager.getState()); - List children = new ArrayList<>(); - children.addAll(subchannels); - children.addAll(oobChannels); - builder.setSubchannels(children); - ret.set(builder.build()); - } - } - - // subchannels and oobchannels can only be accessed from syncContext - syncContext.execute(new StatsFetcher()); - return ret; - } - - @Override - public InternalLogId getLogId() { - return logId; - } - - // Run from syncContext - private class IdleModeTimer implements Runnable { - - @Override - public void run() { - enterIdleMode(); - } - } - - // Must be called from syncContext - private void shutdownNameResolverAndLoadBalancer(boolean channelIsActive) { - syncContext.throwIfNotInThisSynchronizationContext(); - if (channelIsActive) { - checkState(nameResolverStarted, "nameResolver is not started"); - checkState(lbHelper != null, "lbHelper is null"); - } - if (nameResolver != null) { - cancelNameResolverBackoff(); - 
nameResolver.shutdown(); - nameResolverStarted = false; - if (channelIsActive) { - nameResolver = getNameResolver(target, nameResolverFactory, nameResolverArgs); - } else { - nameResolver = null; - } - } - if (lbHelper != null) { - lbHelper.lb.shutdown(); - lbHelper = null; - } - subchannelPicker = null; - } - - /** - * Make the channel exit idle mode, if it's in it. - * - *

Must be called from syncContext - */ - @VisibleForTesting - void exitIdleMode() { - syncContext.throwIfNotInThisSynchronizationContext(); - if (shutdown.get() || panicMode) { - return; - } - if (inUseStateAggregator.isInUse()) { - // Cancel the timer now, so that a racing due timer will not put Channel on idleness - // when the caller of exitIdleMode() is about to use the returned loadBalancer. - cancelIdleTimer(false); - } else { - // exitIdleMode() may be called outside of inUseStateAggregator.handleNotInUse() while - // isInUse() == false, in which case we still need to schedule the timer. - rescheduleIdleTimer(); - } - if (lbHelper != null) { - return; - } - channelLogger.log(ChannelLogLevel.INFO, "Exiting idle mode"); - LbHelperImpl lbHelper = new LbHelperImpl(); - lbHelper.lb = loadBalancerFactory.newLoadBalancer(lbHelper); - // Delay setting lbHelper until fully initialized, since loadBalancerFactory is user code and - // may throw. We don't want to confuse our state, even if we will enter panic mode. - this.lbHelper = lbHelper; - - NameResolverListener listener = new NameResolverListener(lbHelper, nameResolver); - nameResolver.start(listener); - nameResolverStarted = true; - } - - // Must be run from syncContext - private void enterIdleMode() { - // nameResolver and loadBalancer are guaranteed to be non-null. If any of them were null, - // either the idleModeTimer ran twice without exiting the idle mode, or the task in shutdown() - // did not cancel idleModeTimer, or enterIdle() ran while shutdown or in idle, all of - // which are bugs. 
- shutdownNameResolverAndLoadBalancer(true); - delayedTransport.reprocess(null); - channelLogger.log(ChannelLogLevel.INFO, "Entering IDLE state"); - channelStateManager.gotoState(IDLE); - if (inUseStateAggregator.isInUse()) { - exitIdleMode(); - } - } - - // Must be run from syncContext - private void cancelIdleTimer(boolean permanent) { - idleTimer.cancel(permanent); - } - - // Always run from syncContext - private void rescheduleIdleTimer() { - if (idleTimeoutMillis == IDLE_TIMEOUT_MILLIS_DISABLE) { - return; - } - idleTimer.reschedule(idleTimeoutMillis, TimeUnit.MILLISECONDS); - } - - // Run from syncContext - @VisibleForTesting - class DelayedNameResolverRefresh implements Runnable { - @Override - public void run() { - scheduledNameResolverRefresh = null; - refreshNameResolution(); - } - } - - // Must be used from syncContext - @Nullable private ScheduledHandle scheduledNameResolverRefresh; - // The policy to control backoff between name resolution attempts. Non-null when an attempt is - // scheduled. Must be used from syncContext - @Nullable private BackoffPolicy nameResolverBackoffPolicy; - - // Must be run from syncContext - private void cancelNameResolverBackoff() { - syncContext.throwIfNotInThisSynchronizationContext(); - if (scheduledNameResolverRefresh != null) { - scheduledNameResolverRefresh.cancel(); - scheduledNameResolverRefresh = null; - nameResolverBackoffPolicy = null; - } - } - - /** - * Force name resolution refresh to happen immediately and reset refresh back-off. Must be run - * from syncContext. 
- */ - private void refreshAndResetNameResolution() { - syncContext.throwIfNotInThisSynchronizationContext(); - cancelNameResolverBackoff(); - refreshNameResolution(); - } - - private void refreshNameResolution() { - syncContext.throwIfNotInThisSynchronizationContext(); - if (nameResolverStarted) { - nameResolver.refresh(); - } - } - - private final class ChannelTransportProvider implements ClientTransportProvider { - @Override - public ClientTransport get(PickSubchannelArgs args) { - SubchannelPicker pickerCopy = subchannelPicker; - if (shutdown.get()) { - // If channel is shut down, delayedTransport is also shut down which will fail the stream - // properly. - return delayedTransport; - } - if (pickerCopy == null) { - final class ExitIdleModeForTransport implements Runnable { - @Override - public void run() { - exitIdleMode(); - } - } - - syncContext.execute(new ExitIdleModeForTransport()); - return delayedTransport; - } - // There is no need to reschedule the idle timer here. - // - // pickerCopy != null, which means idle timer has not expired when this method starts. - // Even if idle timer expires right after we grab pickerCopy, and it shuts down LoadBalancer - // which calls Subchannel.shutdown(), the InternalSubchannel will be actually shutdown after - // SUBCHANNEL_SHUTDOWN_DELAY_SECONDS, which gives the caller time to start RPC on it. - // - // In most cases the idle timer is scheduled to fire after the transport has created the - // stream, which would have reported in-use state to the channel that would have cancelled - // the idle timer. 
- PickResult pickResult = pickerCopy.pickSubchannel(args); - ClientTransport transport = GrpcUtil.getTransportFromPickResult( - pickResult, args.getCallOptions().isWaitForReady()); - if (transport != null) { - return transport; - } - return delayedTransport; - } - - @Override - public ClientStream newRetriableStream( - final MethodDescriptor method, - final CallOptions callOptions, - final Metadata headers, - final Context context) { - checkState(retryEnabled, "retry should be enabled"); - final Throttle throttle = lastServiceConfig.managedChannelServiceConfig.getRetryThrottling(); - final class RetryStream extends RetriableStream { - RetryStream() { - super( - method, - headers, - channelBufferUsed, - perRpcBufferLimit, - channelBufferLimit, - getCallExecutor(callOptions), - transportFactory.getScheduledExecutorService(), - callOptions.getOption(RETRY_POLICY_KEY), - callOptions.getOption(HEDGING_POLICY_KEY), - throttle); - } - - @Override - Status prestart() { - return uncommittedRetriableStreamsRegistry.add(this); - } - - @Override - void postCommit() { - uncommittedRetriableStreamsRegistry.remove(this); - } - - @Override - ClientStream newSubstream(ClientStreamTracer.Factory tracerFactory, Metadata newHeaders) { - CallOptions newOptions = callOptions.withStreamTracerFactory(tracerFactory); - ClientTransport transport = - get(new PickSubchannelArgsImpl(method, newHeaders, newOptions)); - Context origContext = context.attach(); - try { - return transport.newStream(method, newHeaders, newOptions); - } finally { - context.detach(origContext); - } - } - } - - return new RetryStream(); - } - } - - private final ClientTransportProvider transportProvider = new ChannelTransportProvider(); - - private final Rescheduler idleTimer; - - ManagedChannelImpl2( - AbstractManagedChannelImplBuilder builder, - ClientTransportFactory clientTransportFactory, - BackoffPolicy.Provider backoffPolicyProvider, - ObjectPool balancerRpcExecutorPool, - Supplier stopwatchSupplier, - List 
interceptors, - final TimeProvider timeProvider) { - this.target = checkNotNull(builder.target, "target"); - this.logId = InternalLogId.allocate("Channel", target); - this.timeProvider = checkNotNull(timeProvider, "timeProvider"); - this.executorPool = checkNotNull(builder.executorPool, "executorPool"); - this.executor = checkNotNull(executorPool.getObject(), "executor"); - this.transportFactory = - new CallCredentialsApplyingTransportFactory(clientTransportFactory, this.executor); - this.scheduledExecutor = - new RestrictedScheduledExecutor(transportFactory.getScheduledExecutorService()); - maxTraceEvents = builder.maxTraceEvents; - channelTracer = new ChannelTracer( - logId, builder.maxTraceEvents, timeProvider.currentTimeNanos(), - "Channel for '" + target + "'"); - channelLogger = new ChannelLoggerImpl(channelTracer, timeProvider); - this.nameResolverFactory = builder.getNameResolverFactory(); - ProxyDetector proxyDetector = - builder.proxyDetector != null ? builder.proxyDetector : GrpcUtil.DEFAULT_PROXY_DETECTOR; - this.retryEnabled = builder.retryEnabled && !builder.temporarilyDisableRetry; - this.loadBalancerFactory = new AutoConfiguredLoadBalancerFactory2(builder.defaultLbPolicy); - this.offloadExecutorHolder = - new ExecutorHolder( - checkNotNull(builder.offloadExecutorPool, "offloadExecutorPool")); - this.nameResolverRegistry = builder.nameResolverRegistry; - ScParser serviceConfigParser = - new ScParser( - retryEnabled, - builder.maxRetryAttempts, - builder.maxHedgedAttempts, - loadBalancerFactory, - channelLogger); - this.nameResolverArgs = - NameResolver.Args.newBuilder() - .setDefaultPort(builder.getDefaultPort()) - .setProxyDetector(proxyDetector) - .setSynchronizationContext(syncContext) - .setScheduledExecutorService(scheduledExecutor) - .setServiceConfigParser(serviceConfigParser) - .setChannelLogger(channelLogger) - .setOffloadExecutor( - // Avoid creating the offloadExecutor until it is first used - new Executor() { - @Override - public void 
execute(Runnable command) { - offloadExecutorHolder.getExecutor().execute(command); - } - }) - .build(); - this.nameResolver = getNameResolver(target, nameResolverFactory, nameResolverArgs); - this.balancerRpcExecutorPool = checkNotNull(balancerRpcExecutorPool, "balancerRpcExecutorPool"); - this.balancerRpcExecutorHolder = new ExecutorHolder(balancerRpcExecutorPool); - this.delayedTransport = new DelayedClientTransport(this.executor, this.syncContext); - this.delayedTransport.start(delayedTransportListener); - this.backoffPolicyProvider = backoffPolicyProvider; - - serviceConfigInterceptor = new ServiceConfigInterceptor2(retryEnabled); - if (builder.defaultServiceConfig != null) { - ConfigOrError parsedDefaultServiceConfig = - serviceConfigParser.parseServiceConfig(builder.defaultServiceConfig); - checkState( - parsedDefaultServiceConfig.getError() == null, - "Default config is invalid: %s", - parsedDefaultServiceConfig.getError()); - this.defaultServiceConfig = - new ServiceConfigHolder( - builder.defaultServiceConfig, - (ManagedChannelServiceConfig2) parsedDefaultServiceConfig.getConfig()); - this.lastServiceConfig = this.defaultServiceConfig; - } else { - this.defaultServiceConfig = null; - } - this.lookUpServiceConfig = builder.lookUpServiceConfig; - Channel channel = new RealChannel(nameResolver.getServiceAuthority()); - channel = ClientInterceptors.intercept(channel, serviceConfigInterceptor); - if (builder.binlog != null) { - channel = builder.binlog.wrapChannel(channel); - } - this.interceptorChannel = ClientInterceptors.intercept(channel, interceptors); - this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatchSupplier"); - if (builder.idleTimeoutMillis == IDLE_TIMEOUT_MILLIS_DISABLE) { - this.idleTimeoutMillis = builder.idleTimeoutMillis; - } else { - checkArgument( - builder.idleTimeoutMillis - >= AbstractManagedChannelImplBuilder.IDLE_MODE_MIN_TIMEOUT_MILLIS, - "invalid idleTimeoutMillis %s", builder.idleTimeoutMillis); - 
this.idleTimeoutMillis = builder.idleTimeoutMillis; - } - - idleTimer = new Rescheduler( - new IdleModeTimer(), - syncContext, - transportFactory.getScheduledExecutorService(), - stopwatchSupplier.get()); - this.fullStreamDecompression = builder.fullStreamDecompression; - this.decompressorRegistry = checkNotNull(builder.decompressorRegistry, "decompressorRegistry"); - this.compressorRegistry = checkNotNull(builder.compressorRegistry, "compressorRegistry"); - this.userAgent = builder.userAgent; - - this.channelBufferLimit = builder.retryBufferSize; - this.perRpcBufferLimit = builder.perRpcBufferLimit; - final class ChannelCallTracerFactory implements CallTracer.Factory { - @Override - public CallTracer create() { - return new CallTracer(timeProvider); - } - } - - this.callTracerFactory = new ChannelCallTracerFactory(); - channelCallTracer = callTracerFactory.create(); - this.channelz = checkNotNull(builder.channelz); - channelz.addRootChannel(this); - - if (!lookUpServiceConfig) { - if (defaultServiceConfig != null) { - channelLogger.log( - ChannelLogLevel.INFO, "Service config look-up disabled, using default service config"); - } - handleServiceConfigUpdate(); - } - } - - // May only be called in constructor or syncContext - private void handleServiceConfigUpdate() { - serviceConfigUpdated = true; - serviceConfigInterceptor.handleUpdate(lastServiceConfig.managedChannelServiceConfig); - } - - @VisibleForTesting - static NameResolver getNameResolver(String target, NameResolver.Factory nameResolverFactory, - NameResolver.Args nameResolverArgs) { - // Finding a NameResolver. Try using the target string as the URI. If that fails, try prepending - // "dns:///". - URI targetUri = null; - StringBuilder uriSyntaxErrors = new StringBuilder(); - try { - targetUri = new URI(target); - // For "localhost:8080" this would likely cause newNameResolver to return null, because - // "localhost" is parsed as the scheme. 
Will fall into the next branch and try - // "dns:///localhost:8080". - } catch (URISyntaxException e) { - // Can happen with ip addresses like "[::1]:1234" or 127.0.0.1:1234. - uriSyntaxErrors.append(e.getMessage()); - } - if (targetUri != null) { - NameResolver resolver = nameResolverFactory.newNameResolver(targetUri, nameResolverArgs); - if (resolver != null) { - return resolver; - } - // "foo.googleapis.com:8080" cause resolver to be null, because "foo.googleapis.com" is an - // unmapped scheme. Just fall through and will try "dns:///foo.googleapis.com:8080" - } - - // If we reached here, the targetUri couldn't be used. - if (!URI_PATTERN.matcher(target).matches()) { - // It doesn't look like a URI target. Maybe it's an authority string. Try with the default - // scheme from the factory. - try { - targetUri = new URI(nameResolverFactory.getDefaultScheme(), "", "/" + target, null); - } catch (URISyntaxException e) { - // Should not be possible. - throw new IllegalArgumentException(e); - } - NameResolver resolver = nameResolverFactory.newNameResolver(targetUri, nameResolverArgs); - if (resolver != null) { - return resolver; - } - } - throw new IllegalArgumentException(String.format( - "cannot find a NameResolver for %s%s", - target, uriSyntaxErrors.length() > 0 ? " (" + uriSyntaxErrors + ")" : "")); - } - - /** - * Initiates an orderly shutdown in which preexisting calls continue but new calls are immediately - * cancelled. - */ - @Override - public ManagedChannelImpl2 shutdown() { - channelLogger.log(ChannelLogLevel.DEBUG, "shutdown() called"); - if (!shutdown.compareAndSet(false, true)) { - return this; - } - - // Put gotoState(SHUTDOWN) as early into the syncContext's queue as possible. - // delayedTransport.shutdown() may also add some tasks into the queue. 
But some things inside - // delayedTransport.shutdown() like setting delayedTransport.shutdown = true are not run in the - // syncContext's queue and should not be blocked, so we do not drain() immediately here. - final class Shutdown implements Runnable { - @Override - public void run() { - channelLogger.log(ChannelLogLevel.INFO, "Entering SHUTDOWN state"); - channelStateManager.gotoState(SHUTDOWN); - } - } - - syncContext.executeLater(new Shutdown()); - - uncommittedRetriableStreamsRegistry.onShutdown(SHUTDOWN_STATUS); - final class CancelIdleTimer implements Runnable { - @Override - public void run() { - cancelIdleTimer(/* permanent= */ true); - } - } - - syncContext.execute(new CancelIdleTimer()); - return this; - } - - /** - * Initiates a forceful shutdown in which preexisting and new calls are cancelled. Although - * forceful, the shutdown process is still not instantaneous; {@link #isTerminated()} will likely - * return {@code false} immediately after this method returns. - */ - @Override - public ManagedChannelImpl2 shutdownNow() { - channelLogger.log(ChannelLogLevel.DEBUG, "shutdownNow() called"); - shutdown(); - uncommittedRetriableStreamsRegistry.onShutdownNow(SHUTDOWN_NOW_STATUS); - final class ShutdownNow implements Runnable { - @Override - public void run() { - if (shutdownNowed) { - return; - } - shutdownNowed = true; - maybeShutdownNowSubchannels(); - } - } - - syncContext.execute(new ShutdownNow()); - return this; - } - - // Called from syncContext - @VisibleForTesting - void panic(final Throwable t) { - if (panicMode) { - // Preserve the first panic information - return; - } - panicMode = true; - cancelIdleTimer(/* permanent= */ true); - shutdownNameResolverAndLoadBalancer(false); - final class PanicSubchannelPicker extends SubchannelPicker { - private final PickResult panicPickResult = - PickResult.withDrop( - Status.INTERNAL.withDescription("Panic! 
This is a bug!").withCause(t)); - - @Override - public PickResult pickSubchannel(PickSubchannelArgs args) { - return panicPickResult; - } - } - - updateSubchannelPicker(new PanicSubchannelPicker()); - channelLogger.log(ChannelLogLevel.ERROR, "PANIC! Entering TRANSIENT_FAILURE"); - channelStateManager.gotoState(TRANSIENT_FAILURE); - } - - @VisibleForTesting - boolean isInPanicMode() { - return panicMode; - } - - // Called from syncContext - private void updateSubchannelPicker(SubchannelPicker newPicker) { - subchannelPicker = newPicker; - delayedTransport.reprocess(newPicker); - } - - @Override - public boolean isShutdown() { - return shutdown.get(); - } - - @Override - public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { - return terminatedLatch.await(timeout, unit); - } - - @Override - public boolean isTerminated() { - return terminated; - } - - /* - * Creates a new outgoing call on the channel. - */ - @Override - public ClientCall newCall(MethodDescriptor method, - CallOptions callOptions) { - return interceptorChannel.newCall(method, callOptions); - } - - @Override - public String authority() { - return interceptorChannel.authority(); - } - - private Executor getCallExecutor(CallOptions callOptions) { - Executor executor = callOptions.getExecutor(); - if (executor == null) { - executor = this.executor; - } - return executor; - } - - private class RealChannel extends Channel { - // Set when the NameResolver is initially created. When we create a new NameResolver for the - // same target, the new instance must have the same value. - private final String authority; - - private RealChannel(String authority) { - this.authority = checkNotNull(authority, "authority"); - } - - @Override - public ClientCall newCall(MethodDescriptor method, - CallOptions callOptions) { - return new ClientCallImpl<>( - method, - getCallExecutor(callOptions), - callOptions, - transportProvider, - terminated ? 
null : transportFactory.getScheduledExecutorService(), - channelCallTracer, - retryEnabled) - .setFullStreamDecompression(fullStreamDecompression) - .setDecompressorRegistry(decompressorRegistry) - .setCompressorRegistry(compressorRegistry); - } - - @Override - public String authority() { - return authority; - } - } - - /** - * Terminate the channel if termination conditions are met. - */ - // Must be run from syncContext - private void maybeTerminateChannel() { - if (terminated) { - return; - } - if (shutdown.get() && subchannels.isEmpty() && oobChannels.isEmpty()) { - channelLogger.log(ChannelLogLevel.INFO, "Terminated"); - channelz.removeRootChannel(this); - executorPool.returnObject(executor); - balancerRpcExecutorHolder.release(); - offloadExecutorHolder.release(); - // Release the transport factory so that it can deallocate any resources. - transportFactory.close(); - - terminated = true; - terminatedLatch.countDown(); - } - } - - // Must be called from syncContext - private void handleInternalSubchannelState(ConnectivityStateInfo newState) { - if (newState.getState() == TRANSIENT_FAILURE || newState.getState() == IDLE) { - refreshAndResetNameResolution(); - } - } - - @Override - @SuppressWarnings("deprecation") - public ConnectivityState getState(boolean requestConnection) { - ConnectivityState savedChannelState = channelStateManager.getState(); - if (requestConnection && savedChannelState == IDLE) { - final class RequestConnection implements Runnable { - @Override - public void run() { - exitIdleMode(); - if (subchannelPicker != null) { - subchannelPicker.requestConnection(); - } - if (lbHelper != null) { - lbHelper.lb.requestConnection(); - } - } - } - - syncContext.execute(new RequestConnection()); - } - return savedChannelState; - } - - @Override - public void notifyWhenStateChanged(final ConnectivityState source, final Runnable callback) { - final class NotifyStateChanged implements Runnable { - @Override - public void run() { - 
channelStateManager.notifyWhenStateChanged(callback, executor, source); - } - } - - syncContext.execute(new NotifyStateChanged()); - } - - @Override - public void resetConnectBackoff() { - final class ResetConnectBackoff implements Runnable { - @Override - public void run() { - if (shutdown.get()) { - return; - } - if (scheduledNameResolverRefresh != null && scheduledNameResolverRefresh.isPending()) { - checkState(nameResolverStarted, "name resolver must be started"); - refreshAndResetNameResolution(); - } - for (InternalSubchannel subchannel : subchannels) { - subchannel.resetConnectBackoff(); - } - for (OobChannel oobChannel : oobChannels) { - oobChannel.resetConnectBackoff(); - } - } - } - - syncContext.execute(new ResetConnectBackoff()); - } - - @Override - public void enterIdle() { - final class PrepareToLoseNetworkRunnable implements Runnable { - @Override - public void run() { - if (shutdown.get() || lbHelper == null) { - return; - } - cancelIdleTimer(/* permanent= */ false); - enterIdleMode(); - } - } - - syncContext.execute(new PrepareToLoseNetworkRunnable()); - } - - /** - * A registry that prevents channel shutdown from killing existing retry attempts that are in - * backoff. - */ - private final class UncommittedRetriableStreamsRegistry { - // TODO(zdapeng): This means we would acquire a lock for each new retry-able stream, - // it's worthwhile to look for a lock-free approach. - final Object lock = new Object(); - - @GuardedBy("lock") - Collection uncommittedRetriableStreams = new HashSet<>(); - - @GuardedBy("lock") - Status shutdownStatus; - - void onShutdown(Status reason) { - boolean shouldShutdownDelayedTransport = false; - synchronized (lock) { - if (shutdownStatus != null) { - return; - } - shutdownStatus = reason; - // Keep the delayedTransport open until there is no more uncommitted streams, b/c those - // retriable streams, which may be in backoff and not using any transport, are already - // started RPCs. 
- if (uncommittedRetriableStreams.isEmpty()) { - shouldShutdownDelayedTransport = true; - } - } - - if (shouldShutdownDelayedTransport) { - delayedTransport.shutdown(reason); - } - } - - void onShutdownNow(Status reason) { - onShutdown(reason); - Collection streams; - - synchronized (lock) { - streams = new ArrayList<>(uncommittedRetriableStreams); - } - - for (ClientStream stream : streams) { - stream.cancel(reason); - } - delayedTransport.shutdownNow(reason); - } - - /** - * Registers a RetriableStream and return null if not shutdown, otherwise just returns the - * shutdown Status. - */ - @Nullable - Status add(RetriableStream retriableStream) { - synchronized (lock) { - if (shutdownStatus != null) { - return shutdownStatus; - } - uncommittedRetriableStreams.add(retriableStream); - return null; - } - } - - void remove(RetriableStream retriableStream) { - Status shutdownStatusCopy = null; - - synchronized (lock) { - uncommittedRetriableStreams.remove(retriableStream); - if (uncommittedRetriableStreams.isEmpty()) { - shutdownStatusCopy = shutdownStatus; - // Because retriable transport is long-lived, we take this opportunity to down-size the - // hashmap. - uncommittedRetriableStreams = new HashSet<>(); - } - } - - if (shutdownStatusCopy != null) { - delayedTransport.shutdown(shutdownStatusCopy); - } - } - } - - private class LbHelperImpl extends LoadBalancer.Helper { - AutoConfiguredLoadBalancer lb; - - @Deprecated - @Override - public AbstractSubchannel createSubchannel( - List addressGroups, Attributes attrs) { - logWarningIfNotInSyncContext("createSubchannel()"); - // TODO(ejona): can we be even stricter? Like loadBalancer == null? 
- checkNotNull(addressGroups, "addressGroups"); - checkNotNull(attrs, "attrs"); - final SubchannelImpl subchannel = createSubchannelInternal( - CreateSubchannelArgs.newBuilder() - .setAddresses(addressGroups) - .setAttributes(attrs) - .build()); - - final SubchannelStateListener listener = - new LoadBalancer.SubchannelStateListener() { - @Override - public void onSubchannelState(ConnectivityStateInfo newState) { - // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. - if (LbHelperImpl.this != ManagedChannelImpl2.this.lbHelper) { - return; - } - lb.handleSubchannelState(subchannel, newState); - } - }; - - subchannel.internalStart(listener); - return subchannel; - } - - @Override - public AbstractSubchannel createSubchannel(CreateSubchannelArgs args) { - syncContext.throwIfNotInThisSynchronizationContext(); - return createSubchannelInternal(args); - } - - private SubchannelImpl createSubchannelInternal(CreateSubchannelArgs args) { - // TODO(ejona): can we be even stricter? Like loadBalancer == null? - checkState(!terminated, "Channel is terminated"); - return new SubchannelImpl(args, this); - } - - @Override - public void updateBalancingState( - final ConnectivityState newState, final SubchannelPicker newPicker) { - checkNotNull(newState, "newState"); - checkNotNull(newPicker, "newPicker"); - logWarningIfNotInSyncContext("updateBalancingState()"); - final class UpdateBalancingState implements Runnable { - @Override - public void run() { - if (LbHelperImpl.this != lbHelper) { - return; - } - updateSubchannelPicker(newPicker); - // It's not appropriate to report SHUTDOWN state from lb. - // Ignore the case of newState == SHUTDOWN for now. 
- if (newState != SHUTDOWN) { - channelLogger.log(ChannelLogLevel.INFO, "Entering {0} state", newState); - channelStateManager.gotoState(newState); - } - } - } - - syncContext.execute(new UpdateBalancingState()); - } - - @Override - public void refreshNameResolution() { - logWarningIfNotInSyncContext("refreshNameResolution()"); - final class LoadBalancerRefreshNameResolution implements Runnable { - @Override - public void run() { - refreshAndResetNameResolution(); - } - } - - syncContext.execute(new LoadBalancerRefreshNameResolution()); - } - - @Deprecated - @Override - public void updateSubchannelAddresses( - LoadBalancer.Subchannel subchannel, List addrs) { - checkArgument(subchannel instanceof SubchannelImpl, - "subchannel must have been returned from createSubchannel"); - logWarningIfNotInSyncContext("updateSubchannelAddresses()"); - ((InternalSubchannel) subchannel.getInternalSubchannel()).updateAddresses(addrs); - } - - @Override - public ManagedChannel createOobChannel(EquivalentAddressGroup addressGroup, String authority) { - // TODO(ejona): can we be even stricter? Like terminating? 
- checkState(!terminated, "Channel is terminated"); - long oobChannelCreationTime = timeProvider.currentTimeNanos(); - InternalLogId oobLogId = InternalLogId.allocate("OobChannel", /*details=*/ null); - InternalLogId subchannelLogId = - InternalLogId.allocate("Subchannel-OOB", /*details=*/ authority); - ChannelTracer oobChannelTracer = - new ChannelTracer( - oobLogId, maxTraceEvents, oobChannelCreationTime, - "OobChannel for " + addressGroup); - final OobChannel oobChannel = new OobChannel( - authority, balancerRpcExecutorPool, transportFactory.getScheduledExecutorService(), - syncContext, callTracerFactory.create(), oobChannelTracer, channelz, timeProvider); - channelTracer.reportEvent(new ChannelTrace.Event.Builder() - .setDescription("Child OobChannel created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(oobChannelCreationTime) - .setChannelRef(oobChannel) - .build()); - ChannelTracer subchannelTracer = - new ChannelTracer(subchannelLogId, maxTraceEvents, oobChannelCreationTime, - "Subchannel for " + addressGroup); - ChannelLogger subchannelLogger = new ChannelLoggerImpl(subchannelTracer, timeProvider); - final class ManagedOobChannelCallback extends InternalSubchannel.Callback { - @Override - void onTerminated(InternalSubchannel is) { - oobChannels.remove(oobChannel); - channelz.removeSubchannel(is); - oobChannel.handleSubchannelTerminated(); - maybeTerminateChannel(); - } - - @Override - void onStateChange(InternalSubchannel is, ConnectivityStateInfo newState) { - handleInternalSubchannelState(newState); - oobChannel.handleSubchannelStateChange(newState); - } - } - - final InternalSubchannel internalSubchannel = new InternalSubchannel( - Collections.singletonList(addressGroup), - authority, userAgent, backoffPolicyProvider, transportFactory, - transportFactory.getScheduledExecutorService(), stopwatchSupplier, syncContext, - // All callback methods are run from syncContext - new ManagedOobChannelCallback(), - channelz, - 
callTracerFactory.create(), - subchannelTracer, - subchannelLogId, - subchannelLogger); - oobChannelTracer.reportEvent(new ChannelTrace.Event.Builder() - .setDescription("Child Subchannel created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(oobChannelCreationTime) - .setSubchannelRef(internalSubchannel) - .build()); - channelz.addSubchannel(oobChannel); - channelz.addSubchannel(internalSubchannel); - oobChannel.setSubchannel(internalSubchannel); - final class AddOobChannel implements Runnable { - @Override - public void run() { - if (terminating) { - oobChannel.shutdown(); - } - if (!terminated) { - // If channel has not terminated, it will track the subchannel and block termination - // for it. - oobChannels.add(oobChannel); - } - } - } - - syncContext.execute(new AddOobChannel()); - return oobChannel; - } - - @Override - public void updateOobChannelAddresses(ManagedChannel channel, EquivalentAddressGroup eag) { - checkArgument(channel instanceof OobChannel, - "channel must have been returned from createOobChannel"); - ((OobChannel) channel).updateAddresses(eag); - } - - @Override - public String getAuthority() { - return ManagedChannelImpl2.this.authority(); - } - - @Deprecated - @Override - public NameResolver.Factory getNameResolverFactory() { - return nameResolverFactory; - } - - @Override - public SynchronizationContext getSynchronizationContext() { - return syncContext; - } - - @Override - public ScheduledExecutorService getScheduledExecutorService() { - return scheduledExecutor; - } - - @Override - public ChannelLogger getChannelLogger() { - return channelLogger; - } - - @Override - public NameResolver.Args getNameResolverArgs() { - return nameResolverArgs; - } - - @Override - public NameResolverRegistry getNameResolverRegistry() { - return nameResolverRegistry; - } - } - - private final class NameResolverListener extends NameResolver.Listener2 { - final LbHelperImpl helper; - final NameResolver resolver; - - 
NameResolverListener(LbHelperImpl helperImpl, NameResolver resolver) { - this.helper = checkNotNull(helperImpl, "helperImpl"); - this.resolver = checkNotNull(resolver, "resolver"); - } - - @Override - public void onResult(final ResolutionResult resolutionResult) { - final class NamesResolved implements Runnable { - - @SuppressWarnings({"ReferenceEquality", "deprecation"}) - @Override - public void run() { - List servers = resolutionResult.getAddresses(); - Attributes attrs = resolutionResult.getAttributes(); - channelLogger.log( - ChannelLogLevel.DEBUG, "Resolved address: {0}, config={1}", servers, attrs); - ResolutionState lastResolutionStateCopy = lastResolutionState; - - if (lastResolutionState != ResolutionState.SUCCESS) { - channelLogger.log(ChannelLogLevel.INFO, "Address resolved: {0}", servers); - lastResolutionState = ResolutionState.SUCCESS; - } - - nameResolverBackoffPolicy = null; - ConfigOrError configOrError = resolutionResult.getServiceConfig(); - ServiceConfigHolder validServiceConfig = null; - Status serviceConfigError = null; - if (configOrError != null) { - Map rawServiceConfig = - resolutionResult.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); - validServiceConfig = configOrError.getConfig() == null - ? null - : new ServiceConfigHolder( - rawServiceConfig, (ManagedChannelServiceConfig2) configOrError.getConfig()); - serviceConfigError = configOrError.getError(); - } - - ServiceConfigHolder effectiveServiceConfig; - if (!lookUpServiceConfig) { - if (validServiceConfig != null) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config from name resolver discarded by channel settings"); - } - effectiveServiceConfig = - defaultServiceConfig == null ? 
EMPTY_SERVICE_CONFIG : defaultServiceConfig; - attrs = attrs.toBuilder().discard(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG).build(); - } else { - // Try to use config if returned from name resolver - // Otherwise, try to use the default config if available - if (validServiceConfig != null) { - effectiveServiceConfig = validServiceConfig; - } else if (defaultServiceConfig != null) { - effectiveServiceConfig = defaultServiceConfig; - channelLogger.log( - ChannelLogLevel.INFO, - "Received no service config, using default service config"); - } else if (serviceConfigError != null) { - if (!serviceConfigUpdated) { - // First DNS lookup has invalid service config, and cannot fall back to default - channelLogger.log( - ChannelLogLevel.INFO, - "Fallback to error due to invalid first service config without default config"); - onError(configOrError.getError()); - return; - } else { - effectiveServiceConfig = lastServiceConfig; - } - } else { - effectiveServiceConfig = EMPTY_SERVICE_CONFIG; - } - if (!effectiveServiceConfig.equals(lastServiceConfig)) { - channelLogger.log( - ChannelLogLevel.INFO, - "Service config changed{0}", - effectiveServiceConfig == EMPTY_SERVICE_CONFIG ? " to empty" : ""); - lastServiceConfig = effectiveServiceConfig; - } - - try { - // TODO(creamsoup): when `servers` is empty and lastResolutionStateCopy == SUCCESS - // and lbNeedAddress, it shouldn't call the handleServiceConfigUpdate. But, - // lbNeedAddress is not deterministic - handleServiceConfigUpdate(); - } catch (RuntimeException re) { - logger.log( - Level.WARNING, - "[" + getLogId() + "] Unexpected exception from parsing service config", - re); - } - } - - // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. 
- if (NameResolverListener.this.helper == ManagedChannelImpl2.this.lbHelper) { - Attributes effectiveAttrs = attrs; - if (effectiveServiceConfig != validServiceConfig) { - effectiveAttrs = attrs.toBuilder() - .set( - GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, - effectiveServiceConfig.rawServiceConfig) - .build(); - } - - Status handleResult = helper.lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(effectiveAttrs) - .setLoadBalancingPolicyConfig( - effectiveServiceConfig.managedChannelServiceConfig.getLoadBalancingConfig()) - .build()); - - if (!handleResult.isOk()) { - if (servers.isEmpty() && lastResolutionStateCopy == ResolutionState.SUCCESS) { - // lb doesn't expose that it needs address or not, because for some LB it is not - // deterministic. Assuming lb needs address if LB returns error when the address is - // empty and it is not the first resolution. - scheduleExponentialBackOffInSyncContext(); - } else { - handleErrorInSyncContext(handleResult.augmentDescription(resolver + " was used")); - } - } - } - } - } - - syncContext.execute(new NamesResolved()); - } - - @Override - public void onError(final Status error) { - checkArgument(!error.isOk(), "the error status must not be OK"); - final class NameResolverErrorHandler implements Runnable { - @Override - public void run() { - handleErrorInSyncContext(error); - } - } - - syncContext.execute(new NameResolverErrorHandler()); - } - - private void handleErrorInSyncContext(Status error) { - logger.log(Level.WARNING, "[{0}] Failed to resolve name. status={1}", - new Object[] {getLogId(), error}); - if (lastResolutionState != ResolutionState.ERROR) { - channelLogger.log(ChannelLogLevel.WARNING, "Failed to resolve name: {0}", error); - lastResolutionState = ResolutionState.ERROR; - } - // Call LB only if it's not shutdown. If LB is shutdown, lbHelper won't match. 
- if (NameResolverListener.this.helper != ManagedChannelImpl2.this.lbHelper) { - return; - } - - helper.lb.handleNameResolutionError(error); - - scheduleExponentialBackOffInSyncContext(); - } - - private void scheduleExponentialBackOffInSyncContext() { - if (scheduledNameResolverRefresh != null && scheduledNameResolverRefresh.isPending()) { - // The name resolver may invoke onError multiple times, but we only want to - // schedule one backoff attempt - // TODO(ericgribkoff) Update contract of NameResolver.Listener or decide if we - // want to reset the backoff interval upon repeated onError() calls - return; - } - if (nameResolverBackoffPolicy == null) { - nameResolverBackoffPolicy = backoffPolicyProvider.get(); - } - long delayNanos = nameResolverBackoffPolicy.nextBackoffNanos(); - channelLogger.log( - ChannelLogLevel.DEBUG, - "Scheduling DNS resolution backoff for {0} ns", delayNanos); - scheduledNameResolverRefresh = - syncContext.schedule( - new DelayedNameResolverRefresh(), delayNanos, TimeUnit.NANOSECONDS, - transportFactory .getScheduledExecutorService()); - } - } - - private final class SubchannelImpl extends AbstractSubchannel { - final CreateSubchannelArgs args; - final LbHelperImpl helper; - final InternalLogId subchannelLogId; - final ChannelLoggerImpl subchannelLogger; - final ChannelTracer subchannelTracer; - SubchannelStateListener listener; - InternalSubchannel subchannel; - boolean started; - boolean shutdown; - ScheduledHandle delayedShutdownTask; - - SubchannelImpl(CreateSubchannelArgs args, LbHelperImpl helper) { - this.args = checkNotNull(args, "args"); - this.helper = checkNotNull(helper, "helper"); - subchannelLogId = InternalLogId.allocate("Subchannel", /*details=*/ authority()); - subchannelTracer = new ChannelTracer( - subchannelLogId, maxTraceEvents, timeProvider.currentTimeNanos(), - "Subchannel for " + args.getAddresses()); - subchannelLogger = new ChannelLoggerImpl(subchannelTracer, timeProvider); - } - - // This can be called either 
in or outside of syncContext - // TODO(zhangkun83): merge it back into start() once the caller createSubchannel() is deleted. - private void internalStart(final SubchannelStateListener listener) { - checkState(!started, "already started"); - checkState(!shutdown, "already shutdown"); - started = true; - this.listener = listener; - // TODO(zhangkun): possibly remove the volatile of terminating when this whole method is - // required to be called from syncContext - if (terminating) { - syncContext.execute(new Runnable() { - @Override - public void run() { - listener.onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); - } - }); - return; - } - final class ManagedInternalSubchannelCallback extends InternalSubchannel.Callback { - // All callbacks are run in syncContext - @Override - void onTerminated(InternalSubchannel is) { - subchannels.remove(is); - channelz.removeSubchannel(is); - maybeTerminateChannel(); - } - - @Override - void onStateChange(InternalSubchannel is, ConnectivityStateInfo newState) { - handleInternalSubchannelState(newState); - checkState(listener != null, "listener is null"); - listener.onSubchannelState(newState); - } - - @Override - void onInUse(InternalSubchannel is) { - inUseStateAggregator.updateObjectInUse(is, true); - } - - @Override - void onNotInUse(InternalSubchannel is) { - inUseStateAggregator.updateObjectInUse(is, false); - } - } - - final InternalSubchannel internalSubchannel = new InternalSubchannel( - args.getAddresses(), - authority(), - userAgent, - backoffPolicyProvider, - transportFactory, - transportFactory.getScheduledExecutorService(), - stopwatchSupplier, - syncContext, - new ManagedInternalSubchannelCallback(), - channelz, - callTracerFactory.create(), - subchannelTracer, - subchannelLogId, - subchannelLogger); - - channelTracer.reportEvent(new ChannelTrace.Event.Builder() - .setDescription("Child Subchannel started") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - 
.setTimestampNanos(timeProvider.currentTimeNanos()) - .setSubchannelRef(internalSubchannel) - .build()); - - this.subchannel = internalSubchannel; - // TODO(zhangkun83): no need to schedule on syncContext when this whole method is required - // to be called from syncContext - syncContext.execute(new Runnable() { - @Override - public void run() { - channelz.addSubchannel(internalSubchannel); - subchannels.add(internalSubchannel); - } - }); - } - - @Override - public void start(SubchannelStateListener listener) { - syncContext.throwIfNotInThisSynchronizationContext(); - internalStart(listener); - } - - @Override - InternalInstrumented getInstrumentedInternalSubchannel() { - checkState(started, "not started"); - return subchannel; - } - - @Override - public void shutdown() { - // TODO(zhangkun83): replace shutdown() with internalShutdown() to turn the warning into an - // exception. - logWarningIfNotInSyncContext("Subchannel.shutdown()"); - syncContext.execute(new Runnable() { - @Override - public void run() { - internalShutdown(); - } - }); - } - - private void internalShutdown() { - syncContext.throwIfNotInThisSynchronizationContext(); - if (subchannel == null) { - // start() was not successful - shutdown = true; - return; - } - if (shutdown) { - if (terminating && delayedShutdownTask != null) { - // shutdown() was previously called when terminating == false, thus a delayed shutdown() - // was scheduled. Now since terminating == true, We should expedite the shutdown. - delayedShutdownTask.cancel(); - delayedShutdownTask = null; - // Will fall through to the subchannel.shutdown() at the end. - } else { - return; - } - } else { - shutdown = true; - } - // Add a delay to shutdown to deal with the race between 1) a transport being picked and - // newStream() being called on it, and 2) its Subchannel is shut down by LoadBalancer (e.g., - // because of address change, or because LoadBalancer is shutdown by Channel entering idle - // mode). 
If (2) wins, the app will see a spurious error. We work around this by delaying - // shutdown of Subchannel for a few seconds here. - // - // TODO(zhangkun83): consider a better approach - // (https://github.com/grpc/grpc-java/issues/2562). - if (!terminating) { - final class ShutdownSubchannel implements Runnable { - @Override - public void run() { - subchannel.shutdown(SUBCHANNEL_SHUTDOWN_STATUS); - } - } - - delayedShutdownTask = syncContext.schedule( - new LogExceptionRunnable(new ShutdownSubchannel()), - SUBCHANNEL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS, - transportFactory.getScheduledExecutorService()); - return; - } - // When terminating == true, no more real streams will be created. It's safe and also - // desirable to shutdown timely. - subchannel.shutdown(SHUTDOWN_STATUS); - } - - @Override - public void requestConnection() { - logWarningIfNotInSyncContext("Subchannel.requestConnection()"); - checkState(started, "not started"); - subchannel.obtainActiveTransport(); - } - - @Override - public List getAllAddresses() { - logWarningIfNotInSyncContext("Subchannel.getAllAddresses()"); - checkState(started, "not started"); - return subchannel.getAddressGroups(); - } - - @Override - public Attributes getAttributes() { - return args.getAttributes(); - } - - @Override - public String toString() { - return subchannelLogId.toString(); - } - - @Override - public Channel asChannel() { - checkState(started, "not started"); - return new SubchannelChannel( - subchannel, balancerRpcExecutorHolder.getExecutor(), - transportFactory.getScheduledExecutorService(), - callTracerFactory.create()); - } - - @Override - public Object getInternalSubchannel() { - checkState(started, "Subchannel is not started"); - return subchannel; - } - - @Override - public ChannelLogger getChannelLogger() { - return subchannelLogger; - } - - @Override - public void updateAddresses(List addrs) { - syncContext.throwIfNotInThisSynchronizationContext(); - 
subchannel.updateAddresses(addrs); - } - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("logId", logId.getId()) - .add("target", target) - .toString(); - } - - /** - * Called from syncContext. - */ - private final class DelayedTransportListener implements ManagedClientTransport.Listener { - @Override - public void transportShutdown(Status s) { - checkState(shutdown.get(), "Channel must have been shut down"); - } - - @Override - public void transportReady() { - // Don't care - } - - @Override - public void transportInUse(final boolean inUse) { - inUseStateAggregator.updateObjectInUse(delayedTransport, inUse); - } - - @Override - public void transportTerminated() { - checkState(shutdown.get(), "Channel must have been shut down"); - terminating = true; - shutdownNameResolverAndLoadBalancer(false); - // No need to call channelStateManager since we are already in SHUTDOWN state. - // Until LoadBalancer is shutdown, it may still create new subchannels. We catch them - // here. - maybeShutdownNowSubchannels(); - maybeTerminateChannel(); - } - } - - /** - * Must be accessed from syncContext. - */ - private final class IdleModeStateAggregator extends InUseStateAggregator { - @Override - protected void handleInUse() { - exitIdleMode(); - } - - @Override - protected void handleNotInUse() { - if (shutdown.get()) { - return; - } - rescheduleIdleTimer(); - } - } - - /** - * Lazily request for Executor from an executor pool. 
- */ - private static final class ExecutorHolder { - private final ObjectPool pool; - private Executor executor; - - ExecutorHolder(ObjectPool executorPool) { - this.pool = checkNotNull(executorPool, "executorPool"); - } - - synchronized Executor getExecutor() { - if (executor == null) { - executor = checkNotNull(pool.getObject(), "%s.getObject()", executor); - } - return executor; - } - - synchronized void release() { - if (executor != null) { - executor = pool.returnObject(executor); - } - } - } - - private static final class RestrictedScheduledExecutor implements ScheduledExecutorService { - final ScheduledExecutorService delegate; - - private RestrictedScheduledExecutor(ScheduledExecutorService delegate) { - this.delegate = checkNotNull(delegate, "delegate"); - } - - @Override - public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { - return delegate.schedule(callable, delay, unit); - } - - @Override - public ScheduledFuture schedule(Runnable cmd, long delay, TimeUnit unit) { - return delegate.schedule(cmd, delay, unit); - } - - @Override - public ScheduledFuture scheduleAtFixedRate( - Runnable command, long initialDelay, long period, TimeUnit unit) { - return delegate.scheduleAtFixedRate(command, initialDelay, period, unit); - } - - @Override - public ScheduledFuture scheduleWithFixedDelay( - Runnable command, long initialDelay, long delay, TimeUnit unit) { - return delegate.scheduleWithFixedDelay(command, initialDelay, delay, unit); - } - - @Override - public boolean awaitTermination(long timeout, TimeUnit unit) - throws InterruptedException { - return delegate.awaitTermination(timeout, unit); - } - - @Override - public List> invokeAll(Collection> tasks) - throws InterruptedException { - return delegate.invokeAll(tasks); - } - - @Override - public List> invokeAll( - Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException { - return delegate.invokeAll(tasks, timeout, unit); - } - - @Override - public T 
invokeAny(Collection> tasks) - throws InterruptedException, ExecutionException { - return delegate.invokeAny(tasks); - } - - @Override - public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return delegate.invokeAny(tasks, timeout, unit); - } - - @Override - public boolean isShutdown() { - return delegate.isShutdown(); - } - - @Override - public boolean isTerminated() { - return delegate.isTerminated(); - } - - @Override - public void shutdown() { - throw new UnsupportedOperationException("Restricted: shutdown() is not allowed"); - } - - @Override - public List shutdownNow() { - throw new UnsupportedOperationException("Restricted: shutdownNow() is not allowed"); - } - - @Override - public Future submit(Callable task) { - return delegate.submit(task); - } - - @Override - public Future submit(Runnable task) { - return delegate.submit(task); - } - - @Override - public Future submit(Runnable task, T result) { - return delegate.submit(task, result); - } - - @Override - public void execute(Runnable command) { - delegate.execute(command); - } - } - - @VisibleForTesting - static final class ScParser extends NameResolver.ServiceConfigParser { - - private final boolean retryEnabled; - private final int maxRetryAttemptsLimit; - private final int maxHedgedAttemptsLimit; - private final AutoConfiguredLoadBalancerFactory2 autoLoadBalancerFactory; - private final ChannelLogger channelLogger; - - ScParser( - boolean retryEnabled, - int maxRetryAttemptsLimit, - int maxHedgedAttemptsLimit, - AutoConfiguredLoadBalancerFactory2 autoLoadBalancerFactory, - ChannelLogger channelLogger) { - this.retryEnabled = retryEnabled; - this.maxRetryAttemptsLimit = maxRetryAttemptsLimit; - this.maxHedgedAttemptsLimit = maxHedgedAttemptsLimit; - this.autoLoadBalancerFactory = - checkNotNull(autoLoadBalancerFactory, "autoLoadBalancerFactory"); - this.channelLogger = checkNotNull(channelLogger, "channelLogger"); - } - 
- @Override - public ConfigOrError parseServiceConfig(Map rawServiceConfig) { - try { - Object loadBalancingPolicySelection; - ConfigOrError choiceFromLoadBalancer = - autoLoadBalancerFactory.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - if (choiceFromLoadBalancer == null) { - loadBalancingPolicySelection = null; - } else if (choiceFromLoadBalancer.getError() != null) { - return ConfigOrError.fromError(choiceFromLoadBalancer.getError()); - } else { - loadBalancingPolicySelection = choiceFromLoadBalancer.getConfig(); - } - return ConfigOrError.fromConfig( - ManagedChannelServiceConfig2.fromServiceConfig( - rawServiceConfig, - retryEnabled, - maxRetryAttemptsLimit, - maxHedgedAttemptsLimit, - loadBalancingPolicySelection)); - } catch (RuntimeException e) { - return ConfigOrError.fromError( - Status.UNKNOWN.withDescription("failed to parse service config").withCause(e)); - } - } - } - - private void logWarningIfNotInSyncContext(String method) { - try { - syncContext.throwIfNotInThisSynchronizationContext(); - } catch (IllegalStateException e) { - logger.log(Level.WARNING, - method + " should be called from SynchronizationContext. " - + "This warning will become an exception in a future release. " - + "See https://github.com/grpc/grpc-java/issues/5015 for more details", e); - } - } - - /** - * A ResolutionState indicates the status of last name resolution. - */ - enum ResolutionState { - NO_RESOLUTION, - SUCCESS, - ERROR - } - - // TODO(creamsoup) remove this class when AutoConfiguredLoadBalancerFactory doesn't require raw - // service config. 
- private static final class ServiceConfigHolder { - Map rawServiceConfig; - ManagedChannelServiceConfig2 managedChannelServiceConfig; - - ServiceConfigHolder( - Map rawServiceConfig, ManagedChannelServiceConfig2 managedChannelServiceConfig) { - this.rawServiceConfig = checkNotNull(rawServiceConfig, "rawServiceConfig"); - this.managedChannelServiceConfig = - checkNotNull(managedChannelServiceConfig, "managedChannelServiceConfig"); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ServiceConfigHolder that = (ServiceConfigHolder) o; - return Objects.equal(rawServiceConfig, that.rawServiceConfig) - && Objects - .equal(managedChannelServiceConfig, that.managedChannelServiceConfig); - } - - @Override - public int hashCode() { - return Objects.hashCode(rawServiceConfig, managedChannelServiceConfig); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("rawServiceConfig", rawServiceConfig) - .add("managedChannelServiceConfig", managedChannelServiceConfig) - .toString(); - } - } -} diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig.java b/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig.java index e01bb1b5243..32b9433b1ea 100644 --- a/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig.java +++ b/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig.java @@ -39,9 +39,7 @@ final class ManagedChannelServiceConfig { private final Map serviceMethodMap; private final Map serviceMap; - // TODO(notcarl/zdapeng): use retryThrottling here @Nullable - @SuppressWarnings("unused") private final Throttle retryThrottling; @Nullable private final Object loadBalancingConfig; @@ -57,6 +55,16 @@ final class ManagedChannelServiceConfig { this.loadBalancingConfig = loadBalancingConfig; } + /** Returns an empty {@link ManagedChannelServiceConfig}. 
*/ + static ManagedChannelServiceConfig empty() { + return + new ManagedChannelServiceConfig( + new HashMap(), + new HashMap(), + /* retryThrottling= */ null, + /* loadBalancingConfig= */ null); + } + /** * Parses the Channel level config values (e.g. excludes load balancing) */ @@ -138,6 +146,41 @@ Object getLoadBalancingConfig() { return loadBalancingConfig; } + @Nullable + Throttle getRetryThrottling() { + return retryThrottling; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ManagedChannelServiceConfig that = (ManagedChannelServiceConfig) o; + return Objects.equal(serviceMethodMap, that.serviceMethodMap) + && Objects.equal(serviceMap, that.serviceMap) + && Objects.equal(retryThrottling, that.retryThrottling) + && Objects.equal(loadBalancingConfig, that.loadBalancingConfig); + } + + @Override + public int hashCode() { + return Objects.hashCode(serviceMethodMap, serviceMap, retryThrottling, loadBalancingConfig); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("serviceMethodMap", serviceMethodMap) + .add("serviceMap", serviceMap) + .add("retryThrottling", retryThrottling) + .add("loadBalancingConfig", loadBalancingConfig) + .toString(); + } + /** * Equivalent of MethodConfig from a ServiceConfig with restrictions from Channel setting. */ diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig2.java b/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig2.java deleted file mode 100644 index e20336215c6..00000000000 --- a/core/src/main/java/io/grpc/internal/ManagedChannelServiceConfig2.java +++ /dev/null @@ -1,322 +0,0 @@ -/* - * Copyright 2019 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.grpc.internal; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.MoreObjects; -import com.google.common.base.Objects; -import com.google.common.base.Strings; -import io.grpc.MethodDescriptor; -import io.grpc.internal.RetriableStream.Throttle; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import javax.annotation.Nullable; - -/** - * {@link ManagedChannelServiceConfig2} is a fully parsed and validated representation of service - * configuration data. - */ -final class ManagedChannelServiceConfig2 { - - private final Map serviceMethodMap; - private final Map serviceMap; - @Nullable - private final Throttle retryThrottling; - @Nullable - private final Object loadBalancingConfig; - - ManagedChannelServiceConfig2( - Map serviceMethodMap, - Map serviceMap, - @Nullable Throttle retryThrottling, - @Nullable Object loadBalancingConfig) { - this.serviceMethodMap = Collections.unmodifiableMap(new HashMap<>(serviceMethodMap)); - this.serviceMap = Collections.unmodifiableMap(new HashMap<>(serviceMap)); - this.retryThrottling = retryThrottling; - this.loadBalancingConfig = loadBalancingConfig; - } - - /** Returns an empty {@link ManagedChannelServiceConfig2}. 
*/ - static ManagedChannelServiceConfig2 empty() { - return - new ManagedChannelServiceConfig2( - new HashMap(), - new HashMap(), - /* retryThrottling= */ null, - /* loadBalancingConfig= */ null); - } - - /** - * Parses the Channel level config values (e.g. excludes load balancing) - */ - static ManagedChannelServiceConfig2 fromServiceConfig( - Map serviceConfig, - boolean retryEnabled, - int maxRetryAttemptsLimit, - int maxHedgedAttemptsLimit, - @Nullable Object loadBalancingConfig) { - Throttle retryThrottling = null; - if (retryEnabled) { - retryThrottling = ServiceConfigUtil.getThrottlePolicy(serviceConfig); - } - Map serviceMethodMap = new HashMap<>(); - Map serviceMap = new HashMap<>(); - - // Try and do as much validation here before we swap out the existing configuration. In case - // the input is invalid, we don't want to lose the existing configuration. - List> methodConfigs = - ServiceConfigUtil.getMethodConfigFromServiceConfig(serviceConfig); - - if (methodConfigs == null) { - // this is surprising, but possible. 
- return new ManagedChannelServiceConfig2( - serviceMethodMap, serviceMap, retryThrottling, loadBalancingConfig); - } - - for (Map methodConfig : methodConfigs) { - MethodInfo info = new MethodInfo( - methodConfig, retryEnabled, maxRetryAttemptsLimit, maxHedgedAttemptsLimit); - - List> nameList = - ServiceConfigUtil.getNameListFromMethodConfig(methodConfig); - - checkArgument( - nameList != null && !nameList.isEmpty(), "no names in method config %s", methodConfig); - for (Map name : nameList) { - String serviceName = ServiceConfigUtil.getServiceFromName(name); - checkArgument(!Strings.isNullOrEmpty(serviceName), "missing service name"); - String methodName = ServiceConfigUtil.getMethodFromName(name); - if (Strings.isNullOrEmpty(methodName)) { - // Service scoped config - checkArgument( - !serviceMap.containsKey(serviceName), "Duplicate service %s", serviceName); - serviceMap.put(serviceName, info); - } else { - // Method scoped config - String fullMethodName = MethodDescriptor.generateFullMethodName(serviceName, methodName); - checkArgument( - !serviceMethodMap.containsKey(fullMethodName), - "Duplicate method name %s", - fullMethodName); - serviceMethodMap.put(fullMethodName, info); - } - } - } - - return new ManagedChannelServiceConfig2( - serviceMethodMap, serviceMap, retryThrottling, loadBalancingConfig); - } - - /** - * Returns the per-service configuration for the channel. - */ - Map getServiceMap() { - return serviceMap; - } - - /** - * Returns the per-method configuration for the channel. 
- */ - Map getServiceMethodMap() { - return serviceMethodMap; - } - - @VisibleForTesting - @Nullable - Object getLoadBalancingConfig() { - return loadBalancingConfig; - } - - @Nullable - Throttle getRetryThrottling() { - return retryThrottling; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ManagedChannelServiceConfig2 that = (ManagedChannelServiceConfig2) o; - return Objects.equal(serviceMethodMap, that.serviceMethodMap) - && Objects.equal(serviceMap, that.serviceMap) - && Objects.equal(retryThrottling, that.retryThrottling) - && Objects.equal(loadBalancingConfig, that.loadBalancingConfig); - } - - @Override - public int hashCode() { - return Objects.hashCode(serviceMethodMap, serviceMap, retryThrottling, loadBalancingConfig); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("serviceMethodMap", serviceMethodMap) - .add("serviceMap", serviceMap) - .add("retryThrottling", retryThrottling) - .add("loadBalancingConfig", loadBalancingConfig) - .toString(); - } - - /** - * Equivalent of MethodConfig from a ServiceConfig with restrictions from Channel setting. - */ - static final class MethodInfo { - // TODO(carl-mastrangelo): add getters for these fields and make them private. - final Long timeoutNanos; - final Boolean waitForReady; - final Integer maxInboundMessageSize; - final Integer maxOutboundMessageSize; - final RetryPolicy retryPolicy; - final HedgingPolicy hedgingPolicy; - - /** - * Constructor. - * - * @param retryEnabled when false, the argument maxRetryAttemptsLimit will have no effect. 
- */ - MethodInfo( - Map methodConfig, boolean retryEnabled, int maxRetryAttemptsLimit, - int maxHedgedAttemptsLimit) { - timeoutNanos = ServiceConfigUtil.getTimeoutFromMethodConfig(methodConfig); - waitForReady = ServiceConfigUtil.getWaitForReadyFromMethodConfig(methodConfig); - maxInboundMessageSize = - ServiceConfigUtil.getMaxResponseMessageBytesFromMethodConfig(methodConfig); - if (maxInboundMessageSize != null) { - checkArgument( - maxInboundMessageSize >= 0, - "maxInboundMessageSize %s exceeds bounds", maxInboundMessageSize); - } - maxOutboundMessageSize = - ServiceConfigUtil.getMaxRequestMessageBytesFromMethodConfig(methodConfig); - if (maxOutboundMessageSize != null) { - checkArgument( - maxOutboundMessageSize >= 0, - "maxOutboundMessageSize %s exceeds bounds", maxOutboundMessageSize); - } - - Map retryPolicyMap = - retryEnabled ? ServiceConfigUtil.getRetryPolicyFromMethodConfig(methodConfig) : null; - retryPolicy = retryPolicyMap == null - ? RetryPolicy.DEFAULT : retryPolicy(retryPolicyMap, maxRetryAttemptsLimit); - - Map hedgingPolicyMap = - retryEnabled ? ServiceConfigUtil.getHedgingPolicyFromMethodConfig(methodConfig) : null; - hedgingPolicy = hedgingPolicyMap == null - ? 
HedgingPolicy.DEFAULT : hedgingPolicy(hedgingPolicyMap, maxHedgedAttemptsLimit); - } - - @Override - public int hashCode() { - return Objects.hashCode( - timeoutNanos, - waitForReady, - maxInboundMessageSize, - maxOutboundMessageSize, - retryPolicy, - hedgingPolicy); - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof MethodInfo)) { - return false; - } - MethodInfo that = (MethodInfo) other; - return Objects.equal(this.timeoutNanos, that.timeoutNanos) - && Objects.equal(this.waitForReady, that.waitForReady) - && Objects.equal(this.maxInboundMessageSize, that.maxInboundMessageSize) - && Objects.equal(this.maxOutboundMessageSize, that.maxOutboundMessageSize) - && Objects.equal(this.retryPolicy, that.retryPolicy) - && Objects.equal(this.hedgingPolicy, that.hedgingPolicy); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("timeoutNanos", timeoutNanos) - .add("waitForReady", waitForReady) - .add("maxInboundMessageSize", maxInboundMessageSize) - .add("maxOutboundMessageSize", maxOutboundMessageSize) - .add("retryPolicy", retryPolicy) - .add("hedgingPolicy", hedgingPolicy) - .toString(); - } - - private static RetryPolicy retryPolicy(Map retryPolicy, int maxAttemptsLimit) { - int maxAttempts = checkNotNull( - ServiceConfigUtil.getMaxAttemptsFromRetryPolicy(retryPolicy), - "maxAttempts cannot be empty"); - checkArgument(maxAttempts >= 2, "maxAttempts must be greater than 1: %s", maxAttempts); - maxAttempts = Math.min(maxAttempts, maxAttemptsLimit); - - long initialBackoffNanos = checkNotNull( - ServiceConfigUtil.getInitialBackoffNanosFromRetryPolicy(retryPolicy), - "initialBackoff cannot be empty"); - checkArgument( - initialBackoffNanos > 0, - "initialBackoffNanos must be greater than 0: %s", - initialBackoffNanos); - - long maxBackoffNanos = checkNotNull( - ServiceConfigUtil.getMaxBackoffNanosFromRetryPolicy(retryPolicy), - "maxBackoff cannot be empty"); - checkArgument( - maxBackoffNanos > 0, 
"maxBackoff must be greater than 0: %s", maxBackoffNanos); - - double backoffMultiplier = checkNotNull( - ServiceConfigUtil.getBackoffMultiplierFromRetryPolicy(retryPolicy), - "backoffMultiplier cannot be empty"); - checkArgument( - backoffMultiplier > 0, - "backoffMultiplier must be greater than 0: %s", - backoffMultiplier); - - return new RetryPolicy( - maxAttempts, initialBackoffNanos, maxBackoffNanos, backoffMultiplier, - ServiceConfigUtil.getRetryableStatusCodesFromRetryPolicy(retryPolicy)); - } - - private static HedgingPolicy hedgingPolicy( - Map hedgingPolicy, int maxAttemptsLimit) { - int maxAttempts = checkNotNull( - ServiceConfigUtil.getMaxAttemptsFromHedgingPolicy(hedgingPolicy), - "maxAttempts cannot be empty"); - checkArgument(maxAttempts >= 2, "maxAttempts must be greater than 1: %s", maxAttempts); - maxAttempts = Math.min(maxAttempts, maxAttemptsLimit); - - long hedgingDelayNanos = checkNotNull( - ServiceConfigUtil.getHedgingDelayNanosFromHedgingPolicy(hedgingPolicy), - "hedgingDelay cannot be empty"); - checkArgument( - hedgingDelayNanos >= 0, "hedgingDelay must not be negative: %s", hedgingDelayNanos); - - return new HedgingPolicy( - maxAttempts, hedgingDelayNanos, - ServiceConfigUtil.getNonFatalStatusCodesFromHedgingPolicy(hedgingPolicy)); - } - } -} diff --git a/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor.java b/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor.java index f33eaf39f72..f27f9efa788 100644 --- a/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor.java +++ b/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor.java @@ -26,8 +26,6 @@ import io.grpc.Deadline; import io.grpc.MethodDescriptor; import io.grpc.internal.ManagedChannelServiceConfig.MethodInfo; -import java.util.HashMap; -import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import javax.annotation.CheckForNull; @@ -40,34 +38,20 @@ final class ServiceConfigInterceptor 
implements ClientInterceptor { // Map from method name to MethodInfo @VisibleForTesting - final AtomicReference managedChannelServiceConfig - = new AtomicReference<>(); + final AtomicReference managedChannelServiceConfig = + new AtomicReference<>(); private final boolean retryEnabled; - private final int maxRetryAttemptsLimit; - private final int maxHedgedAttemptsLimit; // Setting this to true and observing this equal to true are run in different threads. private volatile boolean initComplete; - ServiceConfigInterceptor( - boolean retryEnabled, int maxRetryAttemptsLimit, int maxHedgedAttemptsLimit) { + ServiceConfigInterceptor(boolean retryEnabled) { this.retryEnabled = retryEnabled; - this.maxRetryAttemptsLimit = maxRetryAttemptsLimit; - this.maxHedgedAttemptsLimit = maxHedgedAttemptsLimit; } - void handleUpdate(@Nullable Map serviceConfig) { - // TODO(carl-mastrangelo): delete this. - ManagedChannelServiceConfig conf; - if (serviceConfig == null) { - conf = new ManagedChannelServiceConfig( - new HashMap(), new HashMap(), null, null); - } else { - conf = ManagedChannelServiceConfig.fromServiceConfig( - serviceConfig, retryEnabled, maxRetryAttemptsLimit, maxHedgedAttemptsLimit, null); - } - managedChannelServiceConfig.set(conf); + void handleUpdate(@Nullable ManagedChannelServiceConfig serviceConfig) { + managedChannelServiceConfig.set(serviceConfig); initComplete = true; } diff --git a/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor2.java b/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor2.java deleted file mode 100644 index bd1476d4b4c..00000000000 --- a/core/src/main/java/io/grpc/internal/ServiceConfigInterceptor2.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2018 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.grpc.internal; - -import static com.google.common.base.Verify.verify; - -import com.google.common.annotations.VisibleForTesting; -import io.grpc.CallOptions; -import io.grpc.Channel; -import io.grpc.ClientCall; -import io.grpc.ClientInterceptor; -import io.grpc.Deadline; -import io.grpc.MethodDescriptor; -import io.grpc.internal.ManagedChannelServiceConfig2.MethodInfo; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import javax.annotation.CheckForNull; -import javax.annotation.Nullable; - -/** - * Modifies RPCs in conformance with a Service Config. - */ -final class ServiceConfigInterceptor2 implements ClientInterceptor { - - // Map from method name to MethodInfo - @VisibleForTesting - final AtomicReference managedChannelServiceConfig = - new AtomicReference<>(); - - private final boolean retryEnabled; - - // Setting this to true and observing this equal to true are run in different threads. 
- private volatile boolean initComplete; - - ServiceConfigInterceptor2(boolean retryEnabled) { - this.retryEnabled = retryEnabled; - } - - void handleUpdate(@Nullable ManagedChannelServiceConfig2 serviceConfig) { - managedChannelServiceConfig.set(serviceConfig); - initComplete = true; - } - - static final CallOptions.Key RETRY_POLICY_KEY = - CallOptions.Key.create("internal-retry-policy"); - static final CallOptions.Key HEDGING_POLICY_KEY = - CallOptions.Key.create("internal-hedging-policy"); - - @Override - public ClientCall interceptCall( - final MethodDescriptor method, CallOptions callOptions, Channel next) { - if (retryEnabled) { - if (initComplete) { - final RetryPolicy retryPolicy = getRetryPolicyFromConfig(method); - final class ImmediateRetryPolicyProvider implements RetryPolicy.Provider { - @Override - public RetryPolicy get() { - return retryPolicy; - } - } - - final HedgingPolicy hedgingPolicy = getHedgingPolicyFromConfig(method); - final class ImmediateHedgingPolicyProvider implements HedgingPolicy.Provider { - @Override - public HedgingPolicy get() { - return hedgingPolicy; - } - } - - verify( - retryPolicy.equals(RetryPolicy.DEFAULT) || hedgingPolicy.equals(HedgingPolicy.DEFAULT), - "Can not apply both retry and hedging policy for the method '%s'", method); - - callOptions = callOptions - .withOption(RETRY_POLICY_KEY, new ImmediateRetryPolicyProvider()) - .withOption(HEDGING_POLICY_KEY, new ImmediateHedgingPolicyProvider()); - } else { - final class DelayedRetryPolicyProvider implements RetryPolicy.Provider { - /** - * Returns RetryPolicy.DEFAULT if name resolving is not complete at the moment the method - * is invoked, otherwise returns the RetryPolicy computed from service config. - * - *

Note that this method is used no more than once for each call. - */ - @Override - public RetryPolicy get() { - if (!initComplete) { - return RetryPolicy.DEFAULT; - } - return getRetryPolicyFromConfig(method); - } - } - - final class DelayedHedgingPolicyProvider implements HedgingPolicy.Provider { - /** - * Returns HedgingPolicy.DEFAULT if name resolving is not complete at the moment the - * method is invoked, otherwise returns the HedgingPolicy computed from service config. - * - *

Note that this method is used no more than once for each call. - */ - @Override - public HedgingPolicy get() { - if (!initComplete) { - return HedgingPolicy.DEFAULT; - } - HedgingPolicy hedgingPolicy = getHedgingPolicyFromConfig(method); - verify( - hedgingPolicy.equals(HedgingPolicy.DEFAULT) - || getRetryPolicyFromConfig(method).equals(RetryPolicy.DEFAULT), - "Can not apply both retry and hedging policy for the method '%s'", method); - return hedgingPolicy; - } - } - - callOptions = callOptions - .withOption(RETRY_POLICY_KEY, new DelayedRetryPolicyProvider()) - .withOption(HEDGING_POLICY_KEY, new DelayedHedgingPolicyProvider()); - } - } - - MethodInfo info = getMethodInfo(method); - if (info == null) { - return next.newCall(method, callOptions); - } - - if (info.timeoutNanos != null) { - Deadline newDeadline = Deadline.after(info.timeoutNanos, TimeUnit.NANOSECONDS); - Deadline existingDeadline = callOptions.getDeadline(); - // If the new deadline is sooner than the existing deadline, swap them. - if (existingDeadline == null || newDeadline.compareTo(existingDeadline) < 0) { - callOptions = callOptions.withDeadline(newDeadline); - } - } - if (info.waitForReady != null) { - callOptions = - info.waitForReady ? 
callOptions.withWaitForReady() : callOptions.withoutWaitForReady(); - } - if (info.maxInboundMessageSize != null) { - Integer existingLimit = callOptions.getMaxInboundMessageSize(); - if (existingLimit != null) { - callOptions = callOptions.withMaxInboundMessageSize( - Math.min(existingLimit, info.maxInboundMessageSize)); - } else { - callOptions = callOptions.withMaxInboundMessageSize(info.maxInboundMessageSize); - } - } - if (info.maxOutboundMessageSize != null) { - Integer existingLimit = callOptions.getMaxOutboundMessageSize(); - if (existingLimit != null) { - callOptions = callOptions.withMaxOutboundMessageSize( - Math.min(existingLimit, info.maxOutboundMessageSize)); - } else { - callOptions = callOptions.withMaxOutboundMessageSize(info.maxOutboundMessageSize); - } - } - - return next.newCall(method, callOptions); - } - - @CheckForNull - private MethodInfo getMethodInfo(MethodDescriptor method) { - ManagedChannelServiceConfig2 mcsc = managedChannelServiceConfig.get(); - MethodInfo info = null; - if (mcsc != null) { - info = mcsc.getServiceMethodMap().get(method.getFullMethodName()); - } - if (info == null && mcsc != null) { - String serviceName = method.getServiceName(); - info = mcsc.getServiceMap().get(serviceName); - } - return info; - } - - @VisibleForTesting - RetryPolicy getRetryPolicyFromConfig(MethodDescriptor method) { - MethodInfo info = getMethodInfo(method); - return info == null ? RetryPolicy.DEFAULT : info.retryPolicy; - } - - @VisibleForTesting - HedgingPolicy getHedgingPolicyFromConfig(MethodDescriptor method) { - MethodInfo info = getMethodInfo(method); - return info == null ? 
HedgingPolicy.DEFAULT : info.hedgingPolicy; - } -} diff --git a/core/src/test/java/io/grpc/internal/AbstractManagedChannelImplBuilderTest.java b/core/src/test/java/io/grpc/internal/AbstractManagedChannelImplBuilderTest.java index 1a39671af5d..f7f79f0b5fe 100644 --- a/core/src/test/java/io/grpc/internal/AbstractManagedChannelImplBuilderTest.java +++ b/core/src/test/java/io/grpc/internal/AbstractManagedChannelImplBuilderTest.java @@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; import static org.mockito.Mockito.mock; import com.google.common.util.concurrent.MoreExecutors; @@ -481,19 +480,6 @@ public void disableNameResolverServiceConfig() { assertThat(builder.lookUpServiceConfig).isFalse(); } - @Test - public void enableServiceConfigErrorHandling() { - String propertyValue = System.getProperty( - AbstractManagedChannelImplBuilder.ENABLE_SERVICE_CONFIG_ERROR_HANDLING_PROPERTY); - assumeTrue(propertyValue == null); - - Builder builder = new Builder("target"); - assertThat(builder.enableServiceConfigErrorHandling).isFalse(); - - builder.enableServiceConfigErrorHandling(); - assertThat(builder.enableServiceConfigErrorHandling).isTrue(); - } - static class Builder extends AbstractManagedChannelImplBuilder { Builder(String target) { super(target); diff --git a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java index 66e69bc4b21..9232772778a 100644 --- a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java +++ b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java @@ -51,23 +51,25 @@ import io.grpc.LoadBalancerProvider; import io.grpc.LoadBalancerRegistry; import io.grpc.ManagedChannel; +import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; import 
io.grpc.SynchronizationContext; import io.grpc.grpclb.GrpclbLoadBalancerProvider; import io.grpc.internal.AutoConfiguredLoadBalancerFactory.AutoConfiguredLoadBalancer; import io.grpc.internal.AutoConfiguredLoadBalancerFactory.PolicyException; import io.grpc.internal.AutoConfiguredLoadBalancerFactory.PolicySelection; +import io.grpc.internal.AutoConfiguredLoadBalancerFactory.ResolvedPolicySelection; import io.grpc.util.ForwardingLoadBalancerHelper; import java.net.SocketAddress; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -78,8 +80,8 @@ /** * Unit tests for {@link AutoConfiguredLoadBalancerFactory}. 
*/ -@Deprecated // to be migrate to AutoConfiguredLoadBalancerFactoryTest2 @RunWith(JUnit4.class) +// TODO(creamsoup) remove backward compatible check when fully migrated @SuppressWarnings("deprecation") public class AutoConfiguredLoadBalancerFactoryTest { private static final LoadBalancerRegistry defaultRegistry = @@ -90,12 +92,18 @@ public class AutoConfiguredLoadBalancerFactoryTest { private final ChannelLogger channelLogger = mock(ChannelLogger.class); private final LoadBalancer testLbBalancer = mock(LoadBalancer.class); private final LoadBalancer testLbBalancer2 = mock(LoadBalancer.class); - private final LoadBalancerProvider testLbBalancerProvider = - mock(LoadBalancerProvider.class, - delegatesTo(new FakeLoadBalancerProvider("test_lb", testLbBalancer))); - private final LoadBalancerProvider testLbBalancerProvider2 = - mock(LoadBalancerProvider.class, - delegatesTo(new FakeLoadBalancerProvider("test_lb2", testLbBalancer2))); + private final AtomicReference nextParsedConfigOrError = + new AtomicReference<>(ConfigOrError.fromConfig("default")); + private final AtomicReference nextParsedConfigOrError2 = + new AtomicReference<>(ConfigOrError.fromConfig("default2")); + private final FakeLoadBalancerProvider testLbBalancerProvider = + mock(FakeLoadBalancerProvider.class, + delegatesTo( + new FakeLoadBalancerProvider("test_lb", testLbBalancer, nextParsedConfigOrError))); + private final FakeLoadBalancerProvider testLbBalancerProvider2 = + mock(FakeLoadBalancerProvider.class, + delegatesTo( + new FakeLoadBalancerProvider("test_lb2", testLbBalancer2, nextParsedConfigOrError2))); @Before public void setUp() { @@ -190,6 +198,7 @@ public Subchannel createSubchannel(CreateSubchannelArgs args) { ResolvedAddresses.newBuilder() .setAddresses(servers) .setAttributes(Attributes.EMPTY) + .setLoadBalancingPolicyConfig(null) .build()); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); @@ -197,13 +206,11 @@ public Subchannel createSubchannel(CreateSubchannelArgs args) 
{ } @Test - public void handleResolvedAddressGroups_shutsDownOldBalancer() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("loadBalancingPolicy", "round_robin"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); + public void handleResolvedAddressGroups_shutsDownOldBalancer() throws Exception { + Map serviceConfig = + parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": { } } ] }"); + ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + final List servers = Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); Helper helper = new TestHelper() { @@ -232,7 +239,7 @@ public void shutdown() { Status handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(servers) - .setAttributes(serviceConfigAttrs) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); @@ -242,13 +249,13 @@ public void shutdown() { } @Test + @SuppressWarnings("unchecked") public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exception { - Map serviceConfig = + Map rawServiceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); + ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfigs.getConfig()).isNotNull(); + final List servers = Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); Helper helper = new TestHelper(); @@ -257,7 +264,7 @@ public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exc Status handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(servers) - 
.setAttributes(serviceConfigAttrs) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); verify(testLbBalancerProvider).newLoadBalancer(same(helper)); @@ -267,22 +274,22 @@ public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exc ArgumentCaptor.forClass(ResolvedAddresses.class); verify(testLbBalancer).handleResolvedAddresses(resultCaptor.capture()); assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); - Attributes actualAttributes = resultCaptor.getValue().getAttributes(); - assertThat(actualAttributes.get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(Collections.singletonMap("setting1", "high")); + assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) + .isEqualTo(rawServiceConfig); verify(testLbBalancer, atLeast(0)).canHandleEmptyAddressListFromNameResolution(); + ArgumentCaptor> lbConfigCaptor = ArgumentCaptor.forClass(Map.class); + verify(testLbBalancerProvider).parseLoadBalancingPolicyConfig(lbConfigCaptor.capture()); + assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "high"); verifyNoMoreInteractions(testLbBalancer); - serviceConfig = + rawServiceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"low\" } } ] }"); - serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); + lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(servers) - .setAttributes(serviceConfigAttrs) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); resultCaptor = @@ -290,10 +297,11 @@ public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exc verify(testLbBalancer, times(2)).handleResolvedAddresses(resultCaptor.capture()); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); 
assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); - actualAttributes = resultCaptor.getValue().getAttributes(); - // But the balancer config is changed. - assertThat(actualAttributes.get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(Collections.singletonMap("setting1", "low")); + assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) + .isEqualTo(rawServiceConfig); + verify(testLbBalancerProvider, times(2)) + .parseLoadBalancingPolicyConfig(lbConfigCaptor.capture()); + assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "low"); // Service config didn't change policy, thus the delegateLb is not swapped verifyNoMoreInteractions(testLbBalancer); verify(testLbBalancerProvider).newLoadBalancer(any(Helper.class)); @@ -304,7 +312,9 @@ public void handleResolvedAddressGroups_propagateOnlyBackendAddrsToDelegate() th // This case only happens when grpclb is missing. We will use a local registry LoadBalancerRegistry registry = new LoadBalancerRegistry(); registry.register(new PickFirstLoadBalancerProvider()); - registry.register(new FakeLoadBalancerProvider("round_robin", testLbBalancer)); + registry.register( + new FakeLoadBalancerProvider( + "round_robin", testLbBalancer, /* nextParsedLbPolicyConfig= */ null)); final List servers = Arrays.asList( @@ -339,11 +349,11 @@ public void handleResolvedAddressGroups_delegateDoNotAcceptEmptyAddressList_noth Map serviceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }"); + ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(serviceConfig, helper.getChannelLogger()); Status handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(Collections.emptyList()) - .setAttributes(Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build()) + .setLoadBalancingPolicyConfig(lbConfig.getConfig()) .build()); 
assertThat(testLbBalancer.canHandleEmptyAddressListFromNameResolution()).isFalse(); @@ -358,13 +368,14 @@ public void handleResolvedAddressGroups_delegateAcceptsEmptyAddressList() Helper helper = new TestHelper(); AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); - Map serviceConfig = + Map rawServiceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb2\": { \"setting1\": \"high\" } } ] }"); + ConfigOrError lbConfigs = + lbf.parseLoadBalancerPolicy(rawServiceConfig, helper.getChannelLogger()); Status handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(Collections.emptyList()) - .setAttributes(Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build()) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); @@ -374,26 +385,25 @@ public void handleResolvedAddressGroups_delegateAcceptsEmptyAddressList() ArgumentCaptor.forClass(ResolvedAddresses.class); verify(testLbBalancer2).handleResolvedAddresses(resultCaptor.capture()); assertThat(resultCaptor.getValue().getAddresses()).isEmpty(); - Attributes actualAttributes = resultCaptor.getValue().getAttributes(); - - Map lbConfig = actualAttributes.get(LoadBalancer.ATTR_LOAD_BALANCING_CONFIG); - assertThat(lbConfig).isEqualTo(Collections.singletonMap("setting1", "high")); - assertThat(actualAttributes.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isSameInstanceAs(serviceConfig); + assertThat(resultCaptor.getValue().getLoadBalancingPolicyConfig()) + .isEqualTo(nextParsedConfigOrError2.get().getConfig()); + assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) + .isEqualTo(rawServiceConfig); } @Test public void decideLoadBalancerProvider_noBalancerAddresses_noServiceConfig_pickFirst() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = null; + PolicySelection 
policySelection = null; List servers = Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider).isInstanceOf(PickFirstLoadBalancerProvider.class); + assertThat(selection.policySelection.provider) + .isInstanceOf(PickFirstLoadBalancerProvider.class); assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isNull(); + assertThat(selection.policySelection.config).isNull(); verifyZeroInteractions(channelLogger); } @@ -402,39 +412,43 @@ public void decideLoadBalancerProvider_noBalancerAddresses_noServiceConfig_custo throws Exception { AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory("test_lb") .newLoadBalancer(new TestHelper()); - Map serviceConfig = null; + PolicySelection policySelection = null; List servers = Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider).isSameInstanceAs(testLbBalancerProvider); + assertThat(selection.policySelection.provider).isSameInstanceAs(testLbBalancerProvider); assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isNull(); + assertThat(selection.policySelection.config).isNull(); verifyZeroInteractions(channelLogger); } @Test public void decideLoadBalancerProvider_oneBalancer_noServiceConfig_grpclb() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = null; + PolicySelection policySelection = null; List servers = Collections.singletonList( new EquivalentAddressGroup( new SocketAddress(){}, 
Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); + assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isNull(); + assertThat(selection.policySelection.config).isNull(); verifyZeroInteractions(channelLogger); } @Test public void decideLoadBalancerProvider_serviceConfigLbPolicy() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("loadBalancingPolicy", "round_robin"); + Map rawServiceConfig = + parseConfig("{\"loadBalancingPolicy\": \"round_robin\"}"); + + ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfig.getConfig()).isNotNull(); + PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); List servers = Arrays.asList( new EquivalentAddressGroup( @@ -443,21 +457,23 @@ public void decideLoadBalancerProvider_serviceConfigLbPolicy() throws Exception new EquivalentAddressGroup( new SocketAddress(){})); List backends = Arrays.asList(servers.get(1)); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider.getClass().getName()).isEqualTo( + assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); assertThat(selection.serverList).isEqualTo(backends); - assertThat(selection.config).isEqualTo(Collections.emptyMap()); 
verifyZeroInteractions(channelLogger); } - @SuppressWarnings("unchecked") @Test public void decideLoadBalancerProvider_serviceConfigLbConfig() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": {} } ] }"); + Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); + + ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfig.getConfig()).isNotNull(); + PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); List servers = Arrays.asList( new EquivalentAddressGroup( @@ -466,55 +482,54 @@ public void decideLoadBalancerProvider_serviceConfigLbConfig() throws Exception new EquivalentAddressGroup( new SocketAddress(){})); List backends = Arrays.asList(servers.get(1)); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider.getClass().getName()).isEqualTo( + assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); assertThat(selection.serverList).isEqualTo(backends); - assertThat(selection.config).isEqualTo(Collections.emptyMap()); verifyZeroInteractions(channelLogger); } @Test public void decideLoadBalancerProvider_grpclbConfigPropagated() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = + Map rawServiceConfig = parseConfig( "{\"loadBalancingConfig\": [" - + "{\"grpclb\": {\"childPolicy\": [ {\"pick_first\": {} } ] } }" - + "] }"); + + "{\"grpclb\": {\"childPolicy\": [ {\"pick_first\": {} } ] } }" + + "] }"); + ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfig.getConfig()).isNotNull(); + 
PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); + List servers = Collections.singletonList( new EquivalentAddressGroup( new SocketAddress(){}, Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); + assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isEqualTo( - parseConfig("{\"childPolicy\": [ {\"pick_first\": {} } ] }")); + assertThat(selection.policySelection.config) + .isEqualTo(((PolicySelection) lbConfig.getConfig()).config); verifyZeroInteractions(channelLogger); } @Test public void decideLoadBalancerProvider_policyUnavailButGrpclbAddressPresent() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [" - + "{\"unavail\": {} }" - + "] }"); + List servers = Collections.singletonList( new EquivalentAddressGroup( new SocketAddress(){}, Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, null); - assertThat(selection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); + assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isNull(); + assertThat(selection.policySelection.config).isNull(); verifyZeroInteractions(channelLogger); } @@ -524,34 +539,32 @@ public void 
decideLoadBalancerProvider_grpclbProviderNotFound_fallbackToRoundRob LoadBalancerRegistry registry = new LoadBalancerRegistry(); registry.register(new PickFirstLoadBalancerProvider()); LoadBalancerProvider fakeRoundRobinProvider = - new FakeLoadBalancerProvider("round_robin", testLbBalancer); + new FakeLoadBalancerProvider("round_robin", testLbBalancer, null); registry.register(fakeRoundRobinProvider); AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory( registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"grpclb\": {} } ] }"); List servers = Arrays.asList( new EquivalentAddressGroup( new SocketAddress(){}, Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, null); - assertThat(selection.provider).isSameInstanceAs(fakeRoundRobinProvider); - assertThat(selection.config).isNull(); + assertThat(selection.policySelection.provider).isSameInstanceAs(fakeRoundRobinProvider); + assertThat(selection.policySelection.config).isNull(); verify(channelLogger).log( eq(ChannelLogLevel.ERROR), startsWith("Found balancer addresses but grpclb runtime is missing")); // Called for the second time, the warning is only logged once - selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + selection = lb.resolveLoadBalancerProvider(servers, null); - assertThat(selection.provider).isSameInstanceAs(fakeRoundRobinProvider); + assertThat(selection.policySelection.provider).isSameInstanceAs(fakeRoundRobinProvider); + assertThat(selection.policySelection.config).isNull(); // Balancer addresses are filtered out in the server list passed to round_robin assertThat(selection.serverList).containsExactly(servers.get(1)); - 
assertThat(selection.config).isNull(); - verifyNoMoreInteractions(channelLogger); + verifyNoMoreInteractions(channelLogger);; } @Test @@ -559,18 +572,16 @@ public void decideLoadBalancerProvider_grpclbProviderNotFound_noBackendAddress() throws Exception { LoadBalancerRegistry registry = new LoadBalancerRegistry(); registry.register(new PickFirstLoadBalancerProvider()); - registry.register(new FakeLoadBalancerProvider("round_robin", testLbBalancer)); + registry.register(new FakeLoadBalancerProvider("round_robin", testLbBalancer, null)); AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory( registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"grpclb\": {} } ] }"); List servers = Collections.singletonList( new EquivalentAddressGroup( new SocketAddress(){}, Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); try { - lb.decideLoadBalancerProvider(servers, serviceConfig); + lb.resolveLoadBalancerProvider(servers, null); fail("Should throw"); } catch (PolicyException e) { assertThat(e) @@ -579,105 +590,26 @@ public void decideLoadBalancerProvider_grpclbProviderNotFound_noBackendAddress() } } - @Test - public void decideLoadBalancerProvider_serviceConfigLbPolicyOverridesDefault() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("loadBalancingPolicy", "round_robin"); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); - - assertThat(selection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.config).isEqualTo(Collections.emptyMap()); - verifyZeroInteractions(channelLogger); - } - @Test public void 
decideLoadBalancerProvider_serviceConfigLbConfigOverridesDefault() throws Exception { AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": {\"setting1\": \"high\"} } ] }"); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); - - assertThat(selection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isEqualTo(Collections.singletonMap("setting1", "high")); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbPolicyFailsOnUnknown() { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("loadBalancingPolicy", "MAGIC_BALANCER"); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - try { - lb.decideLoadBalancerProvider(servers, serviceConfig); - fail(); - } catch (PolicyException e) { - assertThat(e).hasMessageThat().isEqualTo( - "None of [magic_balancer] specified by Service Config are available."); - } - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbConfigFailsOnUnknown() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"magic_balancer\": {} } ] }"); + Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": {} } ] }"); + ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfigs.getConfig()).isNotNull(); + PolicySelection policySelection = (PolicySelection) lbConfigs.getConfig(); List servers = 
Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - try { - lb.decideLoadBalancerProvider(servers, serviceConfig); - fail(); - } catch (PolicyException e) { - assertThat(e).hasMessageThat().isEqualTo( - "None of [magic_balancer] specified by Service Config are available."); - } - } - @Test - public void decideLoadBalancerProvider_serviceConfigLbConfigSkipUnknown() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [ {\"magic_balancer\": {} }, {\"round_robin\": {} } ] }"); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider(servers, serviceConfig); + ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - assertThat(selection.provider.getClass().getName()).isEqualTo( + assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isEqualTo(Collections.emptyMap()); - verify(channelLogger).log( - eq(ChannelLogLevel.DEBUG), - eq("{0} specified by Service Config are not available"), - eq(new LinkedHashSet<>(Arrays.asList("magic_balancer")))); - } - - @Test - public void decideLoadBalancerProvider_serviceConfigHasZeroLbConfig() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - PolicySelection selection = lb.decideLoadBalancerProvider( - servers, Collections.emptyMap()); - - assertThat(selection.provider).isInstanceOf(PickFirstLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.config).isNull(); verifyZeroInteractions(channelLogger); } @Test 
- public void channelTracing_lbPolicyChanged() { + public void channelTracing_lbPolicyChanged() throws Exception { final FakeClock clock = new FakeClock(); List servers = Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); @@ -734,38 +666,44 @@ public ScheduledExecutorService getScheduledExecutorService() { assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); verifyNoMoreInteractions(channelLogger); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("loadBalancingPolicy", "round_robin"); + ConfigOrError testLbParsedConfig = ConfigOrError.fromConfig("foo"); + nextParsedConfigOrError.set(testLbParsedConfig); + Map serviceConfig = + parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { } } ] }"); + ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(servers) - .setAttributes(Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build()) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); verify(channelLogger).log( eq(ChannelLogLevel.INFO), eq("Load balancer changed from {0} to {1}"), - eq("PickFirstLoadBalancer"), eq("RoundRobinLoadBalancer")); + eq("PickFirstLoadBalancer"), + eq(testLbBalancer.getClass().getSimpleName())); + verify(channelLogger).log( eq(ChannelLogLevel.DEBUG), eq("Load-balancing config: {0}"), - eq(Collections.emptyMap())); + eq(testLbParsedConfig.getConfig())); verifyNoMoreInteractions(channelLogger); - serviceConfig.put("loadBalancingPolicy", "round_robin"); + testLbParsedConfig = ConfigOrError.fromConfig("bar"); + nextParsedConfigOrError.set(testLbParsedConfig); + serviceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { } } ] }"); + lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); handleResult = lb.tryHandleResolvedAddresses( 
ResolvedAddresses.newBuilder() .setAddresses(servers) - .setAttributes(Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build()) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - verify(channelLogger, times(2)).log( + verify(channelLogger).log( eq(ChannelLogLevel.DEBUG), eq("Load-balancing config: {0}"), - eq(Collections.emptyMap())); + eq(testLbParsedConfig.getConfig())); verifyNoMoreInteractions(channelLogger); servers = Collections.singletonList(new EquivalentAddressGroup( @@ -781,11 +719,122 @@ public ScheduledExecutorService getScheduledExecutorService() { verify(channelLogger).log( eq(ChannelLogLevel.INFO), eq("Load balancer changed from {0} to {1}"), - eq("RoundRobinLoadBalancer"), eq("GrpclbLoadBalancer")); + eq(testLbBalancer.getClass().getSimpleName()), eq("GrpclbLoadBalancer")); verifyNoMoreInteractions(channelLogger); } + @Test + public void parseLoadBalancerConfig_failedOnUnknown() throws Exception { + Map serviceConfig = + parseConfig("{\"loadBalancingConfig\": [ {\"magic_balancer\": {} } ] }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed.getError()).isNotNull(); + assertThat(parsed.getError().getDescription()) + .isEqualTo("None of [magic_balancer] specified by Service Config are available."); + } + + @Test + public void parseLoadBalancerPolicy_failedOnUnknown() throws Exception { + Map serviceConfig = + parseConfig("{\"loadBalancingPolicy\": \"magic_balancer\"}"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed.getError()).isNotNull(); + assertThat(parsed.getError().getDescription()) + .isEqualTo("None of [magic_balancer] specified by Service Config are available."); + } + + @Test + public void parseLoadBalancerConfig_multipleValidPolicies() throws Exception { + Map serviceConfig = + parseConfig( + 
"{\"loadBalancingConfig\": [" + + "{\"round_robin\": {}}," + + "{\"test_lb\": {} } ] }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed).isNotNull(); + assertThat(parsed.getError()).isNull(); + assertThat(parsed.getConfig()).isInstanceOf(PolicySelection.class); + assertThat(((PolicySelection) parsed.getConfig()).provider.getClass().getName()) + .isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); + } + + @Test + public void parseLoadBalancerConfig_policyShouldBeIgnoredIfConfigExists() throws Exception { + Map serviceConfig = + parseConfig( + "{\"loadBalancingConfig\": [{\"round_robin\": {} } ]," + + "\"loadBalancingPolicy\": \"pick_first\" }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed).isNotNull(); + assertThat(parsed.getError()).isNull(); + assertThat(parsed.getConfig()).isInstanceOf(PolicySelection.class); + assertThat(((PolicySelection) parsed.getConfig()).provider.getClass().getName()) + .isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); + } + + @Test + public void parseLoadBalancerConfig_policyShouldBeIgnoredEvenIfUnknownPolicyExists() + throws Exception { + Map serviceConfig = + parseConfig( + "{\"loadBalancingConfig\": [{\"magic_balancer\": {} } ]," + + "\"loadBalancingPolicy\": \"round_robin\" }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed.getError()).isNotNull(); + assertThat(parsed.getError().getDescription()) + .isEqualTo("None of [magic_balancer] specified by Service Config are available."); + } + + @Test + @SuppressWarnings("unchecked") + public void parseLoadBalancerConfig_firstInvalidPolicy() throws Exception { + when(testLbBalancerProvider.parseLoadBalancingPolicyConfig(any(Map.class))) + .thenReturn(ConfigOrError.fromError(Status.UNKNOWN)); + Map serviceConfig = + parseConfig( + "{\"loadBalancingConfig\": [" + + 
"{\"test_lb\": {}}," + + "{\"round_robin\": {} } ] }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed).isNotNull(); + assertThat(parsed.getConfig()).isNull(); + assertThat(parsed.getError()).isEqualTo(Status.UNKNOWN); + } + + @Test + @SuppressWarnings("unchecked") + public void parseLoadBalancerConfig_firstValidSecondInvalidPolicy() throws Exception { + when(testLbBalancerProvider.parseLoadBalancingPolicyConfig(any(Map.class))) + .thenReturn(ConfigOrError.fromError(Status.UNKNOWN)); + Map serviceConfig = + parseConfig( + "{\"loadBalancingConfig\": [" + + "{\"round_robin\": {}}," + + "{\"test_lb\": {} } ] }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed).isNotNull(); + assertThat(parsed.getConfig()).isNotNull(); + assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull(); + } + + @Test + public void parseLoadBalancerConfig_someProvidesAreNotAvailable() throws Exception { + Map serviceConfig = + parseConfig("{\"loadBalancingConfig\": [ " + + "{\"magic_balancer\": {} }," + + "{\"round_robin\": {}} ] }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); + assertThat(parsed).isNotNull(); + assertThat(parsed.getConfig()).isNotNull(); + assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull(); + verify(channelLogger).log( + eq(ChannelLogLevel.DEBUG), + eq("{0} specified by Service Config are not available"), + eq(new ArrayList<>(Collections.singletonList("magic_balancer")))); + } + + public static class ForwardingLoadBalancer extends LoadBalancer { private final LoadBalancer delegate; @@ -886,13 +935,18 @@ public void updateAddresses(List addrs) { } } - private static final class FakeLoadBalancerProvider extends LoadBalancerProvider { + private static class FakeLoadBalancerProvider extends LoadBalancerProvider { private final String policyName; private final LoadBalancer balancer; + private final 
AtomicReference nextParsedLbPolicyConfig; - FakeLoadBalancerProvider(String policyName, LoadBalancer balancer) { + FakeLoadBalancerProvider( + String policyName, + LoadBalancer balancer, + AtomicReference nextParsedLbPolicyConfig) { this.policyName = policyName; this.balancer = balancer; + this.nextParsedLbPolicyConfig = nextParsedLbPolicyConfig; } @Override @@ -914,5 +968,14 @@ public String getPolicyName() { public LoadBalancer newLoadBalancer(Helper helper) { return balancer; } + + @Override + public ConfigOrError parseLoadBalancingPolicyConfig( + Map rawLoadBalancingPolicyConfig) { + if (nextParsedLbPolicyConfig == null) { + return super.parseLoadBalancingPolicyConfig(rawLoadBalancingPolicyConfig); + } + return nextParsedLbPolicyConfig.get(); + } } } diff --git a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java deleted file mode 100644 index c60c6eb4397..00000000000 --- a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest2.java +++ /dev/null @@ -1,981 +0,0 @@ -/* - * Copyright 2018 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.grpc.internal; - -import static com.google.common.truth.Truth.assertThat; -import static io.grpc.LoadBalancer.ATTR_LOAD_BALANCING_CONFIG; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.AdditionalAnswers.delegatesTo; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.same; -import static org.mockito.ArgumentMatchers.startsWith; -import static org.mockito.Mockito.RETURNS_DEEP_STUBS; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.google.common.base.Preconditions; -import io.grpc.Attributes; -import io.grpc.ChannelLogger; -import io.grpc.ChannelLogger.ChannelLogLevel; -import io.grpc.ConnectivityState; -import io.grpc.ConnectivityStateInfo; -import io.grpc.EquivalentAddressGroup; -import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.CreateSubchannelArgs; -import io.grpc.LoadBalancer.Helper; -import io.grpc.LoadBalancer.ResolvedAddresses; -import io.grpc.LoadBalancer.Subchannel; -import io.grpc.LoadBalancer.SubchannelPicker; -import io.grpc.LoadBalancer.SubchannelStateListener; -import io.grpc.LoadBalancerProvider; -import io.grpc.LoadBalancerRegistry; -import io.grpc.ManagedChannel; -import io.grpc.NameResolver.ConfigOrError; -import io.grpc.Status; -import io.grpc.SynchronizationContext; -import io.grpc.grpclb.GrpclbLoadBalancerProvider; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory2.AutoConfiguredLoadBalancer; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory2.PolicyException; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory2.PolicySelection; -import 
io.grpc.internal.AutoConfiguredLoadBalancerFactory2.ResolvedPolicySelection; -import io.grpc.util.ForwardingLoadBalancerHelper; -import java.net.SocketAddress; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; - -/** - * Unit tests for {@link AutoConfiguredLoadBalancerFactory}. - */ -@RunWith(JUnit4.class) -// TODO(creamsoup) remove backward compatible check when fully migrated -@SuppressWarnings("deprecation") -public class AutoConfiguredLoadBalancerFactoryTest2 { - private static final LoadBalancerRegistry defaultRegistry = - LoadBalancerRegistry.getDefaultRegistry(); - private final AutoConfiguredLoadBalancerFactory2 lbf = - new AutoConfiguredLoadBalancerFactory2(GrpcUtil.DEFAULT_LB_POLICY); - - private final ChannelLogger channelLogger = mock(ChannelLogger.class); - private final LoadBalancer testLbBalancer = mock(LoadBalancer.class); - private final LoadBalancer testLbBalancer2 = mock(LoadBalancer.class); - private final AtomicReference nextParsedConfigOrError = - new AtomicReference<>(ConfigOrError.fromConfig("default")); - private final AtomicReference nextParsedConfigOrError2 = - new AtomicReference<>(ConfigOrError.fromConfig("default2")); - private final FakeLoadBalancerProvider testLbBalancerProvider = - mock(FakeLoadBalancerProvider.class, - delegatesTo( - new FakeLoadBalancerProvider("test_lb", testLbBalancer, nextParsedConfigOrError))); - private final FakeLoadBalancerProvider testLbBalancerProvider2 = - mock(FakeLoadBalancerProvider.class, - delegatesTo( - new 
FakeLoadBalancerProvider("test_lb2", testLbBalancer2, nextParsedConfigOrError2))); - - @Before - public void setUp() { - when(testLbBalancer.canHandleEmptyAddressListFromNameResolution()).thenCallRealMethod(); - assertThat(testLbBalancer.canHandleEmptyAddressListFromNameResolution()).isFalse(); - when(testLbBalancer2.canHandleEmptyAddressListFromNameResolution()).thenReturn(true); - defaultRegistry.register(testLbBalancerProvider); - defaultRegistry.register(testLbBalancerProvider2); - } - - @After - public void tearDown() { - defaultRegistry.deregister(testLbBalancerProvider); - defaultRegistry.deregister(testLbBalancerProvider2); - } - - @Test - public void newLoadBalancer_isAuto() { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - - assertThat(lb).isInstanceOf(AutoConfiguredLoadBalancer.class); - } - - @Test - public void defaultIsPickFirst() { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - - assertThat(lb.getDelegateProvider()).isInstanceOf(PickFirstLoadBalancerProvider.class); - assertThat(lb.getDelegate().getClass().getName()).contains("PickFirst"); - } - - @Test - public void defaultIsConfigurable() { - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory2("test_lb") - .newLoadBalancer(new TestHelper()); - - assertThat(lb.getDelegateProvider()).isSameInstanceAs(testLbBalancerProvider); - assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer); - } - - @SuppressWarnings("deprecation") - @Test - public void forwardsCalls() { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - - final AtomicInteger calls = new AtomicInteger(); - TestLoadBalancer testlb = new TestLoadBalancer() { - - @Override - public void handleNameResolutionError(Status error) { - calls.getAndSet(1); - } - - @Override - public void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo stateInfo) { - calls.getAndSet(2); - } - - @Override - public void shutdown() { - 
calls.getAndSet(3); - } - }; - - lb.setDelegate(testlb); - - lb.handleNameResolutionError(Status.RESOURCE_EXHAUSTED); - assertThat(calls.getAndSet(0)).isEqualTo(1); - - lb.handleSubchannelState(null, null); - assertThat(calls.getAndSet(0)).isEqualTo(2); - - lb.shutdown(); - assertThat(calls.getAndSet(0)).isEqualTo(3); - } - - @Test - public void handleResolvedAddressGroups_keepOldBalancer() { - final List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - Helper helper = new TestHelper() { - @Override - public Subchannel createSubchannel(CreateSubchannelArgs args) { - assertThat(args.getAddresses()).isEqualTo(servers); - return new TestSubchannel(args); - } - }; - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); - LoadBalancer oldDelegate = lb.getDelegate(); - - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(Attributes.EMPTY) - .setLoadBalancingPolicyConfig(null) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - assertThat(lb.getDelegate()).isSameInstanceAs(oldDelegate); - } - - @Test - public void handleResolvedAddressGroups_shutsDownOldBalancer() throws Exception { - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": { } } ] }"); - ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - - final List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - Helper helper = new TestHelper() { - @Override - public Subchannel createSubchannel(CreateSubchannelArgs args) { - assertThat(args.getAddresses()).isEqualTo(servers); - return new TestSubchannel(args); - } - }; - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); - final AtomicBoolean shutdown = new AtomicBoolean(); - TestLoadBalancer testlb = new TestLoadBalancer() { - - @Override - public void handleNameResolutionError(Status error) { - // noop 
- } - - @Override - public void shutdown() { - shutdown.set(true); - } - }; - lb.setDelegate(testlb); - - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - assertThat(lb.getDelegateProvider().getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertTrue(shutdown.get()); - } - - @Test - @SuppressWarnings("unchecked") - public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exception { - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }"); - ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfigs.getConfig()).isNotNull(); - - final List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - Helper helper = new TestHelper(); - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); - - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) - .build()); - - verify(testLbBalancerProvider).newLoadBalancer(same(helper)); - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(testLbBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); - assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(rawServiceConfig); - verify(testLbBalancer, atLeast(0)).canHandleEmptyAddressListFromNameResolution(); - ArgumentCaptor> lbConfigCaptor = 
ArgumentCaptor.forClass(Map.class); - verify(testLbBalancerProvider).parseLoadBalancingPolicyConfig(lbConfigCaptor.capture()); - assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "high"); - verifyNoMoreInteractions(testLbBalancer); - - rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"low\" } } ] }"); - lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - - handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) - .build()); - - resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(testLbBalancer, times(2)).handleResolvedAddresses(resultCaptor.capture()); - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); - assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(rawServiceConfig); - verify(testLbBalancerProvider, times(2)) - .parseLoadBalancingPolicyConfig(lbConfigCaptor.capture()); - assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "low"); - // Service config didn't change policy, thus the delegateLb is not swapped - verifyNoMoreInteractions(testLbBalancer); - verify(testLbBalancerProvider).newLoadBalancer(any(Helper.class)); - } - - @Test - public void handleResolvedAddressGroups_propagateOnlyBackendAddrsToDelegate() throws Exception { - // This case only happens when grpclb is missing. 
We will use a local registry - LoadBalancerRegistry registry = new LoadBalancerRegistry(); - registry.register(new PickFirstLoadBalancerProvider()); - registry.register( - new FakeLoadBalancerProvider( - "round_robin", testLbBalancer, /* nextParsedLbPolicyConfig= */ null)); - - final List servers = - Arrays.asList( - new EquivalentAddressGroup(new SocketAddress(){}), - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - Helper helper = new TestHelper(); - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory2( - registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(helper); - - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(Attributes.EMPTY) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer); - verify(testLbBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(Collections.singletonList(servers.get(0))) - .setAttributes(Attributes.EMPTY) - .build()); - } - - @Test - public void handleResolvedAddressGroups_delegateDoNotAcceptEmptyAddressList_nothing() - throws Exception { - Helper helper = new TestHelper(); - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); - - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }"); - ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(serviceConfig, helper.getChannelLogger()); - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(Collections.emptyList()) - .setLoadBalancingPolicyConfig(lbConfig.getConfig()) - .build()); - - assertThat(testLbBalancer.canHandleEmptyAddressListFromNameResolution()).isFalse(); - assertThat(handleResult.getCode()).isEqualTo(Status.Code.UNAVAILABLE); - 
assertThat(handleResult.getDescription()).startsWith("NameResolver returned no usable address"); - assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer); - } - - @Test - public void handleResolvedAddressGroups_delegateAcceptsEmptyAddressList() - throws Exception { - Helper helper = new TestHelper(); - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); - - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"test_lb2\": { \"setting1\": \"high\" } } ] }"); - ConfigOrError lbConfigs = - lbf.parseLoadBalancerPolicy(rawServiceConfig, helper.getChannelLogger()); - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(Collections.emptyList()) - .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer2); - assertThat(testLbBalancer2.canHandleEmptyAddressListFromNameResolution()).isTrue(); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(testLbBalancer2).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).isEmpty(); - assertThat(resultCaptor.getValue().getLoadBalancingPolicyConfig()) - .isEqualTo(nextParsedConfigOrError2.get().getConfig()); - assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(rawServiceConfig); - } - - @Test - public void decideLoadBalancerProvider_noBalancerAddresses_noServiceConfig_pickFirst() - throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - PolicySelection policySelection = null; - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider) - 
.isInstanceOf(PickFirstLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_noBalancerAddresses_noServiceConfig_customDefault() - throws Exception { - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory2("test_lb") - .newLoadBalancer(new TestHelper()); - PolicySelection policySelection = null; - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider).isSameInstanceAs(testLbBalancerProvider); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_oneBalancer_noServiceConfig_grpclb() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - PolicySelection policySelection = null; - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbPolicy() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig("{\"loadBalancingPolicy\": \"round_robin\"}"); - - ConfigOrError lbConfig = 
lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfig.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); - List servers = - Arrays.asList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), - new EquivalentAddressGroup( - new SocketAddress(){})); - List backends = Arrays.asList(servers.get(1)); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.serverList).isEqualTo(backends); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbConfig() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); - - ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfig.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); - List servers = - Arrays.asList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), - new EquivalentAddressGroup( - new SocketAddress(){})); - List backends = Arrays.asList(servers.get(1)); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.serverList).isEqualTo(backends); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_grpclbConfigPropagated() throws 
Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig( - "{\"loadBalancingConfig\": [" - + "{\"grpclb\": {\"childPolicy\": [ {\"pick_first\": {} } ] } }" - + "] }"); - ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfig.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); - - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config) - .isEqualTo(((PolicySelection) lbConfig.getConfig()).config); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_policyUnavailButGrpclbAddressPresent() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, null); - - assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_grpclbProviderNotFound_fallbackToRoundRobin() - throws Exception { - LoadBalancerRegistry registry = new LoadBalancerRegistry(); - registry.register(new PickFirstLoadBalancerProvider()); - 
LoadBalancerProvider fakeRoundRobinProvider = - new FakeLoadBalancerProvider("round_robin", testLbBalancer, null); - registry.register(fakeRoundRobinProvider); - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory2( - registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(new TestHelper()); - List servers = - Arrays.asList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), - new EquivalentAddressGroup(new SocketAddress(){})); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, null); - - assertThat(selection.policySelection.provider).isSameInstanceAs(fakeRoundRobinProvider); - assertThat(selection.policySelection.config).isNull(); - verify(channelLogger).log( - eq(ChannelLogLevel.ERROR), - startsWith("Found balancer addresses but grpclb runtime is missing")); - - // Called for the second time, the warning is only logged once - selection = lb.resolveLoadBalancerProvider(servers, null); - - assertThat(selection.policySelection.provider).isSameInstanceAs(fakeRoundRobinProvider); - assertThat(selection.policySelection.config).isNull(); - // Balancer addresses are filtered out in the server list passed to round_robin - assertThat(selection.serverList).containsExactly(servers.get(1)); - verifyNoMoreInteractions(channelLogger);; - } - - @Test - public void decideLoadBalancerProvider_grpclbProviderNotFound_noBackendAddress() - throws Exception { - LoadBalancerRegistry registry = new LoadBalancerRegistry(); - registry.register(new PickFirstLoadBalancerProvider()); - registry.register(new FakeLoadBalancerProvider("round_robin", testLbBalancer, null)); - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory2( - registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(new TestHelper()); - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - 
Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - try { - lb.resolveLoadBalancerProvider(servers, null); - fail("Should throw"); - } catch (PolicyException e) { - assertThat(e) - .hasMessageThat() - .isEqualTo("Received ONLY balancer addresses but grpclb runtime is missing"); - } - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbConfigOverridesDefault() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": {} } ] }"); - ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfigs.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfigs.getConfig(); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - verifyZeroInteractions(channelLogger); - } - - @Test - public void channelTracing_lbPolicyChanged() throws Exception { - final FakeClock clock = new FakeClock(); - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - Helper helper = new TestHelper() { - @Override - @Deprecated - public Subchannel createSubchannel(List addrs, Attributes attrs) { - return new TestSubchannel(CreateSubchannelArgs.newBuilder() - .setAddresses(addrs) - .setAttributes(attrs) - .build()); - } - - @Override - public Subchannel createSubchannel(CreateSubchannelArgs args) { - return new TestSubchannel(args); - } - - @Override - public ManagedChannel createOobChannel(EquivalentAddressGroup eag, String authority) { - return mock(ManagedChannel.class, RETURNS_DEEP_STUBS); - } - - @Override - public String 
getAuthority() { - return "fake_authority"; - } - - @Override - public SynchronizationContext getSynchronizationContext() { - return new SynchronizationContext( - new Thread.UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - throw new AssertionError(e); - } - }); - } - - @Override - public ScheduledExecutorService getScheduledExecutorService() { - return clock.getScheduledExecutorService(); - } - }; - - AutoConfiguredLoadBalancer lb = - new AutoConfiguredLoadBalancerFactory2(GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(helper); - Status handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(Attributes.EMPTY) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - verifyNoMoreInteractions(channelLogger); - - ConfigOrError testLbParsedConfig = ConfigOrError.fromConfig("foo"); - nextParsedConfigOrError.set(testLbParsedConfig); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { } } ] }"); - ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - verify(channelLogger).log( - eq(ChannelLogLevel.INFO), - eq("Load balancer changed from {0} to {1}"), - eq("PickFirstLoadBalancer"), - eq(testLbBalancer.getClass().getSimpleName())); - - verify(channelLogger).log( - eq(ChannelLogLevel.DEBUG), - eq("Load-balancing config: {0}"), - eq(testLbParsedConfig.getConfig())); - verifyNoMoreInteractions(channelLogger); - - testLbParsedConfig = ConfigOrError.fromConfig("bar"); - nextParsedConfigOrError.set(testLbParsedConfig); - serviceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { } } ] }"); - lbConfigs = 
lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) - .build()); - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - verify(channelLogger).log( - eq(ChannelLogLevel.DEBUG), - eq("Load-balancing config: {0}"), - eq(testLbParsedConfig.getConfig())); - verifyNoMoreInteractions(channelLogger); - - servers = Collections.singletonList(new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(Attributes.EMPTY) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - verify(channelLogger).log( - eq(ChannelLogLevel.INFO), - eq("Load balancer changed from {0} to {1}"), - eq(testLbBalancer.getClass().getSimpleName()), eq("GrpclbLoadBalancer")); - - verifyNoMoreInteractions(channelLogger); - } - - @Test - public void parseLoadBalancerConfig_failedOnUnknown() throws Exception { - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"magic_balancer\": {} } ] }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed.getError()).isNotNull(); - assertThat(parsed.getError().getDescription()) - .isEqualTo("None of [magic_balancer] specified by Service Config are available."); - } - - @Test - public void parseLoadBalancerPolicy_failedOnUnknown() throws Exception { - Map serviceConfig = - parseConfig("{\"loadBalancingPolicy\": \"magic_balancer\"}"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed.getError()).isNotNull(); - assertThat(parsed.getError().getDescription()) - .isEqualTo("None of [magic_balancer] specified by Service Config are available."); - } - - @Test 
- public void parseLoadBalancerConfig_multipleValidPolicies() throws Exception { - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [" - + "{\"round_robin\": {}}," - + "{\"test_lb\": {} } ] }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed).isNotNull(); - assertThat(parsed.getError()).isNull(); - assertThat(parsed.getConfig()).isInstanceOf(PolicySelection.class); - assertThat(((PolicySelection) parsed.getConfig()).provider.getClass().getName()) - .isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - } - - @Test - public void parseLoadBalancerConfig_policyShouldBeIgnoredIfConfigExists() throws Exception { - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [{\"round_robin\": {} } ]," - + "\"loadBalancingPolicy\": \"pick_first\" }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed).isNotNull(); - assertThat(parsed.getError()).isNull(); - assertThat(parsed.getConfig()).isInstanceOf(PolicySelection.class); - assertThat(((PolicySelection) parsed.getConfig()).provider.getClass().getName()) - .isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - } - - @Test - public void parseLoadBalancerConfig_policyShouldBeIgnoredEvenIfUnknownPolicyExists() - throws Exception { - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [{\"magic_balancer\": {} } ]," - + "\"loadBalancingPolicy\": \"round_robin\" }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed.getError()).isNotNull(); - assertThat(parsed.getError().getDescription()) - .isEqualTo("None of [magic_balancer] specified by Service Config are available."); - } - - @Test - @SuppressWarnings("unchecked") - public void parseLoadBalancerConfig_firstInvalidPolicy() throws Exception { - when(testLbBalancerProvider.parseLoadBalancingPolicyConfig(any(Map.class))) - 
.thenReturn(ConfigOrError.fromError(Status.UNKNOWN)); - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [" - + "{\"test_lb\": {}}," - + "{\"round_robin\": {} } ] }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed).isNotNull(); - assertThat(parsed.getConfig()).isNull(); - assertThat(parsed.getError()).isEqualTo(Status.UNKNOWN); - } - - @Test - @SuppressWarnings("unchecked") - public void parseLoadBalancerConfig_firstValidSecondInvalidPolicy() throws Exception { - when(testLbBalancerProvider.parseLoadBalancingPolicyConfig(any(Map.class))) - .thenReturn(ConfigOrError.fromError(Status.UNKNOWN)); - Map serviceConfig = - parseConfig( - "{\"loadBalancingConfig\": [" - + "{\"round_robin\": {}}," - + "{\"test_lb\": {} } ] }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed).isNotNull(); - assertThat(parsed.getConfig()).isNotNull(); - assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull(); - } - - @Test - public void parseLoadBalancerConfig_someProvidesAreNotAvailable() throws Exception { - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ " - + "{\"magic_balancer\": {} }," - + "{\"round_robin\": {}} ] }"); - ConfigOrError parsed = lbf.parseLoadBalancerPolicy(serviceConfig, channelLogger); - assertThat(parsed).isNotNull(); - assertThat(parsed.getConfig()).isNotNull(); - assertThat(((PolicySelection) parsed.getConfig()).config).isNotNull(); - verify(channelLogger).log( - eq(ChannelLogLevel.DEBUG), - eq("{0} specified by Service Config are not available"), - eq(new ArrayList<>(Collections.singletonList("magic_balancer")))); - } - - - public static class ForwardingLoadBalancer extends LoadBalancer { - private final LoadBalancer delegate; - - public ForwardingLoadBalancer(LoadBalancer delegate) { - this.delegate = delegate; - } - - protected LoadBalancer delegate() { - return delegate; - } - - @Override - @Deprecated - 
public void handleResolvedAddressGroups( - List servers, Attributes attributes) { - delegate().handleResolvedAddressGroups(servers, attributes); - } - - @Override - public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - delegate().handleResolvedAddresses(resolvedAddresses); - } - - @Override - public void handleNameResolutionError(Status error) { - delegate().handleNameResolutionError(error); - } - - @Override - public void shutdown() { - delegate().shutdown(); - } - } - - @SuppressWarnings("unchecked") - private static Map parseConfig(String json) throws Exception { - return (Map) JsonParser.parse(json); - } - - private static class TestLoadBalancer extends ForwardingLoadBalancer { - TestLoadBalancer() { - super(null); - } - } - - private class TestHelper extends ForwardingLoadBalancerHelper { - @Override - protected Helper delegate() { - return null; - } - - @Override - public ChannelLogger getChannelLogger() { - return channelLogger; - } - - @Override - public void updateBalancingState(ConnectivityState newState, SubchannelPicker newPicker) { - // noop - } - } - - private static class TestSubchannel extends Subchannel { - TestSubchannel(CreateSubchannelArgs args) { - this.addrs = args.getAddresses(); - this.attrs = args.getAttributes(); - } - - List addrs; - final Attributes attrs; - - @Override - public void start(SubchannelStateListener listener) { - } - - @Override - public void shutdown() { - } - - @Override - public void requestConnection() { - } - - @Override - public List getAllAddresses() { - return addrs; - } - - @Override - public Attributes getAttributes() { - return attrs; - } - - @Override - public void updateAddresses(List addrs) { - Preconditions.checkNotNull(addrs, "addrs"); - this.addrs = addrs; - } - } - - private static class FakeLoadBalancerProvider extends LoadBalancerProvider { - private final String policyName; - private final LoadBalancer balancer; - private final AtomicReference nextParsedLbPolicyConfig; - - 
FakeLoadBalancerProvider( - String policyName, - LoadBalancer balancer, - AtomicReference nextParsedLbPolicyConfig) { - this.policyName = policyName; - this.balancer = balancer; - this.nextParsedLbPolicyConfig = nextParsedLbPolicyConfig; - } - - @Override - public boolean isAvailable() { - return true; - } - - @Override - public int getPriority() { - return 5; - } - - @Override - public String getPolicyName() { - return policyName; - } - - @Override - public LoadBalancer newLoadBalancer(Helper helper) { - return balancer; - } - - @Override - public ConfigOrError parseLoadBalancingPolicyConfig( - Map rawLoadBalancingPolicyConfig) { - if (nextParsedLbPolicyConfig == null) { - return super.parseLoadBalancingPolicyConfig(rawLoadBalancingPolicyConfig); - } - return nextParsedLbPolicyConfig.get(); - } - } -} diff --git a/core/src/test/java/io/grpc/internal/HedgingPolicyTest.java b/core/src/test/java/io/grpc/internal/HedgingPolicyTest.java index 82806232861..7c640862f8a 100644 --- a/core/src/test/java/io/grpc/internal/HedgingPolicyTest.java +++ b/core/src/test/java/io/grpc/internal/HedgingPolicyTest.java @@ -59,10 +59,17 @@ public void getHedgingPolicies() throws Exception { @SuppressWarnings("unchecked") Map serviceConfig = (Map) serviceConfigObj; - ServiceConfigInterceptor serviceConfigInterceptor = new ServiceConfigInterceptor( - /* retryEnabled = */ true, /* maxRetryAttemptsLimit = */ 3, - /* maxHedgedAttemptsLimit = */ 4); - serviceConfigInterceptor.handleUpdate(serviceConfig); + ServiceConfigInterceptor serviceConfigInterceptor = + new ServiceConfigInterceptor(/* retryEnabled= */ true); + serviceConfigInterceptor + .handleUpdate( + ManagedChannelServiceConfig + .fromServiceConfig( + serviceConfig, + /* retryEnabled= */ true, + /* maxRetryAttemptsLimit= */ 3, + /* maxHedgedAttemptsLimit= */ 4, + /* loadBalancingConfig= */ null)); MethodDescriptor.Builder builder = TestMethodDescriptors.voidMethod().toBuilder(); @@ -131,10 +138,17 @@ public void 
getRetryPolicies_hedgingDisabled() throws Exception { @SuppressWarnings("unchecked") Map serviceConfig = (Map) serviceConfigObj; - ServiceConfigInterceptor serviceConfigInterceptor = new ServiceConfigInterceptor( - /* retryEnabled = */ false, /* maxRetryAttemptsLimit = */ 3, - /* maxHedgedAttemptsLimit = */ 4); - serviceConfigInterceptor.handleUpdate(serviceConfig); + ServiceConfigInterceptor serviceConfigInterceptor = + new ServiceConfigInterceptor(/* retryEnabled= */ false); + serviceConfigInterceptor + .handleUpdate( + ManagedChannelServiceConfig + .fromServiceConfig( + serviceConfig, + /* retryEnabled= */ false, + /* maxRetryAttemptsLimit= */ 3, + /* maxHedgedAttemptsLimit= */ 4, + /* loadBalancingConfig= */ null)); MethodDescriptor.Builder builder = TestMethodDescriptors.voidMethod().toBuilder(); diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest.java index 191533333c6..22cd9879e85 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest.java +++ b/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest.java @@ -16,6 +16,7 @@ package io.grpc.internal; +import static com.google.common.truth.Truth.assertThat; import static io.grpc.ConnectivityState.READY; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; import static org.junit.Assert.assertEquals; @@ -87,7 +88,6 @@ /** * Unit tests for {@link ManagedChannelImpl}'s idle mode. 
*/ -@Deprecated // migrate to ManagedChannelImplIdlenessTest2 @RunWith(JUnit4.class) public class ManagedChannelImplIdlenessTest { @Rule @@ -234,9 +234,12 @@ public void newCallExitsIdleness() throws Exception { .setAttributes(Attributes.EMPTY) .build(); nameResolverListenerCaptor.getValue().onResult(resolutionResult); - verify(mockLoadBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(Attributes.EMPTY) - .build()); + + ArgumentCaptor resolvedAddressCaptor = + ArgumentCaptor.forClass(ResolvedAddresses.class); + verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); + assertThat(resolvedAddressCaptor.getValue().getAddresses()) + .containsExactlyElementsIn(servers); } @Test diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest2.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest2.java deleted file mode 100644 index 840bdaa22d8..00000000000 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplIdlenessTest2.java +++ /dev/null @@ -1,560 +0,0 @@ -/* - * Copyright 2016 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.grpc.internal; - -import static com.google.common.truth.Truth.assertThat; -import static io.grpc.ConnectivityState.READY; -import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.AdditionalAnswers.delegatesTo; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.same; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.google.common.collect.Lists; -import io.grpc.Attributes; -import io.grpc.CallOptions; -import io.grpc.ChannelLogger; -import io.grpc.ClientCall; -import io.grpc.ClientInterceptor; -import io.grpc.ConnectivityState; -import io.grpc.EquivalentAddressGroup; -import io.grpc.IntegerMarshaller; -import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.CreateSubchannelArgs; -import io.grpc.LoadBalancer.Helper; -import io.grpc.LoadBalancer.PickResult; -import io.grpc.LoadBalancer.PickSubchannelArgs; -import io.grpc.LoadBalancer.ResolvedAddresses; -import io.grpc.LoadBalancer.Subchannel; -import io.grpc.LoadBalancer.SubchannelPicker; -import io.grpc.LoadBalancer.SubchannelStateListener; -import io.grpc.LoadBalancerProvider; -import io.grpc.LoadBalancerRegistry; -import io.grpc.ManagedChannel; -import io.grpc.Metadata; -import io.grpc.MethodDescriptor; -import io.grpc.MethodDescriptor.MethodType; -import io.grpc.NameResolver; -import io.grpc.NameResolver.ResolutionResult; -import io.grpc.Status; -import io.grpc.StringMarshaller; -import io.grpc.internal.FakeClock.ScheduledTask; -import io.grpc.internal.TestUtils.MockClientTransportInfo; -import java.net.SocketAddress; -import 
java.net.URI; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; - -/** - * Unit tests for {@link ManagedChannelImpl2}'s idle mode. - */ -@RunWith(JUnit4.class) -public class ManagedChannelImplIdlenessTest2 { - @Rule - public final MockitoRule mocks = MockitoJUnit.rule(); - private final FakeClock timer = new FakeClock(); - private final FakeClock executor = new FakeClock(); - private final FakeClock oobExecutor = new FakeClock(); - private static final String AUTHORITY = "fakeauthority"; - private static final String USER_AGENT = "fakeagent"; - private static final long IDLE_TIMEOUT_SECONDS = 30; - private static final String MOCK_POLICY_NAME = "mock_lb"; - private ManagedChannelImpl2 channel; - - private final MethodDescriptor method = - MethodDescriptor.newBuilder() - .setType(MethodType.UNKNOWN) - .setFullMethodName("service/method") - .setRequestMarshaller(new StringMarshaller()) - .setResponseMarshaller(new IntegerMarshaller()) - .build(); - - private final List servers = Lists.newArrayList(); - private final ObjectPool executorPool = - new FixedObjectPool(executor.getScheduledExecutorService()); - private final ObjectPool oobExecutorPool = - new FixedObjectPool(oobExecutor.getScheduledExecutorService()); - - @Mock private ClientTransportFactory mockTransportFactory; - @Mock private LoadBalancer mockLoadBalancer; - @Mock private SubchannelStateListener subchannelStateListener; - private final 
LoadBalancerProvider mockLoadBalancerProvider = - mock(LoadBalancerProvider.class, delegatesTo(new LoadBalancerProvider() { - @Override - public LoadBalancer newLoadBalancer(Helper helper) { - return mockLoadBalancer; - } - - @Override - public boolean isAvailable() { - return true; - } - - @Override - public int getPriority() { - return 999; - } - - @Override - public String getPolicyName() { - return MOCK_POLICY_NAME; - } - })); - - @Mock private NameResolver mockNameResolver; - @Mock private NameResolver.Factory mockNameResolverFactory; - @Mock private ClientCall.Listener mockCallListener; - @Mock private ClientCall.Listener mockCallListener2; - @Captor private ArgumentCaptor nameResolverListenerCaptor; - private BlockingQueue newTransports; - - @Before - @SuppressWarnings("deprecation") // For NameResolver.Listener - public void setUp() { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - when(mockNameResolver.getServiceAuthority()).thenReturn(AUTHORITY); - when(mockNameResolverFactory - .newNameResolver(any(URI.class), any(NameResolver.Args.class))) - .thenReturn(mockNameResolver); - when(mockTransportFactory.getScheduledExecutorService()) - .thenReturn(timer.getScheduledExecutorService()); - - class Builder extends AbstractManagedChannelImplBuilder { - Builder(String target) { - super(target); - } - - @Override protected ClientTransportFactory buildTransportFactory() { - throw new UnsupportedOperationException(); - } - - @Override public Builder usePlaintext() { - throw new UnsupportedOperationException(); - } - } - - Builder builder = new Builder("fake://target") - .nameResolverFactory(mockNameResolverFactory) - .defaultLoadBalancingPolicy(MOCK_POLICY_NAME) - .idleTimeout(IDLE_TIMEOUT_SECONDS, TimeUnit.SECONDS) - .userAgent(USER_AGENT); - builder.executorPool = executorPool; - channel = new ManagedChannelImpl2( - builder, mockTransportFactory, new FakeBackoffPolicyProvider(), - oobExecutorPool, timer.getStopwatchSupplier(), - 
Collections.emptyList(), - TimeProvider.SYSTEM_TIME_PROVIDER); - newTransports = TestUtils.captureTransports(mockTransportFactory); - - for (int i = 0; i < 2; i++) { - ArrayList addrs = Lists.newArrayList(); - for (int j = 0; j < 2; j++) { - addrs.add(new FakeSocketAddress("servergroup" + i + "server" + j)); - } - servers.add(new EquivalentAddressGroup(addrs)); - } - verify(mockNameResolverFactory).newNameResolver(any(URI.class), any(NameResolver.Args.class)); - // Verify the initial idleness - verify(mockLoadBalancerProvider, never()).newLoadBalancer(any(Helper.class)); - verify(mockTransportFactory, never()).newClientTransport( - any(SocketAddress.class), - any(ClientTransportFactory.ClientTransportOptions.class), - any(ChannelLogger.class)); - verify(mockNameResolver, never()).start(any(NameResolver.Listener.class)); - verify(mockNameResolver, never()).start(any(NameResolver.Listener2.class)); - } - - @After - public void allPendingTasksAreRun() { - Collection pendingTimerTasks = timer.getPendingTasks(); - for (ScheduledTask a : pendingTimerTasks) { - assertFalse(Rescheduler.isEnabled(a.command)); - } - assertEquals(executor.getPendingTasks() + " should be empty", 0, executor.numPendingTasks()); - } - - @After - public void cleanUp() { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - - @Test - public void newCallExitsIdleness() throws Exception { - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - - verify(mockNameResolver).start(nameResolverListenerCaptor.capture()); - // Simulate new address resolved to make sure the LoadBalancer is correctly linked to - // the NameResolver. 
- ResolutionResult resolutionResult = - ResolutionResult.newBuilder() - .setAddresses(servers) - .setAttributes(Attributes.EMPTY) - .build(); - nameResolverListenerCaptor.getValue().onResult(resolutionResult); - - ArgumentCaptor resolvedAddressCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); - assertThat(resolvedAddressCaptor.getValue().getAddresses()) - .containsExactlyElementsIn(servers); - } - - @Test - public void newCallRefreshesIdlenessTimer() throws Exception { - // First call to exit the initial idleness, then immediately cancel the call. - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - call.cancel("For testing", null); - - // Verify that we have exited the idle mode - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - assertFalse(channel.inUseStateAggregator.isInUse()); - - // Move closer to idleness, but not yet. - timer.forwardTime(IDLE_TIMEOUT_SECONDS - 1, TimeUnit.SECONDS); - verify(mockLoadBalancer, never()).shutdown(); - assertFalse(channel.inUseStateAggregator.isInUse()); - - // A new call would refresh the timer - call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - call.cancel("For testing", null); - assertFalse(channel.inUseStateAggregator.isInUse()); - - // ... so that passing the same length of time will not trigger idle mode - timer.forwardTime(IDLE_TIMEOUT_SECONDS - 1, TimeUnit.SECONDS); - verify(mockLoadBalancer, never()).shutdown(); - assertFalse(channel.inUseStateAggregator.isInUse()); - - // ... 
until the time since last call has reached the timeout - timer.forwardTime(1, TimeUnit.SECONDS); - verify(mockLoadBalancer).shutdown(); - assertFalse(channel.inUseStateAggregator.isInUse()); - - // Drain the app executor, which runs the call listeners - verify(mockCallListener, never()).onClose(any(Status.class), any(Metadata.class)); - assertEquals(2, executor.runDueTasks()); - verify(mockCallListener, times(2)).onClose(any(Status.class), any(Metadata.class)); - } - - @Test - public void delayedTransportHoldsOffIdleness() throws Exception { - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - assertTrue(channel.inUseStateAggregator.isInUse()); - - // As long as the delayed transport is in-use (by the pending RPC), the channel won't go idle. - timer.forwardTime(IDLE_TIMEOUT_SECONDS * 2, TimeUnit.SECONDS); - assertTrue(channel.inUseStateAggregator.isInUse()); - - // Cancelling the only RPC will reset the in-use state. - assertEquals(0, executor.numPendingTasks()); - call.cancel("In test", null); - assertEquals(1, executor.runDueTasks()); - assertFalse(channel.inUseStateAggregator.isInUse()); - // And allow the channel to go idle. 
- timer.forwardTime(IDLE_TIMEOUT_SECONDS - 1, TimeUnit.SECONDS); - verify(mockLoadBalancer, never()).shutdown(); - timer.forwardTime(1, TimeUnit.SECONDS); - verify(mockLoadBalancer).shutdown(); - } - - @Test - public void realTransportsHoldsOffIdleness() throws Exception { - final EquivalentAddressGroup addressGroup = servers.get(1); - - // Start a call, which goes to delayed transport - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - - // Verify that we have exited the idle mode - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - Helper helper = helperCaptor.getValue(); - assertTrue(channel.inUseStateAggregator.isInUse()); - - // Assume LoadBalancer has received an address, then create a subchannel. - Subchannel subchannel = createSubchannelSafely(helper, addressGroup, Attributes.EMPTY); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo t0 = newTransports.poll(); - t0.listener.transportReady(); - - SubchannelPicker mockPicker = mock(SubchannelPicker.class); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - updateBalancingStateSafely(helper, READY, mockPicker); - // Delayed transport creates real streams in the app executor - executor.runDueTasks(); - - // Delayed transport exits in-use, while real transport has not entered in-use yet. - assertFalse(channel.inUseStateAggregator.isInUse()); - - // Now it's in-use - t0.listener.transportInUse(true); - assertTrue(channel.inUseStateAggregator.isInUse()); - - // As long as the transport is in-use, the channel won't go idle. - timer.forwardTime(IDLE_TIMEOUT_SECONDS * 2, TimeUnit.SECONDS); - assertTrue(channel.inUseStateAggregator.isInUse()); - - t0.listener.transportInUse(false); - assertFalse(channel.inUseStateAggregator.isInUse()); - // And allow the channel to go idle. 
- timer.forwardTime(IDLE_TIMEOUT_SECONDS - 1, TimeUnit.SECONDS); - verify(mockLoadBalancer, never()).shutdown(); - timer.forwardTime(1, TimeUnit.SECONDS); - verify(mockLoadBalancer).shutdown(); - } - - @Test - public void updateSubchannelAddresses_newAddressConnects() { - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); // Create LB - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - Helper helper = helperCaptor.getValue(); - Subchannel subchannel = createSubchannelSafely(helper, servers.get(0), Attributes.EMPTY); - - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo t0 = newTransports.poll(); - t0.listener.transportReady(); - - updateSubchannelAddressesSafely(helper, subchannel, servers.get(1)); - - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo t1 = newTransports.poll(); - t1.listener.transportReady(); - } - - @Test - public void updateSubchannelAddresses_existingAddressDoesNotConnect() { - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); // Create LB - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - Helper helper = helperCaptor.getValue(); - Subchannel subchannel = createSubchannelSafely(helper, servers.get(0), Attributes.EMPTY); - - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo t0 = newTransports.poll(); - t0.listener.transportReady(); - - List changedList = new ArrayList<>(servers.get(0).getAddresses()); - changedList.add(new FakeSocketAddress("aDifferentServer")); - updateSubchannelAddressesSafely(helper, subchannel, new EquivalentAddressGroup(changedList)); - - requestConnectionSafely(helper, subchannel); - assertNull(newTransports.poll()); - } - - @Test - public void 
oobTransportDoesNotAffectIdleness() { - // Start a call, which goes to delayed transport - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - - // Verify that we have exited the idle mode - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - Helper helper = helperCaptor.getValue(); - - // Fail the RPC - SubchannelPicker failingPicker = mock(SubchannelPicker.class); - when(failingPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withError(Status.UNAVAILABLE)); - updateBalancingStateSafely(helper, TRANSIENT_FAILURE, failingPicker); - executor.runDueTasks(); - verify(mockCallListener).onClose(same(Status.UNAVAILABLE), any(Metadata.class)); - - // ... so that the channel resets its in-use state - assertFalse(channel.inUseStateAggregator.isInUse()); - - // Now make an RPC on an OOB channel - ManagedChannel oob = helper.createOobChannel(servers.get(0), "oobauthority"); - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), - eq(new ClientTransportFactory.ClientTransportOptions() - .setAuthority("oobauthority") - .setUserAgent(USER_AGENT)), - any(ChannelLogger.class)); - ClientCall oobCall = oob.newCall(method, CallOptions.DEFAULT); - oobCall.start(mockCallListener2, new Metadata()); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), - eq(new ClientTransportFactory.ClientTransportOptions() - .setAuthority("oobauthority") - .setUserAgent(USER_AGENT)), - any(ChannelLogger.class)); - MockClientTransportInfo oobTransportInfo = newTransports.poll(); - assertEquals(0, newTransports.size()); - // The OOB transport reports in-use state - oobTransportInfo.listener.transportInUse(true); - - // But it won't stop the channel from going idle - verify(mockLoadBalancer, never()).shutdown(); - timer.forwardTime(IDLE_TIMEOUT_SECONDS, 
TimeUnit.SECONDS); - verify(mockLoadBalancer).shutdown(); - } - - @Test - public void updateOobChannelAddresses_newAddressConnects() { - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); // Create LB - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - Helper helper = helperCaptor.getValue(); - ManagedChannel oobChannel = helper.createOobChannel(servers.get(0), "localhost"); - - oobChannel.newCall(method, CallOptions.DEFAULT).start(mockCallListener, new Metadata()); - MockClientTransportInfo t0 = newTransports.poll(); - t0.listener.transportReady(); - - helper.updateOobChannelAddresses(oobChannel, servers.get(1)); - - oobChannel.newCall(method, CallOptions.DEFAULT).start(mockCallListener, new Metadata()); - MockClientTransportInfo t1 = newTransports.poll(); - t1.listener.transportReady(); - } - - @Test - public void updateOobChannelAddresses_existingAddressDoesNotConnect() { - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); // Create LB - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - Helper helper = helperCaptor.getValue(); - ManagedChannel oobChannel = helper.createOobChannel(servers.get(0), "localhost"); - - oobChannel.newCall(method, CallOptions.DEFAULT).start(mockCallListener, new Metadata()); - MockClientTransportInfo t0 = newTransports.poll(); - t0.listener.transportReady(); - - List changedList = new ArrayList<>(servers.get(0).getAddresses()); - changedList.add(new FakeSocketAddress("aDifferentServer")); - helper.updateOobChannelAddresses(oobChannel, new EquivalentAddressGroup(changedList)); - - oobChannel.newCall(method, CallOptions.DEFAULT).start(mockCallListener, new Metadata()); - assertNull(newTransports.poll()); - } - - private static class 
FakeBackoffPolicyProvider implements BackoffPolicy.Provider { - @Override - public BackoffPolicy get() { - return new BackoffPolicy() { - @Override - public long nextBackoffNanos() { - return 1; - } - }; - } - } - - private static class FakeSocketAddress extends SocketAddress { - final String name; - - FakeSocketAddress(String name) { - this.name = name; - } - - @Override - public String toString() { - return "FakeSocketAddress-" + name; - } - } - - // Helper methods to call methods from SynchronizationContext - private Subchannel createSubchannelSafely( - final Helper helper, final EquivalentAddressGroup addressGroup, final Attributes attrs) { - final AtomicReference resultCapture = new AtomicReference<>(); - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - Subchannel s = helper.createSubchannel(CreateSubchannelArgs.newBuilder() - .setAddresses(addressGroup) - .setAttributes(attrs) - .build()); - s.start(subchannelStateListener); - resultCapture.set(s); - } - }); - return resultCapture.get(); - } - - private static void requestConnectionSafely(Helper helper, final Subchannel subchannel) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - subchannel.requestConnection(); - } - }); - } - - private static void updateBalancingStateSafely( - final Helper helper, final ConnectivityState state, final SubchannelPicker picker) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - helper.updateBalancingState(state, picker); - } - }); - } - - private static void updateSubchannelAddressesSafely( - final Helper helper, final Subchannel subchannel, final EquivalentAddressGroup addrs) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - subchannel.updateAddresses(Collections.singletonList(addrs)); - } - }); - } -} diff --git 
a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java index 99b05926b60..f272316dedc 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java +++ b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java @@ -53,7 +53,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; -import com.google.common.truth.Truth; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.SettableFuture; @@ -103,6 +102,7 @@ import io.grpc.Status; import io.grpc.Status.Code; import io.grpc.StringMarshaller; +import io.grpc.internal.AutoConfiguredLoadBalancerFactory.PolicySelection; import io.grpc.internal.ClientTransportFactory.ClientTransportOptions; import io.grpc.internal.InternalSubchannel.TransportLogger; import io.grpc.internal.ManagedChannelImpl.ScParser; @@ -152,8 +152,9 @@ import org.mockito.stubbing.Answer; /** Unit tests for {@link ManagedChannelImpl}. 
*/ -@Deprecated // to be migrated to ManagedChannelImplTest2 @RunWith(JUnit4.class) +// TODO(creamsoup) remove backward compatible check when fully migrated +@SuppressWarnings("deprecation") public class ManagedChannelImplTest { private static final int DEFAULT_PORT = 447; @@ -272,6 +273,8 @@ public String getPolicyName() { private boolean requestConnection = true; private BlockingQueue transports; private boolean panicExpected; + @Captor + private ArgumentCaptor resolvedAddressCaptor; private ArgumentCaptor streamListenerCaptor = ArgumentCaptor.forClass(ClientStreamListener.class); @@ -749,11 +752,8 @@ public void noMoreCallbackAfterLoadBalancerShutdown() { FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - verify(mockLoadBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(Arrays.asList(addressGroup)) - .setAttributes(Attributes.EMPTY) - .build()); + verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); + assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class); SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class); @@ -970,13 +970,12 @@ public void nameResolverReturnsEmptySubLists_becomeErrorByDefault() throws Excep // Pass a FakeNameResolverFactory with an empty list and LB config FakeNameResolverFactory nameResolverFactory = new FakeNameResolverFactory.Builder(expectedUri).build(); - Map serviceConfig = + Map rawServiceConfig = parseConfig("{\"loadBalancingConfig\": [ {\"mock_lb\": { \"setting1\": \"high\" } } ] }"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + ManagedChannelServiceConfig 
parsedServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextConfigOrError.set(ConfigOrError.fromConfig(parsedServiceConfig)); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); channelBuilder.nameResolverFactory(nameResolverFactory); createChannel(); @@ -985,7 +984,7 @@ public void nameResolverReturnsEmptySubLists_becomeErrorByDefault() throws Excep verify(mockLoadBalancer).handleNameResolutionError(statusCaptor.capture()); Status status = statusCaptor.getValue(); assertSame(Status.Code.UNAVAILABLE, status.getCode()); - Truth.assertThat(status.getDescription()).startsWith(errorDescription); + assertThat(status.getDescription()).startsWith(errorDescription); // A resolution retry has been scheduled assertEquals(1, timer.numPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER)); @@ -998,13 +997,18 @@ public void nameResolverReturnsEmptySubLists_optionallyAllowed() throws Exceptio // Pass a FakeNameResolverFactory with an empty list and LB config FakeNameResolverFactory nameResolverFactory = new FakeNameResolverFactory.Builder(expectedUri).build(); - Map serviceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"mock_lb\": { \"setting1\": \"high\" } } ] }"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + String rawLbConfig = "{ \"setting1\": \"high\" }"; + Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [ {\"mock_lb\": " + rawLbConfig + " } ] }"); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig( + rawServiceConfig, + new PolicySelection( + mockLoadBalancerProvider, + parseConfig(rawLbConfig), + new Object())); + nameResolverFactory.nextConfigOrError.set(ConfigOrError.fromConfig(parsedServiceConfig)); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); 
channelBuilder.nameResolverFactory(nameResolverFactory); createChannel(); @@ -1018,7 +1022,7 @@ public void nameResolverReturnsEmptySubLists_optionallyAllowed() throws Exceptio Map lbConfig = actualAttrs.get(LoadBalancer.ATTR_LOAD_BALANCING_CONFIG); assertEquals(ImmutableMap.of("setting1", "high"), lbConfig); assertSame( - serviceConfig, actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)); + rawServiceConfig, actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)); // A no resolution retry assertEquals(0, timer.numPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER)); @@ -1099,10 +1103,8 @@ public void firstResolvedServerFailedToConnect() throws Exception { // Simulate name resolution results EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(resolvedAddrs); - inOrder.verify(mockLoadBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(Arrays.asList(addressGroup)) - .build()); + inOrder.verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); + assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); Subchannel subchannel = createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) @@ -1248,10 +1250,9 @@ public void allServersFailedToConnect() throws Exception { // Simulate name resolution results EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(resolvedAddrs); - inOrder.verify(mockLoadBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(Arrays.asList(addressGroup)) - .build()); + inOrder.verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); + assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); + Subchannel subchannel = createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); 
when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) @@ -2760,7 +2761,6 @@ public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exc .setAddresses(Collections.singletonList( new EquivalentAddressGroup( Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setAttributes(Attributes.EMPTY) .build(); nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult1); assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); @@ -2778,7 +2778,6 @@ public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exc .setAddresses(Collections.singletonList( new EquivalentAddressGroup( Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setAttributes(Attributes.EMPTY) .build(); nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2); assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); @@ -2800,11 +2799,16 @@ public void channelTracing_serviceConfigChange() throws Exception { Attributes.newBuilder() .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, new HashMap()) .build(); + ManagedChannelServiceConfig mcsc1 = createManagedChannelServiceConfig( + ImmutableMap.of(), + new PolicySelection( + mockLoadBalancerProvider, ImmutableMap.of("foo", "bar"), null)); ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() .setAddresses(Collections.singletonList( new EquivalentAddressGroup( Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) .setAttributes(attributes) + .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) .build(); nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult1); assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); @@ -2821,6 +2825,7 @@ public void channelTracing_serviceConfigChange() throws Exception { new EquivalentAddressGroup( Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) .setAttributes(attributes) + .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) 
.build(); nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2); assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); @@ -2838,6 +2843,7 @@ public void channelTracing_serviceConfigChange() throws Exception { new EquivalentAddressGroup( Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) .setAttributes(attributes) + .setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig.empty())) .build(); nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3); assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); @@ -3187,16 +3193,21 @@ public void retryBackoffThenChannelShutdown_retryShouldStillHappen_newCallShould name.put("service", "service"); methodConfig.put("name", Arrays.asList(name)); methodConfig.put("retryPolicy", retryPolicy); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("methodConfig", Arrays.asList(methodConfig)); + Map rawServiceConfig = new HashMap<>(); + rawServiceConfig.put("methodConfig", Arrays.asList(methodConfig)); Attributes attributesWithRetryPolicy = Attributes - .newBuilder().set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); + .newBuilder().set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig).build(); FakeNameResolverFactory nameResolverFactory = new FakeNameResolverFactory.Builder(expectedUri) .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) .build(); - nameResolverFactory.nextResolvedAttributes.set(attributesWithRetryPolicy); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); + channelBuilder.nameResolverFactory(nameResolverFactory); channelBuilder.executor(MoreExecutors.directExecutor()); channelBuilder.enableRetry(); @@ -3296,16 
+3307,21 @@ public void hedgingScheduledThenChannelShutdown_hedgeShouldStillHappen_newCallSh name.put("service", "service"); methodConfig.put("name", Arrays.asList(name)); methodConfig.put("hedgingPolicy", hedgingPolicy); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("methodConfig", Arrays.asList(methodConfig)); + Map rawServiceConfig = new HashMap<>(); + rawServiceConfig.put("methodConfig", Arrays.asList(methodConfig)); Attributes attributesWithRetryPolicy = Attributes - .newBuilder().set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); + .newBuilder().set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig).build(); FakeNameResolverFactory nameResolverFactory = new FakeNameResolverFactory.Builder(expectedUri) .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) .build(); - nameResolverFactory.nextResolvedAttributes.set(attributesWithRetryPolicy); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); + channelBuilder.nameResolverFactory(nameResolverFactory); channelBuilder.executor(MoreExecutors.directExecutor()); channelBuilder.enableRetry(); @@ -3388,6 +3404,8 @@ public void hedgingScheduledThenChannelShutdown_hedgeShouldStillHappen_newCallSh @Test public void badServiceConfigIsRecoverable() throws Exception { + final Map invalidServiceConfig = + parseConfig("{\"loadBalancingConfig\": [{\"kaboom\": {}}]}"); final List addresses = ImmutableList.of(new EquivalentAddressGroup(new SocketAddress() {})); final class FakeNameResolver extends NameResolver { @@ -3407,9 +3425,11 @@ public void start(Listener2 listener) { .setAttributes( Attributes.newBuilder() .set( - GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, - ImmutableMap.of("loadBalancingPolicy", "kaboom")) + 
GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, invalidServiceConfig) .build()) + .setServiceConfig( + ConfigOrError.fromError( + Status.INTERNAL.withDescription("kaboom is invalid"))) .build()); } @@ -3454,23 +3474,31 @@ protected ClientTransportFactory buildTransportFactory() { ListenableFuture future1 = ClientCalls.futureUnaryCall(call1, null); executor.runDueTasks(); try { - future1.get(); + future1.get(1, TimeUnit.SECONDS); Assert.fail(); } catch (ExecutionException e) { assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("kaboom"); } // ok the service config is bad, let's fix it. - + Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); + Object fakeLbConfig = new Object(); + PolicySelection lbConfigs = + new PolicySelection( + mockLoadBalancerProvider, rawServiceConfig, fakeLbConfig); + mockLoadBalancerProvider.parseLoadBalancingPolicyConfig(rawServiceConfig); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, lbConfigs); factory.resolver.listener.onResult( ResolutionResult.newBuilder() .setAddresses(addresses) .setAttributes( Attributes.newBuilder() .set( - GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, - ImmutableMap.of("loadBalancingPolicy", "round_robin")) + GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig) .build()) + .setServiceConfig(ConfigOrError.fromConfig(managedChannelServiceConfig)) .build()); ClientCall call2 = mychannel.newCall( @@ -3621,7 +3649,8 @@ public void nameResolverHelper_emptyConfigSucceeds() { retryEnabled, maxRetryAttemptsLimit, maxHedgedAttemptsLimit, - autoConfiguredLoadBalancerFactory); + autoConfiguredLoadBalancerFactory, + mock(ChannelLogger.class)); ConfigOrError coe = parser.parseServiceConfig(ImmutableMap.of()); @@ -3643,7 +3672,8 @@ public void nameResolverHelper_badConfigFails() { retryEnabled, maxRetryAttemptsLimit, maxHedgedAttemptsLimit, - autoConfiguredLoadBalancerFactory); + 
autoConfiguredLoadBalancerFactory, + mock(ChannelLogger.class)); ConfigOrError coe = parser.parseServiceConfig(ImmutableMap.of("methodConfig", "bogus")); @@ -3666,14 +3696,15 @@ public void nameResolverHelper_noConfigChosen() { retryEnabled, maxRetryAttemptsLimit, maxHedgedAttemptsLimit, - autoConfiguredLoadBalancerFactory); + autoConfiguredLoadBalancerFactory, + mock(ChannelLogger.class)); ConfigOrError coe = parser.parseServiceConfig(ImmutableMap.of("loadBalancingConfig", ImmutableList.of())); assertThat(coe.getError()).isNull(); ManagedChannelServiceConfig cfg = (ManagedChannelServiceConfig) coe.getConfig(); - assertThat(cfg.getLoadBalancingConfig()).isEqualTo(null); + assertThat(cfg.getLoadBalancingConfig()).isNull(); } @Test @@ -3686,15 +3717,15 @@ public void disableServiceConfigLookUp_noDefaultConfig() throws Exception { channelBuilder.nameResolverFactory(nameResolverFactory); channelBuilder.disableServiceConfigLookUp(); - Map serviceConfig = + Map rawServiceConfig = parseConfig("{\"methodConfig\":[{" + "\"name\":[{\"service\":\"SimpleService1\"}]," + "\"waitForReady\":true}]}"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); createChannel(); @@ -3703,7 +3734,7 @@ public void disableServiceConfigLookUp_noDefaultConfig() throws Exception { verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - 
assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)).isNull(); + assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)).isEmpty(); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); } finally { LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); @@ -3725,12 +3756,12 @@ public void disableServiceConfigLookUp_withDefaultConfig() throws Exception { + "\"waitForReady\":true}]}"); channelBuilder.defaultServiceConfig(defaultServiceConfig); - Map serviceConfig = new HashMap<>(); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + Map rawServiceConfig = new HashMap<>(); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); createChannel(); @@ -3757,15 +3788,15 @@ public void enableServiceConfigLookUp_noDefaultConfig() throws Exception { .setServers(ImmutableList.of(addressGroup)).build(); channelBuilder.nameResolverFactory(nameResolverFactory); - Map serviceConfig = + Map rawServiceConfig = parseConfig("{\"methodConfig\":[{" + "\"name\":[{\"service\":\"SimpleService1\"}]," + "\"waitForReady\":true}]}"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); 
createChannel(); ArgumentCaptor resultCaptor = @@ -3775,19 +3806,19 @@ public void enableServiceConfigLookUp_noDefaultConfig() throws Exception { Attributes actualAttrs = resultCaptor.getValue().getAttributes(); assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(serviceConfig); + .isEqualTo(rawServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); // new config - serviceConfig = + rawServiceConfig = parseConfig("{\"methodConfig\":[{" + "\"name\":[{\"service\":\"SimpleService1\"}]," + "\"waitForReady\":false}]}"); - serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); nameResolverFactory.allResolved(); resultCaptor = ArgumentCaptor.forClass(ResolvedAddresses.class); @@ -3795,7 +3826,7 @@ public void enableServiceConfigLookUp_noDefaultConfig() throws Exception { assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); actualAttrs = resultCaptor.getValue().getAttributes(); assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(serviceConfig); + .isEqualTo(rawServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); } finally { LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); @@ -3816,15 +3847,15 @@ public void enableServiceConfigLookUp_withDefaultConfig() throws Exception { + "\"waitForReady\":true}]}"); channelBuilder.defaultServiceConfig(defaultServiceConfig); - Map serviceConfig = + Map rawServiceConfig = parseConfig("{\"methodConfig\":[{" + 
"\"name\":[{\"service\":\"SimpleService2\"}]," + "\"waitForReady\":false}]}"); - Attributes serviceConfigAttrs = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); createChannel(); ArgumentCaptor resultCaptor = @@ -3833,7 +3864,7 @@ public void enableServiceConfigLookUp_withDefaultConfig() throws Exception { assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); Attributes actualAttrs = resultCaptor.getValue().getAttributes(); assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(serviceConfig); + .isEqualTo(rawServiceConfig); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); } finally { LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); @@ -3855,8 +3886,8 @@ public void enableServiceConfigLookUp_resolverReturnsNoConfig_withDefaultConfig( + "\"waitForReady\":true}]}"); channelBuilder.defaultServiceConfig(defaultServiceConfig); - Attributes serviceConfigAttrs = Attributes.EMPTY; - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + nameResolverFactory.nextRawServiceConfig.set(null); + nameResolverFactory.nextConfigOrError.set(null); createChannel(); ArgumentCaptor resultCaptor = @@ -3881,8 +3912,12 @@ public void enableServiceConfigLookUp_resolverReturnsNoConfig_noDefaultConfig() .setServers(ImmutableList.of(addressGroup)).build(); channelBuilder.nameResolverFactory(nameResolverFactory); - Attributes serviceConfigAttrs = Attributes.EMPTY; - nameResolverFactory.nextResolvedAttributes.set(serviceConfigAttrs); + Map 
rawServiceConfig = Collections.emptyMap(); + ManagedChannelServiceConfig managedChannelServiceConfig = + createManagedChannelServiceConfig(rawServiceConfig, null); + nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); + nameResolverFactory.nextConfigOrError.set( + ConfigOrError.fromConfig(managedChannelServiceConfig)); createChannel(); ArgumentCaptor resultCaptor = @@ -3890,7 +3925,7 @@ public void enableServiceConfigLookUp_resolverReturnsNoConfig_noDefaultConfig() verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)).isNull(); + assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)).isEmpty(); verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); } finally { LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); @@ -3983,10 +4018,9 @@ private static final class FakeNameResolverFactory extends NameResolver.Factory final List servers; final boolean resolvedAtStart; final Status error; - final ArrayList resolvers = new ArrayList<>(); - // The Attributes argument of the next invocation of listener.onAddresses(servers, attrs) - final AtomicReference nextResolvedAttributes = - new AtomicReference<>(Attributes.EMPTY); + final ArrayList resolvers = new ArrayList<>(); + final AtomicReference nextConfigOrError = new AtomicReference<>(); + final AtomicReference> nextRawServiceConfig = new AtomicReference<>(); FakeNameResolverFactory( URI expectedUri, @@ -4005,7 +4039,8 @@ public NameResolver newNameResolver(final URI targetUri, NameResolver.Args args) return null; } assertEquals(DEFAULT_PORT, args.getDefaultPort()); - FakeNameResolver resolver = new FakeNameResolver(error); + FakeNameResolverFactory.FakeNameResolver resolver = + new 
FakeNameResolverFactory.FakeNameResolver(error); resolvers.add(resolver); return resolver; } @@ -4016,7 +4051,7 @@ public String getDefaultScheme() { } void allResolved() { - for (FakeNameResolver resolver : resolvers) { + for (FakeNameResolverFactory.FakeNameResolver resolver : resolvers) { resolver.resolved(); } } @@ -4052,11 +4087,22 @@ void resolved() { listener.onError(error); return; } - listener.onResult( + ResolutionResult.Builder builder = ResolutionResult.newBuilder() - .setAddresses(servers) - .setAttributes(nextResolvedAttributes.get()) - .build()); + .setAddresses(servers); + ConfigOrError configOrError = nextConfigOrError.get(); + Map rawServiceConfig = nextRawServiceConfig.get(); + if (configOrError != null) { + builder.setServiceConfig(configOrError); + } + if (rawServiceConfig != null) { + builder.setAttributes( + Attributes.newBuilder() + .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig) + .build()); + } + + listener.onResult(builder.build()); } @Override public void shutdown() { @@ -4079,17 +4125,17 @@ static final class Builder { this.expectedUri = expectedUri; } - Builder setServers(List servers) { + FakeNameResolverFactory.Builder setServers(List servers) { this.servers = servers; return this; } - Builder setResolvedAtStart(boolean resolvedAtStart) { + FakeNameResolverFactory.Builder setResolvedAtStart(boolean resolvedAtStart) { this.resolvedAtStart = resolvedAtStart; return this; } - Builder setError(Status error) { + FakeNameResolverFactory.Builder setError(Status error) { this.error = error; return this; } @@ -4207,4 +4253,11 @@ public void run() { private static Map parseConfig(String json) throws Exception { return (Map) JsonParser.parse(json); } + + private static ManagedChannelServiceConfig createManagedChannelServiceConfig( + Map rawServiceConfig, PolicySelection policySelection) { + // Provides dummy variable for retry related params (not used in this test class) + return ManagedChannelServiceConfig + 
.fromServiceConfig(rawServiceConfig, true, 3, 4, policySelection); + } } diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java deleted file mode 100644 index bde0d08caa4..00000000000 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest2.java +++ /dev/null @@ -1,4263 +0,0 @@ -/* - * Copyright 2015 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.grpc.internal; - -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.truth.Truth.assertThat; -import static io.grpc.ConnectivityState.CONNECTING; -import static io.grpc.ConnectivityState.IDLE; -import static io.grpc.ConnectivityState.READY; -import static io.grpc.ConnectivityState.SHUTDOWN; -import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static junit.framework.TestCase.assertNotSame; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.AdditionalAnswers.delegatesTo; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.isA; -import static org.mockito.ArgumentMatchers.same; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Iterables; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.SettableFuture; -import io.grpc.Attributes; -import 
io.grpc.BinaryLog; -import io.grpc.CallCredentials; -import io.grpc.CallCredentials.RequestInfo; -import io.grpc.CallOptions; -import io.grpc.Channel; -import io.grpc.ChannelLogger; -import io.grpc.ClientCall; -import io.grpc.ClientInterceptor; -import io.grpc.ClientInterceptors; -import io.grpc.ClientStreamTracer; -import io.grpc.ConnectivityState; -import io.grpc.ConnectivityStateInfo; -import io.grpc.Context; -import io.grpc.EquivalentAddressGroup; -import io.grpc.IntegerMarshaller; -import io.grpc.InternalChannelz; -import io.grpc.InternalChannelz.ChannelStats; -import io.grpc.InternalChannelz.ChannelTrace; -import io.grpc.InternalInstrumented; -import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.CreateSubchannelArgs; -import io.grpc.LoadBalancer.Helper; -import io.grpc.LoadBalancer.PickResult; -import io.grpc.LoadBalancer.PickSubchannelArgs; -import io.grpc.LoadBalancer.ResolvedAddresses; -import io.grpc.LoadBalancer.Subchannel; -import io.grpc.LoadBalancer.SubchannelPicker; -import io.grpc.LoadBalancer.SubchannelStateListener; -import io.grpc.LoadBalancerProvider; -import io.grpc.LoadBalancerRegistry; -import io.grpc.ManagedChannel; -import io.grpc.Metadata; -import io.grpc.MethodDescriptor; -import io.grpc.MethodDescriptor.MethodType; -import io.grpc.NameResolver; -import io.grpc.NameResolver.ConfigOrError; -import io.grpc.NameResolver.ResolutionResult; -import io.grpc.NameResolverRegistry; -import io.grpc.ProxiedSocketAddress; -import io.grpc.ProxyDetector; -import io.grpc.SecurityLevel; -import io.grpc.ServerMethodDefinition; -import io.grpc.Status; -import io.grpc.Status.Code; -import io.grpc.StringMarshaller; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory2.PolicySelection; -import io.grpc.internal.ClientTransportFactory.ClientTransportOptions; -import io.grpc.internal.InternalSubchannel.TransportLogger; -import io.grpc.internal.ManagedChannelImpl2.ScParser; -import io.grpc.internal.TestUtils.MockClientTransportInfo; -import 
io.grpc.stub.ClientCalls; -import io.grpc.testing.TestMethodDescriptors; -import io.grpc.util.ForwardingSubchannel; -import java.io.IOException; -import java.net.SocketAddress; -import java.net.URI; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.logging.Handler; -import java.util.logging.Level; -import java.util.logging.LogRecord; -import java.util.logging.Logger; -import javax.annotation.Nullable; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; -import org.mockito.stubbing.Answer; - -/** Unit tests for {@link ManagedChannelImpl2}. 
*/ -@RunWith(JUnit4.class) -// TODO(creamsoup) remove backward compatible check when fully migrated -@SuppressWarnings("deprecation") -public class ManagedChannelImplTest2 { - private static final int DEFAULT_PORT = 447; - - private static final MethodDescriptor method = - MethodDescriptor.newBuilder() - .setType(MethodType.UNKNOWN) - .setFullMethodName("service/method") - .setRequestMarshaller(new StringMarshaller()) - .setResponseMarshaller(new IntegerMarshaller()) - .build(); - private static final Attributes.Key SUBCHANNEL_ATTR_KEY = - Attributes.Key.create("subchannel-attr-key"); - private static final long RECONNECT_BACKOFF_INTERVAL_NANOS = 10; - private static final String SERVICE_NAME = "fake.example.com"; - private static final String AUTHORITY = SERVICE_NAME; - private static final String USER_AGENT = "userAgent"; - private static final ClientTransportOptions clientTransportOptions = - new ClientTransportOptions() - .setAuthority(AUTHORITY) - .setUserAgent(USER_AGENT); - private static final String TARGET = "fake://" + SERVICE_NAME; - private static final String MOCK_POLICY_NAME = "mock_lb"; - private URI expectedUri; - private final SocketAddress socketAddress = - new SocketAddress() { - @Override - public String toString() { - return "test-addr"; - } - }; - private final SocketAddress socketAddress2 = - new SocketAddress() { - @Override - public String toString() { - return "test-addr"; - } - }; - private final EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(socketAddress); - private final EquivalentAddressGroup addressGroup2 = - new EquivalentAddressGroup(Arrays.asList(socketAddress, socketAddress2)); - private final FakeClock timer = new FakeClock(); - private final FakeClock executor = new FakeClock(); - private final FakeClock balancerRpcExecutor = new FakeClock(); - private static final FakeClock.TaskFilter NAME_RESOLVER_REFRESH_TASK_FILTER = - new FakeClock.TaskFilter() { - @Override - public boolean shouldAccept(Runnable command) 
{ - return command.toString().contains( - ManagedChannelImpl2.DelayedNameResolverRefresh.class.getName()); - } - }; - - private final InternalChannelz channelz = new InternalChannelz(); - - @Rule public final ExpectedException thrown = ExpectedException.none(); - @Rule public final MockitoRule mocks = MockitoJUnit.rule(); - - private ManagedChannelImpl2 channel; - private Helper helper; - @Captor - private ArgumentCaptor statusCaptor; - @Captor - private ArgumentCaptor callOptionsCaptor; - @Mock - private LoadBalancer mockLoadBalancer; - @Mock - private SubchannelStateListener subchannelStateListener; - private final LoadBalancerProvider mockLoadBalancerProvider = - mock(LoadBalancerProvider.class, delegatesTo(new LoadBalancerProvider() { - @Override - public LoadBalancer newLoadBalancer(Helper helper) { - return mockLoadBalancer; - } - - @Override - public boolean isAvailable() { - return true; - } - - @Override - public int getPriority() { - return 999; - } - - @Override - public String getPolicyName() { - return MOCK_POLICY_NAME; - } - })); - - @Captor - private ArgumentCaptor stateInfoCaptor; - @Mock - private SubchannelPicker mockPicker; - @Mock - private ClientTransportFactory mockTransportFactory; - @Mock - private ClientCall.Listener mockCallListener; - @Mock - private ClientCall.Listener mockCallListener2; - @Mock - private ClientCall.Listener mockCallListener3; - @Mock - private ClientCall.Listener mockCallListener4; - @Mock - private ClientCall.Listener mockCallListener5; - @Mock - private ObjectPool executorPool; - @Mock - private ObjectPool balancerRpcExecutorPool; - @Mock - private CallCredentials creds; - @Mock - private Executor offloadExecutor; - private ChannelBuilder channelBuilder; - private boolean requestConnection = true; - private BlockingQueue transports; - private boolean panicExpected; - @Captor - private ArgumentCaptor resolvedAddressCaptor; - - private ArgumentCaptor streamListenerCaptor = - 
ArgumentCaptor.forClass(ClientStreamListener.class); - - private void createChannel(ClientInterceptor... interceptors) { - checkState(channel == null); - - channel = new ManagedChannelImpl2( - channelBuilder, mockTransportFactory, new FakeBackoffPolicyProvider(), - balancerRpcExecutorPool, timer.getStopwatchSupplier(), Arrays.asList(interceptors), - timer.getTimeProvider()); - - if (requestConnection) { - int numExpectedTasks = 0; - - // Force-exit the initial idle-mode - channel.syncContext.execute(new Runnable() { - @Override - public void run() { - channel.exitIdleMode(); - } - }); - if (channelBuilder.idleTimeoutMillis != ManagedChannelImpl2.IDLE_TIMEOUT_MILLIS_DISABLE) { - numExpectedTasks += 1; - } - - if (getNameResolverRefresh() != null) { - numExpectedTasks += 1; - } - - assertEquals(numExpectedTasks, timer.numPendingTasks()); - - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - helper = helperCaptor.getValue(); - } - } - - @Before - public void setUp() throws Exception { - when(mockLoadBalancer.canHandleEmptyAddressListFromNameResolution()).thenCallRealMethod(); - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - expectedUri = new URI(TARGET); - transports = TestUtils.captureTransports(mockTransportFactory); - when(mockTransportFactory.getScheduledExecutorService()) - .thenReturn(timer.getScheduledExecutorService()); - when(executorPool.getObject()).thenReturn(executor.getScheduledExecutorService()); - when(balancerRpcExecutorPool.getObject()) - .thenReturn(balancerRpcExecutor.getScheduledExecutorService()); - - channelBuilder = - new ChannelBuilder() - .nameResolverFactory(new FakeNameResolverFactory.Builder(expectedUri).build()) - .defaultLoadBalancingPolicy(MOCK_POLICY_NAME) - .userAgent(USER_AGENT) - .idleTimeout( - AbstractManagedChannelImplBuilder.IDLE_MODE_MAX_TIMEOUT_DAYS, TimeUnit.DAYS) - .offloadExecutor(offloadExecutor); - 
channelBuilder.executorPool = executorPool; - channelBuilder.binlog = null; - channelBuilder.channelz = channelz; - } - - @After - public void allPendingTasksAreRun() throws Exception { - // The "never" verifications in the tests only hold up if all due tasks are done. - // As for timer, although there may be scheduled tasks in a future time, since we don't test - // any time-related behavior in this test suite, we only care the tasks that are due. This - // would ignore any time-sensitive tasks, e.g., back-off and the idle timer. - assertTrue(timer.getDueTasks() + " should be empty", timer.getDueTasks().isEmpty()); - assertEquals(executor.getPendingTasks() + " should be empty", 0, executor.numPendingTasks()); - if (channel != null) { - if (!panicExpected) { - assertFalse(channel.isInPanicMode()); - } - channel.shutdownNow(); - channel = null; - } - } - - @After - public void cleanUp() { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - - @Deprecated - @Test - public void createSubchannel_old_outsideSynchronizationContextShouldLogWarning() { - createChannel(); - final AtomicReference logRef = new AtomicReference<>(); - Handler handler = new Handler() { - @Override - public void publish(LogRecord record) { - logRef.set(record); - } - - @Override - public void flush() { - } - - @Override - public void close() throws SecurityException { - } - }; - Logger logger = Logger.getLogger(ManagedChannelImpl2.class.getName()); - try { - logger.addHandler(handler); - helper.createSubchannel(addressGroup, Attributes.EMPTY); - LogRecord record = logRef.get(); - assertThat(record.getLevel()).isEqualTo(Level.WARNING); - assertThat(record.getMessage()).contains( - "createSubchannel() should be called from SynchronizationContext"); - assertThat(record.getThrown()).isInstanceOf(IllegalStateException.class); - } finally { - logger.removeHandler(handler); - } - } - - @Deprecated - @Test - public void 
createSubchannel_old_insideSyncContextFollowedByRequestConnectionShouldSucceed() { - createChannel(); - final AtomicReference error = new AtomicReference<>(); - helper.getSynchronizationContext().execute(new Runnable() { - @Override - public void run() { - try { - Subchannel subchannel = helper.createSubchannel(addressGroup, Attributes.EMPTY); - subchannel.requestConnection(); - } catch (Throwable e) { - error.set(e); - } - } - }); - assertThat(error.get()).isNull(); - } - - @Deprecated - @Test - @SuppressWarnings("deprecation") - public void createSubchannel_old_propagateSubchannelStatesToOldApi() { - createChannel(); - - Subchannel subchannel = helper.createSubchannel(addressGroup, Attributes.EMPTY); - subchannel.requestConnection(); - - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - verify(mockLoadBalancer).handleSubchannelState( - same(subchannel), eq(ConnectivityStateInfo.forNonError(CONNECTING))); - - MockClientTransportInfo transportInfo = transports.poll(); - transportInfo.listener.transportReady(); - - verify(mockLoadBalancer).handleSubchannelState( - same(subchannel), eq(ConnectivityStateInfo.forNonError(READY))); - - channel.shutdown(); - verify(mockLoadBalancer).shutdown(); - subchannel.shutdown(); - - verify(mockLoadBalancer, atLeast(0)).canHandleEmptyAddressListFromNameResolution(); - verify(mockLoadBalancer, atLeast(0)).handleNameResolutionError(any(Status.class)); - // handleSubchannelState() should not be called after shutdown() - verifyNoMoreInteractions(mockLoadBalancer); - } - - @Test - public void createSubchannel_outsideSynchronizationContextShouldThrow() { - createChannel(); - try { - helper.createSubchannel(CreateSubchannelArgs.newBuilder() - .setAddresses(addressGroup) - .build()); - fail("Should throw"); - } catch (IllegalStateException e) { - assertThat(e).hasMessageThat().isEqualTo("Not called from the SynchronizationContext"); - } - } - - 
@Test - public void idleModeDisabled() { - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build()); - createChannel(); - - // In this test suite, the channel is always created with idle mode disabled. - // No task is scheduled to enter idle mode - assertEquals(0, timer.numPendingTasks()); - assertEquals(0, executor.numPendingTasks()); - } - - @Test - public void immediateDeadlineExceeded() { - createChannel(); - ClientCall call = - channel.newCall(method, CallOptions.DEFAULT.withDeadlineAfter(0, TimeUnit.NANOSECONDS)); - call.start(mockCallListener, new Metadata()); - assertEquals(1, executor.runDueTasks()); - - verify(mockCallListener).onClose(statusCaptor.capture(), any(Metadata.class)); - Status status = statusCaptor.getValue(); - assertSame(Status.DEADLINE_EXCEEDED.getCode(), status.getCode()); - } - - @Test - public void shutdownWithNoTransportsEverCreated() { - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build()); - createChannel(); - verify(executorPool).getObject(); - verify(executorPool, never()).returnObject(any()); - channel.shutdown(); - assertTrue(channel.isShutdown()); - assertTrue(channel.isTerminated()); - verify(executorPool).returnObject(executor.getScheduledExecutorService()); - } - - @Test - public void channelzMembership() throws Exception { - createChannel(); - assertNotNull(channelz.getRootChannel(channel.getLogId().getId())); - assertFalse(channelz.containsSubchannel(channel.getLogId())); - channel.shutdownNow(); - channel.awaitTermination(5, TimeUnit.SECONDS); - assertNull(channelz.getRootChannel(channel.getLogId().getId())); - assertFalse(channelz.containsSubchannel(channel.getLogId())); - } - - @Test - public void channelzMembership_subchannel() throws Exception { - createChannel(); - 
assertNotNull(channelz.getRootChannel(channel.getLogId().getId())); - - AbstractSubchannel subchannel = - (AbstractSubchannel) createSubchannelSafely( - helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - // subchannels are not root channels - assertNull( - channelz.getRootChannel(subchannel.getInstrumentedInternalSubchannel().getLogId().getId())); - assertTrue( - channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId())); - assertThat(getStats(channel).subchannels) - .containsExactly(subchannel.getInstrumentedInternalSubchannel()); - - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - assertNotNull(transportInfo); - assertTrue(channelz.containsClientSocket(transportInfo.transport.getLogId())); - transportInfo.listener.transportReady(); - - // terminate transport - transportInfo.listener.transportShutdown(Status.CANCELLED); - transportInfo.listener.transportTerminated(); - assertFalse(channelz.containsClientSocket(transportInfo.transport.getLogId())); - - // terminate subchannel - assertTrue( - channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId())); - shutdownSafely(helper, subchannel); - timer.forwardTime(ManagedChannelImpl2.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS); - timer.runDueTasks(); - assertFalse( - channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId())); - assertThat(getStats(channel).subchannels).isEmpty(); - - // channel still appears - assertNotNull(channelz.getRootChannel(channel.getLogId().getId())); - } - - @Test - public void channelzMembership_oob() throws Exception { - createChannel(); - OobChannel oob = (OobChannel) helper.createOobChannel(addressGroup, AUTHORITY); - // oob channels are not root channels - assertNull(channelz.getRootChannel(oob.getLogId().getId())); - assertTrue(channelz.containsSubchannel(oob.getLogId())); - 
assertThat(getStats(channel).subchannels).containsExactly(oob); - assertTrue(channelz.containsSubchannel(oob.getLogId())); - - AbstractSubchannel subchannel = (AbstractSubchannel) oob.getSubchannel(); - assertTrue( - channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId())); - assertThat(getStats(oob).subchannels) - .containsExactly(subchannel.getInstrumentedInternalSubchannel()); - assertTrue( - channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId())); - - oob.getSubchannel().requestConnection(); - MockClientTransportInfo transportInfo = transports.poll(); - assertNotNull(transportInfo); - assertTrue(channelz.containsClientSocket(transportInfo.transport.getLogId())); - - // terminate transport - transportInfo.listener.transportShutdown(Status.INTERNAL); - transportInfo.listener.transportTerminated(); - assertFalse(channelz.containsClientSocket(transportInfo.transport.getLogId())); - - // terminate oobchannel - oob.shutdown(); - assertFalse(channelz.containsSubchannel(oob.getLogId())); - assertThat(getStats(channel).subchannels).isEmpty(); - assertFalse( - channelz.containsSubchannel(subchannel.getInstrumentedInternalSubchannel().getLogId())); - - // channel still appears - assertNotNull(channelz.getRootChannel(channel.getLogId().getId())); - } - - @Test - public void callsAndShutdown() { - subtestCallsAndShutdown(false, false); - } - - @Test - public void callsAndShutdownNow() { - subtestCallsAndShutdown(true, false); - } - - /** Make sure shutdownNow() after shutdown() has an effect. 
*/ - @Test - public void callsAndShutdownAndShutdownNow() { - subtestCallsAndShutdown(false, true); - } - - private void subtestCallsAndShutdown(boolean shutdownNow, boolean shutdownNowAfterShutdown) { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - verify(executorPool).getObject(); - ClientStream mockStream = mock(ClientStream.class); - ClientStream mockStream2 = mock(ClientStream.class); - Metadata headers = new Metadata(); - Metadata headers2 = new Metadata(); - - // Configure the picker so that first RPC goes to delayed transport, and second RPC goes to - // real transport. - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - verify(mockTransport).start(any(ManagedClientTransport.Listener.class)); - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), same(headers), same(CallOptions.DEFAULT))) - .thenReturn(mockStream); - when(mockTransport.newStream(same(method), same(headers2), same(CallOptions.DEFAULT))) - .thenReturn(mockStream2); - transportListener.transportReady(); - when(mockPicker.pickSubchannel( - new PickSubchannelArgsImpl(method, headers, CallOptions.DEFAULT))).thenReturn( - PickResult.withNoResult()); - when(mockPicker.pickSubchannel( - new PickSubchannelArgsImpl(method, headers2, CallOptions.DEFAULT))).thenReturn( - PickResult.withSubchannel(subchannel)); - updateBalancingStateSafely(helper, READY, mockPicker); - - // First RPC, will be pending - ClientCall call = 
channel.newCall(method, CallOptions.DEFAULT); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - call.start(mockCallListener, headers); - - verify(mockTransport, never()) - .newStream(same(method), same(headers), same(CallOptions.DEFAULT)); - - // Second RPC, will be assigned to the real transport - ClientCall call2 = channel.newCall(method, CallOptions.DEFAULT); - call2.start(mockCallListener2, headers2); - verify(mockTransport).newStream(same(method), same(headers2), same(CallOptions.DEFAULT)); - verify(mockTransport).newStream(same(method), same(headers2), same(CallOptions.DEFAULT)); - verify(mockStream2).start(any(ClientStreamListener.class)); - - // Shutdown - if (shutdownNow) { - channel.shutdownNow(); - } else { - channel.shutdown(); - if (shutdownNowAfterShutdown) { - channel.shutdownNow(); - shutdownNow = true; - } - } - assertTrue(channel.isShutdown()); - assertFalse(channel.isTerminated()); - assertThat(nameResolverFactory.resolvers).hasSize(1); - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - - // Further calls should fail without going to the transport - ClientCall call3 = channel.newCall(method, CallOptions.DEFAULT); - call3.start(mockCallListener3, headers2); - timer.runDueTasks(); - executor.runDueTasks(); - - verify(mockCallListener3).onClose(statusCaptor.capture(), any(Metadata.class)); - assertSame(Status.Code.UNAVAILABLE, statusCaptor.getValue().getCode()); - - if (shutdownNow) { - // LoadBalancer and NameResolver are shut down as soon as delayed transport is terminated. - verify(mockLoadBalancer).shutdown(); - assertTrue(nameResolverFactory.resolvers.get(0).shutdown); - // call should have been aborted by delayed transport - executor.runDueTasks(); - verify(mockCallListener).onClose(same(ManagedChannelImpl2.SHUTDOWN_NOW_STATUS), - any(Metadata.class)); - } else { - // LoadBalancer and NameResolver are still running. 
- verify(mockLoadBalancer, never()).shutdown(); - assertFalse(nameResolverFactory.resolvers.get(0).shutdown); - // call and call2 are still alive, and can still be assigned to a real transport - SubchannelPicker picker2 = mock(SubchannelPicker.class); - when(picker2.pickSubchannel(new PickSubchannelArgsImpl(method, headers, CallOptions.DEFAULT))) - .thenReturn(PickResult.withSubchannel(subchannel)); - updateBalancingStateSafely(helper, READY, picker2); - executor.runDueTasks(); - verify(mockTransport).newStream(same(method), same(headers), same(CallOptions.DEFAULT)); - verify(mockStream).start(any(ClientStreamListener.class)); - } - - // After call is moved out of delayed transport, LoadBalancer, NameResolver and the transports - // will be shutdown. - verify(mockLoadBalancer).shutdown(); - assertTrue(nameResolverFactory.resolvers.get(0).shutdown); - - if (shutdownNow) { - // Channel shutdownNow() all subchannels after shutting down LoadBalancer - verify(mockTransport).shutdownNow(ManagedChannelImpl2.SHUTDOWN_NOW_STATUS); - } else { - verify(mockTransport, never()).shutdownNow(any(Status.class)); - } - // LoadBalancer should shutdown the subchannel - shutdownSafely(helper, subchannel); - if (shutdownNow) { - verify(mockTransport).shutdown(same(ManagedChannelImpl2.SHUTDOWN_NOW_STATUS)); - } else { - verify(mockTransport).shutdown(same(ManagedChannelImpl2.SHUTDOWN_STATUS)); - } - - // Killing the remaining real transport will terminate the channel - transportListener.transportShutdown(Status.UNAVAILABLE); - assertFalse(channel.isTerminated()); - verify(executorPool, never()).returnObject(any()); - transportListener.transportTerminated(); - assertTrue(channel.isTerminated()); - verify(executorPool).returnObject(executor.getScheduledExecutorService()); - verifyNoMoreInteractions(balancerRpcExecutorPool); - - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - 
verify(mockTransportFactory).close(); - verify(mockTransport, atLeast(0)).getLogId(); - verifyNoMoreInteractions(mockTransport); - } - - @Test - public void noMoreCallbackAfterLoadBalancerShutdown() { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - Status resolutionError = Status.UNAVAILABLE.withDescription("Resolution failed"); - createChannel(); - - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); - assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); - - SubchannelStateListener stateListener1 = mock(SubchannelStateListener.class); - SubchannelStateListener stateListener2 = mock(SubchannelStateListener.class); - Subchannel subchannel1 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener1); - Subchannel subchannel2 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, stateListener2); - requestConnectionSafely(helper, subchannel1); - requestConnectionSafely(helper, subchannel2); - verify(mockTransportFactory, times(2)) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo1 = transports.poll(); - MockClientTransportInfo transportInfo2 = transports.poll(); - - // LoadBalancer receives all sorts of callbacks - transportInfo1.listener.transportReady(); - - verify(stateListener1, times(2)).onSubchannelState(stateInfoCaptor.capture()); - assertSame(CONNECTING, stateInfoCaptor.getAllValues().get(0).getState()); - assertSame(READY, stateInfoCaptor.getAllValues().get(1).getState()); - - 
verify(stateListener2).onSubchannelState(stateInfoCaptor.capture()); - assertSame(CONNECTING, stateInfoCaptor.getValue().getState()); - - resolver.listener.onError(resolutionError); - verify(mockLoadBalancer).handleNameResolutionError(resolutionError); - - verifyNoMoreInteractions(mockLoadBalancer); - - channel.shutdown(); - verify(mockLoadBalancer).shutdown(); - verifyNoMoreInteractions(stateListener1, stateListener2); - - // LoadBalancer will normally shutdown all subchannels - subchannel1.shutdown(); - subchannel2.shutdown(); - - // Since subchannels are shutdown, SubchannelStateListeners will only get SHUTDOWN regardless of - // the transport states. - transportInfo1.listener.transportShutdown(Status.UNAVAILABLE); - transportInfo2.listener.transportReady(); - verify(stateListener1).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); - verify(stateListener2).onSubchannelState(ConnectivityStateInfo.forNonError(SHUTDOWN)); - verifyNoMoreInteractions(stateListener1, stateListener2); - - // No more callback should be delivered to LoadBalancer after it's shut down - resolver.listener.onError(resolutionError); - resolver.resolved(); - verifyNoMoreInteractions(mockLoadBalancer); - } - - @Test - public void interceptor() throws Exception { - final AtomicLong atomic = new AtomicLong(); - ClientInterceptor interceptor = new ClientInterceptor() { - @Override - public ClientCall interceptCall( - MethodDescriptor method, CallOptions callOptions, - Channel next) { - atomic.set(1); - return next.newCall(method, callOptions); - } - }; - createChannel(interceptor); - assertNotNull(channel.newCall(method, CallOptions.DEFAULT)); - assertEquals(1, atomic.get()); - } - - @Test - public void callOptionsExecutor() { - Metadata headers = new Metadata(); - ClientStream mockStream = mock(ClientStream.class); - FakeClock callExecutor = new FakeClock(); - createChannel(); - - // Start a call with a call executor - CallOptions options = - 
CallOptions.DEFAULT.withExecutor(callExecutor.getScheduledExecutorService()); - ClientCall call = channel.newCall(method, options); - call.start(mockCallListener, headers); - - // Make the transport available - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - requestConnectionSafely(helper, subchannel); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), same(headers), any(CallOptions.class))) - .thenReturn(mockStream); - transportListener.transportReady(); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - assertEquals(0, callExecutor.numPendingTasks()); - updateBalancingStateSafely(helper, READY, mockPicker); - - // Real streams are started in the call executor if they were previously buffered. 
- assertEquals(1, callExecutor.runDueTasks()); - verify(mockTransport).newStream(same(method), same(headers), same(options)); - verify(mockStream).start(streamListenerCaptor.capture()); - - // Call listener callbacks are also run in the call executor - ClientStreamListener streamListener = streamListenerCaptor.getValue(); - Metadata trailers = new Metadata(); - assertEquals(0, callExecutor.numPendingTasks()); - streamListener.closed(Status.CANCELLED, trailers); - verify(mockCallListener, never()).onClose(same(Status.CANCELLED), same(trailers)); - assertEquals(1, callExecutor.runDueTasks()); - verify(mockCallListener).onClose(same(Status.CANCELLED), same(trailers)); - - - transportListener.transportShutdown(Status.UNAVAILABLE); - transportListener.transportTerminated(); - - // Clean up as much as possible to allow the channel to terminate. - shutdownSafely(helper, subchannel); - timer.forwardNanos( - TimeUnit.SECONDS.toNanos(ManagedChannelImpl2.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS)); - } - - @Test - public void nameResolutionFailed() { - Status error = Status.UNAVAILABLE.withCause(new Throwable("fake name resolution error")); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .setError(error) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - // Name resolution is started as soon as channel is created. 
- createChannel(); - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); - verify(mockLoadBalancer).handleNameResolutionError(same(error)); - assertEquals(1, timer.numPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER)); - - timer.forwardNanos(RECONNECT_BACKOFF_INTERVAL_NANOS - 1); - assertEquals(0, resolver.refreshCalled); - - timer.forwardNanos(1); - assertEquals(1, resolver.refreshCalled); - verify(mockLoadBalancer, times(2)).handleNameResolutionError(same(error)); - - // Verify an additional name resolution failure does not schedule another timer - resolver.refresh(); - verify(mockLoadBalancer, times(3)).handleNameResolutionError(same(error)); - assertEquals(1, timer.numPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER)); - - // Allow the next refresh attempt to succeed - resolver.error = null; - - // For the second attempt, the backoff should occur at RECONNECT_BACKOFF_INTERVAL_NANOS * 2 - timer.forwardNanos(RECONNECT_BACKOFF_INTERVAL_NANOS * 2 - 1); - assertEquals(2, resolver.refreshCalled); - timer.forwardNanos(1); - assertEquals(3, resolver.refreshCalled); - assertEquals(0, timer.numPendingTasks()); - - // Verify that the successful resolution reset the backoff policy - resolver.listener.onError(error); - timer.forwardNanos(RECONNECT_BACKOFF_INTERVAL_NANOS - 1); - assertEquals(3, resolver.refreshCalled); - timer.forwardNanos(1); - assertEquals(4, resolver.refreshCalled); - assertEquals(0, timer.numPendingTasks()); - } - - @Test - public void nameResolutionFailed_delayedTransportShutdownCancelsBackoff() { - Status error = Status.UNAVAILABLE.withCause(new Throwable("fake name resolution error")); - - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setError(error).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - // Name resolution is started as soon as channel is created. 
- createChannel(); - verify(mockLoadBalancer).handleNameResolutionError(same(error)); - - FakeClock.ScheduledTask nameResolverBackoff = getNameResolverRefresh(); - assertNotNull(nameResolverBackoff); - assertFalse(nameResolverBackoff.isCancelled()); - - // Add a pending call to the delayed transport - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - Metadata headers = new Metadata(); - call.start(mockCallListener, headers); - - // The pending call on the delayed transport stops the name resolver backoff from cancelling - channel.shutdown(); - assertFalse(nameResolverBackoff.isCancelled()); - - // Notify that a subchannel is ready, which drains the delayed transport - SubchannelPicker picker = mock(SubchannelPicker.class); - Status status = Status.UNAVAILABLE.withDescription("for test"); - when(picker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withDrop(status)); - updateBalancingStateSafely(helper, READY, picker); - executor.runDueTasks(); - verify(mockCallListener).onClose(same(status), any(Metadata.class)); - - assertTrue(nameResolverBackoff.isCancelled()); - } - - @Test - public void nameResolverReturnsEmptySubLists_becomeErrorByDefault() throws Exception { - String errorDescription = "NameResolver returned no usable address"; - - // Pass a FakeNameResolverFactory with an empty list and LB config - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"mock_lb\": { \"setting1\": \"high\" } } ] }"); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextConfigOrError.set(ConfigOrError.fromConfig(parsedServiceConfig)); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - // LoadBalancer received the error - 
verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - verify(mockLoadBalancer).handleNameResolutionError(statusCaptor.capture()); - Status status = statusCaptor.getValue(); - assertSame(Status.Code.UNAVAILABLE, status.getCode()); - assertThat(status.getDescription()).startsWith(errorDescription); - - // A resolution retry has been scheduled - assertEquals(1, timer.numPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER)); - } - - @Test - public void nameResolverReturnsEmptySubLists_optionallyAllowed() throws Exception { - when(mockLoadBalancer.canHandleEmptyAddressListFromNameResolution()).thenReturn(true); - - // Pass a FakeNameResolverFactory with an empty list and LB config - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - String rawLbConfig = "{ \"setting1\": \"high\" }"; - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"mock_lb\": " + rawLbConfig + " } ] }"); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig( - rawServiceConfig, - new PolicySelection( - mockLoadBalancerProvider, - parseConfig(rawLbConfig), - new Object())); - nameResolverFactory.nextConfigOrError.set(ConfigOrError.fromConfig(parsedServiceConfig)); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - // LoadBalancer received the empty list and the LB config - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).isEmpty(); - Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - Map lbConfig = actualAttrs.get(LoadBalancer.ATTR_LOAD_BALANCING_CONFIG); - assertEquals(ImmutableMap.of("setting1", "high"), lbConfig); - assertSame( - 
rawServiceConfig, actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)); - - // A no resolution retry - assertEquals(0, timer.numPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER)); - } - - @Test - public void loadBalancerThrowsInHandleResolvedAddresses() { - RuntimeException ex = new RuntimeException("simulated"); - // Delay the success of name resolution until allResolved() is called - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setResolvedAtStart(false) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - doThrow(ex).when(mockLoadBalancer).handleResolvedAddresses(any(ResolvedAddresses.class)); - - // NameResolver returns addresses. - nameResolverFactory.allResolved(); - - // Exception thrown from balancer is caught by ChannelExecutor, making channel enter panic mode. - verifyPanicMode(ex); - } - - @Test - public void nameResolvedAfterChannelShutdown() { - // Delay the success of name resolution until allResolved() is called. - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - channel.shutdown(); - - assertTrue(channel.isShutdown()); - assertTrue(channel.isTerminated()); - verify(mockLoadBalancer).shutdown(); - // Name resolved after the channel is shut down, which is possible if the name resolution takes - // time and is not cancellable. The resolved address will be dropped. - nameResolverFactory.allResolved(); - verifyNoMoreInteractions(mockLoadBalancer); - } - - /** - * Verify that if the first resolved address points to a server that cannot be connected, the call - * will end up with the second address which works. 
- */ - @Test - public void firstResolvedServerFailedToConnect() throws Exception { - final SocketAddress goodAddress = new SocketAddress() { - @Override public String toString() { - return "goodAddress"; - } - }; - final SocketAddress badAddress = new SocketAddress() { - @Override public String toString() { - return "badAddress"; - } - }; - InOrder inOrder = inOrder(mockLoadBalancer, subchannelStateListener); - - List resolvedAddrs = Arrays.asList(badAddress, goodAddress); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(resolvedAddrs))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - // Start the call - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - Metadata headers = new Metadata(); - call.start(mockCallListener, headers); - executor.runDueTasks(); - - // Simulate name resolution results - EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(resolvedAddrs); - inOrder.verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); - assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - requestConnectionSafely(helper, subchannel); - inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture()); - assertEquals(CONNECTING, stateInfoCaptor.getValue().getState()); - - // The channel will starts with the first address (badAddress) - verify(mockTransportFactory) - .newClientTransport( - same(badAddress), any(ClientTransportOptions.class), any(ChannelLogger.class)); - verify(mockTransportFactory, times(0)) - .newClientTransport( - same(goodAddress), 
any(ClientTransportOptions.class), any(ChannelLogger.class)); - - MockClientTransportInfo badTransportInfo = transports.poll(); - // Which failed to connect - badTransportInfo.listener.transportShutdown(Status.UNAVAILABLE); - inOrder.verifyNoMoreInteractions(); - - // The channel then try the second address (goodAddress) - verify(mockTransportFactory) - .newClientTransport( - same(goodAddress), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo goodTransportInfo = transports.poll(); - when(goodTransportInfo.transport.newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mock(ClientStream.class)); - - goodTransportInfo.listener.transportReady(); - inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture()); - assertEquals(READY, stateInfoCaptor.getValue().getState()); - - // A typical LoadBalancer will call this once the subchannel becomes READY - updateBalancingStateSafely(helper, READY, mockPicker); - // Delayed transport uses the app executor to create real streams. - executor.runDueTasks(); - - verify(goodTransportInfo.transport).newStream(same(method), same(headers), - same(CallOptions.DEFAULT)); - // The bad transport was never used. 
- verify(badTransportInfo.transport, times(0)).newStream(any(MethodDescriptor.class), - any(Metadata.class), any(CallOptions.class)); - } - - @Test - public void failFastRpcFailFromErrorFromBalancer() { - subtestFailRpcFromBalancer(false, false, true); - } - - @Test - public void failFastRpcFailFromDropFromBalancer() { - subtestFailRpcFromBalancer(false, true, true); - } - - @Test - public void waitForReadyRpcImmuneFromErrorFromBalancer() { - subtestFailRpcFromBalancer(true, false, false); - } - - @Test - public void waitForReadyRpcFailFromDropFromBalancer() { - subtestFailRpcFromBalancer(true, true, true); - } - - private void subtestFailRpcFromBalancer(boolean waitForReady, boolean drop, boolean shouldFail) { - createChannel(); - - // This call will be buffered by the channel, thus involve delayed transport - CallOptions callOptions = CallOptions.DEFAULT; - if (waitForReady) { - callOptions = callOptions.withWaitForReady(); - } else { - callOptions = callOptions.withoutWaitForReady(); - } - ClientCall call1 = channel.newCall(method, callOptions); - call1.start(mockCallListener, new Metadata()); - - SubchannelPicker picker = mock(SubchannelPicker.class); - Status status = Status.UNAVAILABLE.withDescription("for test"); - - when(picker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(drop ? 
PickResult.withDrop(status) : PickResult.withError(status)); - updateBalancingStateSafely(helper, READY, picker); - - executor.runDueTasks(); - if (shouldFail) { - verify(mockCallListener).onClose(same(status), any(Metadata.class)); - } else { - verifyZeroInteractions(mockCallListener); - } - - // This call doesn't involve delayed transport - ClientCall call2 = channel.newCall(method, callOptions); - call2.start(mockCallListener2, new Metadata()); - - executor.runDueTasks(); - if (shouldFail) { - verify(mockCallListener2).onClose(same(status), any(Metadata.class)); - } else { - verifyZeroInteractions(mockCallListener2); - } - } - - /** - * Verify that if all resolved addresses failed to connect, a fail-fast call will fail, while a - * wait-for-ready call will still be buffered. - */ - @Test - public void allServersFailedToConnect() throws Exception { - final SocketAddress addr1 = new SocketAddress() { - @Override public String toString() { - return "addr1"; - } - }; - final SocketAddress addr2 = new SocketAddress() { - @Override public String toString() { - return "addr2"; - } - }; - InOrder inOrder = inOrder(mockLoadBalancer, subchannelStateListener); - - List resolvedAddrs = Arrays.asList(addr1, addr2); - - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(resolvedAddrs))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - // Start a wait-for-ready call - ClientCall call = - channel.newCall(method, CallOptions.DEFAULT.withWaitForReady()); - Metadata headers = new Metadata(); - call.start(mockCallListener, headers); - // ... 
and a fail-fast call - ClientCall call2 = - channel.newCall(method, CallOptions.DEFAULT.withoutWaitForReady()); - call2.start(mockCallListener2, headers); - executor.runDueTasks(); - - // Simulate name resolution results - EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(resolvedAddrs); - inOrder.verify(mockLoadBalancer).handleResolvedAddresses(resolvedAddressCaptor.capture()); - assertThat(resolvedAddressCaptor.getValue().getAddresses()).containsExactly(addressGroup); - - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - requestConnectionSafely(helper, subchannel); - - inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture()); - assertEquals(CONNECTING, stateInfoCaptor.getValue().getState()); - - // Connecting to server1, which will fail - verify(mockTransportFactory) - .newClientTransport( - same(addr1), any(ClientTransportOptions.class), any(ChannelLogger.class)); - verify(mockTransportFactory, times(0)) - .newClientTransport( - same(addr2), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo1 = transports.poll(); - transportInfo1.listener.transportShutdown(Status.UNAVAILABLE); - - // Connecting to server2, which will fail too - verify(mockTransportFactory) - .newClientTransport( - same(addr2), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo2 = transports.poll(); - Status server2Error = Status.UNAVAILABLE.withDescription("Server2 failed to connect"); - transportInfo2.listener.transportShutdown(server2Error); - - // ... which makes the subchannel enter TRANSIENT_FAILURE. The last error Status is propagated - // to LoadBalancer. 
- inOrder.verify(subchannelStateListener).onSubchannelState(stateInfoCaptor.capture()); - assertEquals(TRANSIENT_FAILURE, stateInfoCaptor.getValue().getState()); - assertSame(server2Error, stateInfoCaptor.getValue().getStatus()); - - // A typical LoadBalancer would create a picker with error - SubchannelPicker picker2 = mock(SubchannelPicker.class); - when(picker2.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withError(server2Error)); - updateBalancingStateSafely(helper, TRANSIENT_FAILURE, picker2); - executor.runDueTasks(); - - // ... which fails the fail-fast call - verify(mockCallListener2).onClose(same(server2Error), any(Metadata.class)); - // ... while the wait-for-ready call stays - verifyNoMoreInteractions(mockCallListener); - // No real stream was ever created - verify(transportInfo1.transport, times(0)) - .newStream(any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - verify(transportInfo2.transport, times(0)) - .newStream(any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - } - - @Test - public void subchannels() { - createChannel(); - - // createSubchannel() always return a new Subchannel - Attributes attrs1 = Attributes.newBuilder().set(SUBCHANNEL_ATTR_KEY, "attr1").build(); - Attributes attrs2 = Attributes.newBuilder().set(SUBCHANNEL_ATTR_KEY, "attr2").build(); - SubchannelStateListener listener1 = mock(SubchannelStateListener.class); - SubchannelStateListener listener2 = mock(SubchannelStateListener.class); - final Subchannel sub1 = createSubchannelSafely(helper, addressGroup, attrs1, listener1); - final Subchannel sub2 = createSubchannelSafely(helper, addressGroup, attrs2, listener2); - assertNotSame(sub1, sub2); - assertNotSame(attrs1, attrs2); - assertSame(attrs1, sub1.getAttributes()); - assertSame(attrs2, sub2.getAttributes()); - - final AtomicBoolean snippetPassed = new AtomicBoolean(false); - helper.getSynchronizationContext().execute(new Runnable() { - @Override - public 
void run() { - // getAddresses() must be called from sync context - assertSame(addressGroup, sub1.getAddresses()); - assertSame(addressGroup, sub2.getAddresses()); - snippetPassed.set(true); - } - }); - assertThat(snippetPassed.get()).isTrue(); - - // requestConnection() - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), - any(ClientTransportOptions.class), - any(TransportLogger.class)); - requestConnectionSafely(helper, sub1); - verify(mockTransportFactory) - .newClientTransport( - eq(socketAddress), - eq(clientTransportOptions), - isA(TransportLogger.class)); - MockClientTransportInfo transportInfo1 = transports.poll(); - assertNotNull(transportInfo1); - - requestConnectionSafely(helper, sub2); - verify(mockTransportFactory, times(2)) - .newClientTransport( - eq(socketAddress), - eq(clientTransportOptions), - isA(TransportLogger.class)); - MockClientTransportInfo transportInfo2 = transports.poll(); - assertNotNull(transportInfo2); - - requestConnectionSafely(helper, sub1); - requestConnectionSafely(helper, sub2); - // The subchannel doesn't matter since this isn't called - verify(mockTransportFactory, times(2)) - .newClientTransport( - eq(socketAddress), eq(clientTransportOptions), isA(TransportLogger.class)); - - // updateAddresses() - updateAddressesSafely(helper, sub1, Collections.singletonList(addressGroup2)); - assertThat(((InternalSubchannel) sub1.getInternalSubchannel()).getAddressGroups()) - .isEqualTo(Collections.singletonList(addressGroup2)); - - // shutdown() has a delay - shutdownSafely(helper, sub1); - timer.forwardTime(ManagedChannelImpl2.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS - 1, TimeUnit.SECONDS); - shutdownSafely(helper, sub1); - verify(transportInfo1.transport, never()).shutdown(any(Status.class)); - timer.forwardTime(1, TimeUnit.SECONDS); - verify(transportInfo1.transport).shutdown(same(ManagedChannelImpl2.SUBCHANNEL_SHUTDOWN_STATUS)); - - // ... 
but not after Channel is terminating - verify(mockLoadBalancer, never()).shutdown(); - channel.shutdown(); - verify(mockLoadBalancer).shutdown(); - verify(transportInfo2.transport, never()).shutdown(any(Status.class)); - - shutdownSafely(helper, sub2); - verify(transportInfo2.transport).shutdown(same(ManagedChannelImpl2.SHUTDOWN_STATUS)); - - // Cleanup - transportInfo1.listener.transportShutdown(Status.UNAVAILABLE); - transportInfo1.listener.transportTerminated(); - transportInfo2.listener.transportShutdown(Status.UNAVAILABLE); - transportInfo2.listener.transportTerminated(); - timer.forwardTime(ManagedChannelImpl2.SUBCHANNEL_SHUTDOWN_DELAY_SECONDS, TimeUnit.SECONDS); - } - - @Test - public void subchannelStringableBeforeStart() { - createChannel(); - Subchannel subchannel = createUnstartedSubchannel(helper, addressGroup, Attributes.EMPTY); - assertThat(subchannel.toString()).isNotNull(); - } - - @Test - public void subchannelLoggerCreatedBeforeSubchannelStarted() { - createChannel(); - Subchannel subchannel = createUnstartedSubchannel(helper, addressGroup, Attributes.EMPTY); - assertThat(subchannel.getChannelLogger()).isNotNull(); - } - - @Test - public void subchannelsWhenChannelShutdownNow() { - createChannel(); - Subchannel sub1 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - Subchannel sub2 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, sub1); - requestConnectionSafely(helper, sub2); - - assertThat(transports).hasSize(2); - MockClientTransportInfo ti1 = transports.poll(); - MockClientTransportInfo ti2 = transports.poll(); - - ti1.listener.transportReady(); - ti2.listener.transportReady(); - - channel.shutdownNow(); - verify(ti1.transport).shutdownNow(any(Status.class)); - verify(ti2.transport).shutdownNow(any(Status.class)); - - ti1.listener.transportShutdown(Status.UNAVAILABLE.withDescription("shutdown now")); - 
ti2.listener.transportShutdown(Status.UNAVAILABLE.withDescription("shutdown now")); - ti1.listener.transportTerminated(); - - assertFalse(channel.isTerminated()); - ti2.listener.transportTerminated(); - assertTrue(channel.isTerminated()); - } - - @Test - public void subchannelsNoConnectionShutdown() { - createChannel(); - Subchannel sub1 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - Subchannel sub2 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - - channel.shutdown(); - verify(mockLoadBalancer).shutdown(); - shutdownSafely(helper, sub1); - assertFalse(channel.isTerminated()); - shutdownSafely(helper, sub2); - assertTrue(channel.isTerminated()); - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - } - - @Test - public void subchannelsNoConnectionShutdownNow() { - createChannel(); - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - channel.shutdownNow(); - - verify(mockLoadBalancer).shutdown(); - // Channel's shutdownNow() will call shutdownNow() on all subchannels and oobchannels. - // Therefore, channel is terminated without relying on LoadBalancer to shutdown subchannels. 
- assertTrue(channel.isTerminated()); - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - } - - @Test - public void oobchannels() { - createChannel(); - - ManagedChannel oob1 = helper.createOobChannel(addressGroup, "oob1authority"); - ManagedChannel oob2 = helper.createOobChannel(addressGroup, "oob2authority"); - verify(balancerRpcExecutorPool, times(2)).getObject(); - - assertEquals("oob1authority", oob1.authority()); - assertEquals("oob2authority", oob2.authority()); - - // OOB channels create connections lazily. A new call will initiate the connection. - Metadata headers = new Metadata(); - ClientCall call = oob1.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, headers); - verify(mockTransportFactory) - .newClientTransport( - eq(socketAddress), - eq(new ClientTransportOptions().setAuthority("oob1authority").setUserAgent(USER_AGENT)), - isA(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - assertNotNull(transportInfo); - - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - transportInfo.listener.transportReady(); - assertEquals(1, balancerRpcExecutor.runDueTasks()); - verify(transportInfo.transport).newStream(same(method), same(headers), - same(CallOptions.DEFAULT)); - - // The transport goes away - transportInfo.listener.transportShutdown(Status.UNAVAILABLE); - transportInfo.listener.transportTerminated(); - - // A new call will trigger a new transport - ClientCall call2 = oob1.newCall(method, CallOptions.DEFAULT); - call2.start(mockCallListener2, headers); - ClientCall call3 = - oob1.newCall(method, CallOptions.DEFAULT.withWaitForReady()); - call3.start(mockCallListener3, headers); - verify(mockTransportFactory, times(2)).newClientTransport( - eq(socketAddress), - eq(new ClientTransportOptions().setAuthority("oob1authority").setUserAgent(USER_AGENT)), - isA(ChannelLogger.class)); - transportInfo = 
transports.poll(); - assertNotNull(transportInfo); - - // This transport fails - Status transportError = Status.UNAVAILABLE.withDescription("Connection refused"); - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - transportInfo.listener.transportShutdown(transportError); - assertTrue(balancerRpcExecutor.runDueTasks() > 0); - - // Fail-fast RPC will fail, while wait-for-ready RPC will still be pending - verify(mockCallListener2).onClose(same(transportError), any(Metadata.class)); - verify(mockCallListener3, never()).onClose(any(Status.class), any(Metadata.class)); - - // Shutdown - assertFalse(oob1.isShutdown()); - assertFalse(oob2.isShutdown()); - oob1.shutdown(); - oob2.shutdownNow(); - assertTrue(oob1.isShutdown()); - assertTrue(oob2.isShutdown()); - assertTrue(oob2.isTerminated()); - verify(balancerRpcExecutorPool).returnObject(balancerRpcExecutor.getScheduledExecutorService()); - - // New RPCs will be rejected. - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - ClientCall call4 = oob1.newCall(method, CallOptions.DEFAULT); - ClientCall call5 = oob2.newCall(method, CallOptions.DEFAULT); - call4.start(mockCallListener4, headers); - call5.start(mockCallListener5, headers); - assertTrue(balancerRpcExecutor.runDueTasks() > 0); - verify(mockCallListener4).onClose(statusCaptor.capture(), any(Metadata.class)); - Status status4 = statusCaptor.getValue(); - assertEquals(Status.Code.UNAVAILABLE, status4.getCode()); - verify(mockCallListener5).onClose(statusCaptor.capture(), any(Metadata.class)); - Status status5 = statusCaptor.getValue(); - assertEquals(Status.Code.UNAVAILABLE, status5.getCode()); - - // The pending RPC will still be pending - verify(mockCallListener3, never()).onClose(any(Status.class), any(Metadata.class)); - - // This will shutdownNow() the delayed transport, terminating the pending RPC - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - oob1.shutdownNow(); - assertTrue(balancerRpcExecutor.runDueTasks() > 0); - 
verify(mockCallListener3).onClose(any(Status.class), any(Metadata.class)); - - // Shut down the channel, and it will not terminated because OOB channel has not. - channel.shutdown(); - assertFalse(channel.isTerminated()); - // Delayed transport has already terminated. Terminating the transport terminates the - // subchannel, which in turn terimates the OOB channel, which terminates the channel. - assertFalse(oob1.isTerminated()); - verify(balancerRpcExecutorPool).returnObject(balancerRpcExecutor.getScheduledExecutorService()); - transportInfo.listener.transportTerminated(); - assertTrue(oob1.isTerminated()); - assertTrue(channel.isTerminated()); - verify(balancerRpcExecutorPool, times(2)) - .returnObject(balancerRpcExecutor.getScheduledExecutorService()); - } - - @Test - public void oobChannelsWhenChannelShutdownNow() { - createChannel(); - ManagedChannel oob1 = helper.createOobChannel(addressGroup, "oob1Authority"); - ManagedChannel oob2 = helper.createOobChannel(addressGroup, "oob2Authority"); - - oob1.newCall(method, CallOptions.DEFAULT).start(mockCallListener, new Metadata()); - oob2.newCall(method, CallOptions.DEFAULT).start(mockCallListener2, new Metadata()); - - assertThat(transports).hasSize(2); - MockClientTransportInfo ti1 = transports.poll(); - MockClientTransportInfo ti2 = transports.poll(); - - ti1.listener.transportReady(); - ti2.listener.transportReady(); - - channel.shutdownNow(); - verify(ti1.transport).shutdownNow(any(Status.class)); - verify(ti2.transport).shutdownNow(any(Status.class)); - - ti1.listener.transportShutdown(Status.UNAVAILABLE.withDescription("shutdown now")); - ti2.listener.transportShutdown(Status.UNAVAILABLE.withDescription("shutdown now")); - ti1.listener.transportTerminated(); - - assertFalse(channel.isTerminated()); - ti2.listener.transportTerminated(); - assertTrue(channel.isTerminated()); - } - - @Test - public void oobChannelsNoConnectionShutdown() { - createChannel(); - ManagedChannel oob1 = 
helper.createOobChannel(addressGroup, "oob1Authority"); - ManagedChannel oob2 = helper.createOobChannel(addressGroup, "oob2Authority"); - channel.shutdown(); - - verify(mockLoadBalancer).shutdown(); - oob1.shutdown(); - assertTrue(oob1.isTerminated()); - assertFalse(channel.isTerminated()); - oob2.shutdown(); - assertTrue(oob2.isTerminated()); - assertTrue(channel.isTerminated()); - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - } - - @Test - public void oobChannelsNoConnectionShutdownNow() { - createChannel(); - helper.createOobChannel(addressGroup, "oob1Authority"); - helper.createOobChannel(addressGroup, "oob2Authority"); - channel.shutdownNow(); - - verify(mockLoadBalancer).shutdown(); - assertTrue(channel.isTerminated()); - // Channel's shutdownNow() will call shutdownNow() on all subchannels and oobchannels. - // Therefore, channel is terminated without relying on LoadBalancer to shutdown oobchannels. - verify(mockTransportFactory, never()) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - } - - @Test - public void subchannelChannel_normalUsage() { - createChannel(); - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - verify(balancerRpcExecutorPool, never()).getObject(); - - Channel sChannel = subchannel.asChannel(); - verify(balancerRpcExecutorPool).getObject(); - - Metadata headers = new Metadata(); - CallOptions callOptions = CallOptions.DEFAULT.withDeadlineAfter(5, TimeUnit.SECONDS); - - // Subchannel must be READY when creating the RPC. 
- requestConnectionSafely(helper, subchannel); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - transportListener.transportReady(); - - ClientCall call = sChannel.newCall(method, callOptions); - call.start(mockCallListener, headers); - verify(mockTransport).newStream(same(method), same(headers), callOptionsCaptor.capture()); - - CallOptions capturedCallOption = callOptionsCaptor.getValue(); - assertThat(capturedCallOption.getDeadline()).isSameInstanceAs(callOptions.getDeadline()); - assertThat(capturedCallOption.getOption(GrpcUtil.CALL_OPTIONS_RPC_OWNED_BY_BALANCER)).isTrue(); - } - - @Test - public void subchannelChannel_failWhenNotReady() { - createChannel(); - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - Channel sChannel = subchannel.asChannel(); - Metadata headers = new Metadata(); - - requestConnectionSafely(helper, subchannel); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - - // Subchannel is still CONNECTING, but not READY yet - ClientCall call = sChannel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, headers); - verify(mockTransport, never()).newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - - verifyZeroInteractions(mockCallListener); - assertEquals(1, balancerRpcExecutor.runDueTasks()); - verify(mockCallListener).onClose( - 
same(SubchannelChannel.NOT_READY_ERROR), any(Metadata.class)); - } - - @Test - public void subchannelChannel_failWaitForReady() { - createChannel(); - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - Channel sChannel = subchannel.asChannel(); - Metadata headers = new Metadata(); - - // Subchannel must be READY when creating the RPC. - requestConnectionSafely(helper, subchannel); - verify(mockTransportFactory) - .newClientTransport( - any(SocketAddress.class), any(ClientTransportOptions.class), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - transportListener.transportReady(); - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - - // Wait-for-ready RPC is not allowed - ClientCall call = - sChannel.newCall(method, CallOptions.DEFAULT.withWaitForReady()); - call.start(mockCallListener, headers); - verify(mockTransport, never()).newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - - verifyZeroInteractions(mockCallListener); - assertEquals(1, balancerRpcExecutor.runDueTasks()); - verify(mockCallListener).onClose( - same(SubchannelChannel.WAIT_FOR_READY_ERROR), any(Metadata.class)); - } - - @Test - public void lbHelper_getScheduledExecutorService() { - createChannel(); - - ScheduledExecutorService ses = helper.getScheduledExecutorService(); - Runnable task = mock(Runnable.class); - helper.getSynchronizationContext().schedule(task, 110, TimeUnit.NANOSECONDS, ses); - timer.forwardNanos(109); - verify(task, never()).run(); - timer.forwardNanos(1); - verify(task).run(); - - try { - ses.shutdown(); - fail("Should throw"); - } catch (UnsupportedOperationException e) { - // expected - } - - try { - ses.shutdownNow(); - fail("Should throw"); - } catch (UnsupportedOperationException 
e) { - // expected - } - } - - @Test - public void lbHelper_getNameResolverArgs() { - createChannel(); - - NameResolver.Args args = helper.getNameResolverArgs(); - assertThat(args.getDefaultPort()).isEqualTo(DEFAULT_PORT); - assertThat(args.getProxyDetector()).isSameInstanceAs(GrpcUtil.DEFAULT_PROXY_DETECTOR); - assertThat(args.getSynchronizationContext()) - .isSameInstanceAs(helper.getSynchronizationContext()); - assertThat(args.getServiceConfigParser()).isNotNull(); - } - - @Test - public void lbHelper_getNameResolverRegistry() { - createChannel(); - - assertThat(helper.getNameResolverRegistry()) - .isSameInstanceAs(NameResolverRegistry.getDefaultRegistry()); - } - - @Test - public void refreshNameResolution_whenSubchannelConnectionFailed_notIdle() { - subtestNameResolutionRefreshWhenConnectionFailed(false, false); - } - - @Test - public void refreshNameResolution_whenOobChannelConnectionFailed_notIdle() { - subtestNameResolutionRefreshWhenConnectionFailed(true, false); - } - - @Test - public void notRefreshNameResolution_whenSubchannelConnectionFailed_idle() { - subtestNameResolutionRefreshWhenConnectionFailed(false, true); - } - - @Test - public void notRefreshNameResolution_whenOobChannelConnectionFailed_idle() { - subtestNameResolutionRefreshWhenConnectionFailed(true, true); - } - - private void subtestNameResolutionRefreshWhenConnectionFailed( - boolean isOobChannel, boolean isIdle) { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - if (isOobChannel) { - OobChannel oobChannel = (OobChannel) helper.createOobChannel(addressGroup, "oobAuthority"); - oobChannel.getSubchannel().requestConnection(); - } else { - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - 
requestConnectionSafely(helper, subchannel); - } - - MockClientTransportInfo transportInfo = transports.poll(); - assertNotNull(transportInfo); - - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.remove(0); - - if (isIdle) { - channel.enterIdle(); - // Entering idle mode will result in a new resolver - resolver = nameResolverFactory.resolvers.remove(0); - } - - assertEquals(0, nameResolverFactory.resolvers.size()); - - int expectedRefreshCount = 0; - - // Transport closed when connecting - assertEquals(expectedRefreshCount, resolver.refreshCalled); - transportInfo.listener.transportShutdown(Status.UNAVAILABLE); - // When channel enters idle, new resolver is created but not started. - if (!isIdle) { - expectedRefreshCount++; - } - assertEquals(expectedRefreshCount, resolver.refreshCalled); - - timer.forwardNanos(RECONNECT_BACKOFF_INTERVAL_NANOS); - transportInfo = transports.poll(); - assertNotNull(transportInfo); - - transportInfo.listener.transportReady(); - - // Transport closed when ready - assertEquals(expectedRefreshCount, resolver.refreshCalled); - transportInfo.listener.transportShutdown(Status.UNAVAILABLE); - // When channel enters idle, new resolver is created but not started. 
- if (!isIdle) { - expectedRefreshCount++; - } - assertEquals(expectedRefreshCount, resolver.refreshCalled); - } - - @Test - public void uriPattern() { - assertTrue(ManagedChannelImpl2.URI_PATTERN.matcher("a:/").matches()); - assertTrue(ManagedChannelImpl2.URI_PATTERN.matcher("Z019+-.:/!@ #~ ").matches()); - assertFalse(ManagedChannelImpl2.URI_PATTERN.matcher("a/:").matches()); // "/:" not matched - assertFalse(ManagedChannelImpl2.URI_PATTERN.matcher("0a:/").matches()); // '0' not matched - assertFalse(ManagedChannelImpl2.URI_PATTERN.matcher("a,:/").matches()); // ',' not matched - assertFalse(ManagedChannelImpl2.URI_PATTERN.matcher(" a:/").matches()); // space not matched - } - - /** - * Test that information such as the Call's context, MethodDescriptor, authority, executor are - * propagated to newStream() and applyRequestMetadata(). - */ - @Test - public void informationPropagatedToNewStreamAndCallCredentials() { - createChannel(); - CallOptions callOptions = CallOptions.DEFAULT.withCallCredentials(creds); - final Context.Key testKey = Context.key("testing"); - Context ctx = Context.current().withValue(testKey, "testValue"); - final LinkedList credsApplyContexts = new LinkedList<>(); - final LinkedList newStreamContexts = new LinkedList<>(); - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock in) throws Throwable { - credsApplyContexts.add(Context.current()); - return null; - } - }).when(creds).applyRequestMetadata( - any(RequestInfo.class), any(Executor.class), any(CallCredentials.MetadataApplier.class)); - - // First call will be on delayed transport. Only newCall() is run within the expected context, - // so that we can verify that the context is explicitly attached before calling newStream() and - // applyRequestMetadata(), which happens after we detach the context from the thread. 
- Context origCtx = ctx.attach(); - assertEquals("testValue", testKey.get()); - ClientCall call = channel.newCall(method, callOptions); - ctx.detach(origCtx); - assertNull(testKey.get()); - call.start(mockCallListener, new Metadata()); - - // Simulate name resolution results - EquivalentAddressGroup addressGroup = new EquivalentAddressGroup(socketAddress); - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - verify(mockTransportFactory) - .newClientTransport( - same(socketAddress), eq(clientTransportOptions), any(ChannelLogger.class)); - MockClientTransportInfo transportInfo = transports.poll(); - final ConnectionClientTransport transport = transportInfo.transport; - when(transport.getAttributes()).thenReturn(Attributes.EMPTY); - doAnswer(new Answer() { - @Override - public ClientStream answer(InvocationOnMock in) throws Throwable { - newStreamContexts.add(Context.current()); - return mock(ClientStream.class); - } - }).when(transport).newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - - verify(creds, never()).applyRequestMetadata( - any(RequestInfo.class), any(Executor.class), any(CallCredentials.MetadataApplier.class)); - - // applyRequestMetadata() is called after the transport becomes ready. 
- transportInfo.listener.transportReady(); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - updateBalancingStateSafely(helper, READY, mockPicker); - executor.runDueTasks(); - ArgumentCaptor infoCaptor = ArgumentCaptor.forClass(null); - ArgumentCaptor applierCaptor = ArgumentCaptor.forClass(null); - verify(creds).applyRequestMetadata(infoCaptor.capture(), - same(executor.getScheduledExecutorService()), applierCaptor.capture()); - assertEquals("testValue", testKey.get(credsApplyContexts.poll())); - assertEquals(AUTHORITY, infoCaptor.getValue().getAuthority()); - assertEquals(SecurityLevel.NONE, infoCaptor.getValue().getSecurityLevel()); - verify(transport, never()).newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - - // newStream() is called after apply() is called - applierCaptor.getValue().apply(new Metadata()); - verify(transport).newStream(same(method), any(Metadata.class), same(callOptions)); - assertEquals("testValue", testKey.get(newStreamContexts.poll())); - // The context should not live beyond the scope of newStream() and applyRequestMetadata() - assertNull(testKey.get()); - - - // Second call will not be on delayed transport - origCtx = ctx.attach(); - call = channel.newCall(method, callOptions); - ctx.detach(origCtx); - call.start(mockCallListener, new Metadata()); - - verify(creds, times(2)).applyRequestMetadata(infoCaptor.capture(), - same(executor.getScheduledExecutorService()), applierCaptor.capture()); - assertEquals("testValue", testKey.get(credsApplyContexts.poll())); - assertEquals(AUTHORITY, infoCaptor.getValue().getAuthority()); - assertEquals(SecurityLevel.NONE, infoCaptor.getValue().getSecurityLevel()); - // This is from the first call - verify(transport).newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - - // Still, newStream() is called after apply() is called - 
applierCaptor.getValue().apply(new Metadata()); - verify(transport, times(2)).newStream(same(method), any(Metadata.class), same(callOptions)); - assertEquals("testValue", testKey.get(newStreamContexts.poll())); - - assertNull(testKey.get()); - } - - @Test - public void pickerReturnsStreamTracer_noDelay() { - ClientStream mockStream = mock(ClientStream.class); - ClientStreamTracer.Factory factory1 = mock(ClientStreamTracer.Factory.class); - ClientStreamTracer.Factory factory2 = mock(ClientStreamTracer.Factory.class); - createChannel(); - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - transportInfo.listener.transportReady(); - ClientTransport mockTransport = transportInfo.transport; - when(mockTransport.newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn( - PickResult.withSubchannel(subchannel, factory2)); - updateBalancingStateSafely(helper, READY, mockPicker); - - CallOptions callOptions = CallOptions.DEFAULT.withStreamTracerFactory(factory1); - ClientCall call = channel.newCall(method, callOptions); - call.start(mockCallListener, new Metadata()); - - verify(mockPicker).pickSubchannel(any(PickSubchannelArgs.class)); - verify(mockTransport).newStream(same(method), any(Metadata.class), callOptionsCaptor.capture()); - assertEquals( - Arrays.asList(factory1, factory2), - callOptionsCaptor.getValue().getStreamTracerFactories()); - // The factories are safely not stubbed because we do not expect any usage of them. 
- verifyZeroInteractions(factory1); - verifyZeroInteractions(factory2); - } - - @Test - public void pickerReturnsStreamTracer_delayed() { - ClientStream mockStream = mock(ClientStream.class); - ClientStreamTracer.Factory factory1 = mock(ClientStreamTracer.Factory.class); - ClientStreamTracer.Factory factory2 = mock(ClientStreamTracer.Factory.class); - createChannel(); - - CallOptions callOptions = CallOptions.DEFAULT.withStreamTracerFactory(factory1); - ClientCall call = channel.newCall(method, callOptions); - call.start(mockCallListener, new Metadata()); - - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - transportInfo.listener.transportReady(); - ClientTransport mockTransport = transportInfo.transport; - when(mockTransport.newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn( - PickResult.withSubchannel(subchannel, factory2)); - - updateBalancingStateSafely(helper, READY, mockPicker); - assertEquals(1, executor.runDueTasks()); - - verify(mockPicker).pickSubchannel(any(PickSubchannelArgs.class)); - verify(mockTransport).newStream(same(method), any(Metadata.class), callOptionsCaptor.capture()); - assertEquals( - Arrays.asList(factory1, factory2), - callOptionsCaptor.getValue().getStreamTracerFactories()); - // The factories are safely not stubbed because we do not expect any usage of them. 
- verifyZeroInteractions(factory1); - verifyZeroInteractions(factory2); - } - - @Test - public void getState_loadBalancerSupportsChannelState() { - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build()); - createChannel(); - assertEquals(IDLE, channel.getState(false)); - - updateBalancingStateSafely(helper, TRANSIENT_FAILURE, mockPicker); - assertEquals(TRANSIENT_FAILURE, channel.getState(false)); - } - - @Test - public void getState_withRequestConnect() { - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build()); - requestConnection = false; - createChannel(); - - assertEquals(IDLE, channel.getState(false)); - verify(mockLoadBalancerProvider, never()).newLoadBalancer(any(Helper.class)); - - // call getState() with requestConnection = true - assertEquals(IDLE, channel.getState(true)); - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - helper = helperCaptor.getValue(); - - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - assertEquals(CONNECTING, channel.getState(false)); - assertEquals(CONNECTING, channel.getState(true)); - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - } - - @SuppressWarnings("deprecation") - @Test - public void getState_withRequestConnect_IdleWithLbRunning() { - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build()); - createChannel(); - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - - updateBalancingStateSafely(helper, IDLE, mockPicker); - - assertEquals(IDLE, channel.getState(true)); - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - verify(mockPicker).requestConnection(); - verify(mockLoadBalancer).requestConnection(); - } - - @Test - public void notifyWhenStateChanged() { - 
final AtomicBoolean stateChanged = new AtomicBoolean(); - Runnable onStateChanged = new Runnable() { - @Override - public void run() { - stateChanged.set(true); - } - }; - - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build()); - createChannel(); - assertEquals(IDLE, channel.getState(false)); - - channel.notifyWhenStateChanged(IDLE, onStateChanged); - executor.runDueTasks(); - assertFalse(stateChanged.get()); - - // state change from IDLE to CONNECTING - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - // onStateChanged callback should run - executor.runDueTasks(); - assertTrue(stateChanged.get()); - - // clear and test form CONNECTING - stateChanged.set(false); - channel.notifyWhenStateChanged(IDLE, onStateChanged); - // onStateChanged callback should run immediately - executor.runDueTasks(); - assertTrue(stateChanged.get()); - } - - @Test - public void channelStateWhenChannelShutdown() { - final AtomicBoolean stateChanged = new AtomicBoolean(); - Runnable onStateChanged = new Runnable() { - @Override - public void run() { - stateChanged.set(true); - } - }; - - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build()); - createChannel(); - assertEquals(IDLE, channel.getState(false)); - channel.notifyWhenStateChanged(IDLE, onStateChanged); - executor.runDueTasks(); - assertFalse(stateChanged.get()); - - channel.shutdown(); - assertEquals(SHUTDOWN, channel.getState(false)); - executor.runDueTasks(); - assertTrue(stateChanged.get()); - - stateChanged.set(false); - channel.notifyWhenStateChanged(SHUTDOWN, onStateChanged); - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - - assertEquals(SHUTDOWN, channel.getState(false)); - executor.runDueTasks(); - assertFalse(stateChanged.get()); - } - - @Test - public void stateIsIdleOnIdleTimeout() { - long idleTimeoutMillis = 2000L; - 
channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS); - createChannel(); - assertEquals(IDLE, channel.getState(false)); - - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - assertEquals(CONNECTING, channel.getState(false)); - - timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis)); - assertEquals(IDLE, channel.getState(false)); - } - - @Test - public void panic_whenIdle() { - subtestPanic(IDLE); - } - - @Test - public void panic_whenConnecting() { - subtestPanic(CONNECTING); - } - - @Test - public void panic_whenTransientFailure() { - subtestPanic(TRANSIENT_FAILURE); - } - - @Test - public void panic_whenReady() { - subtestPanic(READY); - } - - private void subtestPanic(ConnectivityState initialState) { - assertNotEquals("We don't test panic mode if it's already SHUTDOWN", SHUTDOWN, initialState); - long idleTimeoutMillis = 2000L; - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS); - createChannel(); - - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - assertThat(nameResolverFactory.resolvers).hasSize(1); - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.remove(0); - - final Throwable panicReason = new Exception("Simulated uncaught exception"); - if (initialState == IDLE) { - timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis)); - } else { - updateBalancingStateSafely(helper, initialState, mockPicker); - } - assertEquals(initialState, channel.getState(false)); - - if (initialState == IDLE) { - // IDLE mode will shutdown resolver and balancer - verify(mockLoadBalancer).shutdown(); - assertTrue(resolver.shutdown); - // A new resolver is created - assertThat(nameResolverFactory.resolvers).hasSize(1); - resolver = nameResolverFactory.resolvers.remove(0); - 
assertFalse(resolver.shutdown); - } else { - verify(mockLoadBalancer, never()).shutdown(); - assertFalse(resolver.shutdown); - } - - // Make channel panic! - channel.syncContext.execute( - new Runnable() { - @Override - public void run() { - channel.panic(panicReason); - } - }); - - // Calls buffered in delayedTransport will fail - - // Resolver and balancer are shutdown - verify(mockLoadBalancer).shutdown(); - assertTrue(resolver.shutdown); - - // Channel will stay in TRANSIENT_FAILURE. getState(true) will not revive it. - assertEquals(TRANSIENT_FAILURE, channel.getState(true)); - assertEquals(TRANSIENT_FAILURE, channel.getState(true)); - verifyPanicMode(panicReason); - - // Besides the resolver created initially, no new resolver or balancer are created. - verify(mockLoadBalancerProvider).newLoadBalancer(any(Helper.class)); - assertThat(nameResolverFactory.resolvers).isEmpty(); - - // A misbehaving balancer that calls updateBalancingState() after it's shut down will not be - // able to revive it. - updateBalancingStateSafely(helper, READY, mockPicker); - verifyPanicMode(panicReason); - - // Cannot be revived by exitIdleMode() - channel.syncContext.execute(new Runnable() { - @Override - public void run() { - channel.exitIdleMode(); - } - }); - verifyPanicMode(panicReason); - - // Can still shutdown normally - channel.shutdown(); - assertTrue(channel.isShutdown()); - assertTrue(channel.isTerminated()); - assertEquals(SHUTDOWN, channel.getState(false)); - - // We didn't stub mockPicker, because it should have never been called in this test. 
- verifyZeroInteractions(mockPicker); - } - - @Test - public void panic_bufferedCallsWillFail() { - createChannel(); - - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withNoResult()); - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - - // Start RPCs that will be buffered in delayedTransport - ClientCall call = - channel.newCall(method, CallOptions.DEFAULT.withoutWaitForReady()); - call.start(mockCallListener, new Metadata()); - - ClientCall call2 = - channel.newCall(method, CallOptions.DEFAULT.withWaitForReady()); - call2.start(mockCallListener2, new Metadata()); - - executor.runDueTasks(); - verifyZeroInteractions(mockCallListener, mockCallListener2); - - // Enter panic - final Throwable panicReason = new Exception("Simulated uncaught exception"); - channel.syncContext.execute( - new Runnable() { - @Override - public void run() { - channel.panic(panicReason); - } - }); - - // Buffered RPCs fail immediately - executor.runDueTasks(); - verifyCallListenerClosed(mockCallListener, Status.Code.INTERNAL, panicReason); - verifyCallListenerClosed(mockCallListener2, Status.Code.INTERNAL, panicReason); - panicExpected = true; - } - - private void verifyPanicMode(Throwable cause) { - panicExpected = true; - @SuppressWarnings("unchecked") - ClientCall.Listener mockListener = - (ClientCall.Listener) mock(ClientCall.Listener.class); - assertEquals(TRANSIENT_FAILURE, channel.getState(false)); - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockListener, new Metadata()); - executor.runDueTasks(); - verifyCallListenerClosed(mockListener, Status.Code.INTERNAL, cause); - - // Channel is dead. No more pending task to possibly revive it. 
- assertEquals(0, timer.numPendingTasks()); - assertEquals(0, executor.numPendingTasks()); - assertEquals(0, balancerRpcExecutor.numPendingTasks()); - } - - private void verifyCallListenerClosed( - ClientCall.Listener listener, Status.Code code, Throwable cause) { - ArgumentCaptor captor = ArgumentCaptor.forClass(null); - verify(listener).onClose(captor.capture(), any(Metadata.class)); - Status rpcStatus = captor.getValue(); - assertEquals(code, rpcStatus.getCode()); - assertSame(cause, rpcStatus.getCause()); - verifyNoMoreInteractions(listener); - } - - @Test - public void idleTimeoutAndReconnect() { - long idleTimeoutMillis = 2000L; - channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS); - createChannel(); - - timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis)); - assertEquals(IDLE, channel.getState(true /* request connection */)); - - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(Helper.class); - // Two times of requesting connection will create loadBalancer twice. - verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture()); - Helper helper2 = helperCaptor.getValue(); - - // Updating on the old helper (whose balancer has been shutdown) does not change the channel - // state. 
- updateBalancingStateSafely(helper, CONNECTING, mockPicker); - assertEquals(IDLE, channel.getState(false)); - - updateBalancingStateSafely(helper2, CONNECTING, mockPicker); - assertEquals(CONNECTING, channel.getState(false)); - } - - @Test - public void idleMode_resetsDelayedTransportPicker() { - ClientStream mockStream = mock(ClientStream.class); - Status pickError = Status.UNAVAILABLE.withDescription("pick result error"); - long idleTimeoutMillis = 1000L; - channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS); - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build()); - createChannel(); - assertEquals(IDLE, channel.getState(false)); - - // This call will be buffered in delayedTransport - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - - // Move channel into TRANSIENT_FAILURE, which will fail the pending call - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withError(pickError)); - updateBalancingStateSafely(helper, TRANSIENT_FAILURE, mockPicker); - assertEquals(TRANSIENT_FAILURE, channel.getState(false)); - executor.runDueTasks(); - verify(mockCallListener).onClose(same(pickError), any(Metadata.class)); - - // Move channel to idle - timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis)); - assertEquals(IDLE, channel.getState(false)); - - // This call should be buffered, but will move the channel out of idle - ClientCall call2 = channel.newCall(method, CallOptions.DEFAULT); - call2.start(mockCallListener2, new Metadata()); - executor.runDueTasks(); - verifyNoMoreInteractions(mockCallListener2); - - // Get the helper created on exiting idle - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(Helper.class); - verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture()); - Helper 
helper2 = helperCaptor.getValue(); - - // Establish a connection - Subchannel subchannel = - createSubchannelSafely(helper2, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - transportListener.transportReady(); - - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - updateBalancingStateSafely(helper2, READY, mockPicker); - assertEquals(READY, channel.getState(false)); - executor.runDueTasks(); - - // Verify the buffered call was drained - verify(mockTransport).newStream(same(method), any(Metadata.class), any(CallOptions.class)); - verify(mockStream).start(any(ClientStreamListener.class)); - } - - @Test - public void enterIdleEntersIdle() { - createChannel(); - updateBalancingStateSafely(helper, READY, mockPicker); - assertEquals(READY, channel.getState(false)); - - channel.enterIdle(); - - assertEquals(IDLE, channel.getState(false)); - } - - @Test - public void enterIdleAfterIdleTimerIsNoOp() { - long idleTimeoutMillis = 2000L; - channelBuilder.idleTimeout(idleTimeoutMillis, TimeUnit.MILLISECONDS); - createChannel(); - timer.forwardNanos(TimeUnit.MILLISECONDS.toNanos(idleTimeoutMillis)); - assertEquals(IDLE, channel.getState(false)); - - channel.enterIdle(); - - assertEquals(IDLE, channel.getState(false)); - } - - @Test - public void enterIdle_exitsIdleIfDelayedStreamPending() { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - 
channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - // Start a call that will be buffered in delayedTransport - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - - // enterIdle() will shut down the name resolver and lb policy used to get a pick for the delayed - // call - channel.enterIdle(); - assertEquals(IDLE, channel.getState(false)); - - // enterIdle() will restart the delayed call by exiting idle. This creates a new helper. - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(Helper.class); - verify(mockLoadBalancerProvider, times(2)).newLoadBalancer(helperCaptor.capture()); - Helper helper2 = helperCaptor.getValue(); - - // Establish a connection - Subchannel subchannel = - createSubchannelSafely(helper2, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - ClientStream mockStream = mock(ClientStream.class); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - transportListener.transportReady(); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - updateBalancingStateSafely(helper2, READY, mockPicker); - assertEquals(READY, channel.getState(false)); - - // Verify the original call was drained - executor.runDueTasks(); - verify(mockTransport).newStream(same(method), any(Metadata.class), any(CallOptions.class)); - verify(mockStream).start(any(ClientStreamListener.class)); - } - - @Test - public void updateBalancingStateDoesUpdatePicker() { - ClientStream mockStream = mock(ClientStream.class); - createChannel(); - - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); 
- call.start(mockCallListener, new Metadata()); - - // Make the transport available with subchannel2 - Subchannel subchannel1 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - Subchannel subchannel2 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel2); - - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - transportListener.transportReady(); - - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel1)); - updateBalancingStateSafely(helper, READY, mockPicker); - - executor.runDueTasks(); - verify(mockTransport, never()) - .newStream(any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class)); - verify(mockStream, never()).start(any(ClientStreamListener.class)); - - - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel2)); - updateBalancingStateSafely(helper, READY, mockPicker); - - executor.runDueTasks(); - verify(mockTransport).newStream(same(method), any(Metadata.class), any(CallOptions.class)); - verify(mockStream).start(any(ClientStreamListener.class)); - } - - @Test - public void updateBalancingState_withWrappedSubchannel() { - ClientStream mockStream = mock(ClientStream.class); - createChannel(); - - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - - final Subchannel subchannel1 = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel1); - - MockClientTransportInfo transportInfo = 
transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - transportListener.transportReady(); - - Subchannel wrappedSubchannel1 = new ForwardingSubchannel() { - @Override - protected Subchannel delegate() { - return subchannel1; - } - }; - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(wrappedSubchannel1)); - updateBalancingStateSafely(helper, READY, mockPicker); - - executor.runDueTasks(); - verify(mockTransport).newStream(same(method), any(Metadata.class), any(CallOptions.class)); - verify(mockStream).start(any(ClientStreamListener.class)); - } - - @Test - public void updateBalancingStateWithShutdownShouldBeIgnored() { - channelBuilder.nameResolverFactory( - new FakeNameResolverFactory.Builder(expectedUri).setResolvedAtStart(false).build()); - createChannel(); - assertEquals(IDLE, channel.getState(false)); - - Runnable onStateChanged = mock(Runnable.class); - channel.notifyWhenStateChanged(IDLE, onStateChanged); - - updateBalancingStateSafely(helper, SHUTDOWN, mockPicker); - - assertEquals(IDLE, channel.getState(false)); - executor.runDueTasks(); - verify(onStateChanged, never()).run(); - } - - @Test - public void balancerRefreshNameResolution() { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); - int initialRefreshCount = resolver.refreshCalled; - refreshNameResolutionSafely(helper); - assertEquals(initialRefreshCount + 1, resolver.refreshCalled); - } - - @Test - public void resetConnectBackoff() { - // Start with a name resolution failure to trigger 
backoff attempts - Status error = Status.UNAVAILABLE.withCause(new Throwable("fake name resolution error")); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setError(error).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - // Name resolution is started as soon as channel is created. - createChannel(); - FakeNameResolverFactory.FakeNameResolver resolver = nameResolverFactory.resolvers.get(0); - verify(mockLoadBalancer).handleNameResolutionError(same(error)); - - FakeClock.ScheduledTask nameResolverBackoff = getNameResolverRefresh(); - assertNotNull("There should be a name resolver backoff task", nameResolverBackoff); - assertEquals(0, resolver.refreshCalled); - - // Verify resetConnectBackoff() calls refresh and cancels the scheduled backoff - channel.resetConnectBackoff(); - assertEquals(1, resolver.refreshCalled); - assertTrue(nameResolverBackoff.isCancelled()); - - // Simulate a race between cancel and the task scheduler. Should be a no-op. 
- nameResolverBackoff.command.run(); - assertEquals(1, resolver.refreshCalled); - - // Verify that the reconnect policy was recreated and the backoff multiplier reset to 1 - timer.forwardNanos(RECONNECT_BACKOFF_INTERVAL_NANOS); - assertEquals(2, resolver.refreshCalled); - } - - @Test - public void resetConnectBackoff_noOpWithoutPendingResolverBackoff() { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - FakeNameResolverFactory.FakeNameResolver nameResolver = nameResolverFactory.resolvers.get(0); - assertEquals(0, nameResolver.refreshCalled); - - channel.resetConnectBackoff(); - - assertEquals(0, nameResolver.refreshCalled); - } - - @Test - public void resetConnectBackoff_noOpWhenChannelShutdown() { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - channel.shutdown(); - assertTrue(channel.isShutdown()); - channel.resetConnectBackoff(); - - FakeNameResolverFactory.FakeNameResolver nameResolver = nameResolverFactory.resolvers.get(0); - assertEquals(0, nameResolver.refreshCalled); - } - - @Test - public void resetConnectBackoff_noOpWhenNameResolverNotStarted() { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - requestConnection = false; - createChannel(); - - channel.resetConnectBackoff(); - - FakeNameResolverFactory.FakeNameResolver nameResolver = nameResolverFactory.resolvers.get(0); - assertEquals(0, nameResolver.refreshCalled); - } - - @Test - public void channelsAndSubchannels_instrumented_name() throws Exception { - createChannel(); - assertEquals(TARGET, getStats(channel).target); - - 
Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - assertEquals(Collections.singletonList(addressGroup).toString(), - getStats((AbstractSubchannel) subchannel).target); - } - - @Test - public void channelTracing_channelCreationEvent() throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - createChannel(); - assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Channel for 'fake://fake.example.com' created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_subchannelCreationEvents() throws Exception { - channelBuilder.maxTraceEvents(10); - createChannel(); - timer.forwardNanos(1234); - AbstractSubchannel subchannel = - (AbstractSubchannel) createSubchannelSafely( - helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Child Subchannel started") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .setSubchannelRef(subchannel.getInstrumentedInternalSubchannel()) - .build()); - assertThat(getStats(subchannel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Subchannel for [[[test-addr]/{}]] created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_nameResolvingErrorEvent() throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - - Status error = Status.UNAVAILABLE.withDescription("simulated error"); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setError(error).build(); - 
channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Failed to resolve name: " + error) - .setSeverity(ChannelTrace.Event.Severity.CT_WARNING) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_nameResolvedEvent() throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Address resolved: " - + Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_nameResolvedEvent_zeorAndNonzeroBackends() throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - List servers = new ArrayList<>(); - servers.add(new EquivalentAddressGroup(socketAddress)); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - int prevSize = getStats(channel).channelTrace.events.size(); - ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .build(); - nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult1); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); - - prevSize = 
getStats(channel).channelTrace.events.size(); - nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - - prevSize = getStats(channel).channelTrace.events.size(); - nameResolverFactory.resolvers.get(0).listener.onError(Status.INTERNAL); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); - - prevSize = getStats(channel).channelTrace.events.size(); - ResolutionResult resolutionResult2 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .build(); - nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - } - - @Test - public void channelTracing_serviceConfigChange() throws Exception { - timer.forwardNanos(1234); - channelBuilder.maxTraceEvents(10); - List servers = new ArrayList<>(); - servers.add(new EquivalentAddressGroup(socketAddress)); - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri).setServers(servers).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - createChannel(); - - int prevSize = getStats(channel).channelTrace.events.size(); - Attributes attributes = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, new HashMap()) - .build(); - ManagedChannelServiceConfig2 mcsc1 = createManagedChannelServiceConfig( - ImmutableMap.of(), - new PolicySelection( - mockLoadBalancerProvider, ImmutableMap.of("foo", "bar"), null)); - ResolutionResult resolutionResult1 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setAttributes(attributes) - .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) - .build(); - 
nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult1); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - assertThat(getStats(channel).channelTrace.events.get(prevSize)) - .isEqualTo(new ChannelTrace.Event.Builder() - .setDescription("Service config changed") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - - prevSize = getStats(channel).channelTrace.events.size(); - ResolutionResult resolutionResult2 = ResolutionResult.newBuilder().setAddresses( - Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setAttributes(attributes) - .setServiceConfig(ConfigOrError.fromConfig(mcsc1)) - .build(); - nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult2); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize); - - prevSize = getStats(channel).channelTrace.events.size(); - Map serviceConfig = new HashMap<>(); - serviceConfig.put("methodConfig", new HashMap()); - attributes = - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig) - .build(); - timer.forwardNanos(1234); - ResolutionResult resolutionResult3 = ResolutionResult.newBuilder() - .setAddresses(Collections.singletonList( - new EquivalentAddressGroup( - Arrays.asList(new SocketAddress() {}, new SocketAddress() {})))) - .setAttributes(attributes) - .setServiceConfig(ConfigOrError.fromConfig(ManagedChannelServiceConfig2.empty())) - .build(); - nameResolverFactory.resolvers.get(0).listener.onResult(resolutionResult3); - assertThat(getStats(channel).channelTrace.events).hasSize(prevSize + 1); - assertThat(getStats(channel).channelTrace.events.get(prevSize)) - .isEqualTo(new ChannelTrace.Event.Builder() - .setDescription("Service config changed") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - 
public void channelTracing_stateChangeEvent() throws Exception { - channelBuilder.maxTraceEvents(10); - createChannel(); - timer.forwardNanos(1234); - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Entering CONNECTING state") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_subchannelStateChangeEvent() throws Exception { - channelBuilder.maxTraceEvents(10); - createChannel(); - AbstractSubchannel subchannel = - (AbstractSubchannel) createSubchannelSafely( - helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - timer.forwardNanos(1234); - ((TransportProvider) subchannel.getInternalSubchannel()).obtainActiveTransport(); - assertThat(getStats(subchannel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("CONNECTING as requested") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_oobChannelStateChangeEvent() throws Exception { - channelBuilder.maxTraceEvents(10); - createChannel(); - OobChannel oobChannel = (OobChannel) helper.createOobChannel(addressGroup, "authority"); - timer.forwardNanos(1234); - oobChannel.handleSubchannelStateChange( - ConnectivityStateInfo.forNonError(ConnectivityState.CONNECTING)); - assertThat(getStats(oobChannel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Entering CONNECTING state") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelTracing_oobChannelCreationEvents() throws Exception { - channelBuilder.maxTraceEvents(10); - createChannel(); - timer.forwardNanos(1234); - OobChannel oobChannel = (OobChannel) 
helper.createOobChannel(addressGroup, "authority"); - assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Child OobChannel created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .setChannelRef(oobChannel) - .build()); - assertThat(getStats(oobChannel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("OobChannel for [[test-addr]/{}] created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - assertThat(getStats(oobChannel.getInternalSubchannel()).channelTrace.events).contains( - new ChannelTrace.Event.Builder() - .setDescription("Subchannel for [[test-addr]/{}] created") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void channelsAndSubchannels_instrumented_state() throws Exception { - createChannel(); - - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(null); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - helper = helperCaptor.getValue(); - - assertEquals(IDLE, getStats(channel).state); - updateBalancingStateSafely(helper, CONNECTING, mockPicker); - assertEquals(CONNECTING, getStats(channel).state); - - AbstractSubchannel subchannel = - (AbstractSubchannel) createSubchannelSafely( - helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - - assertEquals(IDLE, getStats(subchannel).state); - requestConnectionSafely(helper, subchannel); - assertEquals(CONNECTING, getStats(subchannel).state); - - MockClientTransportInfo transportInfo = transports.poll(); - - assertEquals(CONNECTING, getStats(subchannel).state); - transportInfo.listener.transportReady(); - assertEquals(READY, getStats(subchannel).state); - - assertEquals(CONNECTING, getStats(channel).state); - updateBalancingStateSafely(helper, READY, mockPicker); - 
assertEquals(READY, getStats(channel).state); - - channel.shutdownNow(); - assertEquals(SHUTDOWN, getStats(channel).state); - assertEquals(SHUTDOWN, getStats(subchannel).state); - } - - @Test - public void channelStat_callStarted() throws Exception { - createChannel(); - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - assertEquals(0, getStats(channel).callsStarted); - call.start(mockCallListener, new Metadata()); - assertEquals(1, getStats(channel).callsStarted); - assertEquals(executor.getTicker().read(), getStats(channel).lastCallStartedNanos); - } - - @Test - public void channelsAndSubChannels_instrumented_success() throws Exception { - channelsAndSubchannels_instrumented0(true); - } - - @Test - public void channelsAndSubChannels_instrumented_fail() throws Exception { - channelsAndSubchannels_instrumented0(false); - } - - private void channelsAndSubchannels_instrumented0(boolean success) throws Exception { - createChannel(); - - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - - // Channel stat bumped when ClientCall.start() called - assertEquals(0, getStats(channel).callsStarted); - call.start(mockCallListener, new Metadata()); - assertEquals(1, getStats(channel).callsStarted); - - ClientStream mockStream = mock(ClientStream.class); - ClientStreamTracer.Factory factory = mock(ClientStreamTracer.Factory.class); - AbstractSubchannel subchannel = - (AbstractSubchannel) createSubchannelSafely( - helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - transportInfo.listener.transportReady(); - ClientTransport mockTransport = transportInfo.transport; - when(mockTransport.newStream( - any(MethodDescriptor.class), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))).thenReturn( - PickResult.withSubchannel(subchannel, factory)); - - 
// subchannel stat bumped when call gets assigned to it - assertEquals(0, getStats(subchannel).callsStarted); - updateBalancingStateSafely(helper, READY, mockPicker); - assertEquals(1, executor.runDueTasks()); - verify(mockStream).start(streamListenerCaptor.capture()); - assertEquals(1, getStats(subchannel).callsStarted); - - ClientStreamListener streamListener = streamListenerCaptor.getValue(); - call.halfClose(); - - // closing stream listener affects subchannel stats immediately - assertEquals(0, getStats(subchannel).callsSucceeded); - assertEquals(0, getStats(subchannel).callsFailed); - streamListener.closed(success ? Status.OK : Status.UNKNOWN, new Metadata()); - if (success) { - assertEquals(1, getStats(subchannel).callsSucceeded); - assertEquals(0, getStats(subchannel).callsFailed); - } else { - assertEquals(0, getStats(subchannel).callsSucceeded); - assertEquals(1, getStats(subchannel).callsFailed); - } - - // channel stats bumped when the ClientCall.Listener is notified - assertEquals(0, getStats(channel).callsSucceeded); - assertEquals(0, getStats(channel).callsFailed); - executor.runDueTasks(); - if (success) { - assertEquals(1, getStats(channel).callsSucceeded); - assertEquals(0, getStats(channel).callsFailed); - } else { - assertEquals(0, getStats(channel).callsSucceeded); - assertEquals(1, getStats(channel).callsFailed); - } - } - - @Test - public void channelsAndSubchannels_oob_instrumented_success() throws Exception { - channelsAndSubchannels_oob_instrumented0(true); - } - - @Test - public void channelsAndSubchannels_oob_instrumented_fail() throws Exception { - channelsAndSubchannels_oob_instrumented0(false); - } - - private void channelsAndSubchannels_oob_instrumented0(boolean success) throws Exception { - // set up - ClientStream mockStream = mock(ClientStream.class); - createChannel(); - - OobChannel oobChannel = (OobChannel) helper.createOobChannel(addressGroup, "oobauthority"); - AbstractSubchannel oobSubchannel = (AbstractSubchannel) 
oobChannel.getSubchannel(); - FakeClock callExecutor = new FakeClock(); - CallOptions options = - CallOptions.DEFAULT.withExecutor(callExecutor.getScheduledExecutorService()); - ClientCall call = oobChannel.newCall(method, options); - Metadata headers = new Metadata(); - - // Channel stat bumped when ClientCall.start() called - assertEquals(0, getStats(oobChannel).callsStarted); - call.start(mockCallListener, headers); - assertEquals(1, getStats(oobChannel).callsStarted); - - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ManagedClientTransport.Listener transportListener = transportInfo.listener; - when(mockTransport.newStream(same(method), same(headers), any(CallOptions.class))) - .thenReturn(mockStream); - - // subchannel stat bumped when call gets assigned to it - assertEquals(0, getStats(oobSubchannel).callsStarted); - transportListener.transportReady(); - callExecutor.runDueTasks(); - verify(mockStream).start(streamListenerCaptor.capture()); - assertEquals(1, getStats(oobSubchannel).callsStarted); - - ClientStreamListener streamListener = streamListenerCaptor.getValue(); - call.halfClose(); - - // closing stream listener affects subchannel stats immediately - assertEquals(0, getStats(oobSubchannel).callsSucceeded); - assertEquals(0, getStats(oobSubchannel).callsFailed); - streamListener.closed(success ? 
Status.OK : Status.UNKNOWN, new Metadata()); - if (success) { - assertEquals(1, getStats(oobSubchannel).callsSucceeded); - assertEquals(0, getStats(oobSubchannel).callsFailed); - } else { - assertEquals(0, getStats(oobSubchannel).callsSucceeded); - assertEquals(1, getStats(oobSubchannel).callsFailed); - } - - // channel stats bumped when the ClientCall.Listener is notified - assertEquals(0, getStats(oobChannel).callsSucceeded); - assertEquals(0, getStats(oobChannel).callsFailed); - callExecutor.runDueTasks(); - if (success) { - assertEquals(1, getStats(oobChannel).callsSucceeded); - assertEquals(0, getStats(oobChannel).callsFailed); - } else { - assertEquals(0, getStats(oobChannel).callsSucceeded); - assertEquals(1, getStats(oobChannel).callsFailed); - } - // oob channel is separate from the original channel - assertEquals(0, getStats(channel).callsSucceeded); - assertEquals(0, getStats(channel).callsFailed); - } - - @Test - public void channelsAndSubchannels_oob_instrumented_name() throws Exception { - createChannel(); - - String authority = "oobauthority"; - OobChannel oobChannel = (OobChannel) helper.createOobChannel(addressGroup, authority); - assertEquals(authority, getStats(oobChannel).target); - } - - @Test - public void channelsAndSubchannels_oob_instrumented_state() throws Exception { - createChannel(); - - OobChannel oobChannel = (OobChannel) helper.createOobChannel(addressGroup, "oobauthority"); - assertEquals(IDLE, getStats(oobChannel).state); - - oobChannel.getSubchannel().requestConnection(); - assertEquals(CONNECTING, getStats(oobChannel).state); - - MockClientTransportInfo transportInfo = transports.poll(); - ManagedClientTransport.Listener transportListener = transportInfo.listener; - - transportListener.transportReady(); - assertEquals(READY, getStats(oobChannel).state); - - // oobchannel state is separate from the ManagedChannel - assertEquals(IDLE, getStats(channel).state); - channel.shutdownNow(); - assertEquals(SHUTDOWN, 
getStats(channel).state); - assertEquals(SHUTDOWN, getStats(oobChannel).state); - } - - @Test - public void binaryLogInstalled() throws Exception { - final SettableFuture intercepted = SettableFuture.create(); - channelBuilder.binlog = new BinaryLog() { - @Override - public void close() throws IOException { - // noop - } - - @Override - public ServerMethodDefinition wrapMethodDefinition( - ServerMethodDefinition oMethodDef) { - return oMethodDef; - } - - @Override - public Channel wrapChannel(Channel channel) { - return ClientInterceptors.intercept(channel, - new ClientInterceptor() { - @Override - public ClientCall interceptCall( - MethodDescriptor method, - CallOptions callOptions, - Channel next) { - intercepted.set(true); - return next.newCall(method, callOptions); - } - }); - } - }; - - createChannel(); - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - assertTrue(intercepted.get()); - } - - @Test - public void retryBackoffThenChannelShutdown_retryShouldStillHappen_newCallShouldFail() { - Map retryPolicy = new HashMap<>(); - retryPolicy.put("maxAttempts", 3D); - retryPolicy.put("initialBackoff", "10s"); - retryPolicy.put("maxBackoff", "30s"); - retryPolicy.put("backoffMultiplier", 2D); - retryPolicy.put("retryableStatusCodes", Arrays.asList("UNAVAILABLE")); - Map methodConfig = new HashMap<>(); - Map name = new HashMap<>(); - name.put("service", "service"); - methodConfig.put("name", Arrays.asList(name)); - methodConfig.put("retryPolicy", retryPolicy); - Map rawServiceConfig = new HashMap<>(); - rawServiceConfig.put("methodConfig", Arrays.asList(methodConfig)); - Attributes attributesWithRetryPolicy = Attributes - .newBuilder().set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig).build(); - - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); 
- ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - channelBuilder.nameResolverFactory(nameResolverFactory); - channelBuilder.executor(MoreExecutors.directExecutor()); - channelBuilder.enableRetry(); - RetriableStream.setRandom( - // not random - new Random() { - @Override - public double nextDouble() { - return 1D; // fake random - } - }); - - requestConnection = false; - createChannel(); - - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(Helper.class); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - helper = helperCaptor.getValue(); - verify(mockLoadBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(nameResolverFactory.servers) - .setAttributes(attributesWithRetryPolicy) - .build()); - - // simulating request connection and then transport ready after resolved address - Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ClientStream mockStream = mock(ClientStream.class); - ClientStream mockStream2 = mock(ClientStream.class); - when(mockTransport.newStream(same(method), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream).thenReturn(mockStream2); - transportInfo.listener.transportReady(); - updateBalancingStateSafely(helper, READY, mockPicker); - - ArgumentCaptor 
streamListenerCaptor = - ArgumentCaptor.forClass(ClientStreamListener.class); - verify(mockStream).start(streamListenerCaptor.capture()); - assertThat(timer.getPendingTasks()).isEmpty(); - - // trigger retry - streamListenerCaptor.getValue().closed(Status.UNAVAILABLE, new Metadata()); - - // in backoff - timer.forwardTime(5, TimeUnit.SECONDS); - assertThat(timer.getPendingTasks()).hasSize(1); - verify(mockStream2, never()).start(any(ClientStreamListener.class)); - - // shutdown during backoff period - channel.shutdown(); - - assertThat(timer.getPendingTasks()).hasSize(1); - verify(mockCallListener, never()).onClose(any(Status.class), any(Metadata.class)); - - ClientCall call2 = channel.newCall(method, CallOptions.DEFAULT); - call2.start(mockCallListener2, new Metadata()); - - ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(Status.class); - verify(mockCallListener2).onClose(statusCaptor.capture(), any(Metadata.class)); - assertSame(Status.Code.UNAVAILABLE, statusCaptor.getValue().getCode()); - assertEquals("Channel shutdown invoked", statusCaptor.getValue().getDescription()); - - // backoff ends - timer.forwardTime(5, TimeUnit.SECONDS); - assertThat(timer.getPendingTasks()).isEmpty(); - verify(mockStream2).start(streamListenerCaptor.capture()); - verify(mockLoadBalancer, never()).shutdown(); - assertFalse( - "channel.isTerminated() is expected to be false but was true", - channel.isTerminated()); - - streamListenerCaptor.getValue().closed(Status.INTERNAL, new Metadata()); - verify(mockLoadBalancer).shutdown(); - // simulating the shutdown of load balancer triggers the shutdown of subchannel - shutdownSafely(helper, subchannel); - transportInfo.listener.transportShutdown(Status.INTERNAL); - transportInfo.listener.transportTerminated(); // simulating transport terminated - assertTrue( - "channel.isTerminated() is expected to be true but was false", - channel.isTerminated()); - } - - @Test - public void 
hedgingScheduledThenChannelShutdown_hedgeShouldStillHappen_newCallShouldFail() { - Map hedgingPolicy = new HashMap<>(); - hedgingPolicy.put("maxAttempts", 3D); - hedgingPolicy.put("hedgingDelay", "10s"); - hedgingPolicy.put("nonFatalStatusCodes", Arrays.asList("UNAVAILABLE")); - Map methodConfig = new HashMap<>(); - Map name = new HashMap<>(); - name.put("service", "service"); - methodConfig.put("name", Arrays.asList(name)); - methodConfig.put("hedgingPolicy", hedgingPolicy); - Map rawServiceConfig = new HashMap<>(); - rawServiceConfig.put("methodConfig", Arrays.asList(methodConfig)); - Attributes attributesWithRetryPolicy = Attributes - .newBuilder().set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig).build(); - - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(Collections.singletonList(new EquivalentAddressGroup(socketAddress))) - .build(); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - channelBuilder.nameResolverFactory(nameResolverFactory); - channelBuilder.executor(MoreExecutors.directExecutor()); - channelBuilder.enableRetry(); - - requestConnection = false; - createChannel(); - - ClientCall call = channel.newCall(method, CallOptions.DEFAULT); - call.start(mockCallListener, new Metadata()); - ArgumentCaptor helperCaptor = ArgumentCaptor.forClass(Helper.class); - verify(mockLoadBalancerProvider).newLoadBalancer(helperCaptor.capture()); - helper = helperCaptor.getValue(); - verify(mockLoadBalancer).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(nameResolverFactory.servers) - .setAttributes(attributesWithRetryPolicy) - .build()); - - // simulating request connection and then transport ready after resolved address - 
Subchannel subchannel = - createSubchannelSafely(helper, addressGroup, Attributes.EMPTY, subchannelStateListener); - when(mockPicker.pickSubchannel(any(PickSubchannelArgs.class))) - .thenReturn(PickResult.withSubchannel(subchannel)); - requestConnectionSafely(helper, subchannel); - MockClientTransportInfo transportInfo = transports.poll(); - ConnectionClientTransport mockTransport = transportInfo.transport; - ClientStream mockStream = mock(ClientStream.class); - ClientStream mockStream2 = mock(ClientStream.class); - when(mockTransport.newStream(same(method), any(Metadata.class), any(CallOptions.class))) - .thenReturn(mockStream).thenReturn(mockStream2); - transportInfo.listener.transportReady(); - updateBalancingStateSafely(helper, READY, mockPicker); - - ArgumentCaptor streamListenerCaptor = - ArgumentCaptor.forClass(ClientStreamListener.class); - verify(mockStream).start(streamListenerCaptor.capture()); - - // in hedging delay backoff - timer.forwardTime(5, TimeUnit.SECONDS); - assertThat(timer.numPendingTasks()).isEqualTo(1); - // first hedge fails - streamListenerCaptor.getValue().closed(Status.UNAVAILABLE, new Metadata()); - verify(mockStream2, never()).start(any(ClientStreamListener.class)); - - // shutdown during backoff period - channel.shutdown(); - - assertThat(timer.numPendingTasks()).isEqualTo(1); - verify(mockCallListener, never()).onClose(any(Status.class), any(Metadata.class)); - - ClientCall call2 = channel.newCall(method, CallOptions.DEFAULT); - call2.start(mockCallListener2, new Metadata()); - - ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(Status.class); - verify(mockCallListener2).onClose(statusCaptor.capture(), any(Metadata.class)); - assertSame(Status.Code.UNAVAILABLE, statusCaptor.getValue().getCode()); - assertEquals("Channel shutdown invoked", statusCaptor.getValue().getDescription()); - - // backoff ends - timer.forwardTime(5, TimeUnit.SECONDS); - assertThat(timer.numPendingTasks()).isEqualTo(1); - 
verify(mockStream2).start(streamListenerCaptor.capture()); - verify(mockLoadBalancer, never()).shutdown(); - assertFalse( - "channel.isTerminated() is expected to be false but was true", - channel.isTerminated()); - - streamListenerCaptor.getValue().closed(Status.INTERNAL, new Metadata()); - assertThat(timer.numPendingTasks()).isEqualTo(0); - verify(mockLoadBalancer).shutdown(); - // simulating the shutdown of load balancer triggers the shutdown of subchannel - shutdownSafely(helper, subchannel); - // simulating transport shutdown & terminated - transportInfo.listener.transportShutdown(Status.INTERNAL); - transportInfo.listener.transportTerminated(); - assertTrue( - "channel.isTerminated() is expected to be true but was false", - channel.isTerminated()); - } - - @Test - public void badServiceConfigIsRecoverable() throws Exception { - final Map invalidServiceConfig = - parseConfig("{\"loadBalancingConfig\": [{\"kaboom\": {}}]}"); - final List addresses = - ImmutableList.of(new EquivalentAddressGroup(new SocketAddress() {})); - final class FakeNameResolver extends NameResolver { - Listener2 listener; - - @Override - public String getServiceAuthority() { - return "also fake"; - } - - @Override - public void start(Listener2 listener) { - this.listener = listener; - listener.onResult( - ResolutionResult.newBuilder() - .setAddresses(addresses) - .setAttributes( - Attributes.newBuilder() - .set( - GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, invalidServiceConfig) - .build()) - .setServiceConfig( - ConfigOrError.fromError( - Status.INTERNAL.withDescription("kaboom is invalid"))) - .build()); - } - - @Override - public void shutdown() {} - } - - final class FakeNameResolverFactory2 extends NameResolver.Factory { - FakeNameResolver resolver; - - @Nullable - @Override - public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { - return (resolver = new FakeNameResolver()); - } - - @Override - public String getDefaultScheme() { - return "fake"; - } - } - - 
FakeNameResolverFactory2 factory = new FakeNameResolverFactory2(); - final class CustomBuilder extends AbstractManagedChannelImplBuilder { - - CustomBuilder() { - super(TARGET); - this.executorPool = ManagedChannelImplTest2.this.executorPool; - this.channelz = ManagedChannelImplTest2.this.channelz; - } - - @Override - protected ClientTransportFactory buildTransportFactory() { - return mockTransportFactory; - } - } - - ManagedChannel mychannel = new CustomBuilder().nameResolverFactory(factory).build(); - - ClientCall call1 = - mychannel.newCall(TestMethodDescriptors.voidMethod(), CallOptions.DEFAULT); - ListenableFuture future1 = ClientCalls.futureUnaryCall(call1, null); - executor.runDueTasks(); - try { - future1.get(1, TimeUnit.SECONDS); - Assert.fail(); - } catch (ExecutionException e) { - assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("kaboom"); - } - - // ok the service config is bad, let's fix it. - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); - Object fakeLbConfig = new Object(); - PolicySelection lbConfigs = - new PolicySelection( - mockLoadBalancerProvider, rawServiceConfig, fakeLbConfig); - mockLoadBalancerProvider.parseLoadBalancingPolicyConfig(rawServiceConfig); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, lbConfigs); - factory.resolver.listener.onResult( - ResolutionResult.newBuilder() - .setAddresses(addresses) - .setAttributes( - Attributes.newBuilder() - .set( - GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig) - .build()) - .setServiceConfig(ConfigOrError.fromConfig(managedChannelServiceConfig)) - .build()); - - ClientCall call2 = mychannel.newCall( - TestMethodDescriptors.voidMethod(), - CallOptions.DEFAULT.withDeadlineAfter(5, TimeUnit.SECONDS)); - ListenableFuture future2 = ClientCalls.futureUnaryCall(call2, null); - - timer.forwardTime(5, TimeUnit.SECONDS); - - executor.runDueTasks(); - try { - 
future2.get(); - Assert.fail(); - } catch (ExecutionException e) { - assertThat(Throwables.getStackTraceAsString(e.getCause())).contains("deadline"); - } - - mychannel.shutdownNow(); - // Now for Deadline_exceeded, stream shutdown is delayed, calling shutdownNow() on a open stream - // will add a task to executor. Cleaning that task here. - executor.runDueTasks(); - } - - @Deprecated - @Test - public void nameResolver_forwardingStartOldApi() { - final AtomicReference listenerCapture = new AtomicReference<>(); - final NameResolver noopResolver = new NameResolver() { - @Override - public String getServiceAuthority() { - return "fake-authority"; - } - - @Override - public void start(Listener2 listener) { - listenerCapture.set(listener); - } - - @Override - public void shutdown() {} - }; - - // This forwarding resolver is still on the old start() API. Despite that, the delegate - // resolver which is on the new API should get the new Listener2. - final NameResolver oldApiForwardingResolver = new NameResolver() { - @Override - public String getServiceAuthority() { - return noopResolver.getServiceAuthority(); - } - - @Override - public void start(Listener listener) { - noopResolver.start(listener); - } - - @Override - public void shutdown() { - noopResolver.shutdown(); - } - }; - - NameResolver.Factory oldApiResolverFactory = new NameResolver.Factory() { - @Override - public NameResolver newNameResolver(URI targetUri, NameResolver.Helper helper) { - return oldApiForwardingResolver; - } - - @Override - public String getDefaultScheme() { - return "fakescheme"; - } - }; - channelBuilder.nameResolverFactory(oldApiResolverFactory); - createChannel(); - - assertThat(listenerCapture.get()).isNotNull(); - } - - @Test - public void nameResolverArgsPropagation() { - final AtomicReference capturedArgs = new AtomicReference<>(); - final NameResolver noopResolver = new NameResolver() { - @Override - public String getServiceAuthority() { - return "fake-authority"; - } - - @Override - 
public void start(Listener2 listener) { - } - - @Override - public void shutdown() {} - }; - ProxyDetector neverProxy = new ProxyDetector() { - @Override - public ProxiedSocketAddress proxyFor(SocketAddress targetAddress) { - return null; - } - }; - NameResolver.Factory factory = new NameResolver.Factory() { - @Override - public NameResolver newNameResolver(URI targetUri, NameResolver.Args args) { - capturedArgs.set(args); - return noopResolver; - } - - @Override - public String getDefaultScheme() { - return "fakescheme"; - } - }; - channelBuilder.nameResolverFactory(factory).proxyDetector(neverProxy); - createChannel(); - - NameResolver.Args args = capturedArgs.get(); - assertThat(args).isNotNull(); - assertThat(args.getDefaultPort()).isEqualTo(DEFAULT_PORT); - assertThat(args.getProxyDetector()).isSameInstanceAs(neverProxy); - - verify(offloadExecutor, never()).execute(any(Runnable.class)); - args.getOffloadExecutor() - .execute( - new Runnable() { - @Override - public void run() {} - }); - verify(offloadExecutor, times(1)).execute(any(Runnable.class)); - } - - @Test - public void getAuthorityAfterShutdown() throws Exception { - createChannel(); - assertEquals(SERVICE_NAME, channel.authority()); - channel.shutdownNow().awaitTermination(1, TimeUnit.SECONDS); - assertEquals(SERVICE_NAME, channel.authority()); - } - - @Test - public void nameResolverHelper_emptyConfigSucceeds() { - boolean retryEnabled = false; - int maxRetryAttemptsLimit = 2; - int maxHedgedAttemptsLimit = 3; - AutoConfiguredLoadBalancerFactory2 autoConfiguredLoadBalancerFactory = - new AutoConfiguredLoadBalancerFactory2("pick_first"); - - ScParser parser = new ScParser( - retryEnabled, - maxRetryAttemptsLimit, - maxHedgedAttemptsLimit, - autoConfiguredLoadBalancerFactory, - mock(ChannelLogger.class)); - - ConfigOrError coe = parser.parseServiceConfig(ImmutableMap.of()); - - assertThat(coe.getError()).isNull(); - ManagedChannelServiceConfig2 cfg = (ManagedChannelServiceConfig2) coe.getConfig(); - 
assertThat(cfg.getServiceMap()).isEmpty(); - assertThat(cfg.getServiceMethodMap()).isEmpty(); - } - - @Test - public void nameResolverHelper_badConfigFails() { - boolean retryEnabled = false; - int maxRetryAttemptsLimit = 2; - int maxHedgedAttemptsLimit = 3; - AutoConfiguredLoadBalancerFactory2 autoConfiguredLoadBalancerFactory = - new AutoConfiguredLoadBalancerFactory2("pick_first"); - - ScParser parser = new ScParser( - retryEnabled, - maxRetryAttemptsLimit, - maxHedgedAttemptsLimit, - autoConfiguredLoadBalancerFactory, - mock(ChannelLogger.class)); - - ConfigOrError coe = - parser.parseServiceConfig(ImmutableMap.of("methodConfig", "bogus")); - - assertThat(coe.getError()).isNotNull(); - assertThat(coe.getError().getCode()).isEqualTo(Code.UNKNOWN); - assertThat(coe.getError().getDescription()).contains("failed to parse service config"); - assertThat(coe.getError().getCause()).isInstanceOf(ClassCastException.class); - } - - @Test - public void nameResolverHelper_noConfigChosen() { - boolean retryEnabled = false; - int maxRetryAttemptsLimit = 2; - int maxHedgedAttemptsLimit = 3; - AutoConfiguredLoadBalancerFactory2 autoConfiguredLoadBalancerFactory = - new AutoConfiguredLoadBalancerFactory2("pick_first"); - - ScParser parser = new ScParser( - retryEnabled, - maxRetryAttemptsLimit, - maxHedgedAttemptsLimit, - autoConfiguredLoadBalancerFactory, - mock(ChannelLogger.class)); - - ConfigOrError coe = - parser.parseServiceConfig(ImmutableMap.of("loadBalancingConfig", ImmutableList.of())); - - assertThat(coe.getError()).isNull(); - ManagedChannelServiceConfig2 cfg = (ManagedChannelServiceConfig2) coe.getConfig(); - assertThat(cfg.getLoadBalancingConfig()).isNull(); - } - - @Test - public void disableServiceConfigLookUp_noDefaultConfig() throws Exception { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - try { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - 
.setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - channelBuilder.disableServiceConfigLookUp(); - - Map rawServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - createChannel(); - - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)).isEmpty(); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - } finally { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - } - - @Test - public void disableServiceConfigLookUp_withDefaultConfig() throws Exception { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - try { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - channelBuilder.disableServiceConfigLookUp(); - Map defaultServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - channelBuilder.defaultServiceConfig(defaultServiceConfig); - - Map rawServiceConfig = new HashMap<>(); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - 
createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - createChannel(); - - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(defaultServiceConfig); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - } finally { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - } - - @Test - public void enableServiceConfigLookUp_noDefaultConfig() throws Exception { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - try { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - - Map rawServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - createChannel(); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Attributes actualAttrs = 
resultCaptor.getValue().getAttributes(); - - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(rawServiceConfig); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - - // new config - rawServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":false}]}"); - managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - nameResolverFactory.allResolved(); - - resultCaptor = ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer, times(2)).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - actualAttrs = resultCaptor.getValue().getAttributes(); - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(rawServiceConfig); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - } finally { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - } - - @Test - public void enableServiceConfigLookUp_withDefaultConfig() throws Exception { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - try { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - Map defaultServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - channelBuilder.defaultServiceConfig(defaultServiceConfig); - - Map rawServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService2\"}]," - 
+ "\"waitForReady\":false}]}"); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - createChannel(); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(rawServiceConfig); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - } finally { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - } - - @Test - public void enableServiceConfigLookUp_resolverReturnsNoConfig_withDefaultConfig() - throws Exception { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - try { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - Map defaultServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - channelBuilder.defaultServiceConfig(defaultServiceConfig); - - nameResolverFactory.nextRawServiceConfig.set(null); - nameResolverFactory.nextConfigOrError.set(null); - - createChannel(); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Attributes actualAttrs = 
resultCaptor.getValue().getAttributes(); - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)) - .isEqualTo(defaultServiceConfig); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - } finally { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - } - - @Test - public void enableServiceConfigLookUp_resolverReturnsNoConfig_noDefaultConfig() { - LoadBalancerRegistry.getDefaultRegistry().register(mockLoadBalancerProvider); - try { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - - Map rawServiceConfig = Collections.emptyMap(); - ManagedChannelServiceConfig2 managedChannelServiceConfig = - createManagedChannelServiceConfig(rawServiceConfig, null); - nameResolverFactory.nextRawServiceConfig.set(rawServiceConfig); - nameResolverFactory.nextConfigOrError.set( - ConfigOrError.fromConfig(managedChannelServiceConfig)); - - createChannel(); - ArgumentCaptor resultCaptor = - ArgumentCaptor.forClass(ResolvedAddresses.class); - verify(mockLoadBalancer).handleResolvedAddresses(resultCaptor.capture()); - assertThat(resultCaptor.getValue().getAddresses()).containsExactly(addressGroup); - Attributes actualAttrs = resultCaptor.getValue().getAttributes(); - assertThat(actualAttrs.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG)).isEmpty(); - verify(mockLoadBalancer, never()).handleNameResolutionError(any(Status.class)); - } finally { - LoadBalancerRegistry.getDefaultRegistry().deregister(mockLoadBalancerProvider); - } - } - - @Test - public void useDefaultImmediatelyIfDisableLookUp() throws Exception { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - 
channelBuilder.disableServiceConfigLookUp(); - Map defaultServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - channelBuilder.defaultServiceConfig(defaultServiceConfig); - requestConnection = false; - channelBuilder.maxTraceEvents(10); - - createChannel(); - - int size = getStats(channel).channelTrace.events.size(); - assertThat(getStats(channel).channelTrace.events.get(size - 1)) - .isEqualTo(new ChannelTrace.Event.Builder() - .setDescription("Service config look-up disabled, using default service config") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - @Test - public void notUseDefaultImmediatelyIfEnableLookUp() throws Exception { - FakeNameResolverFactory nameResolverFactory = - new FakeNameResolverFactory.Builder(expectedUri) - .setServers(ImmutableList.of(addressGroup)).build(); - channelBuilder.nameResolverFactory(nameResolverFactory); - Map defaultServiceConfig = - parseConfig("{\"methodConfig\":[{" - + "\"name\":[{\"service\":\"SimpleService1\"}]," - + "\"waitForReady\":true}]}"); - channelBuilder.defaultServiceConfig(defaultServiceConfig); - requestConnection = false; - channelBuilder.maxTraceEvents(10); - - createChannel(); - - int size = getStats(channel).channelTrace.events.size(); - assertThat(getStats(channel).channelTrace.events.get(size - 1)) - .isNotEqualTo(new ChannelTrace.Event.Builder() - .setDescription("Using default service config") - .setSeverity(ChannelTrace.Event.Severity.CT_INFO) - .setTimestampNanos(timer.getTicker().read()) - .build()); - } - - private static final class ChannelBuilder - extends AbstractManagedChannelImplBuilder { - - ChannelBuilder() { - super(TARGET); - } - - @Override protected ClientTransportFactory buildTransportFactory() { - throw new UnsupportedOperationException(); - } - - @Override protected int getDefaultPort() { - return DEFAULT_PORT; - } - } - - private 
static final class FakeBackoffPolicyProvider implements BackoffPolicy.Provider { - @Override - public BackoffPolicy get() { - return new BackoffPolicy() { - int multiplier = 1; - - @Override - public long nextBackoffNanos() { - return RECONNECT_BACKOFF_INTERVAL_NANOS * multiplier++; - } - }; - } - } - - private static final class FakeNameResolverFactory extends NameResolver.Factory { - final URI expectedUri; - final List servers; - final boolean resolvedAtStart; - final Status error; - final ArrayList resolvers = new ArrayList<>(); - final AtomicReference nextConfigOrError = new AtomicReference<>(); - final AtomicReference> nextRawServiceConfig = new AtomicReference<>(); - - FakeNameResolverFactory( - URI expectedUri, - List servers, - boolean resolvedAtStart, - Status error) { - this.expectedUri = expectedUri; - this.servers = servers; - this.resolvedAtStart = resolvedAtStart; - this.error = error; - } - - @Override - public NameResolver newNameResolver(final URI targetUri, NameResolver.Args args) { - if (!expectedUri.equals(targetUri)) { - return null; - } - assertEquals(DEFAULT_PORT, args.getDefaultPort()); - FakeNameResolverFactory.FakeNameResolver resolver = - new FakeNameResolverFactory.FakeNameResolver(error); - resolvers.add(resolver); - return resolver; - } - - @Override - public String getDefaultScheme() { - return "fake"; - } - - void allResolved() { - for (FakeNameResolverFactory.FakeNameResolver resolver : resolvers) { - resolver.resolved(); - } - } - - final class FakeNameResolver extends NameResolver { - Listener2 listener; - boolean shutdown; - int refreshCalled; - Status error; - - FakeNameResolver(Status error) { - this.error = error; - } - - @Override public String getServiceAuthority() { - return expectedUri.getAuthority(); - } - - @Override public void start(Listener2 listener) { - this.listener = listener; - if (resolvedAtStart) { - resolved(); - } - } - - @Override public void refresh() { - refreshCalled++; - resolved(); - } - - void 
resolved() { - if (error != null) { - listener.onError(error); - return; - } - ResolutionResult.Builder builder = - ResolutionResult.newBuilder() - .setAddresses(servers); - ConfigOrError configOrError = nextConfigOrError.get(); - Map rawServiceConfig = nextRawServiceConfig.get(); - if (configOrError != null) { - builder.setServiceConfig(configOrError); - } - if (rawServiceConfig != null) { - builder.setAttributes( - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, rawServiceConfig) - .build()); - } - - listener.onResult(builder.build()); - } - - @Override public void shutdown() { - shutdown = true; - } - - @Override - public String toString() { - return "FakeNameResolver"; - } - } - - static final class Builder { - final URI expectedUri; - List servers = ImmutableList.of(); - boolean resolvedAtStart = true; - Status error = null; - - Builder(URI expectedUri) { - this.expectedUri = expectedUri; - } - - FakeNameResolverFactory.Builder setServers(List servers) { - this.servers = servers; - return this; - } - - FakeNameResolverFactory.Builder setResolvedAtStart(boolean resolvedAtStart) { - this.resolvedAtStart = resolvedAtStart; - return this; - } - - FakeNameResolverFactory.Builder setError(Status error) { - this.error = error; - return this; - } - - FakeNameResolverFactory build() { - return new FakeNameResolverFactory(expectedUri, servers, resolvedAtStart, error); - } - } - } - - private static ChannelStats getStats(AbstractSubchannel subchannel) throws Exception { - return subchannel.getInstrumentedInternalSubchannel().getStats().get(); - } - - private static ChannelStats getStats( - InternalInstrumented instrumented) throws Exception { - return instrumented.getStats().get(); - } - - private FakeClock.ScheduledTask getNameResolverRefresh() { - return Iterables.getOnlyElement(timer.getPendingTasks(NAME_RESOLVER_REFRESH_TASK_FILTER), null); - } - - // Helper methods to call methods from SynchronizationContext - private static Subchannel 
createSubchannelSafely( - final Helper helper, final EquivalentAddressGroup addressGroup, final Attributes attrs, - final SubchannelStateListener stateListener) { - final AtomicReference resultCapture = new AtomicReference<>(); - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - Subchannel s = helper.createSubchannel(CreateSubchannelArgs.newBuilder() - .setAddresses(addressGroup) - .setAttributes(attrs) - .build()); - s.start(stateListener); - resultCapture.set(s); - } - }); - return resultCapture.get(); - } - - private static Subchannel createUnstartedSubchannel( - final Helper helper, final EquivalentAddressGroup addressGroup, final Attributes attrs) { - final AtomicReference resultCapture = new AtomicReference<>(); - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - Subchannel s = helper.createSubchannel(CreateSubchannelArgs.newBuilder() - .setAddresses(addressGroup) - .setAttributes(attrs) - .build()); - resultCapture.set(s); - } - }); - return resultCapture.get(); - } - - private static void requestConnectionSafely(Helper helper, final Subchannel subchannel) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - subchannel.requestConnection(); - } - }); - } - - private static void updateBalancingStateSafely( - final Helper helper, final ConnectivityState state, final SubchannelPicker picker) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - helper.updateBalancingState(state, picker); - } - }); - } - - private static void refreshNameResolutionSafely(final Helper helper) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - helper.refreshNameResolution(); - } - }); - } - - private static void updateAddressesSafely( - Helper helper, final Subchannel subchannel, final List addrs) { - 
helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - subchannel.updateAddresses(addrs); - } - }); - } - - private static void shutdownSafely( - final Helper helper, final Subchannel subchannel) { - helper.getSynchronizationContext().execute( - new Runnable() { - @Override - public void run() { - subchannel.shutdown(); - } - }); - } - - @SuppressWarnings("unchecked") - private static Map parseConfig(String json) throws Exception { - return (Map) JsonParser.parse(json); - } - - private static ManagedChannelServiceConfig2 createManagedChannelServiceConfig( - Map rawServiceConfig, PolicySelection policySelection) { - // Provides dummy variable for retry related params (not used in this test class) - return ManagedChannelServiceConfig2 - .fromServiceConfig(rawServiceConfig, true, 3, 4, policySelection); - } -} diff --git a/core/src/test/java/io/grpc/internal/RetryPolicyTest.java b/core/src/test/java/io/grpc/internal/RetryPolicyTest.java index fc13607107c..2e02078698a 100644 --- a/core/src/test/java/io/grpc/internal/RetryPolicyTest.java +++ b/core/src/test/java/io/grpc/internal/RetryPolicyTest.java @@ -62,10 +62,17 @@ public void getRetryPolicies() throws Exception { @SuppressWarnings("unchecked") Map serviceConfig = (Map) serviceConfigObj; - ServiceConfigInterceptor serviceConfigInterceptor = new ServiceConfigInterceptor( - /* retryEnabled = */ true, /* maxRetryAttemptsLimit = */ 4, - /* maxHedgedAttemptsLimit = */ 3); - serviceConfigInterceptor.handleUpdate(serviceConfig); + ServiceConfigInterceptor serviceConfigInterceptor = + new ServiceConfigInterceptor(/* retryEnabled= */ true); + serviceConfigInterceptor + .handleUpdate( + ManagedChannelServiceConfig + .fromServiceConfig( + serviceConfig, + /* retryEnabled= */ true, + /* maxRetryAttemptsLimit= */ 4, + /* maxHedgedAttemptsLimit= */ 3, + /* loadBalancingConfig= */ null)); MethodDescriptor.Builder builder = TestMethodDescriptors.voidMethod().toBuilder(); @@ -140,10 
+147,17 @@ public void getRetryPolicies_retryDisabled() throws Exception { @SuppressWarnings("unchecked") Map serviceConfig = (Map) serviceConfigObj; - ServiceConfigInterceptor serviceConfigInterceptor = new ServiceConfigInterceptor( - /* retryEnabled = */ false, /* maxRetryAttemptsLimit = */ 4, - /* maxHedgedAttemptsLimit = */ 3); - serviceConfigInterceptor.handleUpdate(serviceConfig); + ServiceConfigInterceptor serviceConfigInterceptor = + new ServiceConfigInterceptor(/* retryEnabled= */ false); + serviceConfigInterceptor + .handleUpdate( + ManagedChannelServiceConfig + .fromServiceConfig( + serviceConfig, + /* retryEnabled= */ false, + /* maxRetryAttemptsLimit= */ 4, + /* maxHedgedAttemptsLimit= */ 3, + /* loadBalancingConfig= */ null)); MethodDescriptor.Builder builder = TestMethodDescriptors.voidMethod().toBuilder(); diff --git a/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java b/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java index 24cbd589a28..75b11771ce9 100644 --- a/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java +++ b/core/src/test/java/io/grpc/internal/ServiceConfigErrorHandlingTest.java @@ -95,7 +95,7 @@ public String toString() { @Override public boolean shouldAccept(Runnable command) { return command.toString().contains( - ManagedChannelImpl2.DelayedNameResolverRefresh.class.getName()); + ManagedChannelImpl.DelayedNameResolverRefresh.class.getName()); } }; @@ -104,7 +104,7 @@ public boolean shouldAccept(Runnable command) { @Rule public final ExpectedException thrown = ExpectedException.none(); @Rule public final MockitoRule mocks = MockitoJUnit.rule(); - private ManagedChannelImpl2 channel; + private ManagedChannelImpl channel; private final AtomicReference nextLbPolicyConfigError = new AtomicReference<>(); private FakeLoadBalancer mockLoadBalancer = @@ -157,7 +157,7 @@ private void createChannel(ClientInterceptor... 
interceptors) { checkState(channel == null); channel = - new ManagedChannelImpl2( + new ManagedChannelImpl( channelBuilder, mockTransportFactory, new FakeBackoffPolicyProvider(), @@ -175,7 +175,7 @@ public void run() { channel.exitIdleMode(); } }); - if (channelBuilder.idleTimeoutMillis != ManagedChannelImpl2.IDLE_TIMEOUT_MILLIS_DISABLE) { + if (channelBuilder.idleTimeoutMillis != ManagedChannelImpl.IDLE_TIMEOUT_MILLIS_DISABLE) { numExpectedTasks += 1; } diff --git a/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest.java b/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest.java index 31bede998a6..d339e4423d6 100644 --- a/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest.java +++ b/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest.java @@ -47,7 +47,6 @@ /** * Unit tests for {@link ServiceConfigInterceptor}. */ -@Deprecated // migrate to ServiceConfigInterceptor(Test)?2 @RunWith(JUnit4.class) public class ServiceConfigInterceptorTest { @@ -61,8 +60,8 @@ public void setUp() { MockitoAnnotations.initMocks(this); } - private final ServiceConfigInterceptor interceptor = new ServiceConfigInterceptor( - /* retryEnabled = */ true, /* maxRetryAttemptsLimit = */ 5, /* maxHedgedAttemptsLimit = */ 6); + private final ServiceConfigInterceptor interceptor = + new ServiceConfigInterceptor(/* retryEnabled = */ true); private final String fullMethodName = MethodDescriptor.generateFullMethodName("service", "method"); @@ -72,8 +71,6 @@ public void setUp() { .setFullMethodName(fullMethodName) .build(); - - private static final class JsonObj extends HashMap { private JsonObj(Object ... 
kv) { for (int i = 0; i < kv.length; i += 2) { @@ -93,8 +90,10 @@ public void withWaitForReady() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "waitForReady", true); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withoutWaitForReady(), channel); @@ -108,7 +107,7 @@ public void handleNullConfig() { JsonObj methodConfig = new JsonObj("name", new JsonList(name), "waitForReady", true); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(createManagedChannelServiceConfig(serviceConfig)); interceptor.handleUpdate(null); interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withoutWaitForReady(), channel); @@ -134,8 +133,10 @@ public void withMaxRequestSize() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", 1d); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT, channel); @@ -148,8 +149,10 @@ public void withMaxRequestSize_pickSmallerExisting() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", 10d); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + 
createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall( methodDescriptor, CallOptions.DEFAULT.withMaxOutboundMessageSize(5), channel); @@ -163,8 +166,10 @@ public void withMaxRequestSize_pickSmallerNew() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", 5d); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall( methodDescriptor, CallOptions.DEFAULT.withMaxOutboundMessageSize(10), channel); @@ -178,8 +183,10 @@ public void withMaxResponseSize() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", 1d); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT, channel); @@ -192,8 +199,10 @@ public void withMaxResponseSize_pickSmallerExisting() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", 5d); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall( methodDescriptor, CallOptions.DEFAULT.withMaxInboundMessageSize(10), 
channel); @@ -207,8 +216,10 @@ public void withMaxResponseSize_pickSmallerNew() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", 10d); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall( methodDescriptor, CallOptions.DEFAULT.withMaxInboundMessageSize(5), channel); @@ -222,8 +233,10 @@ public void withoutWaitForReady() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "waitForReady", false); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withWaitForReady(), channel); @@ -241,8 +254,10 @@ public void fullMethodMatched() { JsonObj methodConfig2 = new JsonObj("name", new JsonList(name2), "timeout", "1s"); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig1, methodConfig2)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT, channel); @@ -255,8 +270,10 @@ public void nearerDeadlineKept_existing() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "timeout", "100000s"); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig 
parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); Deadline existingDeadline = Deadline.after(1000, TimeUnit.NANOSECONDS); interceptor.interceptCall( @@ -273,8 +290,10 @@ public void nearerDeadlineKept_new() { JsonObj name = new JsonObj("service", "service"); JsonObj methodConfig = new JsonObj("name", new JsonList(name), "timeout", "1s"); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); Deadline existingDeadline = Deadline.after(1234567890, TimeUnit.NANOSECONDS); interceptor.interceptCall( @@ -284,7 +303,6 @@ public void nearerDeadlineKept_new() { assertThat(callOptionsCap.getValue().getDeadline()).isNotEqualTo(existingDeadline); } - @Test public void handleUpdate_failsOnMissingServiceName() { JsonObj name = new JsonObj("method", "method"); @@ -294,9 +312,11 @@ public void handleUpdate_failsOnMissingServiceName() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("missing service"); - interceptor.handleUpdate(serviceConfig); - } + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); + } @Test public void handleUpdate_failsOnDuplicateMethod() { @@ -308,7 +328,10 @@ public void handleUpdate_failsOnDuplicateMethod() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("Duplicate method"); - interceptor.handleUpdate(serviceConfig); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); + + interceptor.handleUpdate(parsedServiceConfig); } @Test @@ -319,7 +342,10 @@ public void handleUpdate_failsOnEmptyName() { 
thrown.expect(IllegalArgumentException.class); thrown.expectMessage("no names in method config"); - interceptor.handleUpdate(serviceConfig); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); + + interceptor.handleUpdate(parsedServiceConfig); } @Test @@ -332,7 +358,10 @@ public void handleUpdate_failsOnDuplicateService() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("Duplicate service"); - interceptor.handleUpdate(serviceConfig); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); + + interceptor.handleUpdate(parsedServiceConfig); } @Test @@ -346,7 +375,10 @@ public void handleUpdate_failsOnDuplicateServiceMultipleConfig() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("Duplicate service"); - interceptor.handleUpdate(serviceConfig); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); + + interceptor.handleUpdate(parsedServiceConfig); } @Test @@ -358,13 +390,17 @@ public void handleUpdate_replaceExistingConfig() { JsonObj name2 = new JsonObj("service", "service", "method", "method"); JsonObj methodConfig2 = new JsonObj("name", new JsonList(name2)); JsonObj serviceConfig2 = new JsonObj("methodConfig", new JsonList(methodConfig2)); + ManagedChannelServiceConfig parsedServiceConfig1 = + createManagedChannelServiceConfig(serviceConfig1); + ManagedChannelServiceConfig parsedServiceConfig2 = + createManagedChannelServiceConfig(serviceConfig2); - interceptor.handleUpdate(serviceConfig1); + interceptor.handleUpdate(parsedServiceConfig1); assertThat(interceptor.managedChannelServiceConfig.get().getServiceMap()).isNotEmpty(); assertThat(interceptor.managedChannelServiceConfig.get().getServiceMethodMap()).isEmpty(); - interceptor.handleUpdate(serviceConfig2); + interceptor.handleUpdate(parsedServiceConfig2); 
assertThat(interceptor.managedChannelServiceConfig.get().getServiceMap()).isEmpty(); assertThat(interceptor.managedChannelServiceConfig.get().getServiceMethodMap()).isNotEmpty(); @@ -376,8 +412,10 @@ public void handleUpdate_matchNames() { JsonObj name2 = new JsonObj("service", "service", "method", "method"); JsonObj methodConfig = new JsonObj("name", new JsonList(name1, name2)); JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); + ManagedChannelServiceConfig parsedServiceConfig = + createManagedChannelServiceConfig(serviceConfig); - interceptor.handleUpdate(serviceConfig); + interceptor.handleUpdate(parsedServiceConfig); assertThat(interceptor.managedChannelServiceConfig.get().getServiceMethodMap()) .containsExactly( @@ -387,7 +425,6 @@ public void handleUpdate_matchNames() { "service2", new MethodInfo(methodConfig, false, 1, 1)); } - @Test public void methodInfo_validateDeadline() { JsonObj name = new JsonObj("service", "service"); @@ -408,7 +445,6 @@ public void methodInfo_saturateDeadline() { assertThat(info.timeoutNanos).isEqualTo(Long.MAX_VALUE); } - @Test public void methodInfo_badMaxRequestSize() { JsonObj name = new JsonObj("service", "service"); @@ -431,6 +467,17 @@ public void methodInfo_badMaxResponseSize() { new MethodInfo(methodConfig, false, 1, 1); } + private static ManagedChannelServiceConfig createManagedChannelServiceConfig( + JsonObj rawServiceConfig) { + // current tests doesn't use any other values except rawServiceConfig, so provide dummy values. 
+ return ManagedChannelServiceConfig.fromServiceConfig( + rawServiceConfig, + /* retryEnabled= */ true, + /* maxRetryAttemptsLimit= */ 3, + /* maxHedgedAttemptsLimit= */ 4, + /* loadBalancingConfig= */ null); + } + private static final class NoopMarshaller implements MethodDescriptor.Marshaller { @Override diff --git a/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest2.java b/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest2.java deleted file mode 100644 index 12edbf23d68..00000000000 --- a/core/src/test/java/io/grpc/internal/ServiceConfigInterceptorTest2.java +++ /dev/null @@ -1,493 +0,0 @@ -/* - * Copyright 2018 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.grpc.internal; - -import static com.google.common.truth.Truth.assertThat; -import static io.grpc.internal.ServiceConfigInterceptor2.HEDGING_POLICY_KEY; -import static io.grpc.internal.ServiceConfigInterceptor2.RETRY_POLICY_KEY; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.verify; - -import io.grpc.CallOptions; -import io.grpc.Channel; -import io.grpc.Deadline; -import io.grpc.MethodDescriptor; -import io.grpc.MethodDescriptor.MethodType; -import io.grpc.internal.ManagedChannelServiceConfig2.MethodInfo; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.concurrent.TimeUnit; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -/** - * Unit tests for {@link ServiceConfigInterceptor2}. - */ -@RunWith(JUnit4.class) -public class ServiceConfigInterceptorTest2 { - - @Rule public final ExpectedException thrown = ExpectedException.none(); - - @Mock private Channel channel; - @Captor private ArgumentCaptor callOptionsCap; - - @Before - public void setUp() { - MockitoAnnotations.initMocks(this); - } - - private final ServiceConfigInterceptor2 interceptor = - new ServiceConfigInterceptor2(/* retryEnabled = */ true); - - private final String fullMethodName = - MethodDescriptor.generateFullMethodName("service", "method"); - private final MethodDescriptor methodDescriptor = - MethodDescriptor.newBuilder(new NoopMarshaller(), new NoopMarshaller()) - .setType(MethodType.UNARY) - .setFullMethodName(fullMethodName) - .build(); - - private static final class JsonObj extends HashMap { - private JsonObj(Object ... 
kv) { - for (int i = 0; i < kv.length; i += 2) { - put((String) kv[i], kv[i + 1]); - } - } - } - - private static final class JsonList extends ArrayList { - private JsonList(Object ... values) { - addAll(Arrays.asList(values)); - } - } - - @Test - public void withWaitForReady() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "waitForReady", true); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withoutWaitForReady(), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().isWaitForReady()).isTrue(); - } - - @Test - public void handleNullConfig() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "waitForReady", true); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - - interceptor.handleUpdate(createManagedChannelServiceConfig(serviceConfig)); - interceptor.handleUpdate(null); - - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withoutWaitForReady(), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().isWaitForReady()).isFalse(); - } - - @Test - public void handleUpdateNotCalledBeforeInterceptCall() { - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withoutWaitForReady(), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().isWaitForReady()).isFalse(); - assertThat(callOptionsCap.getValue().getOption(RETRY_POLICY_KEY).get()) - .isEqualTo(RetryPolicy.DEFAULT); - 
assertThat(callOptionsCap.getValue().getOption(HEDGING_POLICY_KEY).get()) - .isEqualTo(HedgingPolicy.DEFAULT); - } - - @Test - public void withMaxRequestSize() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", 1d); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT, channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getMaxOutboundMessageSize()).isEqualTo(1); - } - - @Test - public void withMaxRequestSize_pickSmallerExisting() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", 10d); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall( - methodDescriptor, CallOptions.DEFAULT.withMaxOutboundMessageSize(5), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getMaxOutboundMessageSize()).isEqualTo(5); - } - - @Test - public void withMaxRequestSize_pickSmallerNew() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", 5d); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - 
interceptor.interceptCall( - methodDescriptor, CallOptions.DEFAULT.withMaxOutboundMessageSize(10), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getMaxOutboundMessageSize()).isEqualTo(5); - } - - @Test - public void withMaxResponseSize() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", 1d); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT, channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getMaxInboundMessageSize()).isEqualTo(1); - } - - @Test - public void withMaxResponseSize_pickSmallerExisting() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", 5d); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall( - methodDescriptor, CallOptions.DEFAULT.withMaxInboundMessageSize(10), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getMaxInboundMessageSize()).isEqualTo(5); - } - - @Test - public void withMaxResponseSize_pickSmallerNew() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", 10d); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - 
ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall( - methodDescriptor, CallOptions.DEFAULT.withMaxInboundMessageSize(5), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getMaxInboundMessageSize()).isEqualTo(5); - } - - @Test - public void withoutWaitForReady() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "waitForReady", false); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT.withWaitForReady(), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().isWaitForReady()).isFalse(); - } - - @Test - public void fullMethodMatched() { - // Put in service that matches, but has no deadline. 
It should be lower priority - JsonObj name1 = new JsonObj("service", "service"); - JsonObj methodConfig1 = new JsonObj("name", new JsonList(name1)); - - JsonObj name2 = new JsonObj("service", "service", "method", "method"); - JsonObj methodConfig2 = new JsonObj("name", new JsonList(name2), "timeout", "1s"); - - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig1, methodConfig2)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - interceptor.interceptCall(methodDescriptor, CallOptions.DEFAULT, channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getDeadline()).isNotNull(); - } - - @Test - public void nearerDeadlineKept_existing() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "timeout", "100000s"); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - Deadline existingDeadline = Deadline.after(1000, TimeUnit.NANOSECONDS); - interceptor.interceptCall( - methodDescriptor, CallOptions.DEFAULT.withDeadline(existingDeadline), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getDeadline()).isEqualTo(existingDeadline); - } - - @Test - public void nearerDeadlineKept_new() { - // TODO(carl-mastrangelo): the deadlines are very large because they change over time. 
- // This should be fixed, and is tracked in https://github.com/grpc/grpc-java/issues/2531 - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "timeout", "1s"); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - Deadline existingDeadline = Deadline.after(1234567890, TimeUnit.NANOSECONDS); - interceptor.interceptCall( - methodDescriptor, CallOptions.DEFAULT.withDeadline(existingDeadline), channel); - - verify(channel).newCall(eq(methodDescriptor), callOptionsCap.capture()); - assertThat(callOptionsCap.getValue().getDeadline()).isNotEqualTo(existingDeadline); - } - - @Test - public void handleUpdate_failsOnMissingServiceName() { - JsonObj name = new JsonObj("method", "method"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name)); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("missing service"); - - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - } - - @Test - public void handleUpdate_failsOnDuplicateMethod() { - JsonObj name1 = new JsonObj("service", "service", "method", "method"); - JsonObj name2 = new JsonObj("service", "service", "method", "method"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name1, name2)); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Duplicate method"); - - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - 
} - - @Test - public void handleUpdate_failsOnEmptyName() { - JsonObj methodConfig = new JsonObj(); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("no names in method config"); - - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - } - - @Test - public void handleUpdate_failsOnDuplicateService() { - JsonObj name1 = new JsonObj("service", "service"); - JsonObj name2 = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name1, name2)); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Duplicate service"); - - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - } - - @Test - public void handleUpdate_failsOnDuplicateServiceMultipleConfig() { - JsonObj name1 = new JsonObj("service", "service"); - JsonObj name2 = new JsonObj("service", "service"); - JsonObj methodConfig1 = new JsonObj("name", new JsonList(name1)); - JsonObj methodConfig2 = new JsonObj("name", new JsonList(name2)); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig1, methodConfig2)); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Duplicate service"); - - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - } - - @Test - public void handleUpdate_replaceExistingConfig() { - JsonObj name1 = new JsonObj("service", "service"); - JsonObj methodConfig1 = new JsonObj("name", new JsonList(name1)); - JsonObj serviceConfig1 = new JsonObj("methodConfig", new JsonList(methodConfig1)); - - JsonObj name2 
= new JsonObj("service", "service", "method", "method"); - JsonObj methodConfig2 = new JsonObj("name", new JsonList(name2)); - JsonObj serviceConfig2 = new JsonObj("methodConfig", new JsonList(methodConfig2)); - ManagedChannelServiceConfig2 parsedServiceConfig1 = - createManagedChannelServiceConfig(serviceConfig1); - ManagedChannelServiceConfig2 parsedServiceConfig2 = - createManagedChannelServiceConfig(serviceConfig2); - - interceptor.handleUpdate(parsedServiceConfig1); - - assertThat(interceptor.managedChannelServiceConfig.get().getServiceMap()).isNotEmpty(); - assertThat(interceptor.managedChannelServiceConfig.get().getServiceMethodMap()).isEmpty(); - - interceptor.handleUpdate(parsedServiceConfig2); - - assertThat(interceptor.managedChannelServiceConfig.get().getServiceMap()).isEmpty(); - assertThat(interceptor.managedChannelServiceConfig.get().getServiceMethodMap()).isNotEmpty(); - } - - @Test - public void handleUpdate_matchNames() { - JsonObj name1 = new JsonObj("service", "service2"); - JsonObj name2 = new JsonObj("service", "service", "method", "method"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name1, name2)); - JsonObj serviceConfig = new JsonObj("methodConfig", new JsonList(methodConfig)); - ManagedChannelServiceConfig2 parsedServiceConfig = - createManagedChannelServiceConfig(serviceConfig); - - interceptor.handleUpdate(parsedServiceConfig); - - assertThat(interceptor.managedChannelServiceConfig.get().getServiceMethodMap()) - .containsExactly( - methodDescriptor.getFullMethodName(), - new MethodInfo(methodConfig, false, 1, 1)); - assertThat(interceptor.managedChannelServiceConfig.get().getServiceMap()).containsExactly( - "service2", new MethodInfo(methodConfig, false, 1, 1)); - } - - @Test - public void methodInfo_validateDeadline() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "timeout", "10000000000000000s"); - - thrown.expectMessage("Duration value is out of 
range"); - - new MethodInfo(methodConfig, false, 1, 1); - } - - @Test - public void methodInfo_saturateDeadline() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "timeout", "315576000000s"); - - MethodInfo info = new MethodInfo(methodConfig, false, 1, 1); - - assertThat(info.timeoutNanos).isEqualTo(Long.MAX_VALUE); - } - - @Test - public void methodInfo_badMaxRequestSize() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxRequestMessageBytes", -1d); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("exceeds bounds"); - - new MethodInfo(methodConfig, false, 1, 1); - } - - @Test - public void methodInfo_badMaxResponseSize() { - JsonObj name = new JsonObj("service", "service"); - JsonObj methodConfig = new JsonObj("name", new JsonList(name), "maxResponseMessageBytes", -1d); - - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("exceeds bounds"); - - new MethodInfo(methodConfig, false, 1, 1); - } - - private static ManagedChannelServiceConfig2 createManagedChannelServiceConfig( - JsonObj rawServiceConfig) { - // current tests doesn't use any other values except rawServiceConfig, so provide dummy values. 
- return ManagedChannelServiceConfig2.fromServiceConfig( - rawServiceConfig, - /* retryEnabled= */ true, - /* maxRetryAttemptsLimit= */ 3, - /* maxHedgedAttemptsLimit= */ 4, - /* loadBalancingConfig= */ null); - } - - private static final class NoopMarshaller implements MethodDescriptor.Marshaller { - - @Override - public InputStream stream(Void value) { - return null; - } - - @Override - public Void parse(InputStream stream) { - return null; - } - } -} From e5745a514f25bad726176885d80388ee163a6b87 Mon Sep 17 00:00:00 2001 From: Eric Gribkoff Date: Fri, 24 Jan 2020 14:03:49 -0800 Subject: [PATCH 18/86] interop-testing: use server hostname instead of id for xds test (#6639) --- .../testing/integration/XdsTestClient.java | 18 +++++++++--------- .../testing/integration/XdsTestServer.java | 5 +++-- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java index 67ad40674eb..66d061b766d 100644 --- a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java +++ b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java @@ -208,17 +208,17 @@ public void run() { CallOptions.DEFAULT.withDeadlineAfter(rpcTimeoutSec, TimeUnit.SECONDS)); call.start( new ClientCall.Listener() { - private String serverId; + private String hostname; @Override public void onMessage(SimpleResponse response) { - serverId = response.getServerId(); + hostname = response.getHostname(); // TODO(ericgribkoff) Currently some test environments cannot access the stats RPC // service and rely on parsing stdout. 
if (printResponse) { System.out.println( "Greeting: Hello world, this is " - + response.getHostname() + + hostname + ", from " + call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR)); } @@ -227,7 +227,7 @@ public void onMessage(SimpleResponse response) { @Override public void onClose(Status status, Metadata trailers) { for (XdsStatsWatcher watcher : savedWatchers) { - watcher.rpcCompleted(requestId, serverId); + watcher.rpcCompleted(requestId, hostname); } } }, @@ -295,14 +295,14 @@ private XdsStatsWatcher(long startId, long endId) { this.endId = endId; } - void rpcCompleted(long requestId, @Nullable String serverId) { + void rpcCompleted(long requestId, @Nullable String hostname) { synchronized (lock) { if (startId <= requestId && requestId < endId) { - if (serverId != null) { - if (rpcsByPeer.containsKey(serverId)) { - rpcsByPeer.put(serverId, rpcsByPeer.get(serverId) + 1); + if (hostname != null) { + if (rpcsByPeer.containsKey(hostname)) { + rpcsByPeer.put(hostname, rpcsByPeer.get(hostname) + 1); } else { - rpcsByPeer.put(serverId, 1); + rpcsByPeer.put(hostname, 1); } } else { noRemotePeer += 1; diff --git a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java index e3ae481142f..915be98a409 100644 --- a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java +++ b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestServer.java @@ -124,13 +124,14 @@ private void blockUntilShutdown() throws InterruptedException { } private class TestServiceImpl extends TestServiceGrpc.TestServiceImplBase { - private String host = ""; + private final String host; private TestServiceImpl() { try { host = InetAddress.getLocalHost().getHostName(); } catch (UnknownHostException e) { - logger.log(Level.WARNING, "Failed to get host", e); + logger.log(Level.SEVERE, "Failed to get host", e); + throw new RuntimeException(e); } } From 
e9882ec78db581106f709ab5130bfe30af660e79 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 24 Jan 2020 16:12:24 -0800 Subject: [PATCH 19/86] xds: fix bug of xDS resolver parsing service config incorrectly (#6640) The ResolutionResult returned by resolver should have its ServiceConfig set with a parsed service config, ins --- .../java/io/grpc/xds/XdsNameResolver.java | 9 ++++---- .../java/io/grpc/xds/XdsNameResolverTest.java | 22 ++++++++++++++----- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index 207db3473ce..6451df79588 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -25,7 +25,6 @@ import io.envoyproxy.envoy.api.v2.core.Node; import io.grpc.Attributes; import io.grpc.EquivalentAddressGroup; -import io.grpc.LoadBalancerRegistry; import io.grpc.NameResolver; import io.grpc.Status; import io.grpc.Status.Code; @@ -64,6 +63,7 @@ final class XdsNameResolver extends NameResolver { private final XdsChannelFactory channelFactory; private final SynchronizationContext syncContext; private final ScheduledExecutorService timeService; + private final ServiceConfigParser serviceConfigParser; private final BackoffPolicy.Provider backoffPolicyProvider; private final Supplier stopwatchSupplier; private final Bootstrapper bootstrapper; @@ -89,6 +89,7 @@ final class XdsNameResolver extends NameResolver { this.channelFactory = checkNotNull(channelFactory, "channelFactory"); this.syncContext = checkNotNull(args.getSynchronizationContext(), "syncContext"); this.timeService = checkNotNull(args.getScheduledExecutorService(), "timeService"); + this.serviceConfigParser = checkNotNull(args.getServiceConfigParser(), "serviceConfigParser"); this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); this.stopwatchSupplier = checkNotNull(stopwatchSupplier, 
"stopwatchSupplier"); this.bootstrapper = checkNotNull(bootstrapper, "bootstrapper"); @@ -158,14 +159,12 @@ public void onConfigChanged(ConfigUpdate update) { .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, config) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build(); - ConfigOrError xdsServiceConfig = - XdsLoadBalancerProvider - .parseLoadBalancingConfigPolicy(config, LoadBalancerRegistry.getDefaultRegistry()); + ConfigOrError parsedServiceConfig = serviceConfigParser.parseServiceConfig(config); ResolutionResult result = ResolutionResult.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(attrs) - .setServiceConfig(xdsServiceConfig) + .setServiceConfig(parsedServiceConfig) .build(); listener.onResult(result); } diff --git a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java index ad80f79bc6d..cb82406dae5 100644 --- a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java @@ -39,6 +39,7 @@ import io.grpc.ChannelLogger; import io.grpc.ManagedChannel; import io.grpc.NameResolver; +import io.grpc.NameResolver.ConfigOrError; import io.grpc.NameResolver.ResolutionResult; import io.grpc.NameResolver.ServiceConfigParser; import io.grpc.Status; @@ -86,7 +87,7 @@ public class XdsNameResolverTest { public final MockitoRule mocks = MockitoJUnit.rule(); @Rule public final GrpcCleanupRule cleanupRule = new GrpcCleanupRule(); - + private final SynchronizationContext syncContext = new SynchronizationContext( new Thread.UncaughtExceptionHandler() { @Override @@ -96,19 +97,24 @@ public void uncaughtException(Thread t, Throwable e) { }); private final FakeClock fakeClock = new FakeClock(); + private final Queue> responseObservers = new ArrayDeque<>(); + private final ServiceConfigParser serviceConfigParser = new ServiceConfigParser() { + @Override + public ConfigOrError parseServiceConfig(Map rawServiceConfig) { + return 
ConfigOrError.fromConfig(rawServiceConfig); + } + }; + private final NameResolver.Args args = NameResolver.Args.newBuilder() .setDefaultPort(8080) .setProxyDetector(GrpcUtil.NOOP_PROXY_DETECTOR) .setSynchronizationContext(syncContext) - .setServiceConfigParser(mock(ServiceConfigParser.class)) + .setServiceConfigParser(serviceConfigParser) .setScheduledExecutorService(fakeClock.getScheduledExecutorService()) .setChannelLogger(mock(ChannelLogger.class)) .build(); - - private final Queue> responseObservers = new ArrayDeque<>(); - @Mock private BackoffPolicy.Provider backoffPolicyProvider; @Mock @@ -226,7 +232,7 @@ public BootstrapInfo readBootstrap() throws IOException { } @Test - public void resolve_passxdsClientPoolInResult() { + public void resolve_passXdsClientPoolInResult() { xdsNameResolver.start(mockListener); assertThat(responseObservers).hasSize(1); StreamObserver responseObserver = responseObservers.poll(); @@ -260,6 +266,7 @@ public void resolve_foundResource() { assertThat(result.getAddresses()).isEmpty(); Map serviceConfig = result.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); + assertThat(result.getServiceConfig().getConfig()).isEqualTo(serviceConfig); @SuppressWarnings("unchecked") List> rawLbConfigs = (List>) serviceConfig.get("loadBalancingConfig"); @@ -306,6 +313,7 @@ public void resolve_resourceUpdated() { assertThat(result.getAddresses()).isEmpty(); Map serviceConfig = result.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); + assertThat(result.getServiceConfig().getConfig()).isEqualTo(serviceConfig); List> rawLbConfigs = (List>) serviceConfig.get("loadBalancingConfig"); @@ -331,6 +339,7 @@ public void resolve_resourceUpdated() { result = resolutionResultCaptor.getValue(); assertThat(result.getAddresses()).isEmpty(); serviceConfig = result.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); + assertThat(result.getServiceConfig().getConfig()).isEqualTo(serviceConfig); rawLbConfigs = (List>) 
serviceConfig.get("loadBalancingConfig"); lbConfig = Iterables.getOnlyElement(rawLbConfigs); assertThat(lbConfig.keySet()).containsExactly("cds_experimental"); @@ -366,6 +375,7 @@ public void resolve_resourceNewlyAdded() { assertThat(result.getAddresses()).isEmpty(); Map serviceConfig = result.getAttributes().get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); + assertThat(result.getServiceConfig().getConfig()).isEqualTo(serviceConfig); @SuppressWarnings("unchecked") List> rawLbConfigs = (List>) serviceConfig.get("loadBalancingConfig"); From d8250e9456d2adfa7d9a0d1ea3b77e5c594094c9 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Mon, 27 Jan 2020 13:58:48 -0800 Subject: [PATCH 20/86] interop-testing: log RPC failures for XdsTestClient --- .../java/io/grpc/testing/integration/XdsTestClient.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java index 66d061b766d..4958d5011b1 100644 --- a/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java +++ b/interop-testing/src/main/java/io/grpc/testing/integration/XdsTestClient.java @@ -47,8 +47,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicLong; import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.Nullable; @@ -185,10 +183,9 @@ private void stop() throws InterruptedException { } - private void runQps() throws InterruptedException, ExecutionException, TimeoutException { + private void runQps() throws InterruptedException, ExecutionException { final SettableFuture failure = SettableFuture.create(); final class PeriodicRpc implements Runnable { - final AtomicLong messageIds = new AtomicLong(); @Override public void run() { @@ 
-226,6 +223,9 @@ public void onMessage(SimpleResponse response) { @Override public void onClose(Status status, Metadata trailers) { + if (!status.isOk()) { + logger.log(Level.WARNING, "Greeting RPC failed with status {0}", status); + } for (XdsStatsWatcher watcher : savedWatchers) { watcher.rpcCompleted(requestId, hostname); } From 85eb92418cae3e6f2429323c9866f65e92486c17 Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Mon, 27 Jan 2020 14:36:53 -0800 Subject: [PATCH 21/86] core: fix ATTR_LOAD_BALANCING_CONFIG has service config instead of raw lb config (#6648) --- .../io/grpc/internal/AutoConfiguredLoadBalancerFactory.java | 3 ++- .../internal/AutoConfiguredLoadBalancerFactoryTest.java | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java index 263a20d4a8d..c775deca001 100644 --- a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java +++ b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java @@ -338,7 +338,8 @@ ConfigOrError parseLoadBalancerPolicy(Map serviceConfig, ChannelLogge return parsedLbPolicyConfig; } return ConfigOrError.fromConfig( - new PolicySelection(provider, serviceConfig, parsedLbPolicyConfig.getConfig())); + new PolicySelection( + provider, lbConfig.getRawConfigValue(), parsedLbPolicyConfig.getConfig())); } } return ConfigOrError.fromError( diff --git a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java index 9232772778a..c0918151790 100644 --- a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java +++ b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java @@ -275,7 +275,7 @@ public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exc 
verify(testLbBalancer).handleResolvedAddresses(resultCaptor.capture()); assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(rawServiceConfig); + .containsExactly("setting1", "high"); verify(testLbBalancer, atLeast(0)).canHandleEmptyAddressListFromNameResolution(); ArgumentCaptor> lbConfigCaptor = ArgumentCaptor.forClass(Map.class); verify(testLbBalancerProvider).parseLoadBalancingPolicyConfig(lbConfigCaptor.capture()); @@ -298,7 +298,7 @@ public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exc assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(rawServiceConfig); + .containsExactly("setting1", "low"); verify(testLbBalancerProvider, times(2)) .parseLoadBalancingPolicyConfig(lbConfigCaptor.capture()); assertThat(lbConfigCaptor.getValue()).containsExactly("setting1", "low"); @@ -388,7 +388,7 @@ public void handleResolvedAddressGroups_delegateAcceptsEmptyAddressList() assertThat(resultCaptor.getValue().getLoadBalancingPolicyConfig()) .isEqualTo(nextParsedConfigOrError2.get().getConfig()); assertThat(resultCaptor.getValue().getAttributes().get(ATTR_LOAD_BALANCING_CONFIG)) - .isEqualTo(rawServiceConfig); + .containsExactly("setting1", "high"); } @Test From 14e63fea61550d3a7f55ad3685cb2afc4a863fbc Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Mon, 27 Jan 2020 15:47:25 -0800 Subject: [PATCH 22/86] xds: use resolvedAddresses.getLoadBalancingPolicyConfig() to get LB config --- .../java/io/grpc/xds/CdsLoadBalancer.java | 32 +++---------- .../main/java/io/grpc/xds/LookasideLb.java | 43 ++++------------- .../java/io/grpc/xds/CdsLoadBalancerTest.java | 47 ++++--------------- 3 files changed, 24 insertions(+), 
98 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index c9dc443495f..5a7b0d8c578 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -23,13 +23,11 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.Attributes; import io.grpc.ChannelLogger; import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.EquivalentAddressGroup; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerRegistry; -import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; import io.grpc.internal.ObjectPool; import io.grpc.internal.ServiceConfigUtil.LbConfig; @@ -45,7 +43,6 @@ import io.grpc.xds.sds.TlsContextManagerImpl; import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; import javax.annotation.Nullable; @@ -90,7 +87,6 @@ public final class CdsLoadBalancer extends LoadBalancer { @Override public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { channelLogger.log(ChannelLogLevel.DEBUG, "Received ResolvedAddresses {0}", resolvedAddresses); - Attributes attributes = resolvedAddresses.getAttributes(); if (xdsClientPool == null) { xdsClientPool = resolvedAddresses.getAttributes().get(XdsAttributes.XDS_CLIENT_POOL); if (xdsClientPool == null) { @@ -104,25 +100,15 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { xdsClient = xdsClientPool.getObject(); } - Map newRawLbConfig = attributes.get(ATTR_LOAD_BALANCING_CONFIG); - if (newRawLbConfig == null) { - // This will not happen when the service config error handling is implemented. - // For now simply go to TRANSIENT_FAILURE. 
+ Object lbConfig = resolvedAddresses.getLoadBalancingPolicyConfig(); + if (!(lbConfig instanceof CdsConfig)) { helper.updateBalancingState( TRANSIENT_FAILURE, - new ErrorPicker( - Status.UNAVAILABLE.withDescription("ATTR_LOAD_BALANCING_CONFIG not available"))); + new ErrorPicker(Status.UNAVAILABLE.withDescription( + "Load balancing config '" + lbConfig + "' is not a CdsConfig"))); return; } - ConfigOrError cfg = - CdsLoadBalancerProvider.parseLoadBalancingConfigPolicy(newRawLbConfig); - if (cfg.getError() != null) { - // This will not happen when the service config error handling is implemented. - // For now simply go to TRANSIENT_FAILURE. - helper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(cfg.getError())); - return; - } - final CdsConfig newCdsConfig = (CdsConfig) cfg.getConfig(); + CdsConfig newCdsConfig = (CdsConfig) lbConfig; // If CdsConfig is changed, do a graceful switch. if (!newCdsConfig.equals(cdsConfig)) { @@ -325,13 +311,7 @@ public void onClusterChanged(ClusterUpdate newUpdate) { edsBalancer = lbRegistry.getProvider(XDS_POLICY_NAME).newLoadBalancer(helper); } edsBalancer.handleResolvedAddresses( - resolvedAddresses.toBuilder() - .setAttributes( - resolvedAddresses.getAttributes().toBuilder() - .discard(ATTR_LOAD_BALANCING_CONFIG) - .build()) - .setLoadBalancingPolicyConfig(edsConfig) - .build()); + resolvedAddresses.toBuilder().setLoadBalancingPolicyConfig(edsConfig).build()); } /** For new UpstreamTlsContext value, release old SslContextProvider. 
*/ diff --git a/xds/src/main/java/io/grpc/xds/LookasideLb.java b/xds/src/main/java/io/grpc/xds/LookasideLb.java index 57013e32425..f0c87115ed7 100644 --- a/xds/src/main/java/io/grpc/xds/LookasideLb.java +++ b/xds/src/main/java/io/grpc/xds/LookasideLb.java @@ -28,7 +28,6 @@ import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerRegistry; -import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; import io.grpc.internal.ExponentialBackoffPolicy; import io.grpc.internal.GrpcUtil; @@ -114,41 +113,15 @@ final class LookasideLb extends LoadBalancer { public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { channelLogger.log(ChannelLogLevel.DEBUG, "Received ResolvedAddresses {0}", resolvedAddresses); - Attributes attributes = resolvedAddresses.getAttributes(); - XdsConfig newXdsConfig; Object lbConfig = resolvedAddresses.getLoadBalancingPolicyConfig(); - if (lbConfig != null) { - if (!(lbConfig instanceof XdsConfig)) { - lookasideLbHelper.updateBalancingState( - TRANSIENT_FAILURE, - new ErrorPicker(Status.UNAVAILABLE.withDescription( - "Load balancing config '" + lbConfig + "' is not an XdsConfig"))); - return; - } - newXdsConfig = (XdsConfig) lbConfig; - } else { - // In the future, in all cases xdsConfig can be obtained directly by - // resolvedAddresses.getLoadBalancingPolicyConfig(). - Map newRawLbConfig = attributes.get(ATTR_LOAD_BALANCING_CONFIG); - if (newRawLbConfig == null) { - // This will not happen when the service config error handling is implemented. - // For now simply go to TRANSIENT_FAILURE. 
- lookasideLbHelper.updateBalancingState( - TRANSIENT_FAILURE, - new ErrorPicker( - Status.UNAVAILABLE.withDescription("ATTR_LOAD_BALANCING_CONFIG not available"))); - return; - } - ConfigOrError cfg = - XdsLoadBalancerProvider.parseLoadBalancingConfigPolicy(newRawLbConfig, lbRegistry); - if (cfg.getError() != null) { - // This will not happen when the service config error handling is implemented. - // For now simply go to TRANSIENT_FAILURE. - lookasideLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(cfg.getError())); - return; - } - newXdsConfig = (XdsConfig) cfg.getConfig(); + if (!(lbConfig instanceof XdsConfig)) { + lookasideLbHelper.updateBalancingState( + TRANSIENT_FAILURE, + new ErrorPicker(Status.UNAVAILABLE.withDescription( + "Load balancing config '" + lbConfig + "' is not an XdsConfig"))); + return; } + XdsConfig newXdsConfig = (XdsConfig) lbConfig; if (xdsClientPool == null) { // Init xdsClientPool and xdsClient. @@ -163,6 +136,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { // We assume XdsConfig switching happens only within one usecase, and there is no switching // between different usecases. + Attributes attributes = resolvedAddresses.getAttributes(); xdsClientPool = attributes.get(XdsAttributes.XDS_CLIENT_POOL); if (xdsClientPool == null) { // This is the EDS-only usecase. 
final BootstrapInfo bootstrapInfo; @@ -241,7 +215,6 @@ XdsClient createXdsClient() { switchingLoadBalancer.switchTo(clusterEndpointsLoadBalancerFactory); } resolvedAddresses = resolvedAddresses.toBuilder() - .setAttributes(attributes.toBuilder().discard(ATTR_LOAD_BALANCING_CONFIG).build()) .setLoadBalancingPolicyConfig(newXdsConfig) .build(); switchingLoadBalancer.handleResolvedAddresses(resolvedAddresses); diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java index 52cfe0a9720..50efbc705bf 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java @@ -19,7 +19,6 @@ import static com.google.common.truth.Truth.assertThat; import static io.grpc.ConnectivityState.CONNECTING; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.LoadBalancer.ATTR_LOAD_BALANCING_CONFIG; import static io.grpc.xds.XdsLoadBalancerProvider.XDS_POLICY_NAME; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -49,8 +48,8 @@ import io.grpc.Status; import io.grpc.SynchronizationContext; import io.grpc.internal.FakeClock; -import io.grpc.internal.JsonParser; import io.grpc.internal.ServiceConfigUtil.LbConfig; +import io.grpc.xds.CdsLoadBalancerProvider.CdsConfig; import io.grpc.xds.XdsClient.ClusterUpdate; import io.grpc.xds.XdsClient.ClusterWatcher; import io.grpc.xds.XdsClient.EndpointUpdate; @@ -66,7 +65,6 @@ import java.util.ArrayList; import java.util.Deque; import java.util.List; -import java.util.Map; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -79,8 +77,6 @@ * Tests for {@link CdsLoadBalancer}. 
*/ @RunWith(JUnit4.class) -// TODO(creamsoup) use parsed service config -@SuppressWarnings("deprecation") public class CdsLoadBalancerTest { private static final String CLIENT_PEM_FILE = "client.pem"; private static final String CLIENT_KEY_FILE = "client.key"; @@ -164,16 +160,13 @@ public void canHandleEmptyAddressListFromNameResolution() { } @Test - public void invalidConfigType() throws Exception { - String lbConfigRaw = "{'cluster' : {}}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); + public void invalidConfigType() { ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new Object()) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses); @@ -182,16 +175,13 @@ public void invalidConfigType() throws Exception { } @Test - public void handleResolutionErrorBeforeOrAfterCdsWorking() throws Exception { - String lbConfigRaw1 = "{'cluster' : 'foo.googleapis.com'}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig1 = (Map) JsonParser.parse(lbConfigRaw1); + public void handleResolutionErrorBeforeOrAfterCdsWorking() { ResolvedAddresses resolvedAddresses1 = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig1) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new CdsConfig("foo.googleapis.com")) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses1); ArgumentCaptor clusterWatcherCaptor1 = ArgumentCaptor.forClass(null); @@ -222,16 +212,12 @@ public void handleResolutionErrorBeforeOrAfterCdsWorking() throws Exception { @Test public void handleCdsConfigs() throws Exception { assertThat(xdsClient).isNull(); - - 
String lbConfigRaw1 = "{'cluster' : 'foo.googleapis.com'}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig1 = (Map) JsonParser.parse(lbConfigRaw1); ResolvedAddresses resolvedAddresses1 = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig1) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new CdsConfig("foo.googleapis.com")) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses1); @@ -267,15 +253,12 @@ public void handleCdsConfigs() throws Exception { edsLbHelper1.updateBalancingState(ConnectivityState.READY, picker1); verify(helper).updateBalancingState(ConnectivityState.READY, picker1); - String lbConfigRaw2 = "{'cluster' : 'bar.googleapis.com'}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig2 = (Map) JsonParser.parse(lbConfigRaw2); ResolvedAddresses resolvedAddresses2 = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig2) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new CdsConfig("bar.googleapis.com")) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses2); @@ -345,18 +328,14 @@ public void handleCdsConfigs() throws Exception { @SuppressWarnings({"unchecked"}) public void handleCdsConfigs_withUpstreamTlsContext() throws Exception { assertThat(xdsClient).isNull(); - - String lbConfigRaw1 = "{'cluster' : 'foo.googleapis.com'}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig1 = (Map) JsonParser.parse(lbConfigRaw1); ResolvedAddresses resolvedAddresses1 = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes( Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig1) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new 
CdsConfig("foo.googleapis.com")) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses1); @@ -498,15 +477,12 @@ private void verifyUpstreamTlsContextAttribute( @Test public void clusterWatcher_onErrorCalledBeforeAndAfterOnClusterChanged() throws Exception { - String lbConfigRaw = "{'cluster' : 'foo.googleapis.com'}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new CdsConfig("foo.googleapis.com")) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses); @@ -550,15 +526,12 @@ public void cdsBalancerIntegrateWithEdsBalancer() throws Exception { lbRegistry.deregister(fakeXdsLoadBlancerProvider); lbRegistry.register(new XdsLoadBalancerProvider()); - String lbConfigRaw = "{'cluster' : 'foo.googleapis.com'}".replace("'", "\""); - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); ResolvedAddresses resolvedAddresses1 = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) .setAttributes(Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, lbConfig) .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) .build()) + .setLoadBalancingPolicyConfig(new CdsConfig("foo.googleapis.com")) .build(); cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses1); ArgumentCaptor clusterWatcherCaptor = ArgumentCaptor.forClass(null); From 1ed538f253577273181986d29e1ac08e406aefcc Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Tue, 28 Jan 2020 09:55:04 -0800 Subject: [PATCH 23/86] all: log picker when updating balancing state Let `ManagedChannelImpl` log picker update when updating balancing state. 
--- .../AutoConfiguredLoadBalancerFactory.java | 5 ++ .../io/grpc/internal/ManagedChannelImpl.java | 10 +++- .../java/io/grpc/internal/OobChannel.java | 48 +++++++++++++------ .../grpc/internal/PickFirstLoadBalancer.java | 6 +++ .../grpc/util/GracefulSwitchLoadBalancer.java | 20 +++++--- .../io/grpc/util/RoundRobinLoadBalancer.java | 11 +++++ .../grpc/internal/ManagedChannelImplTest.java | 2 +- .../main/java/io/grpc/grpclb/GrpclbState.java | 8 ++++ .../java/io/grpc/xds/ClientLoadCounter.java | 14 ++++++ 9 files changed, 101 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java index c775deca001..d0a71b3ded2 100644 --- a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java +++ b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java @@ -432,6 +432,11 @@ private static final class EmptyPicker extends SubchannelPicker { public PickResult pickSubchannel(PickSubchannelArgs args) { return PickResult.withNoResult(); } + + @Override + public String toString() { + return MoreObjects.toStringHelper(EmptyPicker.class).toString(); + } } private static final class FailingPicker extends SubchannelPicker { diff --git a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java index 062acc3a597..040d964da5e 100644 --- a/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java +++ b/core/src/main/java/io/grpc/internal/ManagedChannelImpl.java @@ -812,6 +812,13 @@ final class PanicSubchannelPicker extends SubchannelPicker { public PickResult pickSubchannel(PickSubchannelArgs args) { return panicPickResult; } + + @Override + public String toString() { + return MoreObjects.toStringHelper(PanicSubchannelPicker.class) + .add("panicPickResult", panicPickResult) + .toString(); + } } updateSubchannelPicker(new PanicSubchannelPicker()); @@ 
-1146,7 +1153,8 @@ public void run() { // It's not appropriate to report SHUTDOWN state from lb. // Ignore the case of newState == SHUTDOWN for now. if (newState != SHUTDOWN) { - channelLogger.log(ChannelLogLevel.INFO, "Entering {0} state", newState); + channelLogger.log( + ChannelLogLevel.INFO, "Entering {0} state with picker: {1}", newState, newPicker); channelStateManager.gotoState(newState); } } diff --git a/core/src/main/java/io/grpc/internal/OobChannel.java b/core/src/main/java/io/grpc/internal/OobChannel.java index 512abaeed1e..aae3314f3f3 100644 --- a/core/src/main/java/io/grpc/internal/OobChannel.java +++ b/core/src/main/java/io/grpc/internal/OobChannel.java @@ -173,14 +173,23 @@ public Object getInternalSubchannel() { } }; - subchannelPicker = new SubchannelPicker() { - final PickResult result = PickResult.withSubchannel(subchannelImpl); + final class OobSubchannelPicker extends SubchannelPicker { + final PickResult result = PickResult.withSubchannel(subchannelImpl); + + @Override + public PickResult pickSubchannel(PickSubchannelArgs args) { + return result; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(OobSubchannelPicker.class) + .add("result", result) + .toString(); + } + } - @Override - public PickResult pickSubchannel(PickSubchannelArgs args) { - return result; - } - }; + subchannelPicker = new OobSubchannelPicker(); delayedTransport.reprocess(subchannelPicker); } @@ -253,14 +262,23 @@ void handleSubchannelStateChange(final ConnectivityStateInfo newState) { delayedTransport.reprocess(subchannelPicker); break; case TRANSIENT_FAILURE: - delayedTransport.reprocess(new SubchannelPicker() { - final PickResult errorResult = PickResult.withError(newState.getStatus()); - - @Override - public PickResult pickSubchannel(PickSubchannelArgs args) { - return errorResult; - } - }); + final class OobErrorPicker extends SubchannelPicker { + final PickResult errorResult = PickResult.withError(newState.getStatus()); + + @Override + 
public PickResult pickSubchannel(PickSubchannelArgs args) { + return errorResult; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(OobErrorPicker.class) + .add("errorResult", errorResult) + .toString(); + } + } + + delayedTransport.reprocess(new OobErrorPicker()); break; default: // Do nothing diff --git a/core/src/main/java/io/grpc/internal/PickFirstLoadBalancer.java b/core/src/main/java/io/grpc/internal/PickFirstLoadBalancer.java index d579e437b59..535b34b015d 100644 --- a/core/src/main/java/io/grpc/internal/PickFirstLoadBalancer.java +++ b/core/src/main/java/io/grpc/internal/PickFirstLoadBalancer.java @@ -21,6 +21,7 @@ import static io.grpc.ConnectivityState.SHUTDOWN; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; +import com.google.common.base.MoreObjects; import io.grpc.ConnectivityState; import io.grpc.ConnectivityStateInfo; import io.grpc.EquivalentAddressGroup; @@ -135,6 +136,11 @@ private static final class Picker extends SubchannelPicker { public PickResult pickSubchannel(PickSubchannelArgs args) { return result; } + + @Override + public String toString() { + return MoreObjects.toStringHelper(Picker.class).add("result", result).toString(); + } } /** Picker that requests connection during the first pick, and returns noResult. 
*/ diff --git a/core/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java b/core/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java index cdb68685cac..ecd1f77572a 100644 --- a/core/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java +++ b/core/src/main/java/io/grpc/util/GracefulSwitchLoadBalancer.java @@ -20,6 +20,7 @@ import static com.google.common.base.Preconditions.checkState; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; import io.grpc.ConnectivityState; import io.grpc.ConnectivityStateInfo; import io.grpc.ExperimentalApi; @@ -56,14 +57,21 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { @Override public void handleNameResolutionError(final Status error) { + class ErrorPicker extends SubchannelPicker { + @Override + public PickResult pickSubchannel(PickSubchannelArgs args) { + return PickResult.withError(error); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(ErrorPicker.class).add("error", error).toString(); + } + } + helper.updateBalancingState( ConnectivityState.TRANSIENT_FAILURE, - new SubchannelPicker() { - @Override - public PickResult pickSubchannel(PickSubchannelArgs args) { - return PickResult.withError(error); - } - }); + new ErrorPicker()); } @Override diff --git a/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java b/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java index 7557aa86f78..82d803a8294 100644 --- a/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java +++ b/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java @@ -24,6 +24,7 @@ import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; import com.google.common.base.Objects; import com.google.common.base.Preconditions; @@ -428,6 +429,11 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { return 
PickResult.withSubchannel(subchannel != null ? subchannel : nextSubchannel()); } + @Override + public String toString() { + return MoreObjects.toStringHelper(ReadyPicker.class).add("list", list).toString(); + } + private Subchannel nextSubchannel() { int size = list.size(); int i = indexUpdater.incrementAndGet(this); @@ -476,6 +482,11 @@ boolean isEquivalentTo(RoundRobinPicker picker) { return picker instanceof EmptyPicker && (Objects.equal(status, ((EmptyPicker) picker).status) || (status.isOk() && ((EmptyPicker) picker).status.isOk())); } + + @Override + public String toString() { + return MoreObjects.toStringHelper(EmptyPicker.class).add("status", status).toString(); + } } /** diff --git a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java index f272316dedc..9066f3e33ad 100644 --- a/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java +++ b/core/src/test/java/io/grpc/internal/ManagedChannelImplTest.java @@ -2862,7 +2862,7 @@ public void channelTracing_stateChangeEvent() throws Exception { timer.forwardNanos(1234); updateBalancingStateSafely(helper, CONNECTING, mockPicker); assertThat(getStats(channel).channelTrace.events).contains(new ChannelTrace.Event.Builder() - .setDescription("Entering CONNECTING state") + .setDescription("Entering CONNECTING state with picker: mockPicker") .setSeverity(ChannelTrace.Event.Severity.CT_INFO) .setTimestampNanos(timer.getTicker().read()) .build()); diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java index 925eb2f04e7..a2679472f6f 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java @@ -26,6 +26,7 @@ import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; import com.google.common.base.Objects; import 
com.google.common.base.Stopwatch; import com.google.protobuf.util.Durations; @@ -1051,5 +1052,12 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { } } + @Override + public String toString() { + return MoreObjects.toStringHelper(RoundRobinPicker.class) + .add("dropList", dropList) + .add("pickList", pickList) + .toString(); + } } } diff --git a/xds/src/main/java/io/grpc/xds/ClientLoadCounter.java b/xds/src/main/java/io/grpc/xds/ClientLoadCounter.java index 2ad4fed502f..8e8754d2542 100644 --- a/xds/src/main/java/io/grpc/xds/ClientLoadCounter.java +++ b/xds/src/main/java/io/grpc/xds/ClientLoadCounter.java @@ -413,6 +413,13 @@ protected ClientStreamTracer.Factory wrapTracerFactory( ClientStreamTracer.Factory originFactory) { return new LoadRecordingStreamTracerFactory(counter, originFactory); } + + @Override + public String toString() { + return MoreObjects.toStringHelper(LoadRecordingSubchannelPicker.class) + .add("delegate", delegate) + .toString(); + } } /** @@ -445,5 +452,12 @@ protected ClientStreamTracer.Factory wrapTracerFactory( ClientStreamTracer.Factory originFactory) { return orcaPerRequestUtil.newOrcaClientStreamTracerFactory(originFactory, listener); } + + @Override + public String toString() { + return MoreObjects.toStringHelper(MetricsObservingSubchannelPicker.class) + .add("delegate", delegate) + .toString(); + } } } From 4ad3acc1d409789a4b82a8791261a013efc446b4 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Tue, 28 Jan 2020 10:37:10 -0800 Subject: [PATCH 24/86] xds: remove special logic for unsubscribing last CDS resource (#6647) --- .../main/java/io/grpc/xds/XdsClientImpl.java | 28 +++---------------- .../java/io/grpc/xds/XdsClientImplTest.java | 13 ++++----- 2 files changed, 10 insertions(+), 31 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 20bdf069885..7554223b3fc 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ 
b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -311,12 +311,6 @@ void cancelClusterDataWatch(String clusterName, ClusterWatcher watcher) { cdsRespTimers.get(clusterName).cancel(); cdsRespTimers.remove(clusterName); } - - // If unsubscribe the last resource, do NOT send a CDS request for an empty resource list. - // This is a workaround for CDS protocol resource unsubscribe. - if (clusterWatchers.isEmpty()) { - return; - } // No longer interested in this cluster, send an updated CDS request to unsubscribe // this resource. if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { @@ -669,8 +663,6 @@ private String processRouteConfig(RouteConfiguration config) { */ private void handleCdsResponse(DiscoveryResponse cdsResponse) { logger.log(Level.FINE, "Received an CDS response: {0}", cdsResponse); - checkState(adsStream.cdsResourceNames != null, - "Never requested for CDS resources, management server is doing something wrong"); adsStream.cdsRespNonce = cdsResponse.getNonce(); // Unpack Cluster messages. @@ -680,7 +672,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { clusters.add(res.unpack(Cluster.class)); } } catch (InvalidProtocolBufferException e) { - adsStream.sendNackRequest(ADS_TYPE_URL_CDS, adsStream.cdsResourceNames, + adsStream.sendNackRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet(), "Broken CDS response"); return; } @@ -697,7 +689,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { // Management server is required to always send newly requested resources, even if they // may have been sent previously (proactively). Thus, client does not need to cache // unrequested resources. 
- if (!adsStream.cdsResourceNames.contains(clusterName)) { + if (!clusterWatchers.keySet().contains(clusterName)) { continue; } ClusterUpdate.Builder updateBuilder = ClusterUpdate.newBuilder(); @@ -751,10 +743,10 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { clusterUpdates.put(clusterName, updateBuilder.build()); } if (errorMessage != null) { - adsStream.sendNackRequest(ADS_TYPE_URL_CDS, adsStream.cdsResourceNames, errorMessage); + adsStream.sendNackRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet(), errorMessage); return; } - adsStream.sendAckRequest(ADS_TYPE_URL_CDS, adsStream.cdsResourceNames, + adsStream.sendAckRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet(), cdsResponse.getVersionInfo()); // Update local CDS cache with data in this response. @@ -997,14 +989,6 @@ private final class AdsStream implements StreamObserver { // watchers are interested in. @Nullable private String rdsResourceName; - // Most recently requested CDS resource names. - // Due to CDS protocol limitation, client does not send a CDS request for empty resource - // names when unsubscribing the last resource. Management server assumes it is still - // subscribing to the last resource, client also need to behave so to avoid data lose. - // Therefore, cluster names that watchers interested in cannot always represent resource names - // in most recently sent CDS requests. - @Nullable - private Collection cdsResourceNames; private AdsStream(AggregatedDiscoveryServiceGrpc.AggregatedDiscoveryServiceStub stub) { this.stub = checkNotNull(stub, "stub"); @@ -1147,10 +1131,6 @@ private void sendXdsRequest(String typeUrl, Collection resourceNames) { } else if (typeUrl.equals(ADS_TYPE_URL_CDS)) { version = cdsVersion; nonce = cdsRespNonce; - // For CDS protocol resource unsubscribe workaround, keep the last unsubscribed cluster - // as the requested resource name for ACK requests when all all resources have - // been unsubscribed. 
- cdsResourceNames = ImmutableList.copyOf(resourceNames); } else if (typeUrl.equals(ADS_TYPE_URL_EDS)) { version = edsVersion; nonce = edsRespNonce; diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index a9c139eaaed..f0ab428e915 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -1613,9 +1613,11 @@ public void addRemoveClusterWatchers() { // Cancel the other watcher. All resources have been unsubscribed. xdsClient.cancelClusterDataWatch("cluster-bar.googleapis.com", watcher2); - // All endpoint watchers have been cancelled. Due to protocol limitation, we do not send - // a CDS request for updated resource names (empty) when canceling the last resource. - verifyNoMoreInteractions(requestObserver); + verify(requestObserver) + .onNext( + argThat( + new DiscoveryRequestMatcher("1", ImmutableList.of(), + XdsClientImpl.ADS_TYPE_URL_CDS, "0001"))); // Management server sends back a new CDS response. clusters = ImmutableList.of( @@ -1626,13 +1628,10 @@ public void addRemoveClusterWatchers() { buildDiscoveryResponse("2", clusters, XdsClientImpl.ADS_TYPE_URL_CDS, "0002"); responseObserver.onNext(response); - // Due to protocol limitation, client sent an ACK CDS request, with resource_names containing - // the last unsubscribed resource. verify(requestObserver) .onNext( argThat( - new DiscoveryRequestMatcher("2", - ImmutableList.of("cluster-bar.googleapis.com"), + new DiscoveryRequestMatcher("2", ImmutableList.of(), XdsClientImpl.ADS_TYPE_URL_CDS, "0002"))); // Cancelled watchers do not receive notification. 
From 589a645a38b306044f54768c60dcf35f055b4356 Mon Sep 17 00:00:00 2001 From: Roman Leventov Date: Tue, 28 Jan 2020 22:32:09 +0300 Subject: [PATCH 25/86] stub,examples: Clarify CallStreamObserver's Javadoc (#6561) * Clarify CallStreamObserver's Javadoc * Remove unnecessary AtomicBoolean and clarify a comment in ManualFlowControlServer --- .../ManualFlowControlServer.java | 35 ++++++++++--------- .../java/io/grpc/stub/CallStreamObserver.java | 26 +++++++++----- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java b/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java index abd04fa9ad5..694330dfdb6 100644 --- a/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java +++ b/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Logger; public class ManualFlowControlServer { @@ -42,12 +41,6 @@ public StreamObserver sayHelloStreaming(final StreamObserver) responseObserver; serverCallStreamObserver.disableAutoInboundFlowControl(); - // Guard against spurious onReady() calls caused by a race between onNext() and onReady(). If the transport - // toggles isReady() from false to true while onNext() is executing, but before onNext() checks isReady(), - // request(1) would be called twice - once by onNext() and once by the onReady() scheduled during onNext()'s - // execution. - final AtomicBoolean wasReady = new AtomicBoolean(false); - // Set up a back-pressure-aware consumer for the request stream. The onReadyHandler will be invoked // when the consuming side has enough buffer space to receive more messages. 
// @@ -55,10 +48,17 @@ public StreamObserver sayHelloStreaming(final StreamObserver() { @@ -90,16 +92,17 @@ public void onNext(HelloRequest request) { // Check the provided ServerCallStreamObserver to see if it is still ready to accept more messages. if (serverCallStreamObserver.isReady()) { // Signal the sender to send another request. As long as isReady() stays true, the server will keep - // cycling through the loop of onNext() -> request()...onNext() -> request()... until either the client - // runs out of messages and ends the loop or the server runs out of receive buffer space. + // cycling through the loop of onNext() -> request(1)...onNext() -> request(1)... until the client runs + // out of messages and ends the loop (via onCompleted()). // - // If the server runs out of buffer space, isReady() will turn false. When the receive buffer has - // sufficiently drained, isReady() will turn true, and the serverCallStreamObserver's onReadyHandler - // will be called to restart the message pump. + // If request() was called here with the argument of more than 1, the server might runs out of receive + // buffer space, and isReady() will turn false. When the receive buffer has sufficiently drained, + // isReady() will turn true, and the serverCallStreamObserver's onReadyHandler will be called to restart + // the message pump. serverCallStreamObserver.request(1); } else { // If not, note that back-pressure has begun. 
- wasReady.set(false); + onReadyHandler.wasReady = false; } } catch (Throwable throwable) { throwable.printStackTrace(); diff --git a/stub/src/main/java/io/grpc/stub/CallStreamObserver.java b/stub/src/main/java/io/grpc/stub/CallStreamObserver.java index 8ed16bb96eb..98fa6fba57e 100644 --- a/stub/src/main/java/io/grpc/stub/CallStreamObserver.java +++ b/stub/src/main/java/io/grpc/stub/CallStreamObserver.java @@ -19,20 +19,30 @@ import io.grpc.ExperimentalApi; /** - * A refinement of StreamObserver provided by the GRPC runtime to the application that allows for - * more complex interactions with call behavior. + * A refinement of StreamObserver provided by the GRPC runtime to the application (the client or + * the server) that allows for more complex interactions with call behavior. * - *

In any call there are logically two {@link StreamObserver} implementations: + *

In any call there are logically four {@link StreamObserver} implementations: *

    - *
  • 'inbound' - which the GRPC runtime calls when it receives messages from the - * remote peer. This is implemented by the application. + *
  • 'inbound', client-side - which the GRPC runtime calls when it receives messages from + * the server. This is implemented by the client application and passed into a service method + * on a stub object. *
  • - *
  • 'outbound' - which the GRPC runtime provides to the application which it uses to - * send messages to the remote peer. + *
  • 'outbound', client-side - which the GRPC runtime provides to the client application and the + * client uses this {@code StreamObserver} to send messages to the server. + *
  • + *
  • 'inbound', server-side - which the GRPC runtime calls when it receives messages from + * the client. This is implemented by the server application and returned from service + * implementations of client-side streaming and bidirectional streaming methods. + *
  • + *
  • 'outbound', server-side - which the GRPC runtime provides to the server application and + * the server uses this {@code StreamObserver} to send messages (responses) to the client. *
  • *
* - *

Implementations of this class represent the 'outbound' message stream. + *

Implementations of this class represent the 'outbound' message streams. The client-side + * one is {@link ClientCallStreamObserver} and the service-side one is + * {@link ServerCallStreamObserver}. * *

Like {@code StreamObserver}, implementations are not required to be thread-safe; if multiple * threads will be writing to an instance concurrently, the application must synchronize its calls. From 64e827d9961edab2619c1cf25f51d3a46143efbe Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Tue, 28 Jan 2020 13:08:05 -0800 Subject: [PATCH 26/86] grpclb,xds: fix parseLoadBalancingConfigPolicy error message format --- .../main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java | 3 ++- xds/src/main/java/io/grpc/xds/CdsLoadBalancerProvider.java | 3 ++- xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java index c70197ab6da..eb748d3e1d4 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java @@ -70,7 +70,8 @@ public ConfigOrError parseLoadBalancingPolicyConfig( return parseLoadBalancingConfigPolicyInternal(rawLoadBalancingConfigPolicy); } catch (RuntimeException e) { return ConfigOrError.fromError( - Status.INTERNAL.withDescription("can't parse config: " + e.getMessage()).withCause(e)); + Status.fromThrowable(e).withDescription( + "Failed to parse GRPCLB config: " + rawLoadBalancingConfigPolicy)); } } diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancerProvider.java index eb1abb1be70..1950168ba9d 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancerProvider.java @@ -76,7 +76,8 @@ static ConfigOrError parseLoadBalancingConfigPolicy(Map rawLoadBalanc return ConfigOrError.fromConfig(new CdsConfig(cluster)); } catch (RuntimeException e) { return ConfigOrError.fromError( - Status.UNKNOWN.withDescription("Failed to parse config " + 
e.getMessage()).withCause(e)); + Status.fromThrowable(e).withDescription( + "Failed to parse CDS LB config: " + rawLoadBalancingPolicyConfig)); } } diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java index 2663883e664..82311647e3d 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java @@ -86,7 +86,8 @@ static ConfigOrError parseLoadBalancingConfigPolicy( new XdsConfig(childPolicy, fallbackPolicy, edsServiceName, lrsServerName)); } catch (RuntimeException e) { return ConfigOrError.fromError( - Status.UNKNOWN.withDescription("Failed to parse config " + e.getMessage()).withCause(e)); + Status.fromThrowable(e).withDescription( + "Failed to parse XDS LB config: " + rawLoadBalancingPolicyConfig)); } } From 2eccdb8337de265ec2f6ece4e3b421f0e8126c60 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Tue, 28 Jan 2020 15:36:32 -0800 Subject: [PATCH 27/86] Revert "xds: better error handling to avoid RPC hangup" This reverts commit a223263134c2bd4bf97b943bc52a0a128bc67268. 
--- xds/src/main/java/io/grpc/xds/LookasideLb.java | 1 - xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java | 5 ----- xds/src/test/java/io/grpc/xds/LookasideLbTest.java | 3 --- xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java | 5 ----- 4 files changed, 14 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/LookasideLb.java b/xds/src/main/java/io/grpc/xds/LookasideLb.java index f0c87115ed7..416d0091047 100644 --- a/xds/src/main/java/io/grpc/xds/LookasideLb.java +++ b/xds/src/main/java/io/grpc/xds/LookasideLb.java @@ -410,7 +410,6 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { @Override public void onError(Status error) { channelLogger.log(ChannelLogLevel.ERROR, "EDS load balancer received an error: {0}", error); - lookasideLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); endpointUpdateCallback.onError(); } } diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java index 564b085fdbb..fdbfdf5149c 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java @@ -28,7 +28,6 @@ import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.util.ForwardingLoadBalancerHelper; import io.grpc.xds.LookasideLb.EndpointUpdateCallback; -import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; import java.util.concurrent.TimeUnit; import javax.annotation.CheckForNull; import javax.annotation.Nullable; @@ -119,10 +118,6 @@ class EnterFallbackTask implements Runnable { @Override public void run() { - helper.updateBalancingState( - ConnectivityState.TRANSIENT_FAILURE, - new ErrorPicker(Status.UNAVAILABLE.withDescription( - "Channel is not ready when timeout for entering fallback mode happens"))); useFallbackPolicy(); } } diff --git a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java index 2c1ce73d982..42080404aed 100644 --- 
a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java +++ b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java @@ -666,12 +666,9 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, public void verifyErrorPropagation() { deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); - verify(helper, never()).updateBalancingState( - eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verify(edsUpdateCallback, never()).onError(); // Forwarding 20 seconds so that the xds client will deem EDS resource not available. fakeClock.forwardTime(20, TimeUnit.SECONDS); - verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verify(edsUpdateCallback).onError(); } diff --git a/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java b/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java index 694f5e5707c..db8b82e2ffc 100644 --- a/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java +++ b/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java @@ -19,9 +19,6 @@ import static com.google.common.truth.Truth.assertThat; import static io.grpc.ConnectivityState.CONNECTING; import static io.grpc.ConnectivityState.READY; -import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -147,9 +144,7 @@ public void timeoutAtStartup_expectUseFallback_thenBackendReady_expectExitFallba fakeClock.forwardTime(9, TimeUnit.SECONDS); edsUpdateCallback.onWorking(); verifyNotInFallbackMode(); - fakeClock.forwardTime(1, TimeUnit.SECONDS); - verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); verifyInFallbackMode(); SubchannelPicker subchannelPicker = mock(SubchannelPicker.class); From 4ad3acb8f2f7b4bbe02c0a1c8b2b7eb4eb6e23ee Mon Sep 17 00:00:00 2001 From: 
Chengyuan Zhang Date: Wed, 29 Jan 2020 09:58:47 -0800 Subject: [PATCH 28/86] xds: print xDS responses nicely with protobuf JsonFormat (#6654) This change implements a mechanism for printing xDS responses, which contains com.google.protobuf.Any type fields in proto messages, in human-readable format. --- .../main/java/io/grpc/xds/XdsClientImpl.java | 53 +++- .../java/io/grpc/xds/XdsClientImplTest.java | 233 ++++++++++++++++++ 2 files changed, 282 insertions(+), 4 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 7554223b3fc..2e0043ca5ef 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -25,6 +25,8 @@ import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.MessageOrBuilder; +import com.google.protobuf.util.JsonFormat; import com.google.rpc.Code; import io.envoyproxy.envoy.api.v2.Cluster; import io.envoyproxy.envoy.api.v2.Cluster.DiscoveryType; @@ -84,6 +86,8 @@ final class XdsClientImpl extends XdsClient { static final String ADS_TYPE_URL_EDS = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment"; + private final MessagePrinter respPrinter = new MessagePrinter(); + private final ManagedChannel channel; private final SynchronizationContext syncContext; private final ScheduledExecutorService timeService; @@ -456,7 +460,9 @@ private void startRpcStream() { * ACK request is sent to management server. */ private void handleLdsResponse(DiscoveryResponse ldsResponse) { - logger.log(Level.FINE, "Received an LDS response: {0}", ldsResponse); + if (logger.isLoggable(Level.FINE)) { + logger.log(Level.FINE, "Received an LDS response: {0}", respPrinter.print(ldsResponse)); + } checkState(ldsResourceName != null && configWatcher != null, "No LDS request was ever sent. 
Management server is doing something wrong"); @@ -569,7 +575,9 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { * invalid data for gRPC's usage. Otherwise, an ACK request is sent to management server. */ private void handleRdsResponse(DiscoveryResponse rdsResponse) { - logger.log(Level.FINE, "Received an RDS response: {0}", rdsResponse); + if (logger.isLoggable(Level.FINE)) { + logger.log(Level.FINE, "Received an RDS response: {0}", respPrinter.print(rdsResponse)); + } checkState(adsStream.rdsResourceName != null, "Never requested for RDS resources, management server is doing something wrong"); @@ -662,7 +670,9 @@ private String processRouteConfig(RouteConfiguration config) { * interested in the same clusters are added later. */ private void handleCdsResponse(DiscoveryResponse cdsResponse) { - logger.log(Level.FINE, "Received an CDS response: {0}", cdsResponse); + if (logger.isLoggable(Level.FINE)) { + logger.log(Level.FINE, "Received an CDS response: {0}", respPrinter.print(cdsResponse)); + } adsStream.cdsRespNonce = cdsResponse.getNonce(); // Unpack Cluster messages. @@ -813,7 +823,9 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { * are added later. */ private void handleEdsResponse(DiscoveryResponse edsResponse) { - logger.log(Level.FINE, "Received an EDS response: {0}", edsResponse); + if (logger.isLoggable(Level.FINE)) { + logger.log(Level.FINE, "Received an EDS response: {0}", respPrinter.print(edsResponse)); + } // Unpack ClusterLoadAssignment messages. List clusterLoadAssignments = @@ -1360,4 +1372,37 @@ static boolean matchHostName(String hostName, String pattern) { return index == pattern.length() - 1 && hostName.startsWith(pattern.substring(0, pattern.length() - 1)); } + + /** + * Convert protobuf message to human readable String format. Useful for protobuf messages + * containing {@link com.google.protobuf.Any} fields. 
+ */ + @VisibleForTesting + static class MessagePrinter { + private final JsonFormat.Printer printer; + + @VisibleForTesting + MessagePrinter() { + com.google.protobuf.TypeRegistry registry = + com.google.protobuf.TypeRegistry.newBuilder() + .add(Listener.getDescriptor()) + .add(HttpConnectionManager.getDescriptor()) + .add(RouteConfiguration.getDescriptor()) + .add(Cluster.getDescriptor()) + .add(ClusterLoadAssignment.getDescriptor()) + .build(); + printer = JsonFormat.printer().usingTypeRegistry(registry); + } + + @VisibleForTesting + String print(MessageOrBuilder message) { + String res; + try { + res = printer.print(message); + } catch (InvalidProtocolBufferException e) { + res = message + " (failed to pretty-print: " + e + ")"; + } + return res; + } + } } diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index f0ab428e915..738b510af8a 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -92,6 +92,7 @@ import io.grpc.xds.XdsClient.EndpointUpdate; import io.grpc.xds.XdsClient.EndpointWatcher; import io.grpc.xds.XdsClient.XdsChannelFactory; +import io.grpc.xds.XdsClientImpl.MessagePrinter; import java.io.IOException; import java.util.ArrayDeque; import java.util.HashSet; @@ -3195,6 +3196,238 @@ public void matchHostName_postfixWildCard() { assertThat(XdsClientImpl.matchHostName("foo-bar", pattern)).isTrue(); } + @Test + public void messagePrinter_printLdsResponse() { + MessagePrinter printer = new MessagePrinter(); + List listeners = ImmutableList.of( + Any.pack(buildListener("foo.googleapis.com:8080", + Any.pack( + HttpConnectionManager.newBuilder() + .setRouteConfig( + buildRouteConfiguration("route-foo.googleapis.com", + ImmutableList.of( + buildVirtualHost( + ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + "cluster.googleapis.com")))) + .build())))); + DiscoveryResponse response = + 
buildDiscoveryResponse("0", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"); + + String expectedString = "{\n" + + " \"versionInfo\": \"0\",\n" + + " \"resources\": [{\n" + + " \"@type\": \"type.googleapis.com/envoy.api.v2.Listener\",\n" + + " \"name\": \"foo.googleapis.com:8080\",\n" + + " \"address\": {\n" + + " },\n" + + " \"filterChains\": [{\n" + + " }],\n" + + " \"apiListener\": {\n" + + " \"apiListener\": {\n" + + " \"@type\": \"type.googleapis.com/envoy.config.filter.network" + + ".http_connection_manager.v2.HttpConnectionManager\",\n" + + " \"routeConfig\": {\n" + + " \"name\": \"route-foo.googleapis.com\",\n" + + " \"virtualHosts\": [{\n" + + " \"name\": \"virtualhost00.googleapis.com\",\n" + + " \"domains\": [\"foo.googleapis.com\", \"bar.googleapis.com\"],\n" + + " \"routes\": [{\n" + + " \"route\": {\n" + + " \"cluster\": \"whatever cluster\"\n" + + " }\n" + + " }, {\n" + + " \"route\": {\n" + + " \"cluster\": \"cluster.googleapis.com\"\n" + + " }\n" + + " }]\n" + + " }]\n" + + " }\n" + + " }\n" + + " }\n" + + " }],\n" + + " \"typeUrl\": \"type.googleapis.com/envoy.api.v2.Listener\",\n" + + " \"nonce\": \"0000\"\n" + + "}"; + String res = printer.print(response); + assertThat(res).isEqualTo(expectedString); + } + + @Test + public void messagePrinter_printRdsResponse() { + MessagePrinter printer = new MessagePrinter(); + List routeConfigs = + ImmutableList.of( + Any.pack( + buildRouteConfiguration( + "route-foo.googleapis.com", + ImmutableList.of( + buildVirtualHost( + ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + "cluster.googleapis.com"))))); + DiscoveryResponse response = + buildDiscoveryResponse("213", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0052"); + + String expectedString = "{\n" + + " \"versionInfo\": \"213\",\n" + + " \"resources\": [{\n" + + " \"@type\": \"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n" + + " \"name\": \"route-foo.googleapis.com\",\n" + + " \"virtualHosts\": [{\n" + + " \"name\": 
\"virtualhost00.googleapis.com\",\n" + + " \"domains\": [\"foo.googleapis.com\", \"bar.googleapis.com\"],\n" + + " \"routes\": [{\n" + + " \"route\": {\n" + + " \"cluster\": \"whatever cluster\"\n" + + " }\n" + + " }, {\n" + + " \"route\": {\n" + + " \"cluster\": \"cluster.googleapis.com\"\n" + + " }\n" + + " }]\n" + + " }]\n" + + " }],\n" + + " \"typeUrl\": \"type.googleapis.com/envoy.api.v2.RouteConfiguration\",\n" + + " \"nonce\": \"0052\"\n" + + "}"; + String res = printer.print(response); + assertThat(res).isEqualTo(expectedString); + } + + @Test + public void messagePrinter_printCdsResponse() { + MessagePrinter printer = new MessagePrinter(); + List clusters = ImmutableList.of( + Any.pack(buildCluster("cluster-bar.googleapis.com", "service-blaze:cluster-bar", true)), + Any.pack(buildCluster("cluster-foo.googleapis.com", null, false))); + DiscoveryResponse response = + buildDiscoveryResponse("14", clusters, XdsClientImpl.ADS_TYPE_URL_CDS, "8"); + + String expectedString = "{\n" + + " \"versionInfo\": \"14\",\n" + + " \"resources\": [{\n" + + " \"@type\": \"type.googleapis.com/envoy.api.v2.Cluster\",\n" + + " \"name\": \"cluster-bar.googleapis.com\",\n" + + " \"type\": \"EDS\",\n" + + " \"edsClusterConfig\": {\n" + + " \"edsConfig\": {\n" + + " \"ads\": {\n" + + " }\n" + + " },\n" + + " \"serviceName\": \"service-blaze:cluster-bar\"\n" + + " },\n" + + " \"lrsServer\": {\n" + + " \"self\": {\n" + + " }\n" + + " }\n" + + " }, {\n" + + " \"@type\": \"type.googleapis.com/envoy.api.v2.Cluster\",\n" + + " \"name\": \"cluster-foo.googleapis.com\",\n" + + " \"type\": \"EDS\",\n" + + " \"edsClusterConfig\": {\n" + + " \"edsConfig\": {\n" + + " \"ads\": {\n" + + " }\n" + + " }\n" + + " }\n" + + " }],\n" + + " \"typeUrl\": \"type.googleapis.com/envoy.api.v2.Cluster\",\n" + + " \"nonce\": \"8\"\n" + + "}"; + String res = printer.print(response); + assertThat(res).isEqualTo(expectedString); + } + + @Test + public void messagePrinter_printEdsResponse() { + MessagePrinter 
printer = new MessagePrinter(); + List clusterLoadAssignments = ImmutableList.of( + Any.pack(buildClusterLoadAssignment("cluster-foo.googleapis.com", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HealthStatus.HEALTHY, 2)), + 1, 0), + buildLocalityLbEndpoints("region3", "zone3", "subzone3", + ImmutableList.of( + buildLbEndpoint("192.168.142.5", 80, HealthStatus.UNHEALTHY, 5)), + 2, 1)), + ImmutableList.of( + buildDropOverload("lb", 200), + buildDropOverload("throttle", 1000))))); + + DiscoveryResponse response = + buildDiscoveryResponse("5", clusterLoadAssignments, + XdsClientImpl.ADS_TYPE_URL_EDS, "004"); + + String expectedString = "{\n" + + " \"versionInfo\": \"5\",\n" + + " \"resources\": [{\n" + + " \"@type\": \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\",\n" + + " \"clusterName\": \"cluster-foo.googleapis.com\",\n" + + " \"endpoints\": [{\n" + + " \"locality\": {\n" + + " \"region\": \"region1\",\n" + + " \"zone\": \"zone1\",\n" + + " \"subZone\": \"subzone1\"\n" + + " },\n" + + " \"lbEndpoints\": [{\n" + + " \"endpoint\": {\n" + + " \"address\": {\n" + + " \"socketAddress\": {\n" + + " \"address\": \"192.168.0.1\",\n" + + " \"portValue\": 8080\n" + + " }\n" + + " }\n" + + " },\n" + + " \"healthStatus\": \"HEALTHY\",\n" + + " \"loadBalancingWeight\": 2\n" + + " }],\n" + + " \"loadBalancingWeight\": 1\n" + + " }, {\n" + + " \"locality\": {\n" + + " \"region\": \"region3\",\n" + + " \"zone\": \"zone3\",\n" + + " \"subZone\": \"subzone3\"\n" + + " },\n" + + " \"lbEndpoints\": [{\n" + + " \"endpoint\": {\n" + + " \"address\": {\n" + + " \"socketAddress\": {\n" + + " \"address\": \"192.168.142.5\",\n" + + " \"portValue\": 80\n" + + " }\n" + + " }\n" + + " },\n" + + " \"healthStatus\": \"UNHEALTHY\",\n" + + " \"loadBalancingWeight\": 5\n" + + " }],\n" + + " \"loadBalancingWeight\": 2,\n" + + " \"priority\": 1\n" + + " }],\n" + + " \"policy\": {\n" + + " 
\"dropOverloads\": [{\n" + + " \"category\": \"lb\",\n" + + " \"dropPercentage\": {\n" + + " \"numerator\": 200,\n" + + " \"denominator\": \"MILLION\"\n" + + " }\n" + + " }, {\n" + + " \"category\": \"throttle\",\n" + + " \"dropPercentage\": {\n" + + " \"numerator\": 1000,\n" + + " \"denominator\": \"MILLION\"\n" + + " }\n" + + " }],\n" + + " \"disableOverprovisioning\": true\n" + + " }\n" + + " }],\n" + + " \"typeUrl\": \"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment\",\n" + + " \"nonce\": \"004\"\n" + + "}"; + String res = printer.print(response); + assertThat(res).isEqualTo(expectedString); + } + private static LoadStatsRequest buildInitialLoadStatsRequest(String clusterName) { return LoadStatsRequest.newBuilder() From f04c4921d9ac27166200495a745355a1d40054f7 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Thu, 30 Jan 2020 09:49:22 -0800 Subject: [PATCH 29/86] xds: add EdsLoadBalancerProvider to shortcut CDS to EDS flow skipping fallback The `EdsLoadBalancerProvider` provides `LookasideLb` (Will rename `LookasideLb` to `EdsLoadBalancer` in future, but kept the name now to show better diff) with no-op callbacks for fallback. - `CdsLoadBalancer` will load `EdsLoadBalancerProvider/LookasideLb` directly skipping fallback. - The EDS-only flow is unchanged, still loading `XdsLoadBalancerProvider/XdsLoadBalancer2`, keeping current fallback behavior and producing horrible error message when both the primary and fallback policy fail. 
--- .../java/io/grpc/xds/CdsLoadBalancer.java | 4 +- .../io/grpc/xds/EdsLoadBalancerProvider.java | 75 +++++++++++++++++++ .../main/java/io/grpc/xds/LookasideLb.java | 6 ++ .../services/io.grpc.LoadBalancerProvider | 1 + .../java/io/grpc/xds/CdsLoadBalancerTest.java | 12 +-- .../java/io/grpc/xds/LookasideLbTest.java | 28 ++++++- 6 files changed, 117 insertions(+), 9 deletions(-) create mode 100644 xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index 5a7b0d8c578..358c5263ae7 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -18,7 +18,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.xds.XdsLoadBalancerProvider.XDS_POLICY_NAME; +import static io.grpc.xds.EdsLoadBalancerProvider.EDS_POLICY_NAME; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; @@ -308,7 +308,7 @@ public void onClusterChanged(ClusterUpdate newUpdate) { /* lrsServerName = */ newUpdate.getLrsServerName()); updateSslContextProvider(newUpdate.getUpstreamTlsContext()); if (edsBalancer == null) { - edsBalancer = lbRegistry.getProvider(XDS_POLICY_NAME).newLoadBalancer(helper); + edsBalancer = lbRegistry.getProvider(EDS_POLICY_NAME).newLoadBalancer(helper); } edsBalancer.handleResolvedAddresses( resolvedAddresses.toBuilder().setLoadBalancingPolicyConfig(edsConfig).build()); diff --git a/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java new file mode 100644 index 00000000000..1ae5a7f6702 --- /dev/null +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java @@ -0,0 +1,75 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.xds; + +import io.grpc.Internal; +import io.grpc.LoadBalancer; +import io.grpc.LoadBalancer.Helper; +import io.grpc.LoadBalancerProvider; +import io.grpc.LoadBalancerRegistry; +import io.grpc.NameResolver.ConfigOrError; +import io.grpc.xds.LookasideLb.EndpointUpdateCallback; +import java.util.Map; + +/** + * The provider for the "eds" balancing policy. This class should not be directly referenced in + * code. The policy should be accessed through {@link io.grpc.LoadBalancerRegistry#getProvider} + * with the name "eds_experimental"). 
+ */ +@Internal +public class EdsLoadBalancerProvider extends LoadBalancerProvider { + + static final String EDS_POLICY_NAME = "eds_experimental"; + + @Override + public boolean isAvailable() { + return true; + } + + @Override + public int getPriority() { + return 5; + } + + @Override + public String getPolicyName() { + return EDS_POLICY_NAME; + } + + @Override + public LoadBalancer newLoadBalancer(Helper helper) { + return new LookasideLb( + helper, + new EndpointUpdateCallback() { + @Override + public void onWorking() {} + + @Override + public void onError() {} + + @Override + public void onAllDrop() {} + }); + } + + @Override + public ConfigOrError parseLoadBalancingPolicyConfig( + Map rawLoadBalancingPolicyConfig) { + return XdsLoadBalancerProvider.parseLoadBalancingConfigPolicy( + rawLoadBalancingPolicyConfig, LoadBalancerRegistry.getDefaultRegistry()); + } +} diff --git a/xds/src/main/java/io/grpc/xds/LookasideLb.java b/xds/src/main/java/io/grpc/xds/LookasideLb.java index 416d0091047..ca555b8aabf 100644 --- a/xds/src/main/java/io/grpc/xds/LookasideLb.java +++ b/xds/src/main/java/io/grpc/xds/LookasideLb.java @@ -411,6 +411,12 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { public void onError(Status error) { channelLogger.log(ChannelLogLevel.ERROR, "EDS load balancer received an error: {0}", error); endpointUpdateCallback.onError(); + // If we get an error before getting any valid result, we should put the channel in + // TRANSIENT_FAILURE; if they get an error after getting a valid result, we keep using the + // previous channel state. 
+ if (!firstEndpointUpdateReceived) { + lookasideLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); + } } } } diff --git a/xds/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider b/xds/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider index 7c4faab6f25..f17734af229 100644 --- a/xds/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider +++ b/xds/src/main/resources/META-INF/services/io.grpc.LoadBalancerProvider @@ -1,2 +1,3 @@ io.grpc.xds.CdsLoadBalancerProvider +io.grpc.xds.EdsLoadBalancerProvider io.grpc.xds.XdsLoadBalancerProvider diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java index 50efbc705bf..c0a068df74d 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java @@ -19,7 +19,7 @@ import static com.google.common.truth.Truth.assertThat; import static io.grpc.ConnectivityState.CONNECTING; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.xds.XdsLoadBalancerProvider.XDS_POLICY_NAME; +import static io.grpc.xds.EdsLoadBalancerProvider.EDS_POLICY_NAME; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; @@ -95,7 +95,7 @@ XdsClient createXdsClient() { ); private final LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry(); - private final LoadBalancerProvider fakeXdsLoadBlancerProvider = new LoadBalancerProvider() { + private final LoadBalancerProvider fakeEdsLoadBlancerProvider = new LoadBalancerProvider() { @Override public boolean isAvailable() { return true; @@ -108,7 +108,7 @@ public int getPriority() { @Override public String getPolicyName() { - return XDS_POLICY_NAME; + return EDS_POLICY_NAME; } @Override @@ -150,7 +150,7 @@ public void setUp() { doReturn(channelLogger).when(helper).getChannelLogger(); 
doReturn(syncContext).when(helper).getSynchronizationContext(); doReturn(fakeClock.getScheduledExecutorService()).when(helper).getScheduledExecutorService(); - lbRegistry.register(fakeXdsLoadBlancerProvider); + lbRegistry.register(fakeEdsLoadBlancerProvider); cdsLoadBalancer = new CdsLoadBalancer(helper, lbRegistry, mockTlsContextManager); } @@ -523,8 +523,8 @@ public void clusterWatcher_onErrorCalledBeforeAndAfterOnClusterChanged() throws @Test public void cdsBalancerIntegrateWithEdsBalancer() throws Exception { - lbRegistry.deregister(fakeXdsLoadBlancerProvider); - lbRegistry.register(new XdsLoadBalancerProvider()); + lbRegistry.deregister(fakeEdsLoadBlancerProvider); + lbRegistry.register(new EdsLoadBalancerProvider()); ResolvedAddresses resolvedAddresses1 = ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) diff --git a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java index 42080404aed..e1758fc33fa 100644 --- a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java +++ b/xds/src/test/java/io/grpc/xds/LookasideLbTest.java @@ -663,13 +663,39 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, } @Test - public void verifyErrorPropagation() { + public void verifyErrorPropagation_noPreviousEndpointUpdateReceived() { deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); verify(edsUpdateCallback, never()).onError(); // Forwarding 20 seconds so that the xds client will deem EDS resource not available. fakeClock.forwardTime(20, TimeUnit.SECONDS); verify(edsUpdateCallback).onError(); + verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); + } + + @Test + public void verifyErrorPropagation_withPreviousEndpointUpdateReceived() { + deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + // Endpoint update received. 
+ ClusterLoadAssignment clusterLoadAssignment = + buildClusterLoadAssignment("edsServiceName1", + ImmutableList.of( + buildLocalityLbEndpoints("region1", "zone1", "subzone1", + ImmutableList.of( + buildLbEndpoint("192.168.0.1", 8080, HEALTHY, 2)), + 1, 0)), + ImmutableList.of(buildDropOverload("throttle", 1000))); + receiveEndpointUpdate(clusterLoadAssignment); + + verify(helper, never()).updateBalancingState( + eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); + verify(edsUpdateCallback, never()).onError(); + + // XdsClient stream receives an error. + responseObserver.onError(new RuntimeException("fake error")); + verify(helper, never()).updateBalancingState( + eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); + verify(edsUpdateCallback).onError(); } /** From 2acca6ea3b0de20c10fd75a90f82b9ed11c507fe Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Thu, 30 Jan 2020 10:00:39 -0800 Subject: [PATCH 30/86] all: fix lint warnings (#6658) --- .../main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java | 1 - netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java | 2 +- xds/src/main/java/io/grpc/xds/XdsClientImpl.java | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java index eb748d3e1d4..57d85282e32 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java @@ -104,7 +104,6 @@ ConfigOrError parseLoadBalancingConfigPolicyInternal( * Gets a list from an object for the given key. Copy of * {@link io.grpc.internal.ServiceConfigUtil#getList}. 
*/ - @SuppressWarnings("unchecked") @Nullable private static List getList(Map obj, String key) { assert key != null; diff --git a/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java b/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java index 3c93a88b55b..b35ab1f3922 100644 --- a/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java +++ b/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java @@ -276,7 +276,7 @@ protected void protocolNegotiationEventTriggered(ChannelHandlerContext ctx) { } else { nettyProxyHandler = new HttpProxyHandler(address, userName, password); } - ctx.pipeline().addBefore(ctx.name(), /* newName= */ null, nettyProxyHandler); + ctx.pipeline().addBefore(ctx.name(), /* name= */ null, nettyProxyHandler); } @Override diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 2e0043ca5ef..f1c8f4d5167 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -699,7 +699,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { // Management server is required to always send newly requested resources, even if they // may have been sent previously (proactively). Thus, client does not need to cache // unrequested resources. - if (!clusterWatchers.keySet().contains(clusterName)) { + if (!clusterWatchers.containsKey(clusterName)) { continue; } ClusterUpdate.Builder updateBuilder = ClusterUpdate.newBuilder(); @@ -1378,7 +1378,7 @@ static boolean matchHostName(String hostName, String pattern) { * containing {@link com.google.protobuf.Any} fields. 
*/ @VisibleForTesting - static class MessagePrinter { + static final class MessagePrinter { private final JsonFormat.Printer printer; @VisibleForTesting From 084f5bc4bdd8e645c3528a195eef85d162b55097 Mon Sep 17 00:00:00 2001 From: Liam Miller-Cushon Date: Thu, 30 Jan 2020 10:44:23 -0800 Subject: [PATCH 31/86] java_grpc_library: Inline find_java_toolchain and find_java_runtime_toolchain These methods were used to migrate the Java toolchains to use toolchain resolution. Now that the migration is complete, the toolchain providers can be used directly. --- java_grpc_library.bzl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/java_grpc_library.bzl b/java_grpc_library.bzl index 3e93ad71e0f..cd385998d27 100644 --- a/java_grpc_library.bzl +++ b/java_grpc_library.bzl @@ -1,7 +1,5 @@ """Build rule for java_grpc_library.""" -load("@bazel_tools//tools/jdk:toolchain_utils.bzl", "find_java_runtime_toolchain", "find_java_toolchain") - _JavaRpcToolchainInfo = provider( fields = [ "host_javabase", @@ -107,8 +105,8 @@ def _java_rpc_library_impl(ctx): java_info = java_common.compile( ctx, - java_toolchain = find_java_toolchain(ctx, toolchain.java_toolchain), - host_javabase = find_java_runtime_toolchain(ctx, toolchain.host_javabase), + java_toolchain = toolchain.java_toolchain[java_common.JavaToolchainInfo], + host_javabase = toolchain.host_javabase[java_common.JavaRuntimeInfo], source_jars = [srcjar], output = ctx.outputs.jar, output_source_jar = ctx.outputs.srcjar, From b3aea8193f8b42f36dcdb9ebbeeb0e7230a0d283 Mon Sep 17 00:00:00 2001 From: Elliotte Rusty Harold Date: Thu, 30 Jan 2020 15:59:55 -0500 Subject: [PATCH 32/86] all: update google auth libraries (#6652) --- build.gradle | 2 +- examples/example-alts/build.gradle | 1 + repositories.bzl | 12 ++++++------ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/build.gradle b/build.gradle index 259d1403d47..d5a4ff90b82 100644 --- a/build.gradle +++ b/build.gradle @@ -47,7 +47,7 @@ subprojects { 
nettyVersion = '4.1.42.Final' guavaVersion = '28.1-android' - googleauthVersion = '0.19.0' + googleauthVersion = '0.20.0' protobufVersion = '3.11.0' protocVersion = protobufVersion opencensusVersion = '0.24.0' diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 57b8047153d..0284bf3bde0 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -11,6 +11,7 @@ repositories { maven { // The google mirror is less flaky than mavenCentral() url "https://maven-central.storage-download.googleapis.com/repos/central/data/" } + mavenCentral() mavenLocal() } diff --git a/repositories.bzl b/repositories.bzl index 78cdf086f39..c0a244be917 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -13,8 +13,8 @@ load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external") IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.android:annotations:4.1.1.4", "com.google.api.grpc:proto-google-common-protos:1.17.0", - "com.google.auth:google-auth-library-credentials:0.19.0", - "com.google.auth:google-auth-library-oauth2-http:0.19.0", + "com.google.auth:google-auth-library-credentials:0.20.0", + "com.google.auth:google-auth-library-oauth2-http:0.20.0", "com.google.code.findbugs:jsr305:3.0.2", "com.google.code.gson:gson:jar:2.8.6", "com.google.errorprone:error_prone_annotations:2.3.4", @@ -191,18 +191,18 @@ def com_google_api_grpc_proto_google_common_protos(): def com_google_auth_google_auth_library_credentials(): jvm_maven_import_external( name = "com_google_auth_google_auth_library_credentials", - artifact = "com.google.auth:google-auth-library-credentials:0.19.0", + artifact = "com.google.auth:google-auth-library-credentials:0.20.0", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "3f81ea05795abc40daf36f4c427487a738489f7cc0f515b7930f838ed301165a", + artifact_sha256 = 
"8a415273a5dae5c8f9080134e53b9592dc171ca5d13127488c910177c5903bd6", licenses = ["notice"], # BSD 3-clause ) def com_google_auth_google_auth_library_oauth2_http(): jvm_maven_import_external( name = "com_google_auth_google_auth_library_oauth2_http", - artifact = "com.google.auth:google-auth-library-oauth2-http:0.19.0", + artifact = "com.google.auth:google-auth-library-oauth2-http:0.20.0", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "51992d58ec0f903fb521412f677bd09d83895609a0198d4a9ae65a3d88e2aa4a", + artifact_sha256 = "43e96e8c07285c2887042eda4e35ca96522ef361f6c1843f469039d9ccdc8f8a", licenses = ["notice"], # BSD 3-clause ) From 06b983567e257f06a6b3118d2ad6ae08a4e29526 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Thu, 30 Jan 2020 13:19:14 -0800 Subject: [PATCH 33/86] xds: rename LookasideLb to EdsLoadBalancer and XdsLoadBalancer2 to XdsLoadBalancer This is a follow up cleanup of #6656 . --- ...{LookasideLb.java => EdsLoadBalancer.java} | 56 ++++++------- .../io/grpc/xds/EdsLoadBalancerProvider.java | 6 +- .../main/java/io/grpc/xds/LoadStatsStore.java | 8 +- ...oadBalancer2.java => XdsLoadBalancer.java} | 62 +++++++------- .../io/grpc/xds/XdsLoadBalancerProvider.java | 2 +- ...deLbTest.java => EdsLoadBalancerTest.java} | 54 ++++++------ ...cer2Test.java => XdsLoadBalancerTest.java} | 82 +++++++++---------- 7 files changed, 136 insertions(+), 134 deletions(-) rename xds/src/main/java/io/grpc/xds/{LookasideLb.java => EdsLoadBalancer.java} (90%) rename xds/src/main/java/io/grpc/xds/{XdsLoadBalancer2.java => XdsLoadBalancer.java} (80%) rename xds/src/test/java/io/grpc/xds/{LookasideLbTest.java => EdsLoadBalancerTest.java} (95%) rename xds/src/test/java/io/grpc/xds/{XdsLoadBalancer2Test.java => XdsLoadBalancerTest.java} (79%) diff --git a/xds/src/main/java/io/grpc/xds/LookasideLb.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java similarity index 90% rename from 
xds/src/main/java/io/grpc/xds/LookasideLb.java rename to xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java index ca555b8aabf..640a12dc4ba 100644 --- a/xds/src/main/java/io/grpc/xds/LookasideLb.java +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java @@ -52,17 +52,17 @@ import java.util.Objects; import javax.annotation.Nullable; -/** Lookaside load balancer that handles EDS config. */ -final class LookasideLb extends LoadBalancer { +/** Load balancer for the EDS LB policy. */ +final class EdsLoadBalancer extends LoadBalancer { private final ChannelLogger channelLogger; - private final EndpointUpdateCallback endpointUpdateCallback; + private final ResourceUpdateCallback resourceUpdateCallback; private final GracefulSwitchLoadBalancer switchingLoadBalancer; private final LoadBalancerRegistry lbRegistry; private final LocalityStoreFactory localityStoreFactory; private final Bootstrapper bootstrapper; private final XdsChannelFactory channelFactory; - private final Helper lookasideLbHelper; + private final Helper edsLbHelper; // Cache for load stats stores for each service in cluster keyed by cluster service names. 
private final Map loadStatsStoreMap = new HashMap<>(); @@ -81,10 +81,10 @@ final class LookasideLb extends LoadBalancer { @Nullable private String clusterName; - LookasideLb(Helper lookasideLbHelper, EndpointUpdateCallback endpointUpdateCallback) { + EdsLoadBalancer(Helper edsLbHelper, ResourceUpdateCallback resourceUpdateCallback) { this( - checkNotNull(lookasideLbHelper, "lookasideLbHelper"), - checkNotNull(endpointUpdateCallback, "endpointUpdateCallback"), + checkNotNull(edsLbHelper, "edsLbHelper"), + checkNotNull(resourceUpdateCallback, "resourceUpdateCallback"), LoadBalancerRegistry.getDefaultRegistry(), LocalityStoreFactory.getInstance(), Bootstrapper.getInstance(), @@ -92,19 +92,19 @@ final class LookasideLb extends LoadBalancer { } @VisibleForTesting - LookasideLb( - Helper lookasideLbHelper, - EndpointUpdateCallback endpointUpdateCallback, + EdsLoadBalancer( + Helper edsLbHelper, + ResourceUpdateCallback resourceUpdateCallback, LoadBalancerRegistry lbRegistry, LocalityStoreFactory localityStoreFactory, Bootstrapper bootstrapper, XdsChannelFactory channelFactory) { - this.lookasideLbHelper = lookasideLbHelper; - this.channelLogger = lookasideLbHelper.getChannelLogger(); - this.endpointUpdateCallback = endpointUpdateCallback; + this.edsLbHelper = edsLbHelper; + this.channelLogger = edsLbHelper.getChannelLogger(); + this.resourceUpdateCallback = resourceUpdateCallback; this.lbRegistry = lbRegistry; this.localityStoreFactory = localityStoreFactory; - this.switchingLoadBalancer = new GracefulSwitchLoadBalancer(lookasideLbHelper); + this.switchingLoadBalancer = new GracefulSwitchLoadBalancer(edsLbHelper); this.bootstrapper = bootstrapper; this.channelFactory = channelFactory; } @@ -115,7 +115,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { Object lbConfig = resolvedAddresses.getLoadBalancingPolicyConfig(); if (!(lbConfig instanceof XdsConfig)) { - lookasideLbHelper.updateBalancingState( + edsLbHelper.updateBalancingState( 
TRANSIENT_FAILURE, new ErrorPicker(Status.UNAVAILABLE.withDescription( "Load balancing config '" + lbConfig + "' is not an XdsConfig"))); @@ -143,7 +143,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { try { bootstrapInfo = bootstrapper.readBootstrap(); } catch (Exception e) { - lookasideLbHelper.updateBalancingState( + edsLbHelper.updateBalancingState( TRANSIENT_FAILURE, new ErrorPicker(Status.UNAVAILABLE.withCause(e))); return; @@ -152,7 +152,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { final List serverList = bootstrapInfo.getServers(); final Node node = bootstrapInfo.getNode(); if (serverList.isEmpty()) { - lookasideLbHelper.updateBalancingState( + edsLbHelper.updateBalancingState( TRANSIENT_FAILURE, new ErrorPicker( Status.UNAVAILABLE @@ -167,8 +167,8 @@ XdsClient createXdsClient() { serverList, channelFactory, node, - lookasideLbHelper.getSynchronizationContext(), - lookasideLbHelper.getScheduledExecutorService(), + edsLbHelper.getSynchronizationContext(), + edsLbHelper.getScheduledExecutorService(), new ExponentialBackoffPolicy.Provider(), GrpcUtil.STOPWATCH_SUPPLIER); } @@ -184,7 +184,7 @@ XdsClient createXdsClient() { // and if edsServiceName is not null, it will always be not null. String clusterServiceName = newXdsConfig.edsServiceName; if (clusterServiceName == null) { - clusterServiceName = lookasideLbHelper.getAuthority(); + clusterServiceName = edsLbHelper.getAuthority(); } if (clusterName == null) { // TODO(zdapeng): Use the correct cluster name. 
Currently load reporting will be broken if @@ -314,11 +314,11 @@ final class ClusterEndpointsBalancer extends LoadBalancer { endpointWatcher = new EndpointWatcherImpl(localityStore); xdsClient.watchEndpointData(clusterServiceName, endpointWatcher); - if (LookasideLb.this.endpointWatcher != null) { + if (EdsLoadBalancer.this.endpointWatcher != null) { xdsClient.cancelEndpointDataWatch( - oldClusterServiceName, LookasideLb.this.endpointWatcher); + oldClusterServiceName, EdsLoadBalancer.this.endpointWatcher); } - LookasideLb.this.endpointWatcher = endpointWatcher; + EdsLoadBalancer.this.endpointWatcher = endpointWatcher; } // TODO(zddapeng): In handleResolvedAddresses() handle child policy change if any. @@ -352,7 +352,7 @@ public void shutdown() { /** * Callbacks for the EDS-only-with-fallback usecase. Being deprecated. */ - interface EndpointUpdateCallback { + interface ResourceUpdateCallback { void onWorking(); @@ -379,7 +379,7 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { if (!firstEndpointUpdateReceived) { firstEndpointUpdateReceived = true; - endpointUpdateCallback.onWorking(); + resourceUpdateCallback.onWorking(); } List dropOverloads = endpointUpdate.getDropPolicies(); @@ -387,7 +387,7 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { for (DropOverload dropOverload : dropOverloads) { dropOverloadsBuilder.add(dropOverload); if (dropOverload.getDropsPerMillion() == 1_000_000) { - endpointUpdateCallback.onAllDrop(); + resourceUpdateCallback.onAllDrop(); break; } } @@ -410,12 +410,12 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { @Override public void onError(Status error) { channelLogger.log(ChannelLogLevel.ERROR, "EDS load balancer received an error: {0}", error); - endpointUpdateCallback.onError(); + resourceUpdateCallback.onError(); // If we get an error before getting any valid result, we should put the channel in // TRANSIENT_FAILURE; if they get an error after getting a valid result, we keep using the 
// previous channel state. if (!firstEndpointUpdateReceived) { - lookasideLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); + edsLbHelper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); } } } diff --git a/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java index 1ae5a7f6702..7985c7f1c87 100644 --- a/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancerProvider.java @@ -22,7 +22,7 @@ import io.grpc.LoadBalancerProvider; import io.grpc.LoadBalancerRegistry; import io.grpc.NameResolver.ConfigOrError; -import io.grpc.xds.LookasideLb.EndpointUpdateCallback; +import io.grpc.xds.EdsLoadBalancer.ResourceUpdateCallback; import java.util.Map; /** @@ -52,9 +52,9 @@ public String getPolicyName() { @Override public LoadBalancer newLoadBalancer(Helper helper) { - return new LookasideLb( + return new EdsLoadBalancer( helper, - new EndpointUpdateCallback() { + new ResourceUpdateCallback() { @Override public void onWorking() {} diff --git a/xds/src/main/java/io/grpc/xds/LoadStatsStore.java b/xds/src/main/java/io/grpc/xds/LoadStatsStore.java index 8889db7d8e9..86eeb07a236 100644 --- a/xds/src/main/java/io/grpc/xds/LoadStatsStore.java +++ b/xds/src/main/java/io/grpc/xds/LoadStatsStore.java @@ -27,7 +27,7 @@ * (i.e., Google backends) are aggregated in locality granularity (i.e., Google cluster) while the * numbers of dropped calls are aggregated in cluster granularity. * - *

An {@code LoadStatsStore} lives the same span of lifecycle as {@link XdsLoadBalancer2} and + *

An {@code LoadStatsStore} lives the same span of lifecycle as a cluster and * only tracks loads for localities exposed by remote traffic director. A proper usage should be * *

    @@ -61,7 +61,7 @@ interface LoadStatsStore { * reporting. * *

    This method is not thread-safe and should be called from the same synchronized context - * returned by {@link XdsLoadBalancer2.Helper#getSynchronizationContext}. + * used by {@link XdsClient}. */ ClusterStats generateLoadReport(); @@ -73,7 +73,7 @@ interface LoadStatsStore { * balancer discovery responses before recording loads for those localities. * *

    This method is not thread-safe and should be called from the same synchronized context - * returned by {@link XdsLoadBalancer2.Helper#getSynchronizationContext}. + * used by {@link XdsClient}. */ void addLocality(Locality locality); @@ -88,7 +88,7 @@ interface LoadStatsStore { * waste and keep including zero-load upstream locality stats in generated load reports. * *

    This method is not thread-safe and should be called from the same synchronized context - * returned by {@link XdsLoadBalancer2.Helper#getSynchronizationContext}. + * used by {@link XdsClient}. */ void removeLocality(Locality locality); diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancer.java similarity index 80% rename from xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java rename to xds/src/main/java/io/grpc/xds/XdsLoadBalancer.java index fdbfdf5149c..a252df68cfa 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancer2.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancer.java @@ -27,7 +27,7 @@ import io.grpc.Status; import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.util.ForwardingLoadBalancerHelper; -import io.grpc.xds.LookasideLb.EndpointUpdateCallback; +import io.grpc.xds.EdsLoadBalancer.ResourceUpdateCallback; import java.util.concurrent.TimeUnit; import javax.annotation.CheckForNull; import javax.annotation.Nullable; @@ -36,33 +36,32 @@ * A {@link LoadBalancer} that uses the XDS protocol. * *

    This class manages fallback handling. The logic for child policy handling and fallback policy - * handling is provided by LookasideLb and FallbackLb. + * handling is provided by EdsLoadBalancer and FallbackLb. */ -// TODO(zdapeng): migrate name to XdsLoadBlancer -final class XdsLoadBalancer2 extends LoadBalancer { +final class XdsLoadBalancer extends LoadBalancer { private static final long FALLBACK_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(10); // same as grpclb private final Helper helper; - private final LoadBalancer lookasideLb; + private final LoadBalancer primaryLb; private final LoadBalancer.Factory fallbackLbFactory; - private final EndpointUpdateCallback edsUpdateCallback = new EndpointUpdateCallback() { + private final ResourceUpdateCallback resourceUpdateCallback = new ResourceUpdateCallback() { @Override public void onWorking() { - if (childPolicyHasBeenReady) { + if (primaryPolicyHasBeenReady) { // cancel Fallback-After-Startup timer if there's any cancelFallbackTimer(); } - adsWorked = true; + primaryPolicyWorked = true; } @Override public void onError() { - if (!adsWorked) { + if (!primaryPolicyWorked) { // start Fallback-at-Startup immediately useFallbackPolicy(); - } else if (childPolicyHasBeenReady) { + } else if (primaryPolicyHasBeenReady) { // TODO: schedule a timer for Fallback-After-Startup } // else: the Fallback-at-Startup timer is still pending, noop and wait } @@ -80,21 +79,21 @@ public void onAllDrop() { // Scheduled only once. Never reset to null. 
@CheckForNull private ScheduledHandle fallbackTimer; - private boolean adsWorked; - private boolean childPolicyHasBeenReady; + private boolean primaryPolicyWorked; + private boolean primaryPolicyHasBeenReady; - XdsLoadBalancer2(Helper helper) { - this(helper, new LookasideLbFactoryImpl(), new FallbackLbFactory()); + XdsLoadBalancer(Helper helper) { + this(helper, new EdsLoadBalancerFactory(), new FallbackLbFactory()); } @VisibleForTesting - XdsLoadBalancer2( + XdsLoadBalancer( Helper helper, - LookasideLbFactory lookasideLbFactory, + PrimaryLbFactory primaryLbFactory, LoadBalancer.Factory fallbackLbFactory) { this.helper = helper; - this.lookasideLb = lookasideLbFactory.newLoadBalancer(new LookasideLbHelper(), - edsUpdateCallback); + this.primaryLb = primaryLbFactory.newLoadBalancer( + new PrimaryLbHelper(), resourceUpdateCallback); this.fallbackLbFactory = fallbackLbFactory; } @@ -127,12 +126,12 @@ public void run() { helper.getScheduledExecutorService()); } - lookasideLb.handleResolvedAddresses(resolvedAddresses); + primaryLb.handleResolvedAddresses(resolvedAddresses); } @Override public void handleNameResolutionError(Status error) { - lookasideLb.handleNameResolutionError(error); + primaryLb.handleNameResolutionError(error); if (isInFallbackMode()) { fallbackLb.handleNameResolutionError(error); } @@ -140,7 +139,7 @@ public void handleNameResolutionError(Status error) { @Override public void requestConnection() { - lookasideLb.requestConnection(); + primaryLb.requestConnection(); if (isInFallbackMode()) { fallbackLb.requestConnection(); } @@ -150,7 +149,7 @@ public void requestConnection() { public void shutdown() { helper.getChannelLogger().log( ChannelLogLevel.INFO, "Shutting down XDS balancer"); - lookasideLb.shutdown(); + primaryLb.shutdown(); cancelFallback(); } @@ -198,7 +197,7 @@ private boolean isInFallbackMode() { return fallbackLb != null; } - private final class LookasideLbHelper extends ForwardingLoadBalancerHelper { + private final class 
PrimaryLbHelper extends ForwardingLoadBalancerHelper { @Override protected Helper delegate() { @@ -209,9 +208,9 @@ protected Helper delegate() { public void updateBalancingState(ConnectivityState newState, SubchannelPicker newPicker) { if (newState == ConnectivityState.READY) { checkState( - adsWorked, + primaryPolicyWorked, "channel goes to READY before the load balancer even worked"); - childPolicyHasBeenReady = true; + primaryPolicyHasBeenReady = true; cancelFallback(); } if (!isInFallbackMode()) { @@ -244,17 +243,18 @@ public void updateBalancingState(ConnectivityState newState, SubchannelPicker ne } } - /** Factory of a look-aside load balancer. The interface itself is for convenience in test. */ + /** Factory of load balancer for the primary policy.*/ + // The interface itself is for convenience in test. @VisibleForTesting - interface LookasideLbFactory { - LoadBalancer newLoadBalancer(Helper helper, EndpointUpdateCallback edsUpdateCallback); + interface PrimaryLbFactory { + LoadBalancer newLoadBalancer(Helper helper, ResourceUpdateCallback resourceUpdateCallback); } - private static final class LookasideLbFactoryImpl implements LookasideLbFactory { + private static final class EdsLoadBalancerFactory implements PrimaryLbFactory { @Override public LoadBalancer newLoadBalancer( - Helper lookasideLbHelper, EndpointUpdateCallback edsUpdateCallback) { - return new LookasideLb(lookasideLbHelper, edsUpdateCallback); + Helper edsLbHelper, ResourceUpdateCallback resourceUpdateCallback) { + return new EdsLoadBalancer(edsLbHelper, resourceUpdateCallback); } } diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java index 82311647e3d..47a7a1bffa0 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java @@ -63,7 +63,7 @@ public String getPolicyName() { @Override public LoadBalancer newLoadBalancer(Helper helper) { - 
return new XdsLoadBalancer2(helper); + return new XdsLoadBalancer(helper); } @Override diff --git a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java b/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java similarity index 95% rename from xds/src/test/java/io/grpc/xds/LookasideLbTest.java rename to xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java index e1758fc33fa..e69490906e6 100644 --- a/xds/src/test/java/io/grpc/xds/LookasideLbTest.java +++ b/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java @@ -73,8 +73,8 @@ import io.grpc.xds.Bootstrapper.BootstrapInfo; import io.grpc.xds.Bootstrapper.ChannelCreds; import io.grpc.xds.Bootstrapper.ServerInfo; +import io.grpc.xds.EdsLoadBalancer.ResourceUpdateCallback; import io.grpc.xds.LocalityStore.LocalityStoreFactory; -import io.grpc.xds.LookasideLb.EndpointUpdateCallback; import io.grpc.xds.XdsClient.EndpointUpdate; import io.grpc.xds.XdsClient.XdsChannelFactory; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; @@ -103,10 +103,10 @@ import org.mockito.junit.MockitoRule; /** - * Tests for {@link LookasideLb}. + * Tests for {@link EdsLoadBalancer}. */ @RunWith(Parameterized.class) -public class LookasideLbTest { +public class EdsLoadBalancerTest { private static final String SERVICE_AUTHORITY = "test.authority.example.com"; @@ -142,7 +142,7 @@ ManagedChannel createChannel(List servers) { @Mock private Helper helper; @Mock - private EndpointUpdateCallback edsUpdateCallback; + private ResourceUpdateCallback resourceUpdateCallback; @Mock private Bootstrapper bootstrapper; @Captor @@ -150,7 +150,7 @@ ManagedChannel createChannel(List servers) { @Captor ArgumentCaptor pickerCaptor; - private LoadBalancer lookasideLb; + private LoadBalancer edsLb; // Simulating a CDS to EDS flow, otherwise EDS only. 
@Parameter public boolean isFullFlow; @@ -206,7 +206,7 @@ public LoadBalancer newLoadBalancer(Helper helper) { @Override public StreamObserver streamAggregatedResources( final StreamObserver responseObserver) { - LookasideLbTest.this.responseObserver = responseObserver; + EdsLoadBalancerTest.this.responseObserver = responseObserver; @SuppressWarnings("unchecked") StreamObserver requestObserver = mock(StreamObserver.class); return requestObserver; @@ -239,13 +239,14 @@ public StreamObserver streamAggregatedResources( fakeClock.getStopwatchSupplier())); } - lookasideLb = new LookasideLb( - helper, edsUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, channelFactory); + edsLb = new EdsLoadBalancer( + helper, resourceUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, + channelFactory); } @After public void tearDown() { - lookasideLb.shutdown(); + edsLb.shutdown(); for (LoadBalancer childBalancer : childBalancers.values()) { verify(childBalancer).shutdown(); @@ -267,7 +268,7 @@ public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() { deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); // handleResolutionError() before receiving any endpoint update. - lookasideLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); + edsLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); // Endpoint update received. @@ -282,7 +283,7 @@ public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() { receiveEndpointUpdate(clusterLoadAssignment); // handleResolutionError() after receiving endpoint update. - lookasideLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); + edsLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); // No more TRANSIENT_FAILURE. 
verify(helper, times(1)).updateBalancingState( eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); @@ -433,7 +434,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); - verify(edsUpdateCallback, never()).onWorking(); + verify(resourceUpdateCallback, never()).onWorking(); // first EDS response ClusterLoadAssignment clusterLoadAssignment = @@ -446,7 +447,7 @@ public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { ImmutableList.of()); receiveEndpointUpdate(clusterLoadAssignment); - verify(edsUpdateCallback).onWorking(); + verify(resourceUpdateCallback).onWorking(); // second EDS response clusterLoadAssignment = @@ -459,8 +460,8 @@ public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { 1, 0)), ImmutableList.of()); receiveEndpointUpdate(clusterLoadAssignment); - verify(edsUpdateCallback, times(1)).onWorking(); - verify(edsUpdateCallback, never()).onError(); + verify(resourceUpdateCallback, times(1)).onWorking(); + verify(resourceUpdateCallback, never()).onError(); } @Test @@ -477,7 +478,7 @@ public void handleAllDropUpdates_pickersAreDropped() { ImmutableList.of()); receiveEndpointUpdate(clusterLoadAssignment); - verify(edsUpdateCallback, never()).onAllDrop(); + verify(resourceUpdateCallback, never()).onAllDrop(); assertThat(childBalancers).hasSize(1); verify(childBalancers.get("subzone1")).handleResolvedAddresses( argThat(RoundRobinBackendsMatcher.builder().addHostAndPort("192.168.0.1", 8080).build())); @@ -507,13 +508,13 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { buildDropOverload("cat_3", 4))); receiveEndpointUpdate(clusterLoadAssignment); - verify(edsUpdateCallback).onAllDrop(); + verify(resourceUpdateCallback).onAllDrop(); verify(helper, atLeastOnce()).updateBalancingState(eq(READY), pickerCaptor.capture()); SubchannelPicker pickerExpectedDropAll = 
pickerCaptor.getValue(); assertThat(pickerExpectedDropAll.pickSubchannel(mock(PickSubchannelArgs.class)).isDrop()) .isTrue(); - verify(edsUpdateCallback, never()).onError(); + verify(resourceUpdateCallback, never()).onError(); } @Test @@ -577,7 +578,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { childHelper2.updateBalancingState(READY, picker); assertLatestSubchannelPicker(subchannel); - verify(edsUpdateCallback, never()).onError(); + verify(resourceUpdateCallback, never()).onError(); } // Uses a fake LocalityStoreFactory that creates a mock LocalityStore, and verifies interaction @@ -601,8 +602,9 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, return localityStore; } }; - lookasideLb = new LookasideLb( - helper, edsUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, channelFactory); + edsLb = new EdsLoadBalancer( + helper, resourceUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, + channelFactory); deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); assertThat(localityStores).hasSize(1); @@ -666,10 +668,10 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, public void verifyErrorPropagation_noPreviousEndpointUpdateReceived() { deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); - verify(edsUpdateCallback, never()).onError(); + verify(resourceUpdateCallback, never()).onError(); // Forwarding 20 seconds so that the xds client will deem EDS resource not available. 
fakeClock.forwardTime(20, TimeUnit.SECONDS); - verify(edsUpdateCallback).onError(); + verify(resourceUpdateCallback).onError(); verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); } @@ -689,13 +691,13 @@ public void verifyErrorPropagation_withPreviousEndpointUpdateReceived() { verify(helper, never()).updateBalancingState( eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); - verify(edsUpdateCallback, never()).onError(); + verify(resourceUpdateCallback, never()).onError(); // XdsClient stream receives an error. responseObserver.onError(new RuntimeException("fake error")); verify(helper, never()).updateBalancingState( eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); - verify(edsUpdateCallback).onError(); + verify(resourceUpdateCallback).onError(); } /** @@ -730,7 +732,7 @@ private void deliverResolvedAddresses(XdsConfig xdsConfig) { Attributes.newBuilder().set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPoolFromResolveAddresses).build()); } - lookasideLb.handleResolvedAddresses(resolvedAddressBuilder.build()); + edsLb.handleResolvedAddresses(resolvedAddressBuilder.build()); } private void receiveEndpointUpdate(ClusterLoadAssignment clusterLoadAssignment) { diff --git a/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java b/xds/src/test/java/io/grpc/xds/XdsLoadBalancerTest.java similarity index 79% rename from xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java rename to xds/src/test/java/io/grpc/xds/XdsLoadBalancerTest.java index db8b82e2ffc..d2942834517 100644 --- a/xds/src/test/java/io/grpc/xds/XdsLoadBalancer2Test.java +++ b/xds/src/test/java/io/grpc/xds/XdsLoadBalancerTest.java @@ -40,8 +40,8 @@ import io.grpc.Status; import io.grpc.SynchronizationContext; import io.grpc.internal.FakeClock; -import io.grpc.xds.LookasideLb.EndpointUpdateCallback; -import io.grpc.xds.XdsLoadBalancer2.LookasideLbFactory; +import io.grpc.xds.EdsLoadBalancer.ResourceUpdateCallback; +import io.grpc.xds.XdsLoadBalancer.PrimaryLbFactory; 
import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; @@ -55,9 +55,9 @@ import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; -/** Unit tests for {@link XdsLoadBalancer2}. */ +/** Unit tests for {@link XdsLoadBalancer}. */ @RunWith(JUnit4.class) -public class XdsLoadBalancer2Test { +public class XdsLoadBalancerTest { @Rule public final ExpectedException thrown = ExpectedException.none(); @Rule @@ -75,10 +75,10 @@ public void uncaughtException(Thread t, Throwable e) { @Mock private Helper helper; private LoadBalancer xdsLoadBalancer; - private EndpointUpdateCallback edsUpdateCallback; + private ResourceUpdateCallback resourceUpdateCallback; - private Helper lookasideLbHelper; - private final List lookasideLbs = new ArrayList<>(); + private Helper primaryLbHelper; + private final List primaryLbs = new ArrayList<>(); private Helper fallbackLbHelper; private final List fallbackLbs = new ArrayList<>(); @@ -87,16 +87,16 @@ public void uncaughtException(Thread t, Throwable e) { @Before public void setUp() { - LookasideLbFactory lookasideLbFactory = new LookasideLbFactory() { + PrimaryLbFactory primaryLbFactory = new PrimaryLbFactory() { @Override public LoadBalancer newLoadBalancer( - Helper helper, EndpointUpdateCallback edsUpdateCallback) { + Helper helper, ResourceUpdateCallback resourceUpdateCallback) { // just return a mock and record the input and output - lookasideLbHelper = helper; - XdsLoadBalancer2Test.this.edsUpdateCallback = edsUpdateCallback; - LoadBalancer lookasideLb = mock(LoadBalancer.class); - lookasideLbs.add(lookasideLb); - return lookasideLb; + primaryLbHelper = helper; + XdsLoadBalancerTest.this.resourceUpdateCallback = resourceUpdateCallback; + LoadBalancer primaryLb = mock(LoadBalancer.class); + primaryLbs.add(primaryLb); + return primaryLb; } }; LoadBalancer.Factory fallbackLbFactory = new LoadBalancer.Factory() { @@ -114,7 +114,7 @@ public LoadBalancer newLoadBalancer(Helper helper) { 
doReturn(mock(ChannelLogger.class)).when(helper).getChannelLogger(); xdsLoadBalancer = - new XdsLoadBalancer2(helper, lookasideLbFactory, fallbackLbFactory); + new XdsLoadBalancer(helper, primaryLbFactory, fallbackLbFactory); xdsLoadBalancer.handleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()).build()); @@ -122,10 +122,10 @@ public LoadBalancer newLoadBalancer(Helper helper) { @Test public void tearDown() { - assertThat(lookasideLbs).hasSize(1); + assertThat(primaryLbs).hasSize(1); xdsLoadBalancer.shutdown(); - for (LoadBalancer lookasideLb : lookasideLbs) { - verify(lookasideLb).shutdown(); + for (LoadBalancer primaryLb : primaryLbs) { + verify(primaryLb).shutdown(); } for (LoadBalancer fallbackLb : fallbackLbs) { verify(fallbackLb).shutdown(); @@ -142,13 +142,13 @@ public void canHandleEmptyAddressListFromNameResolution() { public void timeoutAtStartup_expectUseFallback_thenBackendReady_expectExitFallback() { verifyNotInFallbackMode(); fakeClock.forwardTime(9, TimeUnit.SECONDS); - edsUpdateCallback.onWorking(); + resourceUpdateCallback.onWorking(); verifyNotInFallbackMode(); fakeClock.forwardTime(1, TimeUnit.SECONDS); verifyInFallbackMode(); SubchannelPicker subchannelPicker = mock(SubchannelPicker.class); - lookasideLbHelper.updateBalancingState(READY, subchannelPicker); + primaryLbHelper.updateBalancingState(READY, subchannelPicker); verify(helper).updateBalancingState(READY, subchannelPicker); verifyNotInFallbackMode(); @@ -162,9 +162,9 @@ public void backendReadyBeforeTimeoutAtStartup_expectNoFallback() { verifyNotInFallbackMode(); assertThat(fakeClock.getPendingTasks()).hasSize(1); - edsUpdateCallback.onWorking(); + resourceUpdateCallback.onWorking(); SubchannelPicker subchannelPicker = mock(SubchannelPicker.class); - lookasideLbHelper.updateBalancingState(READY, subchannelPicker); + primaryLbHelper.updateBalancingState(READY, subchannelPicker); verify(helper).updateBalancingState(READY, subchannelPicker); 
assertThat(fakeClock.getPendingTasks()).isEmpty(); verifyNotInFallbackMode(); @@ -177,7 +177,7 @@ public void recevieAllDropBeforeTimeoutAtStartup_expectNoFallback() { verifyNotInFallbackMode(); assertThat(fakeClock.getPendingTasks()).hasSize(1); - edsUpdateCallback.onAllDrop(); + resourceUpdateCallback.onAllDrop(); assertThat(fakeClock.getPendingTasks()).isEmpty(); verifyNotInFallbackMode(); @@ -185,22 +185,22 @@ public void recevieAllDropBeforeTimeoutAtStartup_expectNoFallback() { } @Test - public void lookasideChannelFailsWithoutSeeingEdsResponseBeforeTimeoutAtStartup() { + public void primaryFailsWithoutSeeingEdsResponseBeforeTimeoutAtStartup() { verifyNotInFallbackMode(); assertThat(fakeClock.getPendingTasks()).hasSize(1); - edsUpdateCallback.onError(); + resourceUpdateCallback.onError(); verifyInFallbackMode(); assertThat(fallbackLbs).hasSize(1); } @Test - public void lookasideChannelSeeingEdsResponseThenFailsBeforeTimeoutAtStartup() { + public void primarySeeingEdsResponseThenFailsBeforeTimeoutAtStartup() { verifyNotInFallbackMode(); assertThat(fakeClock.getPendingTasks()).hasSize(1); - edsUpdateCallback.onWorking(); - edsUpdateCallback.onError(); + resourceUpdateCallback.onWorking(); + resourceUpdateCallback.onError(); verifyNotInFallbackMode(); fakeClock.forwardTime(10, TimeUnit.SECONDS); @@ -221,21 +221,21 @@ public void fallbackWillHandleLastResolvedAddresses() { .build(); xdsLoadBalancer.handleResolvedAddresses(resolvedAddresses); - edsUpdateCallback.onError(); + resourceUpdateCallback.onError(); LoadBalancer fallbackLb = Iterables.getLast(fallbackLbs); verify(fallbackLb).handleResolvedAddresses(same(resolvedAddresses)); } private void verifyInFallbackMode() { - assertThat(lookasideLbs).isNotEmpty(); + assertThat(primaryLbs).isNotEmpty(); assertThat(fallbackLbs).isNotEmpty(); - LoadBalancer lookasideLb = Iterables.getLast(lookasideLbs); + LoadBalancer primaryLb = Iterables.getLast(primaryLbs); LoadBalancer fallbackLb = Iterables.getLast(fallbackLbs); - 
verify(lookasideLb, never()).shutdown(); + verify(primaryLb, never()).shutdown(); verify(fallbackLb, never()).shutdown(); xdsLoadBalancer.requestConnection(); - verify(lookasideLb, times(++requestConnectionTimes)).requestConnection(); + verify(primaryLb, times(++requestConnectionTimes)).requestConnection(); verify(fallbackLb).requestConnection(); ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() @@ -245,16 +245,16 @@ private void verifyInFallbackMode() { .setLoadBalancingPolicyConfig(new Object()) .build(); xdsLoadBalancer.handleResolvedAddresses(resolvedAddresses); - verify(lookasideLb).handleResolvedAddresses(same(resolvedAddresses)); + verify(primaryLb).handleResolvedAddresses(same(resolvedAddresses)); verify(fallbackLb).handleResolvedAddresses(same(resolvedAddresses)); Status status = Status.DATA_LOSS.withDescription(""); xdsLoadBalancer.handleNameResolutionError(status); - verify(lookasideLb).handleNameResolutionError(same(status)); + verify(primaryLb).handleNameResolutionError(same(status)); verify(fallbackLb).handleNameResolutionError(same(status)); SubchannelPicker subchannelPicker = mock(SubchannelPicker.class); - lookasideLbHelper.updateBalancingState(CONNECTING, subchannelPicker); + primaryLbHelper.updateBalancingState(CONNECTING, subchannelPicker); verify(helper, never()).updateBalancingState(CONNECTING, subchannelPicker); fallbackLbHelper.updateBalancingState(CONNECTING, subchannelPicker); verify(helper).updateBalancingState(CONNECTING, subchannelPicker); @@ -264,10 +264,10 @@ private void verifyNotInFallbackMode() { for (LoadBalancer fallbackLb : fallbackLbs) { verify(fallbackLb).shutdown(); } - LoadBalancer lookasideLb = Iterables.getLast(lookasideLbs); + LoadBalancer primaryLb = Iterables.getLast(primaryLbs); xdsLoadBalancer.requestConnection(); - verify(lookasideLb, times(++requestConnectionTimes)).requestConnection(); + verify(primaryLb, times(++requestConnectionTimes)).requestConnection(); ResolvedAddresses resolvedAddresses = 
ResolvedAddresses.newBuilder() .setAddresses(ImmutableList.of()) @@ -276,14 +276,14 @@ private void verifyNotInFallbackMode() { .setLoadBalancingPolicyConfig(new Object()) .build(); xdsLoadBalancer.handleResolvedAddresses(resolvedAddresses); - verify(lookasideLb).handleResolvedAddresses(same(resolvedAddresses)); + verify(primaryLb).handleResolvedAddresses(same(resolvedAddresses)); Status status = Status.DATA_LOSS.withDescription(""); xdsLoadBalancer.handleNameResolutionError(status); - verify(lookasideLb).handleNameResolutionError(same(status)); + verify(primaryLb).handleNameResolutionError(same(status)); SubchannelPicker subchannelPicker = mock(SubchannelPicker.class); - lookasideLbHelper.updateBalancingState(CONNECTING, subchannelPicker); + primaryLbHelper.updateBalancingState(CONNECTING, subchannelPicker); verify(helper).updateBalancingState(CONNECTING, subchannelPicker); } From 3e6a77a7ef4ab5b687da2e5ea04e798e9bd275af Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Thu, 30 Jan 2020 16:24:18 -0800 Subject: [PATCH 34/86] xds: precise logic for selecting the virtual host in RDS responses (#6661) Implements the precise logic for choosing the virtual host in RouteConfiguration of RDS responses. Specifically, fixes logic for domain search order. Minor fix for checking match field in RouteConfiguration. See RouteConfiguration Proto section in gRPC Client xDS API Flow design doc for specification. 
--- .../main/java/io/grpc/xds/XdsClientImpl.java | 58 ++++++--- .../java/io/grpc/xds/XdsClientImplTest.java | 119 ++++++++++++++++++ .../java/io/grpc/xds/XdsClientTestHelper.java | 25 ++-- 3 files changed, 176 insertions(+), 26 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index f1c8f4d5167..83340d070b6 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -508,7 +508,7 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { // data or one supersedes the other. TBD. if (requestedHttpConnManager.hasRouteConfig()) { RouteConfiguration rc = requestedHttpConnManager.getRouteConfig(); - clusterName = processRouteConfig(rc); + clusterName = findClusterNameInRouteConfig(rc, hostName); if (clusterName == null) { errorMessage = "Cannot find a valid cluster name in VirtualHost inside " + "RouteConfiguration with domains matching: " + hostName + "."; @@ -599,7 +599,7 @@ private void handleRdsResponse(DiscoveryResponse rdsResponse) { // Resolved cluster name for the requested resource, if exists. String clusterName = null; if (requestedRouteConfig != null) { - clusterName = processRouteConfig(requestedRouteConfig); + clusterName = findClusterNameInRouteConfig(requestedRouteConfig, hostName); if (clusterName == null) { adsStream.sendNackRequest(ADS_TYPE_URL_RDS, ImmutableList.of(adsStream.rdsResourceName), "Cannot find a valid cluster name in VirtualHost inside " @@ -624,38 +624,66 @@ private void handleRdsResponse(DiscoveryResponse rdsResponse) { } /** - * Processes RouteConfiguration message (from an resource information in an LDS or RDS - * response), which may contain a VirtualHost with domains matching the "xds:" - * URI hostname directly in-line. Returns the clusterName found in that VirtualHost - * message. Returns {@code null} if such a clusterName cannot be resolved. - * - *

    Note we only validate VirtualHosts with domains matching the "xds:" URI hostname. + * Processes a RouteConfiguration message to find the name of upstream cluster that requests + * for the given host will be routed to. Returns the clusterName if found. + * Otherwise, returns {@code null}. */ + @VisibleForTesting @Nullable - private String processRouteConfig(RouteConfiguration config) { + static String findClusterNameInRouteConfig(RouteConfiguration config, String hostName) { List virtualHosts = config.getVirtualHostsList(); - int matchingLen = -1; // longest length of wildcard pattern that matches host name + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. + // + // The longest wildcards match first. + // Assuming only a single virtual host in the entire route configuration can match + // on ``*`` and a domain must be unique across all virtual hosts. 
+ int matchingLen = -1; // longest length of wildcard pattern that matches host name + boolean exactMatchFound = false; // true if a virtual host with exactly matched domain found VirtualHost targetVirtualHost = null; // target VirtualHost with longest matched domain for (VirtualHost vHost : virtualHosts) { for (String domain : vHost.getDomainsList()) { - if (matchHostName(hostName, domain) && domain.length() > matchingLen) { + boolean selected = false; + if (matchHostName(hostName, domain)) { // matching + if (!domain.contains("*")) { // exact matching + exactMatchFound = true; + targetVirtualHost = vHost; + break; + } else if (domain.length() > matchingLen) { // longer matching pattern + selected = true; + } else if (domain.length() == matchingLen && domain.startsWith("*")) { // suffix matching + selected = true; + } + } + if (selected) { matchingLen = domain.length(); targetVirtualHost = vHost; } } + if (exactMatchFound) { + break; + } } // Proceed with the virtual host that has longest wildcard matched domain name with the // hostname in original "xds:" URI. + // Note we would consider upstream cluster not found if the virtual host is not configured + // correctly for gRPC, even if there exist other virtual hosts with (lower priority) + // matching domains. if (targetVirtualHost != null) { // The client will look only at the last route in the list (the default route), - // whose match field must be empty and whose route field must be set. + // whose match field must contain a prefix field whose value is empty string + // and whose route field must be set. List routes = targetVirtualHost.getRoutesList(); if (!routes.isEmpty()) { Route route = routes.get(routes.size() - 1); - // TODO(chengyuanzhang): check the match field must be empty. 
- if (route.hasRoute()) { - return route.getRoute().getCluster(); + if (route.getMatch().getPrefix().equals("")) { + if (route.hasRoute()) { + return route.getRoute().getCluster(); + } } } } diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index 738b510af8a..55dccdd6fe4 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -58,6 +58,8 @@ import io.envoyproxy.envoy.api.v2.endpoint.ClusterStats; import io.envoyproxy.envoy.api.v2.route.RedirectAction; import io.envoyproxy.envoy.api.v2.route.Route; +import io.envoyproxy.envoy.api.v2.route.RouteAction; +import io.envoyproxy.envoy.api.v2.route.RouteMatch; import io.envoyproxy.envoy.api.v2.route.VirtualHost; import io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager; import io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2.Rds; @@ -3196,6 +3198,111 @@ public void matchHostName_postfixWildCard() { assertThat(XdsClientImpl.matchHostName("foo-bar", pattern)).isTrue(); } + @Test + public void findClusterNameInRouteConfig_exactMatchFirst() { + String hostname = "a.googleapis.com"; + String targetClusterName = "cluster-hello.googleapis.com"; + VirtualHost vHost1 = + VirtualHost.newBuilder() + .setName("virtualhost01.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("a.googleapis.com", "b.googleapis.com")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster(targetClusterName)) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + VirtualHost vHost2 = + VirtualHost.newBuilder() + .setName("virtualhost02.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("*.googleapis.com")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster("cluster-hi.googleapis.com")) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + 
VirtualHost vHost3 = + VirtualHost.newBuilder() + .setName("virtualhost03.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("*")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster("cluster-hey.googleapis.com")) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + RouteConfiguration routeConfig = + buildRouteConfiguration( + "route-foo.googleapis.com", ImmutableList.of(vHost1, vHost2, vHost3)); + String result = XdsClientImpl.findClusterNameInRouteConfig(routeConfig, hostname); + assertThat(result).isEqualTo(targetClusterName); + } + + @Test + public void findClusterNameInRouteConfig_preferSuffixDomainOverPrefixDomain() { + String hostname = "a.googleapis.com"; + String targetClusterName = "cluster-hello.googleapis.com"; + VirtualHost vHost1 = + VirtualHost.newBuilder() + .setName("virtualhost01.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("*.googleapis.com", "b.googleapis.com")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster(targetClusterName)) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + VirtualHost vHost2 = + VirtualHost.newBuilder() + .setName("virtualhost02.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("a.googleapis.*")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster("cluster-hi.googleapis.com")) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + VirtualHost vHost3 = + VirtualHost.newBuilder() + .setName("virtualhost03.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("*")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster("cluster-hey.googleapis.com")) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + RouteConfiguration routeConfig = + buildRouteConfiguration( + "route-foo.googleapis.com", ImmutableList.of(vHost1, vHost2, vHost3)); + String result = 
XdsClientImpl.findClusterNameInRouteConfig(routeConfig, hostname); + assertThat(result).isEqualTo(targetClusterName); + } + + @Test + public void findClusterNameInRouteConfig_asteriskMatchAnyDomain() { + String hostname = "a.googleapis.com"; + String targetClusterName = "cluster-hello.googleapis.com"; + VirtualHost vHost1 = + VirtualHost.newBuilder() + .setName("virtualhost01.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("*")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster(targetClusterName)) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + VirtualHost vHost2 = + VirtualHost.newBuilder() + .setName("virtualhost02.googleapis.com") // don't care + .addAllDomains(ImmutableList.of("b.googleapis.com")) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster("cluster-hi.googleapis.com")) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); + RouteConfiguration routeConfig = + buildRouteConfiguration( + "route-foo.googleapis.com", ImmutableList.of(vHost1, vHost2)); + String result = XdsClientImpl.findClusterNameInRouteConfig(routeConfig, hostname); + assertThat(result).isEqualTo(targetClusterName); + } + @Test public void messagePrinter_printLdsResponse() { MessagePrinter printer = new MessagePrinter(); @@ -3232,10 +3339,16 @@ public void messagePrinter_printLdsResponse() { + " \"name\": \"virtualhost00.googleapis.com\",\n" + " \"domains\": [\"foo.googleapis.com\", \"bar.googleapis.com\"],\n" + " \"routes\": [{\n" + + " \"match\": {\n" + + " \"prefix\": \"\"\n" + + " },\n" + " \"route\": {\n" + " \"cluster\": \"whatever cluster\"\n" + " }\n" + " }, {\n" + + " \"match\": {\n" + + " \"prefix\": \"\"\n" + + " },\n" + " \"route\": {\n" + " \"cluster\": \"cluster.googleapis.com\"\n" + " }\n" @@ -3276,10 +3389,16 @@ public void messagePrinter_printRdsResponse() { + " \"name\": \"virtualhost00.googleapis.com\",\n" + " \"domains\": [\"foo.googleapis.com\", 
\"bar.googleapis.com\"],\n" + " \"routes\": [{\n" + + " \"match\": {\n" + + " \"prefix\": \"\"\n" + + " },\n" + " \"route\": {\n" + " \"cluster\": \"whatever cluster\"\n" + " }\n" + " }, {\n" + + " \"match\": {\n" + + " \"prefix\": \"\"\n" + + " },\n" + " \"route\": {\n" + " \"cluster\": \"cluster.googleapis.com\"\n" + " }\n" diff --git a/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java b/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java index a31406d6a7d..6ef308f0e3d 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java @@ -45,6 +45,7 @@ import io.envoyproxy.envoy.api.v2.listener.FilterChain; import io.envoyproxy.envoy.api.v2.route.Route; import io.envoyproxy.envoy.api.v2.route.RouteAction; +import io.envoyproxy.envoy.api.v2.route.RouteMatch; import io.envoyproxy.envoy.api.v2.route.VirtualHost; import io.envoyproxy.envoy.config.listener.v2.ApiListener; import io.envoyproxy.envoy.type.FractionalPercent; @@ -104,17 +105,19 @@ static RouteConfiguration buildRouteConfiguration(String name, } static VirtualHost buildVirtualHost(List domains, String clusterName) { - return - VirtualHost.newBuilder() - .setName("virtualhost00.googleapis.com") // don't care - .addAllDomains(domains) - .addRoutes(Route.newBuilder() - .setRoute(RouteAction.newBuilder().setCluster("whatever cluster"))) - .addRoutes( - // Only the last (default) route matters. - Route.newBuilder() - .setRoute(RouteAction.newBuilder().setCluster(clusterName))) - .build(); + return VirtualHost.newBuilder() + .setName("virtualhost00.googleapis.com") // don't care + .addAllDomains(domains) + .addRoutes( + Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster("whatever cluster")) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .addRoutes( + // Only the last (default) route matters. 
+ Route.newBuilder() + .setRoute(RouteAction.newBuilder().setCluster(clusterName)) + .setMatch(RouteMatch.newBuilder().setPrefix(""))) + .build(); } static Cluster buildCluster(String clusterName, @Nullable String edsServiceName, From c0f37e59abc40d699d85aaed5b10f100feb6908e Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 31 Jan 2020 10:41:43 -0800 Subject: [PATCH 35/86] core, grpclb: change policy selection strategy for Grpclb policy (take one: eliminate special logic for deciding grpclb policy in core) (#6637) First take for grpclb selection stabilization: 1. Changed DnsNameResolver to return balancer addresses as a GrpcAttributes.ATTR_LB_ADDRS attribute in ResolutionResult, instead of among the addresses. 2. AutoConfiguredLoadBalancerFactory decides LB policy solely based on parsed service config without looking at resolved addresses. Behavior changes: - If no LB policy is specified in service config, default to pick_first, even if there exist balancer addresses (in attributes). - If grpclb specified but not available and no other specified policies available, it will fail without fallback to round_robin. 3. GrpclbLoadBalancer populates balancer addresses from ResolvedAddresses's attribute (GrpclbConstants.ATTR_LB_ADDRS) instead of sieving from addresses. 
--- .../alts/internal/AltsProtocolNegotiator.java | 1 + .../AutoConfiguredLoadBalancerFactory.java | 139 ++----- .../io/grpc/internal/DnsNameResolver.java | 21 +- .../java/io/grpc/internal/GrpcAttributes.java | 14 + .../internal/JndiResourceResolverFactory.java | 1 + ...AutoConfiguredLoadBalancerFactoryTest.java | 375 +++++------------- .../io/grpc/internal/DnsNameResolverTest.java | 118 ++++-- .../internal/JndiResourceResolverTest.java | 2 +- .../java/io/grpc/grpclb/GrpclbConstants.java | 11 + .../io/grpc/grpclb/GrpclbLoadBalancer.java | 33 +- .../main/java/io/grpc/grpclb/GrpclbState.java | 2 +- .../grpc/grpclb/GrpclbLoadBalancerTest.java | 375 ++++++++++-------- xds/src/main/java/io/grpc/xds/FallbackLb.java | 3 + 13 files changed, 484 insertions(+), 611 deletions(-) diff --git a/alts/src/main/java/io/grpc/alts/internal/AltsProtocolNegotiator.java b/alts/src/main/java/io/grpc/alts/internal/AltsProtocolNegotiator.java index 1e754f7e4f2..2a77e15bbb7 100644 --- a/alts/src/main/java/io/grpc/alts/internal/AltsProtocolNegotiator.java +++ b/alts/src/main/java/io/grpc/alts/internal/AltsProtocolNegotiator.java @@ -223,6 +223,7 @@ public AsciiString scheme() { return SCHEME; } + @SuppressWarnings("deprecation") @Override public ChannelHandler newHandler(GrpcHttp2ConnectionHandler grpcHandler) { ChannelHandler gnh = InternalProtocolNegotiators.grpcNegotiationHandler(grpcHandler); diff --git a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java index d0a71b3ded2..bb12bfb3c6b 100644 --- a/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java +++ b/core/src/main/java/io/grpc/internal/AutoConfiguredLoadBalancerFactory.java @@ -41,18 +41,13 @@ import io.grpc.Status; import io.grpc.internal.ServiceConfigUtil.LbConfig; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.logging.Logger; import 
javax.annotation.Nullable; // TODO(creamsoup) fully deprecate LoadBalancer.ATTR_LOAD_BALANCING_CONFIG @SuppressWarnings("deprecation") public final class AutoConfiguredLoadBalancerFactory { - private static final Logger logger = - Logger.getLogger(AutoConfiguredLoadBalancerFactory.class.getName()); - private static final String GRPCLB_POLICY_NAME = "grpclb"; private final LoadBalancerRegistry registry; private final String defaultPolicy; @@ -92,7 +87,6 @@ public final class AutoConfiguredLoadBalancer { private final Helper helper; private LoadBalancer delegate; private LoadBalancerProvider delegateProvider; - private boolean roundRobinDueToGrpclbDepMissing; AutoConfiguredLoadBalancer(Helper helper) { this.helper = helper; @@ -125,48 +119,53 @@ Status tryHandleResolvedAddresses(ResolvedAddresses resolvedAddresses) { } PolicySelection policySelection = (PolicySelection) resolvedAddresses.getLoadBalancingPolicyConfig(); - ResolvedPolicySelection resolvedSelection; - try { - resolvedSelection = resolveLoadBalancerProvider(servers, policySelection); - } catch (PolicyException e) { - Status s = Status.INTERNAL.withDescription(e.getMessage()); - helper.updateBalancingState(ConnectivityState.TRANSIENT_FAILURE, new FailingPicker(s)); - delegate.shutdown(); - delegateProvider = null; - delegate = new NoopLoadBalancer(); - return Status.OK; + if (policySelection == null) { + LoadBalancerProvider defaultProvider; + try { + defaultProvider = getProviderOrThrow(defaultPolicy, "using default policy"); + } catch (PolicyException e) { + Status s = Status.INTERNAL.withDescription(e.getMessage()); + helper.updateBalancingState(ConnectivityState.TRANSIENT_FAILURE, new FailingPicker(s)); + delegate.shutdown(); + delegateProvider = null; + delegate = new NoopLoadBalancer(); + return Status.OK; + } + policySelection = + new PolicySelection(defaultProvider, /* rawConfig= */ null, /* config= */ null); } - PolicySelection selection = resolvedSelection.policySelection; if (delegateProvider 
== null - || !selection.provider.getPolicyName().equals(delegateProvider.getPolicyName())) { + || !policySelection.provider.getPolicyName().equals(delegateProvider.getPolicyName())) { helper.updateBalancingState(ConnectivityState.CONNECTING, new EmptyPicker()); delegate.shutdown(); - delegateProvider = selection.provider; + delegateProvider = policySelection.provider; LoadBalancer old = delegate; delegate = delegateProvider.newLoadBalancer(helper); helper.getChannelLogger().log( ChannelLogLevel.INFO, "Load balancer changed from {0} to {1}", old.getClass().getSimpleName(), delegate.getClass().getSimpleName()); } - Object lbConfig = selection.config; + Object lbConfig = policySelection.config; if (lbConfig != null) { helper.getChannelLogger().log( - ChannelLogLevel.DEBUG, "Load-balancing config: {0}", selection.config); + ChannelLogLevel.DEBUG, "Load-balancing config: {0}", policySelection.config); attributes = - attributes.toBuilder().set(ATTR_LOAD_BALANCING_CONFIG, selection.rawConfig).build(); + attributes.toBuilder() + .set(ATTR_LOAD_BALANCING_CONFIG, policySelection.rawConfig) + .build(); } LoadBalancer delegate = getDelegate(); - if (resolvedSelection.serverList.isEmpty() + if (resolvedAddresses.getAddresses().isEmpty() && !delegate.canHandleEmptyAddressListFromNameResolution()) { return Status.UNAVAILABLE.withDescription( "NameResolver returned no usable address. addrs=" + servers + ", attrs=" + attributes); } else { delegate.handleResolvedAddresses( ResolvedAddresses.newBuilder() - .setAddresses(resolvedSelection.serverList) + .setAddresses(resolvedAddresses.getAddresses()) .setAttributes(attributes) .setLoadBalancingPolicyConfig(lbConfig) .build()); @@ -206,78 +205,6 @@ void setDelegate(LoadBalancer lb) { LoadBalancerProvider getDelegateProvider() { return delegateProvider; } - - /** - * Resolves a load balancer based on given criteria. If policySelection is {@code null} and - * given servers contains any gRPC LB addresses, it will fall back to "grpclb". 
If no gRPC LB - * addresses are not present, it will fall back to {@link #defaultPolicy}. - * - * @param servers The list of servers reported - * @param policySelection the selected policy from raw service config - * @return the resolved policy selection - */ - @VisibleForTesting - ResolvedPolicySelection resolveLoadBalancerProvider( - List servers, @Nullable PolicySelection policySelection) - throws PolicyException { - // Check for balancer addresses - boolean haveBalancerAddress = false; - List backendAddrs = new ArrayList<>(); - for (EquivalentAddressGroup s : servers) { - if (s.getAttributes().get(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY) != null) { - haveBalancerAddress = true; - } else { - backendAddrs.add(s); - } - } - - if (policySelection != null) { - String policyName = policySelection.provider.getPolicyName(); - return new ResolvedPolicySelection( - policySelection, policyName.equals(GRPCLB_POLICY_NAME) ? servers : backendAddrs); - } - - if (haveBalancerAddress) { - // This is a special case where the existence of balancer address in the resolved address - // selects "grpclb" policy if the service config couldn't select a policy - LoadBalancerProvider grpclbProvider = registry.getProvider(GRPCLB_POLICY_NAME); - if (grpclbProvider == null) { - if (backendAddrs.isEmpty()) { - throw new PolicyException( - "Received ONLY balancer addresses but grpclb runtime is missing"); - } - if (!roundRobinDueToGrpclbDepMissing) { - // We don't log the warning every time we have an update. - roundRobinDueToGrpclbDepMissing = true; - String errorMsg = "Found balancer addresses but grpclb runtime is missing." - + " Will use round_robin. 
Please include grpc-grpclb in your runtime dependencies."; - helper.getChannelLogger().log(ChannelLogLevel.ERROR, errorMsg); - logger.warning(errorMsg); - } - return new ResolvedPolicySelection( - new PolicySelection( - getProviderOrThrow( - "round_robin", "received balancer addresses but grpclb runtime is missing"), - /* rawConfig = */ null, - /* config= */ null), - backendAddrs); - } - return new ResolvedPolicySelection( - new PolicySelection( - grpclbProvider, /* rawConfig= */ null, /* config= */ null), servers); - } - // No balancer address this time. If balancer address shows up later, we want to make sure - // the warning is logged one more time. - roundRobinDueToGrpclbDepMissing = false; - - // No config nor balancer address. Use default. - return new ResolvedPolicySelection( - new PolicySelection( - getProviderOrThrow(defaultPolicy, "using default policy"), - /* rawConfig= */ null, - /* config= */ null), - servers); - } } private LoadBalancerProvider getProviderOrThrow(String policy, String choiceReason) @@ -406,26 +333,6 @@ public String toString() { } } - @VisibleForTesting - static final class ResolvedPolicySelection { - final PolicySelection policySelection; - final List serverList; - - ResolvedPolicySelection( - PolicySelection policySelection, List serverList) { - this.policySelection = checkNotNull(policySelection, "policySelection"); - this.serverList = Collections.unmodifiableList(checkNotNull(serverList, "serverList")); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("policySelection", policySelection) - .add("serverList", serverList) - .toString(); - } - } - private static final class EmptyPicker extends SubchannelPicker { @Override diff --git a/core/src/main/java/io/grpc/internal/DnsNameResolver.java b/core/src/main/java/io/grpc/internal/DnsNameResolver.java index f81c577820f..9a6224d4ac7 100644 --- a/core/src/main/java/io/grpc/internal/DnsNameResolver.java +++ 
b/core/src/main/java/io/grpc/internal/DnsNameResolver.java @@ -298,14 +298,12 @@ public void run() { for (InetAddress inetAddr : resolutionResults.addresses) { servers.add(new EquivalentAddressGroup(new InetSocketAddress(inetAddr, port))); } - servers.addAll(resolutionResults.balancerAddresses); - if (servers.isEmpty()) { - savedListener.onError(Status.UNAVAILABLE.withDescription( - "No DNS backend or balancer addresses found for " + host)); - return; - } ResolutionResult.Builder resultBuilder = ResolutionResult.newBuilder().setAddresses(servers); + Attributes.Builder attributesBuilder = Attributes.newBuilder(); + if (!resolutionResults.balancerAddresses.isEmpty()) { + attributesBuilder.set(GrpcAttributes.ATTR_LB_ADDRS, resolutionResults.balancerAddresses); + } if (!resolutionResults.txtRecords.isEmpty()) { ConfigOrError rawServiceConfig = parseServiceConfig(resolutionResults.txtRecords, random, getLocalHostname()); @@ -319,17 +317,14 @@ public void run() { Map verifiedRawServiceConfig = (Map) rawServiceConfig.getConfig(); ConfigOrError parsedServiceConfig = serviceConfigParser.parseServiceConfig(verifiedRawServiceConfig); - resultBuilder - .setAttributes( - Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, verifiedRawServiceConfig) - .build()) - .setServiceConfig(parsedServiceConfig); + resultBuilder.setServiceConfig(parsedServiceConfig); + attributesBuilder + .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, verifiedRawServiceConfig); } } else { logger.log(Level.FINE, "No TXT records found for {0}", new Object[]{host}); } - savedListener.onResult(resultBuilder.build()); + savedListener.onResult(resultBuilder.setAttributes(attributesBuilder.build()).build()); } } diff --git a/core/src/main/java/io/grpc/internal/GrpcAttributes.java b/core/src/main/java/io/grpc/internal/GrpcAttributes.java index b8138d7f26f..b7210a16778 100644 --- a/core/src/main/java/io/grpc/internal/GrpcAttributes.java +++ 
b/core/src/main/java/io/grpc/internal/GrpcAttributes.java @@ -21,6 +21,7 @@ import io.grpc.Grpc; import io.grpc.NameResolver; import io.grpc.SecurityLevel; +import java.util.List; import java.util.Map; /** @@ -37,10 +38,23 @@ public final class GrpcAttributes { public static final Attributes.Key> NAME_RESOLVER_SERVICE_CONFIG = Attributes.Key.create("service-config"); + /** + * Attribute key for gRPC LB server addresses. + * + *

    Deprecated: this will be used for grpclb specific logic, which will be moved out of core. + */ + @Deprecated + @NameResolver.ResolutionResultAttr + public static final Attributes.Key> ATTR_LB_ADDRS = + Attributes.Key.create("io.grpc.grpclb.lbAddrs"); + /** * The naming authority of a gRPC LB server address. It is an address-group-level attribute, * present when the address group is a LoadBalancer. + * + *

    Deprecated: this will be used for grpclb specific logic, which will be moved out of core. */ + @Deprecated @EquivalentAddressGroup.Attr public static final Attributes.Key ATTR_LB_ADDR_AUTHORITY = Attributes.Key.create("io.grpc.grpclb.lbAddrAuthority"); diff --git a/core/src/main/java/io/grpc/internal/JndiResourceResolverFactory.java b/core/src/main/java/io/grpc/internal/JndiResourceResolverFactory.java index 518393b43be..346ac1534c1 100644 --- a/core/src/main/java/io/grpc/internal/JndiResourceResolverFactory.java +++ b/core/src/main/java/io/grpc/internal/JndiResourceResolverFactory.java @@ -129,6 +129,7 @@ public List resolveTxt(String serviceConfigHostname) throws NamingExcept return Collections.unmodifiableList(serviceConfigTxtRecords); } + @SuppressWarnings("deprecation") @Override public List resolveSrv( AddressResolver addressResolver, String grpclbHostname) throws Exception { diff --git a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java index c0918151790..fda33cdf07e 100644 --- a/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java +++ b/core/src/test/java/io/grpc/internal/AutoConfiguredLoadBalancerFactoryTest.java @@ -19,13 +19,10 @@ import static com.google.common.truth.Truth.assertThat; import static io.grpc.LoadBalancer.ATTR_LOAD_BALANCING_CONFIG; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import static org.mockito.AdditionalAnswers.delegatesTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; -import static org.mockito.ArgumentMatchers.startsWith; -import static org.mockito.Mockito.RETURNS_DEEP_STUBS; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -50,23 +47,18 @@ import 
io.grpc.LoadBalancer.SubchannelStateListener; import io.grpc.LoadBalancerProvider; import io.grpc.LoadBalancerRegistry; -import io.grpc.ManagedChannel; import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; -import io.grpc.SynchronizationContext; import io.grpc.grpclb.GrpclbLoadBalancerProvider; import io.grpc.internal.AutoConfiguredLoadBalancerFactory.AutoConfiguredLoadBalancer; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory.PolicyException; import io.grpc.internal.AutoConfiguredLoadBalancerFactory.PolicySelection; -import io.grpc.internal.AutoConfiguredLoadBalancerFactory.ResolvedPolicySelection; import io.grpc.util.ForwardingLoadBalancerHelper; +import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -308,37 +300,42 @@ public void handleResolvedAddressGroups_propagateLbConfigToDelegate() throws Exc } @Test - public void handleResolvedAddressGroups_propagateOnlyBackendAddrsToDelegate() throws Exception { - // This case only happens when grpclb is missing. 
We will use a local registry - LoadBalancerRegistry registry = new LoadBalancerRegistry(); - registry.register(new PickFirstLoadBalancerProvider()); - registry.register( - new FakeLoadBalancerProvider( - "round_robin", testLbBalancer, /* nextParsedLbPolicyConfig= */ null)); + public void handleResolvedAddressGroups_propagateAddrsToDelegate() throws Exception { + Map rawServiceConfig = + parseConfig("{\"loadBalancingConfig\": [ {\"test_lb\": { \"setting1\": \"high\" } } ] }"); + ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfigs.getConfig()).isNotNull(); - final List servers = - Arrays.asList( - new EquivalentAddressGroup(new SocketAddress(){}), - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); Helper helper = new TestHelper(); - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory( - registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(helper); + AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); + List servers = + Collections.singletonList(new EquivalentAddressGroup(new InetSocketAddress(8080){})); Status handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(servers) - .setAttributes(Attributes.EMPTY) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); + verify(testLbBalancerProvider).newLoadBalancer(same(helper)); assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer); - verify(testLbBalancer).handleResolvedAddresses( + ArgumentCaptor resultCaptor = + ArgumentCaptor.forClass(ResolvedAddresses.class); + verify(testLbBalancer).handleResolvedAddresses(resultCaptor.capture()); + assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); + + servers = + Collections.singletonList(new EquivalentAddressGroup(new InetSocketAddress(9090){})); 
+ handleResult = lb.tryHandleResolvedAddresses( ResolvedAddresses.newBuilder() - .setAddresses(Collections.singletonList(servers.get(0))) - .setAttributes(Attributes.EMPTY) + .setAddresses(servers) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) .build()); + + assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); + verify(testLbBalancer, times(2)).handleResolvedAddresses(resultCaptor.capture()); + assertThat(resultCaptor.getValue().getAddresses()).containsExactlyElementsIn(servers).inOrder(); } @Test @@ -392,267 +389,79 @@ public void handleResolvedAddressGroups_delegateAcceptsEmptyAddressList() } @Test - public void decideLoadBalancerProvider_noBalancerAddresses_noServiceConfig_pickFirst() - throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - PolicySelection policySelection = null; - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider) - .isInstanceOf(PickFirstLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_noBalancerAddresses_noServiceConfig_customDefault() - throws Exception { - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory("test_lb") - .newLoadBalancer(new TestHelper()); - PolicySelection policySelection = null; - List servers = - Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider).isSameInstanceAs(testLbBalancerProvider); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - 
verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_oneBalancer_noServiceConfig_grpclb() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - PolicySelection policySelection = null; - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbPolicy() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig("{\"loadBalancingPolicy\": \"round_robin\"}"); - - ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfig.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); - List servers = - Arrays.asList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), - new EquivalentAddressGroup( - new SocketAddress(){})); - List backends = Arrays.asList(servers.get(1)); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.serverList).isEqualTo(backends); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_serviceConfigLbConfig() throws Exception { - 
AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); + public void handleResolvedAddressGroups_useSelectedLbPolicy() throws Exception { Map rawServiceConfig = parseConfig("{\"loadBalancingConfig\": [{\"round_robin\": {}}]}"); + ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(lbConfigs.getConfig()).isNotNull(); + assertThat(((PolicySelection) lbConfigs.getConfig()).provider.getClass().getName()) + .isEqualTo("io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfig.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); - List servers = - Arrays.asList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), - new EquivalentAddressGroup( - new SocketAddress(){})); - List backends = Arrays.asList(servers.get(1)); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - assertThat(selection.serverList).isEqualTo(backends); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_grpclbConfigPropagated() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig( - "{\"loadBalancingConfig\": [" - + "{\"grpclb\": {\"childPolicy\": [ {\"pick_first\": {} } ] } }" - + "] }"); - ConfigOrError lbConfig = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfig.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfig.getConfig(); - - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new 
SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config) - .isEqualTo(((PolicySelection) lbConfig.getConfig()).config); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_policyUnavailButGrpclbAddressPresent() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, null); - - assertThat(selection.policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); - assertThat(selection.serverList).isEqualTo(servers); - assertThat(selection.policySelection.config).isNull(); - verifyZeroInteractions(channelLogger); - } - - @Test - public void decideLoadBalancerProvider_grpclbProviderNotFound_fallbackToRoundRobin() - throws Exception { - LoadBalancerRegistry registry = new LoadBalancerRegistry(); - registry.register(new PickFirstLoadBalancerProvider()); - LoadBalancerProvider fakeRoundRobinProvider = - new FakeLoadBalancerProvider("round_robin", testLbBalancer, null); - registry.register(fakeRoundRobinProvider); - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory( - registry, GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(new TestHelper()); - List servers = - Arrays.asList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build()), - new EquivalentAddressGroup(new SocketAddress(){})); - 
ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, null); - - assertThat(selection.policySelection.provider).isSameInstanceAs(fakeRoundRobinProvider); - assertThat(selection.policySelection.config).isNull(); - verify(channelLogger).log( - eq(ChannelLogLevel.ERROR), - startsWith("Found balancer addresses but grpclb runtime is missing")); - - // Called for the second time, the warning is only logged once - selection = lb.resolveLoadBalancerProvider(servers, null); - - assertThat(selection.policySelection.provider).isSameInstanceAs(fakeRoundRobinProvider); - assertThat(selection.policySelection.config).isNull(); - // Balancer addresses are filtered out in the server list passed to round_robin - assertThat(selection.serverList).containsExactly(servers.get(1)); - verifyNoMoreInteractions(channelLogger);; + final List servers = + Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); + Helper helper = new TestHelper() { + @Override + public Subchannel createSubchannel(CreateSubchannelArgs args) { + assertThat(args.getAddresses()).isEqualTo(servers); + return new TestSubchannel(args); + } + }; + AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); + Status handleResult = lb.tryHandleResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(servers) + .setLoadBalancingPolicyConfig(lbConfigs.getConfig()) + .build()); + assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); + assertThat(lb.getDelegate().getClass().getName()) + .isEqualTo("io.grpc.util.RoundRobinLoadBalancer"); } @Test - public void decideLoadBalancerProvider_grpclbProviderNotFound_noBackendAddress() - throws Exception { - LoadBalancerRegistry registry = new LoadBalancerRegistry(); - registry.register(new PickFirstLoadBalancerProvider()); - registry.register(new FakeLoadBalancerProvider("round_robin", testLbBalancer, null)); - AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory( - registry, 
GrpcUtil.DEFAULT_LB_POLICY).newLoadBalancer(new TestHelper()); - List servers = - Collections.singletonList( - new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - try { - lb.resolveLoadBalancerProvider(servers, null); - fail("Should throw"); - } catch (PolicyException e) { - assertThat(e) - .hasMessageThat() - .isEqualTo("Received ONLY balancer addresses but grpclb runtime is missing"); - } + public void handleResolvedAddressGroups_noLbPolicySelected_defaultToPickFirst() { + final List servers = + Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); + Helper helper = new TestHelper() { + @Override + public Subchannel createSubchannel(CreateSubchannelArgs args) { + assertThat(args.getAddresses()).isEqualTo(servers); + return new TestSubchannel(args); + } + }; + AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(helper); + Status handleResult = lb.tryHandleResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(servers) + .setLoadBalancingPolicyConfig(null) + .build()); + assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); + assertThat(lb.getDelegate()).isInstanceOf(PickFirstLoadBalancer.class); } @Test - public void decideLoadBalancerProvider_serviceConfigLbConfigOverridesDefault() throws Exception { - AutoConfiguredLoadBalancer lb = lbf.newLoadBalancer(new TestHelper()); - Map rawServiceConfig = - parseConfig("{\"loadBalancingConfig\": [ {\"round_robin\": {} } ] }"); - ConfigOrError lbConfigs = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); - assertThat(lbConfigs.getConfig()).isNotNull(); - PolicySelection policySelection = (PolicySelection) lbConfigs.getConfig(); + public void handleResolvedAddressGroups_noLbPolicySelected_defaultToCustomDefault() { + AutoConfiguredLoadBalancer lb = new AutoConfiguredLoadBalancerFactory("test_lb") + .newLoadBalancer(new TestHelper()); List servers = Collections.singletonList(new 
EquivalentAddressGroup(new SocketAddress(){})); - - ResolvedPolicySelection selection = lb.resolveLoadBalancerProvider(servers, policySelection); - - assertThat(selection.policySelection.provider.getClass().getName()).isEqualTo( - "io.grpc.util.SecretRoundRobinLoadBalancerProvider$Provider"); - verifyZeroInteractions(channelLogger); + Status handleResult = lb.tryHandleResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(servers) + .setLoadBalancingPolicyConfig(null) + .build()); + assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); + assertThat(lb.getDelegate()).isSameInstanceAs(testLbBalancer); } @Test public void channelTracing_lbPolicyChanged() throws Exception { - final FakeClock clock = new FakeClock(); List servers = Collections.singletonList(new EquivalentAddressGroup(new SocketAddress(){})); Helper helper = new TestHelper() { - @Override - @Deprecated - public Subchannel createSubchannel(List addrs, Attributes attrs) { - return new TestSubchannel(CreateSubchannelArgs.newBuilder() - .setAddresses(addrs) - .setAttributes(attrs) - .build()); - } - @Override public Subchannel createSubchannel(CreateSubchannelArgs args) { return new TestSubchannel(args); } - - @Override - public ManagedChannel createOobChannel(EquivalentAddressGroup eag, String authority) { - return mock(ManagedChannel.class, RETURNS_DEEP_STUBS); - } - - @Override - public String getAuthority() { - return "fake_authority"; - } - - @Override - public SynchronizationContext getSynchronizationContext() { - return new SynchronizationContext( - new Thread.UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - throw new AssertionError(e); - } - }); - } - - @Override - public ScheduledExecutorService getScheduledExecutorService() { - return clock.getScheduledExecutorService(); - } }; AutoConfiguredLoadBalancer lb = @@ -705,23 +514,6 @@ public ScheduledExecutorService getScheduledExecutorService() { eq("Load-balancing config: {0}"), 
eq(testLbParsedConfig.getConfig())); verifyNoMoreInteractions(channelLogger); - - servers = Collections.singletonList(new EquivalentAddressGroup( - new SocketAddress(){}, - Attributes.newBuilder().set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "ok").build())); - handleResult = lb.tryHandleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(servers) - .setAttributes(Attributes.EMPTY) - .build()); - - assertThat(handleResult.getCode()).isEqualTo(Status.Code.OK); - verify(channelLogger).log( - eq(ChannelLogLevel.INFO), - eq("Load balancer changed from {0} to {1}"), - eq(testLbBalancer.getClass().getSimpleName()), eq("GrpclbLoadBalancer")); - - verifyNoMoreInteractions(channelLogger); } @Test @@ -834,6 +626,21 @@ public void parseLoadBalancerConfig_someProvidesAreNotAvailable() throws Excepti eq(new ArrayList<>(Collections.singletonList("magic_balancer")))); } + @Test + public void parseLoadBalancerConfig_lbConfigPropagated() throws Exception { + Map rawServiceConfig = + parseConfig( + "{\"loadBalancingConfig\": [" + + "{\"grpclb\": {\"childPolicy\": [ {\"pick_first\": {} } ] } }" + + "] }"); + ConfigOrError parsed = lbf.parseLoadBalancerPolicy(rawServiceConfig, channelLogger); + assertThat(parsed).isNotNull(); + assertThat(parsed.getConfig()).isNotNull(); + PolicySelection policySelection = (PolicySelection) parsed.getConfig(); + assertThat(policySelection.config).isNotNull(); + assertThat(policySelection.provider).isInstanceOf(GrpclbLoadBalancerProvider.class); + verifyZeroInteractions(channelLogger); + } public static class ForwardingLoadBalancer extends LoadBalancer { private final LoadBalancer delegate; diff --git a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java index 192d03d3341..26cbc9dd7f1 100644 --- a/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java +++ b/core/src/test/java/io/grpc/internal/DnsNameResolverTest.java @@ -36,6 +36,7 @@ import 
com.google.common.collect.Iterables; import com.google.common.net.InetAddresses; import com.google.common.testing.FakeTicker; +import io.grpc.Attributes; import io.grpc.ChannelLogger; import io.grpc.EquivalentAddressGroup; import io.grpc.HttpConnectProxiedSocketAddress; @@ -46,7 +47,6 @@ import io.grpc.ProxyDetector; import io.grpc.StaticTestingClassLoader; import io.grpc.Status; -import io.grpc.Status.Code; import io.grpc.SynchronizationContext; import io.grpc.internal.DnsNameResolver.AddressResolver; import io.grpc.internal.DnsNameResolver.ResolutionResults; @@ -154,7 +154,8 @@ private DnsNameResolver newResolver(String name, int defaultPort) { private DnsNameResolver newResolver(String name, int defaultPort, boolean isAndroid) { return newResolver( - name, defaultPort, GrpcUtil.NOOP_PROXY_DETECTOR, Stopwatch.createUnstarted(), isAndroid); + name, defaultPort, GrpcUtil.NOOP_PROXY_DETECTOR, Stopwatch.createUnstarted(), + isAndroid, false); } private DnsNameResolver newResolver( @@ -162,7 +163,7 @@ private DnsNameResolver newResolver( int defaultPort, ProxyDetector proxyDetector, Stopwatch stopwatch) { - return newResolver(name, defaultPort, proxyDetector, stopwatch, false); + return newResolver(name, defaultPort, proxyDetector, stopwatch, false, false); } private DnsNameResolver newResolver( @@ -170,7 +171,8 @@ private DnsNameResolver newResolver( final int defaultPort, final ProxyDetector proxyDetector, Stopwatch stopwatch, - boolean isAndroid) { + boolean isAndroid, + boolean enableSrv) { NameResolver.Args args = NameResolver.Args.newBuilder() .setDefaultPort(defaultPort) @@ -179,19 +181,34 @@ private DnsNameResolver newResolver( .setServiceConfigParser(mock(ServiceConfigParser.class)) .setChannelLogger(mock(ChannelLogger.class)) .build(); - return newResolver(name, stopwatch, isAndroid, args); + return newResolver(name, stopwatch, isAndroid, args, enableSrv); } private DnsNameResolver newResolver( String name, Stopwatch stopwatch, boolean isAndroid, 
NameResolver.Args args) { + return newResolver(name, stopwatch, isAndroid, args, /* enableSrv= */ false); + } + + private DnsNameResolver newResolver( + String name, + Stopwatch stopwatch, + boolean isAndroid, + NameResolver.Args args, + boolean enableSrv) { DnsNameResolver dnsResolver = new DnsNameResolver( - null, name, args, fakeExecutorResource, stopwatch, isAndroid, /* enableSrv= */ false); + null, name, args, fakeExecutorResource, stopwatch, isAndroid, enableSrv); // By default, using the mocked ResourceResolver to avoid I/O dnsResolver.setResourceResolver(new JndiResourceResolver(recordFetcher)); return dnsResolver; } + private DnsNameResolver newSrvEnabledResolver(String name, int defaultPort) { + return newResolver( + name, defaultPort, GrpcUtil.NOOP_PROXY_DETECTOR, Stopwatch.createUnstarted(), + false, true); + } + @Before public void setUp() { DnsNameResolver.enableJndi = true; @@ -363,26 +380,6 @@ public void execute(Runnable command) { assertThat(executions.get()).isEqualTo(1); } - @Test - public void resolveAll_failsOnEmptyResult() { - DnsNameResolver nr = newResolver("dns:///addr.fake:1234", 443); - nr.setAddressResolver(new AddressResolver() { - @Override - public List resolveAddress(String host) throws Exception { - return Collections.emptyList(); - } - }); - - nr.start(mockListener); - assertThat(fakeExecutor.runDueTasks()).isEqualTo(1); - - ArgumentCaptor ac = ArgumentCaptor.forClass(Status.class); - verify(mockListener).onError(ac.capture()); - verifyNoMoreInteractions(mockListener); - assertThat(ac.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); - assertThat(ac.getValue().getDescription()).contains("No DNS backend or balancer addresses"); - } - @Test public void resolve_cacheForever() throws Exception { System.setProperty(DnsNameResolver.NETWORKADDRESS_CACHE_TTL_PROPERTY, "-1"); @@ -531,6 +528,75 @@ private void resolveDefaultValue() throws Exception { verify(mockResolver, times(2)).resolveAddress(anyString()); } + @Test + public void 
resolve_emptyResult() { + DnsNameResolver nr = newResolver("dns:///addr.fake:1234", 443); + nr.setAddressResolver(new AddressResolver() { + @Override + public List resolveAddress(String host) throws Exception { + return Collections.emptyList(); + } + }); + nr.setResourceResolver(new ResourceResolver() { + @Override + public List resolveTxt(String host) throws Exception { + return Collections.emptyList(); + } + + @Override + public List resolveSrv(AddressResolver addressResolver, String host) + throws Exception { + return Collections.emptyList(); + } + }); + + nr.start(mockListener); + assertThat(fakeExecutor.runDueTasks()).isEqualTo(1); + + ArgumentCaptor ac = ArgumentCaptor.forClass(ResolutionResult.class); + verify(mockListener).onResult(ac.capture()); + verifyNoMoreInteractions(mockListener); + assertThat(ac.getValue().getAddresses()).isEmpty(); + assertThat(ac.getValue().getAttributes()).isEqualTo(Attributes.EMPTY); + assertThat(ac.getValue().getServiceConfig()).isNull(); + } + + @SuppressWarnings("deprecation") + @Test + public void resolve_balancerAddrsAsAttributes() throws Exception { + InetAddress backendAddr = InetAddress.getByAddress(new byte[] {127, 0, 0, 0}); + final EquivalentAddressGroup balancerAddr = + new EquivalentAddressGroup( + new SocketAddress() {}, + Attributes.newBuilder() + .set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "foo.example.com") + .build()); + String name = "foo.googleapis.com"; + + AddressResolver mockAddressResolver = mock(AddressResolver.class); + when(mockAddressResolver.resolveAddress(anyString())) + .thenReturn(Collections.singletonList(backendAddr)); + ResourceResolver mockResourceResolver = mock(ResourceResolver.class); + when(mockResourceResolver.resolveTxt(anyString())).thenReturn(Collections.emptyList()); + when(mockResourceResolver.resolveSrv(ArgumentMatchers.any(AddressResolver.class), anyString())) + .thenReturn(Collections.singletonList(balancerAddr)); + + DnsNameResolver resolver = newSrvEnabledResolver(name, 81); + 
resolver.setAddressResolver(mockAddressResolver); + resolver.setResourceResolver(mockResourceResolver); + + resolver.start(mockListener); + assertEquals(1, fakeExecutor.runDueTasks()); + verify(mockListener).onResult(resultCaptor.capture()); + ResolutionResult result = resultCaptor.getValue(); + InetSocketAddress resolvedBackendAddr = + (InetSocketAddress) Iterables.getOnlyElement( + Iterables.getOnlyElement(result.getAddresses()).getAddresses()); + assertThat(resolvedBackendAddr.getAddress()).isEqualTo(backendAddr); + assertThat(result.getAttributes().get(GrpcAttributes.ATTR_LB_ADDRS)) + .containsExactly(balancerAddr); + } + @Test public void resolveAll_nullResourceResolver() throws Exception { final String hostname = "addr.fake"; diff --git a/core/src/test/java/io/grpc/internal/JndiResourceResolverTest.java b/core/src/test/java/io/grpc/internal/JndiResourceResolverTest.java index c2e9111a50c..965ef8f51cf 100644 --- a/core/src/test/java/io/grpc/internal/JndiResourceResolverTest.java +++ b/core/src/test/java/io/grpc/internal/JndiResourceResolverTest.java @@ -24,7 +24,6 @@ import io.grpc.Attributes; import io.grpc.EquivalentAddressGroup; import io.grpc.internal.DnsNameResolver.AddressResolver; -import io.grpc.internal.GrpcAttributes; import io.grpc.internal.JndiResourceResolverFactory.JndiRecordFetcher; import io.grpc.internal.JndiResourceResolverFactory.JndiResourceResolver; import io.grpc.internal.JndiResourceResolverFactory.RecordFetcher; @@ -81,6 +80,7 @@ public void txtRecordLookup() throws Exception { assertThat(resolver.resolveTxt("service.example.com")).isEqualTo(golden); } + @SuppressWarnings("deprecation") @Test public void srvRecordLookup() throws Exception { AddressResolver addressResolver = mock(AddressResolver.class); diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java index 65f4832f540..db5e84f08c6 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java +++ 
b/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java @@ -20,6 +20,7 @@ import io.grpc.EquivalentAddressGroup; import io.grpc.ExperimentalApi; import io.grpc.Metadata; +import java.util.List; /** * Constants for the GRPCLB load-balancer. @@ -41,5 +42,15 @@ public final class GrpclbConstants { static final Attributes.Key TOKEN_ATTRIBUTE_KEY = Attributes.Key.create("lb-token"); + @SuppressWarnings("deprecation") + @EquivalentAddressGroup.Attr + static final Attributes.Key> ATTR_LB_ADDRS = + io.grpc.internal.GrpcAttributes.ATTR_LB_ADDRS; + + @SuppressWarnings("deprecation") + @EquivalentAddressGroup.Attr + static final Attributes.Key ATTR_LB_ADDR_AUTHORITY = + io.grpc.internal.GrpcAttributes.ATTR_LB_ADDR_AUTHORITY; + private GrpclbConstants() { } } diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java index cd539bdb6ac..6174fb5627d 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java @@ -91,22 +91,30 @@ public void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo n @Override @SuppressWarnings("deprecation") // TODO(creamsoup) migrate to use parsed service config public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - List updatedServers = resolvedAddresses.getAddresses(); Attributes attributes = resolvedAddresses.getAttributes(); - // LB addresses and backend addresses are treated separately + List newLbAddresses = attributes.get(GrpcAttributes.ATTR_LB_ADDRS); + if ((newLbAddresses == null || newLbAddresses.isEmpty()) + && resolvedAddresses.getAddresses().isEmpty()) { + handleNameResolutionError( + Status.UNAVAILABLE.withDescription("No backend or balancer addresses found")); + return; + } List newLbAddressGroups = new ArrayList<>(); - List newBackendServers = new ArrayList<>(); - for (EquivalentAddressGroup server : updatedServers) { - String lbAddrAuthority = 
server.getAttributes().get(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY); - if (lbAddrAuthority != null) { - newLbAddressGroups.add(new LbAddressGroup(server, lbAddrAuthority)); - } else { - newBackendServers.add(server); + + if (newLbAddresses != null) { + for (EquivalentAddressGroup lbAddr : newLbAddresses) { + String lbAddrAuthority = lbAddr.getAttributes().get(GrpclbConstants.ATTR_LB_ADDR_AUTHORITY); + if (lbAddrAuthority == null) { + throw new AssertionError( + "This is a bug: LB address " + lbAddr + " does not have an authority."); + } + newLbAddressGroups.add(new LbAddressGroup(lbAddr, lbAddrAuthority)); } } newLbAddressGroups = Collections.unmodifiableList(newLbAddressGroups); - newBackendServers = Collections.unmodifiableList(newBackendServers); + List newBackendServers = + Collections.unmodifiableList(resolvedAddresses.getAddresses()); Map rawLbConfigValue = attributes.get(ATTR_LOAD_BALANCING_CONFIG); Mode newMode = retrieveModeFromLbConfig(rawLbConfigValue, helper.getChannelLogger()); if (!mode.equals(newMode)) { @@ -184,6 +192,11 @@ public void handleNameResolutionError(Status error) { } } + @Override + public boolean canHandleEmptyAddressListFromNameResolution() { + return true; + } + @VisibleForTesting @Nullable GrpclbState getGrpclbState() { diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java index a2679472f6f..4016bc56bc2 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java @@ -794,7 +794,7 @@ private LbAddressGroup flattenLbAddressGroups(List groupList) { // actually used in the normal case. https://github.com/grpc/grpc-java/issues/4618 should allow // this to be more obvious. 
Attributes attrs = Attributes.newBuilder() - .set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, authority) + .set(GrpclbConstants.ATTR_LB_ADDR_AUTHORITY, authority) .build(); return new LbAddressGroup(flattenEquivalentAddressGroup(eags, attrs), authority); } diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java index a88b840d4d4..db424a4a8e3 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java @@ -468,9 +468,11 @@ public void loadReporting() { when(args.getHeaders()).thenReturn(headers); long loadReportIntervalMillis = 1983; - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); // Fallback timer is started as soon as address is resolved. 
assertEquals(1, fakeClock.numPendingTasks(FALLBACK_MODE_TASK_FILTER)); @@ -485,7 +487,7 @@ public void loadReporting() { inOrder.verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Simulate receiving LB response @@ -593,7 +595,7 @@ public void loadReporting() { .setLoadBalanceToken("token0003") .setNumCalls(1) // pick4 .build()) - .build()); + .build()); PickResult pick5 = picker.pickSubchannel(args); assertSame(subchannel1, pick1.getSubchannel()); @@ -641,7 +643,7 @@ public void loadReporting() { inOrder.verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Load reporting is also requested @@ -693,9 +695,11 @@ public void abundantInitialResponse() { PickSubchannelArgs args = mock(PickSubchannelArgs.class); when(args.getHeaders()).thenReturn(headers); - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); assertEquals(1, fakeOobChannels.size()); verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); StreamObserver lbResponseObserver = lbResponseObserverCaptor.getValue(); @@ -731,9 +735,11 @@ public void raceBetweenLoadReportingAndLbStreamClosure() { PickSubchannelArgs args = mock(PickSubchannelArgs.class); when(args.getHeaders()).thenReturn(headers); - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = 
createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); assertEquals(1, fakeOobChannels.size()); verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); StreamObserver lbResponseObserver = lbResponseObserverCaptor.getValue(); @@ -743,7 +749,7 @@ public void raceBetweenLoadReportingAndLbStreamClosure() { inOrder.verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Simulate receiving LB response @@ -780,11 +786,25 @@ private void assertNextReport( eq(LoadBalanceRequest.newBuilder() .setClientStats( ClientStats.newBuilder(expectedReport) - .setTimestamp(Timestamps.fromNanos(fakeClock.getTicker().read())) - .build()) + .setTimestamp(Timestamps.fromNanos(fakeClock.getTicker().read())) + .build()) .build())); } + @Test + public void receiveNoBackendAndBalancerAddress() { + deliverResolvedAddresses( + Collections.emptyList(), + Collections.emptyList(), + Attributes.EMPTY); + verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture()); + RoundRobinPicker picker = (RoundRobinPicker) pickerCaptor.getValue(); + assertThat(picker.dropList).isEmpty(); + Status error = Iterables.getOnlyElement(picker.pickList).picked(new Metadata()).getStatus(); + assertThat(error.getCode()).isEqualTo(Code.UNAVAILABLE); + assertThat(error.getDescription()).isEqualTo("No backend or balancer addresses found"); + } + @Test public void nameResolutionFailsThenRecover() { Status error = Status.NOT_FOUND.withDescription("www.google.com not found"); @@ -803,11 +823,12 @@ public void nameResolutionFailsThenRecover() { 
assertThat(picker.pickList).containsExactly(new ErrorEntry(error)); // Recover with a subsequent success - List resolvedServers = createResolvedServerAddresses(true); - EquivalentAddressGroup eag = resolvedServers.get(0); + List grpclbBalancerList = createResolvedBalancerAddresses(1); + EquivalentAddressGroup eag = grpclbBalancerList.get(0); Attributes resolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(resolvedServers, resolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), grpclbBalancerList, resolutionAttrs); verify(helper).createOobChannel(eq(eag), eq(lbAuthority(0))); verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); @@ -817,11 +838,13 @@ public void nameResolutionFailsThenRecover() { public void grpclbThenNameResolutionFails() { InOrder inOrder = inOrder(helper, subchannelPool); // Go to GRPCLB first - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); - verify(helper).createOobChannel(eq(grpclbResolutionList.get(0)), eq(lbAuthority(0))); + verify(helper).createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); assertEquals(1, fakeOobChannels.size()); ManagedChannel oobChannel = fakeOobChannels.poll(); verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); @@ -854,59 +877,63 @@ public void grpclbThenNameResolutionFails() { @Test public void grpclbUpdatedAddresses_avoidsReconnect() { - List grpclbResolutionList = - createResolvedServerAddresses(true, false); + List backendList = createResolvedBackendAddresses(1); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, 
grpclbResolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, grpclbResolutionAttrs); - verify(helper).createOobChannel(eq(grpclbResolutionList.get(0)), eq(lbAuthority(0))); + verify(helper).createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); ManagedChannel oobChannel = fakeOobChannels.poll(); assertEquals(1, lbRequestObservers.size()); - List grpclbResolutionList2 = - createResolvedServerAddresses(true, false, true); + List backendList2 = createResolvedBackendAddresses(1); + List grpclbBalancerList2 = createResolvedBalancerAddresses(2); EquivalentAddressGroup combinedEag = new EquivalentAddressGroup(Arrays.asList( - grpclbResolutionList2.get(0).getAddresses().get(0), - grpclbResolutionList2.get(2).getAddresses().get(0)), + grpclbBalancerList2.get(0).getAddresses().get(0), + grpclbBalancerList2.get(1).getAddresses().get(0)), lbAttributes(lbAuthority(0))); - deliverResolvedAddresses(grpclbResolutionList2, grpclbResolutionAttrs); + deliverResolvedAddresses(backendList2, grpclbBalancerList2, grpclbResolutionAttrs); verify(helper).updateOobChannelAddresses(eq(oobChannel), eq(combinedEag)); assertEquals(1, lbRequestObservers.size()); // No additional RPC } @Test public void grpclbUpdatedAddresses_reconnectOnAuthorityChange() { - List grpclbResolutionList = - createResolvedServerAddresses(true, false); + List backendList = createResolvedBackendAddresses(1); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, grpclbResolutionAttrs); - verify(helper).createOobChannel(eq(grpclbResolutionList.get(0)), eq(lbAuthority(0))); + verify(helper).createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); ManagedChannel oobChannel = fakeOobChannels.poll(); assertEquals(1, lbRequestObservers.size()); final String newAuthority = 
"some-new-authority"; - List grpclbResolutionList2 = - createResolvedServerAddresses(false); - grpclbResolutionList2.add(new EquivalentAddressGroup( - new FakeSocketAddress("somethingNew"), lbAttributes(newAuthority))); - deliverResolvedAddresses(grpclbResolutionList2, grpclbResolutionAttrs); + List backendList2 = createResolvedBackendAddresses(1); + List grpclbBalancerList2 = + Collections.singletonList( + new EquivalentAddressGroup( + new FakeSocketAddress("somethingNew"), lbAttributes(newAuthority))); + deliverResolvedAddresses( + backendList2, grpclbBalancerList2, grpclbResolutionAttrs); assertTrue(oobChannel.isTerminated()); - verify(helper).createOobChannel(eq(grpclbResolutionList2.get(1)), eq(newAuthority)); + verify(helper).createOobChannel(eq(grpclbBalancerList2.get(0)), eq(newAuthority)); assertEquals(2, lbRequestObservers.size()); // An additional RPC } @Test public void grpclbWorking() { InOrder inOrder = inOrder(helper, subchannelPool); - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); // Fallback timer is started as soon as the addresses are resolved. 
assertEquals(1, fakeClock.numPendingTasks(FALLBACK_MODE_TASK_FILTER)); - verify(helper).createOobChannel(eq(grpclbResolutionList.get(0)), eq(lbAuthority(0))); + verify(helper).createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); assertEquals(1, fakeOobChannels.size()); ManagedChannel oobChannel = fakeOobChannels.poll(); verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); @@ -915,7 +942,7 @@ public void grpclbWorking() { StreamObserver lbRequestObserver = lbRequestObservers.poll(); verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Simulate receiving LB response @@ -1174,13 +1201,14 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { long loadReportIntervalMillis = 1983; InOrder inOrder = inOrder(helper, subchannelPool); - // Create a resolution list with a mixture of balancer and backend addresses - List resolutionList = - createResolvedServerAddresses(false, true, false); + // Create balancer and backend addresses + List backendList = createResolvedBackendAddresses(2); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes resolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(resolutionList, resolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, resolutionAttrs); - inOrder.verify(helper).createOobChannel(eq(resolutionList.get(1)), eq(lbAuthority(0))); + inOrder.verify(helper) + .createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); // Attempted to connect to balancer assertEquals(1, fakeOobChannels.size()); @@ -1192,7 +1220,7 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - 
InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); lbResponseObserver.onNext(buildInitialResponse(loadReportIntervalMillis)); // We don't care if these methods have been run. @@ -1214,11 +1242,11 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { assertEquals(0, fakeClock.numPendingTasks(FALLBACK_MODE_TASK_FILTER)); List fallbackList = - Arrays.asList(resolutionList.get(0), resolutionList.get(2)); + Arrays.asList(backendList.get(0), backendList.get(1)); assertThat(logs).containsExactly( "INFO: Using fallback backends", "INFO: Using RR list=[[[FakeSocketAddress-fake-address-0]/{}], " - + "[[FakeSocketAddress-fake-address-2]/{}]], drop=[null, null]", + + "[[FakeSocketAddress-fake-address-1]/{}]], drop=[null, null]", "INFO: CONNECTING: picks=[BUFFER_ENTRY], drops=[null, null]").inOrder(); // Fall back to the backends from resolver @@ -1228,20 +1256,21 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { verify(lbRequestObserver, never()).onCompleted(); } - //////////////////////////////////////////////////////// - // Name resolver sends new list without any backend addr - //////////////////////////////////////////////////////// - resolutionList = createResolvedServerAddresses(true, true); - deliverResolvedAddresses(resolutionList, resolutionAttrs); + ////////////////////////////////////////////////////////////////////// + // Name resolver sends new resolution results without any backend addr + ////////////////////////////////////////////////////////////////////// + grpclbBalancerList = createResolvedBalancerAddresses(2); + deliverResolvedAddresses( + Collections.emptyList(),grpclbBalancerList, resolutionAttrs); // New addresses are updated to the OobChannel inOrder.verify(helper).updateOobChannelAddresses( same(oobChannel), eq(new EquivalentAddressGroup( - Arrays.asList( - 
resolutionList.get(0).getAddresses().get(0), - resolutionList.get(1).getAddresses().get(0)), - lbAttributes(lbAuthority(0))))); + Arrays.asList( + grpclbBalancerList.get(0).getAddresses().get(0), + grpclbBalancerList.get(1).getAddresses().get(0)), + lbAttributes(lbAuthority(0))))); if (timerExpires) { // Still in fallback logic, except that the backend list is empty @@ -1249,21 +1278,22 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { inOrder, Collections.emptyList()); } - ////////////////////////////////////////////////// - // Name resolver sends new list with backend addrs - ////////////////////////////////////////////////// - resolutionList = createResolvedServerAddresses(true, false, false); - deliverResolvedAddresses(resolutionList, resolutionAttrs); + //////////////////////////////////////////////////////////////// + // Name resolver sends new resolution results with backend addrs + //////////////////////////////////////////////////////////////// + backendList = createResolvedBackendAddresses(2); + grpclbBalancerList = createResolvedBalancerAddresses(1); + deliverResolvedAddresses(backendList, grpclbBalancerList, resolutionAttrs); // New LB address is updated to the OobChannel inOrder.verify(helper).updateOobChannelAddresses( same(oobChannel), - eq(resolutionList.get(0))); + eq(grpclbBalancerList.get(0))); if (timerExpires) { // New backend addresses are used for fallback fallbackTestVerifyUseOfFallbackBackendLists( - inOrder, Arrays.asList(resolutionList.get(1), resolutionList.get(2))); + inOrder, Arrays.asList(backendList.get(0), backendList.get(1))); } //////////////////////////////////////////////// @@ -1283,7 +1313,7 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { lbRequestObserver = lbRequestObservers.poll(); verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + 
InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); } @@ -1302,8 +1332,9 @@ private void subtestGrpclbFallbackInitialTimeout(boolean timerExpires) { /////////////////////////////////////////////////////////////// // New backend addresses from resolver outside of fallback mode /////////////////////////////////////////////////////////////// - resolutionList = createResolvedServerAddresses(true, false); - deliverResolvedAddresses(resolutionList, resolutionAttrs); + backendList = createResolvedBackendAddresses(1); + grpclbBalancerList = createResolvedBalancerAddresses(1); + deliverResolvedAddresses(backendList, grpclbBalancerList, resolutionAttrs); // Will not affect the round robin list at all inOrder.verify(helper, never()) .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); @@ -1317,13 +1348,13 @@ public void grpclbFallback_breakLbStreamBeforeFallbackTimerExpires() { long loadReportIntervalMillis = 1983; InOrder inOrder = inOrder(helper, subchannelPool); - // Create a resolution list with a mixture of balancer and backend addresses - List resolutionList = - createResolvedServerAddresses(false, true, false); + // Create balancer and backend addresses + List backendList = createResolvedBackendAddresses(2); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes resolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(resolutionList, resolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, resolutionAttrs); - inOrder.verify(helper).createOobChannel(eq(resolutionList.get(1)), eq(lbAuthority(0))); + inOrder.verify(helper).createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); // Attempted to connect to balancer assertThat(fakeOobChannels).hasSize(1); @@ -1334,7 +1365,7 @@ public void grpclbFallback_breakLbStreamBeforeFallbackTimerExpires() { verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - 
InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); lbResponseObserver.onNext(buildInitialResponse(loadReportIntervalMillis)); // We don't care if these methods have been run. @@ -1353,7 +1384,7 @@ public void grpclbFallback_breakLbStreamBeforeFallbackTimerExpires() { // Fall back to the backends from resolver fallbackTestVerifyUseOfFallbackBackendLists( - inOrder, Arrays.asList(resolutionList.get(0), resolutionList.get(2))); + inOrder, Arrays.asList(backendList.get(0), backendList.get(1))); // A new stream is created verify(mockLbService, times(2)).balanceLoad(lbResponseObserverCaptor.capture()); @@ -1361,7 +1392,7 @@ public void grpclbFallback_breakLbStreamBeforeFallbackTimerExpires() { lbRequestObserver = lbRequestObservers.poll(); verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); } @@ -1369,19 +1400,20 @@ public void grpclbFallback_breakLbStreamBeforeFallbackTimerExpires() { public void grpclbFallback_noBalancerAddress() { InOrder inOrder = inOrder(helper, subchannelPool); - // Create a resolution list with just backend addresses - List resolutionList = createResolvedServerAddresses(false, false); + // Create just backend addresses + List backendList = createResolvedBackendAddresses(2); Attributes resolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(resolutionList, resolutionAttrs); + deliverResolvedAddresses( + backendList, Collections.emptyList(), resolutionAttrs); assertThat(logs).containsExactly( - "INFO: Using fallback backends", - "INFO: Using RR list=[[[FakeSocketAddress-fake-address-0]/{}], " - + "[[FakeSocketAddress-fake-address-1]/{}]], drop=[null, null]", - "INFO: CONNECTING: picks=[BUFFER_ENTRY], drops=[null, null]").inOrder(); + 
"INFO: Using fallback backends", + "INFO: Using RR list=[[[FakeSocketAddress-fake-address-0]/{}], " + + "[[FakeSocketAddress-fake-address-1]/{}]], drop=[null, null]", + "INFO: CONNECTING: picks=[BUFFER_ENTRY], drops=[null, null]").inOrder(); // Fall back to the backends from resolver - fallbackTestVerifyUseOfFallbackBackendLists(inOrder, resolutionList); + fallbackTestVerifyUseOfFallbackBackendLists(inOrder, backendList); // No fallback timeout timer scheduled. assertEquals(0, fakeClock.numPendingTasks(FALLBACK_MODE_TASK_FILTER)); @@ -1410,13 +1442,13 @@ private void subtestGrpclbFallbackConnectionLost( long loadReportIntervalMillis = 1983; InOrder inOrder = inOrder(helper, mockLbService, subchannelPool); - // Create a resolution list with a mixture of balancer and backend addresses - List resolutionList = - createResolvedServerAddresses(false, true, false); + // Create balancer and backend addresses + List backendList = createResolvedBackendAddresses(2); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes resolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(resolutionList, resolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, resolutionAttrs); - inOrder.verify(helper).createOobChannel(eq(resolutionList.get(1)), eq(lbAuthority(0))); + inOrder.verify(helper).createOobChannel(eq(grpclbBalancerList.get(0)), eq(lbAuthority(0))); // Attempted to connect to balancer assertEquals(1, fakeOobChannels.size()); @@ -1428,7 +1460,7 @@ private void subtestGrpclbFallbackConnectionLost( verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); lbResponseObserver.onNext(buildInitialResponse(loadReportIntervalMillis)); // We don't care if these methods have been run. 
@@ -1465,7 +1497,7 @@ private void subtestGrpclbFallbackConnectionLost( if (balancerBroken && allSubchannelsBroken) { // Going into fallback subchannels = fallbackTestVerifyUseOfFallbackBackendLists( - inOrder, Arrays.asList(resolutionList.get(0), resolutionList.get(2))); + inOrder, Arrays.asList(backendList.get(0), backendList.get(1))); // When in fallback mode, fallback timer should not be scheduled when all backend // connections are lost @@ -1486,9 +1518,9 @@ private void subtestGrpclbFallbackConnectionLost( if (!(balancerBroken && allSubchannelsBroken)) { verify(subchannelPool, never()).takeOrCreateSubchannel( - eq(resolutionList.get(0)), any(Attributes.class)); + eq(backendList.get(0)), any(Attributes.class)); verify(subchannelPool, never()).takeOrCreateSubchannel( - eq(resolutionList.get(2)), any(Attributes.class)); + eq(backendList.get(1)), any(Attributes.class)); } } @@ -1555,15 +1587,15 @@ private List fallbackTestVerifyUseOfBackendLists( @Test public void grpclbMultipleAuthorities() throws Exception { - List grpclbResolutionList = Arrays.asList( + List backendList = Collections.singletonList( + new EquivalentAddressGroup(new FakeSocketAddress("not-a-lb-address"))); + List grpclbBalancerList = Arrays.asList( new EquivalentAddressGroup( new FakeSocketAddress("fake-address-1"), lbAttributes("fake-authority-1")), new EquivalentAddressGroup( new FakeSocketAddress("fake-address-2"), lbAttributes("fake-authority-2")), - new EquivalentAddressGroup( - new FakeSocketAddress("not-a-lb-address")), new EquivalentAddressGroup( new FakeSocketAddress("fake-address-3"), lbAttributes("fake-authority-1"))); @@ -1574,7 +1606,7 @@ public void grpclbMultipleAuthorities() throws Exception { lbAttributes("fake-authority-1")); // Supporting multiple authorities would be good, one day Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, 
grpclbResolutionAttrs); verify(helper).createOobChannel(goldenOobChannelEag, "fake-authority-1"); } @@ -1588,9 +1620,11 @@ public void grpclbBalancerStreamClosedAndRetried() throws Exception { .build(); InOrder inOrder = inOrder(mockLbService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2, helper); - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); assertEquals(1, fakeOobChannels.size()); @SuppressWarnings("unused") @@ -1693,11 +1727,13 @@ public void grpclbWorking_pickFirstMode() throws Exception { InOrder inOrder = inOrder(helper); String lbConfig = "{\"childPolicy\" : [ {\"pick_first\" : {}} ]}"; - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.newBuilder().set( LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)).build(); - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); assertEquals(1, fakeOobChannels.size()); verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); @@ -1706,7 +1742,7 @@ public void grpclbWorking_pickFirstMode() throws Exception { StreamObserver lbRequestObserver = lbRequestObservers.poll(); verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Simulate receiving LB response @@ -1722,8 +1758,8 @@ public void grpclbWorking_pickFirstMode() throws Exception { // 
the new createSubchannel(). inOrder.verify(helper).createSubchannel( eq(Arrays.asList( - new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), - new EquivalentAddressGroup(backends1.get(1).addr, eagAttrsWithToken("token0002")))), + new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), + new EquivalentAddressGroup(backends1.get(1).addr, eagAttrsWithToken("token0002")))), any(Attributes.class)); // Initially IDLE @@ -1779,9 +1815,9 @@ public void grpclbWorking_pickFirstMode() throws Exception { assertThat(mockSubchannels).isEmpty(); verify(subchannel).updateAddresses( eq(Arrays.asList( - new EquivalentAddressGroup(backends2.get(0).addr, eagAttrsWithToken("token0001")), - new EquivalentAddressGroup(backends2.get(2).addr, - eagAttrsWithToken("token0004"))))); + new EquivalentAddressGroup(backends2.get(0).addr, eagAttrsWithToken("token0001")), + new EquivalentAddressGroup(backends2.get(2).addr, + eagAttrsWithToken("token0004"))))); inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture()); RoundRobinPicker picker4 = (RoundRobinPicker) pickerCaptor.getValue(); assertThat(picker4.dropList).containsExactly( @@ -1825,10 +1861,12 @@ public void shutdownWithoutSubchannel_pickFirst() throws Exception { @SuppressWarnings("deprecation") // TODO(creamsoup) use parsed object private void subtestShutdownWithoutSubchannel(String childPolicy) throws Exception { String lbConfig = "{\"childPolicy\" : [ {\"" + childPolicy + "\" : {}} ]}"; - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.newBuilder().set( LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)).build(); - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); 
verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); assertEquals(1, lbRequestObservers.size()); StreamObserver requestObserver = lbRequestObservers.poll(); @@ -1848,12 +1886,12 @@ public void pickFirstMode_fallback() throws Exception { String lbConfig = "{\"childPolicy\" : [ {\"pick_first\" : {}} ]}"; - // Name resolver returns a mix of balancer and backend addresses - List grpclbResolutionList = - createResolvedServerAddresses(false, true, false); + // Name resolver returns balancer and backend addresses + List backendList = createResolvedBackendAddresses(2); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.newBuilder().set( LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)).build(); - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, grpclbResolutionAttrs); // Attempted to connect to balancer assertEquals(1, fakeOobChannels.size()); @@ -1868,7 +1906,7 @@ public void pickFirstMode_fallback() throws Exception { // TODO(zhangkun83): remove the deprecation suppression on this method once migrated to // the new createSubchannel(). 
inOrder.verify(helper).createSubchannel( - eq(Arrays.asList(grpclbResolutionList.get(0), grpclbResolutionList.get(2))), + eq(Arrays.asList(backendList.get(0), backendList.get(1))), any(Attributes.class)); assertThat(mockSubchannels).hasSize(1); @@ -1905,9 +1943,9 @@ public void pickFirstMode_fallback() throws Exception { assertThat(mockSubchannels).isEmpty(); verify(subchannel).updateAddresses( eq(Arrays.asList( - new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), - new EquivalentAddressGroup(backends1.get(1).addr, - eagAttrsWithToken("token0002"))))); + new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), + new EquivalentAddressGroup(backends1.get(1).addr, + eagAttrsWithToken("token0002"))))); inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture()); RoundRobinPicker picker2 = (RoundRobinPicker) pickerCaptor.getValue(); assertThat(picker2.dropList).containsExactly(null, null); @@ -1927,11 +1965,13 @@ public void switchMode() throws Exception { InOrder inOrder = inOrder(helper); String lbConfig = "{\"childPolicy\" : [ {\"round_robin\" : {}} ]}"; - List grpclbResolutionList = createResolvedServerAddresses(true); + List grpclbBalancerList = createResolvedBalancerAddresses(1); Attributes grpclbResolutionAttrs = Attributes.newBuilder().set( LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)).build(); - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); assertEquals(1, fakeOobChannels.size()); ManagedChannel oobChannel = fakeOobChannels.poll(); @@ -1941,7 +1981,7 @@ public void switchMode() throws Exception { StreamObserver lbRequestObserver = lbRequestObservers.poll(); verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + 
InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Simulate receiving LB response @@ -1971,7 +2011,9 @@ public void switchMode() throws Exception { lbConfig = "{\"childPolicy\" : [ {\"pick_first\" : {}} ]}"; grpclbResolutionAttrs = Attributes.newBuilder().set( LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)).build(); - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, grpclbResolutionAttrs); // GrpclbState will be shutdown, and a new one will be created @@ -1989,7 +2031,7 @@ public void switchMode() throws Exception { lbRequestObserver = lbRequestObservers.poll(); verify(lbRequestObserver).onNext( eq(LoadBalanceRequest.newBuilder().setInitialRequest( - InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) .build())); // Simulate receiving LB response @@ -2003,8 +2045,8 @@ public void switchMode() throws Exception { // the new createSubchannel(). 
inOrder.verify(helper).createSubchannel( eq(Arrays.asList( - new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), - new EquivalentAddressGroup(backends1.get(1).addr, eagAttrsWithToken("token0002")))), + new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), + new EquivalentAddressGroup(backends1.get(1).addr, eagAttrsWithToken("token0002")))), any(Attributes.class)); inOrder.verify(helper).updateBalancingState(eq(IDLE), any(SubchannelPicker.class)); @@ -2088,19 +2130,18 @@ public void retrieveModeFromLbConfig_badConfigDefaultToRoundRobin() throws Excep @Test public void grpclbWorking_lbSendsFallbackMessage() { InOrder inOrder = inOrder(helper, subchannelPool); - List grpclbResolutionList = - createResolvedServerAddresses(true, true, false, false); - List fallbackEags = grpclbResolutionList.subList(2, 4); + List backendList = createResolvedBackendAddresses(2); + List grpclbBalancerList = createResolvedBalancerAddresses(2); Attributes grpclbResolutionAttrs = Attributes.EMPTY; - deliverResolvedAddresses(grpclbResolutionList, grpclbResolutionAttrs); + deliverResolvedAddresses(backendList, grpclbBalancerList, grpclbResolutionAttrs); // Fallback timer is started as soon as the addresses are resolved. 
assertEquals(1, fakeClock.numPendingTasks(FALLBACK_MODE_TASK_FILTER)); List addrs = new ArrayList<>(); - addrs.addAll(grpclbResolutionList.get(0).getAddresses()); - addrs.addAll(grpclbResolutionList.get(1).getAddresses()); - Attributes attr = grpclbResolutionList.get(0).getAttributes(); + addrs.addAll(grpclbBalancerList.get(0).getAddresses()); + addrs.addAll(grpclbBalancerList.get(1).getAddresses()); + Attributes attr = grpclbBalancerList.get(0).getAttributes(); EquivalentAddressGroup oobChannelEag = new EquivalentAddressGroup(addrs, attr); verify(helper).createOobChannel(eq(oobChannelEag), eq(lbAuthority(0))); assertEquals(1, fakeOobChannels.size()); @@ -2206,7 +2247,7 @@ public void grpclbWorking_lbSendsFallbackMessage() { .returnSubchannel(eq(subchannel2), eq(ConnectivityStateInfo.forNonError(READY))); // verify fallback - fallbackTestVerifyUseOfFallbackBackendLists(inOrder, fallbackEags); + fallbackTestVerifyUseOfFallbackBackendLists(inOrder, backendList); assertFalse(oobChannel.isShutdown()); verify(lbRequestObserver, never()).onCompleted(); @@ -2293,48 +2334,62 @@ public void grpclbWorking_lbSendsFallbackMessage() { private void deliverSubchannelState( final Subchannel subchannel, final ConnectivityStateInfo newState) { syncContext.execute(new Runnable() { - @Override - public void run() { - // TODO(zhangkun83): remove the deprecation suppression on this method once migrated to - // the new API. - balancer.handleSubchannelState(subchannel, newState); - } - }); + @Override + public void run() { + // TODO(zhangkun83): remove the deprecation suppression on this method once migrated to + // the new API. 
+ balancer.handleSubchannelState(subchannel, newState); + } + }); } private void deliverNameResolutionError(final Status error) { syncContext.execute(new Runnable() { - @Override - public void run() { - balancer.handleNameResolutionError(error); - } - }); + @Override + public void run() { + balancer.handleNameResolutionError(error); + } + }); } private void deliverResolvedAddresses( - final List addrs, final Attributes attrs) { + final List backendAddrs, + final List balancerAddrs, + Attributes attrs) { + if (!balancerAddrs.isEmpty()) { + attrs = attrs.toBuilder().set(GrpclbConstants.ATTR_LB_ADDRS, balancerAddrs).build(); + } + final Attributes finalAttrs = attrs; syncContext.execute(new Runnable() { - @Override - public void run() { - balancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(addrs).setAttributes(attrs).build()); - } - }); + @Override + public void run() { + balancer.handleResolvedAddresses( + ResolvedAddresses.newBuilder() + .setAddresses(backendAddrs) + .setAttributes(finalAttrs) + .build()); + } + }); } private GrpclbClientLoadRecorder getLoadRecorder() { return balancer.getGrpclbState().getLoadRecorder(); } - private static List createResolvedServerAddresses(boolean ... isLb) { - ArrayList list = new ArrayList<>(); - for (int i = 0; i < isLb.length; i++) { + private static List createResolvedBackendAddresses(int n) { + List list = new ArrayList<>(); + for (int i = 0; i < n; i++) { + SocketAddress addr = new FakeSocketAddress("fake-address-" + i); + list.add(new EquivalentAddressGroup(addr)); + } + return list; + } + + private static List createResolvedBalancerAddresses(int n) { + List list = new ArrayList<>(); + for (int i = 0; i < n; i++) { SocketAddress addr = new FakeSocketAddress("fake-address-" + i); - EquivalentAddressGroup eag = - new EquivalentAddressGroup( - addr, - isLb[i] ? 
lbAttributes(lbAuthority(i)) : Attributes.EMPTY); - list.add(eag); + list.add(new EquivalentAddressGroup(addr, lbAttributes(lbAuthority(i)))); } return list; } @@ -2346,7 +2401,7 @@ private static String lbAuthority(int unused) { private static Attributes lbAttributes(String authority) { return Attributes.newBuilder() - .set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, authority) + .set(GrpclbConstants.ATTR_LB_ADDR_AUTHORITY, authority) .build(); } diff --git a/xds/src/main/java/io/grpc/xds/FallbackLb.java b/xds/src/main/java/io/grpc/xds/FallbackLb.java index 41486e56eb7..380a22dba19 100644 --- a/xds/src/main/java/io/grpc/xds/FallbackLb.java +++ b/xds/src/main/java/io/grpc/xds/FallbackLb.java @@ -61,6 +61,7 @@ protected LoadBalancer delegate() { return fallbackPolicyLb; } + @SuppressWarnings("deprecation") @Override public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { Attributes attributes = resolvedAddresses.getAttributes(); @@ -113,6 +114,8 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { List servers = resolvedAddresses.getAddresses(); // Some addresses in the list may be grpclb-v1 balancer addresses, so if the fallback policy // does not support grpclb-v1 balancer addresses, then we need to exclude them from the list. + // TODO(chengyuanzhang): delete the following logic after changing internal resolver + // to not include grpclb server addresses. 
if (!newFallbackPolicyName.equals("grpclb") && !newFallbackPolicyName.equals(XDS_POLICY_NAME)) { ImmutableList.Builder backends = ImmutableList.builder(); for (EquivalentAddressGroup eag : resolvedAddresses.getAddresses()) { From 2e12b53f2fe11b9ef8741ee66678416c3e20a08e Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Fri, 31 Jan 2020 11:13:44 -0800 Subject: [PATCH 36/86] examples: Add XDS client example --- .travis.yml | 1 + RELEASING.md | 1 + buildscripts/kokoro/unix.sh | 3 ++ examples/example-xds/README.md | 79 ++++++++++++++++++++++++++++ examples/example-xds/build.gradle | 43 +++++++++++++++ examples/example-xds/settings.gradle | 3 ++ 6 files changed, 130 insertions(+) create mode 100644 examples/example-xds/README.md create mode 100644 examples/example-xds/build.gradle create mode 100644 examples/example-xds/settings.gradle diff --git a/.travis.yml b/.travis.yml index 53f1d01f8ca..c81d28675a3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,6 +29,7 @@ install: - pushd examples/example-hostname && mvn verify && popd - pushd examples/example-tls && ../gradlew clean build && popd - pushd examples/example-kotlin && ../gradlew build && popd + - pushd examples/example-xds && ../gradlew build && popd before_script: - test -z "$(git status --porcelain)" || (git status && echo Error Working directory is not clean. Forget to commit generated files? 
&& false) diff --git a/RELEASING.md b/RELEASING.md index e476ab7e5b2..34836768446 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -51,6 +51,7 @@ $ VERSION_FILES=( examples/example-kotlin/android/helloworld/app/build.gradle examples/example-tls/build.gradle examples/example-tls/pom.xml + examples/example-xds/build.gradle ) ``` diff --git a/buildscripts/kokoro/unix.sh b/buildscripts/kokoro/unix.sh index ec265009994..a559aa42ee7 100755 --- a/buildscripts/kokoro/unix.sh +++ b/buildscripts/kokoro/unix.sh @@ -71,6 +71,9 @@ if [[ -z "${SKIP_TESTS:-}" ]]; then pushd examples/example-tls mvn clean verify --batch-mode popd + pushd examples/example-xds + ../gradlew build $GRADLE_FLAGS + popd # TODO(zpencer): also build the GAE examples fi diff --git a/examples/example-xds/README.md b/examples/example-xds/README.md new file mode 100644 index 00000000000..b01a9ecdacd --- /dev/null +++ b/examples/example-xds/README.md @@ -0,0 +1,79 @@ +gRPC XDS Example +================ + +The XDS example is a Hello World client capable of being configured with the +XDS management protocol. Out-of-the-box it behaves the same as hello world +client. + +__XDS support is incomplete and experimental, with limited compatibility. It +will be very hard to produce a working enviornment just by this example. Please +refer to documentation specific for your XDS management server and +environment.__ + +The example requires grpc-xds, but grpc-xds is not currently being published. +You will thus need to build it yourself. This should guide you, but if you +encounter issues please consult [COMPILING.md](../../COMPILING.md). + +### Build the example + +1. The server does not use XDS, so recent releases work fine. Building using +recent releases is much easier, so check out the most recent release tag: +``` +$ git checkout v1.27.0 +``` + +2. Build the hello-world example server or the hostname example server. 
See + [the examples README](../README.md) or the + [hostname example README](../example-hostname/README.md). + +3. Since XDS is still developing rapidly, XDS-using code should be built from +master: +``` +$ git checkout master +``` + +4. Building protoc-gen-grpc-java (the protoc plugin) requires a bit of work and + isn't necessary. So change the hello-world example to use the last released + version of the plugin. In `grpc-java/examples/build.gradle`, change: +``` + grpc { artifact = "io.grpc:protoc-gen-grpc-java:${grpcVersion}" } +``` +To: +``` + grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.27.0" } +``` + + +5. Build this client. From the `grpc-java/examples/examples-xds` directory: +``` +$ ../gradlew -PskipCodegen=true -PskipAndroid=true --include-build ../.. installDist +``` + +This creates the script `build/install/example-xds/bin/xds-hello-world-client` +that runs the example. + +To start the server, run: + +``` +$ ../build/install/hostname/bin/hello-world-server +$ # or +$ ../example-hostname/build/install/hostname/bin/hostname-server +``` + +And in a different terminal window run this client: + +``` +$ ./build/install/example-xds/bin/xds-hello-world-client +``` + +However, that didn't use XDS! To use XDS we assume you have deployed the server +in your deployment environment and know its name. You need to set the +`GRPC_XDS_BOOTSTRAP` environment variable to point to a gRPC XDS bootstrap +file (see [gRFC A27](https://github.com/grpc/proposal/pull/170) for the +bootstrap format). Then use the `xds-experimental:` target scheme during +channel creation. 
+ +``` +$ export GRPC_XDS_BOOTSTRAP=/path/to/bootstrap.json +$ ./build/install/example-xds/bin/xds-hello-world-client "XDS world" xds-experimental:///yourServersName +``` diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle new file mode 100644 index 00000000000..b9b03a4a5aa --- /dev/null +++ b/examples/example-xds/build.gradle @@ -0,0 +1,43 @@ +plugins { + id 'application' // Provide convenience executables for trying out the examples. + id 'java' +} + +repositories { + maven { // The google mirror is less flaky than mavenCentral() + url "https://maven-central.storage-download.googleapis.com/repos/central/data/" } + mavenCentral() + mavenLocal() +} + +sourceCompatibility = 1.7 +targetCompatibility = 1.7 + +// IMPORTANT: You probably want the non-SNAPSHOT version of gRPC. Make sure you +// are looking at a tagged version of the example and not "master"! + +// Feel free to delete the comment at the next line. It is just for safely +// updating the version in our release process. +def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + +dependencies { + // This example's client is the same as the helloworld client. 
We depend on the helloworld + // client's code here + implementation ':examples' + // The only change necessary is an extra runtime dependency on io.grpc:grpc-xds + runtimeOnly "io.grpc:grpc-xds:${grpcVersion}" +} + +startScripts.enabled = false + +task helloWorldClient(type: CreateStartScripts) { + mainClassName = 'io.grpc.examples.helloworld.HelloWorldClient' + applicationName = 'xds-hello-world-client' + outputDir = new File(project.buildDir, 'tmp') + classpath = startScripts.classpath +} + +applicationDistribution.into('bin') { + from(helloWorldClient) + fileMode = 0755 +} diff --git a/examples/example-xds/settings.gradle b/examples/example-xds/settings.gradle new file mode 100644 index 00000000000..9f46a95b6f5 --- /dev/null +++ b/examples/example-xds/settings.gradle @@ -0,0 +1,3 @@ +rootProject.name = 'example-xds' + +includeBuild '..' From 66f08c71b145fb7a9a29a32cef90519d424a778a Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Fri, 31 Jan 2020 11:31:48 -0800 Subject: [PATCH 37/86] core,xds: clean up xds config parsing from service config util --- .../io/grpc/internal/ServiceConfigUtil.java | 34 ------ .../grpc/internal/ServiceConfigUtilTest.java | 107 ------------------ .../io/grpc/xds/XdsLoadBalancerProvider.java | 14 +-- 3 files changed, 7 insertions(+), 148 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java b/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java index e72937946b4..e677bb6f1af 100644 --- a/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java +++ b/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java @@ -314,40 +314,6 @@ public static List unwrapLoadBalancingConfigList(List> return Collections.unmodifiableList(result); } - /** - * Extract the server name to use in EDS query. 
- */ - @Nullable - public static String getEdsServiceNameFromXdsConfig(Map rawXdsConfig) { - return JsonUtil.getString(rawXdsConfig, "edsServiceName"); - } - - /** - * Extract the LRS server name to send load reports to. - */ - @Nullable - public static String getLrsServerNameFromXdsConfig(Map rawXdsConfig) { - return JsonUtil.getString(rawXdsConfig, "lrsLoadReportingServerName"); - } - - /** - * Extracts list of child policies from xds loadbalancer config. - */ - @Nullable - public static List getChildPolicyFromXdsConfig(Map rawXdsConfig) { - return unwrapLoadBalancingConfigList( - JsonUtil.getListOfObjects(rawXdsConfig, "childPolicy")); - } - - /** - * Extracts list of fallback policies from xds loadbalancer config. - */ - @Nullable - public static List getFallbackPolicyFromXdsConfig(Map rawXdsConfig) { - return unwrapLoadBalancingConfigList( - JsonUtil.getListOfObjects(rawXdsConfig, "fallbackPolicy")); - } - /** * Extracts the stickiness metadata key from a service config, or {@code null}. 
*/ diff --git a/core/src/test/java/io/grpc/internal/ServiceConfigUtilTest.java b/core/src/test/java/io/grpc/internal/ServiceConfigUtilTest.java index ff856401172..a49ae33f515 100644 --- a/core/src/test/java/io/grpc/internal/ServiceConfigUtilTest.java +++ b/core/src/test/java/io/grpc/internal/ServiceConfigUtilTest.java @@ -31,113 +31,6 @@ */ @RunWith(JUnit4.class) public class ServiceConfigUtilTest { - @Test - public void getChildPolicyFromXdsConfig() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," - + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" - + "}"; - LbConfig expectedChildPolicy1 = ServiceConfigUtil.unwrapLoadBalancingConfig( - checkObject(JsonParser.parse("{\"round_robin\" : {}}"))); - LbConfig expectedChildPolicy2 = ServiceConfigUtil.unwrapLoadBalancingConfig( - checkObject(JsonParser.parse("{\"lbPolicy2\" : {\"key\" : \"val\"}}"))); - - List childPolicies = ServiceConfigUtil.getChildPolicyFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - - assertThat(childPolicies).containsExactly(expectedChildPolicy1, expectedChildPolicy2); - } - - @Test - public void getChildPolicyFromXdsConfig_null() throws Exception { - String rawLbConfig = "{" - + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" - + "}"; - - List childPolicies = ServiceConfigUtil.getChildPolicyFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - - assertThat(childPolicies).isNull(); - } - - @Test - public void getFallbackPolicyFromXdsConfig() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," - + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" - + "}"; - LbConfig expectedFallbackPolicy1 = ServiceConfigUtil.unwrapLoadBalancingConfig( - checkObject(JsonParser.parse("{\"lbPolicy3\" : {\"key\" : 
\"val\"}}"))); - LbConfig expectedFallbackPolicy2 = ServiceConfigUtil.unwrapLoadBalancingConfig( - checkObject(JsonParser.parse("{\"lbPolicy4\" : {}}"))); - - List childPolicies = ServiceConfigUtil.getFallbackPolicyFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - - assertThat(childPolicies).containsExactly(expectedFallbackPolicy1, expectedFallbackPolicy2); - } - - @Test - public void getFallbackPolicyFromXdsConfig_null() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]" - + "}"; - - List fallbackPolicies = ServiceConfigUtil.getFallbackPolicyFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - - assertThat(fallbackPolicies).isNull(); - } - - @Test - public void getEdsServiceNameFromXdsConfig() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," - + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]," - + "\"edsServiceName\" : \"dns:///eds.service.com:8080\"" - + "}"; - - String edsServiceName = ServiceConfigUtil.getEdsServiceNameFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - assertThat(edsServiceName).isEqualTo("dns:///eds.service.com:8080"); - } - - @Test - public void getEdsServiceNameFromXdsConfig_null() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," - + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" - + "}"; - - String edsServiceName = ServiceConfigUtil.getEdsServiceNameFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - assertThat(edsServiceName).isNull(); - } - - @Test - public void getLrsServerNameFromXdsConfig() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," - + "\"fallbackPolicy\" : 
[{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]," - + "\"lrsLoadReportingServerName\" : \"dns:///lrs.service.com:8080\"" - + "}"; - - String lrsServerName = ServiceConfigUtil.getLrsServerNameFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - assertThat(lrsServerName).isEqualTo("dns:///lrs.service.com:8080"); - } - - @Test - public void getLrsServerNameFromXdsConfig_null() throws Exception { - String rawLbConfig = "{" - + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," - + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" - + "}"; - - String lrsServerName = ServiceConfigUtil.getLrsServerNameFromXdsConfig( - checkObject(JsonParser.parse(rawLbConfig))); - assertThat(lrsServerName).isNull(); - } @Test public void unwrapLoadBalancingConfig() throws Exception { diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java index 47a7a1bffa0..7b3faf67dd7 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java @@ -27,6 +27,7 @@ import io.grpc.LoadBalancerRegistry; import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; +import io.grpc.internal.JsonUtil; import io.grpc.internal.ServiceConfigUtil; import io.grpc.internal.ServiceConfigUtil.LbConfig; import java.util.List; @@ -78,10 +79,9 @@ static ConfigOrError parseLoadBalancingConfigPolicy( try { LbConfig childPolicy = selectChildPolicy(rawLoadBalancingPolicyConfig, registry); LbConfig fallbackPolicy = selectFallbackPolicy(rawLoadBalancingPolicyConfig, registry); - String edsServiceName = - ServiceConfigUtil.getEdsServiceNameFromXdsConfig(rawLoadBalancingPolicyConfig); + String edsServiceName = JsonUtil.getString(rawLoadBalancingPolicyConfig, "edsServiceName"); String lrsServerName = - ServiceConfigUtil.getLrsServerNameFromXdsConfig(rawLoadBalancingPolicyConfig); + 
JsonUtil.getString(rawLoadBalancingPolicyConfig, "lrsLoadReportingServerName"); return ConfigOrError.fromConfig( new XdsConfig(childPolicy, fallbackPolicy, edsServiceName, lrsServerName)); } catch (RuntimeException e) { @@ -94,8 +94,8 @@ static ConfigOrError parseLoadBalancingConfigPolicy( @VisibleForTesting static LbConfig selectFallbackPolicy( Map rawLoadBalancingPolicyConfig, LoadBalancerRegistry lbRegistry) { - List fallbackConfigs = - ServiceConfigUtil.getFallbackPolicyFromXdsConfig(rawLoadBalancingPolicyConfig); + List fallbackConfigs = ServiceConfigUtil.unwrapLoadBalancingConfigList( + JsonUtil.getListOfObjects(rawLoadBalancingPolicyConfig, "fallbackPolicy")); LbConfig fallbackPolicy = selectSupportedLbPolicy(fallbackConfigs, lbRegistry); return fallbackPolicy == null ? DEFAULT_FALLBACK_POLICY : fallbackPolicy; } @@ -104,8 +104,8 @@ static LbConfig selectFallbackPolicy( @VisibleForTesting static LbConfig selectChildPolicy( Map rawLoadBalancingPolicyConfig, LoadBalancerRegistry lbRegistry) { - List childConfigs = - ServiceConfigUtil.getChildPolicyFromXdsConfig(rawLoadBalancingPolicyConfig); + List childConfigs = ServiceConfigUtil.unwrapLoadBalancingConfigList( + JsonUtil.getListOfObjects(rawLoadBalancingPolicyConfig, "childPolicy")); return selectSupportedLbPolicy(childConfigs, lbRegistry); } From 307f7d0e096304bbfed533d05dec0bbcd6dbe92e Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Fri, 31 Jan 2020 11:32:26 -0800 Subject: [PATCH 38/86] grpclb,xds: fix lint warnings (#6666) --- grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java | 1 - xds/src/main/java/io/grpc/xds/XdsClientImpl.java | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java index 6174fb5627d..922a8b61970 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java @@ -208,7 
+208,6 @@ GrpclbState getGrpclbState() { * Gets a list from an object for the given key. Copy of * {@link io.grpc.internal.ServiceConfigUtil#getList}. */ - @SuppressWarnings("unchecked") @Nullable private static List getList(Map obj, String key) { assert key != null; diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 83340d070b6..dea30bcc9ac 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -680,7 +680,7 @@ static String findClusterNameInRouteConfig(RouteConfiguration config, String hos List routes = targetVirtualHost.getRoutesList(); if (!routes.isEmpty()) { Route route = routes.get(routes.size() - 1); - if (route.getMatch().getPrefix().equals("")) { + if (route.getMatch().getPrefix().isEmpty()) { if (route.hasRoute()) { return route.getRoute().getCluster(); } From 2734e22ef5cc5a9cd3d9003d39f8f42e3dd12d4f Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 31 Jan 2020 15:03:22 -0800 Subject: [PATCH 39/86] context, stub: check result of CountDownLatch.await() in test (#6664) --- context/src/test/java/io/grpc/ContextTest.java | 2 +- stub/src/test/java/io/grpc/stub/ServerCallsTest.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/context/src/test/java/io/grpc/ContextTest.java b/context/src/test/java/io/grpc/ContextTest.java index 66b56372e66..47b9dff0a5e 100644 --- a/context/src/test/java/io/grpc/ContextTest.java +++ b/context/src/test/java/io/grpc/ContextTest.java @@ -609,7 +609,7 @@ public void cancelled(Context context) { latch.countDown(); } }, MoreExecutors.directExecutor()); - latch.await(3, TimeUnit.SECONDS); + assertTrue("cancellation failed", latch.await(3, TimeUnit.SECONDS)); if (error.get() != null) { throw error.get(); } diff --git a/stub/src/test/java/io/grpc/stub/ServerCallsTest.java b/stub/src/test/java/io/grpc/stub/ServerCallsTest.java index 77e24fe522f..04f88755429 100644 --- 
a/stub/src/test/java/io/grpc/stub/ServerCallsTest.java +++ b/stub/src/test/java/io/grpc/stub/ServerCallsTest.java @@ -498,7 +498,7 @@ public void onClose(Status status, Metadata trailers) { semaphore.acquire(); clientCall.request(3); clientCall.halfClose(); - latch.await(5, TimeUnit.SECONDS); + assertThat(latch.await(5, TimeUnit.SECONDS)).isTrue(); // Very that number of messages produced in each onReady handler call matches the number // requested by the client. assertArrayEquals(new int[]{0, 1, 1, 2, 2, 2}, receivedMessages); From 6548c6ff5e8253c2c92ca9e7c9b0580f81afac41 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 31 Jan 2020 15:04:20 -0800 Subject: [PATCH 40/86] github: add Github workflow for Gradle wrapper validation (#6634) --- .github/workflows/gradle-wrapper-validation.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/workflows/gradle-wrapper-validation.yml diff --git a/.github/workflows/gradle-wrapper-validation.yml b/.github/workflows/gradle-wrapper-validation.yml new file mode 100644 index 00000000000..405a2b30659 --- /dev/null +++ b/.github/workflows/gradle-wrapper-validation.yml @@ -0,0 +1,10 @@ +name: "Validate Gradle Wrapper" +on: [push, pull_request] + +jobs: + validation: + name: "Validation" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: gradle/wrapper-validation-action@v1 From be27d0ba8b97dc03b28ebdc9473d9dee2f9d7f4d Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 31 Jan 2020 15:32:44 -0800 Subject: [PATCH 41/86] Revert "github: add Github workflow for Gradle wrapper validation (#6634)" (#6668) This reverts commit 6548c6ff5e8253c2c92ca9e7c9b0580f81afac41. 
--- .github/workflows/gradle-wrapper-validation.yml | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 .github/workflows/gradle-wrapper-validation.yml diff --git a/.github/workflows/gradle-wrapper-validation.yml b/.github/workflows/gradle-wrapper-validation.yml deleted file mode 100644 index 405a2b30659..00000000000 --- a/.github/workflows/gradle-wrapper-validation.yml +++ /dev/null @@ -1,10 +0,0 @@ -name: "Validate Gradle Wrapper" -on: [push, pull_request] - -jobs: - validation: - name: "Validation" - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: gradle/wrapper-validation-action@v1 From 26bff62ff3d9c06bccc716b4174bf40b57d3958e Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 31 Jan 2020 15:44:30 -0800 Subject: [PATCH 42/86] grpclb: internal accessor for balancer address related attribute keys (#6667) Creates an internal accessor for attribute keys in grpclb package that is used by name resolver implementations to set balancer addresses as name resolution result attributes. 
--- grpclb/build.gradle | 4 ++ .../java/io/grpc/grpclb/GrpclbConstants.java | 1 - .../io/grpc/grpclb/GrpclbLoadBalancer.java | 3 +- .../InternalGrpclbConstantsAccessor.java | 49 +++++++++++++++++++ 4 files changed, 54 insertions(+), 3 deletions(-) create mode 100644 grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java diff --git a/grpclb/build.gradle b/grpclb/build.gradle index ed48c1a5eba..82ddb89c397 100644 --- a/grpclb/build.gradle +++ b/grpclb/build.gradle @@ -26,3 +26,7 @@ dependencies { } configureProtoCompilation() + +javadoc { + exclude 'io/grpc/grpclb/Internal*' +} diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java index db5e84f08c6..75267757273 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbConstants.java @@ -43,7 +43,6 @@ public final class GrpclbConstants { Attributes.Key.create("lb-token"); @SuppressWarnings("deprecation") - @EquivalentAddressGroup.Attr static final Attributes.Key> ATTR_LB_ADDRS = io.grpc.internal.GrpcAttributes.ATTR_LB_ADDRS; diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java index 922a8b61970..00af68f844b 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java @@ -30,7 +30,6 @@ import io.grpc.Status; import io.grpc.grpclb.GrpclbState.Mode; import io.grpc.internal.BackoffPolicy; -import io.grpc.internal.GrpcAttributes; import io.grpc.internal.ServiceConfigUtil; import io.grpc.internal.ServiceConfigUtil.LbConfig; import io.grpc.internal.TimeProvider; @@ -92,7 +91,7 @@ public void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo n @SuppressWarnings("deprecation") // TODO(creamsoup) migrate to use parsed service config public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) 
{ Attributes attributes = resolvedAddresses.getAttributes(); - List newLbAddresses = attributes.get(GrpcAttributes.ATTR_LB_ADDRS); + List newLbAddresses = attributes.get(GrpclbConstants.ATTR_LB_ADDRS); if ((newLbAddresses == null || newLbAddresses.isEmpty()) && resolvedAddresses.getAddresses().isEmpty()) { handleNameResolutionError( diff --git a/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java b/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java new file mode 100644 index 00000000000..298add4b738 --- /dev/null +++ b/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java @@ -0,0 +1,49 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.grpclb; + +import io.grpc.Attributes; +import io.grpc.EquivalentAddressGroup; +import io.grpc.Internal; +import java.util.List; + +/** + * Internal {@link GrpclbConstants} accessor. This is intended for usage internal to the gRPC + * team. If you *really* think you need to use this, contact the gRPC team first. + */ +@Internal +public class InternalGrpclbConstantsAccessor { + + // Prevent instantiation. + private InternalGrpclbConstantsAccessor() { + } + + /** + * Sets attribute for gRPC LB address authority. 
+ */ + public static Attributes setLbAddrAuthorityAttr( + @EquivalentAddressGroup.Attr Attributes attrs, String authority) { + return attrs.toBuilder().set(GrpclbConstants.ATTR_LB_ADDR_AUTHORITY, authority).build(); + } + + /** + * Sets attribute for gRPC LB addresses. + */ + public static Attributes setLbAddrAttr(Attributes attrs, List lbAddrs) { + return attrs.toBuilder().set(GrpclbConstants.ATTR_LB_ADDRS, lbAddrs).build(); + } +} From 295b64b5ff03cf7ec87ec86408436040fa26446d Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 31 Jan 2020 17:50:04 -0800 Subject: [PATCH 43/86] grpclb: expose balancer address related attributes in internal accessor (#6669) --- .../InternalGrpclbConstantsAccessor.java | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java b/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java index 298add4b738..3360a701b03 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java +++ b/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java @@ -32,18 +32,10 @@ public class InternalGrpclbConstantsAccessor { private InternalGrpclbConstantsAccessor() { } - /** - * Sets attribute for gRPC LB address authority. - */ - public static Attributes setLbAddrAuthorityAttr( - @EquivalentAddressGroup.Attr Attributes attrs, String authority) { - return attrs.toBuilder().set(GrpclbConstants.ATTR_LB_ADDR_AUTHORITY, authority).build(); - } + public static Attributes.Key> ATTR_LB_ADDRS = + GrpclbConstants.ATTR_LB_ADDRS; - /** - * Sets attribute for gRPC LB addresses. 
- */ - public static Attributes setLbAddrAttr(Attributes attrs, List lbAddrs) { - return attrs.toBuilder().set(GrpclbConstants.ATTR_LB_ADDRS, lbAddrs).build(); - } + @EquivalentAddressGroup.Attr + public static Attributes.Key ATTR_LB_ADDR_AUTHORITY = + GrpclbConstants.ATTR_LB_ADDR_AUTHORITY; } From e0ee52cc223952608a0455208ad8fd0f3bc03d97 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Mon, 3 Feb 2020 10:34:30 -0800 Subject: [PATCH 44/86] grpclb: fix lint warnings (#6670) --- .../io/grpc/grpclb/InternalGrpclbConstantsAccessor.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java b/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java index 3360a701b03..a458cb87d55 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java +++ b/grpclb/src/main/java/io/grpc/grpclb/InternalGrpclbConstantsAccessor.java @@ -26,16 +26,16 @@ * team. If you *really* think you need to use this, contact the gRPC team first. */ @Internal -public class InternalGrpclbConstantsAccessor { +public final class InternalGrpclbConstantsAccessor { // Prevent instantiation. private InternalGrpclbConstantsAccessor() { } - public static Attributes.Key> ATTR_LB_ADDRS = + public static final Attributes.Key> ATTR_LB_ADDRS = GrpclbConstants.ATTR_LB_ADDRS; @EquivalentAddressGroup.Attr - public static Attributes.Key ATTR_LB_ADDR_AUTHORITY = + public static final Attributes.Key ATTR_LB_ADDR_AUTHORITY = GrpclbConstants.ATTR_LB_ADDR_AUTHORITY; } From 5b726c07fe3d964a2eeefda2f6defac366b145ee Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 3 Feb 2020 16:49:36 -0800 Subject: [PATCH 45/86] alts: _Actually_ use Conscrypt when available Previously the check for Conscrypt would always fail because CONSCRYPT was guaranteed to be null. 
--- .../grpc/alts/internal/AesGcmAeadCrypter.java | 6 ++-- .../alts/internal/AesGcmAeadCrypterTest.java | 34 +++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 alts/src/test/java/io/grpc/alts/internal/AesGcmAeadCrypterTest.java diff --git a/alts/src/main/java/io/grpc/alts/internal/AesGcmAeadCrypter.java b/alts/src/main/java/io/grpc/alts/internal/AesGcmAeadCrypter.java index fa9cc91e1ee..e3e6302b591 100644 --- a/alts/src/main/java/io/grpc/alts/internal/AesGcmAeadCrypter.java +++ b/alts/src/main/java/io/grpc/alts/internal/AesGcmAeadCrypter.java @@ -18,6 +18,7 @@ import static com.google.common.base.Preconditions.checkArgument; +import com.google.common.annotations.VisibleForTesting; import io.grpc.internal.ConscryptLoader; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; @@ -110,7 +111,8 @@ static int getKeyLength() { return KEY_LENGTH; } - private static Provider getConscrypt() { + @VisibleForTesting + static Provider getConscrypt() { if (!ConscryptLoader.isPresent()) { return null; } @@ -129,7 +131,7 @@ private static Provider getConscrypt() { return null; } try { - Cipher.getInstance(AES_GCM, CONSCRYPT); + Cipher.getInstance(AES_GCM, provider); } catch (SecurityException t) { // Pre-Java 7u121/Java 8u111 fails with SecurityException: // JCE cannot authenticate the provider Conscrypt diff --git a/alts/src/test/java/io/grpc/alts/internal/AesGcmAeadCrypterTest.java b/alts/src/test/java/io/grpc/alts/internal/AesGcmAeadCrypterTest.java new file mode 100644 index 00000000000..bcf8c2810ee --- /dev/null +++ b/alts/src/test/java/io/grpc/alts/internal/AesGcmAeadCrypterTest.java @@ -0,0 +1,34 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.alts.internal; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.TruthJUnit.assume; + +import org.conscrypt.Conscrypt; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public final class AesGcmAeadCrypterTest { + @Test + public void getConscrypt_worksWhenConscryptIsAvailable() { + assume().that(Conscrypt.isAvailable()).isTrue(); + assertThat(AesGcmAeadCrypter.getConscrypt()).isNotNull(); + } +} From 461d30adfb38fb552017edeeb85ab90882947920 Mon Sep 17 00:00:00 2001 From: sanjaypujare Date: Tue, 4 Feb 2020 13:03:34 -0800 Subject: [PATCH 46/86] netty: fix server and client handlers to check the correct alpn list (#6603) --- .../java/io/grpc/netty/GrpcSslContexts.java | 2 +- .../io/grpc/netty/ProtocolNegotiators.java | 7 +- .../grpc/netty/ProtocolNegotiatorsTest.java | 184 ++++++++++++++++++ 3 files changed, 189 insertions(+), 4 deletions(-) diff --git a/netty/src/main/java/io/grpc/netty/GrpcSslContexts.java b/netty/src/main/java/io/grpc/netty/GrpcSslContexts.java index 72701cabc56..9be8f9e849c 100644 --- a/netty/src/main/java/io/grpc/netty/GrpcSslContexts.java +++ b/netty/src/main/java/io/grpc/netty/GrpcSslContexts.java @@ -56,7 +56,7 @@ private GrpcSslContexts() {} /* * List of ALPN/NPN protocols in order of preference. 
*/ - static final List NEXT_PROTOCOL_VERSIONS = + private static final List NEXT_PROTOCOL_VERSIONS = Collections.unmodifiableList(Arrays.asList(HTTP2_VERSION)); /* diff --git a/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java b/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java index b35ab1f3922..a420d20837d 100644 --- a/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java +++ b/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java @@ -18,7 +18,6 @@ import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; -import static io.grpc.netty.GrpcSslContexts.NEXT_PROTOCOL_VERSIONS; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -190,7 +189,8 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc return; } SslHandler sslHandler = ctx.pipeline().get(SslHandler.class); - if (!NEXT_PROTOCOL_VERSIONS.contains(sslHandler.applicationProtocol())) { + if (!sslContext.applicationProtocolNegotiator().protocols().contains( + sslHandler.applicationProtocol())) { logSslEngineDetails(Level.FINE, ctx, "TLS negotiation failed for new client.", null); ctx.fireExceptionCaught(unavailableException( "Failed protocol negotiation: Unable to find compatible protocol")); @@ -359,7 +359,8 @@ protected void userEventTriggered0(ChannelHandlerContext ctx, Object evt) throws SslHandshakeCompletionEvent handshakeEvent = (SslHandshakeCompletionEvent) evt; if (handshakeEvent.isSuccess()) { SslHandler handler = ctx.pipeline().get(SslHandler.class); - if (NEXT_PROTOCOL_VERSIONS.contains(handler.applicationProtocol())) { + if (sslContext.applicationProtocolNegotiator().protocols() + .contains(handler.applicationProtocol())) { // Successfully negotiated the protocol. 
logSslEngineDetails(Level.FINER, ctx, "TLS negotiation succeeded.", null); propagateTlsComplete(ctx, handler.engine().getSession()); diff --git a/netty/src/test/java/io/grpc/netty/ProtocolNegotiatorsTest.java b/netty/src/test/java/io/grpc/netty/ProtocolNegotiatorsTest.java index 5747e74e2f5..92b95eefa25 100644 --- a/netty/src/test/java/io/grpc/netty/ProtocolNegotiatorsTest.java +++ b/netty/src/test/java/io/grpc/netty/ProtocolNegotiatorsTest.java @@ -35,6 +35,7 @@ import io.grpc.SecurityLevel; import io.grpc.internal.GrpcAttributes; import io.grpc.internal.testing.TestUtils; +import io.grpc.netty.ProtocolNegotiators.ClientTlsHandler; import io.grpc.netty.ProtocolNegotiators.ClientTlsProtocolNegotiator; import io.grpc.netty.ProtocolNegotiators.HostPort; import io.grpc.netty.ProtocolNegotiators.ServerTlsHandler; @@ -76,6 +77,7 @@ import io.netty.handler.codec.http2.Http2ServerUpgradeCodec; import io.netty.handler.codec.http2.Http2Settings; import io.netty.handler.proxy.ProxyConnectException; +import io.netty.handler.ssl.ApplicationProtocolConfig; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.SslHandler; @@ -85,6 +87,8 @@ import java.io.File; import java.net.InetSocketAddress; import java.net.SocketAddress; +import java.util.Arrays; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -349,6 +353,186 @@ public String applicationProtocol() { assertNotNull(grpcHandlerCtx); } + @Test + public void serverTlsHandler_userEventTriggeredSslEvent_supportedProtocolCustom() + throws Exception { + SslHandler goodSslHandler = new SslHandler(engine, false) { + @Override + public String applicationProtocol() { + return "managed_mtls"; + } + }; + + File serverCert = TestUtils.loadCert("server1.pem"); + File key = TestUtils.loadCert("server1.key"); + List alpnList = Arrays.asList("managed_mtls", "h2"); + 
ApplicationProtocolConfig apn = new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + alpnList); + + sslContext = GrpcSslContexts.forServer(serverCert, key) + .ciphers(TestUtils.preferredTestCiphers(), SupportedCipherSuiteFilter.INSTANCE) + .applicationProtocolConfig(apn).build(); + + ChannelHandler handler = new ServerTlsHandler(grpcHandler, sslContext, null); + pipeline.addLast(handler); + + pipeline.replace(SslHandler.class, null, goodSslHandler); + channelHandlerCtx = pipeline.context(handler); + Object sslEvent = SslHandshakeCompletionEvent.SUCCESS; + + pipeline.fireUserEventTriggered(sslEvent); + + assertTrue(channel.isOpen()); + ChannelHandlerContext grpcHandlerCtx = pipeline.context(grpcHandler); + assertNotNull(grpcHandlerCtx); + } + + @Test + public void serverTlsHandler_userEventTriggeredSslEvent_unsupportedProtocolCustom() + throws Exception { + SslHandler badSslHandler = new SslHandler(engine, false) { + @Override + public String applicationProtocol() { + return "badprotocol"; + } + }; + + File serverCert = TestUtils.loadCert("server1.pem"); + File key = TestUtils.loadCert("server1.key"); + List alpnList = Arrays.asList("managed_mtls", "h2"); + ApplicationProtocolConfig apn = new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + alpnList); + + sslContext = GrpcSslContexts.forServer(serverCert, key) + .ciphers(TestUtils.preferredTestCiphers(), SupportedCipherSuiteFilter.INSTANCE) + .applicationProtocolConfig(apn).build(); + ChannelHandler handler = new ServerTlsHandler(grpcHandler, sslContext, null); + pipeline.addLast(handler); + + final AtomicReference error = new AtomicReference<>(); + ChannelHandler errorCapture = new 
ChannelInboundHandlerAdapter() { + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + error.set(cause); + } + }; + + pipeline.addLast(errorCapture); + + pipeline.replace(SslHandler.class, null, badSslHandler); + channelHandlerCtx = pipeline.context(handler); + Object sslEvent = SslHandshakeCompletionEvent.SUCCESS; + + pipeline.fireUserEventTriggered(sslEvent); + + // No h2 protocol was specified, so there should be an error, (normally handled by WBAEH) + assertThat(error.get()).hasMessageThat().contains("Unable to find compatible protocol"); + ChannelHandlerContext grpcHandlerCtx = pipeline.context(grpcHandler); + assertNull(grpcHandlerCtx); + } + + @Test + public void clientTlsHandler_userEventTriggeredSslEvent_supportedProtocolH2() throws Exception { + SslHandler goodSslHandler = new SslHandler(engine, false) { + @Override + public String applicationProtocol() { + return "h2"; + } + }; + DefaultEventLoopGroup elg = new DefaultEventLoopGroup(1); + + ClientTlsHandler handler = new ClientTlsHandler(grpcHandler, sslContext, "authority", elg); + pipeline.addLast(handler); + pipeline.replace(SslHandler.class, null, goodSslHandler); + pipeline.fireUserEventTriggered(ProtocolNegotiationEvent.DEFAULT); + channelHandlerCtx = pipeline.context(handler); + Object sslEvent = SslHandshakeCompletionEvent.SUCCESS; + + pipeline.fireUserEventTriggered(sslEvent); + + ChannelHandlerContext grpcHandlerCtx = pipeline.context(grpcHandler); + assertNotNull(grpcHandlerCtx); + } + + @Test + public void clientTlsHandler_userEventTriggeredSslEvent_supportedProtocolCustom() + throws Exception { + SslHandler goodSslHandler = new SslHandler(engine, false) { + @Override + public String applicationProtocol() { + return "managed_mtls"; + } + }; + DefaultEventLoopGroup elg = new DefaultEventLoopGroup(1); + + File clientCert = TestUtils.loadCert("client.pem"); + File key = TestUtils.loadCert("client.key"); + List alpnList = Arrays.asList("managed_mtls", "h2"); + 
ApplicationProtocolConfig apn = new ApplicationProtocolConfig( + ApplicationProtocolConfig.Protocol.ALPN, + ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, + ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, + alpnList); + + sslContext = GrpcSslContexts.forClient() + .keyManager(clientCert, key) + .ciphers(TestUtils.preferredTestCiphers(), SupportedCipherSuiteFilter.INSTANCE) + .applicationProtocolConfig(apn).build(); + + ClientTlsHandler handler = new ClientTlsHandler(grpcHandler, sslContext, "authority", elg); + pipeline.addLast(handler); + pipeline.replace(SslHandler.class, null, goodSslHandler); + pipeline.fireUserEventTriggered(ProtocolNegotiationEvent.DEFAULT); + channelHandlerCtx = pipeline.context(handler); + Object sslEvent = SslHandshakeCompletionEvent.SUCCESS; + + pipeline.fireUserEventTriggered(sslEvent); + + ChannelHandlerContext grpcHandlerCtx = pipeline.context(grpcHandler); + assertNotNull(grpcHandlerCtx); + } + + @Test + public void clientTlsHandler_userEventTriggeredSslEvent_unsupportedProtocol() throws Exception { + SslHandler goodSslHandler = new SslHandler(engine, false) { + @Override + public String applicationProtocol() { + return "badproto"; + } + }; + DefaultEventLoopGroup elg = new DefaultEventLoopGroup(1); + + ClientTlsHandler handler = new ClientTlsHandler(grpcHandler, sslContext, "authority", elg); + pipeline.addLast(handler); + + final AtomicReference error = new AtomicReference<>(); + ChannelHandler errorCapture = new ChannelInboundHandlerAdapter() { + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + error.set(cause); + } + }; + + pipeline.addLast(errorCapture); + pipeline.replace(SslHandler.class, null, goodSslHandler); + pipeline.fireUserEventTriggered(ProtocolNegotiationEvent.DEFAULT); + channelHandlerCtx = pipeline.context(handler); + Object sslEvent = SslHandshakeCompletionEvent.SUCCESS; + + pipeline.fireUserEventTriggered(sslEvent); + + // Bad protocol was 
specified, so there should be an error, (normally handled by WBAEH) + assertThat(error.get()).hasMessageThat().contains("Unable to find compatible protocol"); + ChannelHandlerContext grpcHandlerCtx = pipeline.context(grpcHandler); + assertNull(grpcHandlerCtx); + } + @Test public void engineLog() { ChannelHandler handler = new ServerTlsHandler(grpcHandler, sslContext, null); From cd35a8153c097adfd3eeb3f1bde89814a32824ec Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Tue, 4 Feb 2020 14:53:31 -0800 Subject: [PATCH 47/86] interop-testing: fix bug for xds dependency not published yet In v1.27.0 release the grpc-interop-testing artifact in maven includes grpc-xds, but grpc-xds is not yet published. It should be removed from the dependency list in maven artifact. --- interop-testing/build.gradle | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/interop-testing/build.gradle b/interop-testing/build.gradle index 70cdc547915..5c28286ddb0 100644 --- a/interop-testing/build.gradle +++ b/interop-testing/build.gradle @@ -25,7 +25,6 @@ dependencies { project(':grpc-protobuf'), project(':grpc-stub'), project(':grpc-testing'), - project(':grpc-xds'), libraries.google_auth_oauth2_http, libraries.junit, libraries.truth @@ -114,10 +113,19 @@ task grpclb_long_lived_affinity_test_client(type: CreateStartScripts) { } task xds_test_client(type: CreateStartScripts) { + // Use task dependsOn instead of depending on project(':grpc-xds') in configurations because + // grpc-xds is not published yet and we don't want grpc-interop-testin to depend on it in maven. 
+ dependsOn ':grpc-xds:jar' mainClassName = "io.grpc.testing.integration.XdsTestClient" applicationName = "xds-test-client" outputDir = new File(project.buildDir, 'tmp') - classpath = jar.outputs.files + configurations.runtime + classpath = startScripts.classpath + fileTree("${project(':grpc-xds').buildDir}/libs") + doLast { + unixScript.text = unixScript.text.replace( + '\$APP_HOME/lib/grpc-xds', "${project(':grpc-xds').buildDir}/libs/grpc-xds") + windowsScript.text = windowsScript.text.replace( + '%APP_HOME%\\lib\\grpc-xds', "${project(':grpc-xds').buildDir}\\libs\\grpc-xds") + } } task xds_test_server(type: CreateStartScripts) { From 258fe12f0281e96f76d36cda6cbffeb6476e65d7 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Wed, 5 Feb 2020 16:47:13 -0800 Subject: [PATCH 48/86] interop-testing: fix bug of xds test missing transitive dependency This fixes a bug introduced in #6675 . grpc-interop-testing should have added all dependency e.g. enovy that grpc-xds needs. --- interop-testing/build.gradle | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/interop-testing/build.gradle b/interop-testing/build.gradle index 5c28286ddb0..e528d6104cb 100644 --- a/interop-testing/build.gradle +++ b/interop-testing/build.gradle @@ -115,7 +115,9 @@ task grpclb_long_lived_affinity_test_client(type: CreateStartScripts) { task xds_test_client(type: CreateStartScripts) { // Use task dependsOn instead of depending on project(':grpc-xds') in configurations because // grpc-xds is not published yet and we don't want grpc-interop-testin to depend on it in maven. - dependsOn ':grpc-xds:jar' + dependsOn ':grpc-xds:shadowJar' + // Add all other dependencies that grpc-xds needs. 
+ dependencies { compile project(':grpc-services'), libraries.netty_epoll } mainClassName = "io.grpc.testing.integration.XdsTestClient" applicationName = "xds-test-client" outputDir = new File(project.buildDir, 'tmp') From 557da62305b7b4499300f9b201a25b44fa0d60ef Mon Sep 17 00:00:00 2001 From: ZhenLian Date: Thu, 6 Feb 2020 10:03:15 -0800 Subject: [PATCH 49/86] netty: Upgrade Netty (4.1.45.Final) & tcnative (2.0.28.Final) --- SECURITY.md | 3 +- build.gradle | 4 +- examples/example-tls/build.gradle | 2 +- .../testing/integration/Http2OkHttpTest.java | 3 +- .../io/grpc/netty/ProtocolNegotiators.java | 3 +- .../io/grpc/netty/NettyServerHandlerTest.java | 41 ++++++++--- repositories.bzl | 72 +++++++++---------- 7 files changed, 76 insertions(+), 52 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index c727626edb0..9ffc46367b2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -396,7 +396,8 @@ grpc-netty version | netty-handler version | netty-tcnative-boringssl-static ver 1.20.x-1.21.x | 4.1.34.Final | 2.0.22.Final 1.22.x | 4.1.35.Final | 2.0.25.Final 1.23.x-1.24.x | 4.1.38.Final | 2.0.25.Final -1.25.x- | 4.1.42.Final | 2.0.26.Final +1.25.x-1.27.x | 4.1.42.Final | 2.0.26.Final +1.28.x- | 4.1.45.Final | 2.0.28.Final _(grpc-netty-shaded avoids issues with keeping these versions in sync.)_ diff --git a/build.gradle b/build.gradle index d5a4ff90b82..292f931f4aa 100644 --- a/build.gradle +++ b/build.gradle @@ -45,7 +45,7 @@ subprojects { protocPluginBaseName = 'protoc-gen-grpc-java' javaPluginPath = "$rootDir/compiler/build/exe/java_plugin/$protocPluginBaseName$exeSuffix" - nettyVersion = '4.1.42.Final' + nettyVersion = '4.1.45.Final' guavaVersion = '28.1-android' googleauthVersion = '0.20.0' protobufVersion = '3.11.0' @@ -147,7 +147,7 @@ subprojects { // SECURITY.md (multiple occurrences) // examples/example-tls/build.gradle // examples/example-tls/pom.xml - netty_tcnative: 'io.netty:netty-tcnative-boringssl-static:2.0.26.Final', + netty_tcnative: 
'io.netty:netty-tcnative-boringssl-static:2.0.28.Final', conscrypt: 'org.conscrypt:conscrypt-openjdk-uber:2.2.1', re2j: 'com.google.re2j:re2j:1.2', diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 62940bb6096..4e81a126790 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -24,7 +24,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION -def nettyTcNativeVersion = '2.0.26.Final' +def nettyTcNativeVersion = '2.0.28.Final' def protocVersion = '3.11.0' dependencies { diff --git a/interop-testing/src/test/java/io/grpc/testing/integration/Http2OkHttpTest.java b/interop-testing/src/test/java/io/grpc/testing/integration/Http2OkHttpTest.java index 927b0ed4478..bdb1594b53d 100644 --- a/interop-testing/src/test/java/io/grpc/testing/integration/Http2OkHttpTest.java +++ b/interop-testing/src/test/java/io/grpc/testing/integration/Http2OkHttpTest.java @@ -33,7 +33,6 @@ import io.grpc.okhttp.internal.Platform; import io.grpc.stub.StreamObserver; import io.grpc.testing.integration.EmptyProtos.Empty; -import io.netty.handler.ssl.OpenSsl; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.SslProvider; @@ -68,7 +67,7 @@ protected AbstractServerImplBuilder getServerBuilder() { // Starts the server with HTTPS. try { SslProvider sslProvider = SslContext.defaultServerProvider(); - if (sslProvider == SslProvider.OPENSSL && !OpenSsl.isAlpnSupported()) { + if (sslProvider == SslProvider.OPENSSL && !SslProvider.isAlpnSupported(SslProvider.OPENSSL)) { // OkHttp only supports Jetty ALPN on OpenJDK. So if OpenSSL doesn't support ALPN, then we // are forced to use Jetty ALPN for Netty instead of OpenSSL. 
sslProvider = SslProvider.JDK; diff --git a/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java b/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java index a420d20837d..27b2c74be2d 100644 --- a/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java +++ b/netty/src/main/java/io/grpc/netty/ProtocolNegotiators.java @@ -52,6 +52,7 @@ import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslHandler; import io.netty.handler.ssl.SslHandshakeCompletionEvent; +import io.netty.handler.ssl.SslProvider; import io.netty.util.AsciiString; import io.netty.util.Attribute; import io.netty.util.AttributeMap; @@ -555,7 +556,7 @@ static void logSslEngineDetails(Level level, ChannelHandlerContext ctx, String m builder.append(" OpenSSL, "); builder.append("Version: 0x").append(Integer.toHexString(OpenSsl.version())); builder.append(" (").append(OpenSsl.versionString()).append("), "); - builder.append("ALPN supported: ").append(OpenSsl.isAlpnSupported()); + builder.append("ALPN supported: ").append(SslProvider.isAlpnSupported(SslProvider.OPENSSL)); } else if (JettyTlsUtil.isJettyAlpnConfigured()) { builder.append(" Jetty ALPN"); } else if (JettyTlsUtil.isJettyNpnConfigured()) { diff --git a/netty/src/test/java/io/grpc/netty/NettyServerHandlerTest.java b/netty/src/test/java/io/grpc/netty/NettyServerHandlerTest.java index f35adf0bae9..453d8222f57 100644 --- a/netty/src/test/java/io/grpc/netty/NettyServerHandlerTest.java +++ b/netty/src/test/java/io/grpc/netty/NettyServerHandlerTest.java @@ -28,7 +28,6 @@ import static io.grpc.netty.Utils.HTTP_METHOD; import static io.grpc.netty.Utils.TE_HEADER; import static io.grpc.netty.Utils.TE_TRAILERS; -import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -435,8 +434,14 @@ public void headersWithInvalidContentTypeShouldFail() throws Exception { 
.status("" + 415) .set(CONTENT_TYPE_HEADER, "text/plain; encoding=utf-8"); - verifyWrite().writeHeaders(eq(ctx()), eq(STREAM_ID), eq(responseHeaders), eq(0), - eq(DEFAULT_PRIORITY_WEIGHT), eq(false), eq(0), eq(false), any(ChannelPromise.class)); + verifyWrite() + .writeHeaders( + eq(ctx()), + eq(STREAM_ID), + eq(responseHeaders), + eq(0), + eq(false), + any(ChannelPromise.class)); } @Test @@ -454,8 +459,14 @@ public void headersWithInvalidMethodShouldFail() throws Exception { .status("" + 405) .set(CONTENT_TYPE_HEADER, "text/plain; encoding=utf-8"); - verifyWrite().writeHeaders(eq(ctx()), eq(STREAM_ID), eq(responseHeaders), eq(0), - eq(DEFAULT_PRIORITY_WEIGHT), eq(false), eq(0), eq(false), any(ChannelPromise.class)); + verifyWrite() + .writeHeaders( + eq(ctx()), + eq(STREAM_ID), + eq(responseHeaders), + eq(0), + eq(false), + any(ChannelPromise.class)); } @Test @@ -472,8 +483,14 @@ public void headersWithMissingPathShouldFail() throws Exception { .status("" + 404) .set(CONTENT_TYPE_HEADER, "text/plain; encoding=utf-8"); - verifyWrite().writeHeaders(eq(ctx()), eq(STREAM_ID), eq(responseHeaders), eq(0), - eq(DEFAULT_PRIORITY_WEIGHT), eq(false), eq(0), eq(false), any(ChannelPromise.class)); + verifyWrite() + .writeHeaders( + eq(ctx()), + eq(STREAM_ID), + eq(responseHeaders), + eq(0), + eq(false), + any(ChannelPromise.class)); } @Test @@ -491,8 +508,14 @@ public void headersWithInvalidPathShouldFail() throws Exception { .status("" + 404) .set(CONTENT_TYPE_HEADER, "text/plain; encoding=utf-8"); - verifyWrite().writeHeaders(eq(ctx()), eq(STREAM_ID), eq(responseHeaders), eq(0), - eq(DEFAULT_PRIORITY_WEIGHT), eq(false), eq(0), eq(false), any(ChannelPromise.class)); + verifyWrite() + .writeHeaders( + eq(ctx()), + eq(STREAM_ID), + eq(responseHeaders), + eq(0), + eq(false), + any(ChannelPromise.class)); } @Test diff --git a/repositories.bzl b/repositories.bzl index c0a244be917..b4ee73a47e8 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -24,18 +24,18 @@ 
IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.truth:truth:1.0", "com.squareup.okhttp:okhttp:2.5.0", "com.squareup.okio:okio:1.13.0", - "io.netty:netty-buffer:4.1.42.Final", - "io.netty:netty-codec-http2:4.1.42.Final", - "io.netty:netty-codec-http:4.1.42.Final", - "io.netty:netty-codec-socks:4.1.42.Final", - "io.netty:netty-codec:4.1.42.Final", - "io.netty:netty-common:4.1.42.Final", - "io.netty:netty-handler-proxy:4.1.42.Final", - "io.netty:netty-handler:4.1.42.Final", - "io.netty:netty-resolver:4.1.42.Final", - "io.netty:netty-tcnative-boringssl-static:2.0.26.Final", - "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.42.Final", - "io.netty:netty-transport:4.1.42.Final", + "io.netty:netty-buffer:4.1.45.Final", + "io.netty:netty-codec-http2:4.1.45.Final", + "io.netty:netty-codec-http:4.1.45.Final", + "io.netty:netty-codec-socks:4.1.45.Final", + "io.netty:netty-codec:4.1.45.Final", + "io.netty:netty-common:4.1.45.Final", + "io.netty:netty-handler-proxy:4.1.45.Final", + "io.netty:netty-handler:4.1.45.Final", + "io.netty:netty-resolver:4.1.45.Final", + "io.netty:netty-tcnative-boringssl-static:2.0.28.Final", + "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.45.Final", + "io.netty:netty-transport:4.1.45.Final", "io.opencensus:opencensus-api:0.24.0", "io.opencensus:opencensus-contrib-grpc-metrics:0.24.0", "io.perfmark:perfmark-api:0.19.0", @@ -319,108 +319,108 @@ def io_grpc_grpc_proto(): def io_netty_netty_buffer(): jvm_maven_import_external( name = "io_netty_netty_buffer", - artifact = "io.netty:netty-buffer:4.1.42.Final", + artifact = "io.netty:netty-buffer:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "7b0171a4e8bcd573e08d9f2bba053c67b557ab5012106a5982ccbae5743814c0", + artifact_sha256 = "8437b43e03c272093066837567e1b89019ef291f06f5ace1051017981d98d59f", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_codec(): jvm_maven_import_external( name = 
"io_netty_netty_codec", - artifact = "io.netty:netty-codec:4.1.42.Final", + artifact = "io.netty:netty-codec:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "e96ced697fb7df589da7c20c995e01f75a9cb246be242bbc4cd3b4af424ff189", + artifact_sha256 = "47e211ad8c4c2b809b6e04541d6c8e3893dea63918dabe93fa5cf63914ffc9cc", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_codec_http(): jvm_maven_import_external( name = "io_netty_netty_codec_http", - artifact = "io.netty:netty-codec-http:4.1.42.Final", + artifact = "io.netty:netty-codec-http:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "eb349c0f1b249af7c7a8fbbd1c761d65d9bc230880cd8d37feab9e8278292625", + artifact_sha256 = "db8d8bf478bd3ad723c3d23fdf1cbf62ab9d419a8636e17add3f82f51f8e0bc1", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_codec_http2(): jvm_maven_import_external( name = "io_netty_netty_codec_http2", - artifact = "io.netty:netty-codec-http2:4.1.42.Final", + artifact = "io.netty:netty-codec-http2:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "8bac9625eb68635396eb0c13c9cc0b22bde7c83d0cd2dae3fe9b6f9cf929e372", + artifact_sha256 = "09f82454ec9d4203b569c6027e11dd2f265aeba4bec5f61cef71845547e61faa", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_codec_socks(): jvm_maven_import_external( name = "io_netty_netty_codec_socks", - artifact = "io.netty:netty-codec-socks:4.1.42.Final", + artifact = "io.netty:netty-codec-socks:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "7f14b3a95ee9aa5a26f66af668690578a81a883683ac1c4ca9e9afdf4d4c7894", + artifact_sha256 = "f5aa6197d3df9009bbb889ada2b1ae09b23559ebe748030478652c05a5977a25", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_common(): 
jvm_maven_import_external( name = "io_netty_netty_common", - artifact = "io.netty:netty-common:4.1.42.Final", + artifact = "io.netty:netty-common:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "3d0a918d78292eeca02a7bb2188daa4e5053b6e29b71e6308309033e121242b5", + artifact_sha256 = "6f3c61684cf8c0f09df7ebb5a19df29d5d9fc175ce68ae237993b91366ccc43e", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_handler(): jvm_maven_import_external( name = "io_netty_netty_handler", - artifact = "io.netty:netty-handler:4.1.42.Final", + artifact = "io.netty:netty-handler:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "11eda86500c33b9d386719b5419f513fd9c097d13894f25dd0c75b610d636e03", + artifact_sha256 = "2ad6785ba22fb522dba8128a0599b3f5ee47c210dddb8d8ec678f7765ac406f0", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_handler_proxy(): jvm_maven_import_external( name = "io_netty_netty_handler_proxy", - artifact = "io.netty:netty-handler-proxy:4.1.42.Final", + artifact = "io.netty:netty-handler-proxy:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "25f22da21c29ab0d3b6b889412351bcfc5f9ccd42e07d2d5513d5c4eb571f343", + artifact_sha256 = "410fc065171e26bb9a24ed5f7f88b3200e641cb65605ec11bc9c7625da28429a", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_resolver(): jvm_maven_import_external( name = "io_netty_netty_resolver", - artifact = "io.netty:netty-resolver:4.1.42.Final", + artifact = "io.netty:netty-resolver:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "89768242b6b7cce9bd9f5945ad21d1b4bae515c6b1bf03a8af5d1899779cebc9", + artifact_sha256 = "1d762ecfa9da9241db339b611fad0529491bb0c3098c16e9c80d64f04d80323c", licenses = ["notice"], # Apache 2.0 ) def 
io_netty_netty_tcnative_boringssl_static(): jvm_maven_import_external( name = "io_netty_netty_tcnative_boringssl_static", - artifact = "io.netty:netty-tcnative-boringssl-static:2.0.26.Final", + artifact = "io.netty:netty-tcnative-boringssl-static:2.0.28.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "5f074a4b112bf7d087331e33d2da720745c5bda047b34b64bd70aaaae4de24c6", + artifact_sha256 = "2cf327d8185202804c998cc8ae9f493f483c5398c597d5385366c6fc9ad20aef", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_transport(): jvm_maven_import_external( name = "io_netty_netty_transport", - artifact = "io.netty:netty-transport:4.1.42.Final", + artifact = "io.netty:netty-transport:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "dfa817a156ea263aa9ad8364a2e226527665c9722aca40a7945f228c2c14f1da", + artifact_sha256 = "ca38fc85e9e18c4921d9ce92830445efad05d0fb3e8dd6ba3536e0843cdf723b", licenses = ["notice"], # Apache 2.0 ) def io_netty_netty_transport_native_epoll_linux_x86_64(): jvm_maven_import_external( name = "io_netty_netty_transport_native_epoll_linux_x86_64", - artifact = "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.42.Final", + artifact = "io.netty:netty-transport-native-epoll:jar:linux-x86_64:4.1.45.Final", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "7bdf3003d5b60b061b494e62d1bafc420caf800efb743b14ec01ceaef1d3fa3e", + artifact_sha256 = "3a9a5645177e20adef2fadb0feabba0d544e8f4403b2fc07353658717593af3e", licenses = ["notice"], # Apache 2.0 ) From 60bc74620fc88b321a46791241326939de70ed20 Mon Sep 17 00:00:00 2001 From: Georg Welzel Date: Fri, 7 Feb 2020 18:48:29 +0100 Subject: [PATCH 50/86] census: Set SpanKind on Client/Server traces (#6680) --- census/src/main/java/io/grpc/census/CensusTracingModule.java | 2 ++ 1 file changed, 2 insertions(+) diff 
--git a/census/src/main/java/io/grpc/census/CensusTracingModule.java b/census/src/main/java/io/grpc/census/CensusTracingModule.java index e9e12941ed4..e04db099d4d 100644 --- a/census/src/main/java/io/grpc/census/CensusTracingModule.java +++ b/census/src/main/java/io/grpc/census/CensusTracingModule.java @@ -238,6 +238,7 @@ final class ClientCallTracer extends ClientStreamTracer.Factory { generateTraceSpanName(false, method.getFullMethodName()), parentSpan) .setRecordEvents(true) + .setSpanKind(Span.Kind.CLIENT) .startSpan(); } @@ -308,6 +309,7 @@ private final class ServerTracer extends ServerStreamTracer { generateTraceSpanName(true, fullMethodName), remoteSpan) .setRecordEvents(true) + .setSpanKind(Span.Kind.SERVER) .startSpan(); } From 84b10350eb4b1d679849257a1415450828b29902 Mon Sep 17 00:00:00 2001 From: Eric Gribkoff Date: Sun, 9 Feb 2020 19:43:11 -0800 Subject: [PATCH 51/86] buildscripts,interop-testing: Add xDS tests on GCP (#6662) --- buildscripts/kokoro/xds.cfg | 5 +++++ buildscripts/kokoro/xds.sh | 25 +++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 buildscripts/kokoro/xds.cfg create mode 100755 buildscripts/kokoro/xds.sh diff --git a/buildscripts/kokoro/xds.cfg b/buildscripts/kokoro/xds.cfg new file mode 100644 index 00000000000..f96ec47da50 --- /dev/null +++ b/buildscripts/kokoro/xds.cfg @@ -0,0 +1,5 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. 
+build_file: "grpc-java/buildscripts/kokoro/xds.sh" +timeout_mins: 60 diff --git a/buildscripts/kokoro/xds.sh b/buildscripts/kokoro/xds.sh new file mode 100755 index 00000000000..52acc2978f4 --- /dev/null +++ b/buildscripts/kokoro/xds.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -exu -o pipefail +if [[ -f /VERSION ]]; then + cat /VERSION +fi + +sudo apt-get install -y python3-pip +sudo python3 -m pip install grpcio grpcio-tools google-api-python-client google-auth-httplib2 + +cd github + +pushd grpc-java/interop-testing +../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true +popd + +git clone https://github.com/ericgribkoff/grpc.git + +grpc/tools/run_tests/helper_scripts/prep_xds.sh +python3 grpc/tools/run_tests/run_xds_tests.py \ + --test_case=all \ + --project_id=grpc-testing \ + --gcp_suffix=$(date '+%s') \ + --verbose \ + --client_cmd='grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-client --server=xds-experimental:///{service_host}:{service_port} --stats_port={stats_port} --qps={qps}' From 255e5feb24f93bd10161dec5229ebef7b81afd0e Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Mon, 10 Feb 2020 10:58:05 -0800 Subject: [PATCH 52/86] Sync grpc-proto to 1ff78907 This noticed that load_balancer.proto had local changes introduced in #6549. This was not noticed by Bazel because grpclb was not using the io_grpc_grpc_proto repository. These issues have been fixed. 
--- alts/src/main/proto/grpc/gcp/handshaker.proto | 9 ++++++++ grpclb/BUILD.bazel | 21 +++---------------- .../main/proto/grpc/lb/v1/load_balancer.proto | 5 ++--- repositories.bzl | 6 +++--- 4 files changed, 17 insertions(+), 24 deletions(-) diff --git a/alts/src/main/proto/grpc/gcp/handshaker.proto b/alts/src/main/proto/grpc/gcp/handshaker.proto index 0248c8868ce..02764ba4c08 100644 --- a/alts/src/main/proto/grpc/gcp/handshaker.proto +++ b/alts/src/main/proto/grpc/gcp/handshaker.proto @@ -105,6 +105,9 @@ message StartClientHandshakeReq { // (Optional) RPC protocol versions supported by the client. RpcProtocolVersions rpc_versions = 9; + + // (Optional) Maximum frame size supported by the client. + uint32 max_frame_size = 10; } message ServerHandshakeParameters { @@ -143,6 +146,9 @@ message StartServerHandshakeReq { // (Optional) RPC protocol versions supported by the server. RpcProtocolVersions rpc_versions = 6; + + // (Optional) Maximum frame size supported by the server. + uint32 max_frame_size = 7; } message NextHandshakeMessageReq { @@ -190,6 +196,9 @@ message HandshakerResult { // The RPC protocol versions supported by the peer. RpcProtocolVersions peer_rpc_versions = 7; + + // The maximum frame size of the peer. 
+ uint32 max_frame_size = 8; } message HandshakerStatus { diff --git a/grpclb/BUILD.bazel b/grpclb/BUILD.bazel index b261225ac1d..b69fb234733 100644 --- a/grpclb/BUILD.bazel +++ b/grpclb/BUILD.bazel @@ -1,4 +1,3 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") load("//:java_grpc_library.bzl", "java_grpc_library") java_library( @@ -9,7 +8,6 @@ java_library( visibility = ["//visibility:public"], deps = [ ":load_balancer_java_grpc", - ":load_balancer_java_proto", "//api", "//core:internal", "//core:util", @@ -18,25 +16,12 @@ java_library( "@com_google_guava_guava//jar", "@com_google_j2objc_j2objc_annotations//jar", "@com_google_protobuf//:protobuf_java_util", + "@io_grpc_grpc_proto//:grpclb_load_balancer_java_proto", ], ) -proto_library( - name = "load_balancer_proto", - srcs = ["src/main/proto/grpc/lb/v1/load_balancer.proto"], - deps = [ - "@com_google_protobuf//:duration_proto", - "@com_google_protobuf//:timestamp_proto", - ], -) - -java_proto_library( - name = "load_balancer_java_proto", - deps = [":load_balancer_proto"], -) - java_grpc_library( name = "load_balancer_java_grpc", - srcs = [":load_balancer_proto"], - deps = [":load_balancer_java_proto"], + srcs = ["@io_grpc_grpc_proto//:grpclb_load_balancer_proto"], + deps = ["@io_grpc_grpc_proto//:grpclb_load_balancer_java_proto"], ) diff --git a/grpclb/src/main/proto/grpc/lb/v1/load_balancer.proto b/grpclb/src/main/proto/grpc/lb/v1/load_balancer.proto index 6f65d323af2..a9588b0db94 100644 --- a/grpclb/src/main/proto/grpc/lb/v1/load_balancer.proto +++ b/grpclb/src/main/proto/grpc/lb/v1/load_balancer.proto @@ -97,11 +97,12 @@ message LoadBalanceResponse { // If this field is set, then the client should eagerly enter fallback // mode (even if there are existing, healthy connections to backends). - // See go/grpclb-explicit-fallback for more details. 
FallbackResponse fallback_response = 3; } } +message FallbackResponse {} + message InitialLoadBalanceResponse { // This is an application layer redirect that indicates the client should use // the specified server for load balancing. When this field is non-empty in @@ -116,8 +117,6 @@ message InitialLoadBalanceResponse { google.protobuf.Duration client_stats_report_interval = 2; } -message FallbackResponse {} - message ServerList { // Contains a list of servers selected by the load balancer. The list will // be updated when server resolutions change or as needed to balance load diff --git a/repositories.bzl b/repositories.bzl index b4ee73a47e8..d8316da7e25 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -311,9 +311,9 @@ def com_squareup_okio_okio(): def io_grpc_grpc_proto(): http_archive( name = "io_grpc_grpc_proto", - sha256 = "9d96f861f01ed9e3d805024e72a6b218b626da2114c69c1cad5d0e967c8e23be", - strip_prefix = "grpc-proto-435d723289d348e1bc420d420b364369d565182a", - urls = ["https://github.com/grpc/grpc-proto/archive/435d723289d348e1bc420d420b364369d565182a.zip"], + sha256 = "e3f2bf5caa217b8a703acb52cc5a503f06545b6be7889e44ea428f29e7320e9d", + strip_prefix = "grpc-proto-1ff7890719cea62825beeff16d6c2c66cf51ea90", + urls = ["https://github.com/grpc/grpc-proto/archive/1ff7890719cea62825beeff16d6c2c66cf51ea90.zip"], ) def io_netty_netty_buffer(): From 043ab93b3bba333931660d94b5fd9f2a8fe8dbdb Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Tue, 11 Feb 2020 09:48:53 -0800 Subject: [PATCH 53/86] xds: fix bug of return resolution error twice in xDS resolver (#6671) --- xds/src/main/java/io/grpc/xds/XdsNameResolver.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index 6451df79588..cb1651363d8 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ 
b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -178,8 +178,9 @@ public void onError(Status error) { // a temporary solution. More design discussion needs to be done. if (error.getCode().equals(Code.NOT_FOUND)) { listener.onResult(ResolutionResult.newBuilder().build()); + return; } - listener.onError(error); + listener.onError(Status.UNAVAILABLE.withDescription(error.getDescription())); } }); } From 45bb403f8aa95d028023195873dc6b61d9367492 Mon Sep 17 00:00:00 2001 From: Eric Anderson Date: Tue, 28 Jan 2020 13:16:58 -0800 Subject: [PATCH 54/86] Update README etc to reference 1.27.0 --- README.md | 28 ++++++++++++------------ cronet/README.md | 2 +- documentation/android-channel-builder.md | 4 ++-- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 5f46db90523..9d8d449be16 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/quickstart/java.html) or the more explanatory [gRPC basics](https://grpc.io/docs/tutorials/basic/java.html). -The [examples](https://github.com/grpc/grpc-java/tree/v1.26.0/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.26.0/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.27.0/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.27.0/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -42,37 +42,37 @@ Download [the JARs][]. 
Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.26.0 + 1.27.0 io.grpc grpc-protobuf - 1.26.0 + 1.27.0 io.grpc grpc-stub - 1.26.0 + 1.27.0 ``` Or for Gradle with non-Android, add to your dependencies: ```gradle -implementation 'io.grpc:grpc-netty-shaded:1.26.0' -implementation 'io.grpc:grpc-protobuf:1.26.0' -implementation 'io.grpc:grpc-stub:1.26.0' +implementation 'io.grpc:grpc-netty-shaded:1.27.0' +implementation 'io.grpc:grpc-protobuf:1.27.0' +implementation 'io.grpc:grpc-stub:1.27.0' ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.26.0' -implementation 'io.grpc:grpc-protobuf-lite:1.26.0' -implementation 'io.grpc:grpc-stub:1.26.0' +implementation 'io.grpc:grpc-okhttp:1.27.0' +implementation 'io.grpc:grpc-protobuf-lite:1.27.0' +implementation 'io.grpc:grpc-stub:1.27.0' ``` [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.26.0 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.27.0 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -104,7 +104,7 @@ For protobuf-based codegen integrated with the Maven build system, you can use com.google.protobuf:protoc:3.11.0:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.26.0:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.27.0:exe:${os.detected.classifier} @@ -134,7 +134,7 @@ protobuf { } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.26.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0' } } generateProtoTasks { diff --git a/cronet/README.md b/cronet/README.md index c1e24835e13..563af0fd4ce 100644 --- a/cronet/README.md +++ b/cronet/README.md @@ -26,7 +26,7 @@ In your app module's `build.gradle` file, include a dependency on both `grpc-cro Google Play Services Client Library for Cronet ``` -implementation 'io.grpc:grpc-cronet:1.26.0' +implementation 'io.grpc:grpc-cronet:1.27.0' implementation 'com.google.android.gms:play-services-cronet:16.0.0' ``` diff --git a/documentation/android-channel-builder.md b/documentation/android-channel-builder.md index 2559a855733..9ae35e62940 100644 --- a/documentation/android-channel-builder.md +++ b/documentation/android-channel-builder.md @@ -36,8 +36,8 @@ In your `build.gradle` file, include a dependency on both `grpc-android` and `grpc-okhttp`: ``` -implementation 'io.grpc:grpc-android:1.26.0' -implementation 'io.grpc:grpc-okhttp:1.26.0' +implementation 'io.grpc:grpc-android:1.27.0' +implementation 'io.grpc:grpc-okhttp:1.27.0' ``` You also need permission to access the device's network state in your From 774f2763c970c756eeff65996a7921e8b8c800a6 Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Tue, 11 Feb 2020 10:27:47 -0800 Subject: [PATCH 55/86] grpclb: add serviceName config to grpclb policy config (#6563) --- .../java/io/grpc/grpclb/GrpclbConfig.java | 83 +++++++ .../io/grpc/grpclb/GrpclbLoadBalancer.java | 96 +------- .../grpclb/GrpclbLoadBalancerProvider.java | 68 ++---- .../main/java/io/grpc/grpclb/GrpclbState.java | 29 ++- 
.../GrpclbLoadBalancerProviderTest.java | 162 +++++++++++++ .../grpc/grpclb/GrpclbLoadBalancerTest.java | 228 +++++++++++++----- 6 files changed, 468 insertions(+), 198 deletions(-) create mode 100644 grpclb/src/main/java/io/grpc/grpclb/GrpclbConfig.java create mode 100644 grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerProviderTest.java diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbConfig.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbConfig.java new file mode 100644 index 00000000000..60f22a2e0e8 --- /dev/null +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbConfig.java @@ -0,0 +1,83 @@ +/* + * Copyright 2019 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.grpc.grpclb; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; +import com.google.common.base.Objects; +import io.grpc.grpclb.GrpclbState.Mode; +import javax.annotation.Nullable; + +final class GrpclbConfig { + + private final Mode mode; + @Nullable + private final String serviceName; + + private GrpclbConfig(Mode mode, @Nullable String serviceName) { + this.mode = checkNotNull(mode, "mode"); + this.serviceName = serviceName; + } + + static GrpclbConfig create(Mode mode) { + return create(mode, null); + } + + static GrpclbConfig create(Mode mode, @Nullable String serviceName) { + return new GrpclbConfig(mode, serviceName); + } + + Mode getMode() { + return mode; + } + + /** + * If specified, it overrides the name of the service to be sent to the balancer. If not, the + * target to be sent to the balancer will continue to be obtained from the target URI passed + * to the gRPC client channel. + */ + @Nullable + String getServiceName() { + return serviceName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GrpclbConfig that = (GrpclbConfig) o; + return mode == that.mode && Objects.equal(serviceName, that.serviceName); + } + + @Override + public int hashCode() { + return Objects.hashCode(mode, serviceName); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("mode", mode) + .add("serviceName", serviceName) + .toString(); + } +} diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java index 00af68f844b..39647b08c78 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancer.java @@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting; import 
com.google.common.base.Stopwatch; import io.grpc.Attributes; -import io.grpc.ChannelLogger; import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.ConnectivityStateInfo; import io.grpc.EquivalentAddressGroup; @@ -30,15 +29,10 @@ import io.grpc.Status; import io.grpc.grpclb.GrpclbState.Mode; import io.grpc.internal.BackoffPolicy; -import io.grpc.internal.ServiceConfigUtil; -import io.grpc.internal.ServiceConfigUtil.LbConfig; import io.grpc.internal.TimeProvider; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; -import java.util.logging.Level; -import java.util.logging.Logger; import javax.annotation.Nullable; /** @@ -48,8 +42,8 @@ * or round-robin balancer. */ class GrpclbLoadBalancer extends LoadBalancer { - private static final Mode DEFAULT_MODE = Mode.ROUND_ROBIN; - private static final Logger logger = Logger.getLogger(GrpclbLoadBalancer.class.getName()); + + private static final GrpclbConfig DEFAULT_CONFIG = GrpclbConfig.create(Mode.ROUND_ROBIN); private final Helper helper; private final TimeProvider time; @@ -57,7 +51,7 @@ class GrpclbLoadBalancer extends LoadBalancer { private final SubchannelPool subchannelPool; private final BackoffPolicy.Provider backoffPolicyProvider; - private Mode mode = Mode.ROUND_ROBIN; + private GrpclbConfig config = DEFAULT_CONFIG; // All mutable states in this class are mutated ONLY from Channel Executor @Nullable @@ -88,7 +82,6 @@ public void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo n } @Override - @SuppressWarnings("deprecation") // TODO(creamsoup) migrate to use parsed service config public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { Attributes attributes = resolvedAddresses.getAttributes(); List newLbAddresses = attributes.get(GrpclbConstants.ATTR_LB_ADDRS); @@ -114,11 +107,13 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { newLbAddressGroups = 
Collections.unmodifiableList(newLbAddressGroups); List newBackendServers = Collections.unmodifiableList(resolvedAddresses.getAddresses()); - Map rawLbConfigValue = attributes.get(ATTR_LOAD_BALANCING_CONFIG); - Mode newMode = retrieveModeFromLbConfig(rawLbConfigValue, helper.getChannelLogger()); - if (!mode.equals(newMode)) { - mode = newMode; - helper.getChannelLogger().log(ChannelLogLevel.INFO, "Mode: " + newMode); + GrpclbConfig newConfig = (GrpclbConfig) resolvedAddresses.getLoadBalancingPolicyConfig(); + if (newConfig == null) { + newConfig = DEFAULT_CONFIG; + } + if (!config.equals(newConfig)) { + config = newConfig; + helper.getChannelLogger().log(ChannelLogLevel.INFO, "Config: " + newConfig); recreateStates(); } grpclbState.handleAddresses(newLbAddressGroups, newBackendServers); @@ -131,40 +126,6 @@ public void requestConnection() { } } - @VisibleForTesting - static Mode retrieveModeFromLbConfig( - @Nullable Map rawLbConfigValue, ChannelLogger channelLogger) { - try { - if (rawLbConfigValue == null) { - return DEFAULT_MODE; - } - List rawChildPolicies = getList(rawLbConfigValue, "childPolicy"); - if (rawChildPolicies == null) { - return DEFAULT_MODE; - } - List childPolicies = - ServiceConfigUtil.unwrapLoadBalancingConfigList(checkObjectList(rawChildPolicies)); - for (LbConfig childPolicy : childPolicies) { - String childPolicyName = childPolicy.getPolicyName(); - switch (childPolicyName) { - case "round_robin": - return Mode.ROUND_ROBIN; - case "pick_first": - return Mode.PICK_FIRST; - default: - channelLogger.log( - ChannelLogLevel.DEBUG, - "grpclb ignoring unsupported child policy " + childPolicyName); - } - } - } catch (RuntimeException e) { - channelLogger.log(ChannelLogLevel.WARNING, "Bad grpclb config, using " + DEFAULT_MODE); - logger.log( - Level.WARNING, "Bad grpclb config: " + rawLbConfigValue + ", using " + DEFAULT_MODE, e); - } - return DEFAULT_MODE; - } - private void resetStates() { if (grpclbState != null) { grpclbState.shutdown(); @@ -175,8 
+136,8 @@ private void resetStates() { private void recreateStates() { resetStates(); checkState(grpclbState == null, "Should've been cleared"); - grpclbState = new GrpclbState(mode, helper, subchannelPool, time, stopwatch, - backoffPolicyProvider); + grpclbState = + new GrpclbState(config, helper, subchannelPool, time, stopwatch, backoffPolicyProvider); } @Override @@ -201,37 +162,4 @@ public boolean canHandleEmptyAddressListFromNameResolution() { GrpclbState getGrpclbState() { return grpclbState; } - - // TODO(carl-mastrangelo): delete getList and checkObjectList once apply is complete for SVCCFG. - /** - * Gets a list from an object for the given key. Copy of - * {@link io.grpc.internal.ServiceConfigUtil#getList}. - */ - @Nullable - private static List getList(Map obj, String key) { - assert key != null; - if (!obj.containsKey(key)) { - return null; - } - Object value = obj.get(key); - if (!(value instanceof List)) { - throw new ClassCastException( - String.format("value '%s' for key '%s' in %s is not List", value, key, obj)); - } - return (List) value; - } - - /** - * Copy of {@link io.grpc.internal.ServiceConfigUtil#checkObjectList}. 
- */ - @SuppressWarnings("unchecked") - private static List> checkObjectList(List rawList) { - for (int i = 0; i < rawList.size(); i++) { - if (!(rawList.get(i) instanceof Map)) { - throw new ClassCastException( - String.format("value %s for idx %d in %s is not object", rawList.get(i), i, rawList)); - } - } - return (List>) rawList; - } } diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java index 57d85282e32..e929616ce86 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbLoadBalancerProvider.java @@ -24,12 +24,13 @@ import io.grpc.Status; import io.grpc.grpclb.GrpclbState.Mode; import io.grpc.internal.ExponentialBackoffPolicy; +import io.grpc.internal.JsonUtil; import io.grpc.internal.ServiceConfigUtil; import io.grpc.internal.ServiceConfigUtil.LbConfig; import io.grpc.internal.TimeProvider; +import java.util.ArrayList; import java.util.List; import java.util.Map; -import javax.annotation.Nullable; /** * The provider for the "grpclb" balancing policy. 
This class should not be directly referenced in @@ -38,6 +39,7 @@ */ @Internal public final class GrpclbLoadBalancerProvider extends LoadBalancerProvider { + private static final Mode DEFAULT_MODE = Mode.ROUND_ROBIN; @Override @@ -78,57 +80,37 @@ public ConfigOrError parseLoadBalancingPolicyConfig( ConfigOrError parseLoadBalancingConfigPolicyInternal( Map rawLoadBalancingPolicyConfig) { if (rawLoadBalancingPolicyConfig == null) { - return ConfigOrError.fromConfig(DEFAULT_MODE); + return ConfigOrError.fromConfig(GrpclbConfig.create(DEFAULT_MODE)); + } + String serviceName = JsonUtil.getString(rawLoadBalancingPolicyConfig, "serviceName"); + List rawChildPolicies = JsonUtil.getList(rawLoadBalancingPolicyConfig, "childPolicy"); + List childPolicies = null; + if (rawChildPolicies != null) { + childPolicies = + ServiceConfigUtil + .unwrapLoadBalancingConfigList(JsonUtil.checkObjectList(rawChildPolicies)); } - List rawChildPolicies = getList(rawLoadBalancingPolicyConfig, "childPolicy"); - if (rawChildPolicies == null) { - return ConfigOrError.fromConfig(DEFAULT_MODE); + + if (childPolicies == null || childPolicies.isEmpty()) { + return ConfigOrError.fromConfig(GrpclbConfig.create(DEFAULT_MODE, serviceName)); } - List childPolicies = - ServiceConfigUtil.unwrapLoadBalancingConfigList(checkObjectList(rawChildPolicies)); + + List policiesTried = new ArrayList<>(); for (LbConfig childPolicy : childPolicies) { String childPolicyName = childPolicy.getPolicyName(); switch (childPolicyName) { case "round_robin": - return ConfigOrError.fromConfig(Mode.ROUND_ROBIN); + return ConfigOrError.fromConfig(GrpclbConfig.create(Mode.ROUND_ROBIN, serviceName)); case "pick_first": - return ConfigOrError.fromConfig(Mode.PICK_FIRST); + return ConfigOrError.fromConfig(GrpclbConfig.create(Mode.PICK_FIRST, serviceName)); default: - // TODO(zhangkun83): maybe log? - } - } - return ConfigOrError.fromConfig(DEFAULT_MODE); - } - - /** - * Gets a list from an object for the given key. 
Copy of - * {@link io.grpc.internal.ServiceConfigUtil#getList}. - */ - @Nullable - private static List getList(Map obj, String key) { - assert key != null; - if (!obj.containsKey(key)) { - return null; - } - Object value = obj.get(key); - if (!(value instanceof List)) { - throw new ClassCastException( - String.format("value '%s' for key '%s' in %s is not List", value, key, obj)); - } - return (List) value; - } - - /** - * Copy of {@link io.grpc.internal.ServiceConfigUtil#checkObjectList}. - */ - @SuppressWarnings("unchecked") - private static List> checkObjectList(List rawList) { - for (int i = 0; i < rawList.size(); i++) { - if (!(rawList.get(i) instanceof Map)) { - throw new ClassCastException( - String.format("value %s for idx %d in %s is not object", rawList.get(i), i, rawList)); + policiesTried.add(childPolicyName); } } - return (List>) rawList; + return ConfigOrError.fromError( + Status + .INVALID_ARGUMENT + .withDescription( + "None of " + policiesTried + " specified child policies are available.")); } } diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java index 4016bc56bc2..e767aac1551 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java @@ -142,7 +142,7 @@ static enum Mode { @Nullable private LbStream lbStream; private Map, Subchannel> subchannels = Collections.emptyMap(); - private final Mode mode; + private final GrpclbConfig config; // Has the same size as the round-robin list from the balancer. // A drop entry from the round-robin list becomes a DropEntry here. 
@@ -154,22 +154,27 @@ static enum Mode { new RoundRobinPicker(Collections.emptyList(), Arrays.asList(BUFFER_ENTRY)); GrpclbState( - Mode mode, + GrpclbConfig config, Helper helper, SubchannelPool subchannelPool, TimeProvider time, Stopwatch stopwatch, BackoffPolicy.Provider backoffPolicyProvider) { - this.mode = checkNotNull(mode, "mode"); + this.config = checkNotNull(config, "config"); this.helper = checkNotNull(helper, "helper"); this.syncContext = checkNotNull(helper.getSynchronizationContext(), "syncContext"); this.subchannelPool = - mode == Mode.ROUND_ROBIN ? checkNotNull(subchannelPool, "subchannelPool") : null; + config.getMode() == Mode.ROUND_ROBIN + ? checkNotNull(subchannelPool, "subchannelPool") : null; this.time = checkNotNull(time, "time provider"); this.stopwatch = checkNotNull(stopwatch, "stopwatch"); this.timerService = checkNotNull(helper.getScheduledExecutorService(), "timerService"); this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); - this.serviceName = checkNotNull(helper.getAuthority(), "helper returns null authority"); + if (config.getServiceName() != null) { + this.serviceName = config.getServiceName(); + } else { + this.serviceName = checkNotNull(helper.getAuthority(), "helper returns null authority"); + } this.logger = checkNotNull(helper.getChannelLogger(), "logger"); } @@ -183,7 +188,7 @@ void handleSubchannelState(Subchannel subchannel, ConnectivityStateInfo newState } return; } - if (mode == Mode.ROUND_ROBIN && newState.getState() == IDLE) { + if (config.getMode() == Mode.ROUND_ROBIN && newState.getState() == IDLE) { subchannel.requestConnection(); } subchannel.getAttributes().get(STATE_INFO).set(newState); @@ -328,7 +333,7 @@ private void cancelLbRpcRetryTimer() { void shutdown() { shutdownLbComm(); - switch (mode) { + switch (config.getMode()) { case ROUND_ROBIN: // We close the subchannels through subchannelPool instead of helper just for convenience of // testing. 
@@ -344,7 +349,7 @@ void shutdown() { } break; default: - throw new AssertionError("Missing case for " + mode); + throw new AssertionError("Missing case for " + config.getMode()); } subchannels = Collections.emptyMap(); cancelFallbackTimer(); @@ -385,7 +390,7 @@ private void useRoundRobinLists( new HashMap<>(); List newBackendList = new ArrayList<>(); - switch (mode) { + switch (config.getMode()) { case ROUND_ROBIN: for (BackendAddressGroup backendAddr : newBackendAddrList) { EquivalentAddressGroup eag = backendAddr.getAddresses(); @@ -448,7 +453,7 @@ private void useRoundRobinLists( new BackendEntry(subchannel, new TokenAttachingTracerFactory(loadRecorder))); break; default: - throw new AssertionError("Missing case for " + mode); + throw new AssertionError("Missing case for " + config.getMode()); } dropList = Collections.unmodifiableList(newDropList); @@ -693,7 +698,7 @@ private void cleanUp() { private void maybeUpdatePicker() { List pickList; ConnectivityState state; - switch (mode) { + switch (config.getMode()) { case ROUND_ROBIN: pickList = new ArrayList<>(backendList.size()); Status error = null; @@ -751,7 +756,7 @@ private void maybeUpdatePicker() { } break; default: - throw new AssertionError("Missing case for " + mode); + throw new AssertionError("Missing case for " + config.getMode()); } maybeUpdatePicker(state, new RoundRobinPicker(dropList, pickList)); } diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerProviderTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerProviderTest.java new file mode 100644 index 00000000000..dda9700f64a --- /dev/null +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerProviderTest.java @@ -0,0 +1,162 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.grpclb; + +import static com.google.common.truth.Truth.assertThat; + +import io.grpc.NameResolver.ConfigOrError; +import io.grpc.grpclb.GrpclbState.Mode; +import io.grpc.internal.JsonParser; +import java.util.Map; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class GrpclbLoadBalancerProviderTest { + private final GrpclbLoadBalancerProvider provider = new GrpclbLoadBalancerProvider(); + + @Test + public void retrieveModeFromLbConfig_pickFirst() throws Exception { + String lbConfig = "{\"childPolicy\" : [{\"pick_first\" : {}}, {\"round_robin\" : {}}]}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.PICK_FIRST); + assertThat(config.getServiceName()).isNull(); + } + + @Test + public void retrieveModeFromLbConfig_roundRobin() throws Exception { + String lbConfig = "{\"childPolicy\" : [{\"round_robin\" : {}}, {\"pick_first\" : {}}]}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.ROUND_ROBIN); + assertThat(config.getServiceName()).isNull(); + } + + @Test + public void 
retrieveModeFromLbConfig_nullConfigUseRoundRobin() throws Exception { + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(null); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.ROUND_ROBIN); + assertThat(config.getServiceName()).isNull(); + } + + @Test + public void retrieveModeFromLbConfig_emptyConfigUseRoundRobin() throws Exception { + String lbConfig = "{}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.ROUND_ROBIN); + assertThat(config.getServiceName()).isNull(); + } + + @Test + public void retrieveModeFromLbConfig_emptyChildPolicyUseRoundRobin() throws Exception { + String lbConfig = "{\"childPolicy\" : []}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.ROUND_ROBIN); + assertThat(config.getServiceName()).isNull(); + } + + @Test + public void retrieveModeFromLbConfig_unsupportedChildPolicy() + throws Exception { + String lbConfig = "{\"childPolicy\" : [ {\"nonono\" : {}} ]}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getError()).isNotNull(); + assertThat(configOrError.getError().getDescription()) + .contains("None of [nonono] specified child policies are available."); + } + + @Test + public void retrieveModeFromLbConfig_skipUnsupportedChildPolicy() throws Exception { + String lbConfig = "{\"childPolicy\" : [ {\"nono\" : {}}, {\"pick_first\" : {} } ]}"; + + ConfigOrError configOrError = + 
provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.PICK_FIRST); + assertThat(config.getServiceName()).isNull(); + } + + @Test + public void retrieveModeFromLbConfig_skipUnsupportedChildPolicyWithTarget() throws Exception { + String lbConfig = "{\"childPolicy\" : [ {\"nono\" : {}}, {\"pick_first\" : {}} ]," + + "\"serviceName\": \"foo.google.com\"}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getConfig()).isNotNull(); + GrpclbConfig config = (GrpclbConfig) configOrError.getConfig(); + assertThat(config.getMode()).isEqualTo(Mode.PICK_FIRST); + assertThat(config.getServiceName()).isEqualTo("foo.google.com"); + } + + @Test + public void retrieveModeFromLbConfig_wrongChildPolicyType() throws Exception { + String lbConfig = "{\"childPolicy\" : {}}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getError()).isNotNull(); + assertThat(configOrError.getError().getCause()).hasMessageThat().contains("is not List"); + } + + @Test + public void retrieveModeFromLbConfig_wrongChildPolicyTypeWithTarget() throws Exception { + String lbConfig = "{\"childPolicy\" : {}, \"serviceName\": \"foo.google.com\"}"; + + ConfigOrError configOrError = + provider.parseLoadBalancingPolicyConfig(parseJsonObject(lbConfig)); + + assertThat(configOrError.getError()).isNotNull(); + assertThat(configOrError.getError().getCause()).hasMessageThat().contains("is not List"); + } + + @SuppressWarnings("unchecked") + private static Map parseJsonObject(String json) throws Exception { + return (Map) JsonParser.parse(json); + } +} diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java 
b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java index db424a4a8e3..6d9abc70c43 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java @@ -22,7 +22,6 @@ import static io.grpc.ConnectivityState.READY; import static io.grpc.ConnectivityState.SHUTDOWN; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.grpclb.GrpclbLoadBalancer.retrieveModeFromLbConfig; import static io.grpc.grpclb.GrpclbState.BUFFER_ENTRY; import static io.grpc.grpclb.GrpclbState.DROP_PICK_RESULT; import static org.junit.Assert.assertEquals; @@ -181,6 +180,8 @@ public void uncaughtException(Thread t, Throwable e) { throw new AssertionError(e); } }); + private final GrpclbLoadBalancerProvider grpclbLoadBalancerProvider = + new GrpclbLoadBalancerProvider(); private static final ClientStreamTracer.StreamInfo STREAM_INFO = ClientStreamTracer.StreamInfo.newBuilder().build(); @@ -2057,74 +2058,167 @@ private static Attributes eagAttrsWithToken(String token) { } @Test - public void retrieveModeFromLbConfig_pickFirst() throws Exception { - String lbConfig = "{\"childPolicy\" : [{\"pick_first\" : {}}, {\"round_robin\" : {}}]}"; + @SuppressWarnings("deprecation") + public void switchMode_nullLbPolicy() throws Exception { + InOrder inOrder = inOrder(helper); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).isEmpty(); - assertThat(mode).isEqualTo(Mode.PICK_FIRST); - } + final List grpclbBalancerList = createResolvedBalancerAddresses(1); + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, + Attributes.EMPTY, + /* grpclbConfig= */ null); - @Test - public void retrieveModeFromLbConfig_roundRobin() throws Exception { - String lbConfig = "{\"childPolicy\" : [{\"round_robin\" : {}}, {\"pick_first\" : {}}]}"; + assertEquals(1, fakeOobChannels.size()); + ManagedChannel oobChannel = fakeOobChannels.poll(); + 
verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); + StreamObserver lbResponseObserver = lbResponseObserverCaptor.getValue(); + assertEquals(1, lbRequestObservers.size()); + StreamObserver lbRequestObserver = lbRequestObservers.poll(); + verify(lbRequestObserver).onNext( + eq(LoadBalanceRequest.newBuilder().setInitialRequest( + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + .build())); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).isEmpty(); - assertThat(mode).isEqualTo(Mode.ROUND_ROBIN); - } + // Simulate receiving LB response + List backends1 = Arrays.asList( + new ServerEntry("127.0.0.1", 2000, "token0001"), + new ServerEntry("127.0.0.1", 2010, "token0002")); + inOrder.verify(helper, never()) + .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); + lbResponseObserver.onNext(buildInitialResponse()); + lbResponseObserver.onNext(buildLbResponse(backends1)); - @Test - public void retrieveModeFromLbConfig_nullConfigUseRoundRobin() throws Exception { - Mode mode = retrieveModeFromLbConfig(null, channelLogger); - assertThat(logs).isEmpty(); - assertThat(mode).isEqualTo(Mode.ROUND_ROBIN); - } + // ROUND_ROBIN: create one subchannel per server + verify(subchannelPool).takeOrCreateSubchannel( + eq(new EquivalentAddressGroup(backends1.get(0).addr, LB_BACKEND_ATTRS)), + any(Attributes.class)); + verify(subchannelPool).takeOrCreateSubchannel( + eq(new EquivalentAddressGroup(backends1.get(1).addr, LB_BACKEND_ATTRS)), + any(Attributes.class)); + inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class)); + assertEquals(2, mockSubchannels.size()); + Subchannel subchannel1 = mockSubchannels.poll(); + Subchannel subchannel2 = mockSubchannels.poll(); + verify(subchannelPool, never()) + .returnSubchannel(any(Subchannel.class), any(ConnectivityStateInfo.class)); - @Test - public void 
retrieveModeFromLbConfig_emptyConfigUseRoundRobin() throws Exception { - String lbConfig = "{}"; + // Switch to PICK_FIRST + deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, + Attributes.EMPTY, + GrpclbConfig.create(Mode.PICK_FIRST)); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).isEmpty(); - assertThat(mode).isEqualTo(Mode.ROUND_ROBIN); - } + // GrpclbState will be shutdown, and a new one will be created + assertThat(oobChannel.isShutdown()).isTrue(); + verify(subchannelPool) + .returnSubchannel(same(subchannel1), eq(ConnectivityStateInfo.forNonError(IDLE))); + verify(subchannelPool) + .returnSubchannel(same(subchannel2), eq(ConnectivityStateInfo.forNonError(IDLE))); - @Test - public void retrieveModeFromLbConfig_emptyChildPolicyUseRoundRobin() throws Exception { - String lbConfig = "{\"childPolicy\" : []}"; + // A new LB stream is created + assertEquals(1, fakeOobChannels.size()); + verify(mockLbService, times(2)).balanceLoad(lbResponseObserverCaptor.capture()); + lbResponseObserver = lbResponseObserverCaptor.getValue(); + assertEquals(1, lbRequestObservers.size()); + lbRequestObserver = lbRequestObservers.poll(); + verify(lbRequestObserver).onNext( + eq(LoadBalanceRequest.newBuilder().setInitialRequest( + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + .build())); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).isEmpty(); - assertThat(mode).isEqualTo(Mode.ROUND_ROBIN); - } + // Simulate receiving LB response + inOrder.verify(helper, never()) + .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); + lbResponseObserver.onNext(buildInitialResponse()); + lbResponseObserver.onNext(buildLbResponse(backends1)); - @Test - public void retrieveModeFromLbConfig_unsupportedChildPolicyUseRoundRobin() - throws Exception { - String lbConfig = "{\"childPolicy\" : [ {\"nonono\" : {}} ]}"; + 
// PICK_FIRST Subchannel + // TODO(zhangkun83): remove the deprecation suppression on this method once migrated to + // the new createSubchannel(). + inOrder.verify(helper).createSubchannel( + eq(Arrays.asList( + new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), + new EquivalentAddressGroup(backends1.get(1).addr, eagAttrsWithToken("token0002")))), + any(Attributes.class)); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).containsExactly("DEBUG: grpclb ignoring unsupported child policy nonono"); - assertThat(mode).isEqualTo(Mode.ROUND_ROBIN); + inOrder.verify(helper).updateBalancingState(eq(IDLE), any(SubchannelPicker.class)); } + @SuppressWarnings("deprecation") @Test - public void retrieveModeFromLbConfig_skipUnsupportedChildPolicy() throws Exception { - String lbConfig = "{\"childPolicy\" : [ {\"nono\" : {}}, {\"pick_first\" : {} } ]}"; + public void switchServiceName() throws Exception { + InOrder inOrder = inOrder(helper); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).containsExactly("DEBUG: grpclb ignoring unsupported child policy nono"); - assertThat(mode).isEqualTo(Mode.PICK_FIRST); - } + String lbConfig = "{\"serviceName\": \"foo.google.com\"}"; + List grpclbBalancerList = createResolvedBalancerAddresses(1); + Attributes grpclbResolutionAttrs = Attributes.newBuilder() + .set(LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)) + .build(); - @Test - public void retrieveModeFromLbConfig_badConfigDefaultToRoundRobin() throws Exception { - String lbConfig = "{\"childPolicy\" : {}}"; + deliverResolvedAddresses( + Collections.emptyList(), grpclbBalancerList, grpclbResolutionAttrs); + + assertEquals(1, fakeOobChannels.size()); + ManagedChannel oobChannel = fakeOobChannels.poll(); + verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); + StreamObserver lbResponseObserver = 
lbResponseObserverCaptor.getValue(); + assertEquals(1, lbRequestObservers.size()); + StreamObserver lbRequestObserver = lbRequestObservers.poll(); + verify(lbRequestObserver).onNext( + eq(LoadBalanceRequest.newBuilder().setInitialRequest( + InitialLoadBalanceRequest.newBuilder().setName("foo.google.com").build()) + .build())); + + // Simulate receiving LB response + List backends1 = Arrays.asList( + new ServerEntry("127.0.0.1", 2000, "token0001"), + new ServerEntry("127.0.0.1", 2010, "token0002")); + inOrder.verify(helper, never()) + .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); + lbResponseObserver.onNext(buildInitialResponse()); + lbResponseObserver.onNext(buildLbResponse(backends1)); - Mode mode = retrieveModeFromLbConfig(parseJsonObject(lbConfig), channelLogger); - assertThat(logs).containsExactly("WARNING: Bad grpclb config, using ROUND_ROBIN"); - assertThat(mode).isEqualTo(Mode.ROUND_ROBIN); + // ROUND_ROBIN: create one subchannel per server + verify(subchannelPool).takeOrCreateSubchannel( + eq(new EquivalentAddressGroup(backends1.get(0).addr, LB_BACKEND_ATTRS)), + any(Attributes.class)); + verify(subchannelPool).takeOrCreateSubchannel( + eq(new EquivalentAddressGroup(backends1.get(1).addr, LB_BACKEND_ATTRS)), + any(Attributes.class)); + inOrder.verify(helper).updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class)); + assertEquals(2, mockSubchannels.size()); + Subchannel subchannel1 = mockSubchannels.poll(); + Subchannel subchannel2 = mockSubchannels.poll(); + verify(subchannelPool, never()) + .returnSubchannel(any(Subchannel.class), any(ConnectivityStateInfo.class)); + + // Switch to different serviceName + lbConfig = "{\"serviceName\": \"bar.google.com\"}"; + grpclbResolutionAttrs = Attributes.newBuilder().set( + LoadBalancer.ATTR_LOAD_BALANCING_CONFIG, parseJsonObject(lbConfig)).build(); + List newGrpclbResolutionList = createResolvedBalancerAddresses(1); + deliverResolvedAddresses( + Collections.emptyList(), + 
newGrpclbResolutionList, + grpclbResolutionAttrs); + + // GrpclbState will be shutdown, and a new one will be created + assertThat(oobChannel.isShutdown()).isTrue(); + verify(subchannelPool) + .returnSubchannel(same(subchannel1), eq(ConnectivityStateInfo.forNonError(IDLE))); + verify(subchannelPool) + .returnSubchannel(same(subchannel2), eq(ConnectivityStateInfo.forNonError(IDLE))); + + assertEquals(1, fakeOobChannels.size()); + verify(mockLbService, times(2)).balanceLoad(lbResponseObserverCaptor.capture()); + assertEquals(1, lbRequestObservers.size()); + lbRequestObserver = lbRequestObservers.poll(); + verify(lbRequestObserver).onNext( + eq(LoadBalanceRequest.newBuilder().setInitialRequest( + InitialLoadBalanceRequest.newBuilder().setName("bar.google.com").build()) + .build())); } @Test @@ -2352,21 +2446,37 @@ public void run() { }); } + @SuppressWarnings("deprecation") // TODO(creamsoup) migrate test cases to use GrpclbConfig. private void deliverResolvedAddresses( - final List backendAddrs, - final List balancerAddrs, + List backendAddrs, + List balancerAddrs, Attributes attrs) { - if (!balancerAddrs.isEmpty()) { - attrs = attrs.toBuilder().set(GrpclbConstants.ATTR_LB_ADDRS, balancerAddrs).build(); + GrpclbConfig grpclbConfig; + Map lbJsonMap = attrs.get(LoadBalancer.ATTR_LOAD_BALANCING_CONFIG); + if (lbJsonMap != null) { + grpclbConfig = (GrpclbConfig) grpclbLoadBalancerProvider + .parseLoadBalancingPolicyConfig(lbJsonMap).getConfig(); + } else { + grpclbConfig = GrpclbConfig.create(Mode.ROUND_ROBIN); } - final Attributes finalAttrs = attrs; + deliverResolvedAddresses(backendAddrs, balancerAddrs, attrs, grpclbConfig); + } + + private void deliverResolvedAddresses( + final List backendAddrs, + List balancerAddrs, + Attributes attributes, + final GrpclbConfig grpclbConfig) { + final Attributes attrs = + attributes.toBuilder().set(GrpclbConstants.ATTR_LB_ADDRS, balancerAddrs).build(); syncContext.execute(new Runnable() { @Override public void run() { 
balancer.handleResolvedAddresses( ResolvedAddresses.newBuilder() .setAddresses(backendAddrs) - .setAttributes(finalAttrs) + .setAttributes(attrs) + .setLoadBalancingPolicyConfig(grpclbConfig) .build()); } }); From 659987185f9369f6d53edd2d9cfab2461409f910 Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Tue, 11 Feb 2020 10:58:32 -0800 Subject: [PATCH 56/86] core: revert stickiness from round robin (#6698) --- .../io/grpc/internal/ServiceConfigUtil.java | 9 - .../io/grpc/util/RoundRobinLoadBalancer.java | 160 +------- .../grpc/util/RoundRobinLoadBalancerTest.java | 382 +----------------- 3 files changed, 13 insertions(+), 538 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java b/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java index e677bb6f1af..a7dbfbd673f 100644 --- a/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java +++ b/core/src/main/java/io/grpc/internal/ServiceConfigUtil.java @@ -314,15 +314,6 @@ public static List unwrapLoadBalancingConfigList(List> return Collections.unmodifiableList(result); } - /** - * Extracts the stickiness metadata key from a service config, or {@code null}. - */ - @Nullable - public static String getStickinessMetadataKeyFromServiceConfig( - Map serviceConfig) { - return JsonUtil.getString(serviceConfig, "stickinessMetadataKey"); - } - /** * A LoadBalancingConfig that includes the policy name (the key) and its raw config value (parsed * JSON). 
diff --git a/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java b/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java index 82d803a8294..c8cb2084b3b 100644 --- a/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java +++ b/core/src/main/java/io/grpc/util/RoundRobinLoadBalancer.java @@ -27,20 +27,13 @@ import com.google.common.base.MoreObjects; import com.google.common.base.Objects; import com.google.common.base.Preconditions; - import io.grpc.Attributes; -import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.ConnectivityState; import io.grpc.ConnectivityStateInfo; import io.grpc.EquivalentAddressGroup; import io.grpc.LoadBalancer; -import io.grpc.LoadBalancer.SubchannelStateListener; -import io.grpc.Metadata; -import io.grpc.Metadata.Key; import io.grpc.NameResolver; import io.grpc.Status; -import io.grpc.internal.GrpcAttributes; -import io.grpc.internal.ServiceConfigUtil; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -48,15 +41,10 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Queue; import java.util.Random; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import javax.annotation.Nonnull; -import javax.annotation.Nullable; /** * A {@link LoadBalancer} that provides round-robin load-balancing over the {@link @@ -66,8 +54,6 @@ final class RoundRobinLoadBalancer extends LoadBalancer { @VisibleForTesting static final Attributes.Key> STATE_INFO = Attributes.Key.create("state-info"); - // package-private to avoid synthetic access - static final Attributes.Key> STICKY_REF = Attributes.Key.create("sticky-ref"); private final Helper helper; private final Map subchannels = @@ -77,40 +63,18 @@ final class RoundRobinLoadBalancer extends LoadBalancer { private ConnectivityState 
currentState; private RoundRobinPicker currentPicker = new EmptyPicker(EMPTY_OK); - @Nullable - private StickinessState stickinessState; - RoundRobinLoadBalancer(Helper helper) { this.helper = checkNotNull(helper, "helper"); this.random = new Random(); } @Override - @SuppressWarnings("deprecation") public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { List servers = resolvedAddresses.getAddresses(); - Attributes attributes = resolvedAddresses.getAttributes(); Set currentAddrs = subchannels.keySet(); Map latestAddrs = stripAttrs(servers); Set removedAddrs = setsDifference(currentAddrs, latestAddrs.keySet()); - Map serviceConfig = attributes.get(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG); - if (serviceConfig != null) { - String stickinessMetadataKey = - ServiceConfigUtil.getStickinessMetadataKeyFromServiceConfig(serviceConfig); - if (stickinessMetadataKey != null) { - if (stickinessMetadataKey.endsWith(Metadata.BINARY_HEADER_SUFFIX)) { - helper.getChannelLogger().log( - ChannelLogLevel.WARNING, - "Binary stickiness header is not supported. 
The header \"{0}\" will be ignored", - stickinessMetadataKey); - } else if (stickinessState == null - || !stickinessState.key.name().equals(stickinessMetadataKey)) { - stickinessState = new StickinessState(stickinessMetadataKey); - } - } - } - for (Map.Entry latestEntry : latestAddrs.entrySet()) { EquivalentAddressGroup strippedAddressGroup = latestEntry.getKey(); @@ -133,11 +97,6 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { .set(STATE_INFO, new Ref<>(ConnectivityStateInfo.forNonError(IDLE))); - Ref stickyRef = null; - if (stickinessState != null) { - subchannelAttrs.set(STICKY_REF, stickyRef = new Ref<>(null)); - } - final Subchannel subchannel = checkNotNull( helper.createSubchannel(CreateSubchannelArgs.newBuilder() .setAddresses(originalAddressGroup) @@ -150,9 +109,6 @@ public void onSubchannelState(ConnectivityStateInfo state) { processSubchannelState(subchannel, state); } }); - if (stickyRef != null) { - stickyRef.value = subchannel; - } subchannels.put(strippedAddressGroup, subchannel); subchannel.requestConnection(); } @@ -183,9 +139,6 @@ private void processSubchannelState(Subchannel subchannel, ConnectivityStateInfo if (subchannels.get(stripAttrs(subchannel.getAddresses())) != subchannel) { return; } - if (stateInfo.getState() == SHUTDOWN && stickinessState != null) { - stickinessState.remove(subchannel); - } if (stateInfo.getState() == IDLE) { subchannel.requestConnection(); } @@ -197,9 +150,6 @@ private void shutdownSubchannel(Subchannel subchannel) { subchannel.shutdown(); getSubchannelStateInfoRef(subchannel).value = ConnectivityStateInfo.forNonError(SHUTDOWN); - if (stickinessState != null) { - stickinessState.remove(subchannel); - } } @Override @@ -241,7 +191,7 @@ private void updateBalancingState() { // initialize the Picker to a random start index to ensure that a high frequency of Picker // churn does not skew subchannel selection. 
int startIndex = random.nextInt(activeList.size()); - updateBalancingState(READY, new ReadyPicker(activeList, startIndex, stickinessState)); + updateBalancingState(READY, new ReadyPicker(activeList, startIndex)); } } @@ -305,90 +255,6 @@ private static Set setsDifference(Set a, Set b) { return aCopy; } - Map> getStickinessMapForTest() { - if (stickinessState == null) { - return null; - } - return stickinessState.stickinessMap; - } - - /** - * Holds stickiness related states: The stickiness key, a registry mapping stickiness values to - * the associated Subchannel Ref, and a map from Subchannel to Subchannel Ref. - */ - @VisibleForTesting - static final class StickinessState { - static final int MAX_ENTRIES = 1000; - - final Key key; - final ConcurrentMap> stickinessMap = - new ConcurrentHashMap<>(); - - final Queue evictionQueue = new ConcurrentLinkedQueue<>(); - - StickinessState(@Nonnull String stickinessKey) { - this.key = Key.of(stickinessKey, Metadata.ASCII_STRING_MARSHALLER); - } - - /** - * Returns the subchannel associated to the stickiness value if available in both the - * registry and the round robin list, otherwise associates the given subchannel with the - * stickiness key in the registry and returns the given subchannel. 
- */ - @Nonnull - Subchannel maybeRegister( - String stickinessValue, @Nonnull Subchannel subchannel) { - final Ref newSubchannelRef = subchannel.getAttributes().get(STICKY_REF); - while (true) { - Ref existingSubchannelRef = - stickinessMap.putIfAbsent(stickinessValue, newSubchannelRef); - if (existingSubchannelRef == null) { - // new entry - addToEvictionQueue(stickinessValue); - return subchannel; - } else { - // existing entry - Subchannel existingSubchannel = existingSubchannelRef.value; - if (existingSubchannel != null && isReady(existingSubchannel)) { - return existingSubchannel; - } - } - // existingSubchannelRef is not null but no longer valid, replace it - if (stickinessMap.replace(stickinessValue, existingSubchannelRef, newSubchannelRef)) { - return subchannel; - } - // another thread concurrently removed or updated the entry, try again - } - } - - private void addToEvictionQueue(String value) { - String oldValue; - while (stickinessMap.size() >= MAX_ENTRIES && (oldValue = evictionQueue.poll()) != null) { - stickinessMap.remove(oldValue); - } - evictionQueue.add(value); - } - - /** - * Unregister the subchannel from StickinessState. - */ - void remove(Subchannel subchannel) { - subchannel.getAttributes().get(STICKY_REF).value = null; - } - - /** - * Gets the subchannel associated with the stickiness value if there is. 
- */ - @Nullable - Subchannel getSubchannel(String stickinessValue) { - Ref subchannelRef = stickinessMap.get(stickinessValue); - if (subchannelRef != null) { - return subchannelRef.value; - } - return null; - } - } - // Only subclasses are ReadyPicker or EmptyPicker private abstract static class RoundRobinPicker extends SubchannelPicker { abstract boolean isEquivalentTo(RoundRobinPicker picker); @@ -400,33 +266,18 @@ static final class ReadyPicker extends RoundRobinPicker { AtomicIntegerFieldUpdater.newUpdater(ReadyPicker.class, "index"); private final List list; // non-empty - @Nullable - private final RoundRobinLoadBalancer.StickinessState stickinessState; @SuppressWarnings("unused") private volatile int index; - ReadyPicker(List list, int startIndex, - @Nullable RoundRobinLoadBalancer.StickinessState stickinessState) { + ReadyPicker(List list, int startIndex) { Preconditions.checkArgument(!list.isEmpty(), "empty list"); this.list = list; - this.stickinessState = stickinessState; this.index = startIndex - 1; } @Override public PickResult pickSubchannel(PickSubchannelArgs args) { - Subchannel subchannel = null; - if (stickinessState != null) { - String stickinessValue = args.getHeaders().get(stickinessState.key); - if (stickinessValue != null) { - subchannel = stickinessState.getSubchannel(stickinessValue); - if (subchannel == null || !RoundRobinLoadBalancer.isReady(subchannel)) { - subchannel = stickinessState.maybeRegister(stickinessValue, nextSubchannel()); - } - } - } - - return PickResult.withSubchannel(subchannel != null ? 
subchannel : nextSubchannel()); + return PickResult.withSubchannel(nextSubchannel()); } @Override @@ -457,9 +308,8 @@ boolean isEquivalentTo(RoundRobinPicker picker) { } ReadyPicker other = (ReadyPicker) picker; // the lists cannot contain duplicate subchannels - return other == this || (stickinessState == other.stickinessState - && list.size() == other.list.size() - && new HashSet<>(list).containsAll(other.list)); + return other == this + || (list.size() == other.list.size() && new HashSet<>(list).containsAll(other.list)); } } diff --git a/core/src/test/java/io/grpc/util/RoundRobinLoadBalancerTest.java b/core/src/test/java/io/grpc/util/RoundRobinLoadBalancerTest.java index a682ddd1ffd..4346accf709 100644 --- a/core/src/test/java/io/grpc/util/RoundRobinLoadBalancerTest.java +++ b/core/src/test/java/io/grpc/util/RoundRobinLoadBalancerTest.java @@ -25,17 +25,12 @@ import static io.grpc.util.RoundRobinLoadBalancer.STATE_INFO; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isA; -import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -59,19 +54,14 @@ import io.grpc.LoadBalancer.Subchannel; import io.grpc.LoadBalancer.SubchannelPicker; import io.grpc.LoadBalancer.SubchannelStateListener; -import io.grpc.Metadata; -import io.grpc.Metadata.Key; import io.grpc.Status; -import io.grpc.internal.GrpcAttributes; import io.grpc.util.RoundRobinLoadBalancer.EmptyPicker; import 
io.grpc.util.RoundRobinLoadBalancer.ReadyPicker; import io.grpc.util.RoundRobinLoadBalancer.Ref; -import io.grpc.util.RoundRobinLoadBalancer.StickinessState; import java.net.SocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -115,7 +105,6 @@ public class RoundRobinLoadBalancerTest { private PickSubchannelArgs mockArgs; @Before - @SuppressWarnings("unchecked") public void setUp() { MockitoAnnotations.initMocks(this); @@ -188,7 +177,6 @@ public void pickAfterResolved() throws Exception { verifyNoMoreInteractions(mockHelper); } - @SuppressWarnings("unchecked") @Test public void pickAfterResolvedUpdatedHosts() throws Exception { Subchannel removedSubchannel = mock(Subchannel.class); @@ -268,7 +256,6 @@ public void pickAfterResolvedUpdatedHosts() throws Exception { verifyNoMoreInteractions(mockHelper); } - @SuppressWarnings("unchecked") @Test public void pickAfterStateChange() throws Exception { InOrder inOrder = inOrder(mockHelper); @@ -307,10 +294,6 @@ public void pickAfterStateChange() throws Exception { verifyNoMoreInteractions(mockHelper); } - private Subchannel nextSubchannel(Subchannel current, List allSubChannels) { - return allSubChannels.get((allSubChannels.indexOf(current) + 1) % allSubChannels.size()); - } - @Test public void pickerRoundRobin() throws Exception { Subchannel subchannel = mock(Subchannel.class); @@ -319,7 +302,7 @@ public void pickerRoundRobin() throws Exception { ReadyPicker picker = new ReadyPicker(Collections.unmodifiableList( Lists.newArrayList(subchannel, subchannel1, subchannel2)), - 0 /* startIndex */, null /* stickinessState */); + 0 /* startIndex */); assertThat(picker.getList()).containsExactly(subchannel, subchannel1, subchannel2); @@ -349,7 +332,6 @@ public void nameResolutionErrorWithNoChannels() throws Exception { verifyNoMoreInteractions(mockHelper); } - 
@SuppressWarnings("unchecked") @Test public void nameResolutionErrorWithActiveChannels() throws Exception { final Subchannel readySubchannel = subchannels.values().iterator().next(); @@ -421,356 +403,10 @@ public void subchannelStateIsolation() throws Exception { assertThat(pickers.hasNext()).isFalse(); } - @Test - public void noStickinessEnabled_withStickyHeader() { - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(Attributes.EMPTY) - .build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - .updateBalancingState(any(ConnectivityState.class), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - Key stickinessKey = Key.of("my-sticky-key", Metadata.ASCII_STRING_MARSHALLER); - Metadata headerWithStickinessValue = new Metadata(); - headerWithStickinessValue.put(stickinessKey, "my-sticky-value"); - doReturn(headerWithStickinessValue).when(mockArgs).getHeaders(); - - List allSubchannels = getList(picker); - Subchannel sc1 = picker.pickSubchannel(mockArgs).getSubchannel(); - Subchannel sc2 = picker.pickSubchannel(mockArgs).getSubchannel(); - Subchannel sc3 = picker.pickSubchannel(mockArgs).getSubchannel(); - Subchannel sc4 = picker.pickSubchannel(mockArgs).getSubchannel(); - - assertEquals(nextSubchannel(sc1, allSubchannels), sc2); - assertEquals(nextSubchannel(sc2, allSubchannels), sc3); - assertEquals(nextSubchannel(sc3, allSubchannels), sc1); - assertEquals(sc4, sc1); - - assertNull(loadBalancer.getStickinessMapForTest()); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickinessEnabled_withoutStickyHeader() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("stickinessMetadataKey", "my-sticky-key"); - Attributes attributes = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, 
serviceConfig).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes).build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - doReturn(new Metadata()).when(mockArgs).getHeaders(); - - List allSubchannels = getList(picker); - - Subchannel sc1 = picker.pickSubchannel(mockArgs).getSubchannel(); - Subchannel sc2 = picker.pickSubchannel(mockArgs).getSubchannel(); - Subchannel sc3 = picker.pickSubchannel(mockArgs).getSubchannel(); - Subchannel sc4 = picker.pickSubchannel(mockArgs).getSubchannel(); - - assertEquals(nextSubchannel(sc1, allSubchannels), sc2); - assertEquals(nextSubchannel(sc2, allSubchannels), sc3); - assertEquals(nextSubchannel(sc3, allSubchannels), sc1); - assertEquals(sc4, sc1); - verify(mockArgs, times(4)).getHeaders(); - assertNotNull(loadBalancer.getStickinessMapForTest()); - assertThat(loadBalancer.getStickinessMapForTest()).isEmpty(); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickinessEnabled_withStickyHeader() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("stickinessMetadataKey", "my-sticky-key"); - Attributes attributes = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes).build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - Key stickinessKey = 
Key.of("my-sticky-key", Metadata.ASCII_STRING_MARSHALLER); - Metadata headerWithStickinessValue = new Metadata(); - headerWithStickinessValue.put(stickinessKey, "my-sticky-value"); - doReturn(headerWithStickinessValue).when(mockArgs).getHeaders(); - - Subchannel sc1 = picker.pickSubchannel(mockArgs).getSubchannel(); - assertEquals(sc1, picker.pickSubchannel(mockArgs).getSubchannel()); - assertEquals(sc1, picker.pickSubchannel(mockArgs).getSubchannel()); - assertEquals(sc1, picker.pickSubchannel(mockArgs).getSubchannel()); - assertEquals(sc1, picker.pickSubchannel(mockArgs).getSubchannel()); - - verify(mockArgs, atLeast(4)).getHeaders(); - assertNotNull(loadBalancer.getStickinessMapForTest()); - assertThat(loadBalancer.getStickinessMapForTest()).hasSize(1); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickinessEnabled_withDifferentStickyHeaders() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("stickinessMetadataKey", "my-sticky-key"); - Attributes attributes = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes).build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - Key stickinessKey = Key.of("my-sticky-key", Metadata.ASCII_STRING_MARSHALLER); - Metadata headerWithStickinessValue1 = new Metadata(); - headerWithStickinessValue1.put(stickinessKey, "my-sticky-value"); - - Metadata headerWithStickinessValue2 = new Metadata(); - headerWithStickinessValue2.put(stickinessKey, "my-sticky-value2"); - - List allSubchannels = getList(picker); - - 
doReturn(headerWithStickinessValue1).when(mockArgs).getHeaders(); - Subchannel sc1a = picker.pickSubchannel(mockArgs).getSubchannel(); - - doReturn(headerWithStickinessValue2).when(mockArgs).getHeaders(); - Subchannel sc2a = picker.pickSubchannel(mockArgs).getSubchannel(); - - doReturn(headerWithStickinessValue1).when(mockArgs).getHeaders(); - Subchannel sc1b = picker.pickSubchannel(mockArgs).getSubchannel(); - - doReturn(headerWithStickinessValue2).when(mockArgs).getHeaders(); - Subchannel sc2b = picker.pickSubchannel(mockArgs).getSubchannel(); - - assertEquals(sc1a, sc1b); - assertEquals(sc2a, sc2b); - assertEquals(nextSubchannel(sc1a, allSubchannels), sc2a); - assertEquals(nextSubchannel(sc1b, allSubchannels), sc2b); - - verify(mockArgs, atLeast(4)).getHeaders(); - assertNotNull(loadBalancer.getStickinessMapForTest()); - assertThat(loadBalancer.getStickinessMapForTest()).hasSize(2); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickiness_goToTransientFailure_pick_backToReady() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("stickinessMetadataKey", "my-sticky-key"); - Attributes attributes = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes).build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - Key stickinessKey = Key.of("my-sticky-key", Metadata.ASCII_STRING_MARSHALLER); - Metadata headerWithStickinessValue = new Metadata(); - headerWithStickinessValue.put(stickinessKey, "my-sticky-value"); - doReturn(headerWithStickinessValue).when(mockArgs).getHeaders(); - - // first pick - Subchannel 
sc1 = picker.pickSubchannel(mockArgs).getSubchannel(); - - // go to transient failure - deliverSubchannelState(sc1, ConnectivityStateInfo.forTransientFailure(Status.UNAVAILABLE)); - - verify(mockHelper, times(5)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - picker = pickerCaptor.getValue(); - - // second pick - Subchannel sc2 = picker.pickSubchannel(mockArgs).getSubchannel(); - - // go back to ready - deliverSubchannelState(sc1, ConnectivityStateInfo.forNonError(READY)); - - verify(mockHelper, times(6)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - picker = pickerCaptor.getValue(); - - // third pick - Subchannel sc3 = picker.pickSubchannel(mockArgs).getSubchannel(); - assertEquals(sc2, sc3); - verify(mockArgs, atLeast(3)).getHeaders(); - assertNotNull(loadBalancer.getStickinessMapForTest()); - assertThat(loadBalancer.getStickinessMapForTest()).hasSize(1); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickiness_goToTransientFailure_backToReady_pick() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("stickinessMetadataKey", "my-sticky-key"); - Attributes attributes = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes).build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - Key stickinessKey = Key.of("my-sticky-key", Metadata.ASCII_STRING_MARSHALLER); - Metadata headerWithStickinessValue1 = new Metadata(); - headerWithStickinessValue1.put(stickinessKey, "my-sticky-value"); - doReturn(headerWithStickinessValue1).when(mockArgs).getHeaders(); - 
- // first pick - Subchannel sc1 = picker.pickSubchannel(mockArgs).getSubchannel(); - - // go to transient failure - deliverSubchannelState(sc1, ConnectivityStateInfo.forTransientFailure(Status.UNAVAILABLE)); - - Metadata headerWithStickinessValue2 = new Metadata(); - headerWithStickinessValue2.put(stickinessKey, "my-sticky-value2"); - doReturn(headerWithStickinessValue2).when(mockArgs).getHeaders(); - verify(mockHelper, times(5)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - picker = pickerCaptor.getValue(); - - // second pick with a different stickiness value - @SuppressWarnings("unused") - Subchannel sc2 = picker.pickSubchannel(mockArgs).getSubchannel(); - - // go back to ready - deliverSubchannelState(sc1, ConnectivityStateInfo.forNonError(READY)); - - doReturn(headerWithStickinessValue1).when(mockArgs).getHeaders(); - verify(mockHelper, times(6)) - .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - picker = pickerCaptor.getValue(); - - // third pick with my-sticky-value1 - Subchannel sc3 = picker.pickSubchannel(mockArgs).getSubchannel(); - assertEquals(sc1, sc3); - - verify(mockArgs, atLeast(3)).getHeaders(); - assertNotNull(loadBalancer.getStickinessMapForTest()); - assertThat(loadBalancer.getStickinessMapForTest()).hasSize(2); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickiness_oneSubchannelShutdown() { - Map serviceConfig = new HashMap<>(); - serviceConfig.put("stickinessMetadataKey", "my-sticky-key"); - Attributes attributes = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes).build()); - for (Subchannel subchannel : subchannels.values()) { - deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); - } - verify(mockHelper, times(4)) - 
.updateBalancingState(stateCaptor.capture(), pickerCaptor.capture()); - SubchannelPicker picker = pickerCaptor.getValue(); - - Key stickinessKey = Key.of("my-sticky-key", Metadata.ASCII_STRING_MARSHALLER); - Metadata headerWithStickinessValue = new Metadata(); - headerWithStickinessValue.put(stickinessKey, "my-sticky-value"); - doReturn(headerWithStickinessValue).when(mockArgs).getHeaders(); - - List allSubchannels = Lists.newArrayList(getList(picker)); - - Subchannel sc1 = picker.pickSubchannel(mockArgs).getSubchannel(); - - // shutdown channel directly - deliverSubchannelState(sc1, ConnectivityStateInfo.forNonError(ConnectivityState.SHUTDOWN)); - - assertNull(loadBalancer.getStickinessMapForTest().get("my-sticky-value").value); - - assertEquals(nextSubchannel(sc1, allSubchannels), - picker.pickSubchannel(mockArgs).getSubchannel()); - assertThat(loadBalancer.getStickinessMapForTest()).hasSize(1); - verify(mockArgs, atLeast(2)).getHeaders(); - - Subchannel sc2 = picker.pickSubchannel(mockArgs).getSubchannel(); - - assertEquals(sc2, loadBalancer.getStickinessMapForTest().get("my-sticky-value").value); - - // shutdown channel via name resolver change - List newServers = new ArrayList<>(servers); - newServers.remove(sc2.getAddresses()); - - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(newServers).setAttributes(attributes).build()); - - verify(sc2, times(1)).shutdown(); - - deliverSubchannelState(sc2, ConnectivityStateInfo.forNonError(SHUTDOWN)); - - assertNull(loadBalancer.getStickinessMapForTest().get("my-sticky-value").value); - - assertEquals(nextSubchannel(sc2, allSubchannels), - picker.pickSubchannel(mockArgs).getSubchannel()); - assertThat(loadBalancer.getStickinessMapForTest()).hasSize(1); - verify(mockArgs, atLeast(2)).getHeaders(); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickiness_resolveTwice_metadataKeyChanged() { - Map serviceConfig1 = new HashMap<>(); - 
serviceConfig1.put("stickinessMetadataKey", "my-sticky-key1"); - Attributes attributes1 = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig1).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes1).build()); - Map stickinessMap1 = loadBalancer.getStickinessMapForTest(); - - Map serviceConfig2 = new HashMap<>(); - serviceConfig2.put("stickinessMetadataKey", "my-sticky-key2"); - Attributes attributes2 = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig2).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes2).build()); - Map stickinessMap2 = loadBalancer.getStickinessMapForTest(); - - assertNotSame(stickinessMap1, stickinessMap2); - } - - @Test - @SuppressWarnings("deprecation") // migrate to parsed object - public void stickiness_resolveTwice_metadataKeyUnChanged() { - Map serviceConfig1 = new HashMap<>(); - serviceConfig1.put("stickinessMetadataKey", "my-sticky-key1"); - Attributes attributes1 = Attributes.newBuilder() - .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, serviceConfig1).build(); - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes1).build()); - Map stickinessMap1 = loadBalancer.getStickinessMapForTest(); - - loadBalancer.handleResolvedAddresses( - ResolvedAddresses.newBuilder().setAddresses(servers).setAttributes(attributes1).build()); - Map stickinessMap2 = loadBalancer.getStickinessMapForTest(); - - assertSame(stickinessMap1, stickinessMap2); - } - @Test(expected = IllegalArgumentException.class) public void readyPicker_emptyList() { // ready picker list must be non-empty - new ReadyPicker(Collections.emptyList(), 0, null); + new ReadyPicker(Collections.emptyList(), 0); } @Test @@ -782,24 +418,22 @@ public void internalPickerComparisons() { Iterator 
subchannelIterator = subchannels.values().iterator(); Subchannel sc1 = subchannelIterator.next(); Subchannel sc2 = subchannelIterator.next(); - StickinessState stickinessState = new StickinessState("stick-key"); - ReadyPicker ready1 = new ReadyPicker(Arrays.asList(sc1, sc2), 0, null); - ReadyPicker ready2 = new ReadyPicker(Arrays.asList(sc1), 0, null); - ReadyPicker ready3 = new ReadyPicker(Arrays.asList(sc2, sc1), 1, null); - ReadyPicker ready4 = new ReadyPicker(Arrays.asList(sc1, sc2), 1, stickinessState); - ReadyPicker ready5 = new ReadyPicker(Arrays.asList(sc2, sc1), 0, stickinessState); + ReadyPicker ready1 = new ReadyPicker(Arrays.asList(sc1, sc2), 0); + ReadyPicker ready2 = new ReadyPicker(Arrays.asList(sc1), 0); + ReadyPicker ready3 = new ReadyPicker(Arrays.asList(sc2, sc1), 1); + ReadyPicker ready4 = new ReadyPicker(Arrays.asList(sc1, sc2), 1); + ReadyPicker ready5 = new ReadyPicker(Arrays.asList(sc2, sc1), 0); assertTrue(emptyOk1.isEquivalentTo(emptyOk2)); assertFalse(emptyOk1.isEquivalentTo(emptyErr)); assertFalse(ready1.isEquivalentTo(ready2)); assertTrue(ready1.isEquivalentTo(ready3)); - assertFalse(ready3.isEquivalentTo(ready4)); + assertTrue(ready3.isEquivalentTo(ready4)); assertTrue(ready4.isEquivalentTo(ready5)); assertFalse(emptyOk1.isEquivalentTo(ready1)); assertFalse(ready1.isEquivalentTo(emptyOk1)); } - private static List getList(SubchannelPicker picker) { return picker instanceof ReadyPicker ? ((ReadyPicker) picker).getList() : Collections.emptyList(); From a8bdf5cb1abf7a857e263a5d1500f1bba2c940e3 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Tue, 11 Feb 2020 12:48:56 -0800 Subject: [PATCH 57/86] xds: support reporting loads for more than one cluster services (#6688) Although current LRS client API takes in load stats data for multiple cluster services, it only expects the management server to ask loads for a single cluster services (the LRS response will be ignored if management server asks for more than one). 
This change removes that assumption/restriction, the actual loads to be reported will be the intersection of services that we have loads for and services that management server asks for. This change also cleans up LRS client's tests. --- .../io/grpc/xds/LoadReportClientImpl.java | 50 +- .../io/grpc/xds/LoadReportClientImplTest.java | 434 ++++++------------ 2 files changed, 166 insertions(+), 318 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java b/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java index 57c4c971d5e..ac5e6a89d18 100644 --- a/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java @@ -35,10 +35,10 @@ import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.internal.BackoffPolicy; import io.grpc.stub.StreamObserver; -import java.util.Collections; import java.util.HashMap; -import java.util.List; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.logging.Level; @@ -164,6 +164,8 @@ private void startLrsRpc() { private class LrsStream implements StreamObserver { + // Cluster services to report loads for, instructed by LRS responses. + final Set clusterServiceNames = new HashSet<>(); final LoadReportingServiceGrpc.LoadReportingServiceStub stub; final Stopwatch reportStopwatch; StreamObserver lrsRequestWriter; @@ -172,13 +174,6 @@ private class LrsStream implements StreamObserver { long loadReportIntervalNano = -1; ScheduledHandle loadReportTimer; - // Name of cluster service to report loads for, instructed by LRS responses. - // Currently we expect a gRPC client only talks to a single service per cluster. But we - // could support switching cluster services, for which loads for a cluster may - // spread to multiple services. 
- @Nullable - String clusterServiceName; - LrsStream(LoadReportingServiceGrpc.LoadReportingServiceStub stub, Stopwatch stopwatch) { this.stub = checkNotNull(stub, "stub"); reportStopwatch = checkNotNull(stopwatch, "stopwatch"); @@ -232,17 +227,21 @@ private void sendLoadReport() { long interval = reportStopwatch.elapsed(TimeUnit.NANOSECONDS); reportStopwatch.reset().start(); LoadStatsRequest.Builder requestBuilder = LoadStatsRequest.newBuilder().setNode(node); - if (loadStatsStoreMap.containsKey(clusterServiceName)) { - LoadStatsStore loadStatsStore = loadStatsStoreMap.get(clusterServiceName); - ClusterStats report = - loadStatsStore.generateLoadReport() - .toBuilder() - .setClusterName(clusterServiceName) - .setLoadReportInterval(Durations.fromNanos(interval)) - .build(); - requestBuilder.addClusterStats(report); + for (String serviceName : clusterServiceNames) { + if (loadStatsStoreMap.containsKey(serviceName)) { + LoadStatsStore loadStatsStore = loadStatsStoreMap.get(serviceName); + ClusterStats report = + loadStatsStore.generateLoadReport() + .toBuilder() + .setClusterName(serviceName) + .setLoadReportInterval(Durations.fromNanos(interval)) + .build(); + requestBuilder.addClusterStats(report); + } } - lrsRequestWriter.onNext(requestBuilder.build()); + LoadStatsRequest request = requestBuilder.build(); + lrsRequestWriter.onNext(request); + logger.log(Level.FINE, "Sent LoadStatsRequest\n{0}", request); scheduleNextLoadReport(); } @@ -272,17 +271,8 @@ private void handleResponse(LoadStatsResponse response) { } loadReportIntervalNano = Durations.toNanos(response.getLoadReportingInterval()); callback.onReportResponse(loadReportIntervalNano); - List serviceList = Collections.unmodifiableList(response.getClustersList()); - // For current gRPC use case, we expect traffic director only request client to report - // loads for a single service per cluster (which is the cluster service gRPC client talks - // to). 
We could support reporting loads for multiple services per cluster that gRPC - // client sends loads to due to service switching. - if (serviceList.size() != 1) { - logger.log(Level.FINE, "Received clusters: {0}, expect exactly one", - serviceList); - return; - } - clusterServiceName = serviceList.get(0); + clusterServiceNames.clear(); + clusterServiceNames.addAll(response.getClustersList()); scheduleNextLoadReport(); } diff --git a/xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java b/xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java index 3ab659439ce..633e2799d96 100644 --- a/xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java @@ -19,6 +19,7 @@ import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; import static org.mockito.AdditionalAnswers.delegatesTo; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; @@ -27,6 +28,8 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.MoreExecutors; import com.google.protobuf.util.Durations; @@ -51,6 +54,9 @@ import io.grpc.testing.GrpcCleanupRule; import io.grpc.xds.LoadReportClient.LoadReportCallback; import java.util.ArrayDeque; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -61,6 +67,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatcher; import org.mockito.Captor; import 
org.mockito.InOrder; import org.mockito.Mock; @@ -90,12 +97,6 @@ public boolean shouldAccept(Runnable command) { .contains(LoadReportClientImpl.LrsRpcRetryTask.class.getSimpleName()); } }; - private static final Locality TEST_LOCALITY = - Locality.newBuilder() - .setRegion("test_region") - .setZone("test_zone") - .setSubZone("test_subzone") - .build(); private static final LoadStatsRequest EXPECTED_INITIAL_REQ = LoadStatsRequest.newBuilder() .setNode(NODE) @@ -123,7 +124,9 @@ public void uncaughtException(Thread t, Throwable e) { @Mock private BackoffPolicy backoffPolicy2; @Mock - private LoadStatsStore mockLoadStatsStore; + private LoadStatsStore loadStatsStore1; + @Mock + private LoadStatsStore loadStatsStore2; @Mock private LoadReportCallback callback; @Captor @@ -186,201 +189,69 @@ public void tearDown() { } @Test - public void loadReportInitialRequest() { - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); - // No more request should be sent until receiving initial response. No load reporting - // should be scheduled. 
- assertThat(fakeClock.getPendingTasks(LOAD_REPORTING_TASK_FILTER)).isEmpty(); - verifyNoMoreInteractions(requestObserver); - } - - @Test - public void startAndStopCanBeCalledMultipleTimes() { - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.peek(); - verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); - lrsClient.startLoadReporting(callback); - assertThat(lrsRequestObservers).hasSize(1); - lrsClient.startLoadReporting(callback); - assertThat(lrsRequestObservers).hasSize(1); - verifyNoMoreInteractions(requestObserver); - - lrsClient.stopLoadReporting(); - assertThat(callEnded.get()).isTrue(); - assertThat(fakeClock.getPendingTasks(LRS_RPC_RETRY_TASK_FILTER)).isEmpty(); - lrsClient.stopLoadReporting(); - assertThat(callEnded.get()).isTrue(); - - lrsClient.startLoadReporting(callback); - verify(mockLoadReportingService, times(2)).streamLoadStats(lrsResponseObserverCaptor.capture()); - assertThat(lrsRequestObservers).hasSize(2); - } - - // Currently we expect each gRPC client talks to a single service per cluster, so we test LRS - // client reporting load for a single cluster service only. - // TODO(chengyuanzhang): Existing test suites for LRS client implementation have poor behavior - // coverage and are not robust. Should improve once its usage is finalized without too much - // assumption. - - @Test - public void loadReportActualIntervalAsSpecified() { + public void typicalWorkflow() { verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.poll(); - - // Add load stats source for some cluster service. 
- when(mockLoadStatsStore.generateLoadReport()).thenReturn(ClusterStats.newBuilder().build()); - lrsClient.addLoadStatsStore("namespace-foo:service-blade", mockLoadStatsStore); - - InOrder inOrder = inOrder(requestObserver, mockLoadStatsStore); + StreamObserver requestObserver = + Iterables.getOnlyElement(lrsRequestObservers); + InOrder inOrder = inOrder(requestObserver); inOrder.verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 1453)); - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, - buildEmptyClusterStats("namespace-foo:service-blade", 1453)); - verify(callback).onReportResponse(1453); - } - - @Test - public void loadReportIntervalUpdate() { - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); - StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.poll(); - - // Add load stats source for some cluster service. 
- when(mockLoadStatsStore.generateLoadReport()).thenReturn(ClusterStats.newBuilder().build()); - lrsClient.addLoadStatsStore("namespace-foo:service-blade", mockLoadStatsStore); + String service1 = "namespace-foo:service-blade"; + ClusterStats rawStats1 = generateServiceLoadStats(); + when(loadStatsStore1.generateLoadReport()).thenReturn(rawStats1); + lrsClient.addLoadStatsStore(service1, loadStatsStore1); + responseObserver.onNext(buildLrsResponse(ImmutableList.of(service1), 1000)); - InOrder inOrder = inOrder(requestObserver, mockLoadStatsStore); - inOrder.verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); + ArgumentMatcher expectedLoadReportMatcher = + new LoadStatsRequestMatcher(ImmutableMap.of(service1, rawStats1), 1000); + fakeClock.forwardNanos(999); + inOrder.verifyNoMoreInteractions(); + fakeClock.forwardNanos(1); + inOrder.verify(requestObserver).onNext(argThat(expectedLoadReportMatcher)); - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 1362)); - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, - buildEmptyClusterStats("namespace-foo:service-blade", 1362)); - verify(callback).onReportResponse(1362); + fakeClock.forwardNanos(1000); + inOrder.verify(requestObserver).onNext(argThat(expectedLoadReportMatcher)); - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 2183345)); - // Updated load reporting interval becomes effective immediately. - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, - buildEmptyClusterStats("namespace-foo:service-blade", 2183345)); - verify(callback).onReportResponse(2183345); - } + // Management server updates the interval of sending load reports. 
+ responseObserver.onNext(buildLrsResponse(ImmutableList.of(service1), 2000)); - @Test - public void reportNothingIfLoadStatsSourceNotAvailable() { - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); - StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); + fakeClock.forwardNanos(1000); + inOrder.verifyNoMoreInteractions(); - // Server asks to report load for some cluster service. - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 1395)); + fakeClock.forwardNanos(1000); + inOrder.verify(requestObserver) + .onNext(argThat(new LoadStatsRequestMatcher(ImmutableMap.of(service1, rawStats1), 2000))); - // Nothing to be reported as no load stats data is available. - fakeClock.forwardNanos(1395); - ArgumentCaptor reportCaptor = ArgumentCaptor.forClass(null); - verify(requestObserver, times(2)).onNext(reportCaptor.capture()); - assertThat(reportCaptor.getValue().getClusterStatsCount()).isEqualTo(0); + String service2 = "namespace-bar:service-baz"; + ClusterStats rawStats2 = generateServiceLoadStats(); + when(loadStatsStore2.generateLoadReport()).thenReturn(rawStats2); + lrsClient.addLoadStatsStore(service2, loadStatsStore2); - // Add load stats source. 
- ClusterStats clusterStats = ClusterStats.newBuilder() - .setClusterName("namespace-foo:service-blade") - .setLoadReportInterval(Durations.fromNanos(50)) - .addUpstreamLocalityStats(UpstreamLocalityStats.newBuilder() - .setLocality(TEST_LOCALITY) - .setTotalRequestsInProgress(542) - .setTotalSuccessfulRequests(645) - .setTotalErrorRequests(85) - .setTotalIssuedRequests(27)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("lb") - .setDroppedCount(0)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("throttle") - .setDroppedCount(14)) - .setTotalDroppedRequests(14) - .build(); - when(mockLoadStatsStore.generateLoadReport()).thenReturn(clusterStats); - lrsClient.addLoadStatsStore("namespace-foo:service-blade", mockLoadStatsStore); - - // Loads reported. - fakeClock.forwardNanos(1395); - verify(requestObserver, times(3)).onNext(reportCaptor.capture()); - assertThat(reportCaptor.getValue().getClusterStatsCount()).isEqualTo(1); - - // Delete load stats source. - lrsClient.removeLoadStatsStore("namespace-foo:service-blade"); - - // Nothing to report as load stats data is not available. - fakeClock.forwardNanos(1395); - verify(requestObserver, times(4)).onNext(reportCaptor.capture()); - assertThat(reportCaptor.getValue().getClusterStatsCount()).isEqualTo(0); - } + // Management server asks to report loads for an extra cluster service. 
+ responseObserver.onNext(buildLrsResponse(ImmutableList.of(service1, service2), 2000)); - @Test - public void reportRecordedLoadData() { - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); - StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.poll(); + fakeClock.forwardNanos(2000); + inOrder.verify(requestObserver) + .onNext( + argThat( + new LoadStatsRequestMatcher( + ImmutableMap.of(service1, rawStats1, service2, rawStats2), 2000))); - long callsInProgress = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); - long callsSucceeded = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); - long callsFailed = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); - long callsIssued = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); - long numLbDrops = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); - long numThrottleDrops = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + // Load reports for one of existing service is no longer wanted. 
+ responseObserver.onNext(buildLrsResponse(ImmutableList.of(service2), 2000)); - ClusterStats expectedStats1 = ClusterStats.newBuilder() - .setClusterName("namespace-foo:service-blade") - .setLoadReportInterval(Durations.fromNanos(1362)) - .addUpstreamLocalityStats(UpstreamLocalityStats.newBuilder() - .setLocality(TEST_LOCALITY) - .setTotalRequestsInProgress(callsInProgress) - .setTotalSuccessfulRequests(callsSucceeded) - .setTotalErrorRequests(callsFailed) - .setTotalIssuedRequests(callsIssued)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("lb") - .setDroppedCount(numLbDrops)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("throttle") - .setDroppedCount(numThrottleDrops)) - .setTotalDroppedRequests(numLbDrops + numThrottleDrops) - .build(); - ClusterStats expectedStats2 = ClusterStats.newBuilder() - .setClusterName("namespace-foo:service-blade") - .setLoadReportInterval(Durations.fromNanos(1362)) - .addUpstreamLocalityStats(UpstreamLocalityStats.newBuilder() - .setLocality(TEST_LOCALITY) - .setTotalRequestsInProgress(callsInProgress)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("lb") - .setDroppedCount(0)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("throttle") - .setDroppedCount(0)) - .setTotalDroppedRequests(0) - .build(); - - // Add load stats source for some cluster service. 
- when(mockLoadStatsStore.generateLoadReport()).thenReturn(expectedStats1, expectedStats2); - lrsClient.addLoadStatsStore("namespace-foo:service-blade", mockLoadStatsStore); - - InOrder inOrder = inOrder(requestObserver, mockLoadStatsStore); - inOrder.verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); + fakeClock.forwardNanos(2000); + inOrder.verify(requestObserver) + .onNext(argThat(new LoadStatsRequestMatcher(ImmutableMap.of(service2, rawStats2), 2000))); - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 1362)); - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, expectedStats1); + // Management server asks loads for a cluster service that client has no load data. + responseObserver.onNext(buildLrsResponse(ImmutableList.of("namespace-ham:service-spam"), 2000)); - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, expectedStats2); + fakeClock.forwardNanos(2000); + ArgumentCaptor reportCaptor = ArgumentCaptor.forClass(null); + inOrder.verify(requestObserver).onNext(reportCaptor.capture()); + assertThat(reportCaptor.getValue().getClusterStatsCount()).isEqualTo(0); } @Test @@ -436,7 +307,9 @@ public void lrsStreamClosedAndRetried() { assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Balancer sends a response asking for loads of some cluster service. - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 0)); + String serviceName = "namespace-foo:service-blade"; + responseObserver + .onNext(buildLrsResponse(ImmutableList.of(serviceName), 0)); // Then breaks the RPC responseObserver.onError(Status.UNAVAILABLE.asException()); @@ -468,87 +341,23 @@ public void lrsStreamClosedAndRetried() { verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); + // Load reporting back to normal. 
+ responseObserver = lrsResponseObserverCaptor.getValue(); + ClusterStats stats = generateServiceLoadStats(); + when(loadStatsStore1.generateLoadReport()).thenReturn(stats); + lrsClient.addLoadStatsStore(serviceName, loadStatsStore1); + responseObserver + .onNext(buildLrsResponse(ImmutableList.of(serviceName), 10)); + fakeClock.forwardNanos(10); + verify(requestObserver) + .onNext(argThat(new LoadStatsRequestMatcher(ImmutableMap.of(serviceName, stats), 10))); + // Wrapping up verify(backoffPolicyProvider, times(2)).get(); verify(backoffPolicy1, times(2)).nextBackoffNanos(); verify(backoffPolicy2, times(1)).nextBackoffNanos(); } - @Test - public void lrsStreamRetryAndRereport() { - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); - StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); - assertThat(lrsRequestObservers).hasSize(1); - StreamObserver requestObserver = lrsRequestObservers.poll(); - - // Add load stats source for some cluster service. 
- ClusterStats stats1 = ClusterStats.newBuilder() - .setClusterName("namespace-foo:service-blade") - .setLoadReportInterval(Durations.fromNanos(50)) - .addUpstreamLocalityStats(UpstreamLocalityStats.newBuilder() - .setLocality(TEST_LOCALITY) - .setTotalRequestsInProgress(542) - .setTotalSuccessfulRequests(645) - .setTotalErrorRequests(85) - .setTotalIssuedRequests(27)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("lb") - .setDroppedCount(0)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("throttle") - .setDroppedCount(14)) - .setTotalDroppedRequests(14) - .build(); - ClusterStats stats2 = ClusterStats.newBuilder() - .setClusterName("namespace-foo:service-blade") - .setLoadReportInterval(Durations.fromNanos(50)) - .addUpstreamLocalityStats(UpstreamLocalityStats.newBuilder() - .setLocality(TEST_LOCALITY) - .setTotalRequestsInProgress(89)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("lb") - .setDroppedCount(0)) - .addDroppedRequests(DroppedRequests.newBuilder() - .setCategory("throttle") - .setDroppedCount(0)) - .setTotalDroppedRequests(0) - .build(); - when(mockLoadStatsStore.generateLoadReport()).thenReturn(stats1, stats2); - lrsClient.addLoadStatsStore("namespace-foo:service-blade", mockLoadStatsStore); - - // First LRS request sent. - verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); - assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); - - // Balancer sends a response asking for loads of some cluster service. - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 100)); - - // A load reporting task is scheduled. - assertEquals(1, fakeClock.numPendingTasks(LOAD_REPORTING_TASK_FILTER)); - fakeClock.forwardNanos(99); - verifyNoMoreInteractions(requestObserver); - - // Balancer closes the stream with error. - responseObserver.onError(Status.UNKNOWN.asException()); - - // The unsent load report is cancelled. 
- assertEquals(0, fakeClock.numPendingTasks(LOAD_REPORTING_TASK_FILTER)); - // Will retry immediately as balancer has responded previously. - verify(mockLoadReportingService, times(2)).streamLoadStats(lrsResponseObserverCaptor.capture()); - responseObserver = lrsResponseObserverCaptor.getValue(); - assertThat(lrsRequestObservers).hasSize(1); - requestObserver = lrsRequestObservers.poll(); - InOrder inOrder = inOrder(requestObserver, mockLoadStatsStore); - inOrder.verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); - - // Balancer sends another response with a different report interval. - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 50)); - - // Load reporting runs normally. - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, stats1); - assertNextReport(inOrder, requestObserver, mockLoadStatsStore, stats2); - } - @Test public void raceBetweenLoadReportingAndLbStreamClosure() { verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); @@ -562,7 +371,8 @@ public void raceBetweenLoadReportingAndLbStreamClosure() { // Simulate receiving a response from traffic director. 
assertEquals(0, fakeClock.numPendingTasks(LOAD_REPORTING_TASK_FILTER)); - responseObserver.onNext(buildLrsResponse("namespace-foo:service-blade", 1983)); + responseObserver + .onNext(buildLrsResponse(ImmutableList.of("namespace-foo:service-blade"), 1983)); // Load reporting task is scheduled assertEquals(1, fakeClock.numPendingTasks(LOAD_REPORTING_TASK_FILTER)); FakeClock.ScheduledTask scheduledTask = @@ -583,34 +393,82 @@ public void raceBetweenLoadReportingAndLbStreamClosure() { assertEquals(0, fakeClock.numPendingTasks(LOAD_REPORTING_TASK_FILTER)); } - private static ClusterStats buildEmptyClusterStats(String clusterServiceName, - long loadReportIntervalNanos) { - return ClusterStats.newBuilder() - .setClusterName(clusterServiceName) - .setLoadReportInterval(Durations.fromNanos(loadReportIntervalNanos)).build(); + private static LoadStatsResponse buildLrsResponse( + List clusterServiceNames, long loadReportIntervalNanos) { + return + LoadStatsResponse + .newBuilder() + .addAllClusters(clusterServiceNames) + .setLoadReportingInterval(Durations.fromNanos(loadReportIntervalNanos)) + .build(); } - private static LoadStatsResponse buildLrsResponse(String clusterServiceName, - long loadReportIntervalNanos) { - return LoadStatsResponse.newBuilder() - .addClusters(clusterServiceName) - .setLoadReportingInterval(Durations.fromNanos(loadReportIntervalNanos)).build(); + /** + * Generates a raw service load stats report with random data. 
+ */ + private static ClusterStats generateServiceLoadStats() { + long callsInProgress = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + long callsSucceeded = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + long callsFailed = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + long callsIssued = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + long numLbDrops = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + long numThrottleDrops = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); + + return + ClusterStats.newBuilder() + .addUpstreamLocalityStats( + UpstreamLocalityStats.newBuilder() + .setLocality( + Locality.newBuilder() + .setRegion("region-foo") + .setZone("zone-bar") + .setSubZone("subzone-baz")) + .setTotalRequestsInProgress(callsInProgress) + .setTotalSuccessfulRequests(callsSucceeded) + .setTotalErrorRequests(callsFailed) + .setTotalIssuedRequests(callsIssued)) + .addDroppedRequests( + DroppedRequests.newBuilder() + .setCategory("lb") + .setDroppedCount(numLbDrops)) + .addDroppedRequests( + DroppedRequests.newBuilder() + .setCategory("throttle") + .setDroppedCount(numThrottleDrops)) + .setTotalDroppedRequests(numLbDrops + numThrottleDrops) + .build(); } - private void assertNextReport(InOrder inOrder, StreamObserver requestObserver, - LoadStatsStore loadStatsStore, ClusterStats expectedStats) { - long loadReportIntervalNanos = Durations.toNanos(expectedStats.getLoadReportInterval()); - assertEquals(0, fakeClock.forwardTime(loadReportIntervalNanos - 1, TimeUnit.NANOSECONDS)); - inOrder.verifyNoMoreInteractions(); - assertEquals(1, fakeClock.forwardTime(1, TimeUnit.NANOSECONDS)); - // A second load report is scheduled upon the first is sent. 
- assertEquals(1, fakeClock.numPendingTasks(LOAD_REPORTING_TASK_FILTER)); - inOrder.verify(loadStatsStore).generateLoadReport(); - ArgumentCaptor reportCaptor = ArgumentCaptor.forClass(null); - inOrder.verify(requestObserver).onNext(reportCaptor.capture()); - LoadStatsRequest report = reportCaptor.getValue(); - assertEquals(report.getNode(), NODE); - assertEquals(1, report.getClusterStatsCount()); - assertThat(report.getClusterStats(0)).isEqualTo(expectedStats); + /** + * For comparing LoadStatsRequest based on a collection of raw service load stats. + */ + private static class LoadStatsRequestMatcher implements ArgumentMatcher { + private final Map expectedStats = new HashMap<>(); + + LoadStatsRequestMatcher(Map serviceStats, long expectedIntervalNano) { + for (String serviceName : serviceStats.keySet()) { + // TODO(chengyuanzhang): the field to be populated should be cluster_service_name. + ClusterStats statsWithInterval = + serviceStats.get(serviceName) + .toBuilder() + .setClusterName(serviceName) + .setLoadReportInterval(Durations.fromNanos(expectedIntervalNano)) + .build(); + expectedStats.put(serviceName, statsWithInterval); + } + } + + @Override + public boolean matches(LoadStatsRequest argument) { + if (argument.getClusterStatsCount() != expectedStats.size()) { + return false; + } + for (ClusterStats stats : argument.getClusterStatsList()) { + if (!stats.equals(expectedStats.get(stats.getClusterName()))) { + return false; + } + } + return true; + } } } From 941de0e80b3afe2148d234f612dc51cadb708ef6 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Thu, 13 Feb 2020 10:33:52 -0800 Subject: [PATCH 58/86] xds: refactor io.grpc.xds.sds to io.grpc.xds.internal.sds --- xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java | 6 +++--- .../sds/ClientSslContextProviderFactory.java | 4 ++-- .../grpc/xds/{ => internal}/sds/CommonTlsContextUtil.java | 2 +- .../xds/{ => internal}/sds/FileBasedPluginCredential.java | 2 +- .../sds/ReferenceCountingSslContextProviderMap.java | 
2 +- .../java/io/grpc/xds/{ => internal}/sds/SdsClient.java | 4 +--- .../internal => internal/sds}/SdsProtocolNegotiators.java | 8 ++------ .../xds/{ => internal}/sds/SdsSslContextProvider.java | 4 ++-- .../sds/SecretVolumeSslContextProvider.java | 4 ++-- .../sds/ServerSslContextProviderFactory.java | 4 ++-- .../grpc/xds/{ => internal}/sds/SslContextProvider.java | 4 +--- .../io/grpc/xds/{ => internal}/sds/TlsContextManager.java | 4 +--- .../xds/{ => internal}/sds/TlsContextManagerImpl.java | 6 ++---- .../io/grpc/xds/{ => internal}/sds/XdsChannelBuilder.java | 5 +---- .../io/grpc/xds/{ => internal}/sds/XdsServerBuilder.java | 5 +---- .../xds/{ => internal}/sds/trust/CertificateUtils.java | 2 +- .../{ => internal}/sds/trust/SdsTrustManagerFactory.java | 7 +++---- .../xds/{ => internal}/sds/trust/SdsX509TrustManager.java | 2 +- xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java | 6 +++--- .../sds/ClientSslContextProviderFactoryTest.java | 2 +- .../xds/{ => internal}/sds/CommonTlsContextTestsUtil.java | 2 +- .../sds/ReferenceCountingSslContextProviderMapTest.java | 4 ++-- .../sds/SdsClientFileBasedMetadataTest.java | 2 +- .../io/grpc/xds/{ => internal}/sds/SdsClientTest.java | 2 +- .../sds/SdsClientUdsFileBasedMetadataTest.java | 2 +- .../io/grpc/xds/{ => internal}/sds/SdsClientUdsTest.java | 2 +- .../sds}/SdsProtocolNegotiatorsTest.java | 6 +++--- .../xds/{ => internal}/sds/SdsSslContextProviderTest.java | 8 ++++---- .../sds/SecretVolumeSslContextProviderTest.java | 2 +- .../sds/ServerSslContextProviderFactoryTest.java | 2 +- .../io/grpc/xds/{ => internal}/sds/TestSdsServer.java | 2 +- .../xds/{ => internal}/sds/TlsContextManagerTest.java | 4 ++-- .../xds/{ => internal}/sds/XdsChannelBuilderTest.java | 2 +- .../xds/{ => internal}/sds/XdsSdsClientServerTest.java | 2 +- .../grpc/xds/{ => internal}/sds/XdsServerBuilderTest.java | 2 +- .../sds/trust/SdsTrustManagerFactoryTest.java | 2 +- .../{ => internal}/sds/trust/SdsX509TrustManagerTest.java | 2 +- 37 files 
changed, 56 insertions(+), 75 deletions(-) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/ClientSslContextProviderFactory.java (94%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/CommonTlsContextUtil.java (98%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/FileBasedPluginCredential.java (99%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/ReferenceCountingSslContextProviderMap.java (99%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/SdsClient.java (99%) rename xds/src/main/java/io/grpc/xds/{sds/internal => internal/sds}/SdsProtocolNegotiators.java (98%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/SdsSslContextProvider.java (99%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/SecretVolumeSslContextProvider.java (98%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/ServerSslContextProviderFactory.java (94%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/SslContextProvider.java (98%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/TlsContextManager.java (97%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/TlsContextManagerImpl.java (95%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/XdsChannelBuilder.java (94%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/XdsServerBuilder.java (96%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/trust/CertificateUtils.java (98%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/trust/SdsTrustManagerFactory.java (96%) rename xds/src/main/java/io/grpc/xds/{ => internal}/sds/trust/SdsX509TrustManager.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/ClientSslContextProviderFactoryTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/CommonTlsContextTestsUtil.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/ReferenceCountingSslContextProviderMapTest.java (97%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/SdsClientFileBasedMetadataTest.java 
(99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/SdsClientTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/SdsClientUdsFileBasedMetadataTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/SdsClientUdsTest.java (99%) rename xds/src/test/java/io/grpc/xds/{sds/internal => internal/sds}/SdsProtocolNegotiatorsTest.java (98%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/SdsSslContextProviderTest.java (97%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/SecretVolumeSslContextProviderTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/ServerSslContextProviderFactoryTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/TestSdsServer.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/TlsContextManagerTest.java (98%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/XdsChannelBuilderTest.java (97%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/XdsSdsClientServerTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/XdsServerBuilderTest.java (97%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/trust/SdsTrustManagerFactoryTest.java (99%) rename xds/src/test/java/io/grpc/xds/{ => internal}/sds/trust/SdsX509TrustManagerTest.java (99%) diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index 358c5263ae7..f83a096d8f5 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -38,9 +38,9 @@ import io.grpc.xds.XdsClient.ClusterWatcher; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; -import io.grpc.xds.sds.SslContextProvider; -import io.grpc.xds.sds.TlsContextManager; -import io.grpc.xds.sds.TlsContextManagerImpl; +import io.grpc.xds.internal.sds.SslContextProvider; +import io.grpc.xds.internal.sds.TlsContextManager; +import 
io.grpc.xds.internal.sds.TlsContextManagerImpl; import java.util.ArrayList; import java.util.List; import java.util.Objects; diff --git a/xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java b/xds/src/main/java/io/grpc/xds/internal/sds/ClientSslContextProviderFactory.java similarity index 94% rename from xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java rename to xds/src/main/java/io/grpc/xds/internal/sds/ClientSslContextProviderFactory.java index dfb5ba63f09..bd1e6635707 100644 --- a/xds/src/main/java/io/grpc/xds/sds/ClientSslContextProviderFactory.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/ClientSslContextProviderFactory.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; @@ -22,7 +22,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; import io.grpc.xds.Bootstrapper; -import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; +import io.grpc.xds.internal.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; import java.io.IOException; import java.util.concurrent.Executors; diff --git a/xds/src/main/java/io/grpc/xds/sds/CommonTlsContextUtil.java b/xds/src/main/java/io/grpc/xds/internal/sds/CommonTlsContextUtil.java similarity index 98% rename from xds/src/main/java/io/grpc/xds/sds/CommonTlsContextUtil.java rename to xds/src/main/java/io/grpc/xds/internal/sds/CommonTlsContextUtil.java index 0e514f99e04..00c0777fead 100644 --- a/xds/src/main/java/io/grpc/xds/sds/CommonTlsContextUtil.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/CommonTlsContextUtil.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkNotNull; diff --git a/xds/src/main/java/io/grpc/xds/sds/FileBasedPluginCredential.java b/xds/src/main/java/io/grpc/xds/internal/sds/FileBasedPluginCredential.java similarity index 99% rename from xds/src/main/java/io/grpc/xds/sds/FileBasedPluginCredential.java rename to xds/src/main/java/io/grpc/xds/internal/sds/FileBasedPluginCredential.java index 57dc592d50c..3f08f34ce83 100644 --- a/xds/src/main/java/io/grpc/xds/sds/FileBasedPluginCredential.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/FileBasedPluginCredential.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; diff --git a/xds/src/main/java/io/grpc/xds/sds/ReferenceCountingSslContextProviderMap.java b/xds/src/main/java/io/grpc/xds/internal/sds/ReferenceCountingSslContextProviderMap.java similarity index 99% rename from xds/src/main/java/io/grpc/xds/sds/ReferenceCountingSslContextProviderMap.java rename to xds/src/main/java/io/grpc/xds/internal/sds/ReferenceCountingSslContextProviderMap.java index 94cfe3920fe..7b7963f3f76 100644 --- a/xds/src/main/java/io/grpc/xds/sds/ReferenceCountingSslContextProviderMap.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/ReferenceCountingSslContextProviderMap.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; diff --git a/xds/src/main/java/io/grpc/xds/sds/SdsClient.java b/xds/src/main/java/io/grpc/xds/internal/sds/SdsClient.java similarity index 99% rename from xds/src/main/java/io/grpc/xds/sds/SdsClient.java rename to xds/src/main/java/io/grpc/xds/internal/sds/SdsClient.java index 019c31a95fb..426d07559eb 100644 --- a/xds/src/main/java/io/grpc/xds/sds/SdsClient.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/SdsClient.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; @@ -39,7 +39,6 @@ import io.envoyproxy.envoy.service.discovery.v2.SecretDiscoveryServiceGrpc; import io.envoyproxy.envoy.service.discovery.v2.SecretDiscoveryServiceGrpc.SecretDiscoveryServiceStub; import io.grpc.CallCredentials; -import io.grpc.Internal; import io.grpc.ManagedChannel; import io.grpc.Status; import io.grpc.inprocess.InProcessChannelBuilder; @@ -66,7 +65,6 @@ * most likely a temporary implementation until merged with the XdsClient. 
*/ // TODO(sanjaypujare): once XdsClientImpl is ready, merge with it and add retry logic -@Internal @NotThreadSafe final class SdsClient { private static final Logger logger = Logger.getLogger(SdsClient.class.getName()); diff --git a/xds/src/main/java/io/grpc/xds/sds/internal/SdsProtocolNegotiators.java b/xds/src/main/java/io/grpc/xds/internal/sds/SdsProtocolNegotiators.java similarity index 98% rename from xds/src/main/java/io/grpc/xds/sds/internal/SdsProtocolNegotiators.java rename to xds/src/main/java/io/grpc/xds/internal/sds/SdsProtocolNegotiators.java index 9419591c80a..9f09aba5463 100644 --- a/xds/src/main/java/io/grpc/xds/sds/internal/SdsProtocolNegotiators.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/SdsProtocolNegotiators.java @@ -14,14 +14,13 @@ * limitations under the License. */ -package io.grpc.xds.sds.internal; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.annotations.VisibleForTesting; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.Internal; import io.grpc.netty.GrpcHttp2ConnectionHandler; import io.grpc.netty.InternalNettyChannelBuilder; import io.grpc.netty.InternalNettyChannelBuilder.ProtocolNegotiatorFactory; @@ -30,8 +29,6 @@ import io.grpc.netty.InternalProtocolNegotiators; import io.grpc.netty.NettyChannelBuilder; import io.grpc.xds.XdsAttributes; -import io.grpc.xds.sds.SslContextProvider; -import io.grpc.xds.sds.TlsContextManagerImpl; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; @@ -48,8 +45,7 @@ * Provides client and server side gRPC {@link ProtocolNegotiator}s that use SDS to provide the SSL * context. 
*/ -@Internal -public final class SdsProtocolNegotiators { +final class SdsProtocolNegotiators { private static final Logger logger = Logger.getLogger(SdsProtocolNegotiators.class.getName()); diff --git a/xds/src/main/java/io/grpc/xds/sds/SdsSslContextProvider.java b/xds/src/main/java/io/grpc/xds/internal/sds/SdsSslContextProvider.java similarity index 99% rename from xds/src/main/java/io/grpc/xds/sds/SdsSslContextProvider.java rename to xds/src/main/java/io/grpc/xds/internal/sds/SdsSslContextProvider.java index 3c7cf8bbdf7..88a0a1dae1a 100644 --- a/xds/src/main/java/io/grpc/xds/sds/SdsSslContextProvider.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/SdsSslContextProvider.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; @@ -30,7 +30,7 @@ import io.envoyproxy.envoy.api.v2.core.Node; import io.grpc.Status; import io.grpc.netty.GrpcSslContexts; -import io.grpc.xds.sds.trust.SdsTrustManagerFactory; +import io.grpc.xds.internal.sds.trust.SdsTrustManagerFactory; import io.netty.handler.ssl.ApplicationProtocolConfig; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; diff --git a/xds/src/main/java/io/grpc/xds/sds/SecretVolumeSslContextProvider.java b/xds/src/main/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProvider.java similarity index 98% rename from xds/src/main/java/io/grpc/xds/sds/SecretVolumeSslContextProvider.java rename to xds/src/main/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProvider.java index c4bea97ab96..d843b3bf587 100644 --- a/xds/src/main/java/io/grpc/xds/sds/SecretVolumeSslContextProvider.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProvider.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; @@ -29,7 +29,7 @@ import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; import io.envoyproxy.envoy.api.v2.core.DataSource.SpecifierCase; import io.grpc.netty.GrpcSslContexts; -import io.grpc.xds.sds.trust.SdsTrustManagerFactory; +import io.grpc.xds.internal.sds.trust.SdsTrustManagerFactory; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import java.io.File; diff --git a/xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java b/xds/src/main/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactory.java similarity index 94% rename from xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java rename to xds/src/main/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactory.java index e5d69c74bc7..24289b5cbf7 100644 --- a/xds/src/main/java/io/grpc/xds/sds/ServerSslContextProviderFactory.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactory.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; @@ -22,7 +22,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.grpc.xds.Bootstrapper; -import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; +import io.grpc.xds.internal.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; import java.io.IOException; import java.util.concurrent.Executors; diff --git a/xds/src/main/java/io/grpc/xds/sds/SslContextProvider.java b/xds/src/main/java/io/grpc/xds/internal/sds/SslContextProvider.java similarity index 98% rename from xds/src/main/java/io/grpc/xds/sds/SslContextProvider.java rename to xds/src/main/java/io/grpc/xds/internal/sds/SslContextProvider.java index a00afbd9dfb..e8b4f222520 100644 --- a/xds/src/main/java/io/grpc/xds/sds/SslContextProvider.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/SslContextProvider.java @@ -14,14 +14,13 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkNotNull; import io.envoyproxy.envoy.api.v2.auth.CommonTlsContext; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.Internal; import io.netty.handler.ssl.SslContext; import java.util.concurrent.Executor; import java.util.logging.Level; @@ -33,7 +32,6 @@ * stream that is receiving the requested secret(s) or it could represent file-system based * secret(s) that are dynamic. 
*/ -@Internal public abstract class SslContextProvider { private static final Logger logger = Logger.getLogger(SslContextProvider.class.getName()); diff --git a/xds/src/main/java/io/grpc/xds/sds/TlsContextManager.java b/xds/src/main/java/io/grpc/xds/internal/sds/TlsContextManager.java similarity index 97% rename from xds/src/main/java/io/grpc/xds/sds/TlsContextManager.java rename to xds/src/main/java/io/grpc/xds/internal/sds/TlsContextManager.java index eeabc91a689..6dee14353e6 100644 --- a/xds/src/main/java/io/grpc/xds/sds/TlsContextManager.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/TlsContextManager.java @@ -14,13 +14,11 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.Internal; -@Internal public interface TlsContextManager { /** Creates a SslContextProvider. Used for retrieving a server-side SslContext. */ diff --git a/xds/src/main/java/io/grpc/xds/sds/TlsContextManagerImpl.java b/xds/src/main/java/io/grpc/xds/internal/sds/TlsContextManagerImpl.java similarity index 95% rename from xds/src/main/java/io/grpc/xds/sds/TlsContextManagerImpl.java rename to xds/src/main/java/io/grpc/xds/internal/sds/TlsContextManagerImpl.java index 934172d0529..84335b0cd50 100644 --- a/xds/src/main/java/io/grpc/xds/sds/TlsContextManagerImpl.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/TlsContextManagerImpl.java @@ -14,15 +14,14 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.annotations.VisibleForTesting; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.Internal; -import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; +import io.grpc.xds.internal.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; /** * Class to manage {@link SslContextProvider} objects created from inputs we get from xDS. Used by @@ -30,7 +29,6 @@ * {@link SslContextProvider} objects as shared resources via ref-counting as described in {@link * ReferenceCountingSslContextProviderMap}. */ -@Internal public final class TlsContextManagerImpl implements TlsContextManager { private static TlsContextManagerImpl instance; diff --git a/xds/src/main/java/io/grpc/xds/sds/XdsChannelBuilder.java b/xds/src/main/java/io/grpc/xds/internal/sds/XdsChannelBuilder.java similarity index 94% rename from xds/src/main/java/io/grpc/xds/sds/XdsChannelBuilder.java rename to xds/src/main/java/io/grpc/xds/internal/sds/XdsChannelBuilder.java index 1091d86782f..49d5f5411b1 100644 --- a/xds/src/main/java/io/grpc/xds/sds/XdsChannelBuilder.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/XdsChannelBuilder.java @@ -14,16 +14,14 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.ExperimentalApi; import io.grpc.ForwardingChannelBuilder; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.netty.InternalNettyChannelBuilder; import io.grpc.netty.NettyChannelBuilder; -import io.grpc.xds.sds.internal.SdsProtocolNegotiators; import java.net.SocketAddress; import javax.annotation.CheckReturnValue; import javax.annotation.Nullable; @@ -32,7 +30,6 @@ * A version of {@link ManagedChannelBuilder} to create xDS managed channels that will use SDS to * set up SSL with peers. Note, this is not ready to use yet. */ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/6268") public final class XdsChannelBuilder extends ForwardingChannelBuilder { private final NettyChannelBuilder delegate; diff --git a/xds/src/main/java/io/grpc/xds/sds/XdsServerBuilder.java b/xds/src/main/java/io/grpc/xds/internal/sds/XdsServerBuilder.java similarity index 96% rename from xds/src/main/java/io/grpc/xds/sds/XdsServerBuilder.java rename to xds/src/main/java/io/grpc/xds/internal/sds/XdsServerBuilder.java index 8f3b28b8761..d3611f0c496 100644 --- a/xds/src/main/java/io/grpc/xds/sds/XdsServerBuilder.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/XdsServerBuilder.java @@ -14,13 +14,12 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.grpc.BindableService; import io.grpc.CompressorRegistry; import io.grpc.DecompressorRegistry; -import io.grpc.ExperimentalApi; import io.grpc.HandlerRegistry; import io.grpc.Server; import io.grpc.ServerBuilder; @@ -29,7 +28,6 @@ import io.grpc.ServerStreamTracer; import io.grpc.ServerTransportFilter; import io.grpc.netty.NettyServerBuilder; -import io.grpc.xds.sds.internal.SdsProtocolNegotiators; import java.io.File; import java.net.InetSocketAddress; import java.util.concurrent.Executor; @@ -40,7 +38,6 @@ * A version of {@link ServerBuilder} to create xDS managed servers that will use SDS to set up SSL * with peers. Note, this is not ready to use yet. */ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/6268") public final class XdsServerBuilder extends ServerBuilder { private final NettyServerBuilder delegate; diff --git a/xds/src/main/java/io/grpc/xds/sds/trust/CertificateUtils.java b/xds/src/main/java/io/grpc/xds/internal/sds/trust/CertificateUtils.java similarity index 98% rename from xds/src/main/java/io/grpc/xds/sds/trust/CertificateUtils.java rename to xds/src/main/java/io/grpc/xds/internal/sds/trust/CertificateUtils.java index 0057ca6e8f2..834065a3cb5 100644 --- a/xds/src/main/java/io/grpc/xds/sds/trust/CertificateUtils.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/trust/CertificateUtils.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds.trust; +package io.grpc.xds.internal.sds.trust; import java.io.BufferedInputStream; import java.io.File; diff --git a/xds/src/main/java/io/grpc/xds/sds/trust/SdsTrustManagerFactory.java b/xds/src/main/java/io/grpc/xds/internal/sds/trust/SdsTrustManagerFactory.java similarity index 96% rename from xds/src/main/java/io/grpc/xds/sds/trust/SdsTrustManagerFactory.java rename to xds/src/main/java/io/grpc/xds/internal/sds/trust/SdsTrustManagerFactory.java index 3f2b9941122..e88e6aa7e36 100644 --- a/xds/src/main/java/io/grpc/xds/sds/trust/SdsTrustManagerFactory.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/trust/SdsTrustManagerFactory.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds.trust; +package io.grpc.xds.internal.sds.trust; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; @@ -23,7 +23,7 @@ import com.google.common.base.Strings; import io.envoyproxy.envoy.api.v2.auth.CertificateValidationContext; import io.envoyproxy.envoy.api.v2.core.DataSource.SpecifierCase; -import io.grpc.Internal; +import io.grpc.xds.internal.sds.TlsContextManagerImpl; import io.netty.handler.ssl.util.SimpleTrustManagerFactory; import java.io.File; import java.io.IOException; @@ -41,10 +41,9 @@ import javax.net.ssl.X509ExtendedTrustManager; /** - * Factory class used by providers of {@link io.grpc.xds.sds.TlsContextManagerImpl} to provide a + * Factory class used by providers of {@link TlsContextManagerImpl} to provide a * {@link SdsX509TrustManager} for trust and SAN checks. 
*/ -@Internal public final class SdsTrustManagerFactory extends SimpleTrustManagerFactory { private static final Logger logger = Logger.getLogger(SdsTrustManagerFactory.class.getName()); diff --git a/xds/src/main/java/io/grpc/xds/sds/trust/SdsX509TrustManager.java b/xds/src/main/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManager.java similarity index 99% rename from xds/src/main/java/io/grpc/xds/sds/trust/SdsX509TrustManager.java rename to xds/src/main/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManager.java index ce843d983c5..c1068865662 100644 --- a/xds/src/main/java/io/grpc/xds/sds/trust/SdsX509TrustManager.java +++ b/xds/src/main/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManager.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds.trust; +package io.grpc.xds.internal.sds.trust; import static com.google.common.base.Preconditions.checkNotNull; diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java index c0a068df74d..b328a14cdf9 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java @@ -57,9 +57,9 @@ import io.grpc.xds.XdsClient.RefCountedXdsClientObjectPool; import io.grpc.xds.XdsClient.XdsClientFactory; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; -import io.grpc.xds.sds.SecretVolumeSslContextProviderTest; -import io.grpc.xds.sds.SslContextProvider; -import io.grpc.xds.sds.TlsContextManager; +import io.grpc.xds.internal.sds.SecretVolumeSslContextProviderTest; +import io.grpc.xds.internal.sds.SslContextProvider; +import io.grpc.xds.internal.sds.TlsContextManager; import java.net.InetSocketAddress; import java.util.ArrayDeque; import java.util.ArrayList; diff --git a/xds/src/test/java/io/grpc/xds/sds/ClientSslContextProviderFactoryTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/ClientSslContextProviderFactoryTest.java similarity index 99% rename from 
xds/src/test/java/io/grpc/xds/sds/ClientSslContextProviderFactoryTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/ClientSslContextProviderFactoryTest.java index 2cb89af8817..a72b70931c2 100644 --- a/xds/src/test/java/io/grpc/xds/sds/ClientSslContextProviderFactoryTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/ClientSslContextProviderFactoryTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/CommonTlsContextTestsUtil.java b/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/CommonTlsContextTestsUtil.java rename to xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java index 9759979bcb5..b3d95aa9d5f 100644 --- a/xds/src/test/java/io/grpc/xds/sds/CommonTlsContextTestsUtil.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import com.google.common.base.Strings; import io.envoyproxy.envoy.api.v2.auth.CertificateValidationContext; diff --git a/xds/src/test/java/io/grpc/xds/sds/ReferenceCountingSslContextProviderMapTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/ReferenceCountingSslContextProviderMapTest.java similarity index 97% rename from xds/src/test/java/io/grpc/xds/sds/ReferenceCountingSslContextProviderMapTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/ReferenceCountingSslContextProviderMapTest.java index bc56eadce62..7cab5060d32 100644 --- a/xds/src/test/java/io/grpc/xds/sds/ReferenceCountingSslContextProviderMapTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/ReferenceCountingSslContextProviderMapTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.fail; @@ -24,7 +24,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; +import io.grpc.xds.internal.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; import org.junit.Before; import org.junit.Rule; import org.junit.Test; diff --git a/xds/src/test/java/io/grpc/xds/sds/SdsClientFileBasedMetadataTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/SdsClientFileBasedMetadataTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java index b732422c0cd..d67e37649b2 100644 --- a/xds/src/test/java/io/grpc/xds/sds/SdsClientFileBasedMetadataTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.fail; diff --git a/xds/src/test/java/io/grpc/xds/sds/SdsClientTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/SdsClientTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SdsClientTest.java index d5f57df5089..0927e7e286e 100644 --- a/xds/src/test/java/io/grpc/xds/sds/SdsClientTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.fail; diff --git a/xds/src/test/java/io/grpc/xds/sds/SdsClientUdsFileBasedMetadataTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientUdsFileBasedMetadataTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/SdsClientUdsFileBasedMetadataTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SdsClientUdsFileBasedMetadataTest.java index f49865178ea..02fde83dfc3 100644 --- a/xds/src/test/java/io/grpc/xds/sds/SdsClientUdsFileBasedMetadataTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientUdsFileBasedMetadataTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.doReturn; diff --git a/xds/src/test/java/io/grpc/xds/sds/SdsClientUdsTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientUdsTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/SdsClientUdsTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SdsClientUdsTest.java index c84bff4edbd..80f9ac18f69 100644 --- a/xds/src/test/java/io/grpc/xds/sds/SdsClientUdsTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientUdsTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.mock; diff --git a/xds/src/test/java/io/grpc/xds/sds/internal/SdsProtocolNegotiatorsTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsProtocolNegotiatorsTest.java similarity index 98% rename from xds/src/test/java/io/grpc/xds/sds/internal/SdsProtocolNegotiatorsTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SdsProtocolNegotiatorsTest.java index ea3c65364e2..f1678fba85e 100644 --- a/xds/src/test/java/io/grpc/xds/sds/internal/SdsProtocolNegotiatorsTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsProtocolNegotiatorsTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds.internal; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertNotNull; @@ -30,8 +30,8 @@ import io.grpc.internal.testing.TestUtils; import io.grpc.netty.GrpcHttp2ConnectionHandler; import io.grpc.netty.InternalProtocolNegotiationEvent; -import io.grpc.xds.sds.internal.SdsProtocolNegotiators.ClientSdsHandler; -import io.grpc.xds.sds.internal.SdsProtocolNegotiators.ClientSdsProtocolNegotiator; +import io.grpc.xds.internal.sds.SdsProtocolNegotiators.ClientSdsHandler; +import io.grpc.xds.internal.sds.SdsProtocolNegotiators.ClientSdsProtocolNegotiator; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPipeline; diff --git a/xds/src/test/java/io/grpc/xds/sds/SdsSslContextProviderTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java similarity index 97% rename from xds/src/test/java/io/grpc/xds/sds/SdsSslContextProviderTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java index e5d3faa19a7..186e1554bab 100644 --- a/xds/src/test/java/io/grpc/xds/sds/SdsSslContextProviderTest.java 
+++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java @@ -14,12 +14,12 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; -import static io.grpc.xds.sds.SdsClientTest.getOneCertificateValidationContextSecret; -import static io.grpc.xds.sds.SdsClientTest.getOneTlsCertSecret; -import static io.grpc.xds.sds.SecretVolumeSslContextProviderTest.doChecksOnSslContext; +import static io.grpc.xds.internal.sds.SdsClientTest.getOneCertificateValidationContextSecret; +import static io.grpc.xds.internal.sds.SdsClientTest.getOneTlsCertSecret; +import static io.grpc.xds.internal.sds.SecretVolumeSslContextProviderTest.doChecksOnSslContext; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/xds/src/test/java/io/grpc/xds/sds/SecretVolumeSslContextProviderTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/SecretVolumeSslContextProviderTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java index c2df8e62d4a..572ba583d3b 100644 --- a/xds/src/test/java/io/grpc/xds/sds/SecretVolumeSslContextProviderTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/ServerSslContextProviderFactoryTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/ServerSslContextProviderFactoryTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java index bb241becb59..283c60d39e3 100644 --- a/xds/src/test/java/io/grpc/xds/sds/ServerSslContextProviderFactoryTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/TestSdsServer.java b/xds/src/test/java/io/grpc/xds/internal/sds/TestSdsServer.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/TestSdsServer.java rename to xds/src/test/java/io/grpc/xds/internal/sds/TestSdsServer.java index 028123135c0..7e1eb148d2d 100644 --- a/xds/src/test/java/io/grpc/xds/sds/TestSdsServer.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/TestSdsServer.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.base.Preconditions.checkNotNull; diff --git a/xds/src/test/java/io/grpc/xds/sds/TlsContextManagerTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/TlsContextManagerTest.java similarity index 98% rename from xds/src/test/java/io/grpc/xds/sds/TlsContextManagerTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/TlsContextManagerTest.java index d736f77c956..253005ed8da 100644 --- a/xds/src/test/java/io/grpc/xds/sds/TlsContextManagerTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/TlsContextManagerTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.mock; @@ -25,7 +25,7 @@ import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.xds.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; +import io.grpc.xds.internal.sds.ReferenceCountingSslContextProviderMap.SslContextProviderFactory; import java.lang.reflect.Field; import org.junit.Before; import org.junit.Rule; diff --git a/xds/src/test/java/io/grpc/xds/sds/XdsChannelBuilderTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/XdsChannelBuilderTest.java similarity index 97% rename from xds/src/test/java/io/grpc/xds/sds/XdsChannelBuilderTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/XdsChannelBuilderTest.java index 4789cd8ec40..b09e42a1a79 100644 --- a/xds/src/test/java/io/grpc/xds/sds/XdsChannelBuilderTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/XdsChannelBuilderTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/XdsSdsClientServerTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/XdsSdsClientServerTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/XdsSdsClientServerTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/XdsSdsClientServerTest.java index 5a2687ba853..f3063c4436e 100644 --- a/xds/src/test/java/io/grpc/xds/sds/XdsSdsClientServerTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/XdsSdsClientServerTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/XdsServerBuilderTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/XdsServerBuilderTest.java similarity index 97% rename from xds/src/test/java/io/grpc/xds/sds/XdsServerBuilderTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/XdsServerBuilderTest.java index 805d073e9ce..69a57b068e5 100644 --- a/xds/src/test/java/io/grpc/xds/sds/XdsServerBuilderTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/XdsServerBuilderTest.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package io.grpc.xds.sds; +package io.grpc.xds.internal.sds; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/trust/SdsTrustManagerFactoryTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsTrustManagerFactoryTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/trust/SdsTrustManagerFactoryTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsTrustManagerFactoryTest.java index f03e836081c..466c90c3226 100644 --- a/xds/src/test/java/io/grpc/xds/sds/trust/SdsTrustManagerFactoryTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsTrustManagerFactoryTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds.trust; +package io.grpc.xds.internal.sds.trust; import static com.google.common.truth.Truth.assertThat; diff --git a/xds/src/test/java/io/grpc/xds/sds/trust/SdsX509TrustManagerTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java similarity index 99% rename from xds/src/test/java/io/grpc/xds/sds/trust/SdsX509TrustManagerTest.java rename to xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java index 64b2fd1b8a1..b0618bfd0fa 100644 --- a/xds/src/test/java/io/grpc/xds/sds/trust/SdsX509TrustManagerTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package io.grpc.xds.sds.trust; +package io.grpc.xds.internal.sds.trust; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.fail; From e320b9bee505798ed6326b94dc02483c7899c99d Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Thu, 13 Feb 2020 11:00:12 -0800 Subject: [PATCH 59/86] Include xDS example's README.md when updating release tag version. 
--- RELEASING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/RELEASING.md b/RELEASING.md index 34836768446..4387c5ddd3e 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -123,6 +123,7 @@ Tagging the Release $ ${EDITOR:-nano -w} README.md $ ${EDITOR:-nano -w} documentation/android-channel-builder.md $ ${EDITOR:-nano -w} cronet/README.md + $ ${EDITOR:-nano -w} examples/example-xds/README.md $ git commit -a -m "Update README etc to reference $MAJOR.$MINOR.$PATCH" ``` 3. Change root build files to remove "-SNAPSHOT" for the next release version From 5555ec9a35442f5282840d97935db5e3b71d2430 Mon Sep 17 00:00:00 2001 From: Eric Gribkoff Date: Thu, 13 Feb 2020 18:51:08 -0800 Subject: [PATCH 60/86] buildscripts: fix url of grpc core repo (#6708) --- buildscripts/kokoro/xds.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildscripts/kokoro/xds.sh b/buildscripts/kokoro/xds.sh index 52acc2978f4..9372a324543 100755 --- a/buildscripts/kokoro/xds.sh +++ b/buildscripts/kokoro/xds.sh @@ -14,7 +14,7 @@ pushd grpc-java/interop-testing ../gradlew installDist -x test -PskipCodegen=true -PskipAndroid=true popd -git clone https://github.com/ericgribkoff/grpc.git +git clone https://github.com/grpc/grpc.git grpc/tools/run_tests/helper_scripts/prep_xds.sh python3 grpc/tools/run_tests/run_xds_tests.py \ From cd049ed48b663e187af1accb94e0469a6a0a5005 Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Fri, 14 Feb 2020 10:58:50 -0800 Subject: [PATCH 61/86] xds: have cluster balancer cancel cluster watcher only during shutdown Previously when CdsConfig is changed, the old cluster watcher is canceled immediately even it's in graceful switch period, so the old cluster balancer won't receive any new updates. This behavior is not as good/clean as cancelling the old watch only once the old cluster balancer is shutdown. 
--- .../java/io/grpc/xds/CdsLoadBalancer.java | 59 ++++++------------- .../java/io/grpc/xds/CdsLoadBalancerTest.java | 2 +- 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index f83a096d8f5..6ec4263f03d 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -59,12 +59,9 @@ public final class CdsLoadBalancer extends LoadBalancer { // The following fields become non-null once handleResolvedAddresses() successfully. - // Most recent CdsConfig. + // Most recent cluster name. @Nullable - private CdsConfig cdsConfig; - // Most recent ClusterWatcher. - @Nullable - private ClusterWatcher clusterWatcher; + private String clusterName; @Nullable private ObjectPool xdsClientPool; @Nullable @@ -110,29 +107,19 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { } CdsConfig newCdsConfig = (CdsConfig) lbConfig; - // If CdsConfig is changed, do a graceful switch. - if (!newCdsConfig.equals(cdsConfig)) { - LoadBalancer.Factory fixedCdsConfigBalancerFactory = - new FixedCdsConfigBalancerFactory(newCdsConfig); - switchingLoadBalancer.switchTo(fixedCdsConfigBalancerFactory); + // If cluster is changed, do a graceful switch. + if (!newCdsConfig.name.equals(clusterName)) { + LoadBalancer.Factory clusterBalancerFactory = new ClusterBalancerFactory(newCdsConfig.name); + switchingLoadBalancer.switchTo(clusterBalancerFactory); } - switchingLoadBalancer.handleResolvedAddresses(resolvedAddresses); - - // The clusterWatcher is also updated after switchingLoadBalancer.handleResolvedAddresses(). - cdsConfig = newCdsConfig; + clusterName = newCdsConfig.name; } @Override public void handleNameResolutionError(Status error) { channelLogger.log(ChannelLogLevel.ERROR, "Name resolution error: {0}", error); - // Go into TRANSIENT_FAILURE if we have not yet received any cluster resource. 
Otherwise, - // we keep running with the data we had previously. - if (clusterWatcher == null) { - helper.updateBalancingState(TRANSIENT_FAILURE, new ErrorPicker(error)); - } else { - switchingLoadBalancer.handleNameResolutionError(error); - } + switchingLoadBalancer.handleNameResolutionError(error); } @Override @@ -151,32 +138,28 @@ public void shutdown() { } /** - * A load balancer factory that provides a load balancer for a given CdsConfig. + * A load balancer factory that provides a load balancer for a given cluster. */ - private final class FixedCdsConfigBalancerFactory extends LoadBalancer.Factory { + private final class ClusterBalancerFactory extends LoadBalancer.Factory { - final CdsConfig cdsConfig; - final CdsConfig oldCdsConfig; - final ClusterWatcher oldClusterWatcher; + final String clusterName; - FixedCdsConfigBalancerFactory(CdsConfig cdsConfig) { - this.cdsConfig = cdsConfig; - oldCdsConfig = CdsLoadBalancer.this.cdsConfig; - oldClusterWatcher = CdsLoadBalancer.this.clusterWatcher; + ClusterBalancerFactory(String clusterName) { + this.clusterName = clusterName; } @Override public boolean equals(Object o) { - if (!(o instanceof FixedCdsConfigBalancerFactory)) { + if (!(o instanceof ClusterBalancerFactory)) { return false; } - FixedCdsConfigBalancerFactory that = (FixedCdsConfigBalancerFactory) o; - return cdsConfig.equals(that.cdsConfig); + ClusterBalancerFactory that = (ClusterBalancerFactory) o; + return clusterName.equals(that.clusterName); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), cdsConfig); + return Objects.hash(super.hashCode(), clusterName); } @Override @@ -207,7 +190,7 @@ public void shutdown() { if (clusterWatcher.edsBalancer != null) { clusterWatcher.edsBalancer.shutdown(); } - xdsClient.cancelClusterDataWatch(cdsConfig.name, clusterWatcher); + xdsClient.cancelClusterDataWatch(clusterName, clusterWatcher); } } @@ -215,11 +198,7 @@ public void shutdown() { public void 
handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { if (clusterWatcher == null) { clusterWatcher = new ClusterWatcherImpl(helper, resolvedAddresses); - xdsClient.watchClusterData(cdsConfig.name, clusterWatcher); - if (oldCdsConfig != null) { - xdsClient.cancelClusterDataWatch(oldCdsConfig.name, oldClusterWatcher); - } - CdsLoadBalancer.this.clusterWatcher = clusterWatcher; + xdsClient.watchClusterData(clusterName, clusterWatcher); } } }; diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java index b328a14cdf9..912ad72d94a 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java @@ -264,7 +264,6 @@ public void handleCdsConfigs() throws Exception { ArgumentCaptor clusterWatcherCaptor2 = ArgumentCaptor.forClass(null); verify(xdsClient).watchClusterData(eq("bar.googleapis.com"), clusterWatcherCaptor2.capture()); - verify(xdsClient).cancelClusterDataWatch("foo.googleapis.com", clusterWatcher1); ClusterWatcher clusterWatcher2 = clusterWatcherCaptor2.getValue(); clusterWatcher2.onClusterChanged( @@ -301,6 +300,7 @@ public void handleCdsConfigs() throws Exception { edsLbHelper2.updateBalancingState(ConnectivityState.READY, picker2); verify(helper).updateBalancingState(ConnectivityState.READY, picker2); verify(edsLoadBalancer1).shutdown(); + verify(xdsClient).cancelClusterDataWatch("foo.googleapis.com", clusterWatcher1); clusterWatcher2.onClusterChanged( ClusterUpdate.newBuilder() From 92c9237fe667f41bdbbf0192ba3666e24ee922fb Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Fri, 14 Feb 2020 11:11:15 -0800 Subject: [PATCH 62/86] xds: have ClusterEndpointsBalancer cancel endpoint watcher only during shutdown Previously when eds service name is changed, the old endpoint watcher is canceled immediately even it's in graceful switch period, so the old ClusterEndpointsBalancer won't receive any new updates. 
This behavior is not as good/clean as cancelling the old watch only once the old ClusterEndpointsBalancer is shutdown. --- .../java/io/grpc/xds/EdsLoadBalancer.java | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java index 640a12dc4ba..30963c1eb3d 100644 --- a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java @@ -69,9 +69,6 @@ final class EdsLoadBalancer extends LoadBalancer { // Most recent XdsConfig. @Nullable private XdsConfig xdsConfig; - // Most recent EndpointWatcher. - @Nullable - private EndpointWatcher endpointWatcher; @Nullable private ObjectPool xdsClientPool; @Nullable @@ -214,11 +211,7 @@ XdsClient createXdsClient() { new ClusterEndpointsBalancerFactory(clusterServiceName); switchingLoadBalancer.switchTo(clusterEndpointsLoadBalancerFactory); } - resolvedAddresses = resolvedAddresses.toBuilder() - .setLoadBalancingPolicyConfig(newXdsConfig) - .build(); switchingLoadBalancer.handleResolvedAddresses(resolvedAddresses); - this.xdsConfig = newXdsConfig; } @@ -263,16 +256,9 @@ private void cancelClientStatsReport() { */ private final class ClusterEndpointsBalancerFactory extends LoadBalancer.Factory { final String clusterServiceName; - @Nullable - final String oldClusterServiceName; ClusterEndpointsBalancerFactory(String clusterServiceName) { this.clusterServiceName = clusterServiceName; - if (xdsConfig != null) { - oldClusterServiceName = xdsConfig.edsServiceName; - } else { - oldClusterServiceName = null; - } } @Override @@ -314,11 +300,6 @@ final class ClusterEndpointsBalancer extends LoadBalancer { endpointWatcher = new EndpointWatcherImpl(localityStore); xdsClient.watchEndpointData(clusterServiceName, endpointWatcher); - if (EdsLoadBalancer.this.endpointWatcher != null) { - xdsClient.cancelEndpointDataWatch( - oldClusterServiceName, EdsLoadBalancer.this.endpointWatcher); - } - 
EdsLoadBalancer.this.endpointWatcher = endpointWatcher; } // TODO(zddapeng): In handleResolvedAddresses() handle child policy change if any. From 161a26ecff84090465fa0f385462129afcdaca9f Mon Sep 17 00:00:00 2001 From: Eric Gribkoff Date: Fri, 14 Feb 2020 14:05:23 -0800 Subject: [PATCH 63/86] interop-testing: sync SimpleResponse definition with core and go (#6713) --- .../main/proto/grpc/testing/messages.proto | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/interop-testing/src/main/proto/grpc/testing/messages.proto b/interop-testing/src/main/proto/grpc/testing/messages.proto index 35a3429272c..59fe4cc2530 100644 --- a/interop-testing/src/main/proto/grpc/testing/messages.proto +++ b/interop-testing/src/main/proto/grpc/testing/messages.proto @@ -41,6 +41,21 @@ message EchoStatus { string message = 2; } +// The type of route that a client took to reach a server w.r.t. gRPCLB. +// The server must fill in "fallback" if it detects that the RPC reached +// the server via the "gRPCLB fallback" path, and "backend" if it detects +// that the RPC reached the server via "gRPCLB backend" path (i.e. if it got +// the address of this server from the gRPCLB server BalanceLoad RPC). Exactly +// how this detection is done is context and server dependent. +enum GrpclbRouteType { + // Server didn't detect the route that a client took to reach it. + GRPCLB_ROUTE_TYPE_UNKNOWN = 0; + // Indicates that a client reached a server via gRPCLB fallback. + GRPCLB_ROUTE_TYPE_FALLBACK = 1; + // Indicates that a client reached a server as a gRPCLB-given backend. + GRPCLB_ROUTE_TYPE_BACKEND = 2; +} + // Unary request. message SimpleRequest { reserved 1; @@ -85,8 +100,10 @@ message SimpleResponse { // Server ID. This must be unique among different server instances, // but the same across all RPC's made to a particular server instance. string server_id = 4; + // gRPCLB Path. + GrpclbRouteType grpclb_route_type = 5; // Server hostname. 
- string hostname = 5; + string hostname = 6; } message SimpleContext { From bb37524750fa4923f7dc390f5f961602b46b3790 Mon Sep 17 00:00:00 2001 From: Ziad Hatahet Date: Tue, 18 Feb 2020 13:16:08 -0500 Subject: [PATCH 64/86] examples: Fix typo in comments (#6721) --- .../examples/manualflowcontrol/ManualFlowControlClient.java | 2 +- .../examples/manualflowcontrol/ManualFlowControlServer.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlClient.java b/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlClient.java index 1e4a3762edb..f442de6d527 100644 --- a/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlClient.java +++ b/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlClient.java @@ -66,7 +66,7 @@ public void beforeStart(final ClientCallStreamObserver requestStre // Note: the onReadyHandler's invocation is serialized on the same thread pool as the incoming // StreamObserver's onNext(), onError(), and onComplete() handlers. Blocking the onReadyHandler will prevent // additional messages from being processed by the incoming StreamObserver. The onReadyHandler must return - // in a timely manor or else message processing throughput will suffer. + // in a timely manner or else message processing throughput will suffer. requestStream.setOnReadyHandler(new Runnable() { // An iterator is used so we can pause and resume iteration of the request data. 
Iterator iterator = names().iterator(); diff --git a/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java b/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java index 694330dfdb6..4ad59308437 100644 --- a/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java +++ b/examples/src/main/java/io/grpc/examples/manualflowcontrol/ManualFlowControlServer.java @@ -46,8 +46,8 @@ public StreamObserver sayHelloStreaming(final StreamObserver Date: Wed, 19 Feb 2020 09:07:07 -0800 Subject: [PATCH 65/86] xds: support for xDS data types needed for server side SDS support (#6718) --- .../io/grpc/xds/EnvoyServerProtoData.java | 301 ++++++++++++++++++ xds/src/main/java/io/grpc/xds/XdsClient.java | 18 +- .../io/grpc/xds/EnvoyServerProtoDataTest.java | 134 ++++++++ .../sds/CommonTlsContextTestsUtil.java | 46 +-- .../sds/SdsSslContextProviderTest.java | 2 +- .../SecretVolumeSslContextProviderTest.java | 17 +- .../ServerSslContextProviderFactoryTest.java | 4 +- 7 files changed, 484 insertions(+), 38 deletions(-) create mode 100644 xds/src/main/java/io/grpc/xds/EnvoyServerProtoData.java create mode 100644 xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java diff --git a/xds/src/main/java/io/grpc/xds/EnvoyServerProtoData.java b/xds/src/main/java/io/grpc/xds/EnvoyServerProtoData.java new file mode 100644 index 00000000000..9d5e2605ff4 --- /dev/null +++ b/xds/src/main/java/io/grpc/xds/EnvoyServerProtoData.java @@ -0,0 +1,301 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.xds; + +import com.google.common.annotations.VisibleForTesting; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import javax.annotation.Nullable; + +/** + * Defines gRPC data types for Envoy protobuf messages used in xDS protocol on the server side, + * similar to how {@link EnvoyProtoData} defines it for the client side. + */ +final class EnvoyServerProtoData { + + // Prevent instantiation. + private EnvoyServerProtoData() { + } + + static final class CidrRange { + private final String addressPrefix; + private final int prefixLen; + + @VisibleForTesting + CidrRange(String addressPrefix, int prefixLen) { + this.addressPrefix = addressPrefix; + this.prefixLen = prefixLen; + } + + static CidrRange fromEnvoyProtoCidrRange( + io.envoyproxy.envoy.api.v2.core.CidrRange proto) { + return new CidrRange(proto.getAddressPrefix(), proto.getPrefixLen().getValue()); + } + + public String getAddressPrefix() { + return addressPrefix; + } + + public int getPrefixLen() { + return prefixLen; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CidrRange cidrRange = (CidrRange) o; + return prefixLen == cidrRange.prefixLen + && java.util.Objects.equals(addressPrefix, cidrRange.addressPrefix); + } + + @Override + public int hashCode() { + return java.util.Objects.hash(addressPrefix, prefixLen); + } + + @Override + public String toString() { + return "CidrRange{" + + 
"addressPrefix='" + addressPrefix + '\'' + + ", prefixLen=" + prefixLen + + '}'; + } + } + + /** + * Corresponds to Envoy proto message + * {@link io.envoyproxy.envoy.api.v2.listener.FilterChainMatch}. + */ + static final class FilterChainMatch { + private final int destinationPort; + private final List prefixRanges; + private final List applicationProtocols; + + private FilterChainMatch(int destinationPort, + List prefixRanges, List applicationProtocols) { + this.destinationPort = destinationPort; + this.prefixRanges = Collections.unmodifiableList(prefixRanges); + this.applicationProtocols = Collections.unmodifiableList(applicationProtocols); + } + + static FilterChainMatch fromEnvoyProtoFilterChainMatch( + io.envoyproxy.envoy.api.v2.listener.FilterChainMatch proto) { + List prefixRanges = new ArrayList<>(); + for (io.envoyproxy.envoy.api.v2.core.CidrRange range : proto.getPrefixRangesList()) { + prefixRanges.add(CidrRange.fromEnvoyProtoCidrRange(range)); + } + List applicationProtocols = new ArrayList<>(); + for (String appProtocol : proto.getApplicationProtocolsList()) { + applicationProtocols.add(appProtocol); + } + return new FilterChainMatch( + proto.getDestinationPort().getValue(), + prefixRanges, + applicationProtocols); + } + + public int getDestinationPort() { + return destinationPort; + } + + public List getPrefixRanges() { + return prefixRanges; + } + + public List getApplicationProtocols() { + return applicationProtocols; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FilterChainMatch that = (FilterChainMatch) o; + return destinationPort == that.destinationPort + && java.util.Objects.equals(prefixRanges, that.prefixRanges) + && java.util.Objects.equals(applicationProtocols, that.applicationProtocols); + } + + @Override + public int hashCode() { + return java.util.Objects.hash(destinationPort, prefixRanges, applicationProtocols); + } + + 
@Override + public String toString() { + return "FilterChainMatch{" + + "destinationPort=" + destinationPort + + ", prefixRanges=" + prefixRanges + + ", applicationProtocols=" + applicationProtocols + + '}'; + } + } + + /** + * Corresponds to Envoy proto message {@link io.envoyproxy.envoy.api.v2.listener.FilterChain}. + */ + static final class FilterChain { + private final FilterChainMatch filterChainMatch; + // TODO(sanjaypujare): remove dependency on envoy data type along with rest of the code. + private final io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext downstreamTlsContext; + + private FilterChain(FilterChainMatch filterChainMatch, + io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext downstreamTlsContext) { + this.filterChainMatch = filterChainMatch; + this.downstreamTlsContext = downstreamTlsContext; + } + + static FilterChain fromEnvoyProtoFilterChain( + io.envoyproxy.envoy.api.v2.listener.FilterChain proto) { + return new FilterChain( + FilterChainMatch.fromEnvoyProtoFilterChainMatch(proto.getFilterChainMatch()), + proto.getTlsContext() + ); + } + + public FilterChainMatch getFilterChainMatch() { + return filterChainMatch; + } + + public io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext getDownstreamTlsContext() { + return downstreamTlsContext; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FilterChain that = (FilterChain) o; + return java.util.Objects.equals(filterChainMatch, that.filterChainMatch) + && java.util.Objects.equals(downstreamTlsContext, that.downstreamTlsContext); + } + + @Override + public int hashCode() { + return java.util.Objects.hash(filterChainMatch, downstreamTlsContext); + } + + @Override + public String toString() { + return "FilterChain{" + + "filterChainMatch=" + filterChainMatch + + ", downstreamTlsContext=" + downstreamTlsContext + + '}'; + } + } + + /** + * Corresponds to Envoy proto message {@link 
io.envoyproxy.envoy.api.v2.Listener} & related + * classes. + */ + static final class Listener { + private final String name; + @Nullable + private final String address; + private final List filterChains; + + private Listener(String name, String address, + List filterChains) { + this.name = name; + this.address = address; + this.filterChains = Collections.unmodifiableList(filterChains); + } + + private static String convertEnvoyAddressToString( + io.envoyproxy.envoy.api.v2.core.Address proto) { + if (proto.hasSocketAddress()) { + io.envoyproxy.envoy.api.v2.core.SocketAddress socketAddress = proto.getSocketAddress(); + String address = socketAddress.getAddress(); + switch (socketAddress.getPortSpecifierCase()) { + case NAMED_PORT: + return address + ":" + socketAddress.getNamedPort(); + case PORT_VALUE: + return address + ":" + socketAddress.getPortValue(); + default: + return address; + } + } + return null; + } + + static Listener fromEnvoyProtoListener(io.envoyproxy.envoy.api.v2.Listener proto) { + List filterChains = new ArrayList<>(proto.getFilterChainsCount()); + for (io.envoyproxy.envoy.api.v2.listener.FilterChain filterChain : + proto.getFilterChainsList()) { + filterChains.add(FilterChain.fromEnvoyProtoFilterChain(filterChain)); + } + return new Listener( + proto.getName(), + convertEnvoyAddressToString(proto.getAddress()), + filterChains); + } + + public String getName() { + return name; + } + + public String getAddress() { + return address; + } + + public List getFilterChains() { + return filterChains; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Listener listener = (Listener) o; + return java.util.Objects.equals(name, listener.name) + && java.util.Objects.equals(address, listener.address) + && java.util.Objects.equals(filterChains, listener.filterChains); + } + + @Override + public int hashCode() { + return java.util.Objects.hash(name, 
address, filterChains); + } + + @Override + public String toString() { + return "Listener{" + + "name='" + name + '\'' + + ", address='" + address + '\'' + + ", filterChains=" + filterChains + + '}'; + } + } +} diff --git a/xds/src/main/java/io/grpc/xds/XdsClient.java b/xds/src/main/java/io/grpc/xds/XdsClient.java index 85d15f5bca9..288ad2fb92e 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClient.java +++ b/xds/src/main/java/io/grpc/xds/XdsClient.java @@ -34,6 +34,7 @@ import io.grpc.xds.EnvoyProtoData.DropOverload; import io.grpc.xds.EnvoyProtoData.Locality; import io.grpc.xds.EnvoyProtoData.LocalityLbEndpoints; +import io.grpc.xds.EnvoyServerProtoData.Listener; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; @@ -59,21 +60,29 @@ abstract class XdsClient { */ static final class ConfigUpdate { private final String clusterName; + private final Listener listener; - private ConfigUpdate(String clusterName) { + private ConfigUpdate(String clusterName, @Nullable Listener listener) { this.clusterName = clusterName; + this.listener = listener; } String getClusterName() { return clusterName; } + @Nullable + public Listener getListener() { + return listener; + } + static Builder newBuilder() { return new Builder(); } static final class Builder { private String clusterName; + @Nullable private Listener listener; // Use ConfigUpdate.newBuilder(). 
private Builder() { @@ -84,9 +93,14 @@ Builder setClusterName(String clusterName) { return this; } + Builder setListener(Listener listener) { + this.listener = listener; + return this; + } + ConfigUpdate build() { Preconditions.checkState(clusterName != null, "clusterName is not set"); - return new ConfigUpdate(clusterName); + return new ConfigUpdate(clusterName, listener); } } } diff --git a/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java b/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java new file mode 100644 index 00000000000..a9e76474b2c --- /dev/null +++ b/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java @@ -0,0 +1,134 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.grpc.xds; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.protobuf.Any; +import com.google.protobuf.UInt32Value; +import io.envoyproxy.envoy.api.v2.auth.CommonTlsContext; +import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; +import io.envoyproxy.envoy.api.v2.auth.SdsSecretConfig; +import io.envoyproxy.envoy.api.v2.core.CidrRange; +import io.envoyproxy.envoy.api.v2.core.SocketAddress; +import io.envoyproxy.envoy.api.v2.listener.Filter; +import io.envoyproxy.envoy.api.v2.listener.FilterChain; +import io.envoyproxy.envoy.api.v2.listener.FilterChainMatch; +import io.grpc.xds.EnvoyServerProtoData.Listener; +import io.grpc.xds.internal.sds.CommonTlsContextTestsUtil; +import java.util.List; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +/** + * Unit tests for {@link EnvoyServerProtoData}. + */ +@RunWith(JUnit4.class) +public class EnvoyServerProtoDataTest { + + @Test + public void listener_convertFromListenerProto() { + io.envoyproxy.envoy.api.v2.core.Address address = + io.envoyproxy.envoy.api.v2.core.Address.newBuilder() + .setSocketAddress(SocketAddress.newBuilder() + .setPortValue(8000) + .setAddress("10.2.1.34") + .build()) + .build(); + io.envoyproxy.envoy.api.v2.Listener listener = + io.envoyproxy.envoy.api.v2.Listener.newBuilder() + .setName("8000") + .setAddress(address) + .addFilterChains(createOutFilter()) + .addFilterChains(createInFilter()) + .build(); + + Listener xdsListener = Listener.fromEnvoyProtoListener(listener); + assertThat(xdsListener.getName()).isEqualTo("8000"); + assertThat(xdsListener.getAddress()).isEqualTo("10.2.1.34:8000"); + List filterChains = xdsListener.getFilterChains(); + assertThat(filterChains).isNotNull(); + assertThat(filterChains.size()).isEqualTo(2); + EnvoyServerProtoData.FilterChain outFilter = filterChains.get(0); + assertThat(outFilter).isNotNull(); + EnvoyServerProtoData.FilterChainMatch outFilterChainMatch = 
outFilter.getFilterChainMatch(); + assertThat(outFilterChainMatch).isNotNull(); + assertThat(outFilterChainMatch.getDestinationPort()).isEqualTo(8000); + assertThat(outFilterChainMatch.getApplicationProtocols()).isEmpty(); + assertThat(outFilterChainMatch.getPrefixRanges()).isEmpty(); + assertThat(outFilter.getDownstreamTlsContext()) + .isEqualTo(DownstreamTlsContext.getDefaultInstance()); + + EnvoyServerProtoData.FilterChain inFilter = filterChains.get(1); + assertThat(inFilter).isNotNull(); + EnvoyServerProtoData.FilterChainMatch inFilterChainMatch = inFilter.getFilterChainMatch(); + assertThat(inFilterChainMatch).isNotNull(); + assertThat(inFilterChainMatch.getDestinationPort()).isEqualTo(8000); + assertThat(inFilterChainMatch.getApplicationProtocols()).containsExactly("managed-mtls"); + assertThat(inFilterChainMatch.getPrefixRanges()).containsExactly( + new EnvoyServerProtoData.CidrRange("10.20.0.15", 32)); + DownstreamTlsContext inFilterTlsContext = inFilter.getDownstreamTlsContext(); + assertThat(inFilterTlsContext).isNotNull(); + CommonTlsContext commonTlsContext = inFilterTlsContext.getCommonTlsContext(); + assertThat(commonTlsContext).isNotNull(); + List tlsCertSdsConfigs = commonTlsContext + .getTlsCertificateSdsSecretConfigsList(); + assertThat(tlsCertSdsConfigs).isNotNull(); + assertThat(tlsCertSdsConfigs).hasSize(1); + assertThat(tlsCertSdsConfigs.get(0).getName()).isEqualTo("google-sds-config-default"); + } + + private static FilterChain createOutFilter() { + FilterChain filterChain = + FilterChain.newBuilder() + .setFilterChainMatch( + FilterChainMatch.newBuilder() + .setDestinationPort(UInt32Value.newBuilder().setValue(8000).build()) + .build()) + .addFilters(Filter.newBuilder() + .setName("envoy.http_connection_manager") + .build()) + .build(); + return filterChain; + } + + private static FilterChain createInFilter() { + FilterChain filterChain = + FilterChain.newBuilder() + .setFilterChainMatch( + FilterChainMatch.newBuilder() + 
.setDestinationPort(UInt32Value.newBuilder().setValue(8000) + .build()) + .addPrefixRanges(CidrRange.newBuilder() + .setAddressPrefix("10.20.0.15") + .setPrefixLen(UInt32Value.newBuilder().setValue(32) + .build()).build()) + .addApplicationProtocols("managed-mtls") + .build()) + .setTlsContext(CommonTlsContextTestsUtil.buildTestDownstreamTlsContext()) + .addFilters(Filter.newBuilder() + .setName("envoy.http_connection_manager") + .setTypedConfig(Any.newBuilder() + .setTypeUrl( + "type.googleapis.com/envoy.config.filter.network.http_connection_manager" + + ".v2.HttpConnectionManager")) + .build()) + .build(); + return filterChain; + } +} diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java b/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java index b3d95aa9d5f..0fc7511c2f7 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java @@ -20,9 +20,11 @@ import io.envoyproxy.envoy.api.v2.auth.CertificateValidationContext; import io.envoyproxy.envoy.api.v2.auth.CommonTlsContext; import io.envoyproxy.envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext; +import io.envoyproxy.envoy.api.v2.auth.DownstreamTlsContext; import io.envoyproxy.envoy.api.v2.auth.SdsSecretConfig; import io.envoyproxy.envoy.api.v2.auth.TlsCertificate; import io.envoyproxy.envoy.api.v2.core.DataSource; +import java.util.Arrays; /** Utility class for client and server ssl provider tests. 
*/ public class CommonTlsContextTestsUtil { @@ -57,27 +59,6 @@ static CommonTlsContext buildCommonTlsContextFromSdsConfigForValidationContext( return builder.build(); } - static CommonTlsContext buildCommonTlsContextFromSdsConfigsForAll( - String certName, - String certTargetUri, - String validationContextName, - String validationContextTargetUri, - String channelType) { - - CommonTlsContext.Builder builder = CommonTlsContext.newBuilder(); - - SdsSecretConfig sdsSecretConfig = buildSdsSecretConfig(certName, certTargetUri, channelType); - if (sdsSecretConfig != null) { - builder.addTlsCertificateSdsSecretConfigs(sdsSecretConfig); - } - sdsSecretConfig = - buildSdsSecretConfig(validationContextName, validationContextTargetUri, channelType); - if (sdsSecretConfig != null) { - builder.setValidationContextSdsSecretConfig(sdsSecretConfig); - } - return builder.build(); - } - static CommonTlsContext buildCommonTlsContextFromSdsConfigForTlsCertificate( String name, String targetUri, String trustCa) { @@ -134,4 +115,27 @@ static CommonTlsContext buildCommonTlsContextWithAdditionalValues( } return builder.build(); } + + /** + * Helper method to build DownstreamTlsContext for multiple test classes. + */ + static DownstreamTlsContext buildDownstreamTlsContext(CommonTlsContext commonTlsContext) { + DownstreamTlsContext downstreamTlsContext = + DownstreamTlsContext.newBuilder().setCommonTlsContext(commonTlsContext).build(); + return downstreamTlsContext; + } + + /** Helper method for creating DownstreamTlsContext values for tests. 
*/ + public static DownstreamTlsContext buildTestDownstreamTlsContext() { + return buildDownstreamTlsContext( + buildCommonTlsContextWithAdditionalValues( + "google-sds-config-default", + "unix:/var/run/sds/uds_path", + "ROOTCA", + "unix:/var/run/sds/uds_path", + Arrays.asList("spiffe://grpc-sds-testing.svc.id.goog/ns/default/sa/bob"), + Arrays.asList("managed-tls"), + null + )); + } } diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java index 186e1554bab..1a31b6c6b77 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsSslContextProviderTest.java @@ -80,7 +80,7 @@ private SdsSslContextProvider getSdsSslContextProvider( return server ? SdsSslContextProvider.getProviderForServer( - SecretVolumeSslContextProviderTest.buildDownstreamTlsContext(commonTlsContext), + CommonTlsContextTestsUtil.buildDownstreamTlsContext(commonTlsContext), node, MoreExecutors.directExecutor(), MoreExecutors.directExecutor()) diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java index 572ba583d3b..6477188621e 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SecretVolumeSslContextProviderTest.java @@ -277,7 +277,8 @@ public void getProviderForServer_defaultTlsCertificate_throwsException() { TlsCertificate tlsCert = TlsCertificate.getDefaultInstance(); try { SecretVolumeSslContextProvider.getProviderForServer( - buildDownstreamTlsContext(getCommonTlsContext(tlsCert, /* certContext= */ null))); + CommonTlsContextTestsUtil + .buildDownstreamTlsContext(getCommonTlsContext(tlsCert, /* certContext= */ null))); Assert.fail("no exception thrown"); } catch (IllegalArgumentException 
expected) { assertThat(expected).hasMessageThat().isEqualTo("filename expected"); @@ -297,7 +298,8 @@ public void getProviderForServer_certContextWithInlineString_throwsException() { .build(); try { SecretVolumeSslContextProvider.getProviderForServer( - buildDownstreamTlsContext(getCommonTlsContext(tlsCert, certContext))); + CommonTlsContextTestsUtil + .buildDownstreamTlsContext(getCommonTlsContext(tlsCert, certContext))); Assert.fail("no exception thrown"); } catch (IllegalArgumentException expected) { assertThat(expected.getMessage()).isEqualTo("filename expected"); @@ -419,7 +421,7 @@ static void doChecksOnSslContext(boolean server, SslContext sslContext, */ static DownstreamTlsContext buildDownstreamTlsContextFromFilenames( String privateKey, String certChain, String trustCa) { - return buildDownstreamTlsContext( + return CommonTlsContextTestsUtil.buildDownstreamTlsContext( buildCommonTlsContextFromFilenames(privateKey, certChain, trustCa)); } @@ -464,15 +466,6 @@ private static CommonTlsContext getCommonTlsContext( return builder.build(); } - /** - * Helper method to build DownstreamTlsContext for above tests. Called from other classes as well. - */ - static DownstreamTlsContext buildDownstreamTlsContext(CommonTlsContext commonTlsContext) { - DownstreamTlsContext downstreamTlsContext = - DownstreamTlsContext.newBuilder().setCommonTlsContext(commonTlsContext).build(); - return downstreamTlsContext; - } - /** * Helper method to build UpstreamTlsContext for above tests. Called from other classes as well. 
*/ diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java index 283c60d39e3..b61c629e439 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/ServerSslContextProviderFactoryTest.java @@ -53,7 +53,7 @@ public void createSslContextProvider_sdsConfigForTlsCert_expectException() { CommonTlsContextTestsUtil.buildCommonTlsContextFromSdsConfigForTlsCertificate( "name", "unix:/tmp/sds/path", CA_PEM_FILE); DownstreamTlsContext downstreamTlsContext = - SecretVolumeSslContextProviderTest.buildDownstreamTlsContext(commonTlsContext); + CommonTlsContextTestsUtil.buildDownstreamTlsContext(commonTlsContext); try { SslContextProvider unused = @@ -72,7 +72,7 @@ public void createSslContextProvider_sdsConfigForCertValidationContext_expectExc CommonTlsContextTestsUtil.buildCommonTlsContextFromSdsConfigForValidationContext( "name", "unix:/tmp/sds/path", SERVER_KEY_FILE, SERVER_PEM_FILE); DownstreamTlsContext downstreamTlsContext = - SecretVolumeSslContextProviderTest.buildDownstreamTlsContext(commonTlsContext); + CommonTlsContextTestsUtil.buildDownstreamTlsContext(commonTlsContext); try { SslContextProvider unused = From e6d8c27448dc783260ee0bc536cb90a12c8cc46b Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Wed, 19 Feb 2020 13:59:58 -0800 Subject: [PATCH 66/86] Revert "census: Set SpanKind on Client/Server traces (#6680)" (#6729) This reverts commit 60bc74620fc88b321a46791241326939de70ed20. 
--- census/src/main/java/io/grpc/census/CensusTracingModule.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/census/src/main/java/io/grpc/census/CensusTracingModule.java b/census/src/main/java/io/grpc/census/CensusTracingModule.java index e04db099d4d..e9e12941ed4 100644 --- a/census/src/main/java/io/grpc/census/CensusTracingModule.java +++ b/census/src/main/java/io/grpc/census/CensusTracingModule.java @@ -238,7 +238,6 @@ final class ClientCallTracer extends ClientStreamTracer.Factory { generateTraceSpanName(false, method.getFullMethodName()), parentSpan) .setRecordEvents(true) - .setSpanKind(Span.Kind.CLIENT) .startSpan(); } @@ -309,7 +308,6 @@ private final class ServerTracer extends ServerStreamTracer { generateTraceSpanName(true, fullMethodName), remoteSpan) .setRecordEvents(true) - .setSpanKind(Span.Kind.SERVER) .startSpan(); } From 19de229b7f5fa5cf2b5259c04b7d7e896270dea3 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Wed, 19 Feb 2020 14:36:47 -0800 Subject: [PATCH 67/86] Update readme to specify 1.27.1 (#6730) --- README.md | 28 ++++++++++++------------ cronet/README.md | 2 +- documentation/android-channel-builder.md | 4 ++-- examples/example-xds/README.md | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9d8d449be16..f7f2c628dcb 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/quickstart/java.html) or the more explanatory [gRPC basics](https://grpc.io/docs/tutorials/basic/java.html). 
-The [examples](https://github.com/grpc/grpc-java/tree/v1.27.0/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.27.0/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.27.1/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.27.1/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -42,37 +42,37 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.27.0 + 1.27.1 io.grpc grpc-protobuf - 1.27.0 + 1.27.1 io.grpc grpc-stub - 1.27.0 + 1.27.1 ``` Or for Gradle with non-Android, add to your dependencies: ```gradle -implementation 'io.grpc:grpc-netty-shaded:1.27.0' -implementation 'io.grpc:grpc-protobuf:1.27.0' -implementation 'io.grpc:grpc-stub:1.27.0' +implementation 'io.grpc:grpc-netty-shaded:1.27.1' +implementation 'io.grpc:grpc-protobuf:1.27.1' +implementation 'io.grpc:grpc-stub:1.27.1' ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.27.0' -implementation 'io.grpc:grpc-protobuf-lite:1.27.0' -implementation 'io.grpc:grpc-stub:1.27.0' +implementation 'io.grpc:grpc-okhttp:1.27.1' +implementation 'io.grpc:grpc-protobuf-lite:1.27.1' +implementation 'io.grpc:grpc-stub:1.27.1' ``` [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.27.0 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.27.1 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -104,7 +104,7 @@ For protobuf-based codegen integrated with the Maven build system, you can use com.google.protobuf:protoc:3.11.0:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.27.0:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.27.1:exe:${os.detected.classifier} @@ -134,7 +134,7 @@ protobuf { } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.27.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.27.1' } } generateProtoTasks { diff --git a/cronet/README.md b/cronet/README.md index 563af0fd4ce..5e43c03d361 100644 --- a/cronet/README.md +++ b/cronet/README.md @@ -26,7 +26,7 @@ In your app module's `build.gradle` file, include a dependency on both `grpc-cro Google Play Services Client Library for Cronet ``` -implementation 'io.grpc:grpc-cronet:1.27.0' +implementation 'io.grpc:grpc-cronet:1.27.1' implementation 'com.google.android.gms:play-services-cronet:16.0.0' ``` diff --git a/documentation/android-channel-builder.md b/documentation/android-channel-builder.md index 9ae35e62940..1bcdcaa0b8d 100644 --- a/documentation/android-channel-builder.md +++ b/documentation/android-channel-builder.md @@ -36,8 +36,8 @@ In your `build.gradle` file, include a dependency on both `grpc-android` and `grpc-okhttp`: ``` -implementation 'io.grpc:grpc-android:1.27.0' -implementation 'io.grpc:grpc-okhttp:1.27.0' +implementation 'io.grpc:grpc-android:1.27.1' +implementation 'io.grpc:grpc-okhttp:1.27.1' ``` You also need permission to access the device's network state in your diff --git a/examples/example-xds/README.md b/examples/example-xds/README.md index b01a9ecdacd..c9bcbff3c73 100644 --- a/examples/example-xds/README.md +++ b/examples/example-xds/README.md @@ -19,7 +19,7 @@ encounter issues please consult [COMPILING.md](../../COMPILING.md). 1. The server does not use XDS, so recent releases work fine. 
Building using recent releases is much easier, so check out the most recent release tag: ``` -$ git checkout v1.27.0 +$ git checkout v1.27.1 ``` 2. Build the hello-world example server or the hostname example server. See @@ -40,7 +40,7 @@ $ git checkout master ``` To: ``` - grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.27.0" } + grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.27.1" } ``` From 88c027bac3b6ac40d1ea65f563bf1f9de8a9cc19 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Wed, 19 Feb 2020 18:07:44 -0800 Subject: [PATCH 68/86] xds: report load stats for all clusters over a single LRS client (#6706) Current implementation of client side load reporting is incorrect. Mainly each gRPC channel should have at most one LRS stream based on the current design of using a single management server. In this change: - Each LoadStatsStore instance is associated with clusterName:clusterServiceName. clusterName and clusterServiceName (nullable) is required to construct an LoadStatsStore instance. - The semantics is that an LoadStatsStore is responsible for recording loads sent to that cluster service of the cluster. The queried load report (via LoadStatsStore#generateLoadReport()) will have cluster_name and cluster_service_name (if not null) set. - A LoadReportClient is responsible for reporting loads for all clusters. Add LoadStatsStore to LoadReportClient via LoadReportClient#addLoadStatsStore(clusterName, clusterServiceName, loadStatsStore). This should be done before LoadReportClient#startLoadReporting() is called due to the above open question. - An XdsClient contains a single LoadReportClient instance. Its APIs XdsClient#reportClientStats(clusterName, clusterServiceName, loadStatsStore) calls LoadReportClient#addLoadStatsStore(clusterName, clusterServiceName, loadStatsStore) and then starts it. XdsClient#cancelClientStatsReport(clusterName, clusterServiceName) calls LoadReportClient#removeLoadStatsStore(clusterName, clusterServiceName) and stops it. 
LoadReportClient#addLoadStatsStore(clusterName, clusterServiceName, loadStatsStore) cannot be called repeatedly as once the load reporting started, we cannot change the cluster to report loads for. However, we are able to do report then cancel then report then cancel and so on. - Refactored EdsLoadBalancer a bit, to accommodate the new APIs of enabling/disabling load reporting. The ClusterEndpointsLoadBalancer instance carries its own LoadStatsStore and controls start/cancel of load reporting. - The interface for LoadReportClient is eliminated. LoadReportClient will completely be a subcomponent of XdsClient. Note: Currently we assume no cluster/eds service switch, which means we will report load for a single cluster/eds service. So we make the restriction to LoadReportClient#addLoadStatsStore() API that it cannot be called after load reporting has already started. This restriction will be removed after the above open question is resolved. --- .../java/io/grpc/xds/EdsLoadBalancer.java | 57 +-- .../java/io/grpc/xds/LoadReportClient.java | 354 ++++++++++++++++-- .../io/grpc/xds/LoadReportClientImpl.java | 335 ----------------- .../main/java/io/grpc/xds/LoadStatsStore.java | 26 +- .../java/io/grpc/xds/LoadStatsStoreImpl.java | 19 +- xds/src/main/java/io/grpc/xds/XdsClient.java | 13 +- .../main/java/io/grpc/xds/XdsClientImpl.java | 58 ++- ...mplTest.java => LoadReportClientTest.java} | 146 ++++---- .../io/grpc/xds/LoadStatsStoreImplTest.java | 5 +- .../java/io/grpc/xds/XdsClientImplTest.java | 49 +-- 10 files changed, 514 insertions(+), 548 deletions(-) delete mode 100644 xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java rename xds/src/test/java/io/grpc/xds/{LoadReportClientImplTest.java => LoadReportClientTest.java} (78%) diff --git a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java index 30963c1eb3d..1b439dcd2b1 100644 --- a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java +++ 
b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java @@ -46,7 +46,6 @@ import io.grpc.xds.XdsClient.XdsClientFactory; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -63,8 +62,6 @@ final class EdsLoadBalancer extends LoadBalancer { private final Bootstrapper bootstrapper; private final XdsChannelFactory channelFactory; private final Helper edsLbHelper; - // Cache for load stats stores for each service in cluster keyed by cluster service names. - private final Map loadStatsStoreMap = new HashMap<>(); // Most recent XdsConfig. @Nullable @@ -74,9 +71,10 @@ final class EdsLoadBalancer extends LoadBalancer { @Nullable private XdsClient xdsClient; @Nullable - private LoadReportClient loadReportClient; - @Nullable private String clusterName; + // FIXME(chengyuanzhang): should be one instance per cluster:cluster_service. + @Nullable + private LoadStatsStore loadStatsStore; EdsLoadBalancer(Helper edsLbHelper, ResourceUpdateCallback resourceUpdateCallback) { this( @@ -187,21 +185,22 @@ XdsClient createXdsClient() { // TODO(zdapeng): Use the correct cluster name. Currently load reporting will be broken if // edsServiceName is changed because we are using edsServiceName for the cluster name. clusterName = clusterServiceName; + loadStatsStore = new LoadStatsStoreImpl(clusterName, null); } - boolean shouldReportStats = newXdsConfig.lrsServerName != null; - if (shouldReportStats && !isReportingStats()) { - // Start load reporting. This may be a restarting after previously stopping the load - // reporting, so need to re-add all the pre-existing loadStatsStores to the new - // loadReportClient. 
- loadReportClient = xdsClient.reportClientStats(clusterName, newXdsConfig.lrsServerName); - for (Map.Entry entry : loadStatsStoreMap.entrySet()) { - loadReportClient.addLoadStatsStore(entry.getKey(), entry.getValue()); + // FIXME(chengyuanzhang): should report loads for each cluster:cluster_service. + if (xdsConfig == null + || !Objects.equals(newXdsConfig.lrsServerName, xdsConfig.lrsServerName)) { + if (newXdsConfig.lrsServerName != null) { + if (!newXdsConfig.lrsServerName.equals("")) { + throw new AssertionError( + "Can only report load to the same management server"); + } + xdsClient.reportClientStats(clusterName, null, loadStatsStore); + } else if (xdsConfig != null) { + xdsClient.cancelClientStatsReport(clusterName, null); } } - if (!shouldReportStats && isReportingStats()) { - cancelClientStatsReport(); - } // Note: childPolicy change will be handled in LocalityStore, to be implemented. // If edsServiceName in XdsConfig is changed, do a graceful switch. @@ -232,25 +231,14 @@ public boolean canHandleEmptyAddressListFromNameResolution() { public void shutdown() { channelLogger.log(ChannelLogLevel.DEBUG, "EDS load balancer is shutting down"); switchingLoadBalancer.shutdown(); - if (isReportingStats()) { - cancelClientStatsReport(); - } if (xdsClient != null) { + if (xdsConfig != null && xdsConfig.lrsServerName != null) { + xdsClient.cancelClientStatsReport(clusterName, null); + } xdsClient = xdsClientPool.returnObject(xdsClient); } } - /** Whether the client stats for the cluster is currently reported to the traffic director. */ - private boolean isReportingStats() { - return loadReportClient != null; - } - - /** Stops to report client stats for the cluster. */ - private void cancelClientStatsReport() { - xdsClient.cancelClientStatsReport(clusterName); - loadReportClient = null; - } - /** * A load balancer factory that provides a load balancer for a given cluster service. 
*/ @@ -291,11 +279,6 @@ final class ClusterEndpointsBalancer extends LoadBalancer { ClusterEndpointsBalancer(Helper helper) { this.helper = helper; - LoadStatsStore loadStatsStore = new LoadStatsStoreImpl(); - loadStatsStoreMap.put(clusterServiceName, loadStatsStore); - if (isReportingStats()) { - loadReportClient.addLoadStatsStore(clusterServiceName, loadStatsStore); - } localityStore = localityStoreFactory.newLocalityStore(helper, lbRegistry, loadStatsStore); endpointWatcher = new EndpointWatcherImpl(localityStore); @@ -320,10 +303,6 @@ public boolean canHandleEmptyAddressListFromNameResolution() { @Override public void shutdown() { - loadStatsStoreMap.remove(clusterServiceName); - if (isReportingStats()) { - loadReportClient.removeLoadStatsStore(clusterServiceName); - } localityStore.reset(); xdsClient.cancelEndpointDataWatch(clusterServiceName, endpointWatcher); } diff --git a/xds/src/main/java/io/grpc/xds/LoadReportClient.java b/xds/src/main/java/io/grpc/xds/LoadReportClient.java index 3b9e3431965..aa0c3584f7a 100644 --- a/xds/src/main/java/io/grpc/xds/LoadReportClient.java +++ b/xds/src/main/java/io/grpc/xds/LoadReportClient.java @@ -16,53 +16,361 @@ package io.grpc.xds; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Stopwatch; +import com.google.common.base.Supplier; +import com.google.protobuf.util.Durations; +import io.envoyproxy.envoy.api.v2.core.Node; +import io.envoyproxy.envoy.api.v2.endpoint.ClusterStats; +import io.envoyproxy.envoy.service.load_stats.v2.LoadReportingServiceGrpc; +import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsRequest; +import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsResponse; +import io.grpc.ManagedChannel; +import io.grpc.Status; +import io.grpc.SynchronizationContext; +import 
io.grpc.SynchronizationContext.ScheduledHandle; +import io.grpc.internal.BackoffPolicy; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; import javax.annotation.concurrent.NotThreadSafe; /** - * A {@link LoadReportClient} is the gRPC client's load reporting agent that establishes - * connections to traffic director for reporting load stats from gRPC client's perspective. - * - *

    Each {@link LoadReportClient} instance is responsible for reporting loads for a single - * cluster. + * Client of xDS load reporting service based on LRS protocol, which reports load stats of + * gRPC client's perspective to a management server. */ @NotThreadSafe -interface LoadReportClient { +final class LoadReportClient { + private static final Logger logger = Logger.getLogger(XdsClientImpl.class.getName()); + + private final ManagedChannel channel; + private final Node node; + private final SynchronizationContext syncContext; + private final ScheduledExecutorService timerService; + private final Supplier stopwatchSupplier; + private final Stopwatch retryStopwatch; + private final BackoffPolicy.Provider backoffPolicyProvider; + + // Sources of load stats data for each cluster. + // FIXME(chengyuanzhang): this should be Map> as each + // ClusterStats is keyed by cluster:cluster_service. Currently, cluster_service is always unset. + private final Map loadStatsStoreMap = new HashMap<>(); + private boolean started; + + @Nullable + private BackoffPolicy lrsRpcRetryPolicy; + @Nullable + private ScheduledHandle lrsRpcRetryTimer; + @Nullable + private LrsStream lrsStream; + @Nullable + private LoadReportCallback callback; + + LoadReportClient( + ManagedChannel channel, + Node node, + SynchronizationContext syncContext, + ScheduledExecutorService scheduledExecutorService, + BackoffPolicy.Provider backoffPolicyProvider, + Supplier stopwatchSupplier) { + this.channel = checkNotNull(channel, "channel"); + this.node = checkNotNull(node, "node"); + this.syncContext = checkNotNull(syncContext, "syncContext"); + this.timerService = checkNotNull(scheduledExecutorService, "timeService"); + this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); + this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatchSupplier"); + this.retryStopwatch = stopwatchSupplier.get(); + started = false; + } /** * Establishes load reporting communication 
and negotiates with traffic director to report load * stats periodically. Calling this method on an already started {@link LoadReportClient} is * no-op. - * - * @param callback containing methods to be invoked for passing information received from load - * reporting responses to xDS load balancer. */ - // TODO(chengyuanzhang): do not expose this method. - void startLoadReporting(LoadReportCallback callback); + public void startLoadReporting(LoadReportCallback callback) { + if (started) { + return; + } + this.callback = callback; + started = true; + startLrsRpc(); + } /** * Terminates load reporting. Calling this method on an already stopped * {@link LoadReportClient} is no-op. - * */ - // TODO(chengyuanzhang): do not expose this method. - void stopLoadReporting(); + public void stopLoadReporting() { + if (!started) { + return; + } + if (lrsRpcRetryTimer != null) { + lrsRpcRetryTimer.cancel(); + } + if (lrsStream != null) { + lrsStream.close(Status.CANCELLED.withDescription("stop load reporting").asException()); + } + started = false; + // Do not shutdown channel as it is not owned by LrsClient. + } /** - * Provides this LoadReportClient source of load stats data for the given cluster service. - * If requested, data from the given {@code loadStatsStore} is periodically queried and - * sent to traffic director by this LoadReportClient. + * Provides this LoadReportClient source of load stats data for the given + * cluster:cluster_service. If requested, data from the given loadStatsStore is + * periodically queried and sent to traffic director by this LoadReportClient. + * + *

    Currently we expect load stats data for all clusters to report loads for are provided + * before load reporting starts (so that LRS initial request tells management server clusters + * it is reporting loads for). Design TBD for reporting loads for extra clusters after load + * reporting has started. * - * @param clusterServiceName name of the cluster service. - * @param loadStatsStore storage of load stats. + *

    Note: currently clusterServiceName is always unset. */ - void addLoadStatsStore(String clusterServiceName, LoadStatsStore loadStatsStore); + public void addLoadStatsStore( + String clusterName, @Nullable String clusterServiceName, LoadStatsStore loadStatsStore) { + checkState( + !loadStatsStoreMap.containsKey(clusterName), + "load stats for cluster " + clusterName + " already exists"); + // FIXME(chengyuanzhang): relax this restriction after design is fleshed out. + checkState( + !started, + "load stats for all clusters to report loads for should be provided before " + + "load reporting has started"); + loadStatsStoreMap.put(clusterName, loadStatsStore); + } /** - * Stops providing load stats data for the given cluster service. + * Stops providing load stats data for the given cluster:cluster_service. * - * @param clusterServiceName name of the cluster service. + *

    Note: currently clusterServiceName is always unset. */ - void removeLoadStatsStore(String clusterServiceName); + public void removeLoadStatsStore(String clusterName, @Nullable String clusterServiceName) { + checkState( + loadStatsStoreMap.containsKey(clusterName), + "load stats for cluster " + clusterName + " does not exist"); + loadStatsStoreMap.remove(clusterName); + } + + @VisibleForTesting + static class LoadReportingTask implements Runnable { + private final LrsStream stream; + + LoadReportingTask(LrsStream stream) { + this.stream = stream; + } + + @Override + public void run() { + stream.sendLoadReport(); + } + } + + @VisibleForTesting + class LrsRpcRetryTask implements Runnable { + + @Override + public void run() { + startLrsRpc(); + } + } + + private void startLrsRpc() { + checkState(lrsStream == null, "previous lbStream has not been cleared yet"); + LoadReportingServiceGrpc.LoadReportingServiceStub stub + = LoadReportingServiceGrpc.newStub(channel); + lrsStream = new LrsStream(stub, stopwatchSupplier.get()); + retryStopwatch.reset().start(); + lrsStream.start(); + } + + private class LrsStream implements StreamObserver { + + // Cluster to report loads for asked by management server. + final Set clusterNames = new HashSet<>(); + final LoadReportingServiceGrpc.LoadReportingServiceStub stub; + final Stopwatch reportStopwatch; + StreamObserver lrsRequestWriter; + boolean initialResponseReceived; + boolean closed; + long loadReportIntervalNano = -1; + ScheduledHandle loadReportTimer; + + LrsStream(LoadReportingServiceGrpc.LoadReportingServiceStub stub, Stopwatch stopwatch) { + this.stub = checkNotNull(stub, "stub"); + reportStopwatch = checkNotNull(stopwatch, "stopwatch"); + } + + void start() { + lrsRequestWriter = stub.withWaitForReady().streamLoadStats(this); + reportStopwatch.reset().start(); + // Tells management server which clusters the client is reporting loads for. 
+ List clusterStatsList = new ArrayList<>(); + for (String clusterName : loadStatsStoreMap.keySet()) { + clusterStatsList.add(ClusterStats.newBuilder().setClusterName(clusterName).build()); + } + LoadStatsRequest initRequest = + LoadStatsRequest.newBuilder() + .setNode(node) + .addAllClusterStats(clusterStatsList) + .build(); + lrsRequestWriter.onNext(initRequest); + logger.log(Level.FINE, "Initial LRS request sent: {0}", initRequest); + } + + @Override + public void onNext(final LoadStatsResponse response) { + syncContext.execute(new Runnable() { + @Override + public void run() { + handleResponse(response); + } + }); + } + + @Override + public void onError(final Throwable t) { + syncContext.execute(new Runnable() { + @Override + public void run() { + handleStreamClosed(Status.fromThrowable(t) + .augmentDescription("Stream to XDS management server had an error")); + } + }); + } + + @Override + public void onCompleted() { + syncContext.execute(new Runnable() { + @Override + public void run() { + handleStreamClosed( + Status.UNAVAILABLE.withDescription("Stream to XDS management server was closed")); + } + }); + } + + private void sendLoadReport() { + long interval = reportStopwatch.elapsed(TimeUnit.NANOSECONDS); + reportStopwatch.reset().start(); + LoadStatsRequest.Builder requestBuilder = LoadStatsRequest.newBuilder().setNode(node); + for (String name : clusterNames) { + if (loadStatsStoreMap.containsKey(name)) { + LoadStatsStore loadStatsStore = loadStatsStoreMap.get(name); + ClusterStats report = + loadStatsStore.generateLoadReport() + .toBuilder() + .setLoadReportInterval(Durations.fromNanos(interval)) + .build(); + requestBuilder.addClusterStats(report); + } + } + LoadStatsRequest request = requestBuilder.build(); + lrsRequestWriter.onNext(request); + logger.log(Level.FINE, "Sent LoadStatsRequest\n{0}", request); + scheduleNextLoadReport(); + } + + private void scheduleNextLoadReport() { + // Cancel pending load report and reschedule with updated load reporting 
interval. + if (loadReportTimer != null && loadReportTimer.isPending()) { + loadReportTimer.cancel(); + loadReportTimer = null; + } + if (loadReportIntervalNano > 0) { + loadReportTimer = syncContext.schedule( + new LoadReportingTask(this), loadReportIntervalNano, TimeUnit.NANOSECONDS, + timerService); + } + } + + private void handleResponse(LoadStatsResponse response) { + if (closed) { + return; + } + + if (!initialResponseReceived) { + logger.log(Level.FINE, "Received LRS initial response: {0}", response); + initialResponseReceived = true; + } else { + logger.log(Level.FINE, "Received an LRS response: {0}", response); + } + loadReportIntervalNano = Durations.toNanos(response.getLoadReportingInterval()); + callback.onReportResponse(loadReportIntervalNano); + clusterNames.clear(); + clusterNames.addAll(response.getClustersList()); + scheduleNextLoadReport(); + } + + private void handleStreamClosed(Status status) { + checkArgument(!status.isOk(), "unexpected OK status"); + if (closed) { + return; + } + closed = true; + cleanUp(); + + long delayNanos = 0; + if (initialResponseReceived || lrsRpcRetryPolicy == null) { + // Reset the backoff sequence if balancer has sent the initial response, or backoff sequence + // has never been initialized. + lrsRpcRetryPolicy = backoffPolicyProvider.get(); + } + // Backoff only when balancer wasn't working previously. + if (!initialResponseReceived) { + // The back-off policy determines the interval between consecutive RPC upstarts, thus the + // actual delay may be smaller than the value from the back-off policy, or even negative, + // depending how much time was spent in the previous RPC. + delayNanos = + lrsRpcRetryPolicy.nextBackoffNanos() - retryStopwatch.elapsed(TimeUnit.NANOSECONDS); + } + logger.log(Level.FINE, "LRS stream closed, backoff in {0} second(s)", + TimeUnit.NANOSECONDS.toSeconds(delayNanos <= 0 ? 
0 : delayNanos)); + if (delayNanos <= 0) { + startLrsRpc(); + } else { + lrsRpcRetryTimer = + syncContext.schedule(new LrsRpcRetryTask(), delayNanos, TimeUnit.NANOSECONDS, + timerService); + } + } + + private void close(@Nullable Exception error) { + if (closed) { + return; + } + closed = true; + cleanUp(); + if (error == null) { + lrsRequestWriter.onCompleted(); + } else { + lrsRequestWriter.onError(error); + } + } + + private void cleanUp() { + if (loadReportTimer != null) { + loadReportTimer.cancel(); + loadReportTimer = null; + } + if (lrsStream == this) { + lrsStream = null; + } + } + } /** * Callbacks for passing information received from client load reporting responses to xDS load diff --git a/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java b/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java deleted file mode 100644 index ac5e6a89d18..00000000000 --- a/xds/src/main/java/io/grpc/xds/LoadReportClientImpl.java +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Copyright 2019 The gRPC Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.grpc.xds; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Stopwatch; -import com.google.common.base.Supplier; -import com.google.protobuf.util.Durations; -import io.envoyproxy.envoy.api.v2.core.Node; -import io.envoyproxy.envoy.api.v2.endpoint.ClusterStats; -import io.envoyproxy.envoy.service.load_stats.v2.LoadReportingServiceGrpc; -import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsRequest; -import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsResponse; -import io.grpc.ManagedChannel; -import io.grpc.Status; -import io.grpc.SynchronizationContext; -import io.grpc.SynchronizationContext.ScheduledHandle; -import io.grpc.internal.BackoffPolicy; -import io.grpc.stub.StreamObserver; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; -import javax.annotation.concurrent.NotThreadSafe; - -/** - * Client of xDS load reporting service based on LRS protocol. - */ -@NotThreadSafe -final class LoadReportClientImpl implements LoadReportClient { - - // TODO(chengyuanzhang): use channel logger once XdsClientImpl migrates to use channel logger. 
- private static final Logger logger = Logger.getLogger(XdsClientImpl.class.getName()); - - private final String clusterName; - private final ManagedChannel channel; - private final Node node; - private final SynchronizationContext syncContext; - private final ScheduledExecutorService timerService; - private final Supplier stopwatchSupplier; - private final Stopwatch retryStopwatch; - private final BackoffPolicy.Provider backoffPolicyProvider; - - // Sources of load stats data for each service in cluster. - private final Map loadStatsStoreMap = new HashMap<>(); - private boolean started; - - @Nullable - private BackoffPolicy lrsRpcRetryPolicy; - @Nullable - private ScheduledHandle lrsRpcRetryTimer; - @Nullable - private LrsStream lrsStream; - @Nullable - private LoadReportCallback callback; - - LoadReportClientImpl(ManagedChannel channel, - String clusterName, - Node node, - SynchronizationContext syncContext, - ScheduledExecutorService scheduledExecutorService, - BackoffPolicy.Provider backoffPolicyProvider, - Supplier stopwatchSupplier) { - this.channel = checkNotNull(channel, "channel"); - this.clusterName = checkNotNull(clusterName, "clusterName"); - this.node = checkNotNull(node, "node"); - this.syncContext = checkNotNull(syncContext, "syncContext"); - this.timerService = checkNotNull(scheduledExecutorService, "timeService"); - this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); - this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatchSupplier"); - this.retryStopwatch = stopwatchSupplier.get(); - started = false; - } - - @Override - public void startLoadReporting(LoadReportCallback callback) { - if (started) { - return; - } - this.callback = callback; - started = true; - startLrsRpc(); - } - - @Override - public void stopLoadReporting() { - if (!started) { - return; - } - if (lrsRpcRetryTimer != null) { - lrsRpcRetryTimer.cancel(); - } - if (lrsStream != null) { - 
lrsStream.close(Status.CANCELLED.withDescription("stop load reporting").asException()); - } - started = false; - // Do not shutdown channel as it is not owned by LrsClient. - } - - @Override - public void addLoadStatsStore(String clusterServiceName, LoadStatsStore loadStatsStore) { - loadStatsStoreMap.put(clusterServiceName, loadStatsStore); - } - - @Override - public void removeLoadStatsStore(String clusterServiceName) { - loadStatsStoreMap.remove(clusterServiceName); - } - - @VisibleForTesting - static class LoadReportingTask implements Runnable { - private final LrsStream stream; - - LoadReportingTask(LrsStream stream) { - this.stream = stream; - } - - @Override - public void run() { - stream.sendLoadReport(); - } - } - - @VisibleForTesting - class LrsRpcRetryTask implements Runnable { - - @Override - public void run() { - startLrsRpc(); - } - } - - private void startLrsRpc() { - checkState(lrsStream == null, "previous lbStream has not been cleared yet"); - LoadReportingServiceGrpc.LoadReportingServiceStub stub - = LoadReportingServiceGrpc.newStub(channel); - lrsStream = new LrsStream(stub, stopwatchSupplier.get()); - retryStopwatch.reset().start(); - lrsStream.start(); - } - - private class LrsStream implements StreamObserver { - - // Cluster services to report loads for, instructed by LRS responses. 
- final Set clusterServiceNames = new HashSet<>(); - final LoadReportingServiceGrpc.LoadReportingServiceStub stub; - final Stopwatch reportStopwatch; - StreamObserver lrsRequestWriter; - boolean initialResponseReceived; - boolean closed; - long loadReportIntervalNano = -1; - ScheduledHandle loadReportTimer; - - LrsStream(LoadReportingServiceGrpc.LoadReportingServiceStub stub, Stopwatch stopwatch) { - this.stub = checkNotNull(stub, "stub"); - reportStopwatch = checkNotNull(stopwatch, "stopwatch"); - } - - void start() { - lrsRequestWriter = stub.withWaitForReady().streamLoadStats(this); - reportStopwatch.reset().start(); - LoadStatsRequest initRequest = - LoadStatsRequest.newBuilder() - .setNode(node) - .addClusterStats(ClusterStats.newBuilder().setClusterName(clusterName)) - .build(); - lrsRequestWriter.onNext(initRequest); - logger.log(Level.FINE, "Initial LRS request sent: {0}", initRequest); - } - - @Override - public void onNext(final LoadStatsResponse response) { - syncContext.execute(new Runnable() { - @Override - public void run() { - handleResponse(response); - } - }); - } - - @Override - public void onError(final Throwable t) { - syncContext.execute(new Runnable() { - @Override - public void run() { - handleStreamClosed(Status.fromThrowable(t) - .augmentDescription("Stream to XDS management server had an error")); - } - }); - } - - @Override - public void onCompleted() { - syncContext.execute(new Runnable() { - @Override - public void run() { - handleStreamClosed( - Status.UNAVAILABLE.withDescription("Stream to XDS management server was closed")); - } - }); - } - - private void sendLoadReport() { - long interval = reportStopwatch.elapsed(TimeUnit.NANOSECONDS); - reportStopwatch.reset().start(); - LoadStatsRequest.Builder requestBuilder = LoadStatsRequest.newBuilder().setNode(node); - for (String serviceName : clusterServiceNames) { - if (loadStatsStoreMap.containsKey(serviceName)) { - LoadStatsStore loadStatsStore = loadStatsStoreMap.get(serviceName); - 
ClusterStats report = - loadStatsStore.generateLoadReport() - .toBuilder() - .setClusterName(serviceName) - .setLoadReportInterval(Durations.fromNanos(interval)) - .build(); - requestBuilder.addClusterStats(report); - } - } - LoadStatsRequest request = requestBuilder.build(); - lrsRequestWriter.onNext(request); - logger.log(Level.FINE, "Sent LoadStatsRequest\n{0}", request); - scheduleNextLoadReport(); - } - - private void scheduleNextLoadReport() { - // Cancel pending load report and reschedule with updated load reporting interval. - if (loadReportTimer != null && loadReportTimer.isPending()) { - loadReportTimer.cancel(); - loadReportTimer = null; - } - if (loadReportIntervalNano > 0) { - loadReportTimer = syncContext.schedule( - new LoadReportingTask(this), loadReportIntervalNano, TimeUnit.NANOSECONDS, - timerService); - } - } - - private void handleResponse(LoadStatsResponse response) { - if (closed) { - return; - } - - if (!initialResponseReceived) { - logger.log(Level.FINE, "Received LRS initial response: {0}", response); - initialResponseReceived = true; - } else { - logger.log(Level.FINE, "Received an LRS response: {0}", response); - } - loadReportIntervalNano = Durations.toNanos(response.getLoadReportingInterval()); - callback.onReportResponse(loadReportIntervalNano); - clusterServiceNames.clear(); - clusterServiceNames.addAll(response.getClustersList()); - scheduleNextLoadReport(); - } - - private void handleStreamClosed(Status status) { - checkArgument(!status.isOk(), "unexpected OK status"); - if (closed) { - return; - } - closed = true; - cleanUp(); - - long delayNanos = 0; - if (initialResponseReceived || lrsRpcRetryPolicy == null) { - // Reset the backoff sequence if balancer has sent the initial response, or backoff sequence - // has never been initialized. - lrsRpcRetryPolicy = backoffPolicyProvider.get(); - } - // Backoff only when balancer wasn't working previously. 
- if (!initialResponseReceived) { - // The back-off policy determines the interval between consecutive RPC upstarts, thus the - // actual delay may be smaller than the value from the back-off policy, or even negative, - // depending how much time was spent in the previous RPC. - delayNanos = - lrsRpcRetryPolicy.nextBackoffNanos() - retryStopwatch.elapsed(TimeUnit.NANOSECONDS); - } - logger.log(Level.FINE, "LRS stream closed, backoff in {0} second(s)", - TimeUnit.NANOSECONDS.toSeconds(delayNanos <= 0 ? 0 : delayNanos)); - if (delayNanos <= 0) { - startLrsRpc(); - } else { - lrsRpcRetryTimer = - syncContext.schedule(new LrsRpcRetryTask(), delayNanos, TimeUnit.NANOSECONDS, - timerService); - } - } - - private void close(@Nullable Exception error) { - if (closed) { - return; - } - closed = true; - cleanUp(); - if (error == null) { - lrsRequestWriter.onCompleted(); - } else { - lrsRequestWriter.onError(error); - } - } - - private void cleanUp() { - if (loadReportTimer != null) { - loadReportTimer.cancel(); - loadReportTimer = null; - } - if (lrsStream == this) { - lrsStream = null; - } - } - } -} diff --git a/xds/src/main/java/io/grpc/xds/LoadStatsStore.java b/xds/src/main/java/io/grpc/xds/LoadStatsStore.java index 86eeb07a236..76e430aa8d9 100644 --- a/xds/src/main/java/io/grpc/xds/LoadStatsStore.java +++ b/xds/src/main/java/io/grpc/xds/LoadStatsStore.java @@ -21,14 +21,14 @@ import javax.annotation.Nullable; /** - * Interface for client side load stats store. An {@code LoadStatsStore} maintains load stats for - * a service cluster (i.e., GSLB service) exposed by traffic director from a gRPC client's - * perspective, including dropped calls instructed by traffic director. Load stats for endpoints - * (i.e., Google backends) are aggregated in locality granularity (i.e., Google cluster) while the - * numbers of dropped calls are aggregated in cluster granularity. + * Interface for client side load stats store. 
An {@code LoadStatsStore} maintains load stats per + * cluster:cluster_service exposed by traffic director from a gRPC client's perspective, + * including dropped calls. Load stats for endpoints (i.e., Google backends) are aggregated in + * locality granularity (i.e., Google cluster) while the numbers of dropped calls are aggregated + * in cluster:cluster_service granularity. * - *

    An {@code LoadStatsStore} lives the same span of lifecycle as a cluster and - * only tracks loads for localities exposed by remote traffic director. A proper usage should be + *

    An {@code LoadStatsStore} only tracks loads for localities exposed by remote traffic + * director. A proper usage should be * *

      *
    1. Let {@link LoadStatsStore} track the locality newly exposed by traffic director by @@ -41,10 +41,6 @@ * *

      No locality information is needed for recording dropped calls since they are aggregated in * cluster granularity. - * - *

      Note implementations should only be responsible for keeping track of loads and generating - * load reports with load data, any load reporting information should be opaque to {@code - * LoadStatsStore} and be set outside. */ interface LoadStatsStore { @@ -61,7 +57,7 @@ interface LoadStatsStore { * reporting. * *

      This method is not thread-safe and should be called from the same synchronized context - * used by {@link XdsClient}. + * used by {@link LoadReportClient}. */ ClusterStats generateLoadReport(); @@ -70,10 +66,10 @@ interface LoadStatsStore { * endpoints in added localities will be recorded and included in generated load reports. * *

      This method needs to be called at locality updates only for newly assigned localities in - * balancer discovery responses before recording loads for those localities. + * endpoint discovery responses before recording loads for those localities. * *

      This method is not thread-safe and should be called from the same synchronized context - * used by {@link XdsClient}. + * used by {@link LoadReportClient}. */ void addLocality(Locality locality); @@ -88,7 +84,7 @@ interface LoadStatsStore { * waste and keep including zero-load upstream locality stats in generated load reports. * *

      This method is not thread-safe and should be called from the same synchronized context - * used by {@link XdsClient}. + * used by {@link LoadReportClient}. */ void removeLocality(Locality locality); diff --git a/xds/src/main/java/io/grpc/xds/LoadStatsStoreImpl.java b/xds/src/main/java/io/grpc/xds/LoadStatsStoreImpl.java index 0dfeca125b1..37ff09d91be 100644 --- a/xds/src/main/java/io/grpc/xds/LoadStatsStoreImpl.java +++ b/xds/src/main/java/io/grpc/xds/LoadStatsStoreImpl.java @@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; +import javax.annotation.Nullable; import javax.annotation.concurrent.NotThreadSafe; /** @@ -40,19 +41,27 @@ */ @NotThreadSafe final class LoadStatsStoreImpl implements LoadStatsStore { - + private final String clusterName; + @Nullable + @SuppressWarnings("unused") + private final String clusterServiceName; private final ConcurrentMap localityLoadCounters; // Cluster level dropped request counts for each category decision made by xDS load balancer. 
private final ConcurrentMap dropCounters; - LoadStatsStoreImpl() { - this(new ConcurrentHashMap(), + LoadStatsStoreImpl(String clusterName, @Nullable String clusterServiceName) { + this(clusterName, clusterServiceName, new ConcurrentHashMap(), new ConcurrentHashMap()); } @VisibleForTesting - LoadStatsStoreImpl(ConcurrentMap localityLoadCounters, + LoadStatsStoreImpl( + String clusterName, + @Nullable String clusterServiceName, + ConcurrentMap localityLoadCounters, ConcurrentMap dropCounters) { + this.clusterName = checkNotNull(clusterName, "clusterName"); + this.clusterServiceName = clusterServiceName; this.localityLoadCounters = checkNotNull(localityLoadCounters, "localityLoadCounters"); this.dropCounters = checkNotNull(dropCounters, "dropCounters"); } @@ -60,6 +69,8 @@ final class LoadStatsStoreImpl implements LoadStatsStore { @Override public ClusterStats generateLoadReport() { ClusterStats.Builder statsBuilder = ClusterStats.newBuilder(); + statsBuilder.setClusterName(clusterName); + // TODO(chengyuangzhang): also set cluster_service_name if provided. for (Map.Entry entry : localityLoadCounters.entrySet()) { ClientLoadSnapshot snapshot = entry.getValue().snapshot(); UpstreamLocalityStats.Builder localityStatsBuilder = diff --git a/xds/src/main/java/io/grpc/xds/XdsClient.java b/xds/src/main/java/io/grpc/xds/XdsClient.java index 288ad2fb92e..3fc0fc0e368 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClient.java +++ b/xds/src/main/java/io/grpc/xds/XdsClient.java @@ -417,16 +417,21 @@ void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) { } /** - * Starts reporting client load stats to a remote server for the given cluster. + * Report client load stats to a remote server for the given cluster:cluster_service. + * + *

      Note: currently we can only report loads for a single cluster:cluster_service, + * as the design for adding clusters to report loads for while load reporting is + * happening is undefined. */ - LoadReportClient reportClientStats(String clusterName, String serverUri) { + void reportClientStats( + String clusterName, @Nullable String clusterServiceName, LoadStatsStore loadStatsStore) { throw new UnsupportedOperationException(); } /** - * Stops reporting client load stats to the remote server for the given cluster. + * Stops reporting client load stats to the remote server for the given cluster:cluster_service. */ - void cancelClientStatsReport(String clusterName) { + void cancelClientStatsReport(String clusterName, @Nullable String clusterServiceName) { } abstract static class XdsClientFactory { diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index dea30bcc9ac..257404e395c 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -124,9 +124,6 @@ final class XdsClientImpl extends XdsClient { // watchers can watch endpoints in the same cluster. private final Map> endpointWatchers = new HashMap<>(); - // Load reporting clients, with each responsible for reporting loads of a single cluster. - private final Map lrsClients = new HashMap<>(); - // Resource fetch timers are used to conclude absence of resources. Each timer is activated when // subscription for the resource starts and disarmed on first update for the resource. @@ -150,6 +147,8 @@ final class XdsClientImpl extends XdsClient { private BackoffPolicy retryBackoffPolicy; @Nullable private ScheduledHandle rpcRetryTimer; + @Nullable + private LoadReportClient lrsClient; // Following fields are set only after the ConfigWatcher registered. Once set, they should // never change. 
@@ -189,8 +188,9 @@ void shutdown() { adsStream.close(Status.CANCELLED.withDescription("shutdown").asException()); } cleanUpResources(); - for (LoadReportClientImpl lrsClient : lrsClients.values()) { + if (lrsClient != null) { lrsClient.stopLoadReporting(); + lrsClient = null; } if (rpcRetryTimer != null) { rpcRetryTimer.cancel(); @@ -405,37 +405,31 @@ void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) { } @Override - LoadReportClient reportClientStats(String clusterName, String serverUri) { - checkNotNull(serverUri, "serverUri"); - checkArgument(serverUri.equals(""), - "Currently only support empty serverUri, which defaults to the same " - + "management server this client talks to."); - if (!lrsClients.containsKey(clusterName)) { - LoadReportClientImpl lrsClient = - new LoadReportClientImpl( - channel, - clusterName, - node, - syncContext, - timeService, - backoffPolicyProvider, - stopwatchSupplier); - lrsClient.startLoadReporting( - new LoadReportCallback() { - @Override - public void onReportResponse(long reportIntervalNano) {} - }); - lrsClients.put(clusterName, lrsClient); - } - return lrsClients.get(clusterName); + void reportClientStats( + String clusterName, @Nullable String clusterServiceName, LoadStatsStore loadStatsStore) { + checkState(lrsClient == null, + "load reporting has already started, cannot change clusters to report loads for"); + lrsClient = + new LoadReportClient( + channel, + node, + syncContext, + timeService, + backoffPolicyProvider, + stopwatchSupplier); + lrsClient.addLoadStatsStore(clusterName, clusterServiceName, loadStatsStore); + lrsClient.startLoadReporting(new LoadReportCallback() { + @Override + public void onReportResponse(long reportIntervalNano) {} + }); } @Override - void cancelClientStatsReport(String clusterName) { - LoadReportClientImpl lrsClient = lrsClients.remove(clusterName); - if (lrsClient != null) { - lrsClient.stopLoadReporting(); - } + void cancelClientStatsReport(String clusterName, 
@Nullable String clusterServiceName) { + checkState(lrsClient != null, "load reporting was never started"); + lrsClient.removeLoadStatsStore(clusterName, clusterServiceName); + lrsClient.stopLoadReporting(); + lrsClient = null; } /** diff --git a/xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java b/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java similarity index 78% rename from xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java rename to xds/src/test/java/io/grpc/xds/LoadReportClientTest.java index 633e2799d96..09a726104e9 100644 --- a/xds/src/test/java/io/grpc/xds/LoadReportClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java @@ -29,7 +29,6 @@ import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.MoreExecutors; import com.google.protobuf.util.Durations; @@ -54,6 +53,8 @@ import io.grpc.testing.GrpcCleanupRule; import io.grpc.xds.LoadReportClient.LoadReportCallback; import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -74,19 +75,17 @@ import org.mockito.MockitoAnnotations; /** - * Unit tests for {@link LoadReportClientImpl}. + * Unit tests for {@link LoadReportClient}. 
*/ @RunWith(JUnit4.class) -public class LoadReportClientImplTest { - - private static final String CLUSTER_NAME = "foo.blade.googleapis.com"; +public class LoadReportClientTest { private static final Node NODE = Node.newBuilder().setId("LRS test").build(); private static final FakeClock.TaskFilter LOAD_REPORTING_TASK_FILTER = new FakeClock.TaskFilter() { @Override public boolean shouldAccept(Runnable command) { return command.toString() - .contains(LoadReportClientImpl.LoadReportingTask.class.getSimpleName()); + .contains(LoadReportClient.LoadReportingTask.class.getSimpleName()); } }; private static final FakeClock.TaskFilter LRS_RPC_RETRY_TASK_FILTER = @@ -94,14 +93,9 @@ public boolean shouldAccept(Runnable command) { @Override public boolean shouldAccept(Runnable command) { return command.toString() - .contains(LoadReportClientImpl.LrsRpcRetryTask.class.getSimpleName()); + .contains(LoadReportClient.LrsRpcRetryTask.class.getSimpleName()); } }; - private static final LoadStatsRequest EXPECTED_INITIAL_REQ = - LoadStatsRequest.newBuilder() - .setNode(NODE) - .addClusterStats(ClusterStats.newBuilder().setClusterName(CLUSTER_NAME)) - .build(); @Rule public final GrpcCleanupRule cleanupRule = new GrpcCleanupRule(); @@ -134,7 +128,7 @@ public void uncaughtException(Thread t, Throwable e) { private LoadReportingServiceGrpc.LoadReportingServiceImplBase mockLoadReportingService; private ManagedChannel channel; - private LoadReportClientImpl lrsClient; + private LoadReportClient lrsClient; @SuppressWarnings("unchecked") @Before @@ -172,14 +166,12 @@ public void cancelled(Context context) { when(backoffPolicy2.nextBackoffNanos()) .thenReturn(TimeUnit.SECONDS.toNanos(1L), TimeUnit.SECONDS.toNanos(10L)); lrsClient = - new LoadReportClientImpl( + new LoadReportClient( channel, - CLUSTER_NAME, NODE, syncContext, fakeClock.getScheduledExecutorService(), backoffPolicyProvider, fakeClock.getStopwatchSupplier()); - lrsClient.startLoadReporting(callback); } @After @@ -190,21 +182,29 
@@ public void tearDown() { @Test public void typicalWorkflow() { + String cluster1 = "cluster-foo.googleapis.com"; + String cluster2 = "cluster-bar.googleapis.com"; + ClusterStats rawStats1 = generateClusterLoadStats(cluster1); + ClusterStats rawStats2 = generateClusterLoadStats(cluster2); + when(loadStatsStore1.generateLoadReport()).thenReturn(rawStats1); + when(loadStatsStore2.generateLoadReport()).thenReturn(rawStats2); + lrsClient.addLoadStatsStore(cluster1, null, loadStatsStore1); + lrsClient.addLoadStatsStore(cluster2, null, loadStatsStore2); + lrsClient.startLoadReporting(callback); + verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); StreamObserver requestObserver = Iterables.getOnlyElement(lrsRequestObservers); - InOrder inOrder = inOrder(requestObserver); - inOrder.verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); + InOrder inOrder = inOrder(requestObserver, callback); + inOrder.verify(requestObserver).onNext(eq(buildInitialRequest(cluster1, cluster2))); - String service1 = "namespace-foo:service-blade"; - ClusterStats rawStats1 = generateServiceLoadStats(); - when(loadStatsStore1.generateLoadReport()).thenReturn(rawStats1); - lrsClient.addLoadStatsStore(service1, loadStatsStore1); - responseObserver.onNext(buildLrsResponse(ImmutableList.of(service1), 1000)); + // Management server asks to report loads for cluster1. 
+ responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster1), 1000)); + inOrder.verify(callback).onReportResponse(1000); ArgumentMatcher expectedLoadReportMatcher = - new LoadStatsRequestMatcher(ImmutableMap.of(service1, rawStats1), 1000); + new LoadStatsRequestMatcher(ImmutableList.of(rawStats1), 1000); fakeClock.forwardNanos(999); inOrder.verifyNoMoreInteractions(); fakeClock.forwardNanos(1); @@ -214,48 +214,55 @@ public void typicalWorkflow() { inOrder.verify(requestObserver).onNext(argThat(expectedLoadReportMatcher)); // Management server updates the interval of sending load reports. - responseObserver.onNext(buildLrsResponse(ImmutableList.of(service1), 2000)); + responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster1), 2000)); + inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(1000); inOrder.verifyNoMoreInteractions(); fakeClock.forwardNanos(1000); inOrder.verify(requestObserver) - .onNext(argThat(new LoadStatsRequestMatcher(ImmutableMap.of(service1, rawStats1), 2000))); - - String service2 = "namespace-bar:service-baz"; - ClusterStats rawStats2 = generateServiceLoadStats(); - when(loadStatsStore2.generateLoadReport()).thenReturn(rawStats2); - lrsClient.addLoadStatsStore(service2, loadStatsStore2); + .onNext(argThat(new LoadStatsRequestMatcher(ImmutableList.of(rawStats1), 2000))); - // Management server asks to report loads for an extra cluster service. - responseObserver.onNext(buildLrsResponse(ImmutableList.of(service1, service2), 2000)); + // Management server asks to report loads for cluster1 and cluster2. 
+ responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster1, cluster2), 2000)); + inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(2000); inOrder.verify(requestObserver) .onNext( argThat( - new LoadStatsRequestMatcher( - ImmutableMap.of(service1, rawStats1, service2, rawStats2), 2000))); + new LoadStatsRequestMatcher(ImmutableList.of(rawStats1, rawStats2), 2000))); - // Load reports for one of existing service is no longer wanted. - responseObserver.onNext(buildLrsResponse(ImmutableList.of(service2), 2000)); + // Load reports for cluster1 is no longer wanted. + responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster2), 2000)); + inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(2000); inOrder.verify(requestObserver) - .onNext(argThat(new LoadStatsRequestMatcher(ImmutableMap.of(service2, rawStats2), 2000))); + .onNext(argThat(new LoadStatsRequestMatcher(ImmutableList.of(rawStats2), 2000))); - // Management server asks loads for a cluster service that client has no load data. - responseObserver.onNext(buildLrsResponse(ImmutableList.of("namespace-ham:service-spam"), 2000)); + // Management server asks loads for a cluster that client has no load data. 
+ responseObserver + .onNext(buildLrsResponse(ImmutableList.of("cluster-unknown.googleapis.com"), 2000)); + inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(2000); ArgumentCaptor reportCaptor = ArgumentCaptor.forClass(null); inOrder.verify(requestObserver).onNext(reportCaptor.capture()); assertThat(reportCaptor.getValue().getClusterStatsCount()).isEqualTo(0); + + inOrder.verifyNoMoreInteractions(); } @Test public void lrsStreamClosedAndRetried() { + String clusterName = "cluster-foo.googleapis.com"; + ClusterStats stats = generateClusterLoadStats(clusterName); + when(loadStatsStore1.generateLoadReport()).thenReturn(stats); + lrsClient.addLoadStatsStore(clusterName, null, loadStatsStore1); + lrsClient.startLoadReporting(callback); + InOrder inOrder = inOrder(mockLoadReportingService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); inOrder.verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); @@ -264,7 +271,7 @@ public void lrsStreamClosedAndRetried() { StreamObserver requestObserver = lrsRequestObservers.poll(); // First balancer RPC - verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); + verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Balancer closes it immediately (erroneously) @@ -284,7 +291,7 @@ public void lrsStreamClosedAndRetried() { responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); + verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Balancer closes it with an error. 
@@ -303,13 +310,12 @@ public void lrsStreamClosedAndRetried() { responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); + verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); - // Balancer sends a response asking for loads of some cluster service. - String serviceName = "namespace-foo:service-blade"; + // Balancer sends a response asking for loads of the cluster. responseObserver - .onNext(buildLrsResponse(ImmutableList.of(serviceName), 0)); + .onNext(buildLrsResponse(ImmutableList.of(clusterName), 0)); // Then breaks the RPC responseObserver.onError(Status.UNAVAILABLE.asException()); @@ -320,7 +326,7 @@ public void lrsStreamClosedAndRetried() { responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); + verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); // Fail the retry after spending 4ns fakeClock.forwardNanos(4); @@ -338,19 +344,16 @@ public void lrsStreamClosedAndRetried() { inOrder.verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(EXPECTED_INITIAL_REQ)); + verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Load reporting back to normal. 
responseObserver = lrsResponseObserverCaptor.getValue(); - ClusterStats stats = generateServiceLoadStats(); - when(loadStatsStore1.generateLoadReport()).thenReturn(stats); - lrsClient.addLoadStatsStore(serviceName, loadStatsStore1); responseObserver - .onNext(buildLrsResponse(ImmutableList.of(serviceName), 10)); + .onNext(buildLrsResponse(ImmutableList.of(clusterName), 10)); fakeClock.forwardNanos(10); verify(requestObserver) - .onNext(argThat(new LoadStatsRequestMatcher(ImmutableMap.of(serviceName, stats), 10))); + .onNext(argThat(new LoadStatsRequestMatcher(ImmutableList.of(stats), 10))); // Wrapping up verify(backoffPolicyProvider, times(2)).get(); @@ -360,13 +363,19 @@ public void lrsStreamClosedAndRetried() { @Test public void raceBetweenLoadReportingAndLbStreamClosure() { + String clusterName = "cluster-foo.googleapis.com"; + ClusterStats stats = generateClusterLoadStats(clusterName); + when(loadStatsStore1.generateLoadReport()).thenReturn(stats); + lrsClient.addLoadStatsStore(clusterName, null, loadStatsStore1); + lrsClient.startLoadReporting(callback); + verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); StreamObserver requestObserver = lrsRequestObservers.poll(); // First balancer RPC - verify(requestObserver).onNext(EXPECTED_INITIAL_REQ); + verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Simulate receiving a response from traffic director. 
@@ -394,19 +403,28 @@ public void raceBetweenLoadReportingAndLbStreamClosure() { } private static LoadStatsResponse buildLrsResponse( - List clusterServiceNames, long loadReportIntervalNanos) { + List clusterNames, long loadReportIntervalNanos) { return LoadStatsResponse .newBuilder() - .addAllClusters(clusterServiceNames) + .addAllClusters(clusterNames) .setLoadReportingInterval(Durations.fromNanos(loadReportIntervalNanos)) .build(); } + private static LoadStatsRequest buildInitialRequest(String... clusters) { + List clusterStatsList = new ArrayList<>(); + for (String cluster : clusters) { + clusterStatsList.add(ClusterStats.newBuilder().setClusterName(cluster).build()); + } + return + LoadStatsRequest.newBuilder().setNode(NODE).addAllClusterStats(clusterStatsList).build(); + } + /** * Generates a raw service load stats report with random data. */ - private static ClusterStats generateServiceLoadStats() { + private static ClusterStats generateClusterLoadStats(String clusterName) { long callsInProgress = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); long callsSucceeded = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); long callsFailed = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); @@ -416,6 +434,7 @@ private static ClusterStats generateServiceLoadStats() { return ClusterStats.newBuilder() + .setClusterName(clusterName) .addUpstreamLocalityStats( UpstreamLocalityStats.newBuilder() .setLocality( @@ -440,21 +459,18 @@ private static ClusterStats generateServiceLoadStats() { } /** - * For comparing LoadStatsRequest based on a collection of raw service load stats. + * For comparing LoadStatsRequest stats data regardless of . 
*/ private static class LoadStatsRequestMatcher implements ArgumentMatcher { private final Map expectedStats = new HashMap<>(); - LoadStatsRequestMatcher(Map serviceStats, long expectedIntervalNano) { - for (String serviceName : serviceStats.keySet()) { - // TODO(chengyuanzhang): the field to be populated should be cluster_service_name. + LoadStatsRequestMatcher(Collection clusterStats, long expectedIntervalNano) { + for (ClusterStats stats : clusterStats) { ClusterStats statsWithInterval = - serviceStats.get(serviceName) - .toBuilder() - .setClusterName(serviceName) + stats.toBuilder() .setLoadReportInterval(Durations.fromNanos(expectedIntervalNano)) .build(); - expectedStats.put(serviceName, statsWithInterval); + expectedStats.put(statsWithInterval.getClusterName(), statsWithInterval); } } diff --git a/xds/src/test/java/io/grpc/xds/LoadStatsStoreImplTest.java b/xds/src/test/java/io/grpc/xds/LoadStatsStoreImplTest.java index af6c4d8ac21..d58fd4bb92e 100644 --- a/xds/src/test/java/io/grpc/xds/LoadStatsStoreImplTest.java +++ b/xds/src/test/java/io/grpc/xds/LoadStatsStoreImplTest.java @@ -44,6 +44,7 @@ /** Unit tests for {@link LoadStatsStore}. 
*/ @RunWith(JUnit4.class) public class LoadStatsStoreImplTest { + private static final String CLUSTER_NAME = "cluster-test.googleapis.com"; private static final Locality LOCALITY1 = new Locality("test_region1", "test_zone", "test_subzone"); private static final Locality LOCALITY2 = @@ -56,7 +57,8 @@ public class LoadStatsStoreImplTest { public void setUp() { localityLoadCounters = new ConcurrentHashMap<>(); dropCounters = new ConcurrentHashMap<>(); - loadStatsStore = new LoadStatsStoreImpl(localityLoadCounters, dropCounters); + loadStatsStore = + new LoadStatsStoreImpl(CLUSTER_NAME, null, localityLoadCounters, dropCounters); } private static List buildEndpointLoadMetricStatsList( @@ -103,6 +105,7 @@ private static ClusterStats buildClusterStats( @Nullable List upstreamLocalityStatsList, @Nullable List droppedRequestsList) { ClusterStats.Builder clusterStatsBuilder = ClusterStats.newBuilder(); + clusterStatsBuilder.setClusterName(CLUSTER_NAME); if (upstreamLocalityStatsList != null) { clusterStatsBuilder.addAllUpstreamLocalityStats(upstreamLocalityStatsList); } diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index 55dccdd6fe4..907f57f467e 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -103,7 +103,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -185,10 +184,9 @@ public void uncaughtException(Thread t, Throwable e) { private final Queue> responseObservers = new ArrayDeque<>(); private final Queue> requestObservers = new ArrayDeque<>(); - private final AtomicBoolean callEnded = new AtomicBoolean(true); - + private final AtomicBoolean adsEnded = new AtomicBoolean(true); private final Queue loadReportCalls = new ArrayDeque<>(); - 
private final AtomicInteger runningLrsCalls = new AtomicInteger(); + private final AtomicBoolean lrsEnded = new AtomicBoolean(true); @Mock private AggregatedDiscoveryServiceImplBase mockedDiscoveryService; @@ -220,13 +218,13 @@ public void setUp() throws IOException { @Override public StreamObserver streamAggregatedResources( final StreamObserver responseObserver) { - assertThat(callEnded.get()).isTrue(); // ensure previous call was ended - callEnded.set(false); + assertThat(adsEnded.get()).isTrue(); // ensure previous call was ended + adsEnded.set(false); Context.current().addListener( new CancellationListener() { @Override public void cancelled(Context context) { - callEnded.set(true); + adsEnded.set(true); } }, MoreExecutors.directExecutor()); responseObservers.offer(responseObserver); @@ -243,7 +241,8 @@ public void cancelled(Context context) { @Override public StreamObserver streamLoadStats( StreamObserver responseObserver) { - runningLrsCalls.getAndIncrement(); + assertThat(lrsEnded.get()).isTrue(); + lrsEnded.set(false); @SuppressWarnings("unchecked") StreamObserver requestObserver = mock(StreamObserver.class); final LoadReportCall call = new LoadReportCall(requestObserver, responseObserver); @@ -252,7 +251,7 @@ public StreamObserver streamLoadStats( @Override public void cancelled(Context context) { call.cancelled = true; - runningLrsCalls.getAndDecrement(); + lrsEnded.set(true); } }, MoreExecutors.directExecutor()); loadReportCalls.offer(call); @@ -290,17 +289,13 @@ ManagedChannel createChannel(List servers) { // least one watcher is registered. assertThat(responseObservers).isEmpty(); assertThat(requestObservers).isEmpty(); - - // Load reporting is not initiated until being invoked to do so. 
- assertThat(loadReportCalls).isEmpty(); - assertThat(runningLrsCalls.get()).isEqualTo(0); } @After public void tearDown() { xdsClient.shutdown(); - assertThat(callEnded.get()).isTrue(); - assertThat(runningLrsCalls.get()).isEqualTo(0); + assertThat(adsEnded.get()).isTrue(); + assertThat(lrsEnded.get()).isTrue(); assertThat(channel.isShutdown()).isTrue(); assertThat(fakeClock.getPendingTasks()).isEmpty(); } @@ -3102,21 +3097,15 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { */ @Test public void reportLoadStatsToServer() { - xdsClient.reportClientStats("cluster-foo.googleapis.com", ""); - LoadReportCall lrsCall1 = loadReportCalls.poll(); - verify(lrsCall1.requestObserver) - .onNext(eq(buildInitialLoadStatsRequest("cluster-foo.googleapis.com"))); - assertThat(lrsCall1.cancelled).isFalse(); - - xdsClient.reportClientStats("cluster-bar.googleapis.com", ""); - LoadReportCall lrsCall2 = loadReportCalls.poll(); - verify(lrsCall2.requestObserver) - .onNext(eq(buildInitialLoadStatsRequest("cluster-bar.googleapis.com"))); - assertThat(lrsCall2.cancelled).isFalse(); - - xdsClient.cancelClientStatsReport("cluster-bar.googleapis.com"); - assertThat(lrsCall2.cancelled).isTrue(); - assertThat(runningLrsCalls.get()).isEqualTo(1); + LoadStatsStore loadStatsStore = mock(LoadStatsStore.class); + String clusterName = "cluster-foo.googleapis.com"; + xdsClient.reportClientStats(clusterName, null, loadStatsStore); + LoadReportCall lrsCall = loadReportCalls.poll(); + verify(lrsCall.requestObserver).onNext(eq(buildInitialLoadStatsRequest(clusterName))); + + xdsClient.cancelClientStatsReport(clusterName, null); + assertThat(lrsCall.cancelled).isTrue(); + // See more test on LoadReportClientTest.java } // Simulates the use case of watching clusters/endpoints based on service config resolved by From abed7073855516d9934fe8b07e0e3c4a4be741df Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Fri, 21 Feb 2020 10:55:57 -0800 Subject: [PATCH 69/86] grpclb: handles empty 
address from LB (#6734) --- .../main/java/io/grpc/grpclb/GrpclbState.java | 27 +++- .../grpc/grpclb/GrpclbLoadBalancerTest.java | 118 ++++++++++++++++++ 2 files changed, 140 insertions(+), 5 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java index e767aac1551..d940fc8832c 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java @@ -138,6 +138,7 @@ static enum Mode { @Nullable private ManagedChannel lbCommChannel; + private boolean lbSentEmptyBackends = false; @Nullable private LbStream lbStream; @@ -423,6 +424,17 @@ private void useRoundRobinLists( subchannels = Collections.unmodifiableMap(newSubchannelMap); break; case PICK_FIRST: + checkState(subchannels.size() <= 1, "Unexpected Subchannel count: %s", subchannels); + Subchannel subchannel; + if (newBackendAddrList.isEmpty()) { + if (subchannels.size() == 1) { + cancelFallbackTimer(); + subchannel = subchannels.values().iterator().next(); + subchannel.shutdown(); + subchannels = Collections.emptyMap(); + } + break; + } List eagList = new ArrayList<>(); // Because for PICK_FIRST, we create a single Subchannel for all addresses, we have to // attach the tokens to the EAG attributes and use TokenAttachingLoadRecorder to put them on @@ -438,13 +450,11 @@ private void useRoundRobinLists( } eagList.add(new EquivalentAddressGroup(origEag.getAddresses(), eagAttrs)); } - Subchannel subchannel; if (subchannels.isEmpty()) { // TODO(zhangkun83): remove the deprecation suppression on this method once migrated to // the new createSubchannel(). 
subchannel = helper.createSubchannel(eagList, createSubchannelAttrs()); } else { - checkState(subchannels.size() == 1, "Unexpected Subchannel count: %s", subchannels); subchannel = subchannels.values().iterator().next(); subchannel.updateAddresses(eagList); } @@ -629,6 +639,7 @@ private void handleResponse(LoadBalanceResponse response) { } // Stop using fallback backends as soon as a new server list is received from the balancer. usingFallbackBackends = false; + lbSentEmptyBackends = serverList.getServersList().isEmpty(); cancelFallbackTimer(); useRoundRobinLists(newDropList, newBackendAddrList, loadRecorder); maybeUpdatePicker(); @@ -729,9 +740,15 @@ private void maybeUpdatePicker() { break; case PICK_FIRST: if (backendList.isEmpty()) { - pickList = Collections.singletonList(BUFFER_ENTRY); - // Have not received server addresses - state = CONNECTING; + if (lbSentEmptyBackends) { + pickList = + Collections.singletonList(new ErrorEntry(Status.UNAVAILABLE)); + state = TRANSIENT_FAILURE; + } else { + pickList = Collections.singletonList(BUFFER_ENTRY); + // Have not received server addresses + state = CONNECTING; + } } else { checkState(backendList.size() == 1, "Excessive backend entries: %s", backendList); BackendEntry onlyEntry = backendList.get(0); diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java index 6d9abc70c43..53b9d0093ed 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java @@ -1849,6 +1849,124 @@ public void grpclbWorking_pickFirstMode() throws Exception { .returnSubchannel(any(Subchannel.class), any(ConnectivityStateInfo.class)); } + @SuppressWarnings({"unchecked", "deprecation"}) + @Test + public void grpclbWorking_pickFirstMode_lbSendsEmptyAddress() throws Exception { + InOrder inOrder = inOrder(helper); + + List grpclbBalancerList = createResolvedBalancerAddresses(1); + 
deliverResolvedAddresses( + Collections.emptyList(), + grpclbBalancerList, + Attributes.EMPTY, + GrpclbConfig.create(Mode.PICK_FIRST)); + + assertEquals(1, fakeOobChannels.size()); + verify(mockLbService).balanceLoad(lbResponseObserverCaptor.capture()); + StreamObserver lbResponseObserver = lbResponseObserverCaptor.getValue(); + assertEquals(1, lbRequestObservers.size()); + StreamObserver lbRequestObserver = lbRequestObservers.poll(); + verify(lbRequestObserver).onNext( + eq(LoadBalanceRequest.newBuilder().setInitialRequest( + InitialLoadBalanceRequest.newBuilder().setName(SERVICE_AUTHORITY).build()) + .build())); + + // Simulate receiving LB response + List backends1 = Arrays.asList( + new ServerEntry("127.0.0.1", 2000, "token0001"), + new ServerEntry("127.0.0.1", 2010, "token0002")); + inOrder.verify(helper, never()) + .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); + lbResponseObserver.onNext(buildInitialResponse()); + lbResponseObserver.onNext(buildLbResponse(backends1)); + + // TODO(zhangkun83): remove the deprecation suppression on this method once migrated to + // the new createSubchannel(). 
+ inOrder.verify(helper).createSubchannel( + eq(Arrays.asList( + new EquivalentAddressGroup(backends1.get(0).addr, eagAttrsWithToken("token0001")), + new EquivalentAddressGroup(backends1.get(1).addr, eagAttrsWithToken("token0002")))), + any(Attributes.class)); + + // Initially IDLE + inOrder.verify(helper).updateBalancingState(eq(IDLE), pickerCaptor.capture()); + RoundRobinPicker picker0 = (RoundRobinPicker) pickerCaptor.getValue(); + + // Only one subchannel is created + assertThat(mockSubchannels).hasSize(1); + Subchannel subchannel = mockSubchannels.poll(); + assertThat(picker0.dropList).containsExactly(null, null); + assertThat(picker0.pickList).containsExactly(new IdleSubchannelEntry(subchannel, syncContext)); + + // PICK_FIRST doesn't eagerly connect + verify(subchannel, never()).requestConnection(); + + // CONNECTING + deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(CONNECTING)); + + inOrder.verify(helper).updateBalancingState(eq(CONNECTING), pickerCaptor.capture()); + RoundRobinPicker picker1 = (RoundRobinPicker) pickerCaptor.getValue(); + assertThat(picker1.dropList).containsExactly(null, null); + assertThat(picker1.pickList).containsExactly(BUFFER_ENTRY); + + // TRANSIENT_FAILURE + Status error = Status.UNAVAILABLE.withDescription("Simulated connection error"); + deliverSubchannelState(subchannel, ConnectivityStateInfo.forTransientFailure(error)); + inOrder.verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture()); + RoundRobinPicker picker2 = (RoundRobinPicker) pickerCaptor.getValue(); + assertThat(picker2.dropList).containsExactly(null, null); + assertThat(picker2.pickList).containsExactly(new ErrorEntry(error)); + + // READY + deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); + inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture()); + RoundRobinPicker picker3 = (RoundRobinPicker) pickerCaptor.getValue(); + assertThat(picker3.dropList).containsExactly(null, 
null); + assertThat(picker3.pickList).containsExactly( + new BackendEntry(subchannel, new TokenAttachingTracerFactory(getLoadRecorder()))); + + inOrder.verify(helper, never()) + .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); + + // Empty addresses from LB + lbResponseObserver.onNext(buildLbResponse(Collections.emptyList())); + + // new addresses will be updated to the existing subchannel + // createSubchannel() has ever been called only once + inOrder.verify(helper, never()).createSubchannel(any(List.class), any(Attributes.class)); + assertThat(mockSubchannels).isEmpty(); + verify(subchannel).shutdown(); + + inOrder.verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture()); + RoundRobinPicker errorPicker = (RoundRobinPicker) pickerCaptor.getValue(); + assertThat(errorPicker.pickList).containsExactly(new ErrorEntry(Status.UNAVAILABLE)); + + lbResponseObserver.onNext(buildLbResponse(Collections.emptyList())); + + // Test recover from new LB response with addresses + // New server list with drops + List backends2 = Arrays.asList( + new ServerEntry("127.0.0.1", 2000, "token0001"), + new ServerEntry("token0003"), // drop + new ServerEntry("127.0.0.1", 2020, "token0004")); + inOrder.verify(helper, never()) + .updateBalancingState(any(ConnectivityState.class), any(SubchannelPicker.class)); + lbResponseObserver.onNext(buildLbResponse(backends2)); + + // new addresses will be updated to the existing subchannel + inOrder.verify(helper, times(1)).createSubchannel(any(List.class), any(Attributes.class)); + inOrder.verify(helper).updateBalancingState(eq(IDLE), pickerCaptor.capture()); + subchannel = mockSubchannels.poll(); + + // Subchannel became READY + deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(CONNECTING)); + deliverSubchannelState(subchannel, ConnectivityStateInfo.forNonError(READY)); + inOrder.verify(helper).updateBalancingState(eq(READY), pickerCaptor.capture()); + RoundRobinPicker 
picker4 = (RoundRobinPicker) pickerCaptor.getValue(); + assertThat(picker4.pickList).containsExactly( + new BackendEntry(subchannel, new TokenAttachingTracerFactory(getLoadRecorder()))); + } + @Test public void shutdownWithoutSubchannel_roundRobin() throws Exception { subtestShutdownWithoutSubchannel("round_robin"); From a98db126e265259ea73c2156833cbf872aa86811 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 21 Feb 2020 15:32:27 -0800 Subject: [PATCH 70/86] xds: update LRS protocol and fix usage of cluster name in ClusterStats (#6737) Fixes load reporting integration due to LRS design flaws. - Updated LRS protocol. The Node sent in LRS requests use a special metadata "PROXYLESS_CLIENT_HOSTNAME" with value being the hostname (including port) for creating the gRPC channel. Management server is able to infer clusters that the gRPC client potentially sends load to. LRS initial request does not need to populate clusters it wants to report load for. - Each ClusterStats message in LRS requests represents the loads for each (cluster, cluster_service), where cluster_service field is optional. EDS LB policy should track loads per (cluster, cluster_service) and populate cluster name from upstream CDS policy. - Modified CdsUpdate, which is the converted data of a CDS response. edsServiceName field can be null when an CDS response does not give it. We want to preserve the null value for LRS requests. 
--- .../java/io/grpc/xds/CdsLoadBalancer.java | 12 +- .../java/io/grpc/xds/EdsLoadBalancer.java | 82 +++++---- .../java/io/grpc/xds/LoadReportClient.java | 111 ++++++------ xds/src/main/java/io/grpc/xds/XdsClient.java | 37 ++-- .../main/java/io/grpc/xds/XdsClientImpl.java | 43 ++--- .../io/grpc/xds/XdsLoadBalancerProvider.java | 14 +- .../java/io/grpc/xds/XdsNameResolver.java | 1 + .../java/io/grpc/xds/CdsLoadBalancerTest.java | 13 +- .../java/io/grpc/xds/EdsLoadBalancerTest.java | 50 +++--- .../io/grpc/xds/LoadReportClientTest.java | 163 +++++++++++------- .../java/io/grpc/xds/XdsClientImplTest.java | 111 ++++++------ .../test/java/io/grpc/xds/XdsClientTest.java | 19 -- .../grpc/xds/XdsLoadBalancerProviderTest.java | 2 + 13 files changed, 339 insertions(+), 319 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index 6ec4263f03d..832e365b991 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -280,11 +280,13 @@ public void onClusterChanged(ClusterUpdate newUpdate) { newUpdate.getLbPolicy().equals("round_robin"), "The load balancing policy in ClusterUpdate '%s' is not supported", newUpdate); - final XdsConfig edsConfig = new XdsConfig( - new LbConfig(newUpdate.getLbPolicy(), ImmutableMap.of()), - /* fallbackPolicy = */ null, - /* edsServiceName = */ newUpdate.getEdsServiceName(), - /* lrsServerName = */ newUpdate.getLrsServerName()); + final XdsConfig edsConfig = + new XdsConfig( + /* cluster = */ newUpdate.getClusterName(), + new LbConfig(newUpdate.getLbPolicy(), ImmutableMap.of()), + /* fallbackPolicy = */ null, + /* edsServiceName = */ newUpdate.getEdsServiceName(), + /* lrsServerName = */ newUpdate.getLrsServerName()); updateSslContextProvider(newUpdate.getUpstreamTlsContext()); if (edsBalancer == null) { edsBalancer = lbRegistry.getProvider(EDS_POLICY_NAME).newLoadBalancer(helper); diff --git 
a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java index 1b439dcd2b1..5331ed14cff 100644 --- a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java @@ -72,9 +72,6 @@ final class EdsLoadBalancer extends LoadBalancer { private XdsClient xdsClient; @Nullable private String clusterName; - // FIXME(chengyuanzhang): should be one instance per cluster:cluster_service. - @Nullable - private LoadStatsStore loadStatsStore; EdsLoadBalancer(Helper edsLbHelper, ResourceUpdateCallback resourceUpdateCallback) { this( @@ -159,6 +156,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { XdsClient createXdsClient() { return new XdsClientImpl( + edsLbHelper.getAuthority(), serverList, channelFactory, node, @@ -173,41 +171,15 @@ XdsClient createXdsClient() { xdsClient = xdsClientPool.getObject(); } - // The edsServiceName field is null in legacy gRPC client with EDS: use target authority for - // querying endpoints, but in the future we expect this to be explicitly given by EDS config. - // We assume if edsServiceName is null, it will always be null in later resolver updates; - // and if edsServiceName is not null, it will always be not null. - String clusterServiceName = newXdsConfig.edsServiceName; - if (clusterServiceName == null) { - clusterServiceName = edsLbHelper.getAuthority(); - } - if (clusterName == null) { - // TODO(zdapeng): Use the correct cluster name. Currently load reporting will be broken if - // edsServiceName is changed because we are using edsServiceName for the cluster name. - clusterName = clusterServiceName; - loadStatsStore = new LoadStatsStoreImpl(clusterName, null); - } - - // FIXME(chengyuanzhang): should report loads for each cluster:cluster_service. 
- if (xdsConfig == null - || !Objects.equals(newXdsConfig.lrsServerName, xdsConfig.lrsServerName)) { - if (newXdsConfig.lrsServerName != null) { - if (!newXdsConfig.lrsServerName.equals("")) { - throw new AssertionError( - "Can only report load to the same management server"); - } - xdsClient.reportClientStats(clusterName, null, loadStatsStore); - } else if (xdsConfig != null) { - xdsClient.cancelClientStatsReport(clusterName, null); - } - } + // FIXME(chengyuanzhang): make cluster name required in XdsConfig. + clusterName = newXdsConfig.cluster != null ? newXdsConfig.cluster : edsLbHelper.getAuthority(); // Note: childPolicy change will be handled in LocalityStore, to be implemented. // If edsServiceName in XdsConfig is changed, do a graceful switch. if (xdsConfig == null || !Objects.equals(newXdsConfig.edsServiceName, xdsConfig.edsServiceName)) { LoadBalancer.Factory clusterEndpointsLoadBalancerFactory = - new ClusterEndpointsBalancerFactory(clusterServiceName); + new ClusterEndpointsBalancerFactory(newXdsConfig.edsServiceName); switchingLoadBalancer.switchTo(clusterEndpointsLoadBalancerFactory); } switchingLoadBalancer.handleResolvedAddresses(resolvedAddresses); @@ -232,9 +204,6 @@ public void shutdown() { channelLogger.log(ChannelLogLevel.DEBUG, "EDS load balancer is shutting down"); switchingLoadBalancer.shutdown(); if (xdsClient != null) { - if (xdsConfig != null && xdsConfig.lrsServerName != null) { - xdsClient.cancelClientStatsReport(clusterName, null); - } xdsClient = xdsClientPool.returnObject(xdsClient); } } @@ -243,10 +212,12 @@ public void shutdown() { * A load balancer factory that provides a load balancer for a given cluster service. 
*/ private final class ClusterEndpointsBalancerFactory extends LoadBalancer.Factory { - final String clusterServiceName; + @Nullable final String clusterServiceName; + final LoadStatsStore loadStatsStore; - ClusterEndpointsBalancerFactory(String clusterServiceName) { + ClusterEndpointsBalancerFactory(@Nullable String clusterServiceName) { this.clusterServiceName = clusterServiceName; + loadStatsStore = new LoadStatsStoreImpl(clusterName, clusterServiceName); } @Override @@ -260,7 +231,7 @@ public boolean equals(Object o) { return false; } ClusterEndpointsBalancerFactory that = (ClusterEndpointsBalancerFactory) o; - return clusterServiceName.equals(that.clusterServiceName); + return Objects.equals(clusterServiceName, that.clusterServiceName); } @Override @@ -272,20 +243,41 @@ public int hashCode() { * Load-balances endpoints for a given cluster. */ final class ClusterEndpointsBalancer extends LoadBalancer { + // Name of the resource to be used for querying endpoint information. + final String resourceName; final Helper helper; final EndpointWatcherImpl endpointWatcher; final LocalityStore localityStore; + boolean isReportingLoad; ClusterEndpointsBalancer(Helper helper) { this.helper = helper; - + resourceName = clusterServiceName != null ? clusterServiceName : clusterName; localityStore = localityStoreFactory.newLocalityStore(helper, lbRegistry, loadStatsStore); - endpointWatcher = new EndpointWatcherImpl(localityStore); - xdsClient.watchEndpointData(clusterServiceName, endpointWatcher); + xdsClient.watchEndpointData(resourceName, endpointWatcher); } - // TODO(zddapeng): In handleResolvedAddresses() handle child policy change if any. 
+ @Override + public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { + XdsConfig config = (XdsConfig) resolvedAddresses.getLoadBalancingPolicyConfig(); + if (config.lrsServerName != null) { + if (!config.lrsServerName.equals("")) { + throw new AssertionError( + "Can only report load to the same management server"); + } + if (!isReportingLoad) { + xdsClient.reportClientStats(clusterName, clusterServiceName, loadStatsStore); + isReportingLoad = true; + } + } else { + if (isReportingLoad) { + xdsClient.cancelClientStatsReport(clusterName, clusterServiceName); + isReportingLoad = false; + } + } + // TODO(zddapeng): In handleResolvedAddresses() handle child policy change if any. + } @Override public void handleNameResolutionError(Status error) { @@ -303,8 +295,12 @@ public boolean canHandleEmptyAddressListFromNameResolution() { @Override public void shutdown() { + if (isReportingLoad) { + xdsClient.cancelClientStatsReport(clusterName, clusterServiceName); + isReportingLoad = false; + } localityStore.reset(); - xdsClient.cancelEndpointDataWatch(clusterServiceName, endpointWatcher); + xdsClient.cancelEndpointDataWatch(resourceName, endpointWatcher); } } } diff --git a/xds/src/main/java/io/grpc/xds/LoadReportClient.java b/xds/src/main/java/io/grpc/xds/LoadReportClient.java index aa0c3584f7a..8fb069c485a 100644 --- a/xds/src/main/java/io/grpc/xds/LoadReportClient.java +++ b/xds/src/main/java/io/grpc/xds/LoadReportClient.java @@ -23,6 +23,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Stopwatch; import com.google.common.base.Supplier; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; import com.google.protobuf.util.Durations; import io.envoyproxy.envoy.api.v2.core.Node; import io.envoyproxy.envoy.api.v2.endpoint.ClusterStats; @@ -35,10 +37,8 @@ import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.internal.BackoffPolicy; import io.grpc.stub.StreamObserver; -import 
java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ScheduledExecutorService; @@ -56,6 +56,9 @@ final class LoadReportClient { private static final Logger logger = Logger.getLogger(XdsClientImpl.class.getName()); + @VisibleForTesting + static final String TARGET_NAME_METADATA_KEY = "PROXYLESS_CLIENT_HOSTNAME"; + private final ManagedChannel channel; private final Node node; private final SynchronizationContext syncContext; @@ -64,10 +67,8 @@ final class LoadReportClient { private final Stopwatch retryStopwatch; private final BackoffPolicy.Provider backoffPolicyProvider; - // Sources of load stats data for each cluster. - // FIXME(chengyuanzhang): this should be Map> as each - // ClusterStats is keyed by cluster:cluster_service. Currently, cluster_service is always unset. - private final Map loadStatsStoreMap = new HashMap<>(); + // Sources of load stats data for each cluster:cluster_service. 
+ private final Map> loadStatsStoreMap = new HashMap<>(); private boolean started; @Nullable @@ -80,6 +81,7 @@ final class LoadReportClient { private LoadReportCallback callback; LoadReportClient( + String targetName, ManagedChannel channel, Node node, SynchronizationContext syncContext, @@ -87,13 +89,21 @@ final class LoadReportClient { BackoffPolicy.Provider backoffPolicyProvider, Supplier stopwatchSupplier) { this.channel = checkNotNull(channel, "channel"); - this.node = checkNotNull(node, "node"); this.syncContext = checkNotNull(syncContext, "syncContext"); this.timerService = checkNotNull(scheduledExecutorService, "timeService"); this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatchSupplier"); this.retryStopwatch = stopwatchSupplier.get(); - started = false; + checkNotNull(targetName, "targetName"); + checkNotNull(node, "node"); + Struct metadata = + node.getMetadata() + .toBuilder() + .putFields( + TARGET_NAME_METADATA_KEY, + Value.newBuilder().setStringValue(targetName).build()) + .build(); + this.node = node.toBuilder().setMetadata(metadata).build(); } /** @@ -101,7 +111,7 @@ final class LoadReportClient { * stats periodically. Calling this method on an already started {@link LoadReportClient} is * no-op. */ - public void startLoadReporting(LoadReportCallback callback) { + void startLoadReporting(LoadReportCallback callback) { if (started) { return; } @@ -114,7 +124,7 @@ public void startLoadReporting(LoadReportCallback callback) { * Terminates load reporting. Calling this method on an already stopped * {@link LoadReportClient} is no-op. */ - public void stopLoadReporting() { + void stopLoadReporting() { if (!started) { return; } @@ -132,37 +142,35 @@ public void stopLoadReporting() { * Provides this LoadReportClient source of load stats data for the given * cluster:cluster_service. 
If requested, data from the given loadStatsStore is * periodically queried and sent to traffic director by this LoadReportClient. - * - *

      Currently we expect load stats data for all clusters to report loads for are provided - * before load reporting starts (so that LRS initial request tells management server clusters - * it is reporting loads for). Design TBD for reporting loads for extra clusters after load - * reporting has started. - * - *

      Note: currently clusterServiceName is always unset. */ - public void addLoadStatsStore( + void addLoadStatsStore( String clusterName, @Nullable String clusterServiceName, LoadStatsStore loadStatsStore) { checkState( - !loadStatsStoreMap.containsKey(clusterName), - "load stats for cluster " + clusterName + " already exists"); - // FIXME(chengyuanzhang): relax this restriction after design is fleshed out. - checkState( - !started, - "load stats for all clusters to report loads for should be provided before " - + "load reporting has started"); - loadStatsStoreMap.put(clusterName, loadStatsStore); + !loadStatsStoreMap.containsKey(clusterName) + || !loadStatsStoreMap.get(clusterName).containsKey(clusterServiceName), + "load stats for cluster: %s, cluster service: %s already exists", + clusterName, clusterServiceName); + if (!loadStatsStoreMap.containsKey(clusterName)) { + loadStatsStoreMap.put(clusterName, new HashMap()); + } + Map clusterLoadStatsStores = loadStatsStoreMap.get(clusterName); + clusterLoadStatsStores.put(clusterServiceName, loadStatsStore); } /** * Stops providing load stats data for the given cluster:cluster_service. - * - *

      Note: currently clusterServiceName is always unset. */ - public void removeLoadStatsStore(String clusterName, @Nullable String clusterServiceName) { + void removeLoadStatsStore(String clusterName, @Nullable String clusterServiceName) { checkState( - loadStatsStoreMap.containsKey(clusterName), - "load stats for cluster " + clusterName + " does not exist"); - loadStatsStoreMap.remove(clusterName); + loadStatsStoreMap.containsKey(clusterName) + && loadStatsStoreMap.get(clusterName).containsKey(clusterServiceName), + "load stats for cluster: %s, cluster service: %s does not exist", + clusterName, clusterServiceName); + Map clusterLoadStatsStores = loadStatsStoreMap.get(clusterName); + clusterLoadStatsStores.remove(clusterServiceName); + if (clusterLoadStatsStores.isEmpty()) { + loadStatsStoreMap.remove(clusterName); + } } @VisibleForTesting @@ -217,15 +225,12 @@ private class LrsStream implements StreamObserver { void start() { lrsRequestWriter = stub.withWaitForReady().streamLoadStats(this); reportStopwatch.reset().start(); - // Tells management server which clusters the client is reporting loads for. - List clusterStatsList = new ArrayList<>(); - for (String clusterName : loadStatsStoreMap.keySet()) { - clusterStatsList.add(ClusterStats.newBuilder().setClusterName(clusterName).build()); - } + + // Send an initial LRS request with empty cluster stats. Management server is able to + // infer clusters the gRPC client sending loads to. 
LoadStatsRequest initRequest = LoadStatsRequest.newBuilder() .setNode(node) - .addAllClusterStats(clusterStatsList) .build(); lrsRequestWriter.onNext(initRequest); logger.log(Level.FINE, "Initial LRS request sent: {0}", initRequest); @@ -269,13 +274,15 @@ private void sendLoadReport() { LoadStatsRequest.Builder requestBuilder = LoadStatsRequest.newBuilder().setNode(node); for (String name : clusterNames) { if (loadStatsStoreMap.containsKey(name)) { - LoadStatsStore loadStatsStore = loadStatsStoreMap.get(name); - ClusterStats report = - loadStatsStore.generateLoadReport() - .toBuilder() - .setLoadReportInterval(Durations.fromNanos(interval)) - .build(); - requestBuilder.addClusterStats(report); + Map clusterLoadStatsStores = loadStatsStoreMap.get(name); + for (LoadStatsStore statsStore : clusterLoadStatsStores.values()) { + ClusterStats report = + statsStore.generateLoadReport() + .toBuilder() + .setLoadReportInterval(Durations.fromNanos(interval)) + .build(); + requestBuilder.addClusterStats(report); + } } } LoadStatsRequest request = requestBuilder.build(); @@ -308,10 +315,16 @@ private void handleResponse(LoadStatsResponse response) { } else { logger.log(Level.FINE, "Received an LRS response: {0}", response); } - loadReportIntervalNano = Durations.toNanos(response.getLoadReportingInterval()); - callback.onReportResponse(loadReportIntervalNano); - clusterNames.clear(); - clusterNames.addAll(response.getClustersList()); + long interval = Durations.toNanos(response.getLoadReportingInterval()); + if (interval != loadReportIntervalNano) { + loadReportIntervalNano = interval; + callback.onReportResponse(loadReportIntervalNano); + } + if (clusterNames.size() != response.getClustersCount() + || !clusterNames.containsAll(response.getClustersList())) { + clusterNames.clear(); + clusterNames.addAll(response.getClustersList()); + } scheduleNextLoadReport(); } diff --git a/xds/src/main/java/io/grpc/xds/XdsClient.java b/xds/src/main/java/io/grpc/xds/XdsClient.java index 
3fc0fc0e368..d434ec9db07 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClient.java +++ b/xds/src/main/java/io/grpc/xds/XdsClient.java @@ -112,19 +112,22 @@ ConfigUpdate build() { */ static final class ClusterUpdate { private final String clusterName; + @Nullable private final String edsServiceName; private final String lbPolicy; - private final boolean enableLrs; + @Nullable private final String lrsServerName; private final UpstreamTlsContext upstreamTlsContext; - private ClusterUpdate(String clusterName, String edsServiceName, String lbPolicy, - boolean enableLrs, @Nullable String lrsServerName, + private ClusterUpdate( + String clusterName, + @Nullable String edsServiceName, + String lbPolicy, + @Nullable String lrsServerName, @Nullable UpstreamTlsContext upstreamTlsContext) { this.clusterName = clusterName; this.edsServiceName = edsServiceName; this.lbPolicy = lbPolicy; - this.enableLrs = enableLrs; this.lrsServerName = lrsServerName; this.upstreamTlsContext = upstreamTlsContext; } @@ -136,6 +139,7 @@ String getClusterName() { /** * Returns the resource name for EDS requests. */ + @Nullable String getEdsServiceName() { return edsServiceName; } @@ -148,16 +152,9 @@ String getLbPolicy() { return lbPolicy; } - /** - * Returns true if LRS is enabled. - */ - boolean isEnableLrs() { - return enableLrs; - } - /** * Returns the server name to send client load reports to if LRS is enabled. {@code null} if - * {@link #isEnableLrs()} returns {@code false}. + * load reporting is disabled for this cluster. 
*/ @Nullable String getLrsServerName() { @@ -176,9 +173,9 @@ static Builder newBuilder() { static final class Builder { private String clusterName; + @Nullable private String edsServiceName; private String lbPolicy; - private boolean enableLrs; @Nullable private String lrsServerName; @Nullable @@ -203,11 +200,6 @@ Builder setLbPolicy(String lbPolicy) { return this; } - Builder setEnableLrs(boolean enableLrs) { - this.enableLrs = enableLrs; - return this; - } - Builder setLrsServerName(String lrsServerName) { this.lrsServerName = lrsServerName; return this; @@ -221,13 +213,10 @@ Builder setUpstreamTlsContext(UpstreamTlsContext upstreamTlsContext) { ClusterUpdate build() { Preconditions.checkState(clusterName != null, "clusterName is not set"); Preconditions.checkState(lbPolicy != null, "lbPolicy is not set"); - Preconditions.checkState( - (enableLrs && lrsServerName != null) || (!enableLrs && lrsServerName == null), - "lrsServerName is not set while LRS is enabled " - + "OR lrsServerName is set while LRS is not enabled"); + return - new ClusterUpdate(clusterName, edsServiceName == null ? clusterName : edsServiceName, - lbPolicy, enableLrs, lrsServerName, upstreamTlsContext); + new ClusterUpdate( + clusterName, edsServiceName, lbPolicy, lrsServerName, upstreamTlsContext); } } } diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 257404e395c..dfd787d292f 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -88,6 +88,8 @@ final class XdsClientImpl extends XdsClient { private final MessagePrinter respPrinter = new MessagePrinter(); + // Name of the target server this gRPC client is trying to talk to. 
+ private final String targetName; private final ManagedChannel channel; private final SynchronizationContext syncContext; private final ScheduledExecutorService timeService; @@ -162,6 +164,7 @@ final class XdsClientImpl extends XdsClient { private String ldsResourceName; XdsClientImpl( + String targetName, List servers, // list of management servers XdsChannelFactory channelFactory, Node node, @@ -169,6 +172,7 @@ final class XdsClientImpl extends XdsClient { ScheduledExecutorService timeService, BackoffPolicy.Provider backoffPolicyProvider, Supplier stopwatchSupplier) { + this.targetName = checkNotNull(targetName, "targetName"); this.channel = checkNotNull(channelFactory, "channelFactory") .createChannel(checkNotNull(servers, "servers")); @@ -407,29 +411,30 @@ void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) { @Override void reportClientStats( String clusterName, @Nullable String clusterServiceName, LoadStatsStore loadStatsStore) { - checkState(lrsClient == null, - "load reporting has already started, cannot change clusters to report loads for"); - lrsClient = - new LoadReportClient( - channel, - node, - syncContext, - timeService, - backoffPolicyProvider, - stopwatchSupplier); + if (lrsClient == null) { + lrsClient = + new LoadReportClient( + targetName, + channel, + node, + syncContext, + timeService, + backoffPolicyProvider, + stopwatchSupplier); + lrsClient.startLoadReporting(new LoadReportCallback() { + @Override + public void onReportResponse(long reportIntervalNano) {} + }); + } lrsClient.addLoadStatsStore(clusterName, clusterServiceName, loadStatsStore); - lrsClient.startLoadReporting(new LoadReportCallback() { - @Override - public void onReportResponse(long reportIntervalNano) {} - }); } @Override void cancelClientStatsReport(String clusterName, @Nullable String clusterServiceName) { checkState(lrsClient != null, "load reporting was never started"); lrsClient.removeLoadStatsStore(clusterName, clusterServiceName); - 
lrsClient.stopLoadReporting(); - lrsClient = null; + // TODO(chengyuanzhang): can be optimized to stop load reporting if no more loads need + // to be reported. } /** @@ -740,8 +745,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { + "indicate to use EDS over ADS."; break; } - // If the service_name field is set, that value will be used for the EDS request - // instead of the cluster name (default). + // If the service_name field is set, that value will be used for the EDS request. if (!edsClusterConfig.getServiceName().isEmpty()) { updateBuilder.setEdsServiceName(edsClusterConfig.getServiceName()); edsServices.add(edsClusterConfig.getServiceName()); @@ -764,10 +768,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { + "management server."; break; } - updateBuilder.setEnableLrs(true); updateBuilder.setLrsServerName(""); - } else { - updateBuilder.setEnableLrs(false); } if (cluster.hasTlsContext()) { updateBuilder.setUpstreamTlsContext(cluster.getTlsContext()); diff --git a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java index 7b3faf67dd7..08de69c664a 100644 --- a/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java +++ b/xds/src/main/java/io/grpc/xds/XdsLoadBalancerProvider.java @@ -77,13 +77,14 @@ public ConfigOrError parseLoadBalancingPolicyConfig( static ConfigOrError parseLoadBalancingConfigPolicy( Map rawLoadBalancingPolicyConfig, LoadBalancerRegistry registry) { try { + String cluster = JsonUtil.getString(rawLoadBalancingPolicyConfig, "cluster"); LbConfig childPolicy = selectChildPolicy(rawLoadBalancingPolicyConfig, registry); LbConfig fallbackPolicy = selectFallbackPolicy(rawLoadBalancingPolicyConfig, registry); String edsServiceName = JsonUtil.getString(rawLoadBalancingPolicyConfig, "edsServiceName"); String lrsServerName = JsonUtil.getString(rawLoadBalancingPolicyConfig, "lrsLoadReportingServerName"); return ConfigOrError.fromConfig( - new 
XdsConfig(childPolicy, fallbackPolicy, edsServiceName, lrsServerName)); + new XdsConfig(cluster, childPolicy, fallbackPolicy, edsServiceName, lrsServerName)); } catch (RuntimeException e) { return ConfigOrError.fromError( Status.fromThrowable(e).withDescription( @@ -128,6 +129,9 @@ private static LbConfig selectSupportedLbPolicy( * Represents a successfully parsed and validated LoadBalancingConfig for XDS. */ static final class XdsConfig { + // FIXME(chengyuanzhang): make cluster name required. + @Nullable + final String cluster; // TODO(carl-mastrangelo): make these Object's containing the fully parsed child configs. @Nullable final LbConfig childPolicy; @@ -144,10 +148,12 @@ static final class XdsConfig { final String lrsServerName; XdsConfig( + @Nullable String cluster, @Nullable LbConfig childPolicy, @Nullable LbConfig fallbackPolicy, @Nullable String edsServiceName, @Nullable String lrsServerName) { + this.cluster = cluster; this.childPolicy = childPolicy; this.fallbackPolicy = fallbackPolicy; this.edsServiceName = edsServiceName; @@ -157,6 +163,7 @@ static final class XdsConfig { @Override public String toString() { return MoreObjects.toStringHelper(this) + .add("cluster", cluster) .add("childPolicy", childPolicy) .add("fallbackPolicy", fallbackPolicy) .add("edsServiceName", edsServiceName) @@ -170,7 +177,8 @@ public boolean equals(Object obj) { return false; } XdsConfig that = (XdsConfig) obj; - return Objects.equal(this.childPolicy, that.childPolicy) + return Objects.equal(this.cluster, that.cluster) + && Objects.equal(this.childPolicy, that.childPolicy) && Objects.equal(this.fallbackPolicy, that.fallbackPolicy) && Objects.equal(this.edsServiceName, that.edsServiceName) && Objects.equal(this.lrsServerName, that.lrsServerName); @@ -178,7 +186,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hashCode(childPolicy, fallbackPolicy, edsServiceName, lrsServerName); + return Objects.hashCode(cluster, childPolicy, 
fallbackPolicy, edsServiceName, lrsServerName); } } } diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index cb1651363d8..ea142a707f0 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -123,6 +123,7 @@ public void start(final Listener2 listener) { XdsClient createXdsClient() { return new XdsClientImpl( + authority, serverList, channelFactory, node, diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java index 912ad72d94a..75b6db1ea04 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java @@ -198,7 +198,6 @@ public void handleResolutionErrorBeforeOrAfterCdsWorking() { .setClusterName("foo.googleapis.com") .setEdsServiceName("edsServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .build()); verify(helper).updateBalancingState(eq(CONNECTING), any(SubchannelPicker.class)); @@ -230,7 +229,6 @@ public void handleCdsConfigs() throws Exception { .setClusterName("foo.googleapis.com") .setEdsServiceName("edsServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .build()); assertThat(edsLbHelpers).hasSize(1); @@ -240,6 +238,7 @@ public void handleCdsConfigs() throws Exception { ArgumentCaptor resolvedAddressesCaptor1 = ArgumentCaptor.forClass(null); verify(edsLoadBalancer1).handleResolvedAddresses(resolvedAddressesCaptor1.capture()); XdsConfig expectedXdsConfig = new XdsConfig( + "foo.googleapis.com", new LbConfig("round_robin", ImmutableMap.of()), null, "edsServiceFoo.googleapis.com", @@ -271,7 +270,6 @@ public void handleCdsConfigs() throws Exception { .setClusterName("bar.googleapis.com") .setEdsServiceName("edsServiceBar.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(true) .setLrsServerName("lrsBar.googleapis.com") .build()); @@ 
-282,6 +280,7 @@ public void handleCdsConfigs() throws Exception { ArgumentCaptor resolvedAddressesCaptor2 = ArgumentCaptor.forClass(null); verify(edsLoadBalancer2).handleResolvedAddresses(resolvedAddressesCaptor2.capture()); expectedXdsConfig = new XdsConfig( + "bar.googleapis.com", new LbConfig("round_robin", ImmutableMap.of()), null, "edsServiceBar.googleapis.com", @@ -307,10 +306,10 @@ public void handleCdsConfigs() throws Exception { .setClusterName("bar.googleapis.com") .setEdsServiceName("edsServiceBar2.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .build()); verify(edsLoadBalancer2, times(2)).handleResolvedAddresses(resolvedAddressesCaptor2.capture()); expectedXdsConfig = new XdsConfig( + "bar.googleapis.com", new LbConfig("round_robin", ImmutableMap.of()), null, "edsServiceBar2.googleapis.com", @@ -358,7 +357,6 @@ public void handleCdsConfigs_withUpstreamTlsContext() throws Exception { .setClusterName("foo.googleapis.com") .setEdsServiceName("edsServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .setUpstreamTlsContext(upstreamTlsContext) .build()); @@ -391,7 +389,6 @@ public void handleCdsConfigs_withUpstreamTlsContext() throws Exception { .setClusterName("bar.googleapis.com") .setEdsServiceName("eds1ServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .setUpstreamTlsContext(upstreamTlsContext) .build()); @@ -416,7 +413,6 @@ public void handleCdsConfigs_withUpstreamTlsContext() throws Exception { .setClusterName("bar.googleapis.com") .setEdsServiceName("eds1ServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .setUpstreamTlsContext(upstreamTlsContext1) .build()); @@ -436,7 +432,6 @@ public void handleCdsConfigs_withUpstreamTlsContext() throws Exception { .setClusterName("bar.googleapis.com") .setEdsServiceName("eds1ServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .setUpstreamTlsContext(null) .build()); 
verify(mockTlsContextManager).releaseClientSslContextProvider(same(mockSslContextProvider1)); @@ -501,7 +496,6 @@ public void clusterWatcher_onErrorCalledBeforeAndAfterOnClusterChanged() throws .setClusterName("foo.googleapis.com") .setEdsServiceName("edsServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .build()); assertThat(edsLbHelpers).hasSize(1); @@ -542,7 +536,6 @@ public void cdsBalancerIntegrateWithEdsBalancer() throws Exception { .setClusterName("foo.googleapis.com") .setEdsServiceName("edsServiceFoo.googleapis.com") .setLbPolicy("round_robin") - .setEnableLrs(false) .build()); ArgumentCaptor endpointWatcherCaptor = ArgumentCaptor.forClass(null); diff --git a/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java index e69490906e6..adcae1c52d9 100644 --- a/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java @@ -108,6 +108,7 @@ @RunWith(Parameterized.class) public class EdsLoadBalancerTest { + private static final String CLUSTER_NAME = "eds-lb-test.example.com"; private static final String SERVICE_AUTHORITY = "test.authority.example.com"; @Rule @@ -234,8 +235,13 @@ public StreamObserver streamAggregatedResources( if (isFullFlow) { xdsClientPoolFromResolveAddresses = new FakeXdsClientPool( new XdsClientImpl( - serverList, channelFactory, Node.getDefaultInstance(), syncContext, - fakeClock.getScheduledExecutorService(), mock(BackoffPolicy.Provider.class), + SERVICE_AUTHORITY, + serverList, + channelFactory, + Node.getDefaultInstance(), + syncContext, + fakeClock.getScheduledExecutorService(), + mock(BackoffPolicy.Provider.class), fakeClock.getStopwatchSupplier())); } @@ -265,7 +271,7 @@ public void tearDown() { @Test public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() { - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new 
XdsConfig(CLUSTER_NAME, null, null, null, null)); // handleResolutionError() before receiving any endpoint update. edsLb.handleNameResolutionError(Status.DATA_LOSS.withDescription("fake status")); @@ -273,7 +279,7 @@ public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() { // Endpoint update received. ClusterLoadAssignment clusterLoadAssignment = - buildClusterLoadAssignment("edsServiceName1", + buildClusterLoadAssignment(CLUSTER_NAME, ImmutableList.of( buildLocalityLbEndpoints("region1", "zone1", "subzone1", ImmutableList.of( @@ -293,7 +299,7 @@ public void handleNameResolutionErrorBeforeAndAfterEdsWorkding() { public void handleEdsServiceNameChangeInXdsConfig() { assertThat(childHelpers).isEmpty(); - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName1", null)); ClusterLoadAssignment clusterLoadAssignment = buildClusterLoadAssignment("edsServiceName1", ImmutableList.of( @@ -313,7 +319,7 @@ public void handleEdsServiceNameChangeInXdsConfig() { assertLatestConnectivityState(CONNECTING); // Change edsServicename to edsServiceName2. - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName2", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName2", null)); // The old balancer was not READY, so it will be shutdown immediately. verify(childBalancer1).shutdown(); @@ -343,7 +349,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { assertLatestSubchannelPicker(subchannel2); // Change edsServiceName to edsServiceName3. 
- deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName3", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName3", null)); clusterLoadAssignment = buildClusterLoadAssignment("edsServiceName3", ImmutableList.of( @@ -369,7 +375,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { assertLatestConnectivityState(CONNECTING); // Change edsServiceName to edsServiceName4. - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName4", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName4", null)); verify(childBalancer3).shutdown(); clusterLoadAssignment = @@ -397,7 +403,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { assertLatestSubchannelPicker(subchannel4); // Change edsServiceName to edsServiceName5. - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName5", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName5", null)); clusterLoadAssignment = buildClusterLoadAssignment("edsServiceName5", ImmutableList.of( @@ -432,13 +438,13 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { @Test public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, null, null)); verify(resourceUpdateCallback, never()).onWorking(); // first EDS response ClusterLoadAssignment clusterLoadAssignment = - buildClusterLoadAssignment("edsServiceName1", + buildClusterLoadAssignment(CLUSTER_NAME, ImmutableList.of( buildLocalityLbEndpoints("region1", "zone1", "subzone1", ImmutableList.of( @@ -451,7 +457,7 @@ public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { // second EDS response clusterLoadAssignment = - buildClusterLoadAssignment("edsServiceName1", + buildClusterLoadAssignment(CLUSTER_NAME, ImmutableList.of( buildLocalityLbEndpoints("region1", 
"zone1", "subzone1", ImmutableList.of( @@ -466,10 +472,10 @@ public void firstAndSecondEdsResponseReceived_onWorkingCalledOnce() { @Test public void handleAllDropUpdates_pickersAreDropped() { - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, null, null)); ClusterLoadAssignment clusterLoadAssignment = buildClusterLoadAssignment( - "edsServiceName1", + CLUSTER_NAME, ImmutableList.of( buildLocalityLbEndpoints("region1", "zone1", "subzone1", ImmutableList.of( @@ -496,7 +502,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { assertLatestSubchannelPicker(subchannel); clusterLoadAssignment = buildClusterLoadAssignment( - "edsServiceName1", + CLUSTER_NAME, ImmutableList.of( buildLocalityLbEndpoints("region1", "zone1", "subzone1", ImmutableList.of( @@ -519,7 +525,7 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { @Test public void handleLocalityAssignmentUpdates_pickersUpdatedFromChildBalancer() { - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, null, null)); LbEndpoint endpoint11 = buildLbEndpoint("addr11.example.com", 8011, HEALTHY, 11); LbEndpoint endpoint12 = buildLbEndpoint("addr12.example.com", 8012, HEALTHY, 12); @@ -545,7 +551,7 @@ public void handleLocalityAssignmentUpdates_pickersUpdatedFromChildBalancer() { 0); ClusterLoadAssignment clusterLoadAssignment = buildClusterLoadAssignment( - "edsServiceName1", + CLUSTER_NAME, ImmutableList.of(localityLbEndpoints1, localityLbEndpoints2, localityLbEndpoints3), ImmutableList.of()); receiveEndpointUpdate(clusterLoadAssignment); @@ -606,7 +612,7 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, helper, resourceUpdateCallback, lbRegistry, localityStoreFactory, bootstrapper, channelFactory); - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + 
deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName1", null)); assertThat(localityStores).hasSize(1); LocalityStore localityStore = localityStores.peekLast(); @@ -643,7 +649,7 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, verify(localityStore).updateLocalityStore(endpointUpdate.getLocalityLbEndpointsMap()); // Change cluster name. - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName2", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, "edsServiceName2", null)); assertThat(localityStores).hasSize(2); localityStore = localityStores.peekLast(); @@ -666,7 +672,7 @@ LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, @Test public void verifyErrorPropagation_noPreviousEndpointUpdateReceived() { - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, null, null)); verify(resourceUpdateCallback, never()).onError(); // Forwarding 20 seconds so that the xds client will deem EDS resource not available. @@ -677,10 +683,10 @@ public void verifyErrorPropagation_noPreviousEndpointUpdateReceived() { @Test public void verifyErrorPropagation_withPreviousEndpointUpdateReceived() { - deliverResolvedAddresses(new XdsConfig(null, null, "edsServiceName1", null)); + deliverResolvedAddresses(new XdsConfig(CLUSTER_NAME, null, null, null, null)); // Endpoint update received. 
ClusterLoadAssignment clusterLoadAssignment = - buildClusterLoadAssignment("edsServiceName1", + buildClusterLoadAssignment(CLUSTER_NAME, ImmutableList.of( buildLocalityLbEndpoints("region1", "zone1", "subzone1", ImmutableList.of( diff --git a/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java b/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java index 09a726104e9..06afd8567ea 100644 --- a/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java +++ b/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java @@ -31,6 +31,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.util.concurrent.MoreExecutors; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; import com.google.protobuf.util.Durations; import io.envoyproxy.envoy.api.v2.core.Locality; import io.envoyproxy.envoy.api.v2.core.Node; @@ -53,7 +55,6 @@ import io.grpc.testing.GrpcCleanupRule; import io.grpc.xds.LoadReportClient.LoadReportCallback; import java.util.ArrayDeque; -import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -61,6 +62,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import javax.annotation.Nullable; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -79,7 +81,17 @@ */ @RunWith(JUnit4.class) public class LoadReportClientTest { - private static final Node NODE = Node.newBuilder().setId("LRS test").build(); + private static final String TARGET_NAME = "lrs-test.example.com"; + // bootstrap node identifier + private static final Node NODE = + Node.newBuilder() + .setId("LRS test") + .setMetadata( + Struct.newBuilder() + .putFields( + "TRAFFICDIRECTOR_NETWORK_HOSTNAME", + Value.newBuilder().setStringValue("default").build())) + .build(); private static final FakeClock.TaskFilter LOAD_REPORTING_TASK_FILTER = new FakeClock.TaskFilter() { 
@Override @@ -167,11 +179,14 @@ public void cancelled(Context context) { .thenReturn(TimeUnit.SECONDS.toNanos(1L), TimeUnit.SECONDS.toNanos(10L)); lrsClient = new LoadReportClient( + TARGET_NAME, channel, - NODE, syncContext, + NODE, + syncContext, fakeClock.getScheduledExecutorService(), backoffPolicyProvider, fakeClock.getStopwatchSupplier()); + lrsClient.startLoadReporting(callback); } @After @@ -182,22 +197,17 @@ public void tearDown() { @Test public void typicalWorkflow() { - String cluster1 = "cluster-foo.googleapis.com"; - String cluster2 = "cluster-bar.googleapis.com"; - ClusterStats rawStats1 = generateClusterLoadStats(cluster1); - ClusterStats rawStats2 = generateClusterLoadStats(cluster2); - when(loadStatsStore1.generateLoadReport()).thenReturn(rawStats1); - when(loadStatsStore2.generateLoadReport()).thenReturn(rawStats2); - lrsClient.addLoadStatsStore(cluster1, null, loadStatsStore1); - lrsClient.addLoadStatsStore(cluster2, null, loadStatsStore2); - lrsClient.startLoadReporting(callback); - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); StreamObserver requestObserver = Iterables.getOnlyElement(lrsRequestObservers); InOrder inOrder = inOrder(requestObserver, callback); - inOrder.verify(requestObserver).onNext(eq(buildInitialRequest(cluster1, cluster2))); + inOrder.verify(requestObserver).onNext(eq(buildInitialRequest())); + + String cluster1 = "cluster-foo.googleapis.com"; + ClusterStats rawStats1 = generateClusterLoadStats(cluster1, null); + when(loadStatsStore1.generateLoadReport()).thenReturn(rawStats1); + lrsClient.addLoadStatsStore(cluster1, null, loadStatsStore1); // Management server asks to report loads for cluster1. 
responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster1), 1000)); @@ -213,7 +223,13 @@ public void typicalWorkflow() { fakeClock.forwardNanos(1000); inOrder.verify(requestObserver).onNext(argThat(expectedLoadReportMatcher)); - // Management server updates the interval of sending load reports. + String cluster2 = "cluster-bar.googleapis.com"; + ClusterStats rawStats2 = generateClusterLoadStats(cluster2, null); + when(loadStatsStore2.generateLoadReport()).thenReturn(rawStats2); + lrsClient.addLoadStatsStore(cluster2, null, loadStatsStore2); + + // Management server updates the interval of sending load reports, while still asking for + // loads to cluster1 only. responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster1), 2000)); inOrder.verify(callback).onReportResponse(2000); @@ -226,7 +242,6 @@ public void typicalWorkflow() { // Management server asks to report loads for cluster1 and cluster2. responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster1, cluster2), 2000)); - inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(2000); inOrder.verify(requestObserver) @@ -236,7 +251,6 @@ public void typicalWorkflow() { // Load reports for cluster1 is no longer wanted. responseObserver.onNext(buildLrsResponse(ImmutableList.of(cluster2), 2000)); - inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(2000); inOrder.verify(requestObserver) @@ -245,7 +259,6 @@ public void typicalWorkflow() { // Management server asks loads for a cluster that client has no load data. 
responseObserver .onNext(buildLrsResponse(ImmutableList.of("cluster-unknown.googleapis.com"), 2000)); - inOrder.verify(callback).onReportResponse(2000); fakeClock.forwardNanos(2000); ArgumentCaptor reportCaptor = ArgumentCaptor.forClass(null); @@ -257,12 +270,6 @@ public void typicalWorkflow() { @Test public void lrsStreamClosedAndRetried() { - String clusterName = "cluster-foo.googleapis.com"; - ClusterStats stats = generateClusterLoadStats(clusterName); - when(loadStatsStore1.generateLoadReport()).thenReturn(stats); - lrsClient.addLoadStatsStore(clusterName, null, loadStatsStore1); - lrsClient.startLoadReporting(callback); - InOrder inOrder = inOrder(mockLoadReportingService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); inOrder.verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); @@ -270,8 +277,14 @@ public void lrsStreamClosedAndRetried() { assertThat(lrsRequestObservers).hasSize(1); StreamObserver requestObserver = lrsRequestObservers.poll(); + String clusterName = "cluster-foo.googleapis.com"; + String clusterServiceName = "service-blade.googleapis.com"; + ClusterStats stats = generateClusterLoadStats(clusterName, clusterServiceName); + when(loadStatsStore1.generateLoadReport()).thenReturn(stats); + lrsClient.addLoadStatsStore(clusterName, null, loadStatsStore1); + // First balancer RPC - verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); + verify(requestObserver).onNext(eq(buildInitialRequest())); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Balancer closes it immediately (erroneously) @@ -291,7 +304,7 @@ public void lrsStreamClosedAndRetried() { responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); + verify(requestObserver).onNext(eq(buildInitialRequest())); assertEquals(0, 
fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Balancer closes it with an error. @@ -310,7 +323,7 @@ public void lrsStreamClosedAndRetried() { responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); + verify(requestObserver).onNext(eq(buildInitialRequest())); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Balancer sends a response asking for loads of the cluster. @@ -326,7 +339,7 @@ public void lrsStreamClosedAndRetried() { responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); + verify(requestObserver).onNext(eq(buildInitialRequest())); // Fail the retry after spending 4ns fakeClock.forwardNanos(4); @@ -344,7 +357,7 @@ public void lrsStreamClosedAndRetried() { inOrder.verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); assertThat(lrsRequestObservers).hasSize(1); requestObserver = lrsRequestObservers.poll(); - verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); + verify(requestObserver).onNext(eq(buildInitialRequest())); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Load reporting back to normal. 
@@ -363,19 +376,19 @@ public void lrsStreamClosedAndRetried() { @Test public void raceBetweenLoadReportingAndLbStreamClosure() { - String clusterName = "cluster-foo.googleapis.com"; - ClusterStats stats = generateClusterLoadStats(clusterName); - when(loadStatsStore1.generateLoadReport()).thenReturn(stats); - lrsClient.addLoadStatsStore(clusterName, null, loadStatsStore1); - lrsClient.startLoadReporting(callback); - verify(mockLoadReportingService).streamLoadStats(lrsResponseObserverCaptor.capture()); StreamObserver responseObserver = lrsResponseObserverCaptor.getValue(); assertThat(lrsRequestObservers).hasSize(1); StreamObserver requestObserver = lrsRequestObservers.poll(); + String clusterName = "cluster-foo.googleapis.com"; + String clusterServiceName = "service-blade.googleapis.com"; + ClusterStats stats = generateClusterLoadStats(clusterName, clusterServiceName); + when(loadStatsStore1.generateLoadReport()).thenReturn(stats); + lrsClient.addLoadStatsStore(clusterName, null, loadStatsStore1); + // First balancer RPC - verify(requestObserver).onNext(eq(buildInitialRequest(clusterName))); + verify(requestObserver).onNext(eq(buildInitialRequest())); assertEquals(0, fakeClock.numPendingTasks(LRS_RPC_RETRY_TASK_FILTER)); // Simulate receiving a response from traffic director. @@ -412,19 +425,28 @@ private static LoadStatsResponse buildLrsResponse( .build(); } - private static LoadStatsRequest buildInitialRequest(String... 
clusters) { - List clusterStatsList = new ArrayList<>(); - for (String cluster : clusters) { - clusterStatsList.add(ClusterStats.newBuilder().setClusterName(cluster).build()); - } + private static LoadStatsRequest buildInitialRequest() { return - LoadStatsRequest.newBuilder().setNode(NODE).addAllClusterStats(clusterStatsList).build(); + LoadStatsRequest.newBuilder() + .setNode( + Node.newBuilder() + .setId("LRS test") + .setMetadata( + Struct.newBuilder() + .putFields( + "TRAFFICDIRECTOR_NETWORK_HOSTNAME", + Value.newBuilder().setStringValue("default").build()) + .putFields( + LoadReportClient.TARGET_NAME_METADATA_KEY, + Value.newBuilder().setStringValue(TARGET_NAME).build()))) + .build(); } /** * Generates a raw service load stats report with random data. */ - private static ClusterStats generateClusterLoadStats(String clusterName) { + private static ClusterStats generateClusterLoadStats( + String clusterName, @Nullable String clusterServiceName) { long callsInProgress = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); long callsSucceeded = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); long callsFailed = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); @@ -432,30 +454,32 @@ private static ClusterStats generateClusterLoadStats(String clusterName) { long numLbDrops = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); long numThrottleDrops = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE); - return - ClusterStats.newBuilder() - .setClusterName(clusterName) - .addUpstreamLocalityStats( - UpstreamLocalityStats.newBuilder() - .setLocality( - Locality.newBuilder() - .setRegion("region-foo") - .setZone("zone-bar") - .setSubZone("subzone-baz")) - .setTotalRequestsInProgress(callsInProgress) - .setTotalSuccessfulRequests(callsSucceeded) - .setTotalErrorRequests(callsFailed) - .setTotalIssuedRequests(callsIssued)) - .addDroppedRequests( - DroppedRequests.newBuilder() - .setCategory("lb") - .setDroppedCount(numLbDrops)) - .addDroppedRequests( - 
DroppedRequests.newBuilder() - .setCategory("throttle") - .setDroppedCount(numThrottleDrops)) - .setTotalDroppedRequests(numLbDrops + numThrottleDrops) - .build(); + ClusterStats.Builder clusterStatsBuilder = ClusterStats.newBuilder(); + clusterStatsBuilder.setClusterName(clusterName); + if (clusterServiceName != null) { + clusterStatsBuilder.setClusterServiceName(clusterServiceName); + } + clusterStatsBuilder.addUpstreamLocalityStats( + UpstreamLocalityStats.newBuilder() + .setLocality( + Locality.newBuilder() + .setRegion("region-foo") + .setZone("zone-bar") + .setSubZone("subzone-baz")) + .setTotalRequestsInProgress(callsInProgress) + .setTotalSuccessfulRequests(callsSucceeded) + .setTotalErrorRequests(callsFailed) + .setTotalIssuedRequests(callsIssued)) + .addDroppedRequests( + DroppedRequests.newBuilder() + .setCategory("lb") + .setDroppedCount(numLbDrops)) + .addDroppedRequests( + DroppedRequests.newBuilder() + .setCategory("throttle") + .setDroppedCount(numThrottleDrops)) + .setTotalDroppedRequests(numLbDrops + numThrottleDrops); + return clusterStatsBuilder.build(); } /** @@ -476,6 +500,11 @@ private static class LoadStatsRequestMatcher implements ArgumentMatcher streamLoadStats( new CancellationListener() { @Override public void cancelled(Context context) { - call.cancelled = true; lrsEnded.set(true); } }, MoreExecutors.directExecutor()); @@ -282,8 +283,14 @@ ManagedChannel createChannel(List servers) { }; xdsClient = - new XdsClientImpl(servers, channelFactory, NODE, syncContext, - fakeClock.getScheduledExecutorService(), backoffPolicyProvider, + new XdsClientImpl( + TARGET_NAME, + servers, + channelFactory, + NODE, + syncContext, + fakeClock.getScheduledExecutorService(), + backoffPolicyProvider, fakeClock.getStopwatchSupplier()); // Only the connection to management server is established, no RPC request is sent until at // least one watcher is registered. 
@@ -1266,10 +1273,9 @@ public void cdsResponseWithMatchingResource() { verify(clusterWatcher).onClusterChanged(clusterUpdateCaptor.capture()); ClusterUpdate clusterUpdate = clusterUpdateCaptor.getValue(); assertThat(clusterUpdate.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate.getEdsServiceName()).isNull(); assertThat(clusterUpdate.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate.getLrsServerName()).isNull(); // Management server sends back another CDS response updating the requested Cluster. clusters = ImmutableList.of( @@ -1292,8 +1298,7 @@ public void cdsResponseWithMatchingResource() { assertThat(clusterUpdate.getEdsServiceName()) .isEqualTo("eds-cluster-foo.googleapis.com"); assertThat(clusterUpdate.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate.isEnableLrs()).isTrue(); - assertThat(clusterUpdate.getLrsServerName()).isEmpty(); + assertThat(clusterUpdate.getLrsServerName()).isEqualTo(""); } /** @@ -1371,20 +1376,18 @@ public void multipleClusterWatchers() { ClusterUpdate clusterUpdate1 = clusterUpdateCaptor1.getValue(); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate1.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate1.getEdsServiceName()).isNull(); assertThat(clusterUpdate1.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate1.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate1.getLrsServerName()).isNull(); ArgumentCaptor clusterUpdateCaptor2 = ArgumentCaptor.forClass(null); verify(watcher2).onClusterChanged(clusterUpdateCaptor2.capture()); ClusterUpdate clusterUpdate2 = 
clusterUpdateCaptor2.getValue(); assertThat(clusterUpdate2.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); assertThat(clusterUpdate2.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate2.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate2.getEdsServiceName()).isNull(); assertThat(clusterUpdate2.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate2.getLrsServerName()).isNull(); verify(watcher3, never()).onClusterChanged(any(ClusterUpdate.class)); verify(watcher3, never()).onError(any(Status.class)); @@ -1421,20 +1424,18 @@ public void multipleClusterWatchers() { clusterUpdate1 = clusterUpdateCaptor1.getValue(); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate1.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate1.getEdsServiceName()).isNull(); assertThat(clusterUpdate1.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate1.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate1.getLrsServerName()).isNull(); clusterUpdateCaptor2 = ArgumentCaptor.forClass(null); verify(watcher2, times(2)).onClusterChanged(clusterUpdateCaptor2.capture()); clusterUpdate2 = clusterUpdateCaptor2.getValue(); assertThat(clusterUpdate2.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); assertThat(clusterUpdate2.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate2.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate2.getEdsServiceName()).isNull(); assertThat(clusterUpdate2.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(false); 
+ assertThat(clusterUpdate2.getLrsServerName()).isNull(); ArgumentCaptor clusterUpdateCaptor3 = ArgumentCaptor.forClass(null); verify(watcher3).onClusterChanged(clusterUpdateCaptor3.capture()); @@ -1443,8 +1444,7 @@ public void multipleClusterWatchers() { assertThat(clusterUpdate3.getEdsServiceName()) .isEqualTo("eds-cluster-bar.googleapis.com"); assertThat(clusterUpdate3.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate3.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate3.getLrsServerName()).isEmpty(); + assertThat(clusterUpdate3.getLrsServerName()).isEqualTo(""); } /** @@ -1484,10 +1484,9 @@ public void watchClusterAlreadyBeingWatched() { verify(watcher1).onClusterChanged(clusterUpdateCaptor1.capture()); ClusterUpdate clusterUpdate1 = clusterUpdateCaptor1.getValue(); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate1.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate1.getEdsServiceName()).isNull(); assertThat(clusterUpdate1.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate1.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate1.getLrsServerName()).isNull(); assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); // Another cluster watcher interested in the same cluster is added. 
@@ -1500,10 +1499,9 @@ public void watchClusterAlreadyBeingWatched() { verify(watcher2).onClusterChanged(clusterUpdateCaptor2.capture()); ClusterUpdate clusterUpdate2 = clusterUpdateCaptor2.getValue(); assertThat(clusterUpdate2.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate2.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate2.getEdsServiceName()).isNull(); assertThat(clusterUpdate2.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate2.getLrsServerName()).isNull(); verifyNoMoreInteractions(requestObserver); assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); @@ -1543,10 +1541,9 @@ public void addRemoveClusterWatchers() { verify(watcher1).onClusterChanged(clusterUpdateCaptor1.capture()); ClusterUpdate clusterUpdate1 = clusterUpdateCaptor1.getValue(); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate1.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate1.getEdsServiceName()).isNull(); assertThat(clusterUpdate1.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate1.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate1.getLrsServerName()).isNull(); // Add another cluster watcher for a different cluster. 
ClusterWatcher watcher2 = mock(ClusterWatcher.class); @@ -1581,10 +1578,9 @@ public void addRemoveClusterWatchers() { verify(watcher1, times(2)).onClusterChanged(clusterUpdateCaptor1.capture()); clusterUpdate1 = clusterUpdateCaptor1.getValue(); assertThat(clusterUpdate1.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate1.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate1.getEdsServiceName()).isNull(); assertThat(clusterUpdate1.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate1.isEnableLrs()).isEqualTo(false); + assertThat(clusterUpdate1.getLrsServerName()).isNull(); ArgumentCaptor clusterUpdateCaptor2 = ArgumentCaptor.forClass(null); verify(watcher2).onClusterChanged(clusterUpdateCaptor2.capture()); @@ -1593,8 +1589,7 @@ public void addRemoveClusterWatchers() { assertThat(clusterUpdate2.getEdsServiceName()) .isEqualTo("eds-cluster-bar.googleapis.com"); assertThat(clusterUpdate2.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate2.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate2.getLrsServerName()).isEmpty(); + assertThat(clusterUpdate2.getLrsServerName()).isEqualTo(""); // Cancel one of the watcher. 
xdsClient.cancelClusterDataWatch("cluster-foo.googleapis.com", watcher1); @@ -1660,11 +1655,9 @@ public void addRemoveClusterWatchers() { verify(watcher3).onClusterChanged(clusterUpdateCaptor3.capture()); ClusterUpdate clusterUpdate3 = clusterUpdateCaptor3.getValue(); assertThat(clusterUpdate3.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate3.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate3.getEdsServiceName()).isNull(); assertThat(clusterUpdate3.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate3.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate2.getLrsServerName()).isEmpty(); + assertThat(clusterUpdate2.getLrsServerName()).isEqualTo(""); verifyNoMoreInteractions(watcher1, watcher2); @@ -1768,11 +1761,9 @@ public void cdsUpdateForClusterBeingRemoved() { verify(clusterWatcher).onClusterChanged(clusterUpdateCaptor.capture()); ClusterUpdate clusterUpdate = clusterUpdateCaptor.getValue(); assertThat(clusterUpdate.getClusterName()).isEqualTo("cluster-foo.googleapis.com"); - assertThat(clusterUpdate.getEdsServiceName()) - .isEqualTo("cluster-foo.googleapis.com"); // default to cluster name + assertThat(clusterUpdate.getEdsServiceName()).isNull(); assertThat(clusterUpdate.getLbPolicy()).isEqualTo("round_robin"); - assertThat(clusterUpdate.isEnableLrs()).isEqualTo(true); - assertThat(clusterUpdate.getLrsServerName()).isEmpty(); + assertThat(clusterUpdate.getLrsServerName()).isEqualTo(""); assertThat(fakeClock.getPendingTasks(CDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).isEmpty(); // No cluster is available. 
@@ -3097,14 +3088,31 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { */ @Test public void reportLoadStatsToServer() { - LoadStatsStore loadStatsStore = mock(LoadStatsStore.class); String clusterName = "cluster-foo.googleapis.com"; + LoadStatsStore loadStatsStore = new LoadStatsStoreImpl(clusterName, null); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(null); xdsClient.reportClientStats(clusterName, null, loadStatsStore); LoadReportCall lrsCall = loadReportCalls.poll(); - verify(lrsCall.requestObserver).onNext(eq(buildInitialLoadStatsRequest(clusterName))); + verify(lrsCall.requestObserver).onNext(requestCaptor.capture()); + assertThat(requestCaptor.getValue().getClusterStatsCount()) + .isEqualTo(0); // initial request + + lrsCall.responseObserver.onNext( + LoadStatsResponse.newBuilder() + .addClusters(clusterName) + .setLoadReportingInterval(Durations.fromNanos(1000L)) + .build()); + fakeClock.forwardNanos(1000L); + verify(lrsCall.requestObserver, times(2)).onNext(requestCaptor.capture()); + ClusterStats report = Iterables.getOnlyElement(requestCaptor.getValue().getClusterStatsList()); + assertThat(report.getClusterName()).isEqualTo(clusterName); xdsClient.cancelClientStatsReport(clusterName, null); - assertThat(lrsCall.cancelled).isTrue(); + fakeClock.forwardNanos(1000L); + verify(lrsCall.requestObserver, times(3)).onNext(requestCaptor.capture()); + assertThat(requestCaptor.getValue().getClusterStatsCount()) + .isEqualTo(0); // no more stats reported + // See more test on LoadReportClientTest.java } @@ -3536,14 +3544,6 @@ public void messagePrinter_printEdsResponse() { assertThat(res).isEqualTo(expectedString); } - private static LoadStatsRequest buildInitialLoadStatsRequest(String clusterName) { - return - LoadStatsRequest.newBuilder() - .setNode(NODE) - .addClusterStats(ClusterStats.newBuilder().setClusterName(clusterName)) - .build(); - } - /** * Matcher for DiscoveryRequest without the comparison of error_details field, which 
is used for * management server debugging purposes. @@ -3593,7 +3593,6 @@ private static class LoadReportCall { private final StreamObserver requestObserver; @SuppressWarnings("unused") private final StreamObserver responseObserver; - private boolean cancelled; LoadReportCall(StreamObserver requestObserver, StreamObserver responseObserver) { diff --git a/xds/src/test/java/io/grpc/xds/XdsClientTest.java b/xds/src/test/java/io/grpc/xds/XdsClientTest.java index 70cc3ef253c..56fca6dc09b 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientTest.java @@ -21,7 +21,6 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; -import io.grpc.xds.XdsClient.ClusterUpdate; import io.grpc.xds.XdsClient.RefCountedXdsClientObjectPool; import io.grpc.xds.XdsClient.XdsClientFactory; import org.junit.Rule; @@ -38,24 +37,6 @@ public class XdsClientTest { @Rule public final ExpectedException thrown = ExpectedException.none(); - @Test - public void buildClusterUpdate_defaultToClusterNameWhenEdsServiceNameNotSet() { - ClusterUpdate clusterUpdate1 = - ClusterUpdate.newBuilder() - .setClusterName("foo.googleapis.com") - .setEdsServiceName("bar.googleapis.com") - .setLbPolicy("round_robin") - .build(); - assertThat(clusterUpdate1.getEdsServiceName()).isEqualTo("bar.googleapis.com"); - - ClusterUpdate clusterUpdate2 = - ClusterUpdate.newBuilder() - .setClusterName("foo.googleapis.com") - .setLbPolicy("round_robin") - .build(); - assertThat(clusterUpdate2.getEdsServiceName()).isEqualTo("foo.googleapis.com"); - } - @Test public void refCountedXdsClientObjectPool_getObjectShouldMatchReturnObject() { XdsClientFactory xdsClientFactory = new XdsClientFactory() { diff --git a/xds/src/test/java/io/grpc/xds/XdsLoadBalancerProviderTest.java b/xds/src/test/java/io/grpc/xds/XdsLoadBalancerProviderTest.java index 2e83b9e5835..e4e8c1987bb 100644 --- a/xds/src/test/java/io/grpc/xds/XdsLoadBalancerProviderTest.java +++ 
b/xds/src/test/java/io/grpc/xds/XdsLoadBalancerProviderTest.java @@ -155,6 +155,7 @@ public void selectFallBackPolicy_roundRobinIsDefault() throws Exception { @Test public void parseLoadBalancingConfigPolicy() throws Exception { String rawLbConfig = "{" + + "\"cluster\" : \"foo.googleapis.com\"," + "\"childPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"supported_1\" : {}}]," + "\"fallbackPolicy\" : [{\"unsupported\" : {}}, {\"round_robin\" : {\"key\" : \"val\"}}," + "{\"supported_2\" : {\"key\" : \"val\"}}]," @@ -169,6 +170,7 @@ public void parseLoadBalancingConfigPolicy() throws Exception { assertThat(configOrError.getConfig()).isInstanceOf(XdsConfig.class); assertThat(configOrError.getConfig()).isEqualTo( new XdsConfig( + "foo.googleapis.com", ServiceConfigUtil.unwrapLoadBalancingConfig( checkObject(JsonParser.parse("{\"supported_1\" : {}}"))), ServiceConfigUtil.unwrapLoadBalancingConfig( From 3b1af270732f8c53d22488fae82d4f40f1c0855e Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Mon, 24 Feb 2020 11:34:19 -0800 Subject: [PATCH 71/86] compiler: add std:: qualifications to all references to std::string --- .../src/java_plugin/cpp/java_generator.cpp | 110 +++++++++--------- compiler/src/java_plugin/cpp/java_generator.h | 4 +- compiler/src/java_plugin/cpp/java_plugin.cpp | 16 +-- 3 files changed, 65 insertions(+), 65 deletions(-) diff --git a/compiler/src/java_plugin/cpp/java_generator.cpp b/compiler/src/java_plugin/cpp/java_generator.cpp index 5232f11fa53..969d71103e1 100644 --- a/compiler/src/java_plugin/cpp/java_generator.cpp +++ b/compiler/src/java_plugin/cpp/java_generator.cpp @@ -52,7 +52,7 @@ using google::protobuf::SourceLocation; using std::to_string; // java keywords from: https://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-3.9 -static std::set java_keywords = { +static std::set java_keywords = { "abstract", "assert", "boolean", @@ -112,8 +112,8 @@ static std::set java_keywords = { // - decapitalize the 
first letter // - remove embedded underscores & capitalize the following letter // Finally, if the result is a reserved java keyword, append an underscore. -static string MixedLower(const string& word) { - string w; +static std::string MixedLower(const std::string& word) { + std::string w; w += tolower(word[0]); bool after_underscore = false; for (size_t i = 1; i < word.length(); ++i) { @@ -134,8 +134,8 @@ static string MixedLower(const string& word) { // - An underscore is inserted where a lower case letter is followed by an // upper case letter. // - All letters are converted to upper case -static string ToAllUpperCase(const string& word) { - string w; +static std::string ToAllUpperCase(const std::string& word) { + std::string w; for (size_t i = 0; i < word.length(); ++i) { w += toupper(word[i]); if ((i < word.length() - 1) && islower(word[i]) && isupper(word[i + 1])) { @@ -145,19 +145,19 @@ static string ToAllUpperCase(const string& word) { return w; } -static inline string LowerMethodName(const MethodDescriptor* method) { +static inline std::string LowerMethodName(const MethodDescriptor* method) { return MixedLower(method->name()); } -static inline string MethodPropertiesFieldName(const MethodDescriptor* method) { +static inline std::string MethodPropertiesFieldName(const MethodDescriptor* method) { return "METHOD_" + ToAllUpperCase(method->name()); } -static inline string MethodPropertiesGetterName(const MethodDescriptor* method) { +static inline std::string MethodPropertiesGetterName(const MethodDescriptor* method) { return MixedLower("get_" + method->name() + "_method"); } -static inline string MethodIdFieldName(const MethodDescriptor* method) { +static inline std::string MethodIdFieldName(const MethodDescriptor* method) { return "METHODID_" + ToAllUpperCase(method->name()); } @@ -165,13 +165,13 @@ static inline bool ShouldGenerateAsLite(const Descriptor* desc) { return false; } -static inline string MessageFullJavaName(const Descriptor* desc) { +static 
inline std::string MessageFullJavaName(const Descriptor* desc) { return google::protobuf::compiler::java::ClassName(desc); } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. template -static void GrpcSplitStringToIteratorUsing(const string& full, +static void GrpcSplitStringToIteratorUsing(const std::string& full, const char* delim, ITR& result) { // Optimize the common case where delim is a single character. @@ -185,17 +185,17 @@ static void GrpcSplitStringToIteratorUsing(const string& full, } else { const char* start = p; while (++p != end && *p != c); - *result++ = string(start, p - start); + *result++ = std::string(start, p - start); } } return; } - string::size_type begin_index, end_index; + std::string::size_type begin_index, end_index; begin_index = full.find_first_not_of(delim); - while (begin_index != string::npos) { + while (begin_index != std::string::npos) { end_index = full.find_first_of(delim, begin_index); - if (end_index == string::npos) { + if (end_index == std::string::npos) { *result++ = full.substr(begin_index); return; } @@ -205,28 +205,28 @@ static void GrpcSplitStringToIteratorUsing(const string& full, } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. -static void GrpcSplitStringUsing(const string& full, +static void GrpcSplitStringUsing(const std::string& full, const char* delim, - std::vector* result) { - std::back_insert_iterator< std::vector > it(*result); + std::vector* result) { + std::back_insert_iterator< std::vector > it(*result); GrpcSplitStringToIteratorUsing(full, delim, it); } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. 
-static std::vector GrpcSplit(const string& full, const char* delim) { - std::vector result; +static std::vector GrpcSplit(const std::string& full, const char* delim) { + std::vector result; GrpcSplitStringUsing(full, delim, &result); return result; } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. -static string GrpcEscapeJavadoc(const string& input) { - string result; +static std::string GrpcEscapeJavadoc(const std::string& input) { + std::string result; result.reserve(input.size() * 2); char prev = '*'; - for (string::size_type i = 0; i < input.size(); i++) { + for (std::string::size_type i = 0; i < input.size(); i++) { char c = input[i]; switch (c) { case '*': @@ -280,17 +280,17 @@ static string GrpcEscapeJavadoc(const string& input) { // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. template -static string GrpcGetCommentsForDescriptor(const DescriptorType* descriptor) { +static std::string GrpcGetCommentsForDescriptor(const DescriptorType* descriptor) { SourceLocation location; if (descriptor->GetSourceLocation(&location)) { return location.leading_comments.empty() ? location.trailing_comments : location.leading_comments; } - return string(); + return std::string(); } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. -static std::vector GrpcGetDocLines(const string& comments) { +static std::vector GrpcGetDocLines(const std::string& comments) { if (!comments.empty()) { // TODO(kenton): Ideally we should parse the comment text as Markdown and // write it back as HTML, but this requires a Markdown parser. For now @@ -298,26 +298,26 @@ static std::vector GrpcGetDocLines(const string& comments) { // If the comment itself contains block comment start or end markers, // HTML-escape them so that they don't accidentally close the doc comment. 
- string escapedComments = GrpcEscapeJavadoc(comments); + std::string escapedComments = GrpcEscapeJavadoc(comments); - std::vector lines = GrpcSplit(escapedComments, "\n"); + std::vector lines = GrpcSplit(escapedComments, "\n"); while (!lines.empty() && lines.back().empty()) { lines.pop_back(); } return lines; } - return std::vector(); + return std::vector(); } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. template -static std::vector GrpcGetDocLinesForDescriptor(const DescriptorType* descriptor) { +static std::vector GrpcGetDocLinesForDescriptor(const DescriptorType* descriptor) { return GrpcGetDocLines(GrpcGetCommentsForDescriptor(descriptor)); } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. static void GrpcWriteDocCommentBody(Printer* printer, - const std::vector& lines, + const std::vector& lines, bool surroundWithPreTag) { if (!lines.empty()) { if (surroundWithPreTag) { @@ -342,9 +342,9 @@ static void GrpcWriteDocCommentBody(Printer* printer, } // TODO(nmittler): Remove once protobuf includes javadoc methods in distribution. 
-static void GrpcWriteDocComment(Printer* printer, const string& comments) { +static void GrpcWriteDocComment(Printer* printer, const std::string& comments) { printer->Print("/**\n"); - std::vector lines = GrpcGetDocLines(comments); + std::vector lines = GrpcGetDocLines(comments); GrpcWriteDocCommentBody(printer, lines, false); printer->Print(" */\n"); } @@ -355,7 +355,7 @@ static void GrpcWriteServiceDocComment(Printer* printer, // Deviating from protobuf to avoid extraneous docs // (see https://github.com/google/protobuf/issues/1406); printer->Print("/**\n"); - std::vector lines = GrpcGetDocLinesForDescriptor(service); + std::vector lines = GrpcGetDocLinesForDescriptor(service); GrpcWriteDocCommentBody(printer, lines, true); printer->Print(" */\n"); } @@ -366,13 +366,13 @@ void GrpcWriteMethodDocComment(Printer* printer, // Deviating from protobuf to avoid extraneous docs // (see https://github.com/google/protobuf/issues/1406); printer->Print("/**\n"); - std::vector lines = GrpcGetDocLinesForDescriptor(method); + std::vector lines = GrpcGetDocLinesForDescriptor(method); GrpcWriteDocCommentBody(printer, lines, true); printer->Print(" */\n"); } static void PrintMethodFields( - const ServiceDescriptor* service, std::map* vars, + const ServiceDescriptor* service, std::map* vars, Printer* p, ProtoFlavor flavor) { p->Print("// Static method descriptors that strictly reflect the proto.\n"); (*vars)["service_name"] = service->name(); @@ -486,15 +486,15 @@ enum CallType { }; static void PrintBindServiceMethodBody(const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p); // Prints a StubFactory for given service / stub type. 
static void PrintStubFactory( const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p, StubType type) { - string stub_type_name; + std::string stub_type_name; switch (type) { case ASYNC_CLIENT_IMPL: stub_type_name = ""; @@ -523,14 +523,14 @@ static void PrintStubFactory( // Prints a client interface or implementation class, or a server interface. static void PrintStub( const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p, StubType type) { - const string service_name = service->name(); + const std::string service_name = service->name(); (*vars)["service_name"] = service_name; (*vars)["abstract_name"] = service_name + "ImplBase"; - string stub_name = service_name; - string client_name = service_name; - string stub_base_class_name = "AbstractStub"; + std::string stub_name = service_name; + std::string client_name = service_name; + std::string stub_base_class_name = "AbstractStub"; CallType call_type; bool impl_base = false; bool interface = false; @@ -804,7 +804,7 @@ static bool CompareMethodClientStreaming(const MethodDescriptor* method1, // Place all method invocations into a single class to reduce memory footprint // on Android. static void PrintMethodHandlerClass(const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p) { // Sort method ids based on client_streaming() so switch tables are compact. 
std::vector sorted_methods(service->method_count()); @@ -910,7 +910,7 @@ static void PrintMethodHandlerClass(const ServiceDescriptor* service, } static void PrintGetServiceDescriptorMethod(const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p, ProtoFlavor flavor) { (*vars)["service_name"] = service->name(); @@ -1011,7 +1011,7 @@ static void PrintGetServiceDescriptorMethod(const ServiceDescriptor* service, } static void PrintBindServiceMethodBody(const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p) { (*vars)["service_name"] = service->name(); p->Indent(); @@ -1065,7 +1065,7 @@ static void PrintBindServiceMethodBody(const ServiceDescriptor* service, } static void PrintService(const ServiceDescriptor* service, - std::map* vars, + std::map* vars, Printer* p, ProtoFlavor flavor, bool disable_version) { @@ -1195,7 +1195,7 @@ void GenerateService(const ServiceDescriptor* service, bool disable_version) { // All non-generated classes must be referred by fully qualified names to // avoid collision with generated classes. - std::map vars; + std::map vars; vars["String"] = "java.lang.String"; vars["Deprecated"] = "java.lang.Deprecated"; vars["Override"] = "java.lang.Override"; @@ -1229,7 +1229,7 @@ void GenerateService(const ServiceDescriptor* service, "com.google.common.util.concurrent.ListenableFuture"; Printer printer(out, '$'); - string package_name = ServiceJavaPackage(service->file()); + std::string package_name = ServiceJavaPackage(service->file()); if (!package_name.empty()) { printer.Print( "package $package_name$;\n\n", @@ -1237,7 +1237,7 @@ void GenerateService(const ServiceDescriptor* service, } PrintImports(&printer); - // Package string is used to fully qualify method names. + // Package std::string is used to fully qualify method names. 
vars["Package"] = service->file()->package(); if (!vars["Package"].empty()) { vars["Package"].append("."); @@ -1245,10 +1245,10 @@ void GenerateService(const ServiceDescriptor* service, PrintService(service, &vars, &printer, flavor, disable_version); } -string ServiceJavaPackage(const FileDescriptor* file) { - string result = google::protobuf::compiler::java::ClassName(file); +std::string ServiceJavaPackage(const FileDescriptor* file) { + std::string result = google::protobuf::compiler::java::ClassName(file); size_t last_dot_pos = result.find_last_of('.'); - if (last_dot_pos != string::npos) { + if (last_dot_pos != std::string::npos) { result.resize(last_dot_pos); } else { result = ""; @@ -1256,7 +1256,7 @@ string ServiceJavaPackage(const FileDescriptor* file) { return result; } -string ServiceClassName(const google::protobuf::ServiceDescriptor* service) { +std::string ServiceClassName(const google::protobuf::ServiceDescriptor* service) { return service->name() + "Grpc"; } diff --git a/compiler/src/java_plugin/cpp/java_generator.h b/compiler/src/java_plugin/cpp/java_generator.h index 41a7fc093ff..66a4e9c9c43 100644 --- a/compiler/src/java_plugin/cpp/java_generator.h +++ b/compiler/src/java_plugin/cpp/java_generator.h @@ -56,11 +56,11 @@ enum ProtoFlavor { }; // Returns the package name of the gRPC services defined in the given file. -string ServiceJavaPackage(const google::protobuf::FileDescriptor* file); +std::string ServiceJavaPackage(const google::protobuf::FileDescriptor* file); // Returns the name of the outer class that wraps in all the generated code for // the given service. 
-string ServiceClassName(const google::protobuf::ServiceDescriptor* service); +std::string ServiceClassName(const google::protobuf::ServiceDescriptor* service); // Writes the generated service interface into the given ZeroCopyOutputStream void GenerateService(const google::protobuf::ServiceDescriptor* service, diff --git a/compiler/src/java_plugin/cpp/java_plugin.cpp b/compiler/src/java_plugin/cpp/java_plugin.cpp index 098a4db2178..a62fb8677d6 100644 --- a/compiler/src/java_plugin/cpp/java_plugin.cpp +++ b/compiler/src/java_plugin/cpp/java_plugin.cpp @@ -27,8 +27,8 @@ #include #include -static string JavaPackageToDir(const string& package_name) { - string package_dir = package_name; +static std::string JavaPackageToDir(const std::string& package_name) { + std::string package_dir = package_name; for (size_t i = 0; i < package_dir.size(); ++i) { if (package_dir[i] == '.') { package_dir[i] = '/'; @@ -44,10 +44,10 @@ class JavaGrpcGenerator : public google::protobuf::compiler::CodeGenerator { virtual ~JavaGrpcGenerator() {} virtual bool Generate(const google::protobuf::FileDescriptor* file, - const string& parameter, + const std::string& parameter, google::protobuf::compiler::GeneratorContext* context, - string* error) const { - std::vector > options; + std::string* error) const { + std::vector > options; google::protobuf::compiler::ParseGeneratorParameter(parameter, &options); java_grpc_generator::ProtoFlavor flavor = @@ -62,11 +62,11 @@ class JavaGrpcGenerator : public google::protobuf::compiler::CodeGenerator { } } - string package_name = java_grpc_generator::ServiceJavaPackage(file); - string package_filename = JavaPackageToDir(package_name); + std::string package_name = java_grpc_generator::ServiceJavaPackage(file); + std::string package_filename = JavaPackageToDir(package_name); for (int i = 0; i < file->service_count(); ++i) { const google::protobuf::ServiceDescriptor* service = file->service(i); - string filename = package_filename + std::string filename = 
package_filename + java_grpc_generator::ServiceClassName(service) + ".java"; std::unique_ptr output( context->Open(filename)); From e5d124400439859a4e7654c3aa24bf1e87681eed Mon Sep 17 00:00:00 2001 From: Sourabh Sarvotham Parkala Date: Mon, 24 Feb 2020 21:23:11 +0100 Subject: [PATCH 72/86] okhttp: updating okhttp version from 2.5.0 to 2.7.4 --- build.gradle | 2 +- gae-interop-testing/gae-jdk8/build.gradle | 2 +- repositories.bzl | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/build.gradle b/build.gradle index 292f931f4aa..2c1bae914d5 100644 --- a/build.gradle +++ b/build.gradle @@ -125,7 +125,7 @@ subprojects { google_api_protos: 'com.google.api.grpc:proto-google-common-protos:1.17.0', google_auth_credentials: "com.google.auth:google-auth-library-credentials:${googleauthVersion}", google_auth_oauth2_http: "com.google.auth:google-auth-library-oauth2-http:${googleauthVersion}", - okhttp: 'com.squareup.okhttp:okhttp:2.5.0', + okhttp: 'com.squareup.okhttp:okhttp:2.7.4', okio: 'com.squareup.okio:okio:1.13.0', opencensus_api: "io.opencensus:opencensus-api:${opencensusVersion}", opencensus_contrib_grpc_metrics: "io.opencensus:opencensus-contrib-grpc-metrics:${opencensusVersion}", diff --git a/gae-interop-testing/gae-jdk8/build.gradle b/gae-interop-testing/gae-jdk8/build.gradle index 53befa72e4c..fd3e9ffabff 100644 --- a/gae-interop-testing/gae-jdk8/build.gradle +++ b/gae-interop-testing/gae-jdk8/build.gradle @@ -21,7 +21,7 @@ buildscript { } dependencies { classpath 'com.google.cloud.tools:appengine-gradle-plugin:1.3.5' - classpath 'com.squareup.okhttp:okhttp:2.5.0' + classpath 'com.squareup.okhttp:okhttp:2.7.4' } } diff --git a/repositories.bzl b/repositories.bzl index d8316da7e25..0197ca58a8f 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -22,7 +22,7 @@ IO_GRPC_GRPC_JAVA_ARTIFACTS = [ "com.google.guava:guava:28.1-android", "com.google.j2objc:j2objc-annotations:1.3", "com.google.truth:truth:1.0", - "com.squareup.okhttp:okhttp:2.5.0", + 
"com.squareup.okhttp:okhttp:2.7.4", "com.squareup.okio:okio:1.13.0", "io.netty:netty-buffer:4.1.45.Final", "io.netty:netty-codec-http2:4.1.45.Final", @@ -293,9 +293,9 @@ def com_google_truth_truth(): def com_squareup_okhttp_okhttp(): jvm_maven_import_external( name = "com_squareup_okhttp_okhttp", - artifact = "com.squareup.okhttp:okhttp:2.5.0", + artifact = "com.squareup.okhttp:okhttp:2.7.4", server_urls = ["https://repo.maven.apache.org/maven2/"], - artifact_sha256 = "1cc716e29539adcda677949508162796daffedb4794cbf947a6f65e696f0381c", + artifact_sha256 = "c88be9af1509d5aeec9394a818c0fa08e26fad9d64ba134e6f977e0bb20cb114", licenses = ["notice"], # Apache 2.0 ) From 47c0b0b792243a97fd23ae2682f2cb01a8c4d26c Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Mon, 24 Feb 2020 14:32:26 -0800 Subject: [PATCH 73/86] complier: fix typo in comment (#6747) --- compiler/src/java_plugin/cpp/java_generator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler/src/java_plugin/cpp/java_generator.cpp b/compiler/src/java_plugin/cpp/java_generator.cpp index 969d71103e1..db914ce12e1 100644 --- a/compiler/src/java_plugin/cpp/java_generator.cpp +++ b/compiler/src/java_plugin/cpp/java_generator.cpp @@ -1237,7 +1237,7 @@ void GenerateService(const ServiceDescriptor* service, } PrintImports(&printer); - // Package std::string is used to fully qualify method names. + // Package string is used to fully qualify method names. 
vars["Package"] = service->file()->package(); if (!vars["Package"].empty()) { vars["Package"].append("."); From 4b201267c631e4cac576773ca1acadcec48550e1 Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Tue, 25 Feb 2020 11:05:36 -0800 Subject: [PATCH 74/86] grpclb: add description to lb sends no backends status (#6751) --- grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java | 6 +++++- .../test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java index d940fc8832c..85f7d6b7669 100644 --- a/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java +++ b/grpclb/src/main/java/io/grpc/grpclb/GrpclbState.java @@ -91,6 +91,9 @@ final class GrpclbState { @VisibleForTesting static final PickResult DROP_PICK_RESULT = PickResult.withDrop(Status.UNAVAILABLE.withDescription("Dropped as requested by balancer")); + @VisibleForTesting + static final Status NO_AVAILABLE_BACKENDS_STATUS = + Status.UNAVAILABLE.withDescription("LoadBalancer responded without any backends"); @VisibleForTesting static final RoundRobinEntry BUFFER_ENTRY = new RoundRobinEntry() { @@ -742,7 +745,8 @@ private void maybeUpdatePicker() { if (backendList.isEmpty()) { if (lbSentEmptyBackends) { pickList = - Collections.singletonList(new ErrorEntry(Status.UNAVAILABLE)); + Collections.singletonList( + new ErrorEntry(NO_AVAILABLE_BACKENDS_STATUS)); state = TRANSIENT_FAILURE; } else { pickList = Collections.singletonList(BUFFER_ENTRY); diff --git a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java index 53b9d0093ed..2a07d2786c6 100644 --- a/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java +++ b/grpclb/src/test/java/io/grpc/grpclb/GrpclbLoadBalancerTest.java @@ -1939,7 +1939,8 @@ public void grpclbWorking_pickFirstMode_lbSendsEmptyAddress() throws Exception { 
inOrder.verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), pickerCaptor.capture()); RoundRobinPicker errorPicker = (RoundRobinPicker) pickerCaptor.getValue(); - assertThat(errorPicker.pickList).containsExactly(new ErrorEntry(Status.UNAVAILABLE)); + assertThat(errorPicker.pickList) + .containsExactly(new ErrorEntry(GrpclbState.NO_AVAILABLE_BACKENDS_STATUS)); lbResponseObserver.onNext(buildLbResponse(Collections.emptyList())); From 936515d2a7fe0bc3af1a76a0f1f1aad2797f2bfc Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Tue, 25 Feb 2020 15:45:44 -0800 Subject: [PATCH 75/86] xds: Improve grpc-xds javadoc and make it publishable - Improve package-info.java and make minor changes to other javadoc. - Make Orca API non-public for the moment. - Make grpc-xds publishable. --- xds/build.gradle | 5 +++- .../main/java/io/grpc/xds/Bootstrapper.java | 1 + .../java/io/grpc/xds/CdsLoadBalancer.java | 2 +- .../java/io/grpc/xds/EdsLoadBalancer.java | 2 +- .../main/java/io/grpc/xds/LoadStatsStore.java | 2 +- .../OrcaMetricReportingServerInterceptor.java | 4 +--- .../main/java/io/grpc/xds/OrcaOobUtil.java | 4 +--- .../java/io/grpc/xds/OrcaPerRequestUtil.java | 4 +--- .../java/io/grpc/xds/XdsNameResolver.java | 6 ++--- .../io/grpc/xds/XdsNameResolverProvider.java | 5 ++-- .../io/grpc/xds/internal/package-info.java | 24 +++++++++++++++++++ .../main/java/io/grpc/xds/package-info.java | 11 ++++++++- .../java/io/grpc/xds/XdsNameResolverTest.java | 2 +- 13 files changed, 52 insertions(+), 20 deletions(-) create mode 100644 xds/src/main/java/io/grpc/xds/internal/package-info.java diff --git a/xds/build.gradle b/xds/build.gradle index fa53527fef2..994d77b202d 100644 --- a/xds/build.gradle +++ b/xds/build.gradle @@ -63,6 +63,10 @@ jar { classifier = 'original' } +javadoc { + exclude 'io/grpc/xds/internal/**' +} + shadowJar { classifier = null dependencies { @@ -95,4 +99,3 @@ publishing { } } } -[publishMavenPublicationToMavenRepository]*.onlyIf { false } diff --git 
a/xds/src/main/java/io/grpc/xds/Bootstrapper.java b/xds/src/main/java/io/grpc/xds/Bootstrapper.java index ab6076f833c..54eaff148f3 100644 --- a/xds/src/main/java/io/grpc/xds/Bootstrapper.java +++ b/xds/src/main/java/io/grpc/xds/Bootstrapper.java @@ -234,6 +234,7 @@ List getChannelCredentials() { /** * Data class containing the results of reading bootstrap. */ + @Internal @Immutable public static class BootstrapInfo { private List servers; diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index 832e365b991..eedea985856 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -50,7 +50,7 @@ /** * Load balancer for cds_experimental LB policy. */ -public final class CdsLoadBalancer extends LoadBalancer { +final class CdsLoadBalancer extends LoadBalancer { private final ChannelLogger channelLogger; private final LoadBalancerRegistry lbRegistry; private final GracefulSwitchLoadBalancer switchingLoadBalancer; diff --git a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java index 5331ed14cff..9ad0f86fc14 100644 --- a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java @@ -148,7 +148,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { TRANSIENT_FAILURE, new ErrorPicker( Status.UNAVAILABLE - .withDescription("No traffic director provided by bootstrap"))); + .withDescription("No management server provided by bootstrap"))); return; } XdsClientFactory xdsClientFactory = new XdsClientFactory() { diff --git a/xds/src/main/java/io/grpc/xds/LoadStatsStore.java b/xds/src/main/java/io/grpc/xds/LoadStatsStore.java index 76e430aa8d9..cd76be41edf 100644 --- a/xds/src/main/java/io/grpc/xds/LoadStatsStore.java +++ b/xds/src/main/java/io/grpc/xds/LoadStatsStore.java @@ -53,7 +53,7 @@ interface LoadStatsStore { * once all of 
theirs loads are completed and reported. * *

      The fields {@code cluster_name} and {@code load_report_interval} in the returned {@link - * ClusterStats} needs to be set before it is ready to be sent to the traffic directory for load + * ClusterStats} needs to be set before it is ready to be sent to the traffic director for load * reporting. * *

      This method is not thread-safe and should be called from the same synchronized context diff --git a/xds/src/main/java/io/grpc/xds/OrcaMetricReportingServerInterceptor.java b/xds/src/main/java/io/grpc/xds/OrcaMetricReportingServerInterceptor.java index a420d5eae53..8a503bd35f9 100644 --- a/xds/src/main/java/io/grpc/xds/OrcaMetricReportingServerInterceptor.java +++ b/xds/src/main/java/io/grpc/xds/OrcaMetricReportingServerInterceptor.java @@ -20,7 +20,6 @@ import com.google.common.annotations.VisibleForTesting; import io.grpc.Context; import io.grpc.Contexts; -import io.grpc.ExperimentalApi; import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; import io.grpc.Metadata; import io.grpc.ServerCall; @@ -41,8 +40,7 @@ * * @since 1.23.0 */ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/6021") -public final class OrcaMetricReportingServerInterceptor implements ServerInterceptor { +final class OrcaMetricReportingServerInterceptor implements ServerInterceptor { private static final OrcaMetricReportingServerInterceptor INSTANCE = new OrcaMetricReportingServerInterceptor(); diff --git a/xds/src/main/java/io/grpc/xds/OrcaOobUtil.java b/xds/src/main/java/io/grpc/xds/OrcaOobUtil.java index 55751562466..1b5acd8c080 100644 --- a/xds/src/main/java/io/grpc/xds/OrcaOobUtil.java +++ b/xds/src/main/java/io/grpc/xds/OrcaOobUtil.java @@ -37,7 +37,6 @@ import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.ClientCall; import io.grpc.ConnectivityStateInfo; -import io.grpc.ExperimentalApi; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.CreateSubchannelArgs; import io.grpc.LoadBalancer.Helper; @@ -69,8 +68,7 @@ * Utility class that provides method for {@link LoadBalancer} to install listeners to receive * out-of-band backend cost metrics in the format of Open Request Cost Aggregation (ORCA). 
*/ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/5790") -public abstract class OrcaOobUtil { +abstract class OrcaOobUtil { private static final Logger logger = Logger.getLogger(OrcaPerRequestUtil.class.getName()); private static final OrcaOobUtil DEFAULT_INSTANCE = diff --git a/xds/src/main/java/io/grpc/xds/OrcaPerRequestUtil.java b/xds/src/main/java/io/grpc/xds/OrcaPerRequestUtil.java index 9719a919346..c193f5e35e5 100644 --- a/xds/src/main/java/io/grpc/xds/OrcaPerRequestUtil.java +++ b/xds/src/main/java/io/grpc/xds/OrcaPerRequestUtil.java @@ -23,7 +23,6 @@ import io.grpc.CallOptions; import io.grpc.ClientStreamTracer; import io.grpc.ClientStreamTracer.StreamInfo; -import io.grpc.ExperimentalApi; import io.grpc.LoadBalancer; import io.grpc.Metadata; import io.grpc.protobuf.ProtoUtils; @@ -35,8 +34,7 @@ * Utility class that provides method for {@link LoadBalancer} to install listeners to receive * per-request backend cost metrics in the format of Open Request Cost Aggregation (ORCA). */ -@ExperimentalApi("https://github.com/grpc/grpc-java/issues/5790") -public abstract class OrcaPerRequestUtil { +abstract class OrcaPerRequestUtil { private static final ClientStreamTracer NOOP_CLIENT_STREAM_TRACER = new ClientStreamTracer() {}; private static final ClientStreamTracer.Factory NOOP_CLIENT_STREAM_TRACER_FACTORY = new ClientStreamTracer.Factory() { diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index ea142a707f0..e65044016e6 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -50,8 +50,8 @@ /** * A {@link NameResolver} for resolving gRPC target names with "xds-experimental" scheme. * - *

      Resolving a gRPC target involves contacting the traffic director via xDS protocol to - * retrieve service information and produce a service config to the caller. + *

      Resolving a gRPC target involves contacting the control plane management server via xDS + * protocol to retrieve service information and produce a service config to the caller. * * @see XdsNameResolverProvider */ @@ -114,7 +114,7 @@ public void start(final Listener2 listener) { final Node node = bootstrapInfo.getNode(); if (serverList.isEmpty()) { listener.onError( - Status.UNAVAILABLE.withDescription("No traffic director provided by bootstrap")); + Status.UNAVAILABLE.withDescription("No management server provided by bootstrap")); return; } diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolverProvider.java b/xds/src/main/java/io/grpc/xds/XdsNameResolverProvider.java index 7087489e18f..9b9003e1eba 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolverProvider.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolverProvider.java @@ -17,6 +17,7 @@ package io.grpc.xds; import com.google.common.base.Preconditions; +import io.grpc.Internal; import io.grpc.NameResolver.Args; import io.grpc.NameResolverProvider; import io.grpc.internal.ExponentialBackoffPolicy; @@ -32,9 +33,9 @@ * slash {@code '/'}, will indicate the name to use in the VHDS query. * *

      This class should not be directly referenced in code. The resolver should be accessed - * through {@link io.grpc.NameResolverRegistry#asFactory#newNameResolver(URI, Args)} with the URI - * scheme "xds-experimental". + * through {@link io.grpc.NameResolverRegistry} with the URI scheme "xds-experimental". */ +@Internal public final class XdsNameResolverProvider extends NameResolverProvider { private static final String SCHEME = "xds-experimental"; diff --git a/xds/src/main/java/io/grpc/xds/internal/package-info.java b/xds/src/main/java/io/grpc/xds/internal/package-info.java new file mode 100644 index 00000000000..fc220860595 --- /dev/null +++ b/xds/src/main/java/io/grpc/xds/internal/package-info.java @@ -0,0 +1,24 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Interfaces and implementations that are internal to gRPC. + * + *

      All the content under this package and its subpackages are considered annotated with {@link + * io.grpc.Internal}. + */ +@io.grpc.Internal +package io.grpc.xds.internal; diff --git a/xds/src/main/java/io/grpc/xds/package-info.java b/xds/src/main/java/io/grpc/xds/package-info.java index 6dc57e8ad53..631e20da9d9 100644 --- a/xds/src/main/java/io/grpc/xds/package-info.java +++ b/xds/src/main/java/io/grpc/xds/package-info.java @@ -15,7 +15,16 @@ */ /** - * The XDS loadbalancer plugin implementation. + * Library for gRPC proxyless service mesh using Envoy xDS protocol. + * + *

      The package currently includes a name resolver plugin and a family of load balancer plugins. + * A gRPC channel for a target with {@code "xds-experimental"} scheme will load the plugins and a + * bootstrap file, and will communicate with an external control plane management server (e.g. + * Traffic Director) that speaks Envoy xDS protocol to retrieve routing, load balancing, load + * reporting configurations etc. for the channel. More features will be added. + * + *

      The library is currently in an agile development phase, so API and design are subject to + * breaking changes. */ @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/5288") package io.grpc.xds; diff --git a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java index cb82406dae5..7ce286d8c99 100644 --- a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java @@ -202,7 +202,7 @@ public BootstrapInfo readBootstrap() { verify(mockListener).onError(statusCaptor.capture()); assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); assertThat(statusCaptor.getValue().getDescription()) - .isEqualTo("No traffic director provided by bootstrap"); + .isEqualTo("No management server provided by bootstrap"); } @Test From 5b36caa4c2b83aab4bade5b9f42b6bd45f3cb56f Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Tue, 25 Feb 2020 17:00:16 -0800 Subject: [PATCH 76/86] xds: clean up xDS fallback balancer's logic for filtering out grpclb balancer addresses. (#6752) Previously, the internal prod test name resolver will give grpclb balancer addresses in `ResolutionResult.addresses`. So we have this filtering code to avoid those addresses being used. We've changed the internal resolver, it will never mix grpclb balancer addresses with normal backend addresses. Therefore, we no longer need this piece of code. 
--- xds/src/main/java/io/grpc/xds/FallbackLb.java | 17 --- .../test/java/io/grpc/xds/FallbackLbTest.java | 108 +----------------- 2 files changed, 4 insertions(+), 121 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/FallbackLb.java b/xds/src/main/java/io/grpc/xds/FallbackLb.java index 380a22dba19..9e37515c6e0 100644 --- a/xds/src/main/java/io/grpc/xds/FallbackLb.java +++ b/xds/src/main/java/io/grpc/xds/FallbackLb.java @@ -18,17 +18,14 @@ import static com.google.common.base.Preconditions.checkNotNull; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; -import static io.grpc.xds.XdsLoadBalancerProvider.XDS_POLICY_NAME; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; import io.grpc.Attributes; import io.grpc.EquivalentAddressGroup; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerRegistry; import io.grpc.NameResolver.ConfigOrError; import io.grpc.Status; -import io.grpc.internal.GrpcAttributes; import io.grpc.internal.ServiceConfigUtil.LbConfig; import io.grpc.util.ForwardingLoadBalancer; import io.grpc.util.GracefulSwitchLoadBalancer; @@ -112,20 +109,6 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { fallbackPolicyLb.switchTo(lbRegistry.getProvider(newFallbackPolicyName)); List servers = resolvedAddresses.getAddresses(); - // Some addresses in the list may be grpclb-v1 balancer addresses, so if the fallback policy - // does not support grpclb-v1 balancer addresses, then we need to exclude them from the list. - // TODO(chengyuanzhang): delete the following logic after changing internal resolver - // to not include grpclb server addresses. 
- if (!newFallbackPolicyName.equals("grpclb") && !newFallbackPolicyName.equals(XDS_POLICY_NAME)) { - ImmutableList.Builder backends = ImmutableList.builder(); - for (EquivalentAddressGroup eag : resolvedAddresses.getAddresses()) { - if (eag.getAttributes().get(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY) == null) { - backends.add(eag); - } - } - servers = backends.build(); - } - // TODO(zhangkun83): FIXME(#5496): this is a temporary hack. if (servers.isEmpty() && !fallbackPolicyLb.canHandleEmptyAddressListFromNameResolution()) { diff --git a/xds/src/test/java/io/grpc/xds/FallbackLbTest.java b/xds/src/test/java/io/grpc/xds/FallbackLbTest.java index 4a63a2b5558..0d715e35982 100644 --- a/xds/src/test/java/io/grpc/xds/FallbackLbTest.java +++ b/xds/src/test/java/io/grpc/xds/FallbackLbTest.java @@ -33,21 +33,16 @@ import io.grpc.LoadBalancer.SubchannelPicker; import io.grpc.LoadBalancerProvider; import io.grpc.LoadBalancerRegistry; -import io.grpc.Status; -import io.grpc.Status.Code; -import io.grpc.internal.GrpcAttributes; import io.grpc.internal.JsonParser; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; /** * Tests for {@link FallbackLb}. 
@@ -245,20 +240,13 @@ public void handlePolicyChanges() throws Exception { verify(balancer2).shutdown(); } - @Test - public void handleBackendsEagsOnly() throws Exception { - EquivalentAddressGroup eag0 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8080))); - Attributes attributes = Attributes - .newBuilder() - .set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "this is a balancer address") - .build(); + public void propagateAddressesToFallbackPolicy() throws Exception { EquivalentAddressGroup eag1 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8081)), attributes); + ImmutableList.of(new InetSocketAddress(8080))); EquivalentAddressGroup eag2 = new EquivalentAddressGroup( ImmutableList.of(new InetSocketAddress(8082))); - List eags = ImmutableList.of(eag0, eag1, eag2); + List eags = ImmutableList.of(eag1, eag2); String lbConfigRaw = "{" + "\"fallbackPolicy\" : [{\"fallback_1\" : { \"fallback_1_option\" : \"yes\"}}]" @@ -273,99 +261,11 @@ public void handleBackendsEagsOnly() throws Exception { LoadBalancer balancer1 = balancers1.get(0); verify(balancer1).handleResolvedAddresses( ResolvedAddresses.newBuilder() - .setAddresses(ImmutableList.of(eag0, eag2)) + .setAddresses(ImmutableList.of(eag1, eag2)) .setAttributes( Attributes.newBuilder() .set(ATTR_LOAD_BALANCING_CONFIG, ImmutableMap.of("fallback_1_option", "yes")) .build()) .build()); } - - @Test - public void resolvingWithOnlyGrpclbAddresses_NoBackendAddress() throws Exception { - Attributes attributes = Attributes - .newBuilder() - .set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "this is a balancer address") - .build(); - EquivalentAddressGroup eag1 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8081)), attributes); - EquivalentAddressGroup eag2 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8082)), attributes); - List eags = ImmutableList.of(eag1, eag2); - String lbConfigRaw = "{" - + "\"fallbackPolicy\" : [{\"fallback_1\" : { 
\"fallback_1_option\" : \"yes\"}}]" - + "}"; - @SuppressWarnings("unchecked") - Map lbConfig = (Map) JsonParser.parse(lbConfigRaw); - fallbackLb.handleResolvedAddresses(ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder().set(ATTR_LOAD_BALANCING_CONFIG, lbConfig).build()) - .build()); - - LoadBalancer balancer1 = balancers1.get(0); - ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(Status.class); - verify(balancer1).handleNameResolutionError(statusCaptor.capture()); - assertThat(statusCaptor.getValue().getCode()).isEqualTo(Code.UNAVAILABLE); - } - - @Test - public void handleGrpclbAddresses() throws Exception { - final AtomicReference balancer = new AtomicReference<>(); - LoadBalancerProvider grpclbProvider = new LoadBalancerProvider() { - @Override - public boolean isAvailable() { - return true; - } - - @Override - public int getPriority() { - return 5; - } - - @Override - public String getPolicyName() { - return "grpclb"; - } - - @Override - public LoadBalancer newLoadBalancer(Helper helper) { - balancer.set(mock(LoadBalancer.class)); - return balancer.get(); - } - }; - LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry(); - lbRegistry.register(grpclbProvider); - fallbackLb = new FallbackLb(helper, lbRegistry); - - EquivalentAddressGroup eag0 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8080))); - Attributes attributes = Attributes - .newBuilder() - .set(GrpcAttributes.ATTR_LB_ADDR_AUTHORITY, "this is a balancer address") - .build(); - EquivalentAddressGroup eag1 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8081)), attributes); - EquivalentAddressGroup eag2 = new EquivalentAddressGroup( - ImmutableList.of(new InetSocketAddress(8082))); - List eags = ImmutableList.of(eag0, eag1, eag2); - - String lbConfigRaw = "{" - + "\"fallbackPolicy\" : [{\"grpclb\" : { \"grpclb_option\" : \"yes\"}}]" - + "}"; - @SuppressWarnings("unchecked") - Map lbConfig = (Map) 
JsonParser.parse(lbConfigRaw); - fallbackLb.handleResolvedAddresses(ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes(Attributes.newBuilder().set(ATTR_LOAD_BALANCING_CONFIG, lbConfig).build()) - .build()); - - verify(balancer.get()).handleResolvedAddresses( - ResolvedAddresses.newBuilder() - .setAddresses(eags) - .setAttributes( - Attributes.newBuilder() - .set(ATTR_LOAD_BALANCING_CONFIG, ImmutableMap.of("grpclb_option", "yes")) - .build()) - .build()); - } } From e8066ccf4ad1ec602098f57c4957bf4346fd3d5b Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Tue, 25 Feb 2020 18:04:29 -0800 Subject: [PATCH 77/86] xds: improve logging for xDS workflow (#6748) This PR tries to improve observability of xDS workflow for some extent. Users can configure Java logger `io.grpc.xds` (or `io.grpc.xds.XdsLogger`) level to enable different verbosities of log messages. Verbosity of logging: - FINE: mostly nothing useless there is something abnormal happens such xDS RPC stream closed. - FINER: informative log messages showing the main xDS workflow happening under the hood. - FINEST: verbose log messages for debugging purposes, original RPC messages and data types are printed. 
--- .../main/java/io/grpc/xds/Bootstrapper.java | 32 +- .../java/io/grpc/xds/CdsLoadBalancer.java | 70 +++-- .../java/io/grpc/xds/EdsLoadBalancer.java | 108 +++++-- .../java/io/grpc/xds/LoadReportClient.java | 42 ++- .../main/java/io/grpc/xds/LocalityStore.java | 66 ++++- xds/src/main/java/io/grpc/xds/XdsClient.java | 47 ++- .../main/java/io/grpc/xds/XdsClientImpl.java | 278 ++++++++++++------ xds/src/main/java/io/grpc/xds/XdsLogger.java | 109 +++++++ .../java/io/grpc/xds/XdsNameResolver.java | 14 + .../java/io/grpc/xds/CdsLoadBalancerTest.java | 19 -- .../java/io/grpc/xds/EdsLoadBalancerTest.java | 8 +- .../io/grpc/xds/LoadReportClientTest.java | 3 + .../java/io/grpc/xds/LocalityStoreTest.java | 4 +- 13 files changed, 589 insertions(+), 211 deletions(-) create mode 100644 xds/src/main/java/io/grpc/xds/XdsLogger.java diff --git a/xds/src/main/java/io/grpc/xds/Bootstrapper.java b/xds/src/main/java/io/grpc/xds/Bootstrapper.java index 54eaff148f3..3b3c38cc150 100644 --- a/xds/src/main/java/io/grpc/xds/Bootstrapper.java +++ b/xds/src/main/java/io/grpc/xds/Bootstrapper.java @@ -27,6 +27,7 @@ import io.grpc.internal.GrpcUtil; import io.grpc.internal.JsonParser; import io.grpc.internal.JsonUtil; +import io.grpc.xds.XdsLogger.XdsLogLevel; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; @@ -44,6 +45,7 @@ @Internal public abstract class Bootstrapper { + private static final String LOG_PREFIX = "xds-bootstrap"; private static final String BOOTSTRAP_PATH_SYS_ENV_VAR = "GRPC_XDS_BOOTSTRAP"; private static final Bootstrapper DEFAULT_INSTANCE = new Bootstrapper() { @@ -54,6 +56,9 @@ public BootstrapInfo readBootstrap() throws IOException { throw new IOException("Environment variable " + BOOTSTRAP_PATH_SYS_ENV_VAR + " not defined."); } + XdsLogger + .withPrefix(LOG_PREFIX) + .log(XdsLogLevel.INFO, BOOTSTRAP_PATH_SYS_ENV_VAR + "={0}", filePath); return parseConfig( new String(Files.readAllBytes(Paths.get(filePath)), 
StandardCharsets.UTF_8)); } @@ -70,20 +75,25 @@ public static Bootstrapper getInstance() { @VisibleForTesting static BootstrapInfo parseConfig(String rawData) throws IOException { + XdsLogger logger = XdsLogger.withPrefix(LOG_PREFIX); + logger.log(XdsLogLevel.INFO, "Reading bootstrap information"); @SuppressWarnings("unchecked") Map rawBootstrap = (Map) JsonParser.parse(rawData); + logger.log(XdsLogLevel.DEBUG, "Bootstrap configuration:\n{0}", rawBootstrap); List servers = new ArrayList<>(); List rawServerConfigs = JsonUtil.getList(rawBootstrap, "xds_servers"); if (rawServerConfigs == null) { throw new IOException("Invalid bootstrap: 'xds_servers' does not exist."); } + logger.log(XdsLogLevel.INFO, "Configured with {0} xDS servers", rawServerConfigs.size()); List> serverConfigList = JsonUtil.checkObjectList(rawServerConfigs); for (Map serverConfig : serverConfigList) { String serverUri = JsonUtil.getString(serverConfig, "server_uri"); if (serverUri == null) { throw new IOException("Invalid bootstrap: 'xds_servers' contains unknown server."); } + logger.log(XdsLogLevel.INFO, "xDS server URI: {0}", serverUri); List channelCredsOptions = new ArrayList<>(); List rawChannelCredsList = JsonUtil.getList(serverConfig, "channel_creds"); // List of channel creds is optional. 
@@ -95,6 +105,7 @@ static BootstrapInfo parseConfig(String rawData) throws IOException { throw new IOException("Invalid bootstrap: 'xds_servers' contains server with " + "unknown type 'channel_creds'."); } + logger.log(XdsLogLevel.INFO, "Channel credentials option: {0}", type); ChannelCreds creds = new ChannelCreds(type, JsonUtil.getObject(channelCreds, "config")); channelCredsOptions.add(creds); } @@ -107,16 +118,21 @@ static BootstrapInfo parseConfig(String rawData) throws IOException { if (rawNode != null) { String id = JsonUtil.getString(rawNode, "id"); if (id != null) { + logger.log(XdsLogLevel.INFO, "Node id: {0}", id); nodeBuilder.setId(id); } String cluster = JsonUtil.getString(rawNode, "cluster"); if (cluster != null) { + logger.log(XdsLogLevel.INFO, "Node cluster: {0}", cluster); nodeBuilder.setCluster(cluster); } Map metadata = JsonUtil.getObject(rawNode, "metadata"); if (metadata != null) { Struct.Builder structBuilder = Struct.newBuilder(); for (Map.Entry entry : metadata.entrySet()) { + logger.log( + XdsLogLevel.INFO, + "Node metadata field {0}: {1}", entry.getKey(), entry.getValue()); structBuilder.putFields(entry.getKey(), convertToValue(entry.getValue())); } nodeBuilder.setMetadata(structBuilder); @@ -125,18 +141,26 @@ static BootstrapInfo parseConfig(String rawData) throws IOException { if (rawLocality != null) { Locality.Builder localityBuilder = Locality.newBuilder(); if (rawLocality.containsKey("region")) { - localityBuilder.setRegion(JsonUtil.getString(rawLocality, "region")); + String region = JsonUtil.getString(rawLocality, "region"); + logger.log(XdsLogLevel.INFO, "Locality region: {0}", region); + localityBuilder.setRegion(region); } if (rawLocality.containsKey("zone")) { - localityBuilder.setZone(JsonUtil.getString(rawLocality, "zone")); + String zone = JsonUtil.getString(rawLocality, "zone"); + logger.log(XdsLogLevel.INFO, "Locality zone: {0}", zone); + localityBuilder.setZone(zone); } if (rawLocality.containsKey("sub_zone")) { - 
localityBuilder.setSubZone(JsonUtil.getString(rawLocality, "sub_zone")); + String subZone = JsonUtil.getString(rawLocality, "sub_zone"); + logger.log(XdsLogLevel.INFO, "Locality sub_zone: {0}", subZone); + localityBuilder.setSubZone(subZone); } nodeBuilder.setLocality(localityBuilder); } } - nodeBuilder.setBuildVersion(GrpcUtil.getGrpcBuildVersion()); + String buildVersion = GrpcUtil.getGrpcBuildVersion(); + logger.log(XdsLogLevel.INFO, "Build version: {0}", buildVersion); + nodeBuilder.setBuildVersion(buildVersion); return new BootstrapInfo(servers, nodeBuilder.build()); } diff --git a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java index eedea985856..db821739067 100644 --- a/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/CdsLoadBalancer.java @@ -17,15 +17,15 @@ package io.grpc.xds; import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; import static io.grpc.ConnectivityState.TRANSIENT_FAILURE; import static io.grpc.xds.EdsLoadBalancerProvider.EDS_POLICY_NAME; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; -import io.grpc.ChannelLogger; -import io.grpc.ChannelLogger.ChannelLogLevel; import io.grpc.EquivalentAddressGroup; +import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerRegistry; import io.grpc.Status; @@ -37,6 +37,7 @@ import io.grpc.xds.XdsClient.ClusterUpdate; import io.grpc.xds.XdsClient.ClusterWatcher; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; +import io.grpc.xds.XdsLogger.XdsLogLevel; import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; import io.grpc.xds.internal.sds.SslContextProvider; import io.grpc.xds.internal.sds.TlsContextManager; @@ -50,11 +51,10 @@ /** * Load balancer for cds_experimental LB policy. 
*/ -final class CdsLoadBalancer extends LoadBalancer { - private final ChannelLogger channelLogger; +public final class CdsLoadBalancer extends LoadBalancer { + private final XdsLogger logger; private final LoadBalancerRegistry lbRegistry; private final GracefulSwitchLoadBalancer switchingLoadBalancer; - private final Helper helper; private final TlsContextManager tlsContextManager; // The following fields become non-null once handleResolvedAddresses() successfully. @@ -74,38 +74,29 @@ final class CdsLoadBalancer extends LoadBalancer { @VisibleForTesting CdsLoadBalancer(Helper helper, LoadBalancerRegistry lbRegistry, TlsContextManager tlsContextManager) { - this.helper = helper; - this.channelLogger = helper.getChannelLogger(); + checkNotNull(helper, "helper"); this.lbRegistry = lbRegistry; this.switchingLoadBalancer = new GracefulSwitchLoadBalancer(helper); this.tlsContextManager = tlsContextManager; + logger = XdsLogger.withLogId(InternalLogId.allocate("cds-lb", helper.getAuthority())); + logger.log(XdsLogLevel.INFO, "Created"); } @Override public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - channelLogger.log(ChannelLogLevel.DEBUG, "Received ResolvedAddresses {0}", resolvedAddresses); + logger.log(XdsLogLevel.DEBUG, "Received resolution result: {0}", resolvedAddresses); if (xdsClientPool == null) { xdsClientPool = resolvedAddresses.getAttributes().get(XdsAttributes.XDS_CLIENT_POOL); - if (xdsClientPool == null) { - // TODO(zdapeng): create a new xdsClient from bootstrap if no one exists. 
- helper.updateBalancingState( - TRANSIENT_FAILURE, - new ErrorPicker(Status.UNAVAILABLE.withDescription( - "XDS_CLIENT_POOL attributes not available from resolve addresses"))); - return; - } + checkNotNull(xdsClientPool, "missing xDS client pool"); xdsClient = xdsClientPool.getObject(); } Object lbConfig = resolvedAddresses.getLoadBalancingPolicyConfig(); - if (!(lbConfig instanceof CdsConfig)) { - helper.updateBalancingState( - TRANSIENT_FAILURE, - new ErrorPicker(Status.UNAVAILABLE.withDescription( - "Load balancing config '" + lbConfig + "' is not a CdsConfig"))); - return; - } + checkNotNull(lbConfig, "missing CDS lb config"); CdsConfig newCdsConfig = (CdsConfig) lbConfig; + logger.log( + XdsLogLevel.INFO, + "Received CDS lb config: cluster={0}", newCdsConfig.name); // If cluster is changed, do a graceful switch. if (!newCdsConfig.name.equals(clusterName)) { @@ -118,7 +109,7 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { @Override public void handleNameResolutionError(Status error) { - channelLogger.log(ChannelLogLevel.ERROR, "Name resolution error: {0}", error); + logger.log(XdsLogLevel.WARNING, "Received name resolution error: {0}", error); switchingLoadBalancer.handleNameResolutionError(error); } @@ -129,8 +120,7 @@ public boolean canHandleEmptyAddressListFromNameResolution() { @Override public void shutdown() { - channelLogger.log(ChannelLogLevel.DEBUG, "CDS load balancer is shutting down"); - + logger.log(XdsLogLevel.INFO, "Shutdown"); switchingLoadBalancer.shutdown(); if (xdsClientPool != null) { xdsClientPool.returnObject(xdsClient); @@ -191,6 +181,10 @@ public void shutdown() { clusterWatcher.edsBalancer.shutdown(); } xdsClient.cancelClusterDataWatch(clusterName, clusterWatcher); + logger.log( + XdsLogLevel.INFO, + "Cancelled cluster watcher on {0} with xDS client {1}", + clusterName, xdsClient); } } @@ -198,6 +192,10 @@ public void shutdown() { public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { if 
(clusterWatcher == null) { clusterWatcher = new ClusterWatcherImpl(helper, resolvedAddresses); + logger.log( + XdsLogLevel.INFO, + "Start cluster watcher on {0} with xDS client {1}", + clusterName, xdsClient); xdsClient.watchClusterData(clusterName, clusterWatcher); } } @@ -274,11 +272,16 @@ private final class ClusterWatcherImpl implements ClusterWatcher { @Override public void onClusterChanged(ClusterUpdate newUpdate) { - channelLogger.log( - ChannelLogLevel.DEBUG, "CDS load balancer received a cluster update: {0}", newUpdate); + if (logger.isLoggable(XdsLogLevel.INFO)) { + logger.log( + XdsLogLevel.INFO, + "Received cluster update from xDS client {0}: " + + "cluster_name={1}, eds_service_name={2}, lb_policy={3}, report_load={4}", + xdsClient, newUpdate.getClusterName(), newUpdate.getEdsServiceName(), + newUpdate.getLbPolicy(), newUpdate.getLrsServerName() != null); + } checkArgument( - newUpdate.getLbPolicy().equals("round_robin"), - "The load balancing policy in ClusterUpdate '%s' is not supported", newUpdate); + newUpdate.getLbPolicy().equals("round_robin"), "can only support round_robin policy"); final XdsConfig edsConfig = new XdsConfig( @@ -318,7 +321,12 @@ private void updateSslContextProvider(UpstreamTlsContext newUpstreamTlsContext) @Override public void onError(Status error) { - channelLogger.log(ChannelLogLevel.ERROR, "CDS load balancer received an error: {0}", error); + logger.log( + XdsLogLevel.WARNING, + "Received error from xDS client {0}: {1}: {2}", + xdsClient, + error.getCode(), + error.getDescription()); // Go into TRANSIENT_FAILURE if we have not yet created the child // policy (i.e., we have not yet received valid data for the cluster). 
Otherwise, diff --git a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java index 9ad0f86fc14..d80043ce4e4 100644 --- a/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java +++ b/xds/src/main/java/io/grpc/xds/EdsLoadBalancer.java @@ -24,8 +24,7 @@ import com.google.common.collect.ImmutableMap; import io.envoyproxy.envoy.api.v2.core.Node; import io.grpc.Attributes; -import io.grpc.ChannelLogger; -import io.grpc.ChannelLogger.ChannelLogLevel; +import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancerRegistry; import io.grpc.Status; @@ -45,6 +44,7 @@ import io.grpc.xds.XdsClient.XdsChannelFactory; import io.grpc.xds.XdsClient.XdsClientFactory; import io.grpc.xds.XdsLoadBalancerProvider.XdsConfig; +import io.grpc.xds.XdsLogger.XdsLogLevel; import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; import java.util.List; import java.util.Map; @@ -54,7 +54,8 @@ /** Load balancer for the EDS LB policy. */ final class EdsLoadBalancer extends LoadBalancer { - private final ChannelLogger channelLogger; + private final InternalLogId logId; + private final XdsLogger logger; private final ResourceUpdateCallback resourceUpdateCallback; private final GracefulSwitchLoadBalancer switchingLoadBalancer; private final LoadBalancerRegistry lbRegistry; @@ -75,8 +76,8 @@ final class EdsLoadBalancer extends LoadBalancer { EdsLoadBalancer(Helper edsLbHelper, ResourceUpdateCallback resourceUpdateCallback) { this( - checkNotNull(edsLbHelper, "edsLbHelper"), - checkNotNull(resourceUpdateCallback, "resourceUpdateCallback"), + edsLbHelper, + resourceUpdateCallback, LoadBalancerRegistry.getDefaultRegistry(), LocalityStoreFactory.getInstance(), Bootstrapper.getInstance(), @@ -91,29 +92,40 @@ final class EdsLoadBalancer extends LoadBalancer { LocalityStoreFactory localityStoreFactory, Bootstrapper bootstrapper, XdsChannelFactory channelFactory) { - this.edsLbHelper = edsLbHelper; - this.channelLogger = 
edsLbHelper.getChannelLogger(); - this.resourceUpdateCallback = resourceUpdateCallback; - this.lbRegistry = lbRegistry; - this.localityStoreFactory = localityStoreFactory; + this.edsLbHelper = checkNotNull(edsLbHelper, "edsLbHelper"); + this.resourceUpdateCallback = checkNotNull(resourceUpdateCallback, "resourceUpdateCallback"); + this.lbRegistry = checkNotNull(lbRegistry, "lbRegistry"); + this.localityStoreFactory = checkNotNull(localityStoreFactory, "localityStoreFactory"); + this.bootstrapper = checkNotNull(bootstrapper, "bootstrapper"); + this.channelFactory = checkNotNull(channelFactory, "channelFactory"); this.switchingLoadBalancer = new GracefulSwitchLoadBalancer(edsLbHelper); - this.bootstrapper = bootstrapper; - this.channelFactory = channelFactory; + logId = InternalLogId.allocate("eds-lb", edsLbHelper.getAuthority()); + logger = XdsLogger.withLogId(logId); + logger.log(XdsLogLevel.INFO, "Created"); } @Override public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { - channelLogger.log(ChannelLogLevel.DEBUG, "Received ResolvedAddresses {0}", resolvedAddresses); - + logger.log(XdsLogLevel.DEBUG, "Received resolution result: {0}", resolvedAddresses); Object lbConfig = resolvedAddresses.getLoadBalancingPolicyConfig(); - if (!(lbConfig instanceof XdsConfig)) { + if (lbConfig == null) { edsLbHelper.updateBalancingState( TRANSIENT_FAILURE, - new ErrorPicker(Status.UNAVAILABLE.withDescription( - "Load balancing config '" + lbConfig + "' is not an XdsConfig"))); + new ErrorPicker(Status.UNAVAILABLE.withDescription("Missing EDS lb config"))); return; } XdsConfig newXdsConfig = (XdsConfig) lbConfig; + if (logger.isLoggable(XdsLogLevel.INFO)) { + logger.log( + XdsLogLevel.INFO, + "Received EDS lb config: cluster={0}, child_policy={1}, fallback_policy={2}, " + + "eds_service_name={3}, report_load={4}", + newXdsConfig.cluster, + newXdsConfig.childPolicy != null ? newXdsConfig.childPolicy.getPolicyName() : "", + newXdsConfig.fallbackPolicy != null ? 
newXdsConfig.fallbackPolicy.getPolicyName() : "", + newXdsConfig.edsServiceName, + newXdsConfig.lrsServerName != null); + } if (xdsClientPool == null) { // Init xdsClientPool and xdsClient. @@ -137,7 +149,8 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { } catch (Exception e) { edsLbHelper.updateBalancingState( TRANSIENT_FAILURE, - new ErrorPicker(Status.UNAVAILABLE.withCause(e))); + new ErrorPicker( + Status.UNAVAILABLE.withDescription("Failed to bootstrap").withCause(e))); return; } @@ -167,6 +180,8 @@ XdsClient createXdsClient() { } }; xdsClientPool = new RefCountedXdsClientObjectPool(xdsClientFactory); + } else { + logger.log(XdsLogLevel.INFO, "Use xDS client from channel"); } xdsClient = xdsClientPool.getObject(); } @@ -188,7 +203,7 @@ XdsClient createXdsClient() { @Override public void handleNameResolutionError(Status error) { - channelLogger.log(ChannelLogLevel.ERROR, "Name resolution error: {0}", error); + logger.log(XdsLogLevel.WARNING, "Received name resolution error: {0}", error); // This will go into TRANSIENT_FAILURE if we have not yet received any endpoint update and // otherwise keep running with the data we had previously. switchingLoadBalancer.handleNameResolutionError(error); @@ -201,7 +216,7 @@ public boolean canHandleEmptyAddressListFromNameResolution() { @Override public void shutdown() { - channelLogger.log(ChannelLogLevel.DEBUG, "EDS load balancer is shutting down"); + logger.log(XdsLogLevel.INFO, "Shutdown"); switchingLoadBalancer.shutdown(); if (xdsClient != null) { xdsClient = xdsClientPool.returnObject(xdsClient); @@ -253,8 +268,14 @@ final class ClusterEndpointsBalancer extends LoadBalancer { ClusterEndpointsBalancer(Helper helper) { this.helper = helper; resourceName = clusterServiceName != null ? 
clusterServiceName : clusterName; - localityStore = localityStoreFactory.newLocalityStore(helper, lbRegistry, loadStatsStore); + localityStore = + localityStoreFactory.newLocalityStore(logId, helper, lbRegistry, loadStatsStore); endpointWatcher = new EndpointWatcherImpl(localityStore); + logger.log( + XdsLogLevel.INFO, + "Start endpoint watcher on {0} with xDS client {1}", + resourceName, + xdsClient); xdsClient.watchEndpointData(resourceName, endpointWatcher); } @@ -263,15 +284,24 @@ public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) { XdsConfig config = (XdsConfig) resolvedAddresses.getLoadBalancingPolicyConfig(); if (config.lrsServerName != null) { if (!config.lrsServerName.equals("")) { - throw new AssertionError( - "Can only report load to the same management server"); + throw new AssertionError("Can only report load to the same management server"); } if (!isReportingLoad) { + logger.log( + XdsLogLevel.INFO, + "Start reporting loads for cluster: {0}, cluster_service: {1}", + clusterName, + clusterServiceName); xdsClient.reportClientStats(clusterName, clusterServiceName, loadStatsStore); isReportingLoad = true; } } else { if (isReportingLoad) { + logger.log( + XdsLogLevel.INFO, + "Stop reporting loads for cluster: {0}, cluster_service: {1}", + clusterName, + clusterServiceName); xdsClient.cancelClientStatsReport(clusterName, clusterServiceName); isReportingLoad = false; } @@ -296,11 +326,21 @@ public boolean canHandleEmptyAddressListFromNameResolution() { @Override public void shutdown() { if (isReportingLoad) { + logger.log( + XdsLogLevel.INFO, + "Stop reporting loads for cluster: {0}, cluster_service: {1}", + clusterName, + clusterServiceName); xdsClient.cancelClientStatsReport(clusterName, clusterServiceName); isReportingLoad = false; } localityStore.reset(); xdsClient.cancelEndpointDataWatch(resourceName, endpointWatcher); + logger.log( + XdsLogLevel.INFO, + "Cancelled endpoint watcher on {0} with xDS client {1}", + resourceName, + 
xdsClient); } } } @@ -328,10 +368,17 @@ private final class EndpointWatcherImpl implements EndpointWatcher { @Override public void onEndpointChanged(EndpointUpdate endpointUpdate) { - channelLogger.log( - ChannelLogLevel.DEBUG, - "EDS load balancer received an endpoint update: {0}", - endpointUpdate); + logger.log(XdsLogLevel.DEBUG, "Received endpoint update: {0}", endpointUpdate); + if (logger.isLoggable(XdsLogLevel.INFO)) { + logger.log( + XdsLogLevel.INFO, + "Received endpoint update from xDS client {0}: cluster_name={1}, {2} localities, " + + "{3} drop categories", + xdsClient, + endpointUpdate.getClusterName(), + endpointUpdate.getLocalityLbEndpointsMap().size(), + endpointUpdate.getDropPolicies().size()); + } if (!firstEndpointUpdateReceived) { firstEndpointUpdateReceived = true; @@ -365,7 +412,12 @@ public void onEndpointChanged(EndpointUpdate endpointUpdate) { @Override public void onError(Status error) { - channelLogger.log(ChannelLogLevel.ERROR, "EDS load balancer received an error: {0}", error); + logger.log( + XdsLogLevel.WARNING, + "Received error from xDS client {0}: {1}: {2}", + xdsClient, + error.getCode(), + error.getDescription()); resourceUpdateCallback.onError(); // If we get an error before getting any valid result, we should put the channel in // TRANSIENT_FAILURE; if they get an error after getting a valid result, we keep using the diff --git a/xds/src/main/java/io/grpc/xds/LoadReportClient.java b/xds/src/main/java/io/grpc/xds/LoadReportClient.java index 8fb069c485a..525b645d610 100644 --- a/xds/src/main/java/io/grpc/xds/LoadReportClient.java +++ b/xds/src/main/java/io/grpc/xds/LoadReportClient.java @@ -31,20 +31,20 @@ import io.envoyproxy.envoy.service.load_stats.v2.LoadReportingServiceGrpc; import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsRequest; import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsResponse; +import io.grpc.InternalLogId; import io.grpc.ManagedChannel; import io.grpc.Status; import 
io.grpc.SynchronizationContext; import io.grpc.SynchronizationContext.ScheduledHandle; import io.grpc.internal.BackoffPolicy; import io.grpc.stub.StreamObserver; +import io.grpc.xds.XdsLogger.XdsLogLevel; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import java.util.logging.Level; -import java.util.logging.Logger; import javax.annotation.Nullable; import javax.annotation.concurrent.NotThreadSafe; @@ -54,11 +54,10 @@ */ @NotThreadSafe final class LoadReportClient { - private static final Logger logger = Logger.getLogger(XdsClientImpl.class.getName()); - @VisibleForTesting static final String TARGET_NAME_METADATA_KEY = "PROXYLESS_CLIENT_HOSTNAME"; + private final XdsLogger logger; private final ManagedChannel channel; private final Node node; private final SynchronizationContext syncContext; @@ -81,6 +80,7 @@ final class LoadReportClient { private LoadReportCallback callback; LoadReportClient( + InternalLogId logId, String targetName, ManagedChannel channel, Node node, @@ -104,6 +104,8 @@ final class LoadReportClient { Value.newBuilder().setStringValue(targetName).build()) .build(); this.node = node.toBuilder().setMetadata(metadata).build(); + String logPrefix = checkNotNull(logId, "logId").toString().concat("-lrs-client"); + logger = XdsLogger.withPrefix(logPrefix); } /** @@ -150,6 +152,9 @@ void addLoadStatsStore( || !loadStatsStoreMap.get(clusterName).containsKey(clusterServiceName), "load stats for cluster: %s, cluster service: %s already exists", clusterName, clusterServiceName); + logger.log( + XdsLogLevel.INFO, + "Add load stats for cluster: {0}, cluster_service: {1}", clusterName, clusterServiceName); if (!loadStatsStoreMap.containsKey(clusterName)) { loadStatsStoreMap.put(clusterName, new HashMap()); } @@ -166,6 +171,11 @@ void removeLoadStatsStore(String clusterName, @Nullable String clusterServiceNam && 
loadStatsStoreMap.get(clusterName).containsKey(clusterServiceName), "load stats for cluster: %s, cluster service: %s does not exist", clusterName, clusterServiceName); + logger.log( + XdsLogLevel.INFO, + "Remove load stats for cluster: {0}, cluster_service: {1}", + clusterName, + clusterServiceName); Map clusterLoadStatsStores = loadStatsStoreMap.get(clusterName); clusterLoadStatsStores.remove(clusterServiceName); if (clusterLoadStatsStores.isEmpty()) { @@ -233,7 +243,7 @@ void start() { .setNode(node) .build(); lrsRequestWriter.onNext(initRequest); - logger.log(Level.FINE, "Initial LRS request sent: {0}", initRequest); + logger.log(XdsLogLevel.DEBUG, "Initial LRS request sent:\n{0}", initRequest); } @Override @@ -251,8 +261,7 @@ public void onError(final Throwable t) { syncContext.execute(new Runnable() { @Override public void run() { - handleStreamClosed(Status.fromThrowable(t) - .augmentDescription("Stream to XDS management server had an error")); + handleStreamClosed(Status.fromThrowable(t)); } }); } @@ -263,7 +272,7 @@ public void onCompleted() { @Override public void run() { handleStreamClosed( - Status.UNAVAILABLE.withDescription("Stream to XDS management server was closed")); + Status.UNAVAILABLE.withDescription("Closed by server")); } }); } @@ -287,7 +296,7 @@ private void sendLoadReport() { } LoadStatsRequest request = requestBuilder.build(); lrsRequestWriter.onNext(request); - logger.log(Level.FINE, "Sent LoadStatsRequest\n{0}", request); + logger.log(XdsLogLevel.DEBUG, "Sent LoadStatsRequest\n{0}", request); scheduleNextLoadReport(); } @@ -310,18 +319,22 @@ private void handleResponse(LoadStatsResponse response) { } if (!initialResponseReceived) { - logger.log(Level.FINE, "Received LRS initial response: {0}", response); + logger.log(XdsLogLevel.DEBUG, "Received LRS initial response:\n{0}", response); initialResponseReceived = true; } else { - logger.log(Level.FINE, "Received an LRS response: {0}", response); + logger.log(XdsLogLevel.DEBUG, "Received LRS 
response:\n{0}", response); } long interval = Durations.toNanos(response.getLoadReportingInterval()); if (interval != loadReportIntervalNano) { + logger.log(XdsLogLevel.INFO, "Update load reporting interval to {0} ns", interval); loadReportIntervalNano = interval; callback.onReportResponse(loadReportIntervalNano); } if (clusterNames.size() != response.getClustersCount() || !clusterNames.containsAll(response.getClustersList())) { + logger.log( + XdsLogLevel.INFO, + "Update load reporting clusters to {0}", response.getClustersList()); clusterNames.clear(); clusterNames.addAll(response.getClustersList()); } @@ -333,6 +346,10 @@ private void handleStreamClosed(Status status) { if (closed) { return; } + logger.log( + XdsLogLevel.ERROR, + "LRS stream closed with status {0}: {1}. Cause: {2}", + status.getCode(), status.getDescription(), status.getCause()); closed = true; cleanUp(); @@ -350,8 +367,7 @@ private void handleStreamClosed(Status status) { delayNanos = lrsRpcRetryPolicy.nextBackoffNanos() - retryStopwatch.elapsed(TimeUnit.NANOSECONDS); } - logger.log(Level.FINE, "LRS stream closed, backoff in {0} second(s)", - TimeUnit.NANOSECONDS.toSeconds(delayNanos <= 0 ? 
0 : delayNanos)); + logger.log(XdsLogLevel.INFO, "Retry LRS stream in {0} ns", delayNanos); if (delayNanos <= 0) { startLrsRpc(); } else { diff --git a/xds/src/main/java/io/grpc/xds/LocalityStore.java b/xds/src/main/java/io/grpc/xds/LocalityStore.java index 2527c0dc2ce..d66203f116a 100644 --- a/xds/src/main/java/io/grpc/xds/LocalityStore.java +++ b/xds/src/main/java/io/grpc/xds/LocalityStore.java @@ -29,6 +29,7 @@ import com.google.common.collect.ImmutableSet; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; +import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; import io.grpc.LoadBalancer.PickResult; @@ -50,6 +51,7 @@ import io.grpc.xds.InterLocalityPicker.WeightedChildPicker; import io.grpc.xds.OrcaOobUtil.OrcaReportingConfig; import io.grpc.xds.OrcaOobUtil.OrcaReportingHelperWrapper; +import io.grpc.xds.XdsLogger.XdsLogLevel; import io.grpc.xds.XdsSubchannelPickers.ErrorPicker; import java.util.ArrayList; import java.util.Collections; @@ -82,8 +84,11 @@ abstract class LocalityStoreFactory { new LocalityStoreFactory() { @Override LocalityStore newLocalityStore( - Helper helper, LoadBalancerRegistry lbRegistry, LoadStatsStore loadStatsStore) { - return new LocalityStoreImpl(helper, lbRegistry, loadStatsStore); + InternalLogId logId, + Helper helper, + LoadBalancerRegistry lbRegistry, + LoadStatsStore loadStatsStore) { + return new LocalityStoreImpl(logId, helper, lbRegistry, loadStatsStore); } }; @@ -92,13 +97,17 @@ static LocalityStoreFactory getInstance() { } abstract LocalityStore newLocalityStore( - Helper helper, LoadBalancerRegistry lbRegistry, LoadStatsStore loadStatsStore); + InternalLogId logId, + Helper helper, + LoadBalancerRegistry lbRegistry, + LoadStatsStore loadStatsStore); } final class LocalityStoreImpl implements LocalityStore { private static final String ROUND_ROBIN = "round_robin"; private static final long DELAYED_DELETION_TIMEOUT_MINUTES = 15L; + private final XdsLogger logger; 
private final Helper helper; private final PickerFactory pickerFactory; private final LoadBalancerProvider loadBalancerProvider; @@ -114,13 +123,24 @@ final class LocalityStoreImpl implements LocalityStore { private long metricsReportIntervalNano = -1; LocalityStoreImpl( - Helper helper, LoadBalancerRegistry lbRegistry, LoadStatsStore loadStatsStore) { - this(helper, pickerFactoryImpl, lbRegistry, ThreadSafeRandom.ThreadSafeRandomImpl.instance, - loadStatsStore, OrcaPerRequestUtil.getInstance(), OrcaOobUtil.getInstance()); + InternalLogId logId, + Helper helper, + LoadBalancerRegistry lbRegistry, + LoadStatsStore loadStatsStore) { + this( + logId, + helper, + pickerFactoryImpl, + lbRegistry, + ThreadSafeRandom.ThreadSafeRandomImpl.instance, + loadStatsStore, + OrcaPerRequestUtil.getInstance(), + OrcaOobUtil.getInstance()); } @VisibleForTesting LocalityStoreImpl( + InternalLogId logId, Helper helper, PickerFactory pickerFactory, LoadBalancerRegistry lbRegistry, @@ -137,6 +157,7 @@ final class LocalityStoreImpl implements LocalityStore { this.loadStatsStore = checkNotNull(loadStatsStore, "loadStatsStore"); this.orcaPerRequestUtil = checkNotNull(orcaPerRequestUtil, "orcaPerRequestUtil"); this.orcaOobUtil = checkNotNull(orcaOobUtil, "orcaOobUtil"); + logger = XdsLogger.withLogId(checkNotNull(logId, "logId")); } @VisibleForTesting // Introduced for testing only. 
@@ -144,7 +165,7 @@ interface PickerFactory { SubchannelPicker picker(List childPickers); } - private static final class DroppablePicker extends SubchannelPicker { + private final class DroppablePicker extends SubchannelPicker { final List dropOverloads; final SubchannelPicker delegate; @@ -165,6 +186,9 @@ public PickResult pickSubchannel(PickSubchannelArgs args) { for (DropOverload dropOverload : dropOverloads) { int rand = random.nextInt(1000_000); if (rand < dropOverload.getDropsPerMillion()) { + logger.log( + XdsLogLevel.INFO, + "Drop request with category: {0}", dropOverload.getCategory()); loadStatsStore.recordDroppedRequest(dropOverload.getCategory()); return PickResult.withDrop(Status.UNAVAILABLE.withDescription( "dropped by loadbalancer: " + dropOverload.toString())); @@ -205,10 +229,8 @@ public void reset() { priorityManager.reset(); } - // This is triggered by EDS response. @Override - public void updateLocalityStore( - final Map localityInfoMap) { + public void updateLocalityStore(final Map localityInfoMap) { Set newLocalities = localityInfoMap.keySet(); // TODO: put endPointWeights into attributes for WRR. @@ -338,16 +360,17 @@ private void updatePicker( * State of a single Locality. 
*/ // TODO(zdapeng): rename it to LocalityLbState - static final class LocalityLbInfo { + private final class LocalityLbInfo { + final Locality locality; final LoadBalancer childBalancer; final ChildHelper childHelper; @Nullable private ScheduledHandle delayedDeletionTimer; - LocalityLbInfo( - LoadBalancer childBalancer, ChildHelper childHelper) { + LocalityLbInfo(Locality locality, LoadBalancer childBalancer, ChildHelper childHelper) { + this.locality = checkNotNull(locality, "locality"); this.childBalancer = checkNotNull(childBalancer, "childBalancer"); this.childHelper = checkNotNull(childHelper, "childHelper"); } @@ -358,6 +381,7 @@ void shutdown() { delayedDeletionTimer = null; } childBalancer.shutdown(); + logger.log(XdsLogLevel.INFO, "Shut down child balancer for locality {0}", locality); } void reactivate() { @@ -395,7 +419,9 @@ public void updateBalancingState(ConnectivityState newState, final SubchannelPicker newPicker) { checkNotNull(newState, "newState"); checkNotNull(newPicker, "newPicker"); - + logger.log( + XdsLogLevel.INFO, + "Update load balancing state for locality {0} to {1}", locality, newState); currentChildState = newState; currentChildPicker = new LoadRecordingSubchannelPicker(counter, @@ -459,6 +485,13 @@ void updateLocalities(Map localityInfoMap) { } priorityTable.get(priority).add(newLocality); } + if (logger.isLoggable(XdsLogLevel.INFO)) { + for (int i = 0; i < priorityTable.size(); i++) { + logger.log( + XdsLogLevel.INFO, + "Priority {0} contains localities: {1}", i, priorityTable.get(i)); + } + } currentPriority = -1; failOver(); @@ -491,7 +524,7 @@ void updatePriorityState(int priority) { new WeightedChildPicker(localityInfoMap.get(l).getLocalityWeight(), childPicker)); } } - + logger.log(XdsLogLevel.INFO, "Update priority {0} state to {1}", priority, overallState); if (priority == currentPriority) { updatePicker(overallState, childPickers); if (overallState == READY) { @@ -560,6 +593,7 @@ private void failOver() { class FailOverTask 
implements Runnable { @Override public void run() { + logger.log(XdsLogLevel.INFO, "Failing over to priority {0}", currentPriority + 1); failOverTimer = null; failOver(); } @@ -573,11 +607,13 @@ public void run() { } private void initLocality(Locality locality) { + logger.log(XdsLogLevel.INFO, "Create child balancer for locality {0}", locality); ChildHelper childHelper = new ChildHelper(locality, loadStatsStore.getLocalityCounter(locality), orcaOobUtil); LocalityLbInfo localityLbInfo = new LocalityLbInfo( + locality, loadBalancerProvider.newLoadBalancer(childHelper), childHelper); localityMap.put(locality, localityLbInfo); diff --git a/xds/src/main/java/io/grpc/xds/XdsClient.java b/xds/src/main/java/io/grpc/xds/XdsClient.java index d434ec9db07..ce2a3e70e47 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClient.java +++ b/xds/src/main/java/io/grpc/xds/XdsClient.java @@ -19,6 +19,7 @@ import static com.google.common.base.Preconditions.checkArgument; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -35,6 +36,7 @@ import io.grpc.xds.EnvoyProtoData.Locality; import io.grpc.xds.EnvoyProtoData.LocalityLbEndpoints; import io.grpc.xds.EnvoyServerProtoData.Listener; +import io.grpc.xds.XdsLogger.XdsLogLevel; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; @@ -76,6 +78,15 @@ public Listener getListener() { return listener; } + @Override + public String toString() { + return + MoreObjects + .toStringHelper(this) + .add("clusterName", clusterName) + .toString(); + } + static Builder newBuilder() { return new Builder(); } @@ -167,6 +178,19 @@ UpstreamTlsContext getUpstreamTlsContext() { return upstreamTlsContext; } + @Override + public String toString() { + return + MoreObjects + .toStringHelper(this) + .add("clusterName", clusterName) + 
.add("edsServiceName", edsServiceName) + .add("lbPolicy", lbPolicy) + .add("lrsServerName", lrsServerName) + .add("upstreamTlsContext", upstreamTlsContext) + .toString(); + } + static Builder newBuilder() { return new Builder(); } @@ -282,6 +306,17 @@ public int hashCode() { return Objects.hash(clusterName, localityLbEndpointsMap, dropPolicies); } + @Override + public String toString() { + return + MoreObjects + .toStringHelper(this) + .add("clusterName", clusterName) + .add("localityLbEndpointsMap", localityLbEndpointsMap) + .add("dropPolicies", dropPolicies) + .toString(); + } + static final class Builder { private String clusterName; private Map localityLbEndpointsMap = new LinkedHashMap<>(); @@ -373,8 +408,6 @@ void watchConfigData(String hostName, int port, ConfigWatcher watcher) { /** * Registers a data watcher for the given cluster. - * - *

      Adding the same watcher for the same cluster more than once is a no-op. */ void watchClusterData(String clusterName, ClusterWatcher watcher) { } @@ -382,16 +415,12 @@ void watchClusterData(String clusterName, ClusterWatcher watcher) { /** * Unregisters the given cluster watcher, which was registered to receive updates for the * given cluster. - * - *

      Cancelling a watcher that was not registered for the given cluster is a no-op. */ void cancelClusterDataWatch(String clusterName, ClusterWatcher watcher) { } /** * Registers a data watcher for endpoints in the given cluster. - * - *

      Adding the same watcher for the same cluster more than once is a no-op. */ void watchEndpointData(String clusterName, EndpointWatcher watcher) { } @@ -399,8 +428,6 @@ void watchEndpointData(String clusterName, EndpointWatcher watcher) { /** * Unregisters the given endpoints watcher, which was registered to receive updates for * endpoints information in the given cluster. - * - *

      Cancelling a watcher that was not registered for the given cluster is a no-op. */ void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) { } @@ -497,19 +524,23 @@ abstract static class XdsChannelFactory { @Override ManagedChannel createChannel(List servers) { checkArgument(!servers.isEmpty(), "No management server provided."); + XdsLogger logger = XdsLogger.withPrefix("xds-client-channel-factory"); ServerInfo serverInfo = servers.get(0); String serverUri = serverInfo.getServerUri(); + logger.log(XdsLogLevel.INFO, "Creating channel to {0}", serverUri); List channelCredsList = serverInfo.getChannelCredentials(); ManagedChannelBuilder channelBuilder = null; // Use the first supported channel credentials configuration. // Currently, only "google_default" is supported. for (ChannelCreds creds : channelCredsList) { if (creds.getType().equals("google_default")) { + logger.log(XdsLogLevel.INFO, "Using channel credentials: google_default"); channelBuilder = GoogleDefaultChannelBuilder.forTarget(serverUri); break; } } if (channelBuilder == null) { + logger.log(XdsLogLevel.INFO, "Using default channel credentials"); channelBuilder = ManagedChannelBuilder.forTarget(serverUri); } diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index dfd787d292f..5aca5d8e387 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -43,6 +43,7 @@ import io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager; import io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2.Rds; import io.envoyproxy.envoy.service.discovery.v2.AggregatedDiscoveryServiceGrpc; +import io.grpc.InternalLogId; import io.grpc.ManagedChannel; import io.grpc.Status; import io.grpc.SynchronizationContext; @@ -54,6 +55,7 @@ import io.grpc.xds.EnvoyProtoData.Locality; import io.grpc.xds.EnvoyProtoData.LocalityLbEndpoints; import 
io.grpc.xds.LoadReportClient.LoadReportCallback; +import io.grpc.xds.XdsLogger.XdsLogLevel; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -64,12 +66,9 @@ import java.util.Set; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import java.util.logging.Level; -import java.util.logging.Logger; import javax.annotation.Nullable; final class XdsClientImpl extends XdsClient { - private static final Logger logger = Logger.getLogger(XdsClientImpl.class.getName()); // Longest time to wait, since the subscription to some resource, for concluding its absence. @VisibleForTesting @@ -88,6 +87,8 @@ final class XdsClientImpl extends XdsClient { private final MessagePrinter respPrinter = new MessagePrinter(); + private final InternalLogId logId; + private final XdsLogger logger; // Name of the target server this gRPC client is trying to talk to. private final String targetName; private final ManagedChannel channel; @@ -182,11 +183,14 @@ final class XdsClientImpl extends XdsClient { this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatch"); adsStreamRetryStopwatch = stopwatchSupplier.get(); + logId = InternalLogId.allocate("xds-client", targetName); + logger = XdsLogger.withLogId(logId); + logger.log(XdsLogLevel.INFO, "Created"); } @Override void shutdown() { - logger.log(Level.INFO, "Shutting down XdsClient"); + logger.log(XdsLogLevel.INFO, "Shutting down"); channel.shutdown(); if (adsStream != null) { adsStream.close(Status.CANCELLED.withDescription("shutdown").asException()); @@ -230,7 +234,7 @@ private void cleanUpResources() { @Override void watchConfigData(String hostName, int port, ConfigWatcher watcher) { - checkState(configWatcher == null, "ConfigWatcher is already registered"); + checkState(configWatcher == null, "watcher for %s already registered", hostName); configWatcher = 
checkNotNull(watcher, "watcher"); this.hostName = checkNotNull(hostName, "hostName"); if (port == -1) { @@ -238,6 +242,7 @@ void watchConfigData(String hostName, int port, ConfigWatcher watcher) { } else { ldsResourceName = hostName + ":" + port; } + logger.log(XdsLogLevel.INFO, "Started watching config {0}", ldsResourceName); if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { // Currently in retry backoff. return; @@ -255,21 +260,20 @@ void watchConfigData(String hostName, int port, ConfigWatcher watcher) { @Override void watchClusterData(String clusterName, ClusterWatcher watcher) { + checkNotNull(clusterName, "clusterName"); checkNotNull(watcher, "watcher"); boolean needRequest = false; if (!clusterWatchers.containsKey(clusterName)) { - logger.log(Level.FINE, "Start watching cluster {0}", clusterName); + logger.log(XdsLogLevel.INFO, "Start watching cluster {0}", clusterName); needRequest = true; clusterWatchers.put(clusterName, new HashSet()); } Set watchers = clusterWatchers.get(clusterName); - if (watchers.contains(watcher)) { - logger.log(Level.WARNING, "Watcher {0} already registered", watcher); - return; - } + checkState(!watchers.contains(watcher), "watcher for %s already registered", clusterName); watchers.add(watcher); // If local cache contains cluster information to be watched, notify the watcher immediately. 
if (absentCdsResources.contains(clusterName)) { + logger.log(XdsLogLevel.DEBUG, "Cluster resource {0} is known to be absent", clusterName); watcher.onError( Status.NOT_FOUND .withDescription( @@ -277,6 +281,7 @@ void watchClusterData(String clusterName, ClusterWatcher watcher) { return; } if (clusterNamesToClusterUpdates.containsKey(clusterName)) { + logger.log(XdsLogLevel.DEBUG, "Retrieve cluster info {0} from local cache", clusterName); watcher.onClusterChanged(clusterNamesToClusterUpdates.get(clusterName)); return; } @@ -303,13 +308,12 @@ void watchClusterData(String clusterName, ClusterWatcher watcher) { void cancelClusterDataWatch(String clusterName, ClusterWatcher watcher) { checkNotNull(watcher, "watcher"); Set watchers = clusterWatchers.get(clusterName); - if (watchers == null || !watchers.contains(watcher)) { - logger.log(Level.FINE, "Watcher {0} was not registered", watcher); - return; - } + checkState( + watchers != null && watchers.contains(watcher), + "watcher for %s was not registered", clusterName); watchers.remove(watcher); if (watchers.isEmpty()) { - logger.log(Level.FINE, "Stop watching cluster {0}", clusterName); + logger.log(XdsLogLevel.INFO, "Stop watching cluster {0}", clusterName); clusterWatchers.remove(clusterName); // Remove the corresponding CDS entry. 
absentCdsResources.remove(clusterName); @@ -336,26 +340,29 @@ void watchEndpointData(String clusterName, EndpointWatcher watcher) { checkNotNull(watcher, "watcher"); boolean needRequest = false; if (!endpointWatchers.containsKey(clusterName)) { - logger.log(Level.FINE, "Start watching endpoints in cluster {0}", clusterName); + logger.log(XdsLogLevel.INFO, "Start watching endpoints in cluster {0}", clusterName); needRequest = true; endpointWatchers.put(clusterName, new HashSet()); } Set watchers = endpointWatchers.get(clusterName); - if (watchers.contains(watcher)) { - logger.log(Level.WARNING, "Watcher {0} already registered", watcher); - return; - } + checkState(!watchers.contains(watcher), "watcher for %s already registered", clusterName); watchers.add(watcher); // If local cache contains endpoint information for the cluster to be watched, notify // the watcher immediately. if (absentEdsResources.contains(clusterName)) { + logger.log( + XdsLogLevel.DEBUG, + "Endpoint resource for cluster {0} is known to be absent.", clusterName); watcher.onError( Status.NOT_FOUND .withDescription( - "Endpoint resource for cluster [" + clusterName + "] not found.")); + "Endpoint resource for cluster " + clusterName + " not found.")); return; } if (clusterNamesToEndpointUpdates.containsKey(clusterName)) { + logger.log( + XdsLogLevel.DEBUG, + "Retrieve endpoints info for cluster {0} from local cache.", clusterName); watcher.onEndpointChanged(clusterNamesToEndpointUpdates.get(clusterName)); return; } @@ -382,13 +389,12 @@ void watchEndpointData(String clusterName, EndpointWatcher watcher) { void cancelEndpointDataWatch(String clusterName, EndpointWatcher watcher) { checkNotNull(watcher, "watcher"); Set watchers = endpointWatchers.get(clusterName); - if (watchers == null || !watchers.contains(watcher)) { - logger.log(Level.FINE, "Watcher {0} was not registered", watcher); - return; - } + checkState( + watchers != null && watchers.contains(watcher), + "watcher for %s was not 
registered", clusterName); watchers.remove(watcher); if (watchers.isEmpty()) { - logger.log(Level.FINE, "Stop watching endpoints in cluster {0}", clusterName); + logger.log(XdsLogLevel.INFO, "Stop watching endpoints in cluster {0}", clusterName); endpointWatchers.remove(clusterName); // Remove the corresponding EDS cache entry. absentEdsResources.remove(clusterName); @@ -414,6 +420,7 @@ void reportClientStats( if (lrsClient == null) { lrsClient = new LoadReportClient( + logId, targetName, channel, node, @@ -426,17 +433,30 @@ void reportClientStats( public void onReportResponse(long reportIntervalNano) {} }); } + logger.log( + XdsLogLevel.INFO, + "Report loads for cluster: {0}, cluster_service: {1}", clusterName, clusterServiceName); lrsClient.addLoadStatsStore(clusterName, clusterServiceName, loadStatsStore); } @Override void cancelClientStatsReport(String clusterName, @Nullable String clusterServiceName) { checkState(lrsClient != null, "load reporting was never started"); + logger.log( + XdsLogLevel.INFO, + "Stop reporting loads for cluster: {0}, cluster_service: {1}", + clusterName, + clusterServiceName); lrsClient.removeLoadStatsStore(clusterName, clusterServiceName); // TODO(chengyuanzhang): can be optimized to stop load reporting if no more loads need // to be reported. } + @Override + public String toString() { + return logId.toString(); + } + /** * Establishes the RPC connection by creating a new RPC stream on the given channel for * xDS protocol communication. @@ -447,6 +467,7 @@ private void startRpcStream() { AggregatedDiscoveryServiceGrpc.newStub(channel); adsStream = new AdsStream(stub); adsStream.start(); + logger.log(XdsLogLevel.INFO, "ADS stream started"); adsStreamRetryStopwatch.reset().start(); } @@ -459,23 +480,30 @@ private void startRpcStream() { * ACK request is sent to management server. 
*/ private void handleLdsResponse(DiscoveryResponse ldsResponse) { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Received an LDS response: {0}", respPrinter.print(ldsResponse)); - } checkState(ldsResourceName != null && configWatcher != null, "No LDS request was ever sent. Management server is doing something wrong"); + if (logger.isLoggable(XdsLogLevel.DEBUG)) { + logger.log( + XdsLogLevel.DEBUG, "Received LDS response:\n{0}", respPrinter.print(ldsResponse)); + } // Unpack Listener messages. List listeners = new ArrayList<>(ldsResponse.getResourcesCount()); + List listenerNames = new ArrayList<>(ldsResponse.getResourcesCount()); try { for (com.google.protobuf.Any res : ldsResponse.getResourcesList()) { - listeners.add(res.unpack(Listener.class)); + Listener listener = res.unpack(Listener.class); + listeners.add(listener); + listenerNames.add(listener.getName()); } } catch (InvalidProtocolBufferException e) { - adsStream.sendNackRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), - "Broken LDS response."); + logger.log(XdsLogLevel.WARNING, "Failed to unpack Listeners in LDS response {0}", e); + adsStream.sendNackRequest( + ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), + ldsResponse.getVersionInfo(), "Malformed LDS response: " + e); return; } + logger.log(XdsLogLevel.INFO, "Received LDS response for resources: {0}", listenerNames); // Unpack HttpConnectionManager messages. 
HttpConnectionManager requestedHttpConnManager = null; @@ -488,8 +516,12 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { } } } catch (InvalidProtocolBufferException e) { - adsStream.sendNackRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), - "Broken LDS response."); + logger.log( + XdsLogLevel.WARNING, + "Failed to unpack HttpConnectionManagers in Listeners of LDS response {0}", e); + adsStream.sendNackRequest( + ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), + ldsResponse.getVersionInfo(), "Malformed LDS response: " + e); return; } @@ -501,6 +533,7 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { // Process the requested Listener if exists, either extract cluster information from in-lined // RouteConfiguration message or send an RDS request for dynamic resolution. if (requestedHttpConnManager != null) { + logger.log(XdsLogLevel.DEBUG, "Found http connection manager"); // The HttpConnectionManager message must either provide the RouteConfiguration directly // in-line or tell the client to use RDS to obtain it. 
// TODO(chengyuanzhang): if both route_config and rds are set, it should be either invalid @@ -509,24 +542,30 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { RouteConfiguration rc = requestedHttpConnManager.getRouteConfig(); clusterName = findClusterNameInRouteConfig(rc, hostName); if (clusterName == null) { - errorMessage = "Cannot find a valid cluster name in VirtualHost inside " - + "RouteConfiguration with domains matching: " + hostName + "."; + errorMessage = + "Listener " + ldsResourceName + " : cannot find a valid cluster name in any " + + "virtual hosts inside RouteConfiguration with domains matching: " + hostName; } } else if (requestedHttpConnManager.hasRds()) { Rds rds = requestedHttpConnManager.getRds(); if (!rds.getConfigSource().hasAds()) { - errorMessage = "For using RDS, it must be set to use ADS."; + errorMessage = + "Listener " + ldsResourceName + " : for using RDS, config_source must be " + + "set to use ADS."; } else { rdsRouteConfigName = rds.getRouteConfigName(); } } else { - errorMessage = "HttpConnectionManager message must either provide the " - + "RouteConfiguration directly in-line or tell the client to use RDS to obtain it."; + errorMessage = "Listener " + ldsResourceName + " : HttpConnectionManager message must " + + "either provide the RouteConfiguration directly in-line or tell the client to " + + "use RDS to obtain it."; } } if (errorMessage != null) { - adsStream.sendNackRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), errorMessage); + adsStream.sendNackRequest( + ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), + ldsResponse.getVersionInfo(), errorMessage); return; } adsStream.sendAckRequest(ADS_TYPE_URL_LDS, ImmutableList.of(ldsResourceName), @@ -540,11 +579,17 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { } if (clusterName != null) { // Found clusterName in the in-lined RouteConfiguration. 
+ logger.log( + XdsLogLevel.INFO, + "Found cluster name (inlined in route config): {0}", clusterName); ConfigUpdate configUpdate = ConfigUpdate.newBuilder().setClusterName(clusterName).build(); configWatcher.onConfigChanged(configUpdate); } else if (rdsRouteConfigName != null) { // Send an RDS request if the resource to request has changed. if (!rdsRouteConfigName.equals(adsStream.rdsResourceName)) { + logger.log( + XdsLogLevel.INFO, + "Use RDS to dynamically resolve route config, resource name: {0}", rdsRouteConfigName); adsStream.sendXdsRequest(ADS_TYPE_URL_RDS, ImmutableList.of(rdsRouteConfigName)); // Cancel the timer for fetching the previous RDS resource. if (rdsRespTimer != null) { @@ -557,11 +602,11 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS, timeService); } } else { - // The requested Listener does not present in this LDS response. + // The requested Listener is removed by management server. if (ldsRespTimer == null) { configWatcher.onError( Status.NOT_FOUND.withDescription( - "Listener resource for listener [" + ldsResourceName + "] does not exist")); + "Listener resource for listener " + ldsResourceName + " does not exist")); } } @@ -574,35 +619,45 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { * invalid data for gRPC's usage. Otherwise, an ACK request is sent to management server. */ private void handleRdsResponse(DiscoveryResponse rdsResponse) { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Received an RDS response: {0}", respPrinter.print(rdsResponse)); + if (logger.isLoggable(XdsLogLevel.DEBUG)) { + logger.log(XdsLogLevel.DEBUG, "Received RDS response:\n{0}", respPrinter.print(rdsResponse)); } checkState(adsStream.rdsResourceName != null, "Never requested for RDS resources, management server is doing something wrong"); // Unpack RouteConfiguration messages. 
+ List routeConfigNames = new ArrayList<>(rdsResponse.getResourcesCount()); RouteConfiguration requestedRouteConfig = null; try { for (com.google.protobuf.Any res : rdsResponse.getResourcesList()) { RouteConfiguration rc = res.unpack(RouteConfiguration.class); + routeConfigNames.add(rc.getName()); if (rc.getName().equals(adsStream.rdsResourceName)) { requestedRouteConfig = rc; } } } catch (InvalidProtocolBufferException e) { - adsStream.sendNackRequest(ADS_TYPE_URL_RDS, ImmutableList.of(adsStream.rdsResourceName), - "Broken RDS response."); + logger.log( + XdsLogLevel.WARNING, "Failed to unpack RouteConfiguration in RDS response {0}", e); + adsStream.sendNackRequest( + ADS_TYPE_URL_RDS, ImmutableList.of(adsStream.rdsResourceName), + rdsResponse.getVersionInfo(), "Malformed RDS response: " + e); return; } + logger.log( + XdsLogLevel.INFO, "Received RDS response for resources: {0}", routeConfigNames); // Resolved cluster name for the requested resource, if exists. String clusterName = null; if (requestedRouteConfig != null) { clusterName = findClusterNameInRouteConfig(requestedRouteConfig, hostName); if (clusterName == null) { - adsStream.sendNackRequest(ADS_TYPE_URL_RDS, ImmutableList.of(adsStream.rdsResourceName), - "Cannot find a valid cluster name in VirtualHost inside " - + "RouteConfiguration with domains matching: " + hostName + "."); + adsStream.sendNackRequest( + ADS_TYPE_URL_RDS, ImmutableList.of(adsStream.rdsResourceName), + rdsResponse.getVersionInfo(), + "RouteConfiguration " + requestedRouteConfig.getName() + ": cannot find a " + + "valid cluster name in any virtual hosts with domains matching: " + + hostName); return; } } @@ -617,6 +672,7 @@ private void handleRdsResponse(DiscoveryResponse rdsResponse) { rdsRespTimer.cancel(); rdsRespTimer = null; } + logger.log(XdsLogLevel.INFO, "Found cluster name: {0}", clusterName); ConfigUpdate configUpdate = ConfigUpdate.newBuilder().setClusterName(clusterName).build(); 
configWatcher.onConfigChanged(configUpdate); } @@ -697,22 +753,28 @@ static String findClusterNameInRouteConfig(RouteConfiguration config, String hos * interested in the same clusters are added later. */ private void handleCdsResponse(DiscoveryResponse cdsResponse) { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Received an CDS response: {0}", respPrinter.print(cdsResponse)); + if (logger.isLoggable(XdsLogLevel.DEBUG)) { + logger.log(XdsLogLevel.DEBUG, "Received CDS response:\n{0}", respPrinter.print(cdsResponse)); } adsStream.cdsRespNonce = cdsResponse.getNonce(); // Unpack Cluster messages. List clusters = new ArrayList<>(cdsResponse.getResourcesCount()); + List clusterNames = new ArrayList<>(cdsResponse.getResourcesCount()); try { for (com.google.protobuf.Any res : cdsResponse.getResourcesList()) { - clusters.add(res.unpack(Cluster.class)); + Cluster cluster = res.unpack(Cluster.class); + clusters.add(cluster); + clusterNames.add(cluster.getName()); } } catch (InvalidProtocolBufferException e) { - adsStream.sendNackRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet(), - "Broken CDS response"); + logger.log(XdsLogLevel.WARNING, "Failed to unpack Clusters in CDS response {0}", e); + adsStream.sendNackRequest( + ADS_TYPE_URL_CDS, clusterWatchers.keySet(), + cdsResponse.getVersionInfo(), "Malformed CDS response: " + e); return; } + logger.log(XdsLogLevel.INFO, "Received CDS response for resources: {0}", clusterNames); String errorMessage = null; // Cluster information update for requested clusters received in this CDS response. @@ -733,7 +795,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { updateBuilder.setClusterName(clusterName); // The type field must be set to EDS. 
if (!cluster.getType().equals(DiscoveryType.EDS)) { - errorMessage = "Cluster [" + clusterName + "]: only EDS discovery type is supported " + errorMessage = "Cluster " + clusterName + " : only EDS discovery type is supported " + "in gRPC."; break; } @@ -741,7 +803,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { // use EDS (must be set to use ADS). EdsClusterConfig edsClusterConfig = cluster.getEdsClusterConfig(); if (!edsClusterConfig.getEdsConfig().hasAds()) { - errorMessage = "Cluster [" + clusterName + "]: field eds_cluster_config must be set to " + errorMessage = "Cluster " + clusterName + " : field eds_cluster_config must be set to " + "indicate to use EDS over ADS."; break; } @@ -754,7 +816,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { } // The lb_policy field must be set to ROUND_ROBIN. if (!cluster.getLbPolicy().equals(LbPolicy.ROUND_ROBIN)) { - errorMessage = "Cluster [" + clusterName + "]: only round robin load balancing policy is " + errorMessage = "Cluster " + clusterName + " : only round robin load balancing policy is " + "supported in gRPC."; break; } @@ -764,7 +826,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { // LRS load reporting will be disabled. 
if (cluster.hasLrsServer()) { if (!cluster.getLrsServer().hasSelf()) { - errorMessage = "Cluster [" + clusterName + "]: only support enabling LRS for the same " + errorMessage = "Cluster " + clusterName + " : only support enabling LRS for the same " + "management server."; break; } @@ -776,7 +838,8 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { clusterUpdates.put(clusterName, updateBuilder.build()); } if (errorMessage != null) { - adsStream.sendNackRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet(), errorMessage); + adsStream.sendNackRequest( + ADS_TYPE_URL_CDS, clusterWatchers.keySet(), cdsResponse.getVersionInfo(), errorMessage); return; } adsStream.sendAckRequest(ADS_TYPE_URL_CDS, clusterWatchers.keySet(), @@ -804,7 +867,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { watcher.onError( Status.NOT_FOUND .withDescription( - "Endpoint resource for cluster [" + clusterName + "] is deleted.")); + "Endpoint resource for cluster " + clusterName + " is deleted.")); } } } @@ -831,7 +894,7 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { for (ClusterWatcher watcher : entry.getValue()) { watcher.onError( Status.NOT_FOUND - .withDescription("Cluster resource [" + clusterName + "] not found.")); + .withDescription("Cluster resource " + clusterName + " not found.")); } } } @@ -846,22 +909,29 @@ private void handleCdsResponse(DiscoveryResponse cdsResponse) { * are added later. */ private void handleEdsResponse(DiscoveryResponse edsResponse) { - if (logger.isLoggable(Level.FINE)) { - logger.log(Level.FINE, "Received an EDS response: {0}", respPrinter.print(edsResponse)); + if (logger.isLoggable(XdsLogLevel.DEBUG)) { + logger.log(XdsLogLevel.DEBUG, "Received EDS response:\n{0}", respPrinter.print(edsResponse)); } // Unpack ClusterLoadAssignment messages. 
List clusterLoadAssignments = new ArrayList<>(edsResponse.getResourcesCount()); + List claNames = new ArrayList<>(edsResponse.getResourcesCount()); try { for (com.google.protobuf.Any res : edsResponse.getResourcesList()) { - clusterLoadAssignments.add(res.unpack(ClusterLoadAssignment.class)); + ClusterLoadAssignment assignment = res.unpack(ClusterLoadAssignment.class); + clusterLoadAssignments.add(assignment); + claNames.add(assignment.getClusterName()); } } catch (InvalidProtocolBufferException e) { - adsStream.sendNackRequest(ADS_TYPE_URL_EDS, endpointWatchers.keySet(), - "Broken EDS response"); + logger.log( + XdsLogLevel.WARNING, "Failed to unpack ClusterLoadAssignments in EDS response {0}", e); + adsStream.sendNackRequest( + ADS_TYPE_URL_EDS, endpointWatchers.keySet(), + edsResponse.getVersionInfo(), "Malformed EDS response: " + e); return; } + logger.log(XdsLogLevel.INFO, "Received EDS response for resources: {0}", claNames); String errorMessage = null; // Endpoint information updates for requested clusters received in this EDS response. @@ -880,7 +950,7 @@ private void handleEdsResponse(DiscoveryResponse edsResponse) { EndpointUpdate.Builder updateBuilder = EndpointUpdate.newBuilder(); updateBuilder.setClusterName(clusterName); if (assignment.getEndpointsCount() == 0) { - errorMessage = "Cluster without any locality endpoint."; + errorMessage = "ClusterLoadAssignment " + clusterName + " : no locality endpoints."; break; } @@ -892,7 +962,7 @@ private void handleEdsResponse(DiscoveryResponse edsResponse) { : assignment.getEndpointsList()) { // The lb_endpoints field for LbEndpoint must contain at least one entry. if (localityLbEndpoints.getLbEndpointsCount() == 0) { - errorMessage = "Locality with no endpoint."; + errorMessage = "ClusterLoadAssignment " + clusterName + " : locality with no endpoint."; break; } // The endpoint field of each lb_endpoints must be set. 
@@ -900,7 +970,7 @@ private void handleEdsResponse(DiscoveryResponse edsResponse) { for (io.envoyproxy.envoy.api.v2.endpoint.LbEndpoint lbEndpoint : localityLbEndpoints.getLbEndpointsList()) { if (!lbEndpoint.getEndpoint().hasAddress()) { - errorMessage = "Invalid endpoint address information."; + errorMessage = "ClusterLoadAssignment " + clusterName + " : endpoint with no address."; break; } } @@ -925,9 +995,8 @@ private void handleEdsResponse(DiscoveryResponse edsResponse) { endpointUpdates.put(clusterName, update); } if (errorMessage != null) { - adsStream.sendNackRequest(ADS_TYPE_URL_EDS, endpointWatchers.keySet(), - "ClusterLoadAssignment message contains invalid information for gRPC's usage: " - + errorMessage); + adsStream.sendNackRequest( + ADS_TYPE_URL_EDS, endpointWatchers.keySet(), edsResponse.getVersionInfo(), errorMessage); return; } adsStream.sendAckRequest(ADS_TYPE_URL_EDS, endpointWatchers.keySet(), @@ -1060,8 +1129,9 @@ public void run() { edsRespNonce = response.getNonce(); handleEdsResponse(response); } else { - logger.log(Level.FINE, "Received an unknown type of DiscoveryResponse {0}", - response); + logger.log( + XdsLogLevel.WARNING, + "Received an unknown type of DiscoveryResponse\n{0}", response); } } }); @@ -1072,8 +1142,7 @@ public void onError(final Throwable t) { syncContext.execute(new Runnable() { @Override public void run() { - handleStreamClosed( - Status.fromThrowable(t).augmentDescription("ADS stream [" + this + "] had an error")); + handleStreamClosed(Status.fromThrowable(t)); } }); } @@ -1084,7 +1153,7 @@ public void onCompleted() { @Override public void run() { handleStreamClosed( - Status.UNAVAILABLE.withDescription("ADS stream [" + this + "] was closed by server")); + Status.UNAVAILABLE.withDescription("Closed by server")); } }); } @@ -1094,7 +1163,10 @@ private void handleStreamClosed(Status error) { if (closed) { return; } - logger.log(Level.FINE, error.getDescription(), error.getCause()); + logger.log( + XdsLogLevel.ERROR, + 
"ADS stream closed with status {0}: {1}. Cause: {2}", + error.getCode(), error.getDescription(), error.getCause()); closed = true; if (configWatcher != null) { configWatcher.onError(error); @@ -1124,7 +1196,7 @@ private void handleStreamClosed(Status error) { retryBackoffPolicy.nextBackoffNanos() - adsStreamRetryStopwatch.elapsed(TimeUnit.NANOSECONDS)); } - logger.log(Level.FINE, "{0} stream closed, retry in {1} ns", new Object[]{this, delayNanos}); + logger.log(XdsLogLevel.INFO, "Retry ADS stream in {0} ns", delayNanos); rpcRetryTimer = syncContext.schedule( new RpcRetryTask(), delayNanos, TimeUnit.NANOSECONDS, timeService); @@ -1157,18 +1229,22 @@ private void sendXdsRequest(String typeUrl, Collection resourceNames) { if (typeUrl.equals(ADS_TYPE_URL_LDS)) { version = ldsVersion; nonce = ldsRespNonce; + logger.log(XdsLogLevel.INFO, "Sending LDS request for resources: {0}", resourceNames); } else if (typeUrl.equals(ADS_TYPE_URL_RDS)) { checkArgument(resourceNames.size() == 1, "RDS request requesting for more than one resource"); version = rdsVersion; nonce = rdsRespNonce; rdsResourceName = resourceNames.iterator().next(); + logger.log(XdsLogLevel.INFO, "Sending RDS request for resources: {0}", resourceNames); } else if (typeUrl.equals(ADS_TYPE_URL_CDS)) { version = cdsVersion; nonce = cdsRespNonce; + logger.log(XdsLogLevel.INFO, "Sending CDS request for resources: {0}", resourceNames); } else if (typeUrl.equals(ADS_TYPE_URL_EDS)) { version = edsVersion; nonce = edsRespNonce; + logger.log(XdsLogLevel.INFO, "Sending EDS request for resources: {0}", resourceNames); } DiscoveryRequest request = DiscoveryRequest @@ -1180,7 +1256,7 @@ private void sendXdsRequest(String typeUrl, Collection resourceNames) { .setResponseNonce(nonce) .build(); requestWriter.onNext(request); - logger.log(Level.FINE, "Sent DiscoveryRequest {0}", request); + logger.log(XdsLogLevel.DEBUG, "Sent DiscoveryRequest\n{0}", request); } /** @@ -1214,7 +1290,7 @@ private void sendAckRequest(String 
typeUrl, Collection resourceNames, .setResponseNonce(nonce) .build(); requestWriter.onNext(request); - logger.log(Level.FINE, "Sent ACK request {0}", request); + logger.log(XdsLogLevel.DEBUG, "Sent ACK request\n{0}", request); } /** @@ -1222,22 +1298,34 @@ private void sendAckRequest(String typeUrl, Collection resourceNames, * accepted version. */ private void sendNackRequest(String typeUrl, Collection resourceNames, - String message) { + String rejectVersion, String message) { checkState(requestWriter != null, "ADS stream has not been started"); String versionInfo = ""; String nonce = ""; if (typeUrl.equals(ADS_TYPE_URL_LDS)) { versionInfo = ldsVersion; nonce = ldsRespNonce; + logger.log( + XdsLogLevel.WARNING, + "Rejecting LDS update, version: {0}, reason: {1}", rejectVersion, message); } else if (typeUrl.equals(ADS_TYPE_URL_RDS)) { versionInfo = rdsVersion; nonce = rdsRespNonce; + logger.log( + XdsLogLevel.WARNING, + "Rejecting RDS update, version: {0}, reason: {1}", rejectVersion, message); } else if (typeUrl.equals(ADS_TYPE_URL_CDS)) { versionInfo = cdsVersion; nonce = cdsRespNonce; + logger.log( + XdsLogLevel.WARNING, + "Rejecting CDS update, version: {0}, reason: {1}", rejectVersion, message); } else if (typeUrl.equals(ADS_TYPE_URL_EDS)) { versionInfo = edsVersion; nonce = edsRespNonce; + logger.log( + XdsLogLevel.WARNING, + "Rejecting EDS update, version: {0}, reason: {1}", rejectVersion, message); } DiscoveryRequest request = DiscoveryRequest @@ -1253,16 +1341,24 @@ private void sendNackRequest(String typeUrl, Collection resourceNames, .setMessage(message)) .build(); requestWriter.onNext(request); - logger.log(Level.FINE, "Sent NACK request {0}", request); + logger.log(XdsLogLevel.DEBUG, "Sent NACK request\n{0}", request); } } - private abstract static class ResourceFetchTimeoutTask implements Runnable { - protected final String resourceName; + private abstract class ResourceFetchTimeoutTask implements Runnable { + final String resourceName; 
ResourceFetchTimeoutTask(String resourceName) { this.resourceName = resourceName; } + + @Override + public void run() { + logger.log( + XdsLogLevel.WARNING, + "Did not receive resource info {0} after {1} seconds, conclude it absent", + resourceName, INITIAL_RESOURCE_FETCH_TIMEOUT_SEC); + } } @VisibleForTesting @@ -1274,10 +1370,11 @@ final class LdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { @Override public void run() { + super.run(); ldsRespTimer = null; configWatcher.onError( Status.NOT_FOUND - .withDescription("Listener resource for listener [" + resourceName + "] not found.")); + .withDescription("Listener resource for listener " + resourceName + " not found.")); } } @@ -1290,10 +1387,11 @@ final class RdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { @Override public void run() { + super.run(); rdsRespTimer = null; configWatcher.onError(Status.NOT_FOUND .withDescription( - "RouteConfiguration resource for route [" + resourceName + "] not found.")); + "RouteConfiguration resource for route " + resourceName + " not found.")); } } @@ -1306,12 +1404,13 @@ final class CdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { @Override public void run() { + super.run(); cdsRespTimers.remove(resourceName); absentCdsResources.add(resourceName); for (ClusterWatcher wat : clusterWatchers.get(resourceName)) { wat.onError( Status.NOT_FOUND - .withDescription("Cluster resource [" + resourceName + "] not found.")); + .withDescription("Cluster resource " + resourceName + " not found.")); } } } @@ -1325,13 +1424,14 @@ final class EdsResourceFetchTimeoutTask extends ResourceFetchTimeoutTask { @Override public void run() { + super.run(); edsRespTimers.remove(resourceName); absentEdsResources.add(resourceName); for (EndpointWatcher wat : endpointWatchers.get(resourceName)) { wat.onError( Status.NOT_FOUND .withDescription( - "Endpoint resource for cluster [" + resourceName + "] not found.")); + "Endpoint resource for cluster " + resourceName + " 
not found.")); } } } diff --git a/xds/src/main/java/io/grpc/xds/XdsLogger.java b/xds/src/main/java/io/grpc/xds/XdsLogger.java new file mode 100644 index 00000000000..616602536f9 --- /dev/null +++ b/xds/src/main/java/io/grpc/xds/XdsLogger.java @@ -0,0 +1,109 @@ +/* + * Copyright 2020 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.grpc.xds; + +import com.google.common.base.Preconditions; +import io.grpc.InternalLogId; +import java.text.MessageFormat; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +/** + * An xDS-specific logger for collecting xDS specific events. Information logged here goes + * to the Java logger of this class. 
+ */ +final class XdsLogger { + private static final Logger logger = Logger.getLogger(XdsLogger.class.getName()); + + private final String prefix; + + static XdsLogger withLogId(InternalLogId logId) { + Preconditions.checkNotNull(logId, "logId"); + return new XdsLogger(logId.toString()); + } + + static XdsLogger withPrefix(String prefix) { + return new XdsLogger(prefix); + } + + private XdsLogger(String prefix) { + this.prefix = Preconditions.checkNotNull(prefix, "prefix"); + } + + boolean isLoggable(XdsLogLevel level) { + Level javaLevel = toJavaLogLevel(level); + return logger.isLoggable(javaLevel); + } + + void log(XdsLogLevel level, String msg) { + Level javaLevel = toJavaLogLevel(level); + logOnly(prefix, javaLevel, msg); + } + + void log(XdsLogLevel level, String messageFormat, Object... args) { + Level javaLogLevel = toJavaLogLevel(level); + if (logger.isLoggable(javaLogLevel)) { + String msg = MessageFormat.format(messageFormat, args); + logOnly(prefix, javaLogLevel, msg); + } + } + + private static void logOnly(String prefix, Level logLevel, String msg) { + if (logger.isLoggable(logLevel)) { + LogRecord lr = new LogRecord(logLevel, "[" + prefix + "] " + msg); + // No resource bundle as gRPC is not localized. + lr.setLoggerName(logger.getName()); + lr.setSourceClassName(logger.getName()); + lr.setSourceMethodName("log"); + logger.log(lr); + } + } + + private static Level toJavaLogLevel(XdsLogLevel level) { + switch (level) { + case ERROR: + case WARNING: + return Level.FINE; + case INFO: + return Level.FINER; + default: + return Level.FINEST; + } + } + + /** + * Log levels. See the table below for the mapping from the XdsLogger levels to + * Java logger levels. + *

      +   * +---------------------+-------------------+
      +   * | XdsLogger Level     | Java Logger Level |
      +   * +---------------------+-------------------+
      +   * | DEBUG               | FINEST            |
      +   * | INFO                | FINER             |
      +   * | WARNING             | FINE              |
      +   * | ERROR               | FINE              |
      +   * +---------------------+-------------------+
      +   * 
      + */ + enum XdsLogLevel { + DEBUG, + INFO, + WARNING, + ERROR + } +} diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index e65044016e6..bcc442d3252 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -25,6 +25,7 @@ import io.envoyproxy.envoy.api.v2.core.Node; import io.grpc.Attributes; import io.grpc.EquivalentAddressGroup; +import io.grpc.InternalLogId; import io.grpc.NameResolver; import io.grpc.Status; import io.grpc.Status.Code; @@ -40,6 +41,7 @@ import io.grpc.xds.XdsClient.RefCountedXdsClientObjectPool; import io.grpc.xds.XdsClient.XdsChannelFactory; import io.grpc.xds.XdsClient.XdsClientFactory; +import io.grpc.xds.XdsLogger.XdsLogLevel; import java.io.IOException; import java.net.URI; import java.util.List; @@ -57,6 +59,7 @@ */ final class XdsNameResolver extends NameResolver { + private final XdsLogger logger; private final String authority; private final String hostName; private final int port; @@ -93,6 +96,8 @@ final class XdsNameResolver extends NameResolver { this.backoffPolicyProvider = checkNotNull(backoffPolicyProvider, "backoffPolicyProvider"); this.stopwatchSupplier = checkNotNull(stopwatchSupplier, "stopwatchSupplier"); this.bootstrapper = checkNotNull(bootstrapper, "bootstrapper"); + logger = XdsLogger.withLogId(InternalLogId.allocate("xds-resolver", name)); + logger.log(XdsLogLevel.INFO, "Created resolver for {0}", name); } @Override @@ -138,6 +143,10 @@ XdsClient createXdsClient() { xdsClient.watchConfigData(hostName, port, new ConfigWatcher() { @Override public void onConfigChanged(ConfigUpdate update) { + logger.log( + XdsLogLevel.INFO, + "Received config update from xDS client {0}: cluster_name={1}", + xdsClient, update.getClusterName()); String serviceConfig = "{\n" + " \"loadBalancingConfig\": [\n" + " {\n" @@ -155,6 +164,7 @@ public void onConfigChanged(ConfigUpdate update) { 
Status.UNKNOWN.withDescription("Invalid service config").withCause(e)); return; } + logger.log(XdsLogLevel.INFO, "Generated service config:\n{0}", serviceConfig); Attributes attrs = Attributes.newBuilder() .set(GrpcAttributes.NAME_RESOLVER_SERVICE_CONFIG, config) @@ -178,6 +188,9 @@ public void onError(Status error) { // TODO(chengyuanzhang): Returning an empty resolution result based on status code is // a temporary solution. More design discussion needs to be done. if (error.getCode().equals(Code.NOT_FOUND)) { + logger.log( + XdsLogLevel.WARNING, + "Received error from xDS client {0}: {1}", xdsClient, error.getDescription()); listener.onResult(ResolutionResult.newBuilder().build()); return; } @@ -188,6 +201,7 @@ public void onError(Status error) { @Override public void shutdown() { + logger.log(XdsLogLevel.INFO, "Shutdown"); if (xdsClient != null) { xdsClient = xdsClientPool.returnObject(xdsClient); } diff --git a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java index 75b6db1ea04..b2916d873a3 100644 --- a/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/CdsLoadBalancerTest.java @@ -35,7 +35,6 @@ import com.google.common.collect.ImmutableMap; import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext; import io.grpc.Attributes; -import io.grpc.ChannelLogger; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; import io.grpc.LoadBalancer; @@ -134,8 +133,6 @@ public void uncaughtException(Thread t, Throwable e) { @Mock private Helper helper; - @Mock - private ChannelLogger channelLogger; private LoadBalancer cdsLoadBalancer; private XdsClient xdsClient; @@ -147,7 +144,6 @@ public void uncaughtException(Thread t, Throwable e) { public void setUp() { MockitoAnnotations.initMocks(this); - doReturn(channelLogger).when(helper).getChannelLogger(); doReturn(syncContext).when(helper).getSynchronizationContext(); 
doReturn(fakeClock.getScheduledExecutorService()).when(helper).getScheduledExecutorService(); lbRegistry.register(fakeEdsLoadBlancerProvider); @@ -159,21 +155,6 @@ public void canHandleEmptyAddressListFromNameResolution() { assertThat(cdsLoadBalancer.canHandleEmptyAddressListFromNameResolution()).isTrue(); } - @Test - public void invalidConfigType() { - ResolvedAddresses resolvedAddresses = ResolvedAddresses.newBuilder() - .setAddresses(ImmutableList.of()) - .setAttributes(Attributes.newBuilder() - .set(XdsAttributes.XDS_CLIENT_POOL, xdsClientPool) - .build()) - .setLoadBalancingPolicyConfig(new Object()) - .build(); - - cdsLoadBalancer.handleResolvedAddresses(resolvedAddresses); - - verify(helper).updateBalancingState(eq(TRANSIENT_FAILURE), any(SubchannelPicker.class)); - } - @Test public void handleResolutionErrorBeforeOrAfterCdsWorking() { ResolvedAddresses resolvedAddresses1 = ResolvedAddresses.newBuilder() diff --git a/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java b/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java index adcae1c52d9..ad532a32256 100644 --- a/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java +++ b/xds/src/test/java/io/grpc/xds/EdsLoadBalancerTest.java @@ -48,9 +48,9 @@ import io.envoyproxy.envoy.api.v2.endpoint.LocalityLbEndpoints; import io.envoyproxy.envoy.service.discovery.v2.AggregatedDiscoveryServiceGrpc.AggregatedDiscoveryServiceImplBase; import io.grpc.Attributes; -import io.grpc.ChannelLogger; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; +import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; import io.grpc.LoadBalancer.PickResult; @@ -173,7 +173,6 @@ public static Collection isFullFlow() { public void setUp() throws Exception { doReturn(SERVICE_AUTHORITY).when(helper).getAuthority(); doReturn(syncContext).when(helper).getSynchronizationContext(); - doReturn(mock(ChannelLogger.class)).when(helper).getChannelLogger(); 
doReturn(fakeClock.getScheduledExecutorService()).when(helper).getScheduledExecutorService(); // Register a fake round robin balancer provider. @@ -594,7 +593,10 @@ public void handleEndpointUpdates_delegateUpdatesToLocalityStore() { final ArrayDeque localityStores = new ArrayDeque<>(); localityStoreFactory = new LocalityStoreFactory() { @Override - LocalityStore newLocalityStore(Helper helper, LoadBalancerRegistry lbRegistry, + LocalityStore newLocalityStore( + InternalLogId logId, + Helper helper, + LoadBalancerRegistry lbRegistry, LoadStatsStore loadStatsStore) { // Note that this test approach can not verify anything about how localityStore will use the // helper in the arguments to delegate updates from localityStore to the EDS balancer, and diff --git a/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java b/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java index 06afd8567ea..8c2e6f5a9fc 100644 --- a/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java +++ b/xds/src/test/java/io/grpc/xds/LoadReportClientTest.java @@ -44,6 +44,7 @@ import io.envoyproxy.envoy.service.load_stats.v2.LoadStatsResponse; import io.grpc.Context; import io.grpc.Context.CancellationListener; +import io.grpc.InternalLogId; import io.grpc.ManagedChannel; import io.grpc.Status; import io.grpc.SynchronizationContext; @@ -118,6 +119,7 @@ public void uncaughtException(Thread t, Throwable e) { throw new AssertionError(e); } }); + private final InternalLogId logId = InternalLogId.allocate("lrs-client-test", null); private final FakeClock fakeClock = new FakeClock(); private final ArrayDeque> lrsRequestObservers = new ArrayDeque<>(); @@ -179,6 +181,7 @@ public void cancelled(Context context) { .thenReturn(TimeUnit.SECONDS.toNanos(1L), TimeUnit.SECONDS.toNanos(10L)); lrsClient = new LoadReportClient( + logId, TARGET_NAME, channel, NODE, diff --git a/xds/src/test/java/io/grpc/xds/LocalityStoreTest.java b/xds/src/test/java/io/grpc/xds/LocalityStoreTest.java index 
a60681ea697..ddf38a73a47 100644 --- a/xds/src/test/java/io/grpc/xds/LocalityStoreTest.java +++ b/xds/src/test/java/io/grpc/xds/LocalityStoreTest.java @@ -47,6 +47,7 @@ import io.grpc.ClientStreamTracer; import io.grpc.ConnectivityState; import io.grpc.EquivalentAddressGroup; +import io.grpc.InternalLogId; import io.grpc.LoadBalancer; import io.grpc.LoadBalancer.Helper; import io.grpc.LoadBalancer.PickResult; @@ -134,6 +135,7 @@ public void uncaughtException(Thread t, Throwable e) { } }); + private final InternalLogId logId = InternalLogId.allocate("locality-store-test", null); private final LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry(); private final Map loadBalancers = new HashMap<>(); private final Map childHelpers = new HashMap<>(); @@ -251,7 +253,7 @@ public OrcaReportingHelperWrapper answer(InvocationOnMock invocation) { }); lbRegistry.register(lbProvider); localityStore = - new LocalityStoreImpl(helper, pickerFactory, lbRegistry, random, loadStatsStore, + new LocalityStoreImpl(logId, helper, pickerFactory, lbRegistry, random, loadStatsStore, orcaPerRequestUtil, orcaOobUtil); } From df1b678698bfa66a65ad521e3937ad516b75f2be Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Tue, 25 Feb 2020 18:32:32 -0800 Subject: [PATCH 78/86] Update readme to v1.27.2 (#6758) --- README.md | 28 ++++++++++++------------ cronet/README.md | 2 +- documentation/android-channel-builder.md | 4 ++-- examples/example-xds/README.md | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index f7f2c628dcb..9b379ff0c74 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/quickstart/java.html) or the more explanatory [gRPC basics](https://grpc.io/docs/tutorials/basic/java.html). 
-The [examples](https://github.com/grpc/grpc-java/tree/v1.27.1/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.27.1/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.27.2/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.27.2/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -42,37 +42,37 @@ Download [the JARs][]. Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.27.1 + 1.27.2 io.grpc grpc-protobuf - 1.27.1 + 1.27.2 io.grpc grpc-stub - 1.27.1 + 1.27.2 ``` Or for Gradle with non-Android, add to your dependencies: ```gradle -implementation 'io.grpc:grpc-netty-shaded:1.27.1' -implementation 'io.grpc:grpc-protobuf:1.27.1' -implementation 'io.grpc:grpc-stub:1.27.1' +implementation 'io.grpc:grpc-netty-shaded:1.27.2' +implementation 'io.grpc:grpc-protobuf:1.27.2' +implementation 'io.grpc:grpc-stub:1.27.2' ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.27.1' -implementation 'io.grpc:grpc-protobuf-lite:1.27.1' -implementation 'io.grpc:grpc-stub:1.27.1' +implementation 'io.grpc:grpc-okhttp:1.27.2' +implementation 'io.grpc:grpc-protobuf-lite:1.27.2' +implementation 'io.grpc:grpc-stub:1.27.2' ``` [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.27.1 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.27.2 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -104,7 +104,7 @@ For protobuf-based codegen integrated with the Maven build system, you can use com.google.protobuf:protoc:3.11.0:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.27.1:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.27.2:exe:${os.detected.classifier} @@ -134,7 +134,7 @@ protobuf { } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.27.1' + artifact = 'io.grpc:protoc-gen-grpc-java:1.27.2' } } generateProtoTasks { diff --git a/cronet/README.md b/cronet/README.md index 5e43c03d361..54c62084634 100644 --- a/cronet/README.md +++ b/cronet/README.md @@ -26,7 +26,7 @@ In your app module's `build.gradle` file, include a dependency on both `grpc-cro Google Play Services Client Library for Cronet ``` -implementation 'io.grpc:grpc-cronet:1.27.1' +implementation 'io.grpc:grpc-cronet:1.27.2' implementation 'com.google.android.gms:play-services-cronet:16.0.0' ``` diff --git a/documentation/android-channel-builder.md b/documentation/android-channel-builder.md index 1bcdcaa0b8d..9337b9df895 100644 --- a/documentation/android-channel-builder.md +++ b/documentation/android-channel-builder.md @@ -36,8 +36,8 @@ In your `build.gradle` file, include a dependency on both `grpc-android` and `grpc-okhttp`: ``` -implementation 'io.grpc:grpc-android:1.27.1' -implementation 'io.grpc:grpc-okhttp:1.27.1' +implementation 'io.grpc:grpc-android:1.27.2' +implementation 'io.grpc:grpc-okhttp:1.27.2' ``` You also need permission to access the device's network state in your diff --git a/examples/example-xds/README.md b/examples/example-xds/README.md index c9bcbff3c73..b0f388224f0 100644 --- a/examples/example-xds/README.md +++ b/examples/example-xds/README.md @@ -19,7 +19,7 @@ encounter issues please consult [COMPILING.md](../../COMPILING.md). 1. The server does not use XDS, so recent releases work fine. 
Building using recent releases is much easier, so check out the most recent release tag: ``` -$ git checkout v1.27.1 +$ git checkout v1.27.2 ``` 2. Build the hello-world example server or the hostname example server. See @@ -40,7 +40,7 @@ $ git checkout master ``` To: ``` - grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.27.1" } + grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.27.2" } ``` From 2ec86c11f3c7138b099669bb270b0c5ffcdb6f83 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Thu, 27 Feb 2020 13:25:49 -0800 Subject: [PATCH 79/86] xds: update envoy proto and udpa proto (v1.28.v backport) (#6769) Update envoy proto to c0ab3a4374144728c1e193fc2d43951ed36ccdb7 and udpa proto to edbea6a78f6d1ba34edc69c53a396b1d88d59651. --- .../v2/ScopedRoutesDiscoveryServiceGrpc.java | 10 - .../v2/VirtualHostDiscoveryServiceGrpc.java | 40 +- .../main/java/io/grpc/xds/Bootstrapper.java | 1 + .../java/io/grpc/xds/BootstrapperTest.java | 5 + .../io/grpc/xds/EnvoyServerProtoDataTest.java | 1 + .../java/io/grpc/xds/XdsClientTestHelper.java | 1 + .../sds/CommonTlsContextTestsUtil.java | 1 + .../sds/SdsClientFileBasedMetadataTest.java | 1 + .../sds/trust/SdsX509TrustManagerTest.java | 11 + xds/third_party/envoy/import.sh | 16 +- .../proto/envoy/annotations/deprecation.proto | 22 + .../proto/envoy/annotations/resource.proto | 18 + .../main/proto/envoy/api/v2/auth/cert.proto | 104 +- .../src/main/proto/envoy/api/v2/cds.proto | 825 +-------- .../src/main/proto/envoy/api/v2/cluster.proto | 848 ++++++++++ .../api/v2/cluster/circuit_breaker.proto | 49 +- .../proto/envoy/api/v2/cluster/filter.proto | 14 +- .../api/v2/cluster/outlier_detection.proto | 15 +- .../proto/envoy/api/v2/core/address.proto | 16 +- .../main/proto/envoy/api/v2/core/base.proto | 103 +- .../envoy/api/v2/core/config_source.proto | 45 +- .../envoy/api/v2/core/grpc_service.proto | 70 +- .../envoy/api/v2/core/health_check.proto | 60 +- .../proto/envoy/api/v2/core/http_uri.proto | 10 +- 
.../proto/envoy/api/v2/core/protocol.proto | 90 +- .../main/proto/envoy/api/v2/discovery.proto | 16 +- .../src/main/proto/envoy/api/v2/eds.proto | 119 +- .../main/proto/envoy/api/v2/endpoint.proto | 117 ++ .../envoy/api/v2/endpoint/endpoint.proto | 123 +- .../api/v2/endpoint/endpoint_components.proto | 131 ++ .../envoy/api/v2/endpoint/load_report.proto | 38 +- .../src/main/proto/envoy/api/v2/lds.proto | 203 +-- .../main/proto/envoy/api/v2/listener.proto | 239 +++ .../envoy/api/v2/listener/listener.proto | 203 +-- .../api/v2/listener/listener_components.proto | 271 +++ .../api/v2/listener/udp_listener_config.proto | 18 +- .../src/main/proto/envoy/api/v2/rds.proto | 115 +- .../src/main/proto/envoy/api/v2/route.proto | 105 ++ .../main/proto/envoy/api/v2/route/route.proto | 1395 +-------------- .../envoy/api/v2/route/route_components.proto | 1495 +++++++++++++++++ .../proto/envoy/api/v2/scoped_route.proto | 107 ++ .../src/main/proto/envoy/api/v2/srds.proto | 123 +- .../filter/accesslog/v2/accesslog.proto | 30 +- .../v2/http_connection_manager.proto | 67 +- .../config/listener/v2/api_listener.proto | 12 +- .../envoy/service/discovery/v2/ads.proto | 6 +- .../envoy/service/discovery/v2/sds.proto | 40 +- .../envoy/service/load_stats/v2/lrs.proto | 16 +- .../src/main/proto/envoy/type/http.proto | 20 + .../main/proto/envoy/type/matcher/regex.proto | 10 +- .../proto/envoy/type/matcher/string.proto | 23 +- .../envoy/type/metadata/v2/metadata.proto | 97 ++ .../src/main/proto/envoy/type/percent.proto | 6 +- .../src/main/proto/envoy/type/range.proto | 12 +- .../proto/envoy/type/semantic_version.proto | 20 + .../envoy/type/tracing/v2/custom_tag.proto | 83 + xds/third_party/udpa/import.sh | 4 +- .../main/proto/udpa/annotations/migrate.proto | 49 + .../proto/udpa/annotations/sensitive.proto | 14 + 59 files changed, 4477 insertions(+), 3226 deletions(-) create mode 100644 xds/third_party/envoy/src/main/proto/envoy/annotations/deprecation.proto create mode 100644 
xds/third_party/envoy/src/main/proto/envoy/annotations/resource.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint_components.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/listener.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener_components.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/route.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route_components.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/api/v2/scoped_route.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/http.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/metadata/v2/metadata.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/semantic_version.proto create mode 100644 xds/third_party/envoy/src/main/proto/envoy/type/tracing/v2/custom_tag.proto create mode 100644 xds/third_party/udpa/src/main/proto/udpa/annotations/migrate.proto create mode 100644 xds/third_party/udpa/src/main/proto/udpa/annotations/sensitive.proto diff --git a/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/ScopedRoutesDiscoveryServiceGrpc.java b/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/ScopedRoutesDiscoveryServiceGrpc.java index c19809f816e..db849c6b984 100644 --- a/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/ScopedRoutesDiscoveryServiceGrpc.java +++ b/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/ScopedRoutesDiscoveryServiceGrpc.java @@ -17,8 +17,6 @@ /** *
      - * [#protodoc-title: HTTP scoped routing configuration]
      - * * Routing :ref:`architecture overview <arch_overview_http_routing>`
        * The Scoped Routes Discovery Service (SRDS) API distributes
        * :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`
        * resources. Each ScopedRouteConfiguration resource represents a "routing
      @@ -177,8 +175,6 @@ public ScopedRoutesDiscoveryServiceFutureStub newStub(io.grpc.Channel channel, i
       
         /**
          * 
      -   * [#protodoc-title: HTTP scoped routing configuration]
      -   * * Routing :ref:`architecture overview <arch_overview_http_routing>`
          * The Scoped Routes Discovery Service (SRDS) API distributes
          * :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`
          * resources. Each ScopedRouteConfiguration resource represents a "routing
      @@ -240,8 +236,6 @@ public void fetchScopedRoutes(io.envoyproxy.envoy.api.v2.DiscoveryRequest reques
       
         /**
          * 
      -   * [#protodoc-title: HTTP scoped routing configuration]
      -   * * Routing :ref:`architecture overview <arch_overview_http_routing>`
          * The Scoped Routes Discovery Service (SRDS) API distributes
          * :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`
          * resources. Each ScopedRouteConfiguration resource represents a "routing
      @@ -290,8 +284,6 @@ public void fetchScopedRoutes(io.envoyproxy.envoy.api.v2.DiscoveryRequest reques
       
         /**
          * 
      -   * [#protodoc-title: HTTP scoped routing configuration]
      -   * * Routing :ref:`architecture overview <arch_overview_http_routing>`
          * The Scoped Routes Discovery Service (SRDS) API distributes
          * :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`
          * resources. Each ScopedRouteConfiguration resource represents a "routing
      @@ -323,8 +315,6 @@ public io.envoyproxy.envoy.api.v2.DiscoveryResponse fetchScopedRoutes(io.envoypr
       
         /**
          * 
      -   * [#protodoc-title: HTTP scoped routing configuration]
      -   * * Routing :ref:`architecture overview <arch_overview_http_routing>`
          * The Scoped Routes Discovery Service (SRDS) API distributes
          * :ref:`ScopedRouteConfiguration<envoy_api_msg.ScopedRouteConfiguration>`
          * resources. Each ScopedRouteConfiguration resource represents a "routing
      diff --git a/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/VirtualHostDiscoveryServiceGrpc.java b/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/VirtualHostDiscoveryServiceGrpc.java
      index dac821b715e..4b2272666f5 100644
      --- a/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/VirtualHostDiscoveryServiceGrpc.java
      +++ b/xds/src/generated/main/grpc/io/envoyproxy/envoy/api/v2/VirtualHostDiscoveryServiceGrpc.java
      @@ -20,13 +20,13 @@
        * Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for
        * a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered
        * during the processing of an HTTP request if a route for the request cannot be resolved. The
      - * :ref:`resource_names_subscribe <envoy_api_msg_DeltaDiscoveryRequest.resource_names_subscribe>`
      + * :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>`
        * field contains a list of virtual host names or aliases to track. The contents of an alias would
        * be the contents of a *host* or *authority* header used to make an http request. An xDS server
        * will match an alias to a virtual host based on the content of :ref:`domains'
      - * <envoy_api_msg_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field contains
      - * a list of virtual host names that have been :ref:`unsubscribed <xds_protocol_unsubscribe>`
      - * from the routing table associated with the RouteConfiguration.
      + * <envoy_api_field_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field
      + * contains a list of virtual host names that have been :ref:`unsubscribed
      + * <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration.
        * 
      */ @javax.annotation.Generated( @@ -119,13 +119,13 @@ public VirtualHostDiscoveryServiceFutureStub newStub(io.grpc.Channel channel, io * Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for * a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered * during the processing of an HTTP request if a route for the request cannot be resolved. The - * :ref:`resource_names_subscribe <envoy_api_msg_DeltaDiscoveryRequest.resource_names_subscribe>` + * :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>` * field contains a list of virtual host names or aliases to track. The contents of an alias would * be the contents of a *host* or *authority* header used to make an http request. An xDS server * will match an alias to a virtual host based on the content of :ref:`domains' - * <envoy_api_msg_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field contains - * a list of virtual host names that have been :ref:`unsubscribed <xds_protocol_unsubscribe>` - * from the routing table associated with the RouteConfiguration. + * <envoy_api_field_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field + * contains a list of virtual host names that have been :ref:`unsubscribed + * <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration. *
      */ public static abstract class VirtualHostDiscoveryServiceImplBase implements io.grpc.BindableService { @@ -155,13 +155,13 @@ public io.grpc.stub.StreamObserver */ public static final class VirtualHostDiscoveryServiceStub extends io.grpc.stub.AbstractAsyncStub { @@ -190,13 +190,13 @@ public io.grpc.stub.StreamObserver */ public static final class VirtualHostDiscoveryServiceBlockingStub extends io.grpc.stub.AbstractBlockingStub { @@ -217,13 +217,13 @@ protected VirtualHostDiscoveryServiceBlockingStub build( * Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for * a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered * during the processing of an HTTP request if a route for the request cannot be resolved. The - * :ref:`resource_names_subscribe <envoy_api_msg_DeltaDiscoveryRequest.resource_names_subscribe>` + * :ref:`resource_names_subscribe <envoy_api_field_DeltaDiscoveryRequest.resource_names_subscribe>` * field contains a list of virtual host names or aliases to track. The contents of an alias would * be the contents of a *host* or *authority* header used to make an http request. An xDS server * will match an alias to a virtual host based on the content of :ref:`domains' - * <envoy_api_msg_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field contains - * a list of virtual host names that have been :ref:`unsubscribed <xds_protocol_unsubscribe>` - * from the routing table associated with the RouteConfiguration. + * <envoy_api_field_route.VirtualHost.domains>` field. The *resource_names_unsubscribe* field + * contains a list of virtual host names that have been :ref:`unsubscribed + * <xds_protocol_unsubscribe>` from the routing table associated with the RouteConfiguration. *
      */ public static final class VirtualHostDiscoveryServiceFutureStub extends io.grpc.stub.AbstractFutureStub { diff --git a/xds/src/main/java/io/grpc/xds/Bootstrapper.java b/xds/src/main/java/io/grpc/xds/Bootstrapper.java index 3b3c38cc150..02de61e5c72 100644 --- a/xds/src/main/java/io/grpc/xds/Bootstrapper.java +++ b/xds/src/main/java/io/grpc/xds/Bootstrapper.java @@ -74,6 +74,7 @@ public static Bootstrapper getInstance() { public abstract BootstrapInfo readBootstrap() throws IOException; @VisibleForTesting + @SuppressWarnings("deprecation") static BootstrapInfo parseConfig(String rawData) throws IOException { XdsLogger logger = XdsLogger.withPrefix(LOG_PREFIX); logger.log(XdsLogLevel.INFO, "Reading bootstrap information"); diff --git a/xds/src/test/java/io/grpc/xds/BootstrapperTest.java b/xds/src/test/java/io/grpc/xds/BootstrapperTest.java index 87c46dcc32d..b1b227dcfae 100644 --- a/xds/src/test/java/io/grpc/xds/BootstrapperTest.java +++ b/xds/src/test/java/io/grpc/xds/BootstrapperTest.java @@ -41,6 +41,7 @@ public class BootstrapperTest { @Rule public ExpectedException thrown = ExpectedException.none(); @Test + @SuppressWarnings("deprecation") public void parseBootstrap_validData_singleXdsServer() throws IOException { String rawData = "{\n" + " \"node\": {\n" @@ -96,6 +97,7 @@ public void parseBootstrap_validData_singleXdsServer() throws IOException { } @Test + @SuppressWarnings("deprecation") public void parseBootstrap_validData_multipleXdsServers() throws IOException { String rawData = "{\n" + " \"node\": {\n" @@ -160,6 +162,7 @@ public void parseBootstrap_validData_multipleXdsServers() throws IOException { } @Test + @SuppressWarnings("deprecation") public void parseBootstrap_IgnoreIrrelevantFields() throws IOException { String rawData = "{\n" + " \"node\": {\n" @@ -225,6 +228,7 @@ public void parseBootstrap_emptyData() throws IOException { } @Test + @SuppressWarnings("deprecation") public void parseBootstrap_minimumRequiredFields() throws IOException { 
String rawData = "{\n" + " \"xds_servers\": []\n" @@ -241,6 +245,7 @@ public void parseBootstrap_minimumRequiredFields() throws IOException { } @Test + @SuppressWarnings("deprecation") public void parseBootstrap_minimalUsableData() throws IOException { String rawData = "{\n" + " \"xds_servers\": [\n" diff --git a/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java b/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java index a9e76474b2c..c132410001e 100644 --- a/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java +++ b/xds/src/test/java/io/grpc/xds/EnvoyServerProtoDataTest.java @@ -107,6 +107,7 @@ private static FilterChain createOutFilter() { return filterChain; } + @SuppressWarnings("deprecation") private static FilterChain createInFilter() { FilterChain filterChain = FilterChain.newBuilder() diff --git a/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java b/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java index 6ef308f0e3d..2090c3af339 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientTestHelper.java @@ -125,6 +125,7 @@ static Cluster buildCluster(String clusterName, @Nullable String edsServiceName, return buildSecureCluster(clusterName, edsServiceName, enableLrs, null); } + @SuppressWarnings("deprecation") static Cluster buildSecureCluster(String clusterName, @Nullable String edsServiceName, boolean enableLrs, @Nullable UpstreamTlsContext upstreamTlsContext) { Cluster.Builder clusterBuilder = Cluster.newBuilder(); diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java b/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java index 0fc7511c2f7..fec1e8daf37 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/CommonTlsContextTestsUtil.java @@ -77,6 +77,7 @@ static CommonTlsContext buildCommonTlsContextFromSdsConfigForTlsCertificate( } /** 
takes additional values and creates CombinedCertificateValidationContext as needed. */ + @SuppressWarnings("deprecation") static CommonTlsContext buildCommonTlsContextWithAdditionalValues( String certName, String certTargetUri, diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java index d67e37649b2..fda58feaaae 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/SdsClientFileBasedMetadataTest.java @@ -68,6 +68,7 @@ public class SdsClientFileBasedMetadataTest { private SdsSecretConfig sdsSecretConfig; private File tempTokenFile; + @SuppressWarnings("deprecation") static ConfigSource buildConfigSourceWithCreds( String targetUri, String channelType, diff --git a/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java b/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java index b0618bfd0fa..78cadd4453b 100644 --- a/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java +++ b/xds/src/test/java/io/grpc/xds/internal/sds/trust/SdsX509TrustManagerTest.java @@ -93,6 +93,7 @@ public void emptySanListContextTest() throws CertificateException, IOException { } @Test + @SuppressWarnings("deprecation") public void missingPeerCerts() throws CertificateException, FileNotFoundException { CertificateValidationContext certContext = CertificateValidationContext.newBuilder().addVerifySubjectAltName("foo.com").build(); @@ -106,6 +107,7 @@ public void missingPeerCerts() throws CertificateException, FileNotFoundExceptio } @Test + @SuppressWarnings("deprecation") public void emptyArrayPeerCerts() throws CertificateException, FileNotFoundException { CertificateValidationContext certContext = CertificateValidationContext.newBuilder().addVerifySubjectAltName("foo.com").build(); @@ -119,6 +121,7 @@ public void 
emptyArrayPeerCerts() throws CertificateException, FileNotFoundExcep } @Test + @SuppressWarnings("deprecation") public void noSansInPeerCerts() throws CertificateException, IOException { CertificateValidationContext certContext = CertificateValidationContext.newBuilder().addVerifySubjectAltName("foo.com").build(); @@ -134,6 +137,7 @@ public void noSansInPeerCerts() throws CertificateException, IOException { } @Test + @SuppressWarnings("deprecation") public void oneSanInPeerCertsVerifies() throws CertificateException, IOException { CertificateValidationContext certContext = CertificateValidationContext.newBuilder() @@ -146,6 +150,7 @@ public void oneSanInPeerCertsVerifies() throws CertificateException, IOException } @Test + @SuppressWarnings("deprecation") public void oneSanInPeerCertsVerifiesMultipleVerifySans() throws CertificateException, IOException { CertificateValidationContext certContext = @@ -160,6 +165,7 @@ public void oneSanInPeerCertsVerifiesMultipleVerifySans() } @Test + @SuppressWarnings("deprecation") public void oneSanInPeerCertsNotFoundException() throws CertificateException, IOException { CertificateValidationContext certContext = @@ -176,6 +182,7 @@ public void oneSanInPeerCertsNotFoundException() } @Test + @SuppressWarnings("deprecation") public void wildcardSanInPeerCertsVerifiesMultipleVerifySans() throws CertificateException, IOException { CertificateValidationContext certContext = @@ -190,6 +197,7 @@ public void wildcardSanInPeerCertsVerifiesMultipleVerifySans() } @Test + @SuppressWarnings("deprecation") public void wildcardSanInPeerCertsVerifiesMultipleVerifySans1() throws CertificateException, IOException { CertificateValidationContext certContext = @@ -204,6 +212,7 @@ public void wildcardSanInPeerCertsVerifiesMultipleVerifySans1() } @Test + @SuppressWarnings("deprecation") public void wildcardSanInPeerCertsSubdomainMismatch() throws CertificateException, IOException { // 2. Asterisk (*) cannot match across domain name labels. 
@@ -225,6 +234,7 @@ public void wildcardSanInPeerCertsSubdomainMismatch() } @Test + @SuppressWarnings("deprecation") public void oneIpAddressInPeerCertsVerifies() throws CertificateException, IOException { CertificateValidationContext certContext = CertificateValidationContext.newBuilder() @@ -238,6 +248,7 @@ public void oneIpAddressInPeerCertsVerifies() throws CertificateException, IOExc } @Test + @SuppressWarnings("deprecation") public void oneIpAddressInPeerCertsMismatch() throws CertificateException, IOException { CertificateValidationContext certContext = CertificateValidationContext.newBuilder() diff --git a/xds/third_party/envoy/import.sh b/xds/third_party/envoy/import.sh index 7a253716b0b..ccc50b3522c 100755 --- a/xds/third_party/envoy/import.sh +++ b/xds/third_party/envoy/import.sh @@ -18,14 +18,17 @@ set -e BRANCH=master # import VERSION from one of the google internal CLs -VERSION=f709434b37e9ff74666d5b854aa11fb2f1ec37f3 +VERSION=c0ab3a4374144728c1e193fc2d43951ed36ccdb7 GIT_REPO="https://github.com/envoyproxy/envoy.git" GIT_BASE_DIR=envoy SOURCE_PROTO_BASE_DIR=envoy/api TARGET_PROTO_BASE_DIR=src/main/proto FILES=( +envoy/annotations/deprecation.proto +envoy/annotations/resource.proto envoy/api/v2/auth/cert.proto envoy/api/v2/cds.proto +envoy/api/v2/cluster.proto envoy/api/v2/cluster/circuit_breaker.proto envoy/api/v2/cluster/filter.proto envoy/api/v2/cluster/outlier_detection.proto @@ -38,13 +41,20 @@ envoy/api/v2/core/http_uri.proto envoy/api/v2/core/protocol.proto envoy/api/v2/discovery.proto envoy/api/v2/eds.proto +envoy/api/v2/endpoint.proto envoy/api/v2/endpoint/endpoint.proto +envoy/api/v2/endpoint/endpoint_components.proto envoy/api/v2/endpoint/load_report.proto envoy/api/v2/lds.proto +envoy/api/v2/listener.proto envoy/api/v2/listener/listener.proto +envoy/api/v2/listener/listener_components.proto envoy/api/v2/listener/udp_listener_config.proto envoy/api/v2/rds.proto +envoy/api/v2/route.proto 
envoy/api/v2/route/route.proto +envoy/api/v2/route/route_components.proto +envoy/api/v2/scoped_route.proto envoy/api/v2/srds.proto envoy/config/filter/accesslog/v2/accesslog.proto envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -52,10 +62,14 @@ envoy/config/listener/v2/api_listener.proto envoy/service/discovery/v2/ads.proto envoy/service/discovery/v2/sds.proto envoy/service/load_stats/v2/lrs.proto +envoy/type/http.proto envoy/type/matcher/regex.proto envoy/type/matcher/string.proto +envoy/type/metadata/v2/metadata.proto envoy/type/percent.proto envoy/type/range.proto +envoy/type/semantic_version.proto +envoy/type/tracing/v2/custom_tag.proto ) # clone the envoy github repo in a tmp directory diff --git a/xds/third_party/envoy/src/main/proto/envoy/annotations/deprecation.proto b/xds/third_party/envoy/src/main/proto/envoy/annotations/deprecation.proto new file mode 100644 index 00000000000..7f9b64e32b0 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/annotations/deprecation.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package envoy.annotations; + +import "google/protobuf/descriptor.proto"; + +// [#protodoc-title: Deprecation] +// Allows tagging proto fields as fatal by default. One Envoy release after +// deprecation, deprecated fields will be disallowed by default, a state which +// is reversible with :ref:`runtime overrides `. 
+ +// Magic number in this file derived from top 28bit of SHA256 digest of +// "envoy.annotation.disallowed_by_default" +extend google.protobuf.FieldOptions { + bool disallowed_by_default = 189503207; +} + +// Magic number in this file derived from top 28bit of SHA256 digest of +// "envoy.annotation.disallowed_by_default_enum" +extend google.protobuf.EnumValueOptions { + bool disallowed_by_default_enum = 70100853; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/annotations/resource.proto b/xds/third_party/envoy/src/main/proto/envoy/annotations/resource.proto new file mode 100644 index 00000000000..bd794c68dd7 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/annotations/resource.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package envoy.annotations; + +import "google/protobuf/descriptor.proto"; + +// [#protodoc-title: Resource] + +// Magic number in this file derived from top 28bit of SHA256 digest of "envoy.annotation.resource". +extend google.protobuf.ServiceOptions { + ResourceAnnotation resource = 265073217; +} + +message ResourceAnnotation { + // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource + // type. 
+ string type = 1; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/auth/cert.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/auth/cert.proto index ebf199a4743..52e5e29a839 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/auth/cert.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/auth/cert.proto @@ -2,19 +2,26 @@ syntax = "proto3"; package envoy.api.v2.auth; -option java_outer_classname = "CertProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.auth"; - import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; +import "envoy/type/matcher/string.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/sensitive.proto"; + +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.auth"; +option java_outer_classname = "CertProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.transport_sockets.tls.v3"; + // [#protodoc-title: Common TLS configuration] message TlsParameters { @@ -35,7 +42,8 @@ message TlsParameters { TLSv1_3 = 4; } - // Minimum TLS protocol version. By default, it's ``TLSv1_0``. + // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for + // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and @@ -110,18 +118,19 @@ message PrivateKeyProvider { // Private key method provider specific configuration. 
oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; - google.protobuf.Any typed_config = 3; + google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; } } +// [#next-free-field: 7] message TlsCertificate { // The TLS certificate chain. core.DataSource certificate_chain = 1; // The TLS private key. - core.DataSource private_key = 2; + core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; // BoringSSL private key method provider. This is an alternative to :ref:`private_key // ` field. This can't be @@ -134,7 +143,7 @@ message TlsCertificate { // The password to decrypt the TLS private key. If this field is not set, it is assumed that the // TLS private key is not password encrypted. - core.DataSource password = 3; + core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; // [#not-implemented-hide:] core.DataSource ocsp_staple = 4; @@ -167,10 +176,23 @@ message TlsSessionTicketKeys { // * Keep the session ticket keys at least as secure as your TLS certificate private keys // * Rotate session ticket keys at least daily, and preferably hourly // * Always generate keys using a cryptographically-secure random data source - repeated core.DataSource keys = 1 [(validate.rules).repeated = {min_items: 1}]; + repeated core.DataSource keys = 1 + [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; } +// [#next-free-field: 11] message CertificateValidationContext { + // Peer certificate verification mode. + enum TrustChainVerification { + // Perform default certificate verification (e.g., against CA / verification lists) + VERIFY_TRUST_CHAIN = 0; + + // Connections where the certificate fails verification will be permitted. + // For HTTP connections, the result of certificate verification can be used in route matching. ( + // see :ref:`validated ` ). 
+ ACCEPT_UNTRUSTED = 1; + } + // TLS certificate data containing certificate authority certificates to use in verifying // a presented peer certificate (e.g. server certificate for clusters or client certificate // for listeners). If not specified and a peer certificate is presented it will not be @@ -181,8 +203,8 @@ message CertificateValidationContext { // `, // :ref:`verify_certificate_hash // `, or - // :ref:`verify_subject_alt_name - // `) is also + // :ref:`match_subject_alt_names + // `) is also // specified. // // It can optionally contain certificate revocation lists, in which case Envoy will verify @@ -262,7 +284,27 @@ message CertificateValidationContext { // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. - repeated string verify_subject_alt_name = 4; + repeated string verify_subject_alt_name = 4 [deprecated = true]; + + // An optional list of Subject Alternative name matchers. Envoy will verify that the + // Subject Alternative Name of the presented certificate matches one of the specified matches. + // + // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be + // configured with exact match type in the :ref:`string matcher `. + // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", + // it should be configured as shown below. + // + // .. code-block:: yaml + // + // match_subject_alt_names: + // exact: "api.example.com" + // + // .. attention:: + // + // Subject Alternative Names are easily spoofable and verifying only them is insecure, + // therefore this option must be used together with :ref:`trusted_ca + // `. + repeated type.matcher.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. 
google.protobuf.BoolValue require_ocsp_staple = 5; @@ -279,9 +321,14 @@ message CertificateValidationContext { // If specified, Envoy will not reject expired certificates. bool allow_expired_certificate = 8; + + // Certificate trust chain verification mode. + TrustChainVerification trust_chain_verification = 10 + [(validate.rules).enum = {defined_only: true}]; } // TLS context shared by both client and server TLS contexts. +// [#next-free-field: 9] message CommonTlsContext { message CombinedCertificateValidationContext { // How to validate peer certificates. @@ -342,6 +389,12 @@ message CommonTlsContext { message UpstreamTlsContext { // Common TLS context settings. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. @@ -361,6 +414,7 @@ message UpstreamTlsContext { google.protobuf.UInt32Value max_session_keys = 4; } +// [#next-free-field: 7] message DownstreamTlsContext { // Common TLS context settings. CommonTlsContext common_tls_context = 1; @@ -377,21 +431,35 @@ message DownstreamTlsContext { // TLS session ticket key settings. TlsSessionTicketKeys session_ticket_keys = 4; - // [#not-implemented-hide:] + // Config for fetching TLS session ticket keys via SDS API. SdsSecretConfig session_ticket_keys_sds_secret_config = 5; } + + // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session + // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) + // ` + // only seconds could be specified (fractional seconds are going to be ignored). + google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { + lt {seconds: 4294967296} + gte {} + }]; +} + +message GenericSecret { + // Secret of generic type and is available to filters. 
+ core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; } message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - // When both name and config are specified, then secret can be fetched and/or reloaded via SDS. - // When only name is specified, then secret will be loaded from static - // resources. + // When both name and config are specified, then secret can be fetched and/or reloaded via + // SDS. When only name is specified, then secret will be loaded from static resources. string name = 1; core.ConfigSource sds_config = 2; } +// [#next-free-field: 6] message Secret { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. string name = 1; @@ -402,5 +470,7 @@ message Secret { TlsSessionTicketKeys session_ticket_keys = 3; CertificateValidationContext validation_context = 4; + + GenericSecret generic_secret = 5; } } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cds.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cds.proto index cbd0eea5de4..dcd5c3fd0fb 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cds.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cds.proto @@ -2,36 +2,27 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "CdsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_generic_services = true; - -import "envoy/api/v2/auth/cert.proto"; -import "envoy/api/v2/cluster/circuit_breaker.proto"; -import "envoy/api/v2/cluster/filter.proto"; -import "envoy/api/v2/cluster/outlier_detection.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; -import "envoy/api/v2/core/health_check.proto"; -import "envoy/api/v2/core/protocol.proto"; import "envoy/api/v2/discovery.proto"; -import "envoy/api/v2/eds.proto"; -import "envoy/type/percent.proto"; 
import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; -import "validate/validate.proto"; +import "envoy/annotations/resource.proto"; +import "udpa/annotations/migrate.proto"; -// [#protodoc-title: Clusters] +import public "envoy/api/v2/cluster.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "CdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3"; + +// [#protodoc-title: CDS] // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { + option (envoy.annotations.resource).type = "envoy.api.v2.Cluster"; + rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } @@ -39,792 +30,12 @@ service ClusterDiscoveryService { } rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http) = { - post: "/v2/discovery:clusters" - body: "*" - }; - } -} - -// Configuration for a single upstream cluster. -// [#comment:next free field: 45] -message Cluster { - // Refer to :ref:`service discovery type ` - // for an explanation on each type. - enum DiscoveryType { - // Refer to the :ref:`static discovery type` - // for an explanation. - STATIC = 0; - - // Refer to the :ref:`strict DNS discovery - // type` - // for an explanation. - STRICT_DNS = 1; - - // Refer to the :ref:`logical DNS discovery - // type` - // for an explanation. - LOGICAL_DNS = 2; - - // Refer to the :ref:`service discovery type` - // for an explanation. - EDS = 3; - - // Refer to the :ref:`original destination discovery - // type` - // for an explanation. - ORIGINAL_DST = 4; - } - - // Refer to :ref:`load balancer type ` architecture - // overview section for information on each type. 
- enum LbPolicy { - // Refer to the :ref:`round robin load balancing - // policy` - // for an explanation. - ROUND_ROBIN = 0; - - // Refer to the :ref:`least request load balancing - // policy` - // for an explanation. - LEAST_REQUEST = 1; - - // Refer to the :ref:`ring hash load balancing - // policy` - // for an explanation. - RING_HASH = 2; - - // Refer to the :ref:`random load balancing - // policy` - // for an explanation. - RANDOM = 3; - - // Refer to the :ref:`original destination load balancing - // policy` - // for an explanation. - // - // .. attention:: - // - // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. - // - ORIGINAL_DST_LB = 4 [deprecated = true]; - - // Refer to the :ref:`Maglev load balancing policy` - // for an explanation. - MAGLEV = 5; - - // This load balancer type must be specified if the configured cluster provides a cluster - // specific load balancer. Consult the configured cluster's documentation for whether to set - // this option or not. - CLUSTER_PROVIDED = 6; - - // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy - // ` field to determine the LB policy. - // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] - LOAD_BALANCING_POLICY_CONFIG = 7; - } - - // When V4_ONLY is selected, the DNS resolver will only perform a lookup for - // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will - // only perform a lookup for addresses in the IPv6 family. If AUTO is - // specified, the DNS resolver will first perform a lookup for addresses in - // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - // For cluster types other than - // :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS`, - // this setting is - // ignored. 
- enum DnsLookupFamily { - AUTO = 0; - V4_ONLY = 1; - V6_ONLY = 2; - } - - enum ClusterProtocolSelection { - // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - // If :ref:`http2_protocol_options ` are - // present, HTTP2 will be used, otherwise HTTP1.1 will be used. - USE_CONFIGURED_PROTOCOL = 0; - - // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - USE_DOWNSTREAM_PROTOCOL = 1; - } - - // TransportSocketMatch specifies what transport socket config will be used - // when the match conditions are satisfied. - message TransportSocketMatch { - // The name of the match, used in stats generation. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Optional endpoint metadata match criteria. - // The connection to the endpoint with metadata matching what is set in this field - // will use the transport socket configuration specified here. - // The endpoint's metadata entry in *envoy.transport_socket* is used to match - // against the values specified in this field. - google.protobuf.Struct match = 2; - - // The configuration of the transport socket. - core.TransportSocket transport_socket = 3; - } - - // Extended cluster type. - message CustomClusterType { - // The type of the cluster to instantiate. The name must match a supported cluster type. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Cluster specific configuration which depends on the cluster being instantiated. - // See the supported cluster for further documentation. - google.protobuf.Any typed_config = 2; - } - - // Only valid when discovery type is EDS. - message EdsClusterConfig { - // Configuration for the source of EDS updates for this Cluster. - core.ConfigSource eds_config = 1; - - // Optional alternative to cluster name to present to EDS. This does not - // have the same restrictions as cluster name, i.e. it may be arbitrary - // length. 
- string service_name = 2; - } - - // Optionally divide the endpoints in this cluster into subsets defined by - // endpoint metadata and selected by route and weighted cluster metadata. - message LbSubsetConfig { - // If NO_FALLBACK is selected, a result - // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, - // any cluster endpoint may be returned (subject to policy, health checks, - // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - enum LbSubsetFallbackPolicy { - NO_FALLBACK = 0; - ANY_ENDPOINT = 1; - DEFAULT_SUBSET = 2; - } - - // Specifications for subsets. - message LbSubsetSelector { - // Allows to override top level fallback policy per selector. - enum LbSubsetSelectorFallbackPolicy { - // If NOT_DEFINED top level config fallback policy is used instead. - NOT_DEFINED = 0; - - // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - NO_FALLBACK = 1; - - // If ANY_ENDPOINT is selected, any cluster endpoint may be returned - // (subject to policy, health checks, etc). - ANY_ENDPOINT = 2; - - // If DEFAULT_SUBSET is selected, load balancing is performed over the - // endpoints matching the values from the default_subset field. - DEFAULT_SUBSET = 3; - } - - // List of keys to match with the weighted cluster metadata. - repeated string keys = 1; - - // The behavior used when no endpoint subset matches the selected route's - // metadata. - LbSubsetSelectorFallbackPolicy fallback_policy = 2 - [(validate.rules).enum = {defined_only: true}]; - } - - // The behavior used when no endpoint subset matches the selected route's - // metadata. The value defaults to - // :ref:`NO_FALLBACK`. - LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; - - // Specifies the default subset of endpoints used during fallback if - // fallback_policy is - // :ref:`DEFAULT_SUBSET`. 
- // Each field in default_subset is - // compared to the matching LbEndpoint.Metadata under the *envoy.lb* - // namespace. It is valid for no hosts to match, in which case the behavior - // is the same as a fallback_policy of - // :ref:`NO_FALLBACK`. - google.protobuf.Struct default_subset = 2; - - // For each entry, LbEndpoint.Metadata's - // *envoy.lb* namespace is traversed and a subset is created for each unique - // combination of key and value. For example: - // - // .. code-block:: json - // - // { "subset_selectors": [ - // { "keys": [ "version" ] }, - // { "keys": [ "stage", "hardware_type" ] } - // ]} - // - // A subset is matched when the metadata from the selected route and - // weighted cluster contains the same keys and values as the subset's - // metadata. The same host may appear in multiple subsets. - repeated LbSubsetSelector subset_selectors = 3; - - // If true, routing to subsets will take into account the localities and locality weights of the - // endpoints when making the routing decision. - // - // There are some potential pitfalls associated with enabling this feature, as the resulting - // traffic split after applying both a subset match and locality weights might be undesirable. - // - // Consider for example a situation in which you have 50/50 split across two localities X/Y - // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - // host selected but Y having 100, then a lot more load is being dumped on the single host in X - // than originally anticipated in the load balancing assignment delivered via EDS. - bool locality_weight_aware = 4; - - // When used with locality_weight_aware, scales the weight of each locality by the ratio - // of hosts in the subset vs hosts in the original subset. This aims to even out the load - // going to an individual locality if said locality is disproportionally affected by the - // subset predicate. 
- bool scale_locality_weight = 5; - - // If true, when a fallback policy is configured and its corresponding subset fails to find - // a host this will cause any host to be selected instead. - // - // This is useful when using the default subset as the fallback policy, given the default - // subset might become empty. With this option enabled, if that happens the LB will attempt - // to select a host from the entire cluster. - bool panic_mode_any = 6; - - // If true, metadata specified for a metadata key will be matched against the corresponding - // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - // and any of the elements in the list matches the criteria. - bool list_as_any = 7; - } - - // Specific configuration for the LeastRequest load balancing policy. - message LeastRequestLbConfig { - // The number of random healthy hosts from which the host with the fewest active requests will - // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; - } - - // Specific configuration for the :ref:`RingHash` - // load balancing policy. - message RingHashLbConfig { - // The hash function used to hash hosts onto the ketama ring. - enum HashFunction { - // Use `xxHash `_, this is the default hash function. - XX_HASH = 0; - - // Use `MurmurHash2 `_, this is compatible with - // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - // on Linux and not macOS. - MURMUR_HASH_2 = 1; - } - - reserved 2; - - // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - // provided host) the better the request distribution will reflect the desired weights. Defaults - // to 1024 entries, and limited to 8M entries. See also - // :ref:`maximum_ring_size`. 
- google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; - - // The hash function used to hash hosts onto the ketama ring. The value defaults to - // :ref:`XX_HASH`. - HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; - - // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - // to further constrain resource use. See also - // :ref:`minimum_ring_size`. - google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; - } - - // Specific configuration for the - // :ref:`Original Destination ` - // load balancing policy. - message OriginalDstLbConfig { - // When true, :ref:`x-envoy-original-dst-host - // ` can be used to override destination - // address. - // - // .. attention:: - // - // This header isn't sanitized by default, so enabling this feature allows HTTP clients to - // route traffic to arbitrary hosts and/or ports, which may have serious security - // consequences. - bool use_http_header = 1; - } - - // Common configuration for all load balancer implementations. - message CommonLbConfig { - // Configuration for :ref:`zone aware routing - // `. - message ZoneAwareLbConfig { - // Configures percentage of requests that will be considered for zone aware routing - // if zone aware routing is configured. If not specified, the default is 100%. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. - type.Percent routing_enabled = 1; - - // Configures minimum upstream cluster size required for zone aware routing - // If upstream cluster size is less than specified, zone aware routing is not performed - // even if zone aware routing is configured. If not specified, the default is 6. - // * :ref:`runtime values `. - // * :ref:`Zone aware routing support `. 
- google.protobuf.UInt64Value min_cluster_size = 2; - - // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - // mode`. Instead, the cluster will fail all - // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - // failing service. - bool fail_traffic_on_panic = 3; - } - - // Configuration for :ref:`locality weighted load balancing - // ` - message LocalityWeightedLbConfig { - } - - // Configures the :ref:`healthy panic threshold `. - // If not specified, the default is 50%. - // To disable panic mode, set to 0%. - // - // .. note:: - // The specified percent will be truncated to the nearest 1%. - type.Percent healthy_panic_threshold = 1; - - oneof locality_config_specifier { - ZoneAwareLbConfig zone_aware_lb_config = 2; - - LocalityWeightedLbConfig locality_weighted_lb_config = 3; - } - - // If set, all health check/weight/metadata updates that happen within this duration will be - // merged and delivered in one shot when the duration expires. The start of the duration is when - // the first update happens. This is useful for big clusters, with potentially noisy deploys - // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - // cluster). Please always keep in mind that the use of sandbox technologies may change this - // behavior. - // - // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - // window to 0. - // - // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - // because merging those updates isn't currently safe. See - // https://github.com/envoyproxy/envoy/pull/3941. 
- google.protobuf.Duration update_merge_window = 4; - - // If set to true, Envoy will not consider new hosts when computing load balancing weights until - // they have been health checked for the first time. This will have no effect unless - // active health checking is also configured. - // - // Ignoring a host means that for any load balancing calculations that adjust weights based - // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and - // panic mode) Envoy will exclude these hosts in the denominator. - // - // For example, with hosts in two priorities P0 and P1, where P0 looks like - // {healthy, unhealthy (new), unhealthy (new)} - // and where P1 looks like - // {healthy, healthy} - // all traffic will still hit P0, as 1 / (3 - 2) = 1. - // - // Enabling this will allow scaling up the number of hosts for a given cluster without entering - // panic mode or triggering priority spillover, assuming the hosts pass the first health check. - // - // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not - // contribute to the calculation when deciding whether panic mode is enabled or not. - bool ignore_new_hosts_until_first_hc = 5; - - // If set to `true`, the cluster manager will drain all existing - // connections to upstream hosts whenever hosts are added or removed from the cluster. - bool close_connections_on_host_set_change = 6; - } - - message RefreshRate { - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. 
- google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - reserved 12, 15; - - // Configuration to use different transport sockets for different endpoints. - // The entry of *envoy.transport_socket* in the - // :ref:`LbEndpoint.Metadata ` - // is used to match against the transport sockets as they appear in the list. The first - // :ref:`match ` is used. - // For example, with the following match - // - // .. code-block:: yaml - // - // transport_socket_matches: - // - name: "enableMTLS" - // match: - // acceptMTLS: true - // transport_socket: - // name: tls - // config: { ... } # tls socket configuration - // - name: "defaultToPlaintext" - // match: {} - // transport_socket: - // name: "rawbuffer" - // - // Connections to the endpoints whose metadata value under *envoy.transport_socket* - // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - // - // If a :ref:`socket match ` with empty match - // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - // socket match in case above. - // - // If an endpoint metadata's value under *envoy.transport_socket* does not match any - // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - // *transport_socket* specified in this cluster. - // - // This field allows gradual and flexible transport socket configuration changes. - // - // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - // has "acceptPlaintext": "true" metadata information. - // - // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - // *TransportSocketMatch* in this field. 
Other client Envoys receive CDS without - // *transport_socket_match* set, and still send plain text traffic to the same cluster. - // - // TODO(incfly): add a detailed architecture doc on intended usage. - // [#not-implemented-hide:] - repeated TransportSocketMatch transport_socket_matches = 43; - - // Supplies the name of the cluster which must be unique across all clusters. - // The cluster name is used when emitting - // :ref:`statistics ` if :ref:`alt_stat_name - // ` is not provided. - // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An optional alternative to the cluster name to be used while emitting stats. - // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - // confused with :ref:`Router Filter Header - // `. - string alt_stat_name = 28; - - oneof cluster_discovery_type { - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; - - // The custom cluster type. - CustomClusterType cluster_type = 38; - } - - // Configuration to use for EDS updates for the Cluster. - EdsClusterConfig eds_cluster_config = 3; - - // The timeout for new network connections to hosts in the cluster. - google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; - - // Soft limit on size of the cluster’s connections read and write buffers. If - // unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - - // The :ref:`load balancer type ` to use - // when picking a host in the cluster. - LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; - - // If the service discovery type is - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS`, - // then hosts is required. - // - // .. 
attention:: - // - // **This field is deprecated**. Set the - // :ref:`load_assignment` field instead. - // - repeated core.Address hosts = 7; - - // Setting this is required for specifying members of - // :ref:`STATIC`, - // :ref:`STRICT_DNS` - // or :ref:`LOGICAL_DNS` clusters. - // This field supersedes :ref:`hosts` field. - // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log` - // once load_assignment is implemented.] - // - // .. attention:: - // - // Setting this allows non-EDS cluster types to contain embedded EDS equivalent - // :ref:`endpoint assignments`. - // Setting this overrides :ref:`hosts` values. - // - ClusterLoadAssignment load_assignment = 33; - - // Optional :ref:`active health checking ` - // configuration for the cluster. If no - // configuration is specified no health checking will be done and all cluster - // members will be considered healthy at all times. - repeated core.HealthCheck health_checks = 8; - - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; - - // Optional :ref:`circuit breaking ` for the cluster. - cluster.CircuitBreakers circuit_breakers = 10; - - // The TLS configuration for connections to the upstream cluster. If no TLS - // configuration is specified, TLS will not be used for new connections. - // - // .. attention:: - // - // Server certificate verification is not enabled by default. Configure - // :ref:`trusted_ca` to enable - // verification. - auth.UpstreamTlsContext tls_context = 11; - - // Additional options when handling HTTP requests. These options will be applicable to both - // HTTP1 and HTTP2 requests. 
- core.HttpProtocolOptions common_http_protocol_options = 29; - - // Additional options when handling HTTP1 requests. - core.Http1ProtocolOptions http_protocol_options = 13; - - // Even if default HTTP2 protocol options are desired, this field must be - // set so that Envoy will assume that the upstream supports HTTP/2 when - // making new HTTP connection pool connections. Currently, Envoy only - // supports prior knowledge for upstream connections. Even if TLS is used - // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - // connections to happen over plain text. - core.Http2ProtocolOptions http2_protocol_options = 14; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map extension_protocol_options = 35; - - // The extension_protocol_options field is used to provide extension-specific protocol options - // for upstream connections. The key should match the extension filter name, such as - // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - // specific options. - map typed_extension_protocol_options = 36; - - // If the DNS refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used as the cluster’s DNS refresh - // rate. If this setting is not specified, the value defaults to 5000ms. For - // cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. 
- google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration = {gt {}}]; - - // If the DNS failure refresh rate is specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - // other than :ref:`STRICT_DNS` and - // :ref:`LOGICAL_DNS` this setting is - // ignored. - // - // Note: Currently, DNS failures and empty DNS responses are not treated differently and this - // configuration is applied in both situations. - RefreshRate dns_failure_refresh_rate = 44; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 39; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; - - // If DNS resolvers are specified and the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`, - // this value is used to specify the cluster’s dns resolvers. - // If this setting is not specified, the value defaults to the default - // resolver, which uses /etc/resolv.conf for configuration. For cluster types - // other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - repeated core.Address dns_resolvers = 18; - - // If specified, outlier detection will be enabled for this upstream cluster. - // Each of the configuration values can be overridden via - // :ref:`runtime values `. - cluster.OutlierDetection outlier_detection = 19; - - // The interval for removing stale hosts from a cluster type - // :ref:`ORIGINAL_DST`. 
- // Hosts are considered stale if they have not been used - // as upstream destinations during this interval. New hosts are added - // to original destination clusters on demand as new connections are - // redirected to Envoy, causing the number of hosts in the cluster to - // grow over time. Hosts that are not stale (they are actively used as - // destinations) are kept in the cluster, which allows connections to - // them remain open, saving the latency that would otherwise be spent - // on opening new connections. If this setting is not specified, the - // value defaults to 5000ms. For cluster types other than - // :ref:`ORIGINAL_DST` - // this setting is ignored. - google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; - - // Optional configuration used to bind newly established upstream connections. - // This overrides any bind_config specified in the bootstrap proto. - // If the address and port are empty, no bind will be performed. - core.BindConfig upstream_bind_config = 21; - - // Configuration for load balancing subsetting. - LbSubsetConfig lb_subset_config = 22; - - // Optional configuration for the load balancing algorithm selected by - // LbPolicy. Currently only - // :ref:`RING_HASH` and - // :ref:`LEAST_REQUEST` - // has additional configuration options. - // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - // LbPolicy will generate an error at runtime. - oneof lb_config { - // Optional configuration for the Ring Hash load balancing policy. - RingHashLbConfig ring_hash_lb_config = 23; - - // Optional configuration for the Original Destination load balancing policy. - OriginalDstLbConfig original_dst_lb_config = 34; - - // Optional configuration for the LeastRequest load balancing policy. - LeastRequestLbConfig least_request_lb_config = 37; - } - - // Common configuration for all load balancer implementations. 
- CommonLbConfig common_lb_config = 27; - - // Optional custom transport socket implementation to use for upstream connections. - core.TransportSocket transport_socket = 24; - - // The Metadata field can be used to provide additional information about the - // cluster. It can be used for stats, logging, and varying filter behavior. - // Fields should use reverse DNS notation to denote which entity within Envoy - // will need the information. For instance, if the metadata is intended for - // the Router filter, the filter name should be specified as *envoy.router*. - core.Metadata metadata = 25; - - // Determines how Envoy selects the protocol used to speak to upstream hosts. - ClusterProtocolSelection protocol_selection = 26; - - // Optional options for upstream connections. - UpstreamConnectionOptions upstream_connection_options = 30; - - // If an upstream host becomes unhealthy (as determined by the configured health checks - // or outlier detection), immediately close all connections to the failed host. - // - // .. note:: - // - // This is currently only supported for connections created by tcp_proxy. - // - // .. note:: - // - // The current implementation of this feature closes all connections immediately when - // the unhealthy status is detected. If there are a large number of connections open - // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - // time exclusively closing these connections, and not processing any other traffic. - bool close_connections_on_host_health_failure = 31; - - // If this cluster uses EDS or STRICT_DNS to configure its hosts, immediately drain - // connections from any hosts that are removed from service discovery. - // - // This only affects behavior for hosts that are being actively health checked. - // If this flag is not set to true, Envoy will wait until the hosts fail active health - // checking before removing it from the cluster. 
- bool drain_connections_on_host_removal = 32; - - // An (optional) network filter chain, listed in the order the filters should be applied. - // The chain will be applied to all outgoing connections that Envoy makes to the upstream - // servers of this cluster. - repeated cluster.Filter filters = 40; - - // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. - LoadBalancingPolicy load_balancing_policy = 41; - - // [#not-implemented-hide:] - // If present, tells the client where to send load reports via LRS. If not present, the - // client will fall back to a client-side default, which may be either (a) don't send any - // load reports or (b) send load reports for all clusters to a single default server - // (which may be configured in the bootstrap file). - // - // Note that if multiple clusters point to the same LRS server, the client may choose to - // create a separate stream for each cluster or it may choose to coalesce the data for - // multiple clusters onto a single stream. Either way, the client must make sure to send - // the data for any given cluster on no more than one stream. - // - // [#next-major-version: In the v3 API, we should consider restructuring this somehow, - // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - // from the LRS stream here.] - core.ConfigSource lrs_server = 42; -} - -// [#not-implemented-hide:] Extensible load balancing policy configuration. -// -// Every LB policy defined via this mechanism will be identified via a unique name using reverse -// DNS notation. If the policy needs configuration parameters, it must define a message for its -// own configuration, which will be stored in the config field. The name of the policy will tell -// clients which type of message they should expect to see in the config field. 
-// -// Note that there are cases where it is useful to be able to independently select LB policies -// for choosing a locality and for choosing an endpoint within that locality. For example, a -// given deployment may always use the same policy to choose the locality, but for choosing the -// endpoint within the locality, some clusters may use weighted-round-robin, while others may -// use some sort of session-based balancing. -// -// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a -// child LB policy for each locality. For each request, the parent chooses the locality and then -// delegates to the child policy for that locality to choose the endpoint within the locality. -// -// To facilitate this, the config message for the top-level LB policy may include a field of -// type LoadBalancingPolicy that specifies the child policy. -message LoadBalancingPolicy { - message Policy { - // Required. The name of the LB policy. - string name = 1; - - // Optional config for the LB policy. - // No more than one of these two fields may be populated. - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; + option (google.api.http).post = "/v2/discovery:clusters"; + option (google.api.http).body = "*"; } - - // Each client will iterate over the list in order and stop at the first policy that it - // supports. This provides a mechanism for starting to use new LB policies that are not yet - // supported by all clients. - repeated Policy policies = 1; -} - -// An extensible structure containing the address Envoy should bind to when -// establishing upstream connections. -message UpstreamBindConfig { - // The address Envoy should bind to when establishing upstream connections. - core.Address source_address = 1; } -message UpstreamConnectionOptions { - // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - core.TcpKeepalive tcp_keepalive = 1; +// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. +message CdsDummy { } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster.proto new file mode 100644 index 00000000000..2d61ef3f22b --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster.proto @@ -0,0 +1,848 @@ +syntax = "proto3"; + +package envoy.api.v2; + +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/cluster/circuit_breaker.proto"; +import "envoy/api/v2/cluster/filter.proto"; +import "envoy/api/v2/cluster/outlier_detection.proto"; +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/core/health_check.proto"; +import "envoy/api/v2/core/protocol.proto"; +import "envoy/api/v2/endpoint.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "ClusterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; + +// [#protodoc-title: Cluster configuration] + +// Configuration for a single upstream cluster. +// [#next-free-field: 48] +message Cluster { + // Refer to :ref:`service discovery type ` + // for an explanation on each type. + enum DiscoveryType { + // Refer to the :ref:`static discovery type` + // for an explanation. + STATIC = 0; + + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. 
+ STRICT_DNS = 1; + + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + LOGICAL_DNS = 2; + + // Refer to the :ref:`service discovery type` + // for an explanation. + EDS = 3; + + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + ORIGINAL_DST = 4; + } + + // Refer to :ref:`load balancer type ` architecture + // overview section for information on each type. + enum LbPolicy { + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + ROUND_ROBIN = 0; + + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + LEAST_REQUEST = 1; + + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + RING_HASH = 2; + + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + RANDOM = 3; + + // Refer to the :ref:`original destination load balancing + // policy` + // for an explanation. + // + // .. attention:: + // + // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. + // + ORIGINAL_DST_LB = 4 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; + + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + MAGLEV = 5; + + // This load balancer type must be specified if the configured cluster provides a cluster + // specific load balancer. Consult the configured cluster's documentation for whether to set + // this option or not. + CLUSTER_PROVIDED = 6; + + // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy + // ` field to determine the LB policy. + // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field + // and instead using the new load_balancing_policy field as the one and only mechanism for + // configuring this.] 
+ LOAD_BALANCING_POLICY_CONFIG = 7; + } + + // When V4_ONLY is selected, the DNS resolver will only perform a lookup for + // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + // only perform a lookup for addresses in the IPv6 family. If AUTO is + // specified, the DNS resolver will first perform a lookup for addresses in + // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // For cluster types other than + // :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS`, + // this setting is + // ignored. + enum DnsLookupFamily { + AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; + } + + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; + + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } + + // TransportSocketMatch specifies what transport socket config will be used + // when the match conditions are satisfied. + message TransportSocketMatch { + // The name of the match, used in stats generation. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Optional endpoint metadata match criteria. + // The connection to the endpoint with metadata matching what is set in this field + // will use the transport socket configuration specified here. + // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match + // against the values specified in this field. + google.protobuf.Struct match = 2; + + // The configuration of the transport socket. + core.TransportSocket transport_socket = 3; + } + + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. 
+ string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + // Configuration for the source of EDS updates for this Cluster. + core.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. + string service_name = 2; + } + + // Optionally divide the endpoints in this cluster into subsets defined by + // endpoint metadata and selected by route and weighted cluster metadata. + // [#next-free-field: 8] + message LbSubsetConfig { + // If NO_FALLBACK is selected, a result + // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + // any cluster endpoint may be returned (subject to policy, health checks, + // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + enum LbSubsetFallbackPolicy { + NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; + } + + // Specifications for subsets. + message LbSubsetSelector { + // Allows to override top level fallback policy per selector. + enum LbSubsetSelectorFallbackPolicy { + // If NOT_DEFINED top level config fallback policy is used instead. + NOT_DEFINED = 0; + + // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. + NO_FALLBACK = 1; + + // If ANY_ENDPOINT is selected, any cluster endpoint may be returned + // (subject to policy, health checks, etc). + ANY_ENDPOINT = 2; + + // If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. 
+ DEFAULT_SUBSET = 3; + + // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + // keys reduced to + // :ref:`fallback_keys_subset`. + // It allows for a fallback to a different, less specific selector if some of the keys of + // the selector are considered optional. + KEYS_SUBSET = 4; + } + + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + + // The behavior used when no endpoint subset matches the selected route's + // metadata. + LbSubsetSelectorFallbackPolicy fallback_policy = 2 + [(validate.rules).enum = {defined_only: true}]; + + // Subset of + // :ref:`keys` used by + // :ref:`KEYS_SUBSET` + // fallback policy. + // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + // For any other fallback policy the parameter is not used and should not be set. + // Only values also present in + // :ref:`keys` are allowed, but + // `fallback_keys_subset` cannot be equal to `keys`. + repeated string fallback_keys_subset = 3; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. + // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + + // For each entry, LbEndpoint.Metadata's + // *envoy.lb* namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. 
code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + repeated LbSubsetSelector subset_selectors = 3; + + // If true, routing to subsets will take into account the localities and locality weights of the + // endpoints when making the routing decision. + // + // There are some potential pitfalls associated with enabling this feature, as the resulting + // traffic split after applying both a subset match and locality weights might be undesirable. + // + // Consider for example a situation in which you have 50/50 split across two localities X/Y + // which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + // host selected but Y having 100, then a lot more load is being dumped on the single host in X + // than originally anticipated in the load balancing assignment delivered via EDS. + bool locality_weight_aware = 4; + + // When used with locality_weight_aware, scales the weight of each locality by the ratio + // of hosts in the subset vs hosts in the original subset. This aims to even out the load + // going to an individual locality if said locality is disproportionately affected by the + // subset predicate. + bool scale_locality_weight = 5; + + // If true, when a fallback policy is configured and its corresponding subset fails to find + // a host this will cause any host to be selected instead. + // + // This is useful when using the default subset as the fallback policy, given the default + // subset might become empty. With this option enabled, if that happens the LB will attempt + // to select a host from the entire cluster. 
+ bool panic_mode_any = 6; + + // If true, metadata specified for a metadata key will be matched against the corresponding + // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + // and any of the elements in the list matches the criteria. + bool list_as_any = 7; + } + + // Specific configuration for the LeastRequest load balancing policy. + message LeastRequestLbConfig { + // The number of random healthy hosts from which the host with the fewest active requests will + // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; + } + + // Specific configuration for the :ref:`RingHash` + // load balancing policy. + message RingHashLbConfig { + // The hash function used to hash hosts onto the ketama ring. + enum HashFunction { + // Use `xxHash `_, this is the default hash function. + XX_HASH = 0; + + // Use `MurmurHash2 `_, this is compatible with + // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + // on Linux and not macOS. + MURMUR_HASH_2 = 1; + } + + reserved 2; + + // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + // provided host) the better the request distribution will reflect the desired weights. Defaults + // to 1024 entries, and limited to 8M entries. See also + // :ref:`maximum_ring_size`. + google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; + + // The hash function used to hash hosts onto the ketama ring. The value defaults to + // :ref:`XX_HASH`. + HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; + + // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + // to further constrain resource use. See also + // :ref:`minimum_ring_size`. 
+ google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; + } + + // Specific configuration for the + // :ref:`Original Destination ` + // load balancing policy. + message OriginalDstLbConfig { + // When true, :ref:`x-envoy-original-dst-host + // ` can be used to override destination + // address. + // + // .. attention:: + // + // This header isn't sanitized by default, so enabling this feature allows HTTP clients to + // route traffic to arbitrary hosts and/or ports, which may have serious security + // consequences. + bool use_http_header = 1; + } + + // Common configuration for all load balancer implementations. + // [#next-free-field: 7] + message CommonLbConfig { + // Configuration for :ref:`zone aware routing + // `. + message ZoneAwareLbConfig { + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + type.Percent routing_enabled = 1; + + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + google.protobuf.UInt64Value min_cluster_size = 2; + + // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + // mode`. Instead, the cluster will fail all + // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + // failing service. + bool fail_traffic_on_panic = 3; + } + + // Configuration for :ref:`locality weighted load balancing + // ` + message LocalityWeightedLbConfig { + } + + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // To disable panic mode, set to 0%. 
+ // + // .. note:: + // The specified percent will be truncated to the nearest 1%. + type.Percent healthy_panic_threshold = 1; + + oneof locality_config_specifier { + ZoneAwareLbConfig zone_aware_lb_config = 2; + + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } + + // If set, all health check/weight/metadata updates that happen within this duration will be + // merged and delivered in one shot when the duration expires. The start of the duration is when + // the first update happens. This is useful for big clusters, with potentially noisy deploys + // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + // cluster). Please always keep in mind that the use of sandbox technologies may change this + // behavior. + // + // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + // window to 0. + // + // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + // because merging those updates isn't currently safe. See + // https://github.com/envoyproxy/envoy/pull/3941. + google.protobuf.Duration update_merge_window = 4; + + // If set to true, Envoy will not consider new hosts when computing load balancing weights until + // they have been health checked for the first time. This will have no effect unless + // active health checking is also configured. + // + // Ignoring a host means that for any load balancing calculations that adjust weights based + // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and + // panic mode) Envoy will exclude these hosts in the denominator. 
+ // + // For example, with hosts in two priorities P0 and P1, where P0 looks like + // {healthy, unhealthy (new), unhealthy (new)} + // and where P1 looks like + // {healthy, healthy} + // all traffic will still hit P0, as 1 / (3 - 2) = 1. + // + // Enabling this will allow scaling up the number of hosts for a given cluster without entering + // panic mode or triggering priority spillover, assuming the hosts pass the first health check. + // + // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not + // contribute to the calculation when deciding whether panic mode is enabled or not. + bool ignore_new_hosts_until_first_hc = 5; + + // If set to `true`, the cluster manager will drain all existing + // connections to upstream hosts whenever hosts are added or removed from the cluster. + bool close_connections_on_host_set_change = 6; + } + + message RefreshRate { + // Specifies the base interval between refreshes. This parameter is required and must be greater + // than zero and less than + // :ref:`max_interval `. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {nanos: 1000000} + }]; + + // Specifies the maximum interval between refreshes. This parameter is optional, but must be + // greater than or equal to the + // :ref:`base_interval ` if set. The default + // is 10 times the :ref:`base_interval `. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; + } + + reserved 12, 15; + + // Configuration to use different transport sockets for different endpoints. + // The entry of *envoy.transport_socket* in the + // :ref:`LbEndpoint.Metadata ` + // is used to match against the transport sockets as they appear in the list. The first + // :ref:`match ` is used. + // For example, with the following match + // + // .. 
code-block:: yaml + // + // transport_socket_matches: + // - name: "enableMTLS" + // match: + // acceptMTLS: true + // transport_socket: + // name: envoy.transport_sockets.tls + // config: { ... } # tls socket configuration + // - name: "defaultToPlaintext" + // match: {} + // transport_socket: + // name: envoy.transport_sockets.raw_buffer + // + // Connections to the endpoints whose metadata value under *envoy.transport_socket* + // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + // + // If a :ref:`socket match ` with empty match + // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + // socket match in case above. + // + // If an endpoint metadata's value under *envoy.transport_socket* does not match any + // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or + // *transport_socket* specified in this cluster. + // + // This field allows gradual and flexible transport socket configuration changes. + // + // The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + // has "acceptPlaintext": "true" metadata information. + // + // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + // *TransportSocketMatch* in this field. Other client Envoys receive CDS without + // *transport_socket_match* set, and still send plain text traffic to the same cluster. + // + // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + repeated TransportSocketMatch transport_socket_matches = 43; + + // Supplies the name of the cluster which must be unique across all clusters. 
+ // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } + + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; + + // If the service discovery type is + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS`, + // then hosts is required. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`load_assignment` field instead. + // + repeated core.Address hosts = 7 [deprecated = true]; + + // Setting this is required for specifying members of + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS` clusters. + // This field supersedes the *hosts* field in the v2 API. + // + // .. 
attention:: + // + // Setting this allows non-EDS cluster types to contain embedded EDS equivalent + // :ref:`endpoint assignments`. + // + ClusterLoadAssignment load_assignment = 33; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. + repeated core.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + cluster.CircuitBreakers circuit_breakers = 10; + + // The TLS configuration for connections to the upstream cluster. + // + // .. attention:: + // + // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are + // set, `transport_socket` takes priority. + auth.UpstreamTlsContext tls_context = 11 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // HTTP protocol options that are applied only to upstream HTTP connections. + // These options apply to all HTTP versions. + core.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; + + // Additional options when handling HTTP requests upstream. These options will be applicable to + // both HTTP1 and HTTP2 requests. + core.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. 
Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.Http2ProtocolOptions http2_protocol_options = 14; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map extension_protocol_options = 35 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // The extension_protocol_options field is used to provide extension-specific protocol options + // for upstream connections. The key should match the extension filter name, such as + // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + // specific options. + map typed_extension_protocol_options = 36; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. The value configured must be at least 1ms. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; + + // If the DNS failure refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + // other than :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS` this setting is + // ignored. 
+ // + // Note: Currently, DNS failures and empty DNS responses are not treated differently and this + // configuration is applied in both situations. + RefreshRate dns_failure_refresh_rate = 44; + + // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + // resolution. + bool respect_dns_ttl = 39; + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + repeated core.Address dns_resolvers = 18; + + // [#next-major-version: Reconcile DNS options in a single message.] + // Always use TCP queries instead of UDP queries for DNS lookups. + bool use_tcp_for_dns_lookups = 45; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + cluster.OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. 
Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.BindConfig upstream_bind_config = 21; + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` and + // :ref:`LEAST_REQUEST` + // has additional configuration options. + // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding + // LbPolicy will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + + // Optional configuration for the Original Destination load balancing policy. + OriginalDstLbConfig original_dst_lb_config = 34; + + // Optional configuration for the LeastRequest load balancing policy. + LeastRequestLbConfig least_request_lb_config = 37; + } + + // Common configuration for all load balancer implementations. + CommonLbConfig common_lb_config = 27; + + // Optional custom transport socket implementation to use for upstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`UpstreamTlsContexts ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. 
+ core.TransportSocket transport_socket = 24; + + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as *envoy.router*. + core.Metadata metadata = 25; + + // Determines how Envoy selects the protocol used to speak to upstream hosts. + ClusterProtocolSelection protocol_selection = 26; + + // Optional options for upstream connections. + UpstreamConnectionOptions upstream_connection_options = 30; + + // If an upstream host becomes unhealthy (as determined by the configured health checks + // or outlier detection), immediately close all connections to the failed host. + // + // .. note:: + // + // This is currently only supported for connections created by tcp_proxy. + // + // .. note:: + // + // The current implementation of this feature closes all connections immediately when + // the unhealthy status is detected. If there are a large number of connections open + // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + // time exclusively closing these connections, and not processing any other traffic. + bool close_connections_on_host_health_failure = 31; + + // If set to true, Envoy will ignore the health value of a host when processing its removal + // from service discovery. This means that if active health checking is used, Envoy will *not* + // wait for the endpoint to go unhealthy before removing it. + bool drain_connections_on_host_removal = 32 + [(udpa.annotations.field_migrate).rename = "ignore_health_on_host_removal"]; + + // An (optional) network filter chain, listed in the order the filters should be applied. 
+ // The chain will be applied to all outgoing connections that Envoy makes to the upstream + // servers of this cluster. + repeated cluster.Filter filters = 40; + + // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the + // :ref:`lb_policy` field has the value + // :ref:`LOAD_BALANCING_POLICY_CONFIG`. + LoadBalancingPolicy load_balancing_policy = 41; + + // [#not-implemented-hide:] + // If present, tells the client where to send load reports via LRS. If not present, the + // client will fall back to a client-side default, which may be either (a) don't send any + // load reports or (b) send load reports for all clusters to a single default server + // (which may be configured in the bootstrap file). + // + // Note that if multiple clusters point to the same LRS server, the client may choose to + // create a separate stream for each cluster or it may choose to coalesce the data for + // multiple clusters onto a single stream. Either way, the client must make sure to send + // the data for any given cluster on no more than one stream. + // + // [#next-major-version: In the v3 API, we should consider restructuring this somehow, + // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + // from the LRS stream here.] + core.ConfigSource lrs_server = 42; + + // If track_timeout_budgets is true, the :ref:`timeout budget histograms + // ` will be published for each + // request. These show what percentage of a request's per try and global timeout was used. A value + // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + // of 100 would indicate that the request took the entirety of the timeout given to it. + bool track_timeout_budgets = 47; +} + +// [#not-implemented-hide:] Extensible load balancing policy configuration. +// +// Every LB policy defined via this mechanism will be identified via a unique name using reverse +// DNS notation. 
If the policy needs configuration parameters, it must define a message for its +// own configuration, which will be stored in the config field. The name of the policy will tell +// clients which type of message they should expect to see in the config field. +// +// Note that there are cases where it is useful to be able to independently select LB policies +// for choosing a locality and for choosing an endpoint within that locality. For example, a +// given deployment may always use the same policy to choose the locality, but for choosing the +// endpoint within the locality, some clusters may use weighted-round-robin, while others may +// use some sort of session-based balancing. +// +// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a +// child LB policy for each locality. For each request, the parent chooses the locality and then +// delegates to the child policy for that locality to choose the endpoint within the locality. +// +// To facilitate this, the config message for the top-level LB policy may include a field of +// type LoadBalancingPolicy that specifies the child policy. +message LoadBalancingPolicy { + message Policy { + // Required. The name of the LB policy. + string name = 1; + + // Optional config for the LB policy. + // No more than one of these two fields may be populated. + google.protobuf.Struct config = 2 [deprecated = true]; + + google.protobuf.Any typed_config = 3; + } + + // Each client will iterate over the list in order and stop at the first policy that it + // supports. This provides a mechanism for starting to use new LB policies that are not yet + // supported by all clients. + repeated Policy policies = 1; +} + +// An extensible structure containing the address Envoy should bind to when +// establishing upstream connections. +message UpstreamBindConfig { + // The address Envoy should bind to when establishing upstream connections. 
+ core.Address source_address = 1; +} + +message UpstreamConnectionOptions { + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + core.TcpKeepalive tcp_keepalive = 1; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/circuit_breaker.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/circuit_breaker.proto index d2e0a328e49..893d1f1aa16 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/circuit_breaker.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/circuit_breaker.proto @@ -2,15 +2,20 @@ syntax = "proto3"; package envoy.api.v2.cluster; +import "envoy/api/v2/core/base.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.cluster"; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; - -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/wrappers.proto"; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; // [#protodoc-title: Circuit breakers] @@ -19,12 +24,26 @@ import "google/protobuf/wrappers.proto"; message CircuitBreakers { // A Thresholds defines CircuitBreaker settings for a // :ref:`RoutingPriority`. + // [#next-free-field: 9] message Thresholds { + message RetryBudget { + // Specifies the limit on concurrent retries as a percentage of the sum of active requests and + // active pending requests. For example, if there are 100 active requests and the + // budget_percent is set to 25, there may be 25 active retries. + // + // This parameter is optional. Defaults to 20%. 
+ type.Percent budget_percent = 1; + + // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the + // number of active retries may never go below this number. + // + // This parameter is optional. Defaults to 3. + google.protobuf.UInt32Value min_retry_concurrency = 2; + } + // The :ref:`RoutingPriority` // the specified CircuitBreaker settings apply to. - // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once - // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] - core.RoutingPriority priority = 1; + core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; // The maximum number of connections that Envoy will make to the upstream // cluster. If not specified, the default is 1024. @@ -42,9 +61,23 @@ message CircuitBreakers { // upstream cluster. If not specified, the default is 3. google.protobuf.UInt32Value max_retries = 5; + // Specifies a limit on concurrent retries in relation to the number of active requests. This + // parameter is optional. + // + // .. note:: + // + // If this field is set, the retry budget will override any configured retry circuit + // breaker. + RetryBudget retry_budget = 8; + // If track_remaining is true, then stats will be published that expose // the number of resources remaining until the circuit breakers open. If // not specified, the default is false. + // + // .. note:: + // + // If a retry budget is used in lieu of the max_retries circuit breaker, + // the remaining retry resources remaining will not be tracked. 
bool track_remaining = 6; // The maximum number of connection pools per cluster that Envoy will concurrently support at diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/filter.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/filter.proto index b89b2a6b778..67f3c3ba5e0 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/filter.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/filter.proto @@ -2,19 +2,21 @@ syntax = "proto3"; package envoy.api.v2.cluster; +import "google/protobuf/any.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.cluster"; option java_outer_classname = "FilterProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; - -import "google/protobuf/any.proto"; - -import "validate/validate.proto"; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; // [#protodoc-title: Upstream filters] -// // Upstream filters apply to the connections to the upstream cluster hosts. + message Filter { // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. 
diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/outlier_detection.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/outlier_detection.proto index 4702bd0a6f1..0cc638ceb49 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/outlier_detection.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/cluster/outlier_detection.proto @@ -2,21 +2,24 @@ syntax = "proto3"; package envoy.api.v2.cluster; -option java_outer_classname = "OutlierDetectionProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.cluster"; -option csharp_namespace = "Envoy.Api.V2.ClusterNS"; -option ruby_package = "Envoy.Api.V2.ClusterNS"; - import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.cluster"; +option java_outer_classname = "OutlierDetectionProto"; +option java_multiple_files = true; +option csharp_namespace = "Envoy.Api.V2.ClusterNS"; +option ruby_package = "Envoy.Api.V2.ClusterNS"; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; + // [#protodoc-title: Outlier detection] // See the :ref:`architecture overview ` for // more information on outlier detection. 
+// [#next-free-field: 21] message OutlierDetection { // The number of consecutive 5xx responses or local origin errors that are mapped // to 5xx error codes before a consecutive 5xx ejection diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/address.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/address.proto index 89fd0adb1eb..d2e2b653900 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/address.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/address.proto @@ -2,16 +2,18 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "envoy/api/v2/core/base.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "AddressProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: Network addresses] message Pipe { @@ -20,13 +22,15 @@ message Pipe { // Paths starting with '@' will result in an error in environments other than // Linux. string path = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The mode for the Pipe. Not applicable for abstract sockets. 
+ uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } +// [#next-free-field: 7] message SocketAddress { enum Protocol { TCP = 0; - - // [#not-implemented-hide:] UDP = 1; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/base.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/base.proto index eca00760575..67a97dd6e25 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/base.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/base.proto @@ -2,19 +2,22 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "BaseProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "envoy/api/v2/core/http_uri.proto"; import "envoy/type/percent.proto"; +import "envoy/type/semantic_version.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "BaseProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: Common types] // Envoy supports :ref:`upstream priority routing @@ -76,9 +79,49 @@ message Locality { string sub_zone = 3; } +// BuildVersion combines SemVer version of extension with free-form build information +// (i.e. 'alpha', 'private-build') as a set of strings. +message BuildVersion { + // SemVer version of extension. + type.SemanticVersion version = 1; + + // Free-form build information. + // Envoy defines several well known keys in the source/common/common/version.h file + google.protobuf.Struct metadata = 2; +} + +// Version and identification for an Envoy extension. +// [#next-free-field: 6] +message Extension { + // This is the name of the Envoy filter as specified in the Envoy + // configuration, e.g. 
envoy.router, com.acme.widget. + string name = 1; + + // Category of the extension. + // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" + // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from + // acme.com vendor. + // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] + string category = 2; + + // [#not-implemented-hide:] Type descriptor of extension configuration proto. + // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] + // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + string type_descriptor = 3; + + // The version is a property of the extension and maintained independently + // of other extensions and the Envoy API. + // This field is not set when extension did not provide version information. + BuildVersion version = 4; + + // Indicates that the extension is present but was disabled via dynamic configuration. + bool disabled = 5; +} + // Identifies a specific Envoy instance. The node identifier is presented to the // management server, which may use this identifier to distinguish per Envoy // configuration for serving. +// [#next-free-field: 11] message Node { // An opaque node identifier for the Envoy node. This also provides the local // service node name. It should be set if any of the following features are @@ -91,7 +134,8 @@ message Node { // Defines the local service cluster name where Envoy is running. Though // optional, it should be set if any of the following features are used: // :ref:`statsd `, :ref:`health check cluster - // verification `, + // verification + // `, // :ref:`runtime override directory `, // :ref:`user agent addition // `, @@ -111,7 +155,31 @@ message Node { // This is motivated by informing a management server during canary which // version of Envoy is being tested in a heterogeneous fleet. This will be set // by Envoy in management server RPCs. 
- string build_version = 5; + // This field is deprecated in favor of the user_agent_name and user_agent_version values. + string build_version = 5 [deprecated = true]; + + // Free-form string that identifies the entity requesting config. + // E.g. "envoy" or "grpc" + string user_agent_name = 6; + + oneof user_agent_version_type { + // Free-form string that identifies the version of the entity requesting config. + // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" + string user_agent_version = 7; + + // Structured version of the entity requesting config. + BuildVersion user_agent_build_version = 8; + } + + // List of extensions and their versions supported by the node. + repeated Extension extensions = 9; + + // Client feature support list. These are well known features described + // in the Envoy API repository for a given major version of an API. Client features + // use reverse DNS naming scheme, for example `com.acme.feature`. + // See :ref:`the list of features ` that xDS client may + // support. + repeated string client_features = 10; } // Metadata provides additional inputs to filters based on matched listeners, @@ -135,6 +203,7 @@ message Node { // * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an // endpoint and is also used during header processing // (x-envoy-upstream-canary) and for stats purposes. +// [#next-major-version: move to type/metadata/v2] message Metadata { // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* // namespace is reserved for Envoy's built-in filters. @@ -150,6 +219,17 @@ message RuntimeUInt32 { string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } +// Runtime derived bool with a default when not specified. +message RuntimeFeatureFlag { + // Default value if runtime value is not available. + google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; + + // Runtime key to get value for comparison. This value is used if defined. 
The boolean value must + // be represented via its + // `canonical JSON encoding `_. + string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; +} + // Header name/value pair. message HeaderValue { // Header name. @@ -228,7 +308,7 @@ message TransportSocket { // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } @@ -236,6 +316,7 @@ message TransportSocket { // Generic socket option message. This would be used to set socket options that // might not exist in upstream kernels or precompiled Envoy binaries. +// [#next-free-field: 7] message SocketOption { enum SocketState { // Socket options are applied after socket creation but before binding the socket to a port @@ -275,6 +356,14 @@ message SocketOption { // Runtime derived FractionalPercent with defaults for when the numerator or denominator is not // specified via a runtime key. +// +// .. note:: +// +// Parsing of the runtime key's data is implemented such that it may be represented as a +// :ref:`FractionalPercent ` proto represented as JSON/YAML +// and may also be represented as an integer with the assumption that the value is an integral +// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse +// as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. message RuntimeFractionalPercent { // Default value if the runtime value's for the numerator/denominator keys are not available. 
type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/config_source.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/config_source.proto index 240d37b81ee..60949ca1c8e 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/config_source.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/config_source.proto @@ -2,27 +2,47 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "ConfigSourceProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "envoy/api/v2/core/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "ConfigSourceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: Configuration sources] +// xDS API version. This is used to describe both resource and transport +// protocol versions (in distinct configuration fields). +enum ApiVersion { + // When not specified, we assume v2, to ease migration to Envoy's stable API + // versioning. If a client does not support v2 (e.g. due to deprecation), this + // is an invalid value. + AUTO = 0; + + // Use xDS v2 API. + V2 = 1; + + // Use xDS v3 API. + V3 = 2; +} + // API configuration source. This identifies the API type and cluster that Envoy // will use to fetch an xDS API. +// [#next-free-field: 9] message ApiConfigSource { // APIs may be fetched via either REST or gRPC. enum ApiType { // Ideally this would be 'reserved 0' but one can't reserve the default // value. Instead we throw an exception if this is ever used. 
- UNSUPPORTED_REST_LEGACY = 0 [deprecated = true]; + UNSUPPORTED_REST_LEGACY = 0 + [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; // REST-JSON v2 API. The `canonical JSON encoding // `_ for @@ -37,12 +57,18 @@ message ApiConfigSource { // with every update, the xDS server only sends what has changed since the last update. // // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. - // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. + // Do not use for other xDSes. + // [#comment:TODO(fredlas) update/remove this warning when appropriate.] DELTA_GRPC = 3; } + // API type (gRPC, REST, delta gRPC) ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; + // API version for xDS transport protocol. This describes the xDS gRPC/REST + // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; + // Cluster names should be used only with REST. If > 1 // cluster is defined, clusters will be cycled through if any kind of failure // occurs. @@ -101,7 +127,7 @@ message RateLimitSettings { // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. -// [#comment:next free field: 6] +// [#next-free-field: 7] message ConfigSource { oneof config_source_specifier { option (validate.required) = true; @@ -148,4 +174,9 @@ message ConfigSource { // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another // timeout applies). The default is 15s. google.protobuf.Duration initial_fetch_timeout = 4; + + // API version for xDS resources. This implies the type URLs that the client + // will request for resources and the resource type that the client will in + // turn expect to be delivered. 
+ ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/grpc_service.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/grpc_service.proto index cbe6c8296e1..17f769b14e8 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/grpc_service.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/grpc_service.proto @@ -2,10 +2,6 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "GrpcServiceProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; @@ -13,20 +9,30 @@ import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/sensitive.proto"; + +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "GrpcServiceProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: gRPC services] // gRPC service configuration. This is used by :ref:`ApiConfigSource // ` and filter configurations. +// [#next-free-field: 6] message GrpcService { message EnvoyGrpc { // The name of the upstream gRPC cluster. SSL credentials will be supplied - // in the :ref:`Cluster ` :ref:`tls_context - // `. + // in the :ref:`Cluster ` :ref:`transport_socket + // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } + // [#next-free-field: 7] message GoogleGrpc { // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. message SslCredentials { @@ -34,7 +40,7 @@ message GrpcService { DataSource root_certs = 1; // PEM encoded client private key. 
- DataSource private_key = 2; + DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; // PEM encoded client certificate chain. DataSource cert_chain = 3; @@ -60,6 +66,7 @@ message GrpcService { } } + // [#next-free-field: 8] message CallCredentials { message ServiceAccountJWTAccessCredentials { string json_key = 1; @@ -77,12 +84,54 @@ message GrpcService { string name = 1; oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } + // Security token service configuration that allows Google gRPC to + // fetch security token from an OAuth 2.0 authorization server. + // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and + // https://github.com/grpc/grpc/pull/19587. + // [#next-free-field: 10] + message StsService { + // URI of the token exchange service that handles token exchange requests. + // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by + // https://github.com/envoyproxy/protoc-gen-validate/issues/303] + string token_exchange_service_uri = 1; + + // Location of the target service or resource where the client + // intends to use the requested security token. + string resource = 2; + + // Logical name of the target service where the client intends to + // use the requested security token. + string audience = 3; + + // The desired scope of the requested security token in the + // context of the service or resource where the token will be used. + string scope = 4; + + // Type of the requested security token. + string requested_token_type = 5; + + // The path of subject token, a security token that represents the + // identity of the party on behalf of whom the request is being made. + string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; + + // Type of the subject token. 
+ string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; + + // The path of actor token, a security token that represents the identity + // of the acting party. The acting party is authorized to use the + // requested security token and act on behalf of the subject. + string actor_token_path = 8; + + // Type of the actor token. + string actor_token_type = 9; + } + oneof credential_specifier { option (validate.required) = true; @@ -110,6 +159,11 @@ message GrpcService { // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. MetadataCredentialsFromPlugin from_plugin = 6; + + // Custom security token service which implements OAuth 2.0 token exchange. + // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 + // See https://github.com/grpc/grpc/pull/19587. 
+ StsService sts_service = 7; } } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/health_check.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/health_check.proto index 078a5a4beef..91aeb76b8b4 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/health_check.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/health_check.proto @@ -2,11 +2,9 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "HealthCheckProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "envoy/api/v2/core/base.proto"; +import "envoy/type/http.proto"; +import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; import "google/protobuf/any.proto"; @@ -14,8 +12,15 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "HealthCheckProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: Health check] // * Health checking :ref:`architecture overview `. // * If health checking is configured for a cluster, additional statistics are emitted. They are @@ -47,6 +52,7 @@ enum HealthStatus { DEGRADED = 5; } +// [#next-free-field: 22] message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { @@ -61,7 +67,7 @@ message HealthCheck { } } - // [#comment:next free field: 10] + // [#next-free-field: 12] message HttpHealthCheck { // The value of the host header in the HTTP health check request. 
If // left empty (default value), the name of the cluster this health check is associated @@ -81,7 +87,12 @@ message HealthCheck { // An optional service name parameter which is used to validate the identity of // the health checked cluster. See the :ref:`architecture overview // ` for more information. - string service_name = 5; + // + // .. attention:: + // + // This field has been deprecated in favor of `service_name_matcher` for better flexibility + // over matching with service-cluster name. + string service_name = 5 [deprecated = true]; // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see @@ -95,12 +106,24 @@ message HealthCheck { repeated string request_headers_to_remove = 8; // If set, health checks will be made using http/2. - bool use_http2 = 7; + // Deprecated, use :ref:`codec_client_type + // ` instead. + bool use_http2 = 7 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - // semantics of :ref:`Int64Range `. + // semantics of :ref:`Int64Range `. The start and end of each + // range are required. Only statuses in the range [100, 600) are allowed. repeated type.Int64Range expected_statuses = 9; + + // Use specified application protocol for health checks. + type.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; + + // An optional service name parameter which is used to validate the identity of + // the health checked cluster using a :ref:`StringMatcher + // `. See the :ref:`architecture overview + // ` for more information. 
+ type.matcher.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { @@ -147,12 +170,24 @@ message HealthCheck { // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } + // Health checks occur over the transport socket specified for the cluster. This implies that if a + // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. + // + // This allows overriding the cluster TLS settings, just for health check connections. + message TlsOptions { + // Specifies the ALPN protocols for health check connections. This is useful if the + // corresponding upstream is using ALPN-based :ref:`FilterChainMatch + // ` along with different protocols for health checks + // versus data connections. If empty, no ALPN protocols will be set on health check connections. + repeated string alpn_protocols = 1; + } + reserved 10; // The time to wait for a health check response. If the timeout is reached the @@ -188,12 +223,12 @@ message HealthCheck { // The number of unhealthy health checks required before a host is marked // unhealthy. Note that for *http* health checking if a host responds with 503 // this threshold is ignored and the host is considered unhealthy immediately. - google.protobuf.UInt32Value unhealthy_threshold = 4; + google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; // The number of healthy health checks required before a host is marked // healthy. Note that during startup, only a single successful health check is // required to mark a host healthy. 
- google.protobuf.UInt32Value healthy_threshold = 5; + google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Non-serving port for health checking. google.protobuf.UInt32Value alt_port = 6; @@ -257,4 +292,7 @@ message HealthCheck { // initial health check failure event will be logged. // The default value is false. bool always_log_health_check_failures = 19; + + // This allows overriding the cluster TLS settings, just for health check connections. + TlsOptions tls_options = 21; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/http_uri.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/http_uri.proto index 7e4b4dba43c..5f740695dd8 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/http_uri.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/http_uri.proto @@ -2,14 +2,16 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "HttpUriProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "google/protobuf/duration.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "HttpUriProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: HTTP Service URI ] // Envoy external URI descriptor diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/protocol.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/protocol.proto index c45bb7adf7d..53b6ae87467 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/protocol.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/core/protocol.proto @@ -2,30 +2,83 @@ syntax = "proto3"; package envoy.api.v2.core; -option java_outer_classname = "ProtocolProto"; -option 
java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.core"; - import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.core"; +option java_outer_classname = "ProtocolProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; + // [#protodoc-title: Protocol options] // [#not-implemented-hide:] message TcpProtocolOptions { } +message UpstreamHttpProtocolOptions { + // Set transport socket `SNI `_ for new + // upstream connections based on the downstream HTTP host/authority header, as seen by the + // :ref:`router filter `. + bool auto_sni = 1; + + // Automatic validate upstream presented certificate for new upstream connections based on the + // downstream HTTP host/authority header, as seen by the + // :ref:`router filter `. + // This field is intended to set with `auto_sni` field. + bool auto_san_validation = 2; +} + message HttpProtocolOptions { - // The idle timeout for upstream connection pool connections. The idle timeout is defined as the + // The idle timeout for connections. The idle timeout is defined as the // period in which there are no active requests. If not set, there is no idle timeout. When the - // idle timeout is reached the connection will be closed. Note that request based timeouts mean - // that HTTP/2 PINGs will not keep the connection alive. + // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 + // downstream connection a drain sequence will occur prior to closing the connection, see + // :ref:`drain_timeout + // `. + // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + // + // .. 
warning:: + // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + // FIN packets, etc. google.protobuf.Duration idle_timeout = 1; + + // The maximum duration of a connection. The duration is defined as a period since a connection + // was established. If not set, there is no max duration. When max_connection_duration is reached + // the connection will be closed. Drain sequence will occur prior to closing the connection if + // if's applicable. See :ref:`drain_timeout + // `. + // Note: not implemented for upstream connections. + google.protobuf.Duration max_connection_duration = 3; + + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; } +// [#next-free-field: 6] message Http1ProtocolOptions { + message HeaderKeyFormat { + message ProperCaseWords { + } + + oneof header_format { + option (validate.required) = true; + + // Formats the header by proper casing words: the first character and any character following + // a special character will be capitalized if it's an alpha character. For example, + // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + // Note that while this results in most headers following conventional casing, certain headers + // are not covered. For example, the "TE" header will be formatted as "Te". + ProperCaseWords proper_case_words = 1; + } + } + // Handle HTTP requests with absolute URLs in the requests. These requests // are generally sent by clients to forward/explicit proxies. This allows clients to configure // envoy as their HTTP proxy. 
In Unix, for example, this is typically done by setting the @@ -42,9 +95,24 @@ message Http1ProtocolOptions { // Envoy does not otherwise support HTTP/1.0 without a Host header. // This is a no-op if *accept_http_10* is not true. string default_host_for_http_10 = 3; + + // Describes how the keys for response headers should be formatted. By default, all header keys + // are lower cased. + HeaderKeyFormat header_key_format = 4; + + // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + // + // .. attention:: + // + // Note that this only happens when Envoy is chunk encoding which occurs when: + // - The request is HTTP/1.1. + // - Is neither a HEAD only request nor a HTTP Upgrade. + // - Not a response to a HEAD request. + // - The content length header is not present. + bool enable_trailers = 5; } -// [#comment:next free field: 13] +// [#next-free-field: 13] message Http2ProtocolOptions { // `Maximum table size `_ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values @@ -55,6 +123,10 @@ message Http2ProtocolOptions { // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. + // + // For upstream connections, this also limits how many streams Envoy will initiate concurrently + // on a single connection. If the limit is reached, Envoy may queue requests or establish + // additional connections (as allowed per circuit breaker limits). google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; @@ -142,7 +214,7 @@ message Http2ProtocolOptions { // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // - // See [RFC7540, sec. 8.1](https://tools.ietf.org/html/rfc7540#section-8.1) for details. 
+ // See `RFC7540, sec. 8.1 `_ for details. bool stream_error_on_invalid_http_messaging = 12; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/discovery.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/discovery.proto index a8423f5f904..0794f82aa9d 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/discovery.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/discovery.proto @@ -2,19 +2,23 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "DiscoveryProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2"; - import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; +import "udpa/annotations/migrate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "DiscoveryProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.discovery.v3"; + // [#protodoc-title: Common discovery API components] // A DiscoveryRequest requests a set of versioned resources of the same type for // a given Envoy node on some API. +// [#next-free-field: 7] message DiscoveryRequest { // The version_info provided in the request messages will be the version_info // received with the most recent successfully processed response or empty on @@ -56,6 +60,7 @@ message DiscoveryRequest { google.rpc.Status error_detail = 6; } +// [#next-free-field: 7] message DiscoveryResponse { // The version of the response data. string version_info = 1; @@ -130,6 +135,7 @@ message DiscoveryResponse { // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. +// [#next-free-field: 8] message DeltaDiscoveryRequest { // The node making the request. 
core.Node node = 1; @@ -186,6 +192,7 @@ message DeltaDiscoveryRequest { google.rpc.Status error_detail = 7; } +// [#next-free-field: 7] message DeltaDiscoveryResponse { // The version of the response data (used for debugging). string system_version_info = 1; @@ -213,7 +220,6 @@ message Resource { // The resource's name, to distinguish it from others of the same type of resource. string name = 3; - // [#not-implemented-hide:] // The aliases are a list of other names that this resource can go by. repeated string aliases = 4; diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/eds.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/eds.proto index 15518902977..0917940aee8 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/eds.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/eds.proto @@ -2,25 +2,30 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "EdsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_generic_services = true; - import "envoy/api/v2/discovery.proto"; -import "envoy/api/v2/endpoint/endpoint.proto"; -import "envoy/type/percent.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/resource.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +import public "envoy/api/v2/endpoint.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "EdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3"; + // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` service EndpointDiscoveryService { + option (envoy.annotations.resource).type = "envoy.api.v2.ClusterLoadAssignment"; + // The resource_names field in DiscoveryRequest specifies a list 
of clusters // to subscribe to updates for. rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) { @@ -30,102 +35,12 @@ service EndpointDiscoveryService { } rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http) = { - post: "/v2/discovery:endpoints" - body: "*" - }; + option (google.api.http).post = "/v2/discovery:endpoints"; + option (google.api.http).body = "*"; } } -// Each route from RDS will map to a single cluster or traffic split across -// clusters using weights expressed in the RDS WeightedCluster. -// -// With EDS, each cluster is treated independently from a LB perspective, with -// LB taking place between the Localities within a cluster and at a finer -// granularity between the hosts within a locality. The percentage of traffic -// for each endpoint is determined by both its load_balancing_weight, and the -// load_balancing_weight of its locality. First, a locality will be selected, -// then an endpoint within that locality will be chose based on its weight. -message ClusterLoadAssignment { - // Load balancing policy settings. - message Policy { - message DropOverload { - // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Percentage of traffic that should be dropped for the category. - type.FractionalPercent drop_percentage = 2; - } - - reserved 1; - - // Action to trim the overall incoming traffic to protect the upstream - // hosts. This action allows protection in case the hosts are unable to - // recover from an outage, or unable to autoscale or unable to handle - // incoming traffic volume for any reason. - // - // At the client each category is applied one after the other to generate - // the 'actual' drop percentage on all outgoing traffic. For example: - // - // .. 
code-block:: json - // - // { "drop_overloads": [ - // { "category": "throttle", "drop_percentage": 60 } - // { "category": "lb", "drop_percentage": 50 } - // ]} - // - // The actual drop percentages applied to the traffic at the clients will be - // "throttle"_drop = 60% - // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. - // actual_outgoing_load = 20% // remaining after applying all categories. - repeated DropOverload drop_overloads = 2; - - // Priority levels and localities are considered overprovisioned with this - // factor (in percentage). This means that we don't consider a priority - // level or locality unhealthy until the percentage of healthy hosts - // multiplied by the overprovisioning factor drops below 100. - // With the default value 140(1.4), Envoy doesn't consider a priority level - // or a locality unhealthy until their percentage of healthy hosts drops - // below 72%. For example: - // - // .. code-block:: json - // - // { "overprovisioning_factor": 100 } - // - // Read more at :ref:`priority levels ` and - // :ref:`localities `. - google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; - - // The max time until which the endpoints from this assignment can be used. - // If no new assignments are received before this time expires the endpoints - // are considered stale and should be marked unhealthy. - // Defaults to 0 which means endpoints never go stale. - google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; - - // The flag to disable overprovisioning. If it is set to true, - // :ref:`overprovisioning factor - // ` will be ignored - // and Envoy will not perform graceful failover between priority levels or - // localities as endpoints become unhealthy. Otherwise Envoy will perform - // graceful failover as :ref:`overprovisioning factor - // ` suggests. - // [#next-major-version: Unify with overprovisioning config as a single message.] 
- // [#not-implemented-hide:] - bool disable_overprovisioning = 5; - } - - // Name of the cluster. This will be the :ref:`service_name - // ` value if specified - // in the cluster :ref:`EdsClusterConfig - // `. - string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // List of endpoints to load balance to. - repeated endpoint.LocalityLbEndpoints endpoints = 2; - - // Map of named endpoints that can be referenced in LocalityLbEndpoints. - map named_endpoints = 5; - - // Load balancing policy settings. - Policy policy = 4; +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. +message EdsDummy { } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint.proto new file mode 100644 index 00000000000..d800c6d19e5 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint.proto @@ -0,0 +1,117 @@ +syntax = "proto3"; + +package envoy.api.v2; + +import "envoy/api/v2/endpoint/endpoint_components.proto"; +import "envoy/type/percent.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "EndpointProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; + +// [#protodoc-title: Endpoint configuration] +// Endpoint discovery :ref:`architecture overview ` + +// Each route from RDS will map to a single cluster or traffic split across +// clusters using weights expressed in the RDS WeightedCluster. 
+// +// With EDS, each cluster is treated independently from a LB perspective, with +// LB taking place between the Localities within a cluster and at a finer +// granularity between the hosts within a locality. The percentage of traffic +// for each endpoint is determined by both its load_balancing_weight, and the +// load_balancing_weight of its locality. First, a locality will be selected, +// then an endpoint within that locality will be chose based on its weight. +// [#next-free-field: 6] +message ClusterLoadAssignment { + // Load balancing policy settings. + // [#next-free-field: 6] + message Policy { + message DropOverload { + // Identifier for the policy specifying the drop. + string category = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Percentage of traffic that should be dropped for the category. + type.FractionalPercent drop_percentage = 2; + } + + reserved 1; + + // Action to trim the overall incoming traffic to protect the upstream + // hosts. This action allows protection in case the hosts are unable to + // recover from an outage, or unable to autoscale or unable to handle + // incoming traffic volume for any reason. + // + // At the client each category is applied one after the other to generate + // the 'actual' drop percentage on all outgoing traffic. For example: + // + // .. code-block:: json + // + // { "drop_overloads": [ + // { "category": "throttle", "drop_percentage": 60 } + // { "category": "lb", "drop_percentage": 50 } + // ]} + // + // The actual drop percentages applied to the traffic at the clients will be + // "throttle"_drop = 60% + // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%. + // actual_outgoing_load = 20% // remaining after applying all categories. + repeated DropOverload drop_overloads = 2; + + // Priority levels and localities are considered overprovisioned with this + // factor (in percentage). 
This means that we don't consider a priority + // level or locality unhealthy until the percentage of healthy hosts + // multiplied by the overprovisioning factor drops below 100. + // With the default value 140(1.4), Envoy doesn't consider a priority level + // or a locality unhealthy until their percentage of healthy hosts drops + // below 72%. For example: + // + // .. code-block:: json + // + // { "overprovisioning_factor": 100 } + // + // Read more at :ref:`priority levels ` and + // :ref:`localities `. + google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}]; + + // The max time until which the endpoints from this assignment can be used. + // If no new assignments are received before this time expires the endpoints + // are considered stale and should be marked unhealthy. + // Defaults to 0 which means endpoints never go stale. + google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}]; + + // The flag to disable overprovisioning. If it is set to true, + // :ref:`overprovisioning factor + // ` will be ignored + // and Envoy will not perform graceful failover between priority levels or + // localities as endpoints become unhealthy. Otherwise Envoy will perform + // graceful failover as :ref:`overprovisioning factor + // ` suggests. + // [#next-major-version: Unify with overprovisioning config as a single message.] + // [#not-implemented-hide:] + bool disable_overprovisioning = 5; + } + + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // List of endpoints to load balance to. + repeated endpoint.LocalityLbEndpoints endpoints = 2; + + // Map of named endpoints that can be referenced in LocalityLbEndpoints. + // [#not-implemented-hide:] + map named_endpoints = 5; + + // Load balancing policy settings. 
+ Policy policy = 4; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint.proto index 46875a173e8..247c9ae265a 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint.proto @@ -2,125 +2,8 @@ syntax = "proto3"; package envoy.api.v2.endpoint; +import public "envoy/api/v2/endpoint/endpoint_components.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/health_check.proto"; - -import "google/protobuf/wrappers.proto"; - -import "validate/validate.proto"; - -// [#protodoc-title: Endpoints] - -// Upstream host identifier. -message Endpoint { - // The optional health check configuration. - message HealthCheckConfig { - // Optional alternative health check port value. - // - // By default the health check address port of an upstream host is the same - // as the host's serving address port. This provides an alternative health - // check port. Setting this with a non-zero value allows an upstream host - // to have different health check address port. - uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; - } - - // The upstream host address. - // - // .. attention:: - // - // The form of host address depends on the given cluster type. For STATIC or EDS, - // it is expected to be a direct IP address (or something resolvable by the - // specified :ref:`resolver ` - // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, - // and will be resolved via DNS. 
- core.Address address = 1; - - // The optional health check configuration is used as configuration for the - // health checker to contact the health checked host. - // - // .. attention:: - // - // This takes into effect only for upstream clusters with - // :ref:`active health checking ` enabled. - HealthCheckConfig health_check_config = 2; -} - -// An Endpoint that Envoy can route traffic to. -message LbEndpoint { - // Upstream host identifier or a named reference. - oneof host_identifier { - Endpoint endpoint = 1; - - string endpoint_name = 5; - } - - // Optional health status when known and supplied by EDS server. - core.HealthStatus health_status = 2; - - // The endpoint metadata specifies values that may be used by the load - // balancer to select endpoints in a cluster for a given request. The filter - // name should be specified as *envoy.lb*. An example boolean key-value pair - // is *canary*, providing the optional canary status of the upstream host. - // This may be matched against in a route's - // :ref:`RouteAction ` metadata_match field - // to subset the endpoints considered in cluster load balancing. - core.Metadata metadata = 3; - - // The optional load balancing weight of the upstream host; at least 1. - // Envoy uses the load balancing weight in some of the built in load - // balancers. The load balancing weight for an endpoint is divided by the sum - // of the weights of all endpoints in the endpoint's locality to produce a - // percentage of traffic for the endpoint. This percentage is then further - // weighted by the endpoint's locality's load balancing weight from - // LocalityLbEndpoints. If unspecified, each host is presumed to have equal - // weight in a locality. - google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; -} - -// A group of endpoints belonging to a Locality. 
-// One can have multiple LocalityLbEndpoints for a locality, but this is -// generally only done if the different groups need to have different load -// balancing weights or different priorities. -message LocalityLbEndpoints { - // Identifies location of where the upstream hosts run. - core.Locality locality = 1; - - // The group of endpoints belonging to the locality specified. - repeated LbEndpoint lb_endpoints = 2; - - // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load - // balancing weight for a locality is divided by the sum of the weights of all - // localities at the same priority level to produce the effective percentage - // of traffic for the locality. - // - // Locality weights are only considered when :ref:`locality weighted load - // balancing ` is - // configured. These weights are ignored otherwise. If no weights are - // specified when locality weighted load balancing is enabled, the locality is - // assigned no load. - google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Optional: the priority for this LocalityLbEndpoints. If unspecified this will - // default to the highest priority (0). - // - // Under usual circumstances, Envoy will only select endpoints for the highest - // priority (0). In the event all endpoints for a particular priority are - // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the - // next highest priority group. - // - // Priorities should range from 0 (highest) to N (lowest) without skipping. - uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; - - // Optional: Per locality proximity value which indicates how close this - // locality is from the source locality. This value only provides ordering - // information (lower the value, closer it is to the source locality). - // This will be consumed by load balancing schemes that need proximity order - // to determine where to route the requests. 
- // [#not-implemented-hide:] - google.protobuf.UInt32Value proximity = 6; -} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint_components.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint_components.proto new file mode 100644 index 00000000000..5d2fe527588 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/endpoint_components.proto @@ -0,0 +1,131 @@ +syntax = "proto3"; + +package envoy.api.v2.endpoint; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/health_check.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; +option java_outer_classname = "EndpointComponentsProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; + +// [#protodoc-title: Endpoints] + +// Upstream host identifier. +message Endpoint { + // The optional health check configuration. + message HealthCheckConfig { + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; + } + + // The upstream host address. + // + // .. attention:: + // + // The form of host address depends on the given cluster type. For STATIC or EDS, + // it is expected to be a direct IP address (or something resolvable by the + // specified :ref:`resolver ` + // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, + // and will be resolved via DNS. 
+ core.Address address = 1; + + // The optional health check configuration is used as configuration for the + // health checker to contact the health checked host. + // + // .. attention:: + // + // This takes into effect only for upstream clusters with + // :ref:`active health checking ` enabled. + HealthCheckConfig health_check_config = 2; +} + +// An Endpoint that Envoy can route traffic to. +// [#next-free-field: 6] +message LbEndpoint { + // Upstream host identifier or a named reference. + oneof host_identifier { + Endpoint endpoint = 1; + + // [#not-implemented-hide:] + string endpoint_name = 5; + } + + // Optional health status when known and supplied by EDS server. + core.HealthStatus health_status = 2; + + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as *envoy.lb*. An example boolean key-value pair + // is *canary*, providing the optional canary status of the upstream host. + // This may be matched against in a route's + // :ref:`RouteAction ` metadata_match field + // to subset the endpoints considered in cluster load balancing. + core.Metadata metadata = 3; + + // The optional load balancing weight of the upstream host; at least 1. + // Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, each host is presumed to have equal + // weight in a locality. + google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; +} + +// A group of endpoints belonging to a Locality. 
+// One can have multiple LocalityLbEndpoints for a locality, but this is +// generally only done if the different groups need to have different load +// balancing weights or different priorities. +// [#next-free-field: 7] +message LocalityLbEndpoints { + // Identifies location of where the upstream hosts run. + core.Locality locality = 1; + + // The group of endpoints belonging to the locality specified. + repeated LbEndpoint lb_endpoints = 2; + + // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load + // balancing weight for a locality is divided by the sum of the weights of all + // localities at the same priority level to produce the effective percentage + // of traffic for the locality. + // + // Locality weights are only considered when :ref:`locality weighted load + // balancing ` is + // configured. These weights are ignored otherwise. If no weights are + // specified when locality weighted load balancing is enabled, the locality is + // assigned no load. + google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}]; + + // Optional: the priority for this LocalityLbEndpoints. If unspecified this will + // default to the highest priority (0). + // + // Under usual circumstances, Envoy will only select endpoints for the highest + // priority (0). In the event all endpoints for a particular priority are + // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the + // next highest priority group. + // + // Priorities should range from 0 (highest) to N (lowest) without skipping. + uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}]; + + // Optional: Per locality proximity value which indicates how close this + // locality is from the source locality. This value only provides ordering + // information (lower the value, closer it is to the source locality). + // This will be consumed by load balancing schemes that need proximity order + // to determine where to route the requests. 
+ // [#not-implemented-hide:] + google.protobuf.UInt32Value proximity = 6; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/load_report.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/load_report.proto index b44313ba4ee..a80d5b77d92 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/load_report.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/endpoint/load_report.proto @@ -2,23 +2,26 @@ syntax = "proto3"; package envoy.api.v2.endpoint; -option java_outer_classname = "LoadReportProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; - import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; +option java_outer_classname = "LoadReportProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; + // These are stats Envoy reports to GLB every so often. Report frequency is // defined by -// :ref:`LoadStatsResponse.load_reporting_interval`. +// :ref:`LoadStatsResponse.load_reporting_interval`. // Stats per upstream region/zone and optionally per subzone. // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// [#next-free-field: 9] message UpstreamLocalityStats { // Name of zone, region and optionally endpoint group these metrics were // collected from. Zone and region names could be empty if unknown. @@ -45,7 +48,7 @@ message UpstreamLocalityStats { // Endpoint granularity stats information for this locality. This information // is populated if the Server requests it by setting - // :ref:`LoadStatsResponse.report_endpoint_granularity`. + // :ref:`LoadStatsResponse.report_endpoint_granularity`. 
repeated UpstreamEndpointStats upstream_endpoint_stats = 7; // [#not-implemented-hide:] The priority of the endpoint group these metrics @@ -53,6 +56,8 @@ message UpstreamLocalityStats { uint32 priority = 6; } +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// [#next-free-field: 8] message UpstreamEndpointStats { // Upstream host address. core.Address address = 1; @@ -105,12 +110,21 @@ message EndpointLoadMetricStats { } // Per cluster load stats. Envoy reports these stats a management server in a -// :ref:`LoadStatsRequest` +// :ref:`LoadStatsRequest` // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. // Next ID: 7 +// [#next-free-field: 7] message ClusterStats { + message DroppedRequests { + // Identifier for the policy specifying the drop. + string category = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Total number of deliberately dropped requests for the category. + uint64 dropped_count = 2; + } + // The name of the cluster. - string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The eds_cluster_config service_name of the cluster. // It's possible that two clusters send the same service_name to EDS, @@ -119,7 +133,7 @@ message ClusterStats { // Need at least one. repeated UpstreamLocalityStats upstream_locality_stats = 2 - [(validate.rules).repeated .min_items = 1]; + [(validate.rules).repeated = {min_items: 1}]; // Cluster-level stats such as total_successful_requests may be computed by // summing upstream_locality_stats. In addition, below there are additional @@ -129,12 +143,6 @@ message ClusterStats { // deliberately dropped by the drop_overload policy and circuit breaking. uint64 total_dropped_requests = 3; - message DroppedRequests { - // Identifier for the policy specifying the drop. - string category = 1 [(validate.rules).string.min_bytes = 1]; - // Total number of deliberately dropped requests for the category. 
- uint64 dropped_count = 2; - } // Information about deliberately dropped requests for each category specified // in the DropOverload policy. repeated DroppedRequests dropped_requests = 5; diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/lds.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/lds.proto index 0ea940f6c47..aa13453ebc6 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/lds.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/lds.proto @@ -2,24 +2,24 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "LdsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_generic_services = true; - -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/discovery.proto"; -import "envoy/api/v2/listener/listener.proto"; -import "envoy/api/v2/listener/udp_listener_config.proto"; -import "envoy/config/listener/v2/api_listener.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/resource.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +import public "envoy/api/v2/listener.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "LdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3"; + // [#protodoc-title: Listener] // Listener :ref:`configuration overview ` @@ -28,6 +28,8 @@ import "validate/validate.proto"; // consist of a complete update of all listeners. Existing connections will be // allowed to drain from listeners that are no longer present. 
service ListenerDiscoveryService { + option (envoy.annotations.resource).type = "envoy.api.v2.Listener"; + rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } @@ -35,183 +37,12 @@ service ListenerDiscoveryService { } rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http) = { - post: "/v2/discovery:listeners" - body: "*" - }; + option (google.api.http).post = "/v2/discovery:listeners"; + option (google.api.http).body = "*"; } } -// [#comment:next free field: 20] -message Listener { - enum DrainType { - // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - // filter), listener removal/modification, and hot restart. - DEFAULT = 0; - - // Drain in response to listener removal/modification and hot restart. This setting does not - // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - // and egress listeners. - MODIFY_ONLY = 1; - } - - // [#not-implemented-hide:] - message DeprecatedV1 { - // Whether the listener should bind to the port. A listener that doesn't - // bind can only receive connections redirected from other listeners that - // set use_original_dst parameter to true. Default is true. - // - // This is deprecated in v2, all Listeners will bind to their port. An - // additional filter chain must be created for every original destination - // port this listener may redirect to in v2, with the original port - // specified in the FilterChainMatch destination_port field. - // - // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] - google.protobuf.BoolValue bind_to_port = 1; - } - - reserved 14; - - // The unique name by which this listener is known. If no name is provided, - // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - // updated or removed via :ref:`LDS ` a unique name must be provided. 
- string name = 1; - - // The address that the listener should listen on. In general, the address must be unique, though - // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - // Linux as the actual port will be allocated by the OS. - core.Address address = 2 [(validate.rules).message = {required: true}]; - - // A list of filter chains to consider for this listener. The - // :ref:`FilterChain ` with the most specific - // :ref:`FilterChainMatch ` criteria is used on a - // connection. - // - // Example using SNI for filter chain selection can be found in the - // :ref:`FAQ entry `. - repeated listener.FilterChain filter_chains = 3; - - // If a connection is redirected using *iptables*, the port on which the proxy - // receives it might be different from the original destination address. When this flag is set to - // true, the listener hands off redirected connections to the listener associated with the - // original destination address. If there is no listener associated with the original destination - // address, the connection is handled by the listener that receives it. Defaults to false. - // - // .. attention:: - // - // This field is deprecated. Use :ref:`an original_dst ` - // :ref:`listener filter ` instead. - // - // Note that hand off to another listener is *NOT* performed without this flag. Once - // :ref:`FilterChainMatch ` is implemented this flag - // will be removed, as filter chain matching can be used to select a filter chain based on the - // restored destination address. - google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; - - // Soft limit on size of the listener’s new connection read and write buffers. - // If unspecified, an implementation defined default is applied (1MiB). - google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; - - // Listener metadata. 
- core.Metadata metadata = 6; - - // [#not-implemented-hide:] - DeprecatedV1 deprecated_v1 = 7; - - // The type of draining to perform at a listener-wide level. - DrainType drain_type = 8; - - // Listener filters have the opportunity to manipulate and augment the connection metadata that - // is used in connection filter chain matching, for example. These filters are run before any in - // :ref:`filter_chains `. Order matters as the - // filters are processed sequentially right after a socket has been accepted by the listener, and - // before a connection is created. - // UDP Listener filters can be specified when the protocol in the listener socket address in - // :ref:`protocol ` is :ref:'UDP - // `. - // UDP listeners currently support a single filter. - repeated listener.ListenerFilter listener_filters = 9; - - // The timeout to wait for all listener filters to complete operation. If the timeout is reached, - // the accepted socket is closed without a connection being created unless - // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - // timeout. If not specified, a default timeout of 15s is used. - google.protobuf.Duration listener_filters_timeout = 15; - - // Whether a connection should be created when listener filters timeout. Default is false. - // - // .. attention:: - // - // Some listener filters, such as :ref:`Proxy Protocol filter - // `, should not be used with this option. It will cause - // unexpected behavior when a connection is created. - bool continue_on_listener_filters_timeout = 17; - - // Whether the listener should be set as a transparent socket. - // When this flag is set to true, connections can be redirected to the listener using an - // *iptables* *TPROXY* target, in which case the original source and destination addresses and - // ports are preserved on accepted connections. 
This flag should be used in combination with - // :ref:`an original_dst ` :ref:`listener filter - // ` to mark the connections' local addresses as - // "restored." This can be used to hand off each redirected connection to another listener - // associated with the connection's destination address. Direct connections to the socket without - // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - // therefore treated as if they were redirected. - // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - // When this flag is not set (default), the socket is not modified, i.e. the transparent option - // is neither set nor reset. - google.protobuf.BoolValue transparent = 10; - - // Whether the listener should set the *IP_FREEBIND* socket option. When this - // flag is set to true, listeners can be bound to an IP address that is not - // configured on the system running Envoy. When this flag is set to false, the - // option *IP_FREEBIND* is disabled on the socket. When this flag is not set - // (default), the socket is not modified, i.e. the option is neither enabled - // nor disabled. - google.protobuf.BoolValue freebind = 11; - - // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - repeated core.SocketOption socket_options = 13; - - // Whether the listener should accept TCP Fast Open (TFO) connections. - // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - // the socket, with a queue length of the specified size - // (see `details in RFC7413 `_). - // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - // When this flag is not set (default), the socket is not modified, - // i.e. the option is neither enabled nor disabled. 
- // - // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - // TCP_FASTOPEN. - // See `ip-sysctl.txt `_. - // - // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; - - // Specifies the intended direction of the traffic relative to the local Envoy. - core.TrafficDirection traffic_direction = 16; - - // If the protocol in the listener socket address in :ref:`protocol - // ` is :ref:'UDP - // `, this field specifies the actual udp listener to create, - // i.e. :ref:`udp_listener_name - // ` = "raw_udp_listener" for - // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". - listener.UdpListenerConfig udp_listener_config = 18; - - // [#not-implemented-hide:] - // Used to represent an API listener, which is used in non-proxy clients. The type of API - // exposed to the non-proxy application depends on the type of API listener. - // When this field is set, no other field except for :ref:`name` - // should be set. - // [#next-major-version: In the v3 API, instead of this messy approach where the socket - // listener fields are directly in the top-level Listener message and the API listener types - // are in the ApiListener message, the socket listener messages should be in their own message, - // and the top-level Listener should essentially be a oneof that selects between the - // socket listener and the various types of API listener. That way, a given Listener message - // can structurally only contain the fields of the relevant type.] - config.listener.v2.ApiListener api_listener = 19; +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
+message LdsDummy { } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener.proto new file mode 100644 index 00000000000..3fbb10070d0 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener.proto @@ -0,0 +1,239 @@ +syntax = "proto3"; + +package envoy.api.v2; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/listener/listener_components.proto"; +import "envoy/api/v2/listener/udp_listener_config.proto"; +import "envoy/config/listener/v2/api_listener.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "ListenerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; + +// [#protodoc-title: Listener configuration] +// Listener :ref:`configuration overview ` + +// [#next-free-field: 22] +message Listener { + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // [#not-implemented-hide:] + message DeprecatedV1 { + // Whether the listener should bind to the port. A listener that doesn't + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // This is deprecated in v2, all Listeners will bind to their port. 
An + // additional filter chain must be created for every original destination + // port this listener may redirect to in v2, with the original port + // specified in the FilterChainMatch destination_port field. + // + // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] + google.protobuf.BoolValue bind_to_port = 1; + } + + // Configuration for listener connection balancing. + message ConnectionBalanceConfig { + // A connection balancer implementation that does exact balancing. This means that a lock is + // held during balancing so that connection counts are nearly exactly balanced between worker + // threads. This is "nearly" exact in the sense that a connection might close in parallel thus + // making the counts incorrect, but this should be rectified on the next accept. This balancer + // sacrifices accept throughput for accuracy and should be used when there are a small number of + // connections that rarely cycle (e.g., service mesh gRPC egress). + message ExactBalance { + } + + oneof balance_type { + option (validate.required) = true; + + // If specified, the listener will use the exact connection balancer. + ExactBalance exact_balance = 1; + } + } + + reserved 14; + + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.Address address = 2 [(validate.rules).message = {required: true}]; + + // A list of filter chains to consider for this listener. 
The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // Example using SNI for filter chain selection can be found in the + // :ref:`FAQ entry `. + repeated listener.FilterChain filter_chains = 3; + + // If a connection is redirected using *iptables*, the port on which the proxy + // receives it might be different from the original destination address. When this flag is set to + // true, the listener hands off redirected connections to the listener associated with the + // original destination address. If there is no listener associated with the original destination + // address, the connection is handled by the listener that receives it. Defaults to false. + // + // .. attention:: + // + // This field is deprecated. Use :ref:`an original_dst ` + // :ref:`listener filter ` instead. + // + // Note that hand off to another listener is *NOT* performed without this flag. Once + // :ref:`FilterChainMatch ` is implemented this flag + // will be removed, as filter chain matching can be used to select a filter chain based on the + // restored destination address. + google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Listener metadata. + core.Metadata metadata = 6; + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. 
Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + // UDP Listener filters can be specified when the protocol in the listener socket address in + // :ref:`protocol ` is :ref:`UDP + // `. + // UDP listeners currently support a single filter. + repeated listener.ListenerFilter listener_filters = 9; + + // The timeout to wait for all listener filters to complete operation. If the timeout is reached, + // the accepted socket is closed without a connection being created unless + // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the + // timeout. If not specified, a default timeout of 15s is used. + google.protobuf.Duration listener_filters_timeout = 15; + + // Whether a connection should be created when listener filters timeout. Default is false. + // + // .. attention:: + // + // Some listener filters, such as :ref:`Proxy Protocol filter + // `, should not be used with this option. It will cause + // unexpected behavior when a connection is created. + bool continue_on_listener_filters_timeout = 17; + + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. 
+ // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Additional socket options that may not be present in Envoy source code or + // precompiled binaries. + repeated core.SocketOption socket_options = 13; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; + + // Specifies the intended direction of the traffic relative to the local Envoy. 
+ core.TrafficDirection traffic_direction = 16; + + // If the protocol in the listener socket address in :ref:`protocol + // ` is :ref:`UDP + // `, this field specifies the actual udp + // listener to create, i.e. :ref:`udp_listener_name + // ` = "raw_udp_listener" for + // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". + listener.UdpListenerConfig udp_listener_config = 18; + + // Used to represent an API listener, which is used in non-proxy clients. The type of API + // exposed to the non-proxy application depends on the type of API listener. + // When this field is set, no other field except for :ref:`name` + // should be set. + // + // .. note:: + // + // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + // not LDS. + // + // [#next-major-version: In the v3 API, instead of this messy approach where the socket + // listener fields are directly in the top-level Listener message and the API listener types + // are in the ApiListener message, the socket listener messages should be in their own message, + // and the top-level Listener should essentially be a oneof that selects between the + // socket listener and the various types of API listener. That way, a given Listener message + // can structurally only contain the fields of the relevant type.] + config.listener.v2.ApiListener api_listener = 19; + + // The listener's connection balancer configuration, currently only applicable to TCP listeners. + // If no configuration is specified, Envoy will not attempt to balance active connections between + // worker threads. + ConnectionBalanceConfig connection_balance_config = 20; + + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and + // create one socket for each worker thread. This makes inbound connections + // distribute among worker threads roughly evenly in cases where there are a high number + // of connections. 
When this flag is set to false, all worker threads share one socket. + // + // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart + // (see `3rd paragraph in 'soreuseport' commit message + // `_). + // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket + // `_. + bool reuse_port = 21; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener.proto index 949075840dd..273b29cb5dd 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener.proto @@ -2,207 +2,10 @@ syntax = "proto3"; package envoy.api.v2.listener; +import public "envoy/api/v2/listener/listener_components.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.listener"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; - -import "envoy/api/v2/auth/cert.proto"; -import "envoy/api/v2/core/address.proto"; -import "envoy/api/v2/core/base.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "validate/validate.proto"; - -// [#protodoc-title: Listener components] -// Listener :ref:`configuration overview ` - -message Filter { - reserved 3; - - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being - // instantiated. See the supported filters for further documentation. 
- oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 4; - } -} - -// Specifies the match criteria for selecting a specific filter chain for a -// listener. -// -// In order for a filter chain to be selected, *ALL* of its criteria must be -// fulfilled by the incoming connection, properties of which are set by the -// networking stack and/or listener filters. -// -// The following order applies: -// -// 1. Destination port. -// 2. Destination IP address. -// 3. Server name (e.g. SNI for TLS protocol), -// 4. Transport protocol. -// 5. Application protocols (e.g. ALPN for TLS protocol). -// 6. Source type (e.g. any, local or external network). -// 7. Source IP address. -// 8. Source port. -// -// For criteria that allow ranges or wildcards, the most specific value in any -// of the configured filter chains that matches the incoming connection is going -// to be used (e.g. for SNI ``www.example.com`` the most specific match would be -// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter -// chain without ``server_names`` requirements). -// -// [#comment: Implemented rules are kept in the preference order, with deprecated fields -// listed at the end, because that's how we want to list them in the docs. -// -// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] -message FilterChainMatch { - enum ConnectionSourceType { - // Any connection source matches. - ANY = 0; - - // Match a connection originating from the same host. - LOCAL = 1; - - // Match a connection originating from a different host. - EXTERNAL = 2; - } - - reserved 1; - - // Optional destination port to consider when use_original_dst is set on the - // listener in determining a filter chain match. 
- google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; - - // If non-empty, an IP address and prefix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - repeated core.CidrRange prefix_ranges = 3; - - // If non-empty, an IP address and suffix length to match addresses when the - // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. - // [#not-implemented-hide:] - string address_suffix = 4; - - // [#not-implemented-hide:] - google.protobuf.UInt32Value suffix_len = 5; - - // Specifies the connection source IP match type. Can be any, local or external network. - ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; - - // The criteria is satisfied if the source IP address of the downstream - // connection is contained in at least one of the specified subnets. If the - // parameter is not specified or the list is empty, the source IP address is - // ignored. - repeated core.CidrRange source_prefix_ranges = 6; - - // The criteria is satisfied if the source port of the downstream connection - // is contained in at least one of the specified ports. If the parameter is - // not specified, the source port is ignored. - repeated uint32 source_ports = 7 - [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; - - // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining - // a filter chain match. Those values will be compared against the server names of a new - // connection, when detected by one of the listener filters. - // - // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` - // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. - // - // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. - // - // .. 
attention:: - // - // See the :ref:`FAQ entry ` on how to configure SNI for more - // information. - repeated string server_names = 11; - - // If non-empty, a transport protocol to consider when determining a filter chain match. - // This value will be compared against the transport protocol of a new connection, when - // it's detected by one of the listener filters. - // - // Suggested values include: - // - // * ``raw_buffer`` - default, used when no transport protocol is detected, - // * ``tls`` - set by :ref:`envoy.listener.tls_inspector ` - // when TLS protocol is detected. - string transport_protocol = 9; - - // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when - // determining a filter chain match. Those values will be compared against the application - // protocols of a new connection, when detected by one of the listener filters. - // - // Suggested values include: - // - // * ``http/1.1`` - set by :ref:`envoy.listener.tls_inspector - // `, - // * ``h2`` - set by :ref:`envoy.listener.tls_inspector ` - // - // .. attention:: - // - // Currently, only :ref:`TLS Inspector ` provides - // application protocol detection based on the requested - // `ALPN `_ values. - // - // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, - // and matching on values other than ``h2`` is going to lead to a lot of false negatives, - // unless all connecting clients are known to use ALPN. - repeated string application_protocols = 10; -} - -// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and -// various other parameters. -message FilterChain { - // The criteria to use when matching a connection to this filter chain. - FilterChainMatch filter_chain_match = 1; - - // The TLS context for this filter chain. 
- auth.DownstreamTlsContext tls_context = 2; - - // A list of individual network filters that make up the filter chain for - // connections established with the listener. Order matters as the filters are - // processed sequentially as connection events happen. Note: If the filter - // list is empty, the connection will close by default. - repeated Filter filters = 3; - - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; - - // [#not-implemented-hide:] filter chain metadata. - core.Metadata metadata = 5; - - // See :ref:`base.TransportSocket` description. - core.TransportSocket transport_socket = 6; - - // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - // chain is to be dynamically updated or removed via FCDS a unique name must be provided. - string name = 7; -} - -message ListenerFilter { - // The name of the filter to instantiate. The name must match a - // :ref:`supported filter `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Filter specific configuration which depends on the filter being instantiated. - // See the supported filters for further documentation. 
- oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } -} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener_components.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener_components.proto new file mode 100644 index 00000000000..d9c8cfbfcb9 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/listener_components.proto @@ -0,0 +1,271 @@ +syntax = "proto3"; + +package envoy.api.v2.listener; + +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/type/range.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.listener"; +option java_outer_classname = "ListenerComponentsProto"; +option java_multiple_files = true; +option csharp_namespace = "Envoy.Api.V2.ListenerNS"; +option ruby_package = "Envoy.Api.V2.ListenerNS"; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + reserved 3; + + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true]; + + google.protobuf.Any typed_config = 4; + } +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. 
+// +// In order for a filter chain to be selected, *ALL* of its criteria must be +// fulfilled by the incoming connection, properties of which are set by the +// networking stack and/or listener filters. +// +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +// +// For criteria that allow ranges or wildcards, the most specific value in any +// of the configured filter chains that matches the incoming connection is going +// to be used (e.g. for SNI ``www.example.com`` the most specific match would be +// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter +// chain without ``server_names`` requirements). +// +// [#comment: Implemented rules are kept in the preference order, with deprecated fields +// listed at the end, because that's how we want to list them in the docs. +// +// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] +// [#next-free-field: 13] +message FilterChainMatch { + enum ConnectionSourceType { + // Any connection source matches. + ANY = 0; + + // Match a connection originating from the same host. + LOCAL = 1 [(udpa.annotations.enum_value_migrate).rename = "SAME_IP_OR_LOOPBACK"]; + + // Match a connection originating from a different host. + EXTERNAL = 2; + } + + reserved 1; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. 
+ repeated core.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // Specifies the connection source IP match type. Can be any, local or external network. + ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. + repeated core.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + repeated uint32 source_ports = 7 + [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; + + // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining + // a filter chain match. Those values will be compared against the server names of a new + // connection, when detected by one of the listener filters. + // + // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` + // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. + // + // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string server_names = 11; + + // If non-empty, a transport protocol to consider when determining a filter chain match. 
+ // This value will be compared against the transport protocol of a new connection, when + // it's detected by one of the listener filters. + // + // Suggested values include: + // + // * ``raw_buffer`` - default, used when no transport protocol is detected, + // * ``tls`` - set by :ref:`envoy.listener.tls_inspector ` + // when TLS protocol is detected. + string transport_protocol = 9; + + // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when + // determining a filter chain match. Those values will be compared against the application + // protocols of a new connection, when detected by one of the listener filters. + // + // Suggested values include: + // + // * ``http/1.1`` - set by :ref:`envoy.listener.tls_inspector + // `, + // * ``h2`` - set by :ref:`envoy.listener.tls_inspector ` + // + // .. attention:: + // + // Currently, only :ref:`TLS Inspector ` provides + // application protocol detection based on the requested + // `ALPN `_ values. + // + // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, + // and matching on values other than ``h2`` is going to lead to a lot of false negatives, + // unless all connecting clients are known to use ALPN. + repeated string application_protocols = 10; +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +// [#next-free-field: 8] +message FilterChain { + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // The TLS context for this filter chain. + // + // .. attention:: + // + // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are + // set, `transport_socket` takes priority. + auth.DownstreamTlsContext tls_context = 2 [deprecated = true]; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. 
Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. + repeated Filter filters = 3; + + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + google.protobuf.BoolValue use_proxy_proto = 4; + + // [#not-implemented-hide:] filter chain metadata. + core.Metadata metadata = 5; + + // Optional custom transport socket implementation to use for downstream connections. + // To setup TLS, set a transport socket with name `tls` and + // :ref:`DownstreamTlsContext ` in the `typed_config`. + // If no transport socket configuration is specified, new connections + // will be set up with plaintext. + core.TransportSocket transport_socket = 6; + + // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no + // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter + // chain is to be dynamically updated or removed via FCDS a unique name must be provided. + string name = 7; +} + +// [#not-implemented-hide:] +// Listener filter chain match configuration. This is a recursive structure which allows complex +// nested match configurations to be built using various logical operators. +// [#next-free-field: 6] +message ListenerFilterChainMatchPredicate { + // A set of match configurations used for logical operations. + message MatchSet { + // The list of rules that make up the set. 
+ repeated ListenerFilterChainMatchPredicate rules = 1 + [(validate.rules).repeated = {min_items: 2}]; + } + + oneof rule { + option (validate.required) = true; + + // A set that describes a logical OR. If any member of the set matches, the match configuration + // matches. + MatchSet or_match = 1; + + // A set that describes a logical AND. If all members of the set match, the match configuration + // matches. + MatchSet and_match = 2; + + // A negation match. The match configuration will match if the negated match condition matches. + ListenerFilterChainMatchPredicate not_match = 3; + + // The match configuration will always match. + bool any_match = 4 [(validate.rules).bool = {const: true}]; + + // Match destination port. Particularly, the match evaluation must use the recovered local port if + // the owning listener filter is after :ref:`an original_dst listener filter `. + type.Int32Range destination_port_range = 5; + } +} + +message ListenerFilter { + // The name of the filter to instantiate. The name must match a + // :ref:`supported filter `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Filter specific configuration which depends on the filter being instantiated. + // See the supported filters for further documentation. + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true]; + + google.protobuf.Any typed_config = 3; + } + + // [#not-implemented-hide:] + // Decide when to disable this listener filter on incoming traffic. + // Example: + // 0. always enable filter + // don't set `filter_disabled` + // 1. disable when the destination port is 3306 + // rule.destination_port_range = Int32Range {start = 3306, end = 3307} + // 2. 
disable when the destination port is 3306 or 15000 + // rule.or_match = MatchSet.rules [ + // rule.destination_port_range = Int32Range {start = 3306, end = 3307}, + // rule.destination_port_range = Int32Range {start = 15000, end = 15001}, + // ] + ListenerFilterChainMatchPredicate filter_disabled = 4; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/udp_listener_config.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/udp_listener_config.proto index 4b489b99884..31404b41d53 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/udp_listener_config.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/listener/udp_listener_config.proto @@ -2,16 +2,19 @@ syntax = "proto3"; package envoy.api.v2.listener; +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; + +import "udpa/annotations/migrate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.listener"; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.listener"; option csharp_namespace = "Envoy.Api.V2.ListenerNS"; option ruby_package = "Envoy.Api.V2.ListenerNS"; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -// [#protodoc-title: Udp Listener Config] +// [#protodoc-title: UDP Listener Config] // Listener :ref:`configuration overview ` message UdpListenerConfig { @@ -23,8 +26,11 @@ message UdpListenerConfig { // Used to create a specific listener factory. To some factory, e.g. // "raw_udp_listener", config is not needed. 
oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } + +message ActiveRawUdpListenerConfig { +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/rds.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/rds.proto index 120c4bd4e32..f54308aafb5 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/rds.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/rds.proto @@ -2,24 +2,24 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "RdsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_generic_services = true; - -import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/discovery.proto"; -import "envoy/api/v2/route/route.proto"; import "google/api/annotations.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/resource.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; -// [#protodoc-title: HTTP route configuration] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` +import public "envoy/api/v2/route.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "RdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; + +// [#protodoc-title: RDS] // The resource_names field in DiscoveryRequest specifies a route configuration. // This allows an Envoy configuration with multiple HTTP listeners (and @@ -27,6 +27,8 @@ import "validate/validate.proto"; // configurations. Each listener will bind its HTTP connection manager filter to // a route table via this identifier. 
service RouteDiscoveryService { + option (envoy.annotations.resource).type = "envoy.api.v2.RouteConfiguration"; + rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } @@ -34,98 +36,29 @@ service RouteDiscoveryService { } rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http) = { - post: "/v2/discovery:routes" - body: "*" - }; + option (google.api.http).post = "/v2/discovery:routes"; + option (google.api.http).body = "*"; } } // Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for // a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered // during the processing of an HTTP request if a route for the request cannot be resolved. The -// :ref:`resource_names_subscribe ` +// :ref:`resource_names_subscribe ` // field contains a list of virtual host names or aliases to track. The contents of an alias would // be the contents of a *host* or *authority* header used to make an http request. An xDS server // will match an alias to a virtual host based on the content of :ref:`domains' -// ` field. The *resource_names_unsubscribe* field contains -// a list of virtual host names that have been :ref:`unsubscribed ` -// from the routing table associated with the RouteConfiguration. +// ` field. The *resource_names_unsubscribe* field +// contains a list of virtual host names that have been :ref:`unsubscribed +// ` from the routing table associated with the RouteConfiguration. service VirtualHostDiscoveryService { + option (envoy.annotations.resource).type = "envoy.api.v2.route.VirtualHost"; + rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } } -// [#comment:next free field: 10] -message RouteConfiguration { - // The name of the route configuration. 
For example, it might match - // :ref:`route_config_name - // ` in - // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. - string name = 1; - - // An array of virtual hosts that make up the route table. - repeated route.VirtualHost virtual_hosts = 2; - - // An array of virtual hosts will be dynamically loaded via the VHDS API. - // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - // on-demand discovery of virtual hosts. The contents of these two fields will be merged to - // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - // taking precedence. - // [#not-implemented-hide:] - Vhds vhds = 9; - - // Optionally specifies a list of HTTP headers that the connection manager - // will consider to be internal only. If they are found on external requests they will be cleaned - // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - // information. - repeated string internal_only_headers = 3; - - // Specifies a list of HTTP headers that should be added to each response that - // the connection manager encodes. Headers specified at this level are applied - // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // that the connection manager encodes. - repeated string response_headers_to_remove = 5; - - // Specifies a list of HTTP headers that should be added to each request - // routed by the HTTP connection manager. 
Headers specified at this level are - // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 6 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // routed by the HTTP connection manager. - repeated string request_headers_to_remove = 8; - - // An optional boolean that specifies whether the clusters that the route - // table refers to will be validated by the cluster manager. If set to true - // and a route refers to a non-existent cluster, the route table will not - // load. If set to false and a route refers to a non-existent cluster, the - // route table will load and the router filter will return a 404 if the route - // is selected at runtime. This setting defaults to true if the route table - // is statically defined via the :ref:`route_config - // ` - // option. This setting default to false if the route table is loaded dynamically via the - // :ref:`rds - // ` - // option. Users may wish to override the default behavior in certain cases (for example when - // using CDS with a static route table). - google.protobuf.BoolValue validate_clusters = 7; -} - -// [#not-implemented-hide:] -message Vhds { - // Configuration source specifier for VHDS. - core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
+message RdsDummy { } diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/route.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/route.proto new file mode 100644 index 00000000000..11ae686239d --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/route.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.api.v2; + +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/route/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "RouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; + +// [#protodoc-title: HTTP route configuration] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// [#next-free-field: 11] +message RouteConfiguration { + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. + string name = 1; + + // An array of virtual hosts that make up the route table. + repeated route.VirtualHost virtual_hosts = 2; + + // An array of virtual hosts will be dynamically loaded via the VHDS API. + // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used + // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for + // on-demand discovery of virtual hosts. The contents of these two fields will be merged to + // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration + // taking precedence. + Vhds vhds = 9; + + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. 
If they are found on external requests they will be cleaned + // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + repeated string internal_only_headers = 3; + + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or + // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + repeated string response_headers_to_remove = 5; + + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or + // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 6 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // routed by the HTTP connection manager. + repeated string request_headers_to_remove = 8; + + // By default, headers that should be added/removed are evaluated from most to least specific: + // + // * route level + // * virtual host level + // * connection manager level + // + // To allow setting overrides at the route or virtual host level, this order can be reversed + // by setting this option to true. Defaults to false. 
+ // + // [#next-major-version: In the v3 API, this will default to true.] + bool most_specific_header_mutations_wins = 10; + + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting default to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). + google.protobuf.BoolValue validate_clusters = 7; +} + +message Vhds { + // Configuration source specifier for VHDS. 
+ core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route.proto index c4fe54419b1..ec13e9e5c80 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route.proto @@ -2,1397 +2,8 @@ syntax = "proto3"; package envoy.api.v2.route; +import public "envoy/api/v2/route/route_components.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.route"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2.route"; - -import "envoy/api/v2/core/base.proto"; -import "envoy/type/matcher/regex.proto"; -import "envoy/type/matcher/string.proto"; -import "envoy/type/percent.proto"; -import "envoy/type/range.proto"; - -import "google/protobuf/any.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/wrappers.proto"; - -import "validate/validate.proto"; - -// [#protodoc-title: HTTP route] -// * Routing :ref:`architecture overview ` -// * HTTP :ref:`router filter ` - -// The top level element in the routing configuration is a virtual host. Each virtual host has -// a logical name as well as a set of domains that get routed to it based on the incoming request's -// host header. This allows a single listener to service multiple top level domain path trees. Once -// a virtual host is selected based on the domain, the routes are processed in order to see which -// upstream cluster to route to or whether to perform a redirect. -// [#comment:next free field: 17] -message VirtualHost { - enum TlsRequirementType { - // No TLS requirement for the virtual host. - NONE = 0; - - // External requests must use TLS. 
If a request is external and it is not - // using TLS, a 301 redirect will be sent telling the client to use HTTPS. - EXTERNAL_ONLY = 1; - - // All requests must use TLS. If a request is not using TLS, a 301 redirect - // will be sent telling the client to use HTTPS. - ALL = 2; - } - - reserved 9; - - // The logical name of the virtual host. This is used when emitting certain - // statistics but is not relevant for routing. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // A list of domains (host/authority header) that will be matched to this - // virtual host. Wildcard hosts are supported in the suffix or prefix form. - // - // Domain search order: - // 1. Exact domain names: ``www.foo.com``. - // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. - // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. - // 4. Special wildcard ``*`` matching any domain. - // - // .. note:: - // - // The wildcard will not match the empty string. - // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. - // The longest wildcards match first. - // Only a single virtual host in the entire route configuration can match on ``*``. A domain - // must be unique across all virtual hosts or the config will fail to load. - repeated string domains = 2 [(validate.rules).repeated = {min_items: 1}]; - - // The list of routes that will be matched, in order, for incoming requests. - // The first route that matches will be used. - repeated Route routes = 3; - - // Specifies the type of TLS enforcement the virtual host expects. If this option is not - // specified, there is no TLS requirement for the virtual host. - TlsRequirementType require_tls = 4; - - // A list of virtual clusters defined for this virtual host. Virtual clusters - // are used for additional statistics gathering. - repeated VirtualCluster virtual_clusters = 5; - - // Specifies a set of rate limit configurations that will be applied to the - // virtual host. 
- repeated RateLimit rate_limits = 6; - - // Specifies a list of HTTP headers that should be added to each request - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 7 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // handled by this virtual host. - repeated string request_headers_to_remove = 13; - - // Specifies a list of HTTP headers that should be added to each response - // handled by this virtual host. Headers specified at this level are applied - // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // handled by this virtual host. - repeated string response_headers_to_remove = 11; - - // Indicates that the virtual host has a CORS policy. - CorsPolicy cors = 8; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. 
- map per_filter_config = 12; - - // The per_filter_config field can be used to provide virtual host-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map typed_per_filter_config = 15; - - // Decides whether the :ref:`x-envoy-attempt-count - // ` header should be included - // in the upstream request. Setting this option will cause it to override any existing header - // value, so in the case of two Envoys on the request path with this option enabled, the upstream - // will see the attempt count as perceived by the second Envoy. Defaults to false. - // This header is unaffected by the - // :ref:`suppress_envoy_headers - // ` flag. - bool include_request_attempt_count = 14; - - // Indicates the retry policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - RetryPolicy retry_policy = 16; - - // Indicates the hedge policy for all routes in this virtual host. Note that setting a - // route level entry will take precedence over this config and it'll be treated - // independently (e.g.: values are not inherited). - HedgePolicy hedge_policy = 17; -} - -// A route is both a specification of how to match a request as well as an indication of what to do -// next (e.g., redirect, forward, rewrite, etc.). -// -// .. attention:: -// -// Envoy supports routing on HTTP method via :ref:`header matching -// `. -// [#comment:next free field: 15] -message Route { - reserved 6; - - // Name for the route. - string name = 14; - - // Route matching parameters. - RouteMatch match = 1 [(validate.rules).message = {required: true}]; - - oneof action { - option (validate.required) = true; - - // Route request to some upstream cluster. 
- RouteAction route = 2; - - // Return a redirect. - RedirectAction redirect = 3; - - // Return an arbitrary HTTP response directly, without proxying. - DirectResponseAction direct_response = 7; - } - - // The Metadata field can be used to provide additional information - // about the route. It can be used for configuration, stats, and logging. - // The metadata should go under the filter namespace that will need it. - // For instance, if the metadata is intended for the Router filter, - // the filter name should be specified as *envoy.router*. - core.Metadata metadata = 4; - - // Decorator for the matched route. - Decorator decorator = 5; - - // The per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map per_filter_config = 8; - - // The per_filter_config field can be used to provide route-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` for - // if and how it is utilized. - map typed_per_filter_config = 13; - - // Specifies a set of headers that will be added to requests matching this - // route. Headers specified at this level are applied before headers from the - // enclosing :ref:`envoy_api_msg_route.VirtualHost` and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 9 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request - // matching this route. 
- repeated string request_headers_to_remove = 12; - - // Specifies a set of headers that will be added to responses to requests - // matching this route. Headers specified at this level are applied before - // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - // details on header value syntax, see the documentation on - // :ref:`custom request headers `. - repeated core.HeaderValueOption response_headers_to_add = 10 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each response - // to requests matching this route. - repeated string response_headers_to_remove = 11; - - // Presence of the object defines whether the connection manager's tracing configuration - // is overridden by this route specific instance. - Tracing tracing = 15; -} - -// Compared to the :ref:`cluster ` field that specifies a -// single upstream cluster as the target of a request, the :ref:`weighted_clusters -// ` option allows for specification of -// multiple upstream clusters along with weights that indicate the percentage of -// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the -// weights. -// [#comment:next free field: 11] -message WeightedCluster { - message ClusterWeight { - reserved 7; - - // Name of the upstream cluster. The cluster must exist in the - // :ref:`cluster manager configuration `. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // An integer between 0 and :ref:`total_weight - // `. When a request matches the route, - // the choice of an upstream cluster is determined by its weight. The sum of weights across all - // entries in the clusters array must add up to the total_weight, which defaults to 100. - google.protobuf.UInt32Value weight = 2; - - // Optional endpoint metadata match criteria used by the subset load balancer. 
Only endpoints in - // the upstream cluster with metadata matching what is set in this field will be considered for - // load balancing. Note that this will be merged with what's provided in :ref: - // `RouteAction.MetadataMatch `, with values - // here taking precedence. The filter name should be specified as *envoy.lb*. - core.Metadata metadata_match = 3; - - // Specifies a list of headers to be added to requests when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption request_headers_to_add = 4 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of HTTP headers that should be removed from each request when - // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - repeated string request_headers_to_remove = 9; - - // Specifies a list of headers to be added to responses when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - // Headers specified at this level are applied before headers from the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - // header value syntax, see the documentation on :ref:`custom request headers - // `. - repeated core.HeaderValueOption response_headers_to_add = 5 - [(validate.rules).repeated = {max_items: 1000}]; - - // Specifies a list of headers to be removed from responses when this cluster is selected - // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. 
- repeated string response_headers_to_remove = 6; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map per_filter_config = 8; - - // The per_filter_config field can be used to provide weighted cluster-specific - // configurations for filters. The key should match the filter name, such as - // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter - // specific; see the :ref:`HTTP filter documentation ` - // for if and how it is utilized. - map typed_per_filter_config = 10; - } - - // Specifies one or more upstream clusters associated with the route. - repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; - - // Specifies the total weight across all clusters. The sum of all cluster weights must equal this - // value, which must be greater than 0. Defaults to 100. - google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies the runtime key prefix that should be used to construct the - // runtime keys associated with each cluster. When the *runtime_key_prefix* is - // specified, the router will look for weights associated with each upstream - // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - // *cluster[i]* denotes an entry in the clusters array field. If the runtime - // key for the cluster does not exist, the value specified in the - // configuration file will be used as the default weight. See the :ref:`runtime documentation - // ` for how key names map to the underlying implementation. 
- string runtime_key_prefix = 2; -} - -message RouteMatch { - message GrpcRouteMatchOptions { - } - - reserved 5; - - oneof path_specifier { - option (validate.required) = true; - - // If specified, the route is a prefix rule meaning that the prefix must - // match the beginning of the *:path* header. - string prefix = 1; - - // If specified, the route is an exact path rule meaning that the path must - // exactly match the *:path* header once the query string is removed. - string path = 2; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. The regex grammar is defined `here - // `_. - // - // Examples: - // - // * The regex ``/b[io]t`` matches the path */bit* - // * The regex ``/b[io]t`` matches the path */bot* - // * The regex ``/b[io]t`` does not match the path */bite* - // * The regex ``/b[io]t`` does not match the path */bit/bot* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex` as it is not safe for use with - // untrusted input in all cases. - string regex = 3 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; - - // If specified, the route is a regular expression rule meaning that the - // regex must match the *:path* header once the query string is removed. The entire path - // (without the query string) must match the regex. The rule will not match if only a - // subsequence of the *:path* header matches the regex. - // - // [#next-major-version: In the v3 API we should redo how path specification works such - // that we utilize StringMatcher, and additionally have consistent options around whether we - // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - // to deprecate the existing options. 
We should even consider whether we want to do away with - // path_specifier entirely and just rely on a set of header matchers which can already match - // on :path, etc. The issue with that is it is unclear how to generically deal with query string - // stripping. This needs more thought.] - type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; - } - - // Indicates that prefix/path matching should be case insensitive. The default - // is true. - google.protobuf.BoolValue case_sensitive = 4; - - // Indicates that the route should additionally match on a runtime key. Every time the route - // is considered for a match, it must also fall under the percentage of matches indicated by - // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the router continues to evaluate the remaining match criteria. A runtime_fraction - // route configuration can be used to roll out route changes in a gradual manner without full - // code/config deploys. Refer to the :ref:`traffic shifting - // ` docs for additional documentation. - // - // .. note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - // integer with the assumption that the value is an integral percentage out of 100. For - // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - core.RuntimeFractionalPercent runtime_fraction = 9; - - // Specifies a set of headers that the route should match on. The router will - // check the request’s headers against all the specified headers in the route - // config. 
A match will happen if all the headers in the route are present in - // the request with the same values (or based on presence if the value field - // is not in the config). - repeated HeaderMatcher headers = 6; - - // Specifies a set of URL query parameters on which the route should - // match. The router will check the query string from the *path* header - // against all the specified query parameters. If the number of specified - // query parameters is nonzero, they all must match the *path* header's - // query string for a match to occur. - repeated QueryParameterMatcher query_parameters = 7; - - // If specified, only gRPC requests will be matched. The router will check - // that the content-type header has a application/grpc or one of the various - // application/grpc+ values. - GrpcRouteMatchOptions grpc = 8; -} - -// [#comment:next free field: 11] -message CorsPolicy { - // Specifies the origins that will be allowed to do CORS requests. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match`. - repeated string allow_origin = 1 [deprecated = true]; - - // Specifies regex patterns that match allowed origins. - // - // An origin is allowed if either allow_origin or allow_origin_regex match. - // - // .. attention:: - // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for - // use with untrusted input in all cases. - repeated string allow_origin_regex = 8 - [(validate.rules).repeated = {items {string {max_bytes: 1024}}}, deprecated = true]; - - // Specifies string patterns that match allowed origins. An origin is allowed if any of the - // string matchers match. - repeated type.matcher.StringMatcher allow_origin_string_match = 11; - - // Specifies the content for the *access-control-allow-methods* header. 
- string allow_methods = 2; - - // Specifies the content for the *access-control-allow-headers* header. - string allow_headers = 3; - - // Specifies the content for the *access-control-expose-headers* header. - string expose_headers = 4; - - // Specifies the content for the *access-control-max-age* header. - string max_age = 5; - - // Specifies whether the resource allows credentials. - google.protobuf.BoolValue allow_credentials = 6; - - oneof enabled_specifier { - // Specifies if CORS is enabled. Defaults to true. Only effective on route. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`filter_enabled` field instead. - google.protobuf.BoolValue enabled = 7 [deprecated = true]; - - // Specifies if CORS is enabled. - // - // More information on how this can be controlled via runtime can be found - // :ref:`here `. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - core.RuntimeFractionalPercent filter_enabled = 9; - } - - // Specifies if CORS policies are evaluated and tracked when filter is off but - // does not enforce any policies. - // - // More information on how this can be controlled via runtime can be found - // :ref:`here `. - // - // .. note:: - // - // This field defaults to 100/:ref:`HUNDRED - // `. - core.RuntimeFractionalPercent shadow_enabled = 10; -} - -// [#comment:next free field: 30] -message RouteAction { - enum ClusterNotFoundResponseCode { - // HTTP status code - 503 Service Unavailable. - SERVICE_UNAVAILABLE = 0; - - // HTTP status code - 404 Not Found. - NOT_FOUND = 1; - } - - // Configures :ref:`internal redirect ` behavior. - enum InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0; - HANDLE_INTERNAL_REDIRECT = 1; - } - - // The router is capable of shadowing traffic from one cluster to another. 
The current - // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - // respond before returning the response from the primary cluster. All normal statistics are - // collected for the shadow cluster making this feature useful for testing. - // - // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - message RequestMirrorPolicy { - // Specifies the cluster that requests will be mirrored to. The cluster must - // exist in the cluster manager configuration. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If not specified, all requests to the target cluster will be mirrored. If - // specified, Envoy will lookup the runtime key to get the % of requests to - // mirror. Valid values are from 0 to 10000, allowing for increments of - // 0.01% of requests to be mirrored. If the runtime key is specified in the - // configuration but not present in runtime, 0 is the default and thus 0% of - // requests will be mirrored. - // - // .. attention:: - // - // **This field is deprecated**. Set the - // :ref:`runtime_fraction - // ` field instead. - string runtime_key = 2 [deprecated = true]; - - // If both :ref:`runtime_key - // ` and this field are not - // specified, all requests to the target cluster will be mirrored. - // - // If specified, this field takes precedence over the `runtime_key` field and requests must also - // fall under the percentage of matches indicated by this field. - // - // For some fraction N/D, a random number in the range [0,D) is selected. If the - // number is <= the value of the numerator N, or if the key is not present, the default - // value, the request will be mirrored. - // - // .. 
note:: - // - // Parsing this field is implemented such that the runtime key's data may be represented - // as a :ref:`FractionalPercent ` proto represented - // as JSON/YAML and may also be represented as an integer with the assumption that the value - // is an integral percentage out of 100. For instance, a runtime key lookup returning the - // value "42" would parse as a `FractionalPercent` whose numerator is 42 and denominator is - // HUNDRED. This is behaviour is different to that of the deprecated `runtime_key` field, - // where the implicit denominator is 10000. - core.RuntimeFractionalPercent runtime_fraction = 3; - } - - // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - // `. - message HashPolicy { - message Header { - // The name of the request header that will be used to obtain the hash - // key. If the request header is not present, no hash will be produced. - string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // Envoy supports two types of cookie affinity: - // - // 1. Passive. Envoy takes a cookie that's present in the cookies header and - // hashes on its value. - // - // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - // on the first request from the client in its response to the client, - // based on the endpoint the request gets sent to. The client then - // presents this on the next and all subsequent requests. The hash of - // this is sufficient to ensure these requests get sent to the same - // endpoint. The cookie is generated by hashing the source and - // destination ports and addresses so that multiple independent HTTP2 - // streams on the same connection will independently receive the same - // cookie, even if they arrive at the Envoy simultaneously. - message Cookie { - // The name of the cookie that will be used to obtain the hash key. If the - // cookie is not present and ttl below is not set, no hash will be - // produced. 
- string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, a cookie with the TTL will be generated if the cookie is - // not present. If the TTL is present and zero, the generated cookie will - // be a session cookie. - google.protobuf.Duration ttl = 2; - - // The name of the path for the cookie. If no path is specified here, no path - // will be set for the cookie. - string path = 3; - } - - message ConnectionProperties { - // Hash on source IP address. - bool source_ip = 1; - } - - oneof policy_specifier { - option (validate.required) = true; - - // Header hash policy. - Header header = 1; - - // Cookie hash policy. - Cookie cookie = 2; - - // Connection properties hash policy. - ConnectionProperties connection_properties = 3; - } - - // The flag that shortcircuits the hash computing. This field provides a - // 'fallback' style of configuration: "if a terminal policy doesn't work, - // fallback to rest of the policy list", it saves time when the terminal - // policy works. - // - // If true, and there is already a hash computed, ignore rest of the - // list of hash polices. - // For example, if the following hash methods are configured: - // - // ========= ======== - // specifier terminal - // ========= ======== - // Header A true - // Header B false - // Header C false - // ========= ======== - // - // The generateHash process ends if policy "header A" generates a hash, as - // it's a terminal policy. - bool terminal = 4; - } - - // Allows enabling and disabling upgrades on a per-route basis. - // This overrides any enabled/disabled upgrade filter chain specified in the - // HttpConnectionManager - // :ref:upgrade_configs` - // ` - // but does not affect any custom filter chain specified there. - message UpgradeConfig { - // The case-insensitive name of this upgrade, e.g. "websocket". - // For each upgrade type present in upgrade_configs, requests with - // Upgrade: [upgrade_type] will be proxied upstream. 
- string upgrade_type = 1; - - // Determines if upgrades are available on this route. Defaults to true. - google.protobuf.BoolValue enabled = 2; - } - - reserved 12, 18, 19, 16, 22, 21; - - oneof cluster_specifier { - option (validate.required) = true; - - // Indicates the upstream cluster to which the request should be routed - // to. - string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Envoy will determine the cluster to route to by reading the value of the - // HTTP header named by cluster_header from the request headers. If the - // header is not found or the referenced cluster does not exist, Envoy will - // return a 404 response. - // - // .. attention:: - // - // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - string cluster_header = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Multiple upstream clusters can be specified for a given route. The - // request is routed to one of the upstream clusters based on weights - // assigned to each cluster. See - // :ref:`traffic splitting ` - // for additional documentation. - WeightedCluster weighted_clusters = 3; - } - - // The HTTP status code to use when configured cluster is not found. - // The default response code is 503 Service Unavailable. - ClusterNotFoundResponseCode cluster_not_found_response_code = 20 - [(validate.rules).enum = {defined_only: true}]; - - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what's set in this field will be considered - // for load balancing. If using :ref:`weighted_clusters - // `, metadata will be merged, with values - // provided there taking precedence. The filter name should be specified as *envoy.lb*. 
- core.Metadata metadata_match = 4; - - // Indicates that during forwarding, the matched prefix (or path) should be - // swapped with this value. This option allows application URLs to be rooted - // at a different path from those exposed at the reverse proxy layer. The router filter will - // place the original path before rewrite into the :ref:`x-envoy-original-path - // ` header. - // - // .. attention:: - // - // Pay careful attention to the use of trailing slashes in the - // :ref:`route's match ` prefix value. - // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single - // :ref:`Route `, as shown by the below config entries: - // - // .. code-block:: yaml - // - // - match: - // prefix: "/prefix/" - // route: - // prefix_rewrite: "/" - // - match: - // prefix: "/prefix" - // route: - // prefix_rewrite: "/" - // - // Having above entries in the config, requests to */prefix* will be stripped to */*, while - // requests to */prefix/etc* will be stripped to */etc*. - string prefix_rewrite = 5; - - oneof host_rewrite_specifier { - // Indicates that during forwarding, the host header will be swapped with - // this value. - string host_rewrite = 6; - - // Indicates that during forwarding, the host header will be swapped with - // the hostname of the upstream host chosen by the cluster manager. This - // option is applicable only when the destination cluster for a route is of - // type *strict_dns* or *logical_dns*. Setting this to true with other cluster - // types has no effect. - google.protobuf.BoolValue auto_host_rewrite = 7; - - // Indicates that during forwarding, the host header will be swapped with the content of given - // downstream or :ref:`custom ` header. - // If header value is empty, host header is left intact. - // - // .. attention:: - // - // Pay attention to the potential security implications of using this option. 
Provided header - // must come from trusted source. - string auto_host_rewrite_header = 29; - } - - // Specifies the upstream timeout for the route. If not specified, the default is 15s. This - // spans between the point at which the entire downstream request (i.e. end-of-stream) has been - // processed and when the upstream response has been completely processed. A value of 0 will - // disable the route's timeout. - // - // .. note:: - // - // This timeout includes all retries. See also - // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - // :ref:`retry overview `. - google.protobuf.Duration timeout = 8; - - // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - // although the connection manager wide :ref:`stream_idle_timeout - // ` - // will still apply. A value of 0 will completely disable the route's idle timeout, even if a - // connection manager stream idle timeout is configured. - // - // The idle timeout is distinct to :ref:`timeout - // `, which provides an upper bound - // on the upstream response time; :ref:`idle_timeout - // ` instead bounds the amount - // of time the request's stream may be idle. - // - // After header decoding, the idle timeout will apply on downstream and - // upstream request events. Each time an encode/decode event for headers or - // data is processed for the stream, the timer will be reset. If the timeout - // fires, the stream is terminated with a 408 Request Timeout error code if no - // upstream response header has been received, otherwise a stream reset - // occurs. - google.protobuf.Duration idle_timeout = 24; - - // Indicates that the route has a retry policy. Note that if this is set, - // it'll take precedence over the virtual host level retry policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). 
- RetryPolicy retry_policy = 9; - - // Indicates that the route has a request mirroring policy. - RequestMirrorPolicy request_mirror_policy = 10; - - // Optionally specifies the :ref:`routing priority `. - // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once - // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] - core.RoutingPriority priority = 11; - - // Specifies a set of rate limit configurations that could be applied to the - // route. - repeated RateLimit rate_limits = 13; - - // Specifies if the rate limit filter should include the virtual host rate - // limits. By default, if the route configured rate limits, the virtual host - // :ref:`rate_limits ` are not applied to the - // request. - google.protobuf.BoolValue include_vh_rate_limits = 14; - - // Specifies a list of hash policies to use for ring hash load balancing. Each - // hash policy is evaluated individually and the combined result is used to - // route the request. The method of combination is deterministic such that - // identical lists of hash policies will produce the same hash. Since a hash - // policy examines specific parts of a request, it can fail to produce a hash - // (i.e. if the hashed header is not present). If (and only if) all configured - // hash policies fail to generate a hash, no hash will be produced for - // the route. In this case, the behavior is the same as if no hash policies - // were specified (i.e. the ring hash load balancer will choose a random - // backend). If a hash policy has the "terminal" attribute set to true, and - // there is already a hash generated, the hash is returned immediately, - // ignoring the rest of the hash policy list. - repeated HashPolicy hash_policy = 15; - - // Indicates that the route has a CORS policy. 
- CorsPolicy cors = 17; - - // If present, and the request is a gRPC request, use the - // `grpc-timeout header `_, - // or its default value (infinity) instead of - // :ref:`timeout `, but limit the applied timeout - // to the maximum value specified here. If configured as 0, the maximum allowed timeout for - // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - // and gRPC requests time out like any other requests using - // :ref:`timeout ` or its default. - // This can be used to prevent unexpected upstream request timeouts due to potentially long - // time gaps between gRPC request and response in gRPC streaming mode. - google.protobuf.Duration max_grpc_timeout = 23; - - // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - // the provided duration from the header. This is useful in allowing Envoy to set its global - // timeout to be less than that of the deadline imposed by the calling client, which makes it more - // likely that Envoy will handle the timeout instead of having the call canceled by the client. - // The offset will only be applied if the provided grpc_timeout is greater than the offset. This - // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - // infinity). - google.protobuf.Duration grpc_timeout_offset = 28; - - repeated UpgradeConfig upgrade_configs = 25; - - InternalRedirectAction internal_redirect_action = 26; - - // Indicates that the route has a hedge policy. Note that if this is set, - // it'll take precedence over the virtual host level hedge policy entirely - // (e.g.: policies are not merged, most internal one becomes the enforced policy). - HedgePolicy hedge_policy = 27; -} - -// HTTP retry :ref:`architecture overview `. 
-// [#comment:next free field: 10] -message RetryPolicy { - message RetryPriority { - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - - message RetryHostPredicate { - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - oneof config_type { - google.protobuf.Struct config = 2; - - google.protobuf.Any typed_config = 3; - } - } - - message RetryBackOff { - // Specifies the base interval between retries. This parameter is required and must be greater - // than zero. Values less than 1 ms are rounded up to 1 ms. - // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - // back-off algorithm. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {} - }]; - - // Specifies the maximum interval between retries. This parameter is optional, but must be - // greater than or equal to the `base_interval` if set. The default is 10 times the - // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - // of Envoy's back-off algorithm. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; - } - - // Specifies the conditions under which retry takes place. These are the same - // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - string retry_on = 1; - - // Specifies the allowed number of retries. This parameter is optional and - // defaults to 1. These are the same conditions documented for - // :ref:`config_http_filters_router_x-envoy-max-retries`. - google.protobuf.UInt32Value num_retries = 2; - - // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - // same conditions documented for - // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. 
- // - // .. note:: - // - // If left unspecified, Envoy will use the global - // :ref:`route timeout ` for the request. - // Consequently, when using a :ref:`5xx ` based - // retry policy, a request that times out will not be retried as the total timeout budget - // would have been exhausted. - google.protobuf.Duration per_try_timeout = 3; - - // Specifies an implementation of a RetryPriority which is used to determine the - // distribution of load across priorities used for retries. Refer to - // :ref:`retry plugin configuration ` for more details. - RetryPriority retry_priority = 4; - - // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - // for retries. If any of the predicates reject the host, host selection will be reattempted. - // Refer to :ref:`retry plugin configuration ` for more - // details. - repeated RetryHostPredicate retry_host_predicate = 5; - - // The maximum number of times host selection will be reattempted before giving up, at which - // point the host that was last selected will be routed to. If unspecified, this will default to - // retrying once. - int64 host_selection_retry_max_attempts = 6; - - // HTTP status codes that should trigger a retry in addition to those specified by retry_on. - repeated uint32 retriable_status_codes = 7; - - // Specifies parameters that control retry back off. This parameter is optional, in which case the - // default base interval is 25 milliseconds or, if set, the current value of the - // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - // describes Envoy's back-off algorithm. - RetryBackOff retry_back_off = 8; - - // HTTP response headers that trigger a retry if present in the response. A retry will be - // triggered if any of the header matches match the upstream response headers. 
- // The field is only consulted if 'retriable-headers' retry policy is active. - repeated HeaderMatcher retriable_headers = 9; - - // HTTP headers which must be present in the request for retries to be attempted. - repeated HeaderMatcher retriable_request_headers = 10; -} - -// HTTP request hedging :ref:`architecture overview `. -message HedgePolicy { - // Specifies the number of initial requests that should be sent upstream. - // Must be at least 1. - // Defaults to 1. - // [#not-implemented-hide:] - google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; - - // Specifies a probability that an additional upstream request should be sent - // on top of what is specified by initial_requests. - // Defaults to 0. - // [#not-implemented-hide:] - type.FractionalPercent additional_request_chance = 2; - - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. - // Defaults to false. - bool hedge_on_per_try_timeout = 3; -} - -message RedirectAction { - enum RedirectResponseCode { - // Moved Permanently HTTP Status Code - 301. - MOVED_PERMANENTLY = 0; - - // Found HTTP Status Code - 302. - FOUND = 1; - - // See Other HTTP Status Code - 303. - SEE_OTHER = 2; - - // Temporary Redirect HTTP Status Code - 307. - TEMPORARY_REDIRECT = 3; - - // Permanent Redirect HTTP Status Code - 308. - PERMANENT_REDIRECT = 4; - } - - // When the scheme redirection take place, the following rules apply: - // 1. If the source URI scheme is `http` and the port is explicitly - // set to `:80`, the port will be removed after the redirection - // 2. 
If the source URI scheme is `https` and the port is explicitly - // set to `:443`, the port will be removed after the redirection - oneof scheme_rewrite_specifier { - // The scheme portion of the URL will be swapped with "https". - bool https_redirect = 4; - - // The scheme portion of the URL will be swapped with this value. - string scheme_redirect = 7; - } - - // The host portion of the URL will be swapped with this value. - string host_redirect = 1; - - // The port value of the URL will be swapped with this value. - uint32 port_redirect = 8; - - oneof path_rewrite_specifier { - // The path portion of the URL will be swapped with this value. - string path_redirect = 2; - - // Indicates that during redirection, the matched prefix (or path) - // should be swapped with this value. This option allows redirect URLs be dynamically created - // based on the request. - // - // .. attention:: - // - // Pay attention to the use of trailing slashes as mentioned in - // :ref:`RouteAction's prefix_rewrite `. - string prefix_rewrite = 5; - } - - // The HTTP status code to use in the redirect response. The default response - // code is MOVED_PERMANENTLY (301). - RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; - - // Indicates that during redirection, the query portion of the URL will - // be removed. Default value is false. - bool strip_query = 6; -} - -message DirectResponseAction { - // Specifies the HTTP response status to be returned. - uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; - - // Specifies the content of the response body. If this setting is omitted, - // no body is included in the generated response. - // - // .. note:: - // - // Headers can be specified using *response_headers_to_add* in the enclosing - // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or - // :ref:`envoy_api_msg_route.VirtualHost`. 
- core.DataSource body = 2; -} - -message Decorator { - // The operation name associated with the request matched to this route. If tracing is - // enabled, this information will be used as the span name reported for this request. - // - // .. note:: - // - // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden - // by the :ref:`x-envoy-decorator-operation - // ` header. - string operation = 1 [(validate.rules).string = {min_bytes: 1}]; -} - -message Tracing { - // Target percentage of requests managed by this HTTP connection manager that will be force - // traced if the :ref:`x-client-trace-id ` - // header is set. This field is a direct analog for the runtime variable - // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - // `. - // Default: 100% - type.FractionalPercent client_sampling = 1; - - // Target percentage of requests managed by this HTTP connection manager that will be randomly - // selected for trace generation, if not requested by the client or not forced. This field is - // a direct analog for the runtime variable 'tracing.random_sampling' in the - // :ref:`HTTP Connection Manager `. - // Default: 100% - type.FractionalPercent random_sampling = 2; - - // Target percentage of requests managed by this HTTP connection manager that will be traced - // after all other sampling checks have been applied (client-directed, force tracing, random - // sampling). This field functions as an upper limit on the total configured sampling rate. For - // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - // of client requests with the appropriate headers to be force traced. This field is a direct - // analog for the runtime variable 'tracing.global_enabled' in the - // :ref:`HTTP Connection Manager `. 
- // Default: 100% - type.FractionalPercent overall_sampling = 3; -} - -// A virtual cluster is a way of specifying a regex matching rule against -// certain important endpoints such that statistics are generated explicitly for -// the matched requests. The reason this is useful is that when doing -// prefix/path matching Envoy does not always know what the application -// considers to be an endpoint. Thus, it’s impossible for Envoy to generically -// emit per endpoint statistics. However, often systems have highly critical -// endpoints that they wish to get “perfect” statistics on. Virtual cluster -// statistics are perfect in the sense that they are emitted on the downstream -// side such that they include network level failures. -// -// Documentation for :ref:`virtual cluster statistics `. -// -// .. note:: -// -// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for -// every application endpoint. This is both not easily maintainable and as well the matching and -// statistics output are not free. -message VirtualCluster { - // Specifies a regex pattern to use for matching requests. The entire path of the request - // must match the regex. The regex grammar used is defined `here - // `_. - // - // Examples: - // - // * The regex ``/rides/\d+`` matches the path */rides/0* - // * The regex ``/rides/\d+`` matches the path */rides/123* - // * The regex ``/rides/\d+`` does not match the path */rides/123/456* - // - // .. attention:: - // This field has been deprecated in favor of `headers` as it is not safe for use with - // untrusted input in all cases. - string pattern = 1 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; - - // Specifies a list of header matchers to use for matching requests. Each specified header must - // match. The pseudo-headers `:path` and `:method` can be used to match the request path and - // method, respectively. 
- repeated HeaderMatcher headers = 4; - - // Specifies the name of the virtual cluster. The virtual cluster name as well - // as the virtual host name are used when emitting statistics. The statistics are emitted by the - // router filter and are documented :ref:`here `. - string name = 2 [(validate.rules).string = {min_bytes: 1}]; - - // Optionally specifies the HTTP method to match on. For example GET, PUT, - // etc. - // - // .. attention:: - // This field has been deprecated in favor of `headers`. - core.RequestMethod method = 3 [deprecated = true]; -} - -// Global rate limiting :ref:`architecture overview `. -message RateLimit { - message Action { - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("source_cluster", "") - // - // is derived from the :option:`--service-cluster` option. - message SourceCluster { - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("destination_cluster", "") - // - // Once a request matches against a route table rule, a routed cluster is determined by one of - // the following :ref:`route table configuration ` - // settings: - // - // * :ref:`cluster ` indicates the upstream cluster - // to route to. - // * :ref:`weighted_clusters ` - // chooses a cluster randomly from a set of clusters with attributed weight. - // * :ref:`cluster_header ` indicates which - // header in the request contains the target cluster. - message DestinationCluster { - } - - // The following descriptor entry is appended when a header contains a key that matches the - // *header_name*: - // - // .. code-block:: cpp - // - // ("", "") - message RequestHeaders { - // The header name to be queried from the request headers. The header’s - // value is used to populate the value of the descriptor entry for the - // descriptor_key. - string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The key to use in the descriptor entry. 
- string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; - } - - // The following descriptor entry is appended to the descriptor and is populated using the - // trusted address from :ref:`x-forwarded-for `: - // - // .. code-block:: cpp - // - // ("remote_address", "") - message RemoteAddress { - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("generic_key", "") - message GenericKey { - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; - } - - // The following descriptor entry is appended to the descriptor: - // - // .. code-block:: cpp - // - // ("header_match", "") - message HeaderValueMatch { - // The value to use in the descriptor entry. - string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; - - // If set to true, the action will append a descriptor entry when the - // request matches the headers. If set to false, the action will append a - // descriptor entry when the request does not match the headers. The - // default value is true. - google.protobuf.BoolValue expect_match = 2; - - // Specifies a set of headers that the rate limit action should match - // on. The action will check the request’s headers against all the - // specified headers in the config. A match will happen if all the - // headers in the config are present in the request with the same values - // (or based on presence if the value field is not in the config). - repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; - } - - oneof action_specifier { - option (validate.required) = true; - - // Rate limit on source cluster. - SourceCluster source_cluster = 1; - - // Rate limit on destination cluster. - DestinationCluster destination_cluster = 2; - - // Rate limit on request headers. - RequestHeaders request_headers = 3; - - // Rate limit on remote address. 
- RemoteAddress remote_address = 4; - - // Rate limit on a generic key. - GenericKey generic_key = 5; - - // Rate limit on the existence of request headers. - HeaderValueMatch header_value_match = 6; - } - } - - // Refers to the stage set in the filter. The rate limit configuration only - // applies to filters with the same stage number. The default stage number is - // 0. - // - // .. note:: - // - // The filter supports a range of 0 - 10 inclusively for stage numbers. - google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; - - // The key to be set in runtime to disable this rate limit configuration. - string disable_key = 2; - - // A list of actions that are to be applied for this rate limit configuration. - // Order matters as the actions are processed sequentially and the descriptor - // is composed by appending descriptor entries in that sequence. If an action - // cannot append a descriptor entry, no descriptor is generated for the - // configuration. See :ref:`composing actions - // ` for additional documentation. - repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; -} - -// .. attention:: -// -// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* -// header. Thus, if attempting to match on *Host*, match on *:authority* instead. -// -// .. attention:: -// -// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both -// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., -// -// .. code-block:: json -// -// { -// "name": ":method", -// "exact_match": "POST" -// } -// -// .. attention:: -// In the absence of any header match specifier, match will default to :ref:`present_match -// `. i.e, a request that has the :ref:`name -// ` header will match, regardless of the header's -// value. -// -// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] 
-message HeaderMatcher { - reserved 2, 3; - - // Specifies the name of the header in the request. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // Specifies how the header match will be performed to route the request. - oneof header_match_specifier { - // If specified, header match will be performed based on the value of the header. - string exact_match = 4; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. The regex grammar used in the value field is defined - // `here `_. - // - // Examples: - // - // * The regex ``\d{3}`` matches the value *123* - // * The regex ``\d{3}`` does not match the value *1234* - // * The regex ``\d{3}`` does not match the value *123.456* - // - // .. attention:: - // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use - // with untrusted input in all cases. - string regex_match = 5 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; - - // If specified, this regex string is a regular expression rule which implies the entire request - // header value must match the regex. The rule will not match if only a subsequence of the - // request header value matches the regex. - type.matcher.RegexMatcher safe_regex_match = 11; - - // If specified, header match will be performed based on range. - // The rule will match if the request header value is within this range. - // The entire request header value must represent an integer in base 10 notation: consisting of - // an optional plus or minus sign followed by a sequence of digits. The rule will not match if - // the header value does not represent an integer. Match will fail for empty values, floating - // point numbers or if only a subsequence of the header value is an integer. 
- // - // Examples: - // - // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - // "-1somestring" - type.Int64Range range_match = 6; - - // If specified, header match will be performed based on whether the header is in the - // request. - bool present_match = 7; - - // If specified, header match will be performed based on the prefix of the header value. - // Note: empty prefix is not allowed, please use present_match instead. - // - // Examples: - // - // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; - - // If specified, header match will be performed based on the suffix of the header value. - // Note: empty suffix is not allowed, please use present_match instead. - // - // Examples: - // - // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; - } - - // If specified, the match result will be inverted before checking. Defaults to false. - // - // Examples: - // - // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - // * The range [-10,0) will match the value -1, so it will not match when inverted. - bool invert_match = 8; -} - -// Query parameter matching treats the query string of a request's :path header -// as an ampersand-separated list of keys and/or key=value elements. -message QueryParameterMatcher { - // Specifies the name of a key that must be present in the requested - // *path*'s query string. - string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; - - // Specifies the value of the key. If the value is absent, a request - // that contains the key in its query string will match, whether the - // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") - // - // ..attention:: - // This field is deprecated. 
Use an `exact` match inside the `string_match` field. - string value = 3 [deprecated = true]; - - // Specifies whether the query parameter value is a regular expression. - // Defaults to false. The entire query parameter value (i.e., the part to - // the right of the equals sign in "key=value") must match the regex. - // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. - // - // ..attention:: - // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. - google.protobuf.BoolValue regex = 4 [deprecated = true]; - - oneof query_parameter_match_specifier { - // Specifies whether a query parameter value should match against a string. - type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; - - // Specifies whether a query parameter should be present. - bool present_match = 6; - } -} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route_components.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route_components.proto new file mode 100644 index 00000000000..f5e6bae79a3 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/route/route_components.proto @@ -0,0 +1,1495 @@ +syntax = "proto3"; + +package envoy.api.v2.route; + +import "envoy/api/v2/core/base.proto"; +import "envoy/type/matcher/regex.proto"; +import "envoy/type/matcher/string.proto"; +import "envoy/type/percent.proto"; +import "envoy/type/range.proto"; +import "envoy/type/tracing/v2/custom_tag.proto"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2.route"; +option java_outer_classname = "RouteComponentsProto"; +option java_multiple_files = true; +option 
(udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; + +// [#protodoc-title: HTTP route components] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. +// [#next-free-field: 19] +message VirtualHost { + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + reserved 9; + + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the suffix or prefix form. + // + // Domain search order: + // 1. Exact domain names: ``www.foo.com``. + // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. + // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. + // 4. Special wildcard ``*`` matching any domain. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. + // The longest wildcards match first. 
+ // Only a single virtual host in the entire route configuration can match on ``*``. A domain + // must be unique across all virtual hosts or the config will fail to load. + repeated string domains = 2 [(validate.rules).repeated = {min_items: 1}]; + + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + repeated Route routes = 3; + + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; + + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. + repeated VirtualCluster virtual_clusters = 5; + + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + repeated RateLimit rate_limits = 6; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 7 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // handled by this virtual host. + repeated string request_headers_to_remove = 13; + + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the + // enclosing :ref:`envoy_api_msg_RouteConfiguration`. 
For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + repeated string response_headers_to_remove = 11; + + // Indicates that the virtual host has a CORS policy. + CorsPolicy cors = 8; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map per_filter_config = 12 [deprecated = true]; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 15; + + // Decides whether the :ref:`x-envoy-attempt-count + // ` header should be included + // in the upstream request. Setting this option will cause it to override any existing header + // value, so in the case of two Envoys on the request path with this option enabled, the upstream + // will see the attempt count as perceived by the second Envoy. Defaults to false. + // This header is unaffected by the + // :ref:`suppress_envoy_headers + // ` flag. + bool include_request_attempt_count = 14; + + // Indicates the retry policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). 
+ RetryPolicy retry_policy = 16; + + // Indicates the hedge policy for all routes in this virtual host. Note that setting a + // route level entry will take precedence over this config and it'll be treated + // independently (e.g.: values are not inherited). + HedgePolicy hedge_policy = 17; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum + // value of this and the listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; +} + +// A filter-defined action type. +message FilterAction { + google.protobuf.Any action = 1; +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +// [#next-free-field: 18] +message Route { + reserved 6; + + // Name for the route. + string name = 14; + + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message = {required: true}]; + + oneof action { + option (validate.required) = true; + + // Route request to some upstream cluster. + RouteAction route = 2; + + // Return a redirect. + RedirectAction redirect = 3; + + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + + // [#not-implemented-hide:] + // If true, a filter will define the action (e.g., it could dynamically generate the + // RouteAction). + FilterAction filter_action = 17; + } + + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as *envoy.router*. 
+ core.Metadata metadata = 4; + + // Decorator for the matched route. + Decorator decorator = 5; + + // The per_filter_config field can be used to provide route-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map per_filter_config = 8 [deprecated = true]; + + // The per_filter_config field can be used to provide route-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map typed_per_filter_config = 13; + + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_api_msg_route.VirtualHost` and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 9 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request + // matching this route. + repeated string request_headers_to_remove = 12; + + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. 
+ repeated core.HeaderValueOption response_headers_to_add = 10 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 11; + + // Presence of the object defines whether the connection manager's tracing configuration + // is overridden by this route specific instance. + Tracing tracing = 15; + + // The maximum bytes which will be buffered for retries and shadowing. + // If set, the bytes actually buffered will be the minimum value of this and the + // listener per_connection_buffer_limit_bytes. + google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; +} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the +// weights. +message WeightedCluster { + // [#next-free-field: 11] + message ClusterWeight { + reserved 7; + + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // An integer between 0 and :ref:`total_weight + // `. When a request matches the route, + // the choice of an upstream cluster is determined by its weight. The sum of weights across all + // entries in the clusters array must add up to the total_weight, which defaults to 100. + google.protobuf.UInt32Value weight = 2; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + // the upstream cluster with metadata matching what is set in this field will be considered for + // load balancing. 
Note that this will be merged with what's provided in + // :ref:`RouteAction.metadata_match `, with + // values here taking precedence. The filter name should be specified as *envoy.lb*. + core.Metadata metadata_match = 3; + + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 4 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of HTTP headers that should be removed from each request when + // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + repeated string request_headers_to_remove = 9; + + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 5 + [(validate.rules).repeated = {max_items: 1000}]; + + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. 
+ repeated string response_headers_to_remove = 6; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map per_filter_config = 8 [deprecated = true]; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map typed_per_filter_config = 10; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; + + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, which must be greater than 0. Defaults to 100. + google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the *runtime_key_prefix* is + // specified, the router will look for weights associated with each upstream + // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where + // *cluster[i]* denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. 
+ string runtime_key_prefix = 2; +} + +// [#next-free-field: 12] +message RouteMatch { + message GrpcRouteMatchOptions { + } + + message TlsContextMatchOptions { + // If specified, the route will match against whether or not a certificate is presented. + // If not specified, certificate presentation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue presented = 1; + + // If specified, the route will match against whether or not a certificate is validated. + // If not specified, certificate validation status (true or false) will not be considered when route matching. + google.protobuf.BoolValue validated = 2; + } + + reserved 5; + + oneof path_specifier { + option (validate.required) = true; + + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. + string prefix = 1; + + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. The regex grammar is defined `here + // `_. + // + // Examples: + // + // * The regex ``/b[io]t`` matches the path */bit* + // * The regex ``/b[io]t`` matches the path */bot* + // * The regex ``/b[io]t`` does not match the path */bite* + // * The regex ``/b[io]t`` does not match the path */bit/bot* + // + // .. attention:: + // This field has been deprecated in favor of `safe_regex` as it is not safe for use with + // untrusted input in all cases. 
+ string regex = 3 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. + // + // [#next-major-version: In the v3 API we should redo how path specification works such + // that we utilize StringMatcher, and additionally have consistent options around whether we + // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + // to deprecate the existing options. We should even consider whether we want to do away with + // path_specifier entirely and just rely on a set of header matchers which can already match + // on :path, etc. The issue with that is it is unclear how to generically deal with query string + // stripping. This needs more thought.] + type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; + } + + // Indicates that prefix/path matching should be case insensitive. The default + // is true. + google.protobuf.BoolValue case_sensitive = 4; + + // Indicates that the route should additionally match on a runtime key. Every time the route + // is considered for a match, it must also fall under the percentage of matches indicated by + // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + // number is <= the value of the numerator N, or if the key is not present, the default + // value, the router continues to evaluate the remaining match criteria. A runtime_fraction + // route configuration can be used to roll out route changes in a gradual manner without full + // code/config deploys. Refer to the :ref:`traffic shifting + // ` docs for additional documentation. 
+ // + // .. note:: + // + // Parsing this field is implemented such that the runtime key's data may be represented + // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + // integer with the assumption that the value is an integral percentage out of 100. For + // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + core.RuntimeFractionalPercent runtime_fraction = 9; + + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). + repeated HeaderMatcher headers = 6; + + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; + + // If specified, only gRPC requests will be matched. The router will check + // that the content-type header has a application/grpc or one of the various + // application/grpc+ values. + GrpcRouteMatchOptions grpc = 8; + + // If specified, the client tls context will be matched against the defined + // match options. + // + // [#next-major-version: unify with RBAC] + TlsContextMatchOptions tls_context = 11; +} + +// [#next-free-field: 12] +message CorsPolicy { + // Specifies the origins that will be allowed to do CORS requests. + // + // An origin is allowed if either allow_origin or allow_origin_regex match. + // + // .. 
attention:: + // This field has been deprecated in favor of `allow_origin_string_match`. + repeated string allow_origin = 1 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Specifies regex patterns that match allowed origins. + // + // An origin is allowed if either allow_origin or allow_origin_regex match. + // + // .. attention:: + // This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for + // use with untrusted input in all cases. + repeated string allow_origin_regex = 8 + [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}]; + + // Specifies string patterns that match allowed origins. An origin is allowed if any of the + // string matchers match. + repeated type.matcher.StringMatcher allow_origin_string_match = 11; + + // Specifies the content for the *access-control-allow-methods* header. + string allow_methods = 2; + + // Specifies the content for the *access-control-allow-headers* header. + string allow_headers = 3; + + // Specifies the content for the *access-control-expose-headers* header. + string expose_headers = 4; + + // Specifies the content for the *access-control-max-age* header. + string max_age = 5; + + // Specifies whether the resource allows credentials. + google.protobuf.BoolValue allow_credentials = 6; + + oneof enabled_specifier { + // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route. + // + // .. attention:: + // + // **This field is deprecated**. Set the + // :ref:`filter_enabled` field instead. + google.protobuf.BoolValue enabled = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Specifies the % of requests for which the CORS filter is enabled. + // + // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + // filter will be enabled for 100% of the requests. 
+ // + // If :ref:`runtime_key ` is + // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + core.RuntimeFractionalPercent filter_enabled = 9; + } + + // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + // enforced. + // + // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those + // fields have to explicitly disable the filter in order for this setting to take effect. + // + // If :ref:`runtime_key ` is specified, + // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + // and track the request's *Origin* to determine if it's valid but will not enforce any policies. + core.RuntimeFractionalPercent shadow_enabled = 10; +} + +// [#next-free-field: 32] +message RouteAction { + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // Configures :ref:`internal redirect ` behavior. + enum InternalRedirectAction { + PASS_THROUGH_INTERNAL_REDIRECT = 0; + HANDLE_INTERNAL_REDIRECT = 1; + } + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + // + // .. note:: + // + // Shadowing will not be triggered if the primary cluster does not exist. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. 
+ string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+
+ // If not specified, all requests to the target cluster will be mirrored. If
+ // specified, Envoy will lookup the runtime key to get the % of requests to
+ // mirror. Valid values are from 0 to 10000, allowing for increments of
+ // 0.01% of requests to be mirrored. If the runtime key is specified in the
+ // configuration but not present in runtime, 0 is the default and thus 0% of
+ // requests will be mirrored.
+ //
+ // .. attention::
+ //
+ //   **This field is deprecated**. Set the
+ //   :ref:`runtime_fraction
+ //   `
+ //   field instead. Mirroring occurs if both this and
+ //   `runtime_fraction` are not set.
+ string runtime_key = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];
+
+ // If not specified, all requests to the target cluster will be mirrored.
+ //
+ // If specified, this field takes precedence over the `runtime_key` field and requests must also
+ // fall under the percentage of matches indicated by this field.
+ //
+ // For some fraction N/D, a random number in the range [0,D) is selected. If the
+ // number is <= the value of the numerator N, or if the key is not present, the default
+ // value, the request will be mirrored.
+ core.RuntimeFractionalPercent runtime_fraction = 3;
+ }
+
+ // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
+ // `.
+ // [#next-free-field: 6]
+ message HashPolicy {
+ message Header {
+ // The name of the request header that will be used to obtain the hash
+ // key. If the request header is not present, no hash will be produced.
+ string header_name = 1 [(validate.rules).string = {min_bytes: 1}];
+ }
+
+ // Envoy supports two types of cookie affinity:
+ //
+ // 1. Passive. Envoy takes a cookie that's present in the cookies header and
+ //    hashes on its value.
+ //
+ // 2. Generated. 
Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. If the TTL is present and zero, the generated cookie will + // be a session cookie. + google.protobuf.Duration ttl = 2; + + // The name of the path for the cookie. If no path is specified here, no path + // will be set for the cookie. + string path = 3; + } + + message ConnectionProperties { + // Hash on source IP address. + bool source_ip = 1; + } + + message QueryParameter { + // The name of the URL query parameter that will be used to obtain the hash + // key. If the parameter is not present, no hash will be produced. Query + // parameter names are case-sensitive. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + + // Query parameter hash policy. + QueryParameter query_parameter = 5; + } + + // The flag that short-circuits the hash computing. 
This field provides a
+ // 'fallback' style of configuration: "if a terminal policy doesn't work,
+ // fallback to rest of the policy list", it saves time when the terminal
+ // policy works.
+ //
+ // If true, and there is already a hash computed, ignore rest of the
+ // list of hash policies.
+ // For example, if the following hash methods are configured:
+ //
+ //  ========= ========
+ //  specifier terminal
+ //  ========= ========
+ //  Header A  true
+ //  Header B  false
+ //  Header C  false
+ //  ========= ========
+ //
+ // The generateHash process ends if policy "header A" generates a hash, as
+ // it's a terminal policy.
+ bool terminal = 4;
+ }
+
+ // Allows enabling and disabling upgrades on a per-route basis.
+ // This overrides any enabled/disabled upgrade filter chain specified in the
+ // HttpConnectionManager
+ // :ref:`upgrade_configs
+ // `
+ // but does not affect any custom filter chain specified there.
+ message UpgradeConfig {
+ // The case-insensitive name of this upgrade, e.g. "websocket".
+ // For each upgrade type present in upgrade_configs, requests with
+ // Upgrade: [upgrade_type] will be proxied upstream.
+ string upgrade_type = 1;
+
+ // Determines if upgrades are available on this route. Defaults to true.
+ google.protobuf.BoolValue enabled = 2;
+ }
+
+ reserved 12, 18, 19, 16, 22, 21;
+
+ oneof cluster_specifier {
+ option (validate.required) = true;
+
+ // Indicates the upstream cluster to which the request should be routed
+ // to.
+ string cluster = 1 [(validate.rules).string = {min_bytes: 1}];
+
+ // Envoy will determine the cluster to route to by reading the value of the
+ // HTTP header named by cluster_header from the request headers. If the
+ // header is not found or the referenced cluster does not exist, Envoy will
+ // return a 404 response.
+ //
+ // .. attention::
+ //
+ //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1
+ //   *Host* header. 
Thus, if attempting to match on *Host*, match on *:authority* instead. + string cluster_header = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } + + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum = {defined_only: true}]; + + // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + // in the upstream cluster with metadata matching what's set in this field will be considered + // for load balancing. If using :ref:`weighted_clusters + // `, metadata will be merged, with values + // provided there taking precedence. The filter name should be specified as *envoy.lb*. + core.Metadata metadata_match = 4; + + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. The router filter will + // place the original path before rewrite into the :ref:`x-envoy-original-path + // ` header. + // + // .. attention:: + // + // Pay careful attention to the use of trailing slashes in the + // :ref:`route's match ` prefix value. + // Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single + // :ref:`Route `, as shown by the below config entries: + // + // .. 
code-block:: yaml + // + // - match: + // prefix: "/prefix/" + // route: + // prefix_rewrite: "/" + // - match: + // prefix: "/prefix" + // route: + // prefix_rewrite: "/" + // + // Having above entries in the config, requests to */prefix* will be stripped to */*, while + // requests to */prefix/etc* will be stripped to */etc*. + string prefix_rewrite = 5; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite = 6 [(udpa.annotations.field_migrate).rename = "host_rewrite_literal"]; + + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; + + // Indicates that during forwarding, the host header will be swapped with the content of given + // downstream or :ref:`custom ` header. + // If header value is empty, host header is left intact. + // + // .. attention:: + // + // Pay attention to the potential security implications of using this option. Provided header + // must come from trusted source. + string auto_host_rewrite_header = 29 + [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; + } + + // Specifies the upstream timeout for the route. If not specified, the default is 15s. This + // spans between the point at which the entire downstream request (i.e. end-of-stream) has been + // processed and when the upstream response has been completely processed. A value of 0 will + // disable the route's timeout. + // + // .. note:: + // + // This timeout includes all retries. 
See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration timeout = 8; + + // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + // although the connection manager wide :ref:`stream_idle_timeout + // ` + // will still apply. A value of 0 will completely disable the route's idle timeout, even if a + // connection manager stream idle timeout is configured. + // + // The idle timeout is distinct to :ref:`timeout + // `, which provides an upper bound + // on the upstream response time; :ref:`idle_timeout + // ` instead bounds the amount + // of time the request's stream may be idle. + // + // After header decoding, the idle timeout will apply on downstream and + // upstream request events. Each time an encode/decode event for headers or + // data is processed for the stream, the timer will be reset. If the timeout + // fires, the stream is terminated with a 408 Request Timeout error code if no + // upstream response header has been received, otherwise a stream reset + // occurs. + google.protobuf.Duration idle_timeout = 24; + + // Indicates that the route has a retry policy. Note that if this is set, + // it'll take precedence over the virtual host level retry policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + RetryPolicy retry_policy = 9; + + // Indicates that the route has a request mirroring policy. + // + // .. attention:: + // This field has been deprecated in favor of `request_mirror_policies` which supports one or + // more mirroring policies. + RequestMirrorPolicy request_mirror_policy = 10 [deprecated = true]; + + // Indicates that the route has request mirroring policies. + repeated RequestMirrorPolicy request_mirror_policies = 30; + + // Optionally specifies the :ref:`routing priority `. 
+ core.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; + + // Specifies a set of rate limit configurations that could be applied to the + // route. + repeated RateLimit rate_limits = 13; + + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + google.protobuf.BoolValue include_vh_rate_limits = 14; + + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). If a hash policy has the "terminal" attribute set to true, and + // there is already a hash generated, the hash is returned immediately, + // ignoring the rest of the hash policy list. + repeated HashPolicy hash_policy = 15; + + // Indicates that the route has a CORS policy. + CorsPolicy cors = 17; + + // If present, and the request is a gRPC request, use the + // `grpc-timeout header `_, + // or its default value (infinity) instead of + // :ref:`timeout `, but limit the applied timeout + // to the maximum value specified here. If configured as 0, the maximum allowed timeout for + // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used + // and gRPC requests time out like any other requests using + // :ref:`timeout ` or its default. 
+ // This can be used to prevent unexpected upstream request timeouts due to potentially long + // time gaps between gRPC request and response in gRPC streaming mode. + google.protobuf.Duration max_grpc_timeout = 23; + + // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting + // the provided duration from the header. This is useful in allowing Envoy to set its global + // timeout to be less than that of the deadline imposed by the calling client, which makes it more + // likely that Envoy will handle the timeout instead of having the call canceled by the client. + // The offset will only be applied if the provided grpc_timeout is greater than the offset. This + // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + // infinity). + google.protobuf.Duration grpc_timeout_offset = 28; + + repeated UpgradeConfig upgrade_configs = 25; + + InternalRedirectAction internal_redirect_action = 26; + + // An internal redirect is handled, iff the number of previous internal redirects that a + // downstream request has encountered is lower than this value, and + // :ref:`internal_redirect_action ` + // is set to :ref:`HANDLE_INTERNAL_REDIRECT + // ` + // In the case where a downstream request is bounced among multiple routes by internal redirect, + // the first route that hits this threshold, or has + // :ref:`internal_redirect_action ` + // set to + // :ref:`PASS_THROUGH_INTERNAL_REDIRECT + // ` + // will pass the redirect back to downstream. + // + // If not specified, at most one redirect will be followed. + google.protobuf.UInt32Value max_internal_redirects = 31; + + // Indicates that the route has a hedge policy. Note that if this is set, + // it'll take precedence over the virtual host level hedge policy entirely + // (e.g.: policies are not merged, most internal one becomes the enforced policy). + HedgePolicy hedge_policy = 27; +} + +// HTTP retry :ref:`architecture overview `. 
+// [#next-free-field: 11] +message RetryPolicy { + message RetryPriority { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true]; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryHostPredicate { + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + oneof config_type { + google.protobuf.Struct config = 2 [deprecated = true]; + + google.protobuf.Any typed_config = 3; + } + } + + message RetryBackOff { + // Specifies the base interval between retries. This parameter is required and must be greater + // than zero. Values less than 1 ms are rounded up to 1 ms. + // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + // back-off algorithm. + google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { + required: true + gt {} + }]; + + // Specifies the maximum interval between retries. This parameter is optional, but must be + // greater than or equal to the `base_interval` if set. The default is 10 times the + // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + // of Envoy's back-off algorithm. + google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; + } + + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + string retry_on = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + google.protobuf.UInt32Value num_retries = 2; + + // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. 
The + // same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + google.protobuf.Duration per_try_timeout = 3; + + // Specifies an implementation of a RetryPriority which is used to determine the + // distribution of load across priorities used for retries. Refer to + // :ref:`retry plugin configuration ` for more details. + RetryPriority retry_priority = 4; + + // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + // for retries. If any of the predicates reject the host, host selection will be reattempted. + // Refer to :ref:`retry plugin configuration ` for more + // details. + repeated RetryHostPredicate retry_host_predicate = 5; + + // The maximum number of times host selection will be reattempted before giving up, at which + // point the host that was last selected will be routed to. If unspecified, this will default to + // retrying once. + int64 host_selection_retry_max_attempts = 6; + + // HTTP status codes that should trigger a retry in addition to those specified by retry_on. + repeated uint32 retriable_status_codes = 7; + + // Specifies parameters that control retry back off. This parameter is optional, in which case the + // default base interval is 25 milliseconds or, if set, the current value of the + // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times + // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + // describes Envoy's back-off algorithm. + RetryBackOff retry_back_off = 8; + + // HTTP response headers that trigger a retry if present in the response. 
A retry will be + // triggered if any of the header matches match the upstream response headers. + // The field is only consulted if 'retriable-headers' retry policy is active. + repeated HeaderMatcher retriable_headers = 9; + + // HTTP headers which must be present in the request for retries to be attempted. + repeated HeaderMatcher retriable_request_headers = 10; +} + +// HTTP request hedging :ref:`architecture overview `. +message HedgePolicy { + // Specifies the number of initial requests that should be sent upstream. + // Must be at least 1. + // Defaults to 1. + // [#not-implemented-hide:] + google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; + + // Specifies a probability that an additional upstream request should be sent + // on top of what is specified by initial_requests. + // Defaults to 0. + // [#not-implemented-hide:] + type.FractionalPercent additional_request_chance = 2; + + // Indicates that a hedged request should be sent when the per-try timeout + // is hit. This will only occur if the retry policy also indicates that a + // timed out request should be retried. + // Once a timed out request is retried due to per try timeout, the router + // filter will ensure that it is not retried again even if the returned + // response headers would otherwise be retried according the specified + // :ref:`RetryPolicy `. + // Defaults to false. + bool hedge_on_per_try_timeout = 3; +} + +// [#next-free-field: 9] +message RedirectAction { + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + + // When the scheme redirection take place, the following rules apply: + // 1. 
If the source URI scheme is `http` and the port is explicitly + // set to `:80`, the port will be removed after the redirection + // 2. If the source URI scheme is `https` and the port is explicitly + // set to `:443`, the port will be removed after the redirection + oneof scheme_rewrite_specifier { + // The scheme portion of the URL will be swapped with "https". + bool https_redirect = 4; + + // The scheme portion of the URL will be swapped with this value. + string scheme_redirect = 7; + } + + // The host portion of the URL will be swapped with this value. + string host_redirect = 1; + + // The port value of the URL will be swapped with this value. + uint32 port_redirect = 8; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + string path_redirect = 2; + + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + // + // .. attention:: + // + // Pay attention to the use of trailing slashes as mentioned in + // :ref:`RouteAction's prefix_rewrite `. + string prefix_rewrite = 5; + } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. + bool strip_query = 6; +} + +message DirectResponseAction { + // Specifies the HTTP response status to be returned. + uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; + + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. 
note:: + // + // Headers can be specified using *response_headers_to_add* in the enclosing + // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or + // :ref:`envoy_api_msg_route.VirtualHost`. + core.DataSource body = 2; +} + +message Decorator { + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + string operation = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Whether the decorated details should be propagated to the other party. The default is true. + google.protobuf.BoolValue propagate = 2; +} + +message Tracing { + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + type.FractionalPercent client_sampling = 1; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.FractionalPercent random_sampling = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. 
For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + type.FractionalPercent overall_sampling = 3; + + // A list of custom tags with unique tag name to create tags for the active span. + // It will take effect after merging with the :ref:`corresponding configuration + // ` + // configured in the HTTP connection manager. If two tags with the same name are configured + // each in the HTTP connection manager and the route level, the one configured here takes + // priority. + repeated type.tracing.v2.CustomTag custom_tags = 4; +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect” statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. +message VirtualCluster { + // Specifies a regex pattern to use for matching requests. The entire path of the request + // must match the regex. The regex grammar used is defined `here + // `_. 
+ // + // Examples: + // + // * The regex ``/rides/\d+`` matches the path */rides/0* + // * The regex ``/rides/\d+`` matches the path */rides/123* + // * The regex ``/rides/\d+`` does not match the path */rides/123/456* + // + // .. attention:: + // This field has been deprecated in favor of `headers` as it is not safe for use with + // untrusted input in all cases. + string pattern = 1 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + + // Specifies a list of header matchers to use for matching requests. Each specified header must + // match. The pseudo-headers `:path` and `:method` can be used to match the request path and + // method, respectively. + repeated HeaderMatcher headers = 4; + + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + string name = 2 [(validate.rules).string = {min_bytes: 1}]; + + // Optionally specifies the HTTP method to match on. For example GET, PUT, + // etc. + // + // .. attention:: + // This field has been deprecated in favor of `headers`. + core.RequestMethod method = 3 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; +} + +// Global rate limiting :ref:`architecture overview `. +message RateLimit { + // [#next-free-field: 7] + message Action { + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. 
code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + } + + // The following descriptor entry is appended when a header contains a key that matches the + // *header_name*: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. + string header_name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + // The value to use in the descriptor entry. 
+ string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + } + } + + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + + // A list of actions that are to be applied for this rate limit configuration. 
+ // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* +// header. Thus, if attempting to match on *Host*, match on *:authority* instead. +// +// .. attention:: +// +// To route on HTTP method, use the special HTTP/2 *:method* header. This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "exact_match": "POST" +// } +// +// .. attention:: +// In the absence of any header match specifier, match will default to :ref:`present_match +// `. i.e, a request that has the :ref:`name +// ` header will match, regardless of the header's +// value. +// +// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] +// [#next-free-field: 12] +message HeaderMatcher { + reserved 2, 3; + + // Specifies the name of the header in the request. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Specifies how the header match will be performed to route the request. + oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. The regex grammar used in the value field is defined + // `here `_. 
+ // + // Examples: + // + // * The regex ``\d{3}`` matches the value *123* + // * The regex ``\d{3}`` does not match the value *1234* + // * The regex ``\d{3}`` does not match the value *123.456* + // + // .. attention:: + // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use + // with untrusted input in all cases. + string regex_match = 5 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. + type.matcher.RegexMatcher safe_regex_match = 11; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, + // "-1somestring" + type.Int64Range range_match = 6; + + // If specified, header match will be performed based on whether the header is in the + // request. + bool present_match = 7; + + // If specified, header match will be performed based on the prefix of the header value. + // Note: empty prefix is not allowed, please use present_match instead. + // + // Examples: + // + // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. 
+ string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; + + // If specified, header match will be performed based on the suffix of the header value. + // Note: empty suffix is not allowed, please use present_match instead. + // + // Examples: + // + // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. + string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; + } + + // If specified, the match result will be inverted before checking. Defaults to false. + // + // Examples: + // + // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. + // * The range [-10,0) will match the value -1, so it will not match when inverted. + bool invert_match = 8; +} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. +// [#next-free-field: 7] +message QueryParameterMatcher { + // Specifies the name of a key that must be present in the requested + // *path*'s query string. + string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; + + // Specifies the value of the key. If the value is absent, a request + // that contains the key in its query string will match, whether the + // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") + // + // ..attention:: + // This field is deprecated. Use an `exact` match inside the `string_match` field. + string value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Specifies whether the query parameter value is a regular expression. + // Defaults to false. The entire query parameter value (i.e., the part to + // the right of the equals sign in "key=value") must match the regex. + // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. + // + // ..attention:: + // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. 
+ google.protobuf.BoolValue regex = 4 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + oneof query_parameter_match_specifier { + // Specifies whether a query parameter value should match against a string. + type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; + + // Specifies whether a query parameter should be present. + bool present_match = 6; + } +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/scoped_route.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/scoped_route.proto new file mode 100644 index 00000000000..43f81cf9202 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/scoped_route.proto @@ -0,0 +1,107 @@ +syntax = "proto3"; + +package envoy.api.v2; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "ScopedRouteProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; + +// [#protodoc-title: HTTP scoped routing configuration] +// * Routing :ref:`architecture overview ` + +// Specifies a routing scope, which associates a +// :ref:`Key` to a +// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). +// +// The HTTP connection manager builds up a table consisting of these Key to +// RouteConfiguration mappings, and looks up the RouteConfiguration to use per +// request according to the algorithm specified in the +// :ref:`scope_key_builder` +// assigned to the HttpConnectionManager. +// +// For example, with the following configurations (in YAML): +// +// HttpConnectionManager config: +// +// .. code:: +// +// ... 
+// scoped_routes: +// name: foo-scoped-routes +// scope_key_builder: +// fragments: +// - header_value_extractor: +// name: X-Route-Selector +// element_separator: , +// element: +// separator: = +// key: vip +// +// ScopedRouteConfiguration resources (specified statically via +// :ref:`scoped_route_configurations_list` +// or obtained dynamically via SRDS): +// +// .. code:: +// +// (1) +// name: route-scope1 +// route_configuration_name: route-config1 +// key: +// fragments: +// - string_key: 172.10.10.20 +// +// (2) +// name: route-scope2 +// route_configuration_name: route-config2 +// key: +// fragments: +// - string_key: 172.20.20.30 +// +// A request from a client such as: +// +// .. code:: +// +// GET / HTTP/1.1 +// Host: foo.com +// X-Route-Selector: vip=172.10.10.20 +// +// would result in the routing table defined by the `route-config1` +// RouteConfiguration being assigned to the HTTP request/stream. +// +message ScopedRouteConfiguration { + // Specifies a key which is matched against the output of the + // :ref:`scope_key_builder` + // specified in the HttpConnectionManager. The matching is done per HTTP + // request and is dependent on the order of the fragments contained in the + // Key. + message Key { + message Fragment { + oneof type { + option (validate.required) = true; + + // A string to match against. + string string_key = 1; + } + } + + // The ordered set of fragments to match against. The order must match the + // fragments in the corresponding + // :ref:`scope_key_builder`. + repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; + } + + // The name assigned to the routing scope. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an + // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated + // with this scope. 
+ string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; + + // The key to match against. + Key key = 3 [(validate.rules).message = {required: true}]; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/api/v2/srds.proto b/xds/third_party/envoy/src/main/proto/envoy/api/v2/srds.proto index 224ae070774..f874307d7e1 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/api/v2/srds.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/api/v2/srds.proto @@ -2,20 +2,24 @@ syntax = "proto3"; package envoy.api.v2; -option java_outer_classname = "SrdsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.api.v2"; -option java_generic_services = true; - import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; -import "validate/validate.proto"; +import "envoy/annotations/resource.proto"; +import "udpa/annotations/migrate.proto"; -// [#protodoc-title: HTTP scoped routing configuration] +import public "envoy/api/v2/scoped_route.proto"; + +option java_package = "io.envoyproxy.envoy.api.v2"; +option java_outer_classname = "SrdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; + +// [#protodoc-title: SRDS] // * Routing :ref:`architecture overview ` -// + // The Scoped Routes Discovery Service (SRDS) API distributes // :ref:`ScopedRouteConfiguration` // resources. Each ScopedRouteConfiguration resource represents a "routing @@ -24,6 +28,8 @@ import "validate/validate.proto"; // :ref:`RouteConfiguration` message) to each // HTTP request. 
service ScopedRoutesDiscoveryService { + option (envoy.annotations.resource).type = "envoy.api.v2.ScopedRouteConfiguration"; + rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } @@ -31,103 +37,12 @@ service ScopedRoutesDiscoveryService { } rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) { - option (google.api.http) = { - post: "/v2/discovery:scoped-routes" - body: "*" - }; + option (google.api.http).post = "/v2/discovery:scoped-routes"; + option (google.api.http).body = "*"; } } -// Specifies a routing scope, which associates a -// :ref:`Key` to a -// :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). -// -// The HTTP connection manager builds up a table consisting of these Key to -// RouteConfiguration mappings, and looks up the RouteConfiguration to use per -// request according to the algorithm specified in the -// :ref:`scope_key_builder` -// assigned to the HttpConnectionManager. -// -// For example, with the following configurations (in YAML): -// -// HttpConnectionManager config: -// -// .. code:: -// -// ... -// scoped_routes: -// name: foo-scoped-routes -// scope_key_builder: -// fragments: -// - header_value_extractor: -// name: X-Route-Selector -// element_separator: , -// element: -// separator: = -// key: vip -// -// ScopedRouteConfiguration resources (specified statically via -// :ref:`scoped_route_configurations_list` -// or obtained dynamically via SRDS): -// -// .. code:: -// -// (1) -// name: route-scope1 -// route_configuration_name: route-config1 -// key: -// fragments: -// - string_key: 172.10.10.20 -// -// (2) -// name: route-scope2 -// route_configuration_name: route-config2 -// key: -// fragments: -// - string_key: 172.20.20.30 -// -// A request from a client such as: -// -// .. 
code:: -// -// GET / HTTP/1.1 -// Host: foo.com -// X-Route-Selector: vip=172.10.10.20 -// -// would result in the routing table defined by the `route-config1` -// RouteConfiguration being assigned to the HTTP request/stream. -// -// [#comment:next free field: 4] -message ScopedRouteConfiguration { - // Specifies a key which is matched against the output of the - // :ref:`scope_key_builder` - // specified in the HttpConnectionManager. The matching is done per HTTP - // request and is dependent on the order of the fragments contained in the - // Key. - message Key { - message Fragment { - oneof type { - option (validate.required) = true; - - // A string to match against. - string string_key = 1; - } - } - - // The ordered set of fragments to match against. The order must match the - // fragments in the corresponding - // :ref:`scope_key_builder`. - repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; - } - - // The name assigned to the routing scope. - string name = 1 [(validate.rules).string = {min_bytes: 1}]; - - // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an - // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated - // with this scope. - string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; - - // The key to match against. - Key key = 3 [(validate.rules).message = {required: true}]; +// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
+message SrdsDummy { } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/filter/accesslog/v2/accesslog.proto b/xds/third_party/envoy/src/main/proto/envoy/config/filter/accesslog/v2/accesslog.proto index 8810e050e95..8a525dee910 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/filter/accesslog/v2/accesslog.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/filter/accesslog/v2/accesslog.proto @@ -2,28 +2,30 @@ syntax = "proto3"; package envoy.config.filter.accesslog.v2; -option java_outer_classname = "AccesslogProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; - import "envoy/api/v2/core/base.proto"; -import "envoy/api/v2/route/route.proto"; +import "envoy/api/v2/route/route_components.proto"; import "envoy/type/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2"; +option java_outer_classname = "AccesslogProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.accesslog.v3"; + // [#protodoc-title: Common access log types] message AccessLog { // The name of the access log implementation to instantiate. The name must // match a statically registered access log. Current built-in loggers include: // - // #. "envoy.file_access_log" - // #. "envoy.http_grpc_access_log" - // #. "envoy.tcp_grpc_access_log" + // #. "envoy.access_loggers.file" + // #. "envoy.access_loggers.http_grpc" + // #. "envoy.access_loggers.tcp_grpc" string name = 1; // Filter which is used to determine if the access log needs to be written. @@ -32,19 +34,20 @@ message AccessLog { // Custom configuration that depends on the access log being instantiated. Built-in // configurations include: // - // #. "envoy.file_access_log": :ref:`FileAccessLog + // #. 
"envoy.access_loggers.file": :ref:`FileAccessLog // ` - // #. "envoy.http_grpc_access_log": :ref:`HttpGrpcAccessLogConfig + // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig // ` - // #. "envoy.tcp_grpc_access_log": :ref:`TcpGrpcAccessLogConfig + // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig // ` oneof config_type { - google.protobuf.Struct config = 3; + google.protobuf.Struct config = 3 [deprecated = true]; google.protobuf.Any typed_config = 4; } } +// [#next-free-field: 12] message AccessLogFilter { oneof filter_specifier { option (validate.required) = true; @@ -199,6 +202,7 @@ message ResponseFlagFilter { in: "URX" in: "SI" in: "IH" + in: "DPE" } } }]; @@ -243,7 +247,7 @@ message ExtensionFilter { // Custom configuration that depends on the filter being instantiated. oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/xds/third_party/envoy/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index efdfb4be939..13dc6ffaec1 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -2,28 +2,34 @@ syntax = "proto3"; package envoy.config.filter.network.http_connection_manager.v2; -option java_outer_classname = "HttpConnectionManagerProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; - import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/core/protocol.proto"; -import "envoy/api/v2/rds.proto"; -import "envoy/api/v2/srds.proto"; +import 
"envoy/api/v2/route.proto"; +import "envoy/api/v2/scoped_route.proto"; import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/type/percent.proto"; +import "envoy/type/tracing/v2/custom_tag.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2"; +option java_outer_classname = "HttpConnectionManagerProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.filters.network.http_connection_manager.v3"; + // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. +// [#extension: envoy.filters.network.http_connection_manager] -// [#comment:next free field: 35] +// [#next-free-field: 36] message HttpConnectionManager { enum CodecType { // For every new connection, the connection manager will determine which @@ -40,6 +46,11 @@ message HttpConnectionManager { // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. // Prior knowledge is allowed). HTTP2 = 2; + + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. + HTTP3 = 3; } enum ServerHeaderTransformation { @@ -78,6 +89,7 @@ message HttpConnectionManager { ALWAYS_FORWARD_ONLY = 4; } + // [#next-free-field: 9] message Tracing { enum OperationName { // The HTTP listener is used for ingress/incoming requests. @@ -93,13 +105,20 @@ message HttpConnectionManager { // // .. attention:: // This field has been deprecated in favor of `traffic_direction`. 
- OperationName operation_name = 1 - [(validate.rules).enum = {defined_only: true}, deprecated = true]; + OperationName operation_name = 1 [ + deprecated = true, + (validate.rules).enum = {defined_only: true}, + (envoy.annotations.disallowed_by_default) = true + ]; // A list of header names used to create tags for the active span. The header name is used to // populate the tag name, and the header value is used to populate the tag value. The tag is // created if the specified header name is present in the request's headers. - repeated string request_headers_for_tags = 2; + // + // .. attention:: + // This field has been deprecated in favor of :ref:`custom_tags + // `. + repeated string request_headers_for_tags = 2 [deprecated = true]; // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` @@ -134,6 +153,9 @@ message HttpConnectionManager { // truncate lengthy request paths to meet the needs of a tracing backend. // Default: 256 google.protobuf.UInt32Value max_path_tag_length = 7; + + // A list of custom tags with unique tag name to create tags for the active span. + repeated type.tracing.v2.CustomTag custom_tags = 8; } message InternalAddressConfig { @@ -141,7 +163,7 @@ message HttpConnectionManager { bool unix_sockets = 1; } - // [#comment:next free field: 7] + // [#next-free-field: 7] message SetCurrentClientCertDetails { reserved 2; @@ -239,6 +261,10 @@ message HttpConnectionManager { // `. Tracing tracing = 7; + // Additional settings for HTTP requests handled by the connection manager. These will be + // applicable to both HTTP1 and HTTP2 requests. + api.v2.core.HttpProtocolOptions common_http_protocol_options = 35; + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. api.v2.core.Http1ProtocolOptions http_protocol_options = 8; @@ -267,10 +293,12 @@ message HttpConnectionManager { // idle timeout is defined as the period in which there are no active // requests. 
If not set, there is no idle timeout. When the idle timeout is // reached the connection will be closed. If the connection is an HTTP/2 - // connection a drain sequence will occur prior to closing the connection. See - // :ref:`drain_timeout - // `. - google.protobuf.Duration idle_timeout = 11; + // connection a drain sequence will occur prior to closing the connection. + // This field is deprecated. Use :ref:`idle_timeout + // ` + // instead. + google.protobuf.Duration idle_timeout = 11 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. The default value was selected @@ -340,7 +368,7 @@ message HttpConnectionManager { // .. NOTE:: // To be useful in avoiding the race condition described above, this timeout must be set // to *at least* +<100ms to account for - // a reasonsable "worst" case processing time for a full iteration of Envoy's event loop>. + // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. // // .. WARNING:: // A value of 0 will completely disable delayed close processing. When disabled, the downstream @@ -475,6 +503,7 @@ message ScopedRouteConfigurationsList { [(validate.rules).repeated = {min_items: 1}]; } +// [#next-free-field: 6] message ScopedRoutes { // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These // keys are matched against a set of :ref:`Key` @@ -547,7 +576,9 @@ message ScopedRoutes { } } - // The final scope key consists of the ordered union of these fragments. + // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the + // fragments of a :ref:`ScopedRouteConfiguration`. + // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. 
repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } @@ -597,7 +628,7 @@ message HttpFilter { // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { - google.protobuf.Struct config = 2; + google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 4; } diff --git a/xds/third_party/envoy/src/main/proto/envoy/config/listener/v2/api_listener.proto b/xds/third_party/envoy/src/main/proto/envoy/config/listener/v2/api_listener.proto index 0c2253596e4..3f974cad9e6 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/config/listener/v2/api_listener.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/config/listener/v2/api_listener.proto @@ -2,19 +2,23 @@ syntax = "proto3"; package envoy.config.listener.v2; +import "google/protobuf/any.proto"; + +import "udpa/annotations/migrate.proto"; + +option java_package = "io.envoyproxy.envoy.config.listener.v2"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.config.listener.v2"; +option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; -import "google/protobuf/any.proto"; +// [#protodoc-title: API listener] -// [#not-implemented-hide:] // Describes a type of API listener, which is used in non-proxy clients. The type of API // exposed to the non-proxy application depends on the type of API listener. message ApiListener { // The type in this field determines the type of API listener. At present, the following // types are supported: - // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. 
We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, diff --git a/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/ads.proto b/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/ads.proto index 63b129069ed..01759e5f1b3 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/ads.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/ads.proto @@ -2,12 +2,14 @@ syntax = "proto3"; package envoy.service.discovery.v2; +import "envoy/api/v2/discovery.proto"; + +option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_generic_services = true; -import "envoy/api/v2/discovery.proto"; +// [#protodoc-title: Aggregated Discovery Service (ADS)] // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v2` for backwards diff --git a/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/sds.proto b/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/sds.proto index 7e9ae261a36..6a131ad413b 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/sds.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/service/discovery/v2/sds.proto @@ -2,32 +2,38 @@ syntax = "proto3"; package envoy.service.discovery.v2; -option java_outer_classname = "SdsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.service.discovery.v2"; - import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; -// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing -// services: https://github.com/google/protobuf/issues/4221 -message SdsDummy { -} +import "envoy/annotations/resource.proto"; +import "udpa/annotations/migrate.proto"; + +option java_package = "io.envoyproxy.envoy.service.discovery.v2"; +option java_outer_classname = "SdsProto"; +option java_multiple_files = true; +option java_generic_services = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.service.secret.v3"; + +// [#protodoc-title: Secret Discovery Service (SDS)] service SecretDiscoveryService { - rpc DeltaSecrets(stream envoy.api.v2.DeltaDiscoveryRequest) - returns (stream envoy.api.v2.DeltaDiscoveryResponse) { + option (envoy.annotations.resource).type = "envoy.api.v2.auth.Secret"; + + rpc DeltaSecrets(stream api.v2.DeltaDiscoveryRequest) + returns (stream api.v2.DeltaDiscoveryResponse) { } - rpc StreamSecrets(stream envoy.api.v2.DiscoveryRequest) - returns (stream envoy.api.v2.DiscoveryResponse) { + rpc StreamSecrets(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { } - rpc FetchSecrets(envoy.api.v2.DiscoveryRequest) returns (envoy.api.v2.DiscoveryResponse) { - option (google.api.http) = { - post: "/v2/discovery:secrets" - body: "*" - }; + rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { + option (google.api.http).post = "/v2/discovery:secrets"; + option (google.api.http).body = "*"; } } + +// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message SdsDummy { +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/service/load_stats/v2/lrs.proto b/xds/third_party/envoy/src/main/proto/envoy/service/load_stats/v2/lrs.proto index d7029db0b5e..a82d703de8c 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/service/load_stats/v2/lrs.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/service/load_stats/v2/lrs.proto @@ -2,11 +2,6 @@ syntax = "proto3"; package envoy.service.load_stats.v2; -option java_outer_classname = "LrsProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; -option java_generic_services = true; - import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/endpoint/load_report.proto"; @@ -14,6 +9,11 @@ import "google/protobuf/duration.proto"; import "validate/validate.proto"; +option java_package = "io.envoyproxy.envoy.service.load_stats.v2"; +option java_outer_classname = "LrsProto"; +option java_multiple_files = true; +option java_generic_services = true; + // [#protodoc-title: Load reporting service] service LoadReportingService { @@ -53,10 +53,10 @@ service LoadReportingService { // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsRequest { // Node identifier for Envoy instance. - envoy.api.v2.core.Node node = 1; + api.v2.core.Node node = 1; // A list of load stats to report. - repeated envoy.api.v2.endpoint.ClusterStats cluster_stats = 2; + repeated api.v2.endpoint.ClusterStats cluster_stats = 2; } // The management server sends envoy a LoadStatsResponse with all clusters it @@ -64,7 +64,7 @@ message LoadStatsRequest { // [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. message LoadStatsResponse { // Clusters to report stats for. 
- repeated string clusters = 1 [(validate.rules).repeated .min_items = 1]; + repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // 1. There may be some delay from when the timer fires until stats sampling occurs. diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/http.proto b/xds/third_party/envoy/src/main/proto/envoy/type/http.proto new file mode 100644 index 00000000000..12160c6354a --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/http.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.type; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "HttpProto"; +option java_multiple_files = true; + +// [#protodoc-title: HTTP] + +enum CodecClientType { + HTTP1 = 0; + + HTTP2 = 1; + + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. 
+ HTTP3 = 2; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/regex.proto b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/regex.proto index 98819364d9e..2dd5bbe047c 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/regex.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/regex.proto @@ -2,15 +2,15 @@ syntax = "proto3"; package envoy.type.matcher; -option java_outer_classname = "RegexProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.type.matcher"; - import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; -// [#protodoc-title: RegexMatcher] +option java_package = "io.envoyproxy.envoy.type.matcher"; +option java_outer_classname = "RegexProto"; +option java_multiple_files = true; + +// [#protodoc-title: Regex matcher] // A regex matcher designed for safety when used with untrusted input. message RegexMatcher { diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/string.proto b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/string.proto index f926af343fd..2cbfc247649 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/type/matcher/string.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/type/matcher/string.proto @@ -2,17 +2,19 @@ syntax = "proto3"; package envoy.type.matcher; -option java_outer_classname = "StringProto"; -option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.type.matcher"; - import "envoy/type/matcher/regex.proto"; +import "envoy/annotations/deprecation.proto"; import "validate/validate.proto"; -// [#protodoc-title: StringMatcher] +option java_package = "io.envoyproxy.envoy.type.matcher"; +option java_outer_classname = "StringProto"; +option java_multiple_files = true; + +// [#protodoc-title: String matcher] // Specifies the way to match a string. 
+// [#next-free-field: 7] message StringMatcher { oneof match_pattern { option (validate.required) = true; @@ -53,11 +55,20 @@ message StringMatcher { // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. - string regex = 4 [(validate.rules).string = {max_bytes: 1024}, deprecated = true]; + string regex = 4 [ + deprecated = true, + (validate.rules).string = {max_bytes: 1024}, + (envoy.annotations.disallowed_by_default) = true + ]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } + + // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no + // effect for the safe_regex match. + // For example, the matcher *data* will match both input string *Data* and *data* if set to true. + bool ignore_case = 6; } // Specifies a list of ways to match a string. diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/metadata/v2/metadata.proto b/xds/third_party/envoy/src/main/proto/envoy/type/metadata/v2/metadata.proto new file mode 100644 index 00000000000..67653519ba9 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/metadata/v2/metadata.proto @@ -0,0 +1,97 @@ +syntax = "proto3"; + +package envoy.type.metadata.v2; + +import "udpa/annotations/migrate.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.metadata.v2"; +option java_outer_classname = "MetadataProto"; +option java_multiple_files = true; +option (udpa.annotations.file_migrate).move_to_package = "envoy.type.metadata.v3"; + +// [#protodoc-title: Metadata] + +// MetadataKey provides a general interface using `key` and `path` to retrieve value from +// :ref:`Metadata `. +// +// For example, for the following Metadata: +// +// .. 
code-block:: yaml +// +// filter_metadata: +// envoy.xxx: +// prop: +// foo: bar +// xyz: +// hello: envoy +// +// The following MetadataKey will retrieve a string value "bar" from the Metadata. +// +// .. code-block:: yaml +// +// key: envoy.xxx +// path: +// - key: prop +// - key: foo +// +message MetadataKey { + // Specifies the segment in a path to retrieve value from Metadata. + // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. + message PathSegment { + oneof segment { + option (validate.required) = true; + + // If specified, use the key to retrieve the value in a Struct. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + } + } + + // The key name of Metadata to retrieve the Struct from the metadata. + // Typically, it represents a builtin subsystem or custom extension. + string key = 1 [(validate.rules).string = {min_bytes: 1}]; + + // The path to retrieve the Value from the Struct. It can be a prefix or a full path, + // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, + // which depends on the particular scenario. + // + // Note: Due to that only the key type segment is supported, the path can not specify a list + // unless the list is the last segment. + repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; +} + +// Describes what kind of metadata. +message MetadataKind { + // Represents dynamic metadata associated with the request. + message Request { + } + + // Represents metadata from :ref:`the route`. + message Route { + } + + // Represents metadata from :ref:`the upstream cluster`. + message Cluster { + } + + // Represents metadata from :ref:`the upstream + // host`. + message Host { + } + + oneof kind { + option (validate.required) = true; + + // Request kind of metadata. + Request request = 1; + + // Route kind of metadata. + Route route = 2; + + // Cluster kind of metadata. + Cluster cluster = 3; + + // Host kind of metadata. 
+ Host host = 4; + } +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/percent.proto b/xds/third_party/envoy/src/main/proto/envoy/type/percent.proto index 6d0868fd0ed..3420342dee2 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/type/percent.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/type/percent.proto @@ -2,11 +2,11 @@ syntax = "proto3"; package envoy.type; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.type"; - -import "validate/validate.proto"; // [#protodoc-title: Percent] diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/range.proto b/xds/third_party/envoy/src/main/proto/envoy/type/range.proto index f31cf32f07c..e550ca19bfc 100644 --- a/xds/third_party/envoy/src/main/proto/envoy/type/range.proto +++ b/xds/third_party/envoy/src/main/proto/envoy/type/range.proto @@ -2,9 +2,9 @@ syntax = "proto3"; package envoy.type; +option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.type"; // [#protodoc-title: Range] @@ -18,6 +18,16 @@ message Int64Range { int64 end = 2; } +// Specifies the int32 start and end of the range using half-open interval semantics [start, +// end). +message Int32Range { + // start of the range (inclusive) + int32 start = 1; + + // end of the range (exclusive) + int32 end = 2; +} + // Specifies the double start and end of the range using half-open interval semantics [start, // end). 
message DoubleRange { diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/semantic_version.proto b/xds/third_party/envoy/src/main/proto/envoy/type/semantic_version.proto new file mode 100644 index 00000000000..a7dbf7ebd6e --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/semantic_version.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.type; + +option java_package = "io.envoyproxy.envoy.type"; +option java_outer_classname = "SemanticVersionProto"; +option java_multiple_files = true; + +// [#protodoc-title: Semantic Version] + +// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate +// expected behaviors and APIs, the patch version field is used only +// for security fixes and can be generally ignored. +message SemanticVersion { + uint32 major_number = 1; + + uint32 minor_number = 2; + + uint32 patch = 3; +} diff --git a/xds/third_party/envoy/src/main/proto/envoy/type/tracing/v2/custom_tag.proto b/xds/third_party/envoy/src/main/proto/envoy/type/tracing/v2/custom_tag.proto new file mode 100644 index 00000000000..683a5c53677 --- /dev/null +++ b/xds/third_party/envoy/src/main/proto/envoy/type/tracing/v2/custom_tag.proto @@ -0,0 +1,83 @@ +syntax = "proto3"; + +package envoy.type.tracing.v2; + +import "envoy/type/metadata/v2/metadata.proto"; + +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.type.tracing.v2"; +option java_outer_classname = "CustomTagProto"; +option java_multiple_files = true; + +// [#protodoc-title: Custom Tag] + +// Describes custom tags for the active span. +// [#next-free-field: 6] +message CustomTag { + // Literal type custom tag with static value for the tag value. + message Literal { + // Static literal value to populate the tag value. + string value = 1 [(validate.rules).string = {min_bytes: 1}]; + } + + // Environment type custom tag with environment name and default value. 
+ message Environment { + // Environment variable name to obtain the value to populate the tag value. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When the environment variable is not found, + // the tag value will be populated with this default value if specified, + // otherwise no tag will be populated. + string default_value = 2; + } + + // Header type custom tag with header name and default value. + message Header { + // Header name to obtain the value to populate the tag value. + string name = 1 [(validate.rules).string = {min_bytes: 1}]; + + // When the header does not exist, + // the tag value will be populated with this default value if specified, + // otherwise no tag will be populated. + string default_value = 2; + } + + // Metadata type custom tag using + // :ref:`MetadataKey ` to retrieve the protobuf value + // from :ref:`Metadata `, and populate the tag value with + // `the canonical JSON `_ + // representation of it. + message Metadata { + // Specify what kind of metadata to obtain tag value from. + metadata.v2.MetadataKind kind = 1; + + // Metadata key to define the path to retrieve the tag value. + metadata.v2.MetadataKey metadata_key = 2; + + // When no valid metadata is found, + // the tag value would be populated with this default value if specified, + // otherwise no tag would be populated. + string default_value = 3; + } + + // Used to populate the tag name. + string tag = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Used to specify what kind of custom tag. + oneof type { + option (validate.required) = true; + + // A literal custom tag. + Literal literal = 2; + + // An environment custom tag. + Environment environment = 3; + + // A request header custom tag. + Header request_header = 4; + + // A custom tag to obtain tag value from the metadata. 
+ Metadata metadata = 5; + } +} diff --git a/xds/third_party/udpa/import.sh b/xds/third_party/udpa/import.sh index 0cd18d3dbf1..68d577ebc2d 100755 --- a/xds/third_party/udpa/import.sh +++ b/xds/third_party/udpa/import.sh @@ -18,12 +18,14 @@ set -e BRANCH=master # import VERSION from one of the google internal CLs -VERSION=94324803a497c8f76dbc78df393ef629d3a9f3c3 +VERSION=edbea6a78f6d1ba34edc69c53a396b1d88d59651 GIT_REPO="https://github.com/cncf/udpa.git" GIT_BASE_DIR=udpa SOURCE_PROTO_BASE_DIR=udpa TARGET_PROTO_BASE_DIR=src/main/proto FILES=( +udpa/annotations/migrate.proto +udpa/annotations/sensitive.proto udpa/data/orca/v1/orca_load_report.proto udpa/service/orca/v1/orca.proto ) diff --git a/xds/third_party/udpa/src/main/proto/udpa/annotations/migrate.proto b/xds/third_party/udpa/src/main/proto/udpa/annotations/migrate.proto new file mode 100644 index 00000000000..1c42a6404dc --- /dev/null +++ b/xds/third_party/udpa/src/main/proto/udpa/annotations/migrate.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "google/protobuf/descriptor.proto"; + +// Magic number in this file derived from top 28bit of SHA256 digest of +// "udpa.annotation.migrate". + +extend google.protobuf.MessageOptions { + MigrateAnnotation message_migrate = 171962766; +} + +extend google.protobuf.FieldOptions { + FieldMigrateAnnotation field_migrate = 171962766; +} + +extend google.protobuf.EnumOptions { + MigrateAnnotation enum_migrate = 171962766; +} + +extend google.protobuf.EnumValueOptions { + MigrateAnnotation enum_value_migrate = 171962766; +} + +extend google.protobuf.FileOptions { + FileMigrateAnnotation file_migrate = 171962766; +} + +message MigrateAnnotation { + // Rename the message/enum/enum value in next version. + string rename = 1; +} + +message FieldMigrateAnnotation { + // Rename the field in next version. + string rename = 1; + + // Add the field to a named oneof in next version. 
If this already exists, the + // field will join its siblings under the oneof, otherwise a new oneof will be + // created with the given name. + string oneof_promotion = 2; +} + +message FileMigrateAnnotation { + // Move all types in the file to another package, this implies changing proto + // file path. + string move_to_package = 2; +} diff --git a/xds/third_party/udpa/src/main/proto/udpa/annotations/sensitive.proto b/xds/third_party/udpa/src/main/proto/udpa/annotations/sensitive.proto new file mode 100644 index 00000000000..8dc921f24b5 --- /dev/null +++ b/xds/third_party/udpa/src/main/proto/udpa/annotations/sensitive.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package udpa.annotations; + +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.FieldOptions { + // Magic number is the 28 most significant bits in the sha256sum of "udpa.annotations.sensitive". + // When set to true, `sensitive` indicates that this field contains sensitive data, such as + // personally identifiable information, passwords, or private keys, and should be redacted for + // display by tools aware of this annotation. Note that that this has no effect on standard + // Protobuf functions such as `TextFormat::PrintToString`. 
+ bool sensitive = 76569463; +} From a6c93dc473f7cebb78e112a63061690820ce80ea Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Thu, 27 Feb 2020 12:53:11 -0800 Subject: [PATCH 80/86] api,netty: fix MethodDescriptor and InternalKnownTransport for netty-shaded Resolves #6765 --- api/src/main/java/io/grpc/InternalKnownTransport.java | 1 + api/src/main/java/io/grpc/MethodDescriptor.java | 3 +-- netty/src/main/java/io/grpc/netty/NettyClientStream.java | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/api/src/main/java/io/grpc/InternalKnownTransport.java b/api/src/main/java/io/grpc/InternalKnownTransport.java index 3aca25fbb8e..e05eb03d1a9 100644 --- a/api/src/main/java/io/grpc/InternalKnownTransport.java +++ b/api/src/main/java/io/grpc/InternalKnownTransport.java @@ -24,6 +24,7 @@ @Internal public enum InternalKnownTransport { NETTY, + NETTY_SHADED, ; } diff --git a/api/src/main/java/io/grpc/MethodDescriptor.java b/api/src/main/java/io/grpc/MethodDescriptor.java index e9f8dd063c3..51c65b08cc5 100644 --- a/api/src/main/java/io/grpc/MethodDescriptor.java +++ b/api/src/main/java/io/grpc/MethodDescriptor.java @@ -51,8 +51,7 @@ public final class MethodDescriptor { // Must be set to InternalKnownTransport.values().length // Not referenced to break the dependency. - private final AtomicReferenceArray rawMethodNames = new AtomicReferenceArray<>(1); - + private final AtomicReferenceArray rawMethodNames = new AtomicReferenceArray<>(2); /** * Gets the cached "raw" method name for this Method Descriptor. 
The raw name is transport diff --git a/netty/src/main/java/io/grpc/netty/NettyClientStream.java b/netty/src/main/java/io/grpc/netty/NettyClientStream.java index d0193263514..6008a597c67 100644 --- a/netty/src/main/java/io/grpc/netty/NettyClientStream.java +++ b/netty/src/main/java/io/grpc/netty/NettyClientStream.java @@ -54,7 +54,9 @@ */ class NettyClientStream extends AbstractClientStream { private static final InternalMethodDescriptor methodDescriptorAccessor = - new InternalMethodDescriptor(InternalKnownTransport.NETTY); + new InternalMethodDescriptor( + NettyClientTransport.class.getName().contains("grpc.netty.shaded") + ? InternalKnownTransport.NETTY_SHADED : InternalKnownTransport.NETTY); private final Sink sink = new Sink(); private final TransportState state; From 849dc2e259a637f2d0259b2c23a756286fd66173 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Thu, 27 Feb 2020 14:23:29 -0800 Subject: [PATCH 81/86] okhttp: fix incorrect connection-level flow control handling at beginning of connection (v1.28.x backport) Specifically, this addresses bugs that occur when the `OkHttpChannelBuilder.flowControlWindow(int)` setting is increased from its default value. Two changes: 1. On starting a connection, ensure the value of `OkHttpChannelBuilder.flowControlWindow(int)` is sent via Settings.INITIAL_WINDOW_SIZE. Also send a WINDOW_UPDATE after Settings to update the connection-level window. 2. Always initialize the `OutboundFlowController` with an initialWindowSize of 65535 bytes per the [http2 spec](https://http2.github.io/http2-spec/#InitialWindowSize) instead of using the inbound window size. 
Fixes #6685 Backport of #6742 --- .../io/grpc/okhttp/OkHttpClientTransport.java | 29 ++-- .../grpc/okhttp/OutboundFlowController.java | 7 +- .../src/main/java/io/grpc/okhttp/Utils.java | 1 + .../okhttp/OkHttpClientTransportTest.java | 136 ++++++++++++++++-- 4 files changed, 147 insertions(+), 26 deletions(-) diff --git a/okhttp/src/main/java/io/grpc/okhttp/OkHttpClientTransport.java b/okhttp/src/main/java/io/grpc/okhttp/OkHttpClientTransport.java index b238b9237d6..8bfd50a2c2f 100644 --- a/okhttp/src/main/java/io/grpc/okhttp/OkHttpClientTransport.java +++ b/okhttp/src/main/java/io/grpc/okhttp/OkHttpClientTransport.java @@ -18,6 +18,7 @@ import static com.google.common.base.Preconditions.checkState; import static io.grpc.internal.GrpcUtil.TIMER_SERVICE; +import static io.grpc.okhttp.Utils.DEFAULT_WINDOW_SIZE; import static io.grpc.okhttp.Utils.DEFAULT_WINDOW_UPDATE_RATIO; import com.google.common.annotations.VisibleForTesting; @@ -488,8 +489,7 @@ public Runnable start(Listener listener) { synchronized (lock) { frameWriter = new ExceptionHandlingFrameWriter(OkHttpClientTransport.this, testFrameWriter, testFrameLogger); - outboundFlow = - new OutboundFlowController(OkHttpClientTransport.this, frameWriter, initialWindowSize); + outboundFlow = new OutboundFlowController(OkHttpClientTransport.this, frameWriter); } serializingExecutor.execute(new Runnable() { @Override @@ -515,7 +515,7 @@ public void run() { synchronized (lock) { frameWriter = new ExceptionHandlingFrameWriter(this, rawFrameWriter); - outboundFlow = new OutboundFlowController(this, frameWriter, initialWindowSize); + outboundFlow = new OutboundFlowController(this, frameWriter); } final CountDownLatch latch = new CountDownLatch(1); // Connecting in the serializingExecutor, so that some stream operations like synStream @@ -605,11 +605,7 @@ sslSocketFactory, hostnameVerifier, sock, getOverridenHost(), getOverridenPort() }); // Schedule to send connection preface & settings before any other write. 
try { - synchronized (lock) { - frameWriter.connectionPreface(); - Settings settings = new Settings(); - frameWriter.settings(settings); - } + sendConnectionPrefaceAndSettings(); } finally { latch.countDown(); } @@ -629,6 +625,23 @@ public void run() { return null; } + /** + * Should only be called once when the transport is first established. + */ + @VisibleForTesting + void sendConnectionPrefaceAndSettings() { + synchronized (lock) { + frameWriter.connectionPreface(); + Settings settings = new Settings(); + OkHttpSettingsUtil.set(settings, OkHttpSettingsUtil.INITIAL_WINDOW_SIZE, initialWindowSize); + frameWriter.settings(settings); + if (initialWindowSize > DEFAULT_WINDOW_SIZE) { + frameWriter.windowUpdate( + Utils.CONNECTION_STREAM_ID, initialWindowSize - DEFAULT_WINDOW_SIZE); + } + } + } + private Socket createHttpProxySocket(InetSocketAddress address, InetSocketAddress proxyAddress, String proxyUsername, String proxyPassword) throws StatusException { try { diff --git a/okhttp/src/main/java/io/grpc/okhttp/OutboundFlowController.java b/okhttp/src/main/java/io/grpc/okhttp/OutboundFlowController.java index 441bb21151c..c935363213d 100644 --- a/okhttp/src/main/java/io/grpc/okhttp/OutboundFlowController.java +++ b/okhttp/src/main/java/io/grpc/okhttp/OutboundFlowController.java @@ -17,6 +17,7 @@ package io.grpc.okhttp; import static io.grpc.okhttp.Utils.CONNECTION_STREAM_ID; +import static io.grpc.okhttp.Utils.DEFAULT_WINDOW_SIZE; import static java.lang.Math.ceil; import static java.lang.Math.max; import static java.lang.Math.min; @@ -38,11 +39,11 @@ class OutboundFlowController { private final OutboundFlowState connectionState; OutboundFlowController( - OkHttpClientTransport transport, FrameWriter frameWriter, int initialWindowSize) { + OkHttpClientTransport transport, FrameWriter frameWriter) { this.transport = Preconditions.checkNotNull(transport, "transport"); this.frameWriter = Preconditions.checkNotNull(frameWriter, "frameWriter"); - this.initialWindowSize = 
initialWindowSize; - connectionState = new OutboundFlowState(CONNECTION_STREAM_ID, initialWindowSize); + this.initialWindowSize = DEFAULT_WINDOW_SIZE; + connectionState = new OutboundFlowState(CONNECTION_STREAM_ID, DEFAULT_WINDOW_SIZE); } /** diff --git a/okhttp/src/main/java/io/grpc/okhttp/Utils.java b/okhttp/src/main/java/io/grpc/okhttp/Utils.java index 7ae35f9a379..2dc5f1e1ec9 100644 --- a/okhttp/src/main/java/io/grpc/okhttp/Utils.java +++ b/okhttp/src/main/java/io/grpc/okhttp/Utils.java @@ -42,6 +42,7 @@ class Utils { * is sent to expand the window. */ static final float DEFAULT_WINDOW_UPDATE_RATIO = 0.5f; + static final int DEFAULT_WINDOW_SIZE = 65535; static final int CONNECTION_STREAM_ID = 0; public static Metadata convertHeaders(List
      http2Headers) { diff --git a/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java b/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java index 769d0097f90..016fea83ea9 100644 --- a/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java +++ b/okhttp/src/test/java/io/grpc/okhttp/OkHttpClientTransportTest.java @@ -404,6 +404,36 @@ public void maxMessageSizeShouldBeEnforced() throws Exception { shutdownAndVerify(); } + @Test + public void includeInitialWindowSizeInFirstSettings() throws Exception { + int initialWindowSize = 65535; + startTransport( + DEFAULT_START_STREAM_ID, null, true, DEFAULT_MAX_MESSAGE_SIZE, initialWindowSize, null); + clientTransport.sendConnectionPrefaceAndSettings(); + + ArgumentCaptor settings = ArgumentCaptor.forClass(Settings.class); + verify(frameWriter, timeout(TIME_OUT_MS)).settings(settings.capture()); + assertEquals(65535, settings.getValue().get(7)); + } + + /** + * A "large" window size is anything over 65535 (the starting size for any connection-level + * flow control value). + */ + @Test + public void includeInitialWindowSizeInFirstSettings_largeWindowSize() throws Exception { + int initialWindowSize = 75535; // 65535 + 10000 + startTransport( + DEFAULT_START_STREAM_ID, null, true, DEFAULT_MAX_MESSAGE_SIZE, initialWindowSize, null); + clientTransport.sendConnectionPrefaceAndSettings(); + + ArgumentCaptor settings = ArgumentCaptor.forClass(Settings.class); + verify(frameWriter, timeout(TIME_OUT_MS)).settings(settings.capture()); + assertEquals(75535, settings.getValue().get(7)); + + verify(frameWriter, timeout(TIME_OUT_MS)).windowUpdate(0, 10000); + } + /** * When nextFrame throws IOException, the transport should be aborted. */ @@ -836,39 +866,39 @@ public void windowUpdateWithInboundFlowControl() throws Exception { shutdownAndVerify(); } + /** + * Outbound flow control where the initial flow control window stays at the default size of 65535. 
+ */ @Test public void outboundFlowControl() throws Exception { - outboundFlowControl(INITIAL_WINDOW_SIZE); - } - - private void outboundFlowControl(int windowSize) throws Exception { - startTransport( - DEFAULT_START_STREAM_ID, null, true, DEFAULT_MAX_MESSAGE_SIZE, windowSize, null); + initTransport(); MockStreamListener listener = new MockStreamListener(); OkHttpClientStream stream = clientTransport.newStream(method, new Metadata(), CallOptions.DEFAULT); stream.start(listener); + + // Outbound window always starts at 65535 until changed by Settings.INITIAL_WINDOW_SIZE + int initialOutboundWindowSize = 65535; + int messageLength = initialOutboundWindowSize / 2 + 1; + // The first message should be sent out. - int messageLength = windowSize / 2 + 1; InputStream input = new ByteArrayInputStream(new byte[messageLength]); stream.writeMessage(input); stream.flush(); verify(frameWriter, timeout(TIME_OUT_MS)).data( eq(false), eq(3), any(Buffer.class), eq(messageLength + HEADER_LENGTH)); - // The second message should be partially sent out. input = new ByteArrayInputStream(new byte[messageLength]); stream.writeMessage(input); stream.flush(); - int partiallySentSize = - windowSize - messageLength - HEADER_LENGTH; + int partiallySentSize = initialOutboundWindowSize - messageLength - HEADER_LENGTH; verify(frameWriter, timeout(TIME_OUT_MS)) .data(eq(false), eq(3), any(Buffer.class), eq(partiallySentSize)); - // Get more credit, the rest data should be sent out. - frameHandler().windowUpdate(3, windowSize); - frameHandler().windowUpdate(0, windowSize); + // Get more credit so the rest of the data should be sent out. 
+ frameHandler().windowUpdate(3, initialOutboundWindowSize); + frameHandler().windowUpdate(0, initialOutboundWindowSize); verify(frameWriter, timeout(TIME_OUT_MS)).data( eq(false), eq(3), any(Buffer.class), eq(messageLength + HEADER_LENGTH - partiallySentSize)); @@ -878,14 +908,90 @@ private void outboundFlowControl(int windowSize) throws Exception { shutdownAndVerify(); } + /** + * Outbound flow control where the initial window size is reduced before a stream is started. + */ @Test public void outboundFlowControl_smallWindowSize() throws Exception { - outboundFlowControl(100); + initTransport(); + + int initialOutboundWindowSize = 100; + setInitialWindowSize(initialOutboundWindowSize); + + MockStreamListener listener = new MockStreamListener(); + OkHttpClientStream stream = + clientTransport.newStream(method, new Metadata(), CallOptions.DEFAULT); + stream.start(listener); + + int messageLength = 75; + // The first message should be sent out. + InputStream input = new ByteArrayInputStream(new byte[messageLength]); + stream.writeMessage(input); + stream.flush(); + verify(frameWriter, timeout(TIME_OUT_MS)).data( + eq(false), eq(3), any(Buffer.class), eq(messageLength + HEADER_LENGTH)); + + // The second message should be partially sent out. + input = new ByteArrayInputStream(new byte[messageLength]); + stream.writeMessage(input); + stream.flush(); + int partiallySentSize = initialOutboundWindowSize - messageLength - HEADER_LENGTH; + verify(frameWriter, timeout(TIME_OUT_MS)) + .data(eq(false), eq(3), any(Buffer.class), eq(partiallySentSize)); + + // Get more credit so the rest of the data should be sent out. 
+ frameHandler().windowUpdate(3, initialOutboundWindowSize); + verify(frameWriter, timeout(TIME_OUT_MS)).data( + eq(false), eq(3), any(Buffer.class), + eq(messageLength + HEADER_LENGTH - partiallySentSize)); + + stream.cancel(Status.CANCELLED); + listener.waitUntilStreamClosed(); + shutdownAndVerify(); } + /** + * Outbound flow control where the initial window size is increased before a stream is started. + */ @Test public void outboundFlowControl_bigWindowSize() throws Exception { - outboundFlowControl(INITIAL_WINDOW_SIZE * 2); + initTransport(); + + int initialOutboundWindowSize = 131070; // 65535 * 2 + setInitialWindowSize(initialOutboundWindowSize); + frameHandler().windowUpdate(0, 65535); + + MockStreamListener listener = new MockStreamListener(); + OkHttpClientStream stream = + clientTransport.newStream(method, new Metadata(), CallOptions.DEFAULT); + stream.start(listener); + + int messageLength = 100000; + // The first message should be sent out. + InputStream input = new ByteArrayInputStream(new byte[messageLength]); + stream.writeMessage(input); + stream.flush(); + verify(frameWriter, timeout(TIME_OUT_MS)).data( + eq(false), eq(3), any(Buffer.class), eq(messageLength + HEADER_LENGTH)); + + // The second message should be partially sent out. + input = new ByteArrayInputStream(new byte[messageLength]); + stream.writeMessage(input); + stream.flush(); + int partiallySentSize = initialOutboundWindowSize - messageLength - HEADER_LENGTH; + verify(frameWriter, timeout(TIME_OUT_MS)) + .data(eq(false), eq(3), any(Buffer.class), eq(partiallySentSize)); + + // Get more credit so the rest of the data should be sent out. 
+ frameHandler().windowUpdate(0, initialOutboundWindowSize); + frameHandler().windowUpdate(3, initialOutboundWindowSize); + verify(frameWriter, timeout(TIME_OUT_MS)).data( + eq(false), eq(3), any(Buffer.class), + eq(messageLength + HEADER_LENGTH - partiallySentSize)); + + stream.cancel(Status.CANCELLED); + listener.waitUntilStreamClosed(); + shutdownAndVerify(); } @Test From 1c089f8550ff3dbf48252ec7b1fad5d974dd9ef9 Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Thu, 27 Feb 2020 17:05:57 -0800 Subject: [PATCH 82/86] xds: set disable overprovisioning client feature and user agent in node identifier (v1.28.x backport) (#6779) Add "envoy.lb.does_not_support_overprovisioning" to xDS node identifier client features. Set the new user_agent_name and user_agent_version fields for build version. Backport of #6766. --- .../main/java/io/grpc/internal/GrpcUtil.java | 30 +++++++++++++- .../main/java/io/grpc/xds/Bootstrapper.java | 11 ++++- .../java/io/grpc/xds/BootstrapperTest.java | 40 ++++++++----------- 3 files changed, 54 insertions(+), 27 deletions(-) diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index c8b71119446..e65aa9ff655 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -62,6 +62,7 @@ import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; /** * Common utilities for GRPC. 
@@ -443,11 +444,36 @@ public static String getGrpcUserAgent( return builder.toString(); } + @Immutable + public static final class GrpcBuildVersion { + private final String userAgent; + private final String implementationVersion; + + private GrpcBuildVersion(String userAgent, String implementationVersion) { + this.userAgent = Preconditions.checkNotNull(userAgent, "userAgentName"); + this.implementationVersion = + Preconditions.checkNotNull(implementationVersion, "implementationVersion"); + } + + public String getUserAgent() { + return userAgent; + } + + public String getImplementationVersion() { + return implementationVersion; + } + + @Override + public String toString() { + return userAgent + " " + implementationVersion; + } + } + /** * Returns the build version of gRPC. */ - public static String getGrpcBuildVersion() { - return "gRPC Java " + IMPLEMENTATION_VERSION; + public static GrpcBuildVersion getGrpcBuildVersion() { + return new GrpcBuildVersion("gRPC Java", IMPLEMENTATION_VERSION); } /** diff --git a/xds/src/main/java/io/grpc/xds/Bootstrapper.java b/xds/src/main/java/io/grpc/xds/Bootstrapper.java index 02de61e5c72..cceb900c027 100644 --- a/xds/src/main/java/io/grpc/xds/Bootstrapper.java +++ b/xds/src/main/java/io/grpc/xds/Bootstrapper.java @@ -25,6 +25,7 @@ import io.envoyproxy.envoy.api.v2.core.Node; import io.grpc.Internal; import io.grpc.internal.GrpcUtil; +import io.grpc.internal.GrpcUtil.GrpcBuildVersion; import io.grpc.internal.JsonParser; import io.grpc.internal.JsonUtil; import io.grpc.xds.XdsLogger.XdsLogLevel; @@ -47,6 +48,9 @@ public abstract class Bootstrapper { private static final String LOG_PREFIX = "xds-bootstrap"; private static final String BOOTSTRAP_PATH_SYS_ENV_VAR = "GRPC_XDS_BOOTSTRAP"; + @VisibleForTesting + static final String CLIENT_FEATURE_DISABLE_OVERPROVISIONING = + "envoy.lb.does_not_support_overprovisioning"; private static final Bootstrapper DEFAULT_INSTANCE = new Bootstrapper() { @Override @@ -159,9 +163,12 @@ static 
BootstrapInfo parseConfig(String rawData) throws IOException { nodeBuilder.setLocality(localityBuilder); } } - String buildVersion = GrpcUtil.getGrpcBuildVersion(); + GrpcBuildVersion buildVersion = GrpcUtil.getGrpcBuildVersion(); logger.log(XdsLogLevel.INFO, "Build version: {0}", buildVersion); - nodeBuilder.setBuildVersion(buildVersion); + nodeBuilder.setBuildVersion(buildVersion.toString()); + nodeBuilder.setUserAgentName(buildVersion.getUserAgent()); + nodeBuilder.setUserAgentVersion(buildVersion.getImplementationVersion()); + nodeBuilder.addClientFeatures(CLIENT_FEATURE_DISABLE_OVERPROVISIONING); return new BootstrapInfo(servers, nodeBuilder.build()); } diff --git a/xds/src/test/java/io/grpc/xds/BootstrapperTest.java b/xds/src/test/java/io/grpc/xds/BootstrapperTest.java index b1b227dcfae..37ef4161259 100644 --- a/xds/src/test/java/io/grpc/xds/BootstrapperTest.java +++ b/xds/src/test/java/io/grpc/xds/BootstrapperTest.java @@ -24,6 +24,7 @@ import io.envoyproxy.envoy.api.v2.core.Locality; import io.envoyproxy.envoy.api.v2.core.Node; import io.grpc.internal.GrpcUtil; +import io.grpc.internal.GrpcUtil.GrpcBuildVersion; import io.grpc.xds.Bootstrapper.BootstrapInfo; import io.grpc.xds.Bootstrapper.ServerInfo; import java.io.IOException; @@ -41,7 +42,6 @@ public class BootstrapperTest { @Rule public ExpectedException thrown = ExpectedException.none(); @Test - @SuppressWarnings("deprecation") public void parseBootstrap_validData_singleXdsServer() throws IOException { String rawData = "{\n" + " \"node\": {\n" @@ -79,7 +79,7 @@ public void parseBootstrap_validData_singleXdsServer() throws IOException { assertThat(serverInfo.getChannelCredentials().get(2).getType()).isEqualTo("google_default"); assertThat(serverInfo.getChannelCredentials().get(2).getConfig()).isNull(); assertThat(info.getNode()).isEqualTo( - Node.newBuilder() + getNodeBuilder() .setId("ENVOY_NODE_ID") .setCluster("ENVOY_CLUSTER") .setLocality( @@ -92,12 +92,10 @@ public void 
parseBootstrap_validData_singleXdsServer() throws IOException { .putFields("TRAFFICDIRECTOR_NETWORK_NAME", Value.newBuilder().setStringValue("VPC_NETWORK_NAME").build()) .build()) - .setBuildVersion(GrpcUtil.getGrpcBuildVersion()) .build()); } @Test - @SuppressWarnings("deprecation") public void parseBootstrap_validData_multipleXdsServers() throws IOException { String rawData = "{\n" + " \"node\": {\n" @@ -144,7 +142,7 @@ public void parseBootstrap_validData_multipleXdsServers() throws IOException { .isEqualTo("trafficdirector-bar.googleapis.com:443"); assertThat(serverInfoList.get(1).getChannelCredentials()).isEmpty(); assertThat(info.getNode()).isEqualTo( - Node.newBuilder() + getNodeBuilder() .setId("ENVOY_NODE_ID") .setCluster("ENVOY_CLUSTER") .setLocality( @@ -157,12 +155,10 @@ public void parseBootstrap_validData_multipleXdsServers() throws IOException { .putFields("TRAFFICDIRECTOR_NETWORK_NAME", Value.newBuilder().setStringValue("VPC_NETWORK_NAME").build()) .build()) - .setBuildVersion(GrpcUtil.getGrpcBuildVersion()) .build()); } @Test - @SuppressWarnings("deprecation") public void parseBootstrap_IgnoreIrrelevantFields() throws IOException { String rawData = "{\n" + " \"node\": {\n" @@ -202,7 +198,7 @@ public void parseBootstrap_IgnoreIrrelevantFields() throws IOException { assertThat(serverInfo.getChannelCredentials().get(2).getType()).isEqualTo("google_default"); assertThat(serverInfo.getChannelCredentials().get(2).getConfig()).isNull(); assertThat(info.getNode()).isEqualTo( - Node.newBuilder() + getNodeBuilder() .setId("ENVOY_NODE_ID") .setCluster("ENVOY_CLUSTER") .setLocality( @@ -215,7 +211,6 @@ public void parseBootstrap_IgnoreIrrelevantFields() throws IOException { .putFields("TRAFFICDIRECTOR_NETWORK_NAME", Value.newBuilder().setStringValue("VPC_NETWORK_NAME").build()) .build()) - .setBuildVersion(GrpcUtil.getGrpcBuildVersion()) .build()); } @@ -228,7 +223,6 @@ public void parseBootstrap_emptyData() throws IOException { } @Test - 
@SuppressWarnings("deprecation") public void parseBootstrap_minimumRequiredFields() throws IOException { String rawData = "{\n" + " \"xds_servers\": []\n" @@ -236,16 +230,10 @@ public void parseBootstrap_minimumRequiredFields() throws IOException { BootstrapInfo info = Bootstrapper.parseConfig(rawData); assertThat(info.getServers()).isEmpty(); - assertThat(info.getNode()) - .isEqualTo( - Node.newBuilder() - .setBuildVersion( - GrpcUtil.getGrpcBuildVersion()) - .build()); + assertThat(info.getNode()).isEqualTo(getNodeBuilder().build()); } @Test - @SuppressWarnings("deprecation") public void parseBootstrap_minimalUsableData() throws IOException { String rawData = "{\n" + " \"xds_servers\": [\n" @@ -260,12 +248,7 @@ public void parseBootstrap_minimalUsableData() throws IOException { ServerInfo serverInfo = Iterables.getOnlyElement(info.getServers()); assertThat(serverInfo.getServerUri()).isEqualTo("trafficdirector.googleapis.com:443"); assertThat(serverInfo.getChannelCredentials()).isEmpty(); - assertThat(info.getNode()) - .isEqualTo( - Node.newBuilder() - .setBuildVersion( - GrpcUtil.getGrpcBuildVersion()) - .build()); + assertThat(info.getNode()).isEqualTo(getNodeBuilder().build()); } @Test @@ -320,4 +303,15 @@ public void parseBootstrap_serverWithoutServerUri() throws IOException { thrown.expectMessage("Invalid bootstrap: 'xds_servers' contains unknown server."); Bootstrapper.parseConfig(rawData); } + + @SuppressWarnings("deprecation") + private static Node.Builder getNodeBuilder() { + GrpcBuildVersion buildVersion = GrpcUtil.getGrpcBuildVersion(); + return + Node.newBuilder() + .setBuildVersion(buildVersion.toString()) + .setUserAgentName(buildVersion.getUserAgent()) + .setUserAgentVersion(buildVersion.getImplementationVersion()) + .addClientFeatures(Bootstrapper.CLIENT_FEATURE_DISABLE_OVERPROVISIONING); + } } From 365b3b445f04b2c3b4053fdb266b64244595e9db Mon Sep 17 00:00:00 2001 From: ZHANG Dapeng Date: Fri, 28 Feb 2020 10:46:15 -0800 Subject: [PATCH 83/86] bom: 
publish grpc-xds to bom --- bom/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bom/build.gradle b/bom/build.gradle index 67914432227..43668511e3b 100644 --- a/bom/build.gradle +++ b/bom/build.gradle @@ -12,7 +12,7 @@ publishing { pom.withXml { // Generate bom using subprojects - def internalProjects = [project.name, 'grpc-xds', 'grpc-gae-interop-testing-jdk8', 'grpc-compiler'] + def internalProjects = [project.name, 'grpc-gae-interop-testing-jdk8', 'grpc-compiler'] def dependencyManagement = asNode().appendNode('dependencyManagement') def dependencies = dependencyManagement.appendNode('dependencies') From f254439a163190f9805ea860699b2788ade10a3c Mon Sep 17 00:00:00 2001 From: Chengyuan Zhang Date: Fri, 6 Mar 2020 12:14:44 -0800 Subject: [PATCH 84/86] xds: treat target server authority opaquely for resolving cluster name (#6767) (#6810) Fixes usage of target hostname:port in xDS plugin. The target hostname:port used to construct gRPC channel should be treated opaquely. XdsNameResolver should not try to split it and should use it opaquely for sending LDS requests. In received RouteConfiguration messages, do not stripe off port (if any) for finding the virtual host with domain name matching the requested LDS resource name. 
--- xds/src/main/java/io/grpc/xds/XdsClient.java | 12 +- .../main/java/io/grpc/xds/XdsClientImpl.java | 23 +- .../java/io/grpc/xds/XdsNameResolver.java | 14 +- .../java/io/grpc/xds/XdsClientImplTest.java | 207 +++++++++--------- .../java/io/grpc/xds/XdsNameResolverTest.java | 62 +++--- 5 files changed, 155 insertions(+), 163 deletions(-) diff --git a/xds/src/main/java/io/grpc/xds/XdsClient.java b/xds/src/main/java/io/grpc/xds/XdsClient.java index ce2a3e70e47..f230d1d2e50 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClient.java +++ b/xds/src/main/java/io/grpc/xds/XdsClient.java @@ -391,19 +391,17 @@ interface EndpointWatcher { abstract void shutdown(); /** - * Registers a watcher to receive {@link ConfigUpdate} for service with the given hostname and - * port. + * Registers a watcher to receive {@link ConfigUpdate} for service with the given target + * authority. * *

      Unlike watchers for cluster data and endpoint data, at most one ConfigWatcher can be * registered. Once it is registered, it cannot be unregistered. * - * @param hostName the host name part of the "xds:" URI for the server name that the gRPC client - * targets for. Must NOT contain port. - * @param port the port part of the "xds:" URI for the server name that the gRPC client targets - * for. -1 if not specified. + * @param targetAuthority authority of the "xds:" URI for the server name that the gRPC client + * targets for. * @param watcher the {@link ConfigWatcher} to receive {@link ConfigUpdate}. */ - void watchConfigData(String hostName, int port, ConfigWatcher watcher) { + void watchConfigData(String targetAuthority, ConfigWatcher watcher) { } /** diff --git a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java index 5aca5d8e387..2f55a7e8907 100644 --- a/xds/src/main/java/io/grpc/xds/XdsClientImpl.java +++ b/xds/src/main/java/io/grpc/xds/XdsClientImpl.java @@ -157,9 +157,6 @@ final class XdsClientImpl extends XdsClient { // never change. @Nullable private ConfigWatcher configWatcher; - // The host name portion of "xds:" URI that the gRPC client targets for. - @Nullable - private String hostName; // The "xds:" URI (including port suffix if present) that the gRPC client targets for. 
@Nullable private String ldsResourceName; @@ -233,15 +230,10 @@ private void cleanUpResources() { } @Override - void watchConfigData(String hostName, int port, ConfigWatcher watcher) { - checkState(configWatcher == null, "watcher for %s already registered", hostName); + void watchConfigData(String targetAuthority, ConfigWatcher watcher) { + checkState(configWatcher == null, "watcher for %s already registered", targetAuthority); + ldsResourceName = checkNotNull(targetAuthority, "targetAuthority"); configWatcher = checkNotNull(watcher, "watcher"); - this.hostName = checkNotNull(hostName, "hostName"); - if (port == -1) { - ldsResourceName = hostName; - } else { - ldsResourceName = hostName + ":" + port; - } logger.log(XdsLogLevel.INFO, "Started watching config {0}", ldsResourceName); if (rpcRetryTimer != null && rpcRetryTimer.isPending()) { // Currently in retry backoff. @@ -540,11 +532,12 @@ private void handleLdsResponse(DiscoveryResponse ldsResponse) { // data or one supersedes the other. TBD. if (requestedHttpConnManager.hasRouteConfig()) { RouteConfiguration rc = requestedHttpConnManager.getRouteConfig(); - clusterName = findClusterNameInRouteConfig(rc, hostName); + clusterName = findClusterNameInRouteConfig(rc, ldsResourceName); if (clusterName == null) { errorMessage = "Listener " + ldsResourceName + " : cannot find a valid cluster name in any " - + "virtual hosts inside RouteConfiguration with domains matching: " + hostName; + + "virtual hosts inside RouteConfiguration with domains matching: " + + ldsResourceName; } } else if (requestedHttpConnManager.hasRds()) { Rds rds = requestedHttpConnManager.getRds(); @@ -650,14 +643,14 @@ private void handleRdsResponse(DiscoveryResponse rdsResponse) { // Resolved cluster name for the requested resource, if exists. 
String clusterName = null; if (requestedRouteConfig != null) { - clusterName = findClusterNameInRouteConfig(requestedRouteConfig, hostName); + clusterName = findClusterNameInRouteConfig(requestedRouteConfig, ldsResourceName); if (clusterName == null) { adsStream.sendNackRequest( ADS_TYPE_URL_RDS, ImmutableList.of(adsStream.rdsResourceName), rdsResponse.getVersionInfo(), "RouteConfiguration " + requestedRouteConfig.getName() + ": cannot find a " + "valid cluster name in any virtual hosts with domains matching: " - + hostName); + + ldsResourceName); return; } } diff --git a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java index bcc442d3252..99cacf744de 100644 --- a/xds/src/main/java/io/grpc/xds/XdsNameResolver.java +++ b/xds/src/main/java/io/grpc/xds/XdsNameResolver.java @@ -16,7 +16,6 @@ package io.grpc.xds; -import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import com.google.common.base.Stopwatch; @@ -32,6 +31,7 @@ import io.grpc.SynchronizationContext; import io.grpc.internal.BackoffPolicy; import io.grpc.internal.GrpcAttributes; +import io.grpc.internal.GrpcUtil; import io.grpc.internal.JsonParser; import io.grpc.internal.ObjectPool; import io.grpc.xds.Bootstrapper.BootstrapInfo; @@ -43,7 +43,6 @@ import io.grpc.xds.XdsClient.XdsClientFactory; import io.grpc.xds.XdsLogger.XdsLogLevel; import java.io.IOException; -import java.net.URI; import java.util.List; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; @@ -61,8 +60,6 @@ final class XdsNameResolver extends NameResolver { private final XdsLogger logger; private final String authority; - private final String hostName; - private final int port; private final XdsChannelFactory channelFactory; private final SynchronizationContext syncContext; private final ScheduledExecutorService timeService; @@ -83,12 +80,7 @@ final class XdsNameResolver extends 
NameResolver { Supplier stopwatchSupplier, XdsChannelFactory channelFactory, Bootstrapper bootstrapper) { - URI nameUri = URI.create("//" + checkNotNull(name, "name")); - checkArgument(nameUri.getHost() != null, "Invalid hostname: %s", name); - authority = - checkNotNull(nameUri.getAuthority(), "nameUri (%s) doesn't have an authority", nameUri); - hostName = nameUri.getHost(); - port = nameUri.getPort(); // -1 if not specified + authority = GrpcUtil.checkAuthority(checkNotNull(name, "name")); this.channelFactory = checkNotNull(channelFactory, "channelFactory"); this.syncContext = checkNotNull(args.getSynchronizationContext(), "syncContext"); this.timeService = checkNotNull(args.getScheduledExecutorService(), "timeService"); @@ -140,7 +132,7 @@ XdsClient createXdsClient() { }; xdsClientPool = new RefCountedXdsClientObjectPool(xdsClientFactory); xdsClient = xdsClientPool.getObject(); - xdsClient.watchConfigData(hostName, port, new ConfigWatcher() { + xdsClient.watchConfigData(authority, new ConfigWatcher() { @Override public void onConfigChanged(ConfigUpdate update) { logger.log( diff --git a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java index e2b75e01c49..389a53c2857 100644 --- a/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsClientImplTest.java @@ -123,9 +123,7 @@ @RunWith(JUnit4.class) public class XdsClientImplTest { - private static final String TARGET_NAME = "foo.googleapis.com:8080"; - private static final String HOSTNAME = "foo.googleapis.com"; - private static final int PORT = 8080; + private static final String TARGET_AUTHORITY = "foo.googleapis.com:8080"; private static final Node NODE = Node.getDefaultInstance(); private static final FakeClock.TaskFilter RPC_RETRY_TASK_FILTER = @@ -284,7 +282,7 @@ ManagedChannel createChannel(List servers) { xdsClient = new XdsClientImpl( - TARGET_NAME, + TARGET_AUTHORITY, servers, channelFactory, NODE, @@ -321,13 
+319,13 @@ public void tearDown() { */ @Test public void ldsResponseWithoutMatchingResource() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); assertThat(fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); @@ -357,7 +355,7 @@ public void ldsResponseWithoutMatchingResource() { // Client sends an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); @@ -379,13 +377,13 @@ public void ldsResponseWithoutMatchingResource() { */ @Test public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. 
verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); assertThat(fakeClock.getPendingTasks(LDS_RESOURCE_FETCH_TIMEOUT_TASK_FILTER)).hasSize(1); @@ -399,7 +397,7 @@ public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { "some other cluster"))); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRouteConfig(routeConfig).build())))); DiscoveryResponse response = buildDiscoveryResponse("0", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"); @@ -408,7 +406,7 @@ public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { // Client sends an NACK LDS request. verify(requestObserver) .onNext( - argThat(new DiscoveryRequestMatcher("", "foo.googleapis.com:8080", + argThat(new DiscoveryRequestMatcher("", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); verify(configWatcher, never()).onConfigChanged(any(ConfigUpdate.class)); @@ -430,13 +428,13 @@ public void failToFindVirtualHostInLdsResponseInLineRouteConfig() { */ @Test public void resolveVirtualHostInLdsResponse() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. 
verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); ScheduledTask ldsRespTimer = Iterables.getOnlyElement( @@ -462,14 +460,14 @@ public void resolveVirtualHostInLdsResponse() { ImmutableList.of("baz.googleapis.com"), "cluster-baz.googleapis.com")))) .build()))), - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack( HttpConnectionManager.newBuilder() - .setRouteConfig( + .setRouteConfig( // target route configuration buildRouteConfiguration("route-foo.googleapis.com", ImmutableList.of( - buildVirtualHost( - ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost( // matching virtual host + ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com"), "cluster.googleapis.com"), buildVirtualHost( ImmutableList.of("something does not match"), @@ -483,7 +481,7 @@ public void resolveVirtualHostInLdsResponse() { // Client sends an ACK request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); ArgumentCaptor configUpdateCaptor = ArgumentCaptor.forClass(null); @@ -502,13 +500,13 @@ public void resolveVirtualHostInLdsResponse() { */ @Test public void rdsResponseWithoutMatchingResource() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. 
verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); Rds rdsConfig = @@ -519,7 +517,7 @@ public void rdsResponseWithoutMatchingResource() { .setRouteConfigName("route-foo.googleapis.com") .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -528,7 +526,7 @@ public void rdsResponseWithoutMatchingResource() { // Client sends an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); // Client sends an (first) RDS request. @@ -545,13 +543,15 @@ public void rdsResponseWithoutMatchingResource() { buildRouteConfiguration( "some resource name does not match route-foo.googleapis.com", ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), "whatever cluster")))), Any.pack( buildRouteConfiguration( "some other resource name does not match route-foo.googleapis.com", ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), "some more whatever cluster"))))); response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); @@ -577,7 +577,7 @@ public void rdsResponseWithoutMatchingResource() { */ @Test public void resolveVirtualHostInRdsResponse() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = 
responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); @@ -590,7 +590,7 @@ public void resolveVirtualHostInRdsResponse() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -606,11 +606,11 @@ public void resolveVirtualHostInRdsResponse() { List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( buildVirtualHost(ImmutableList.of("something does not match"), "some cluster"), - buildVirtualHost(ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost(ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com:443"), "cluster.googleapis.com")))), // matching virtual host Any.pack( buildRouteConfiguration( @@ -643,7 +643,7 @@ public void resolveVirtualHostInRdsResponse() { */ @Test public void failToFindVirtualHostInRdsResponse() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); @@ -656,7 +656,7 @@ public void failToFindVirtualHostInRdsResponse() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -711,7 +711,7 @@ public void failToFindVirtualHostInRdsResponse() { */ @Test public void matchingVirtualHostDoesNotContainRouteAction() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + 
xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); @@ -724,7 +724,7 @@ public void matchingVirtualHostDoesNotContainRouteAction() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -777,28 +777,29 @@ public void matchingVirtualHostDoesNotContainRouteAction() { */ @Test public void notifyUpdatedResources() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management server sends back an LDS response containing a RouteConfiguration for the // requested Listener directly in-line. 
RouteConfiguration routeConfig = buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost( // matching virtual host + ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com:443"), "cluster.googleapis.com"), buildVirtualHost(ImmutableList.of("something does not match"), "some cluster"))); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRouteConfig(routeConfig).build()))) ); DiscoveryResponse response = @@ -807,7 +808,7 @@ public void notifyUpdatedResources() { // Client sends an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); // Cluster name is resolved and notified to config watcher. @@ -820,13 +821,13 @@ public void notifyUpdatedResources() { buildRouteConfiguration( "another-route-foo.googleapis.com", ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost(ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com:443"), "another-cluster.googleapis.com"), buildVirtualHost(ImmutableList.of("something does not match"), "some cluster"))); listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRouteConfig(routeConfig).build()))) ); response = @@ -835,7 +836,7 @@ public void notifyUpdatedResources() { // Client sends an ACK LDS request. 
verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "1", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "1", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0001"))); // Updated cluster name is notified to config watcher. @@ -855,7 +856,7 @@ public void notifyUpdatedResources() { .build(); listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); response = @@ -864,7 +865,7 @@ public void notifyUpdatedResources() { // Client sends an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "2", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "2", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0002"))); // Client sends an (first) RDS request. @@ -881,7 +882,7 @@ public void notifyUpdatedResources() { ImmutableList.of( buildVirtualHost(ImmutableList.of("something does not match"), "some cluster"), - buildVirtualHost(ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost(ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com:443"), "some-other-cluster.googleapis.com"))))); response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); @@ -904,7 +905,7 @@ public void notifyUpdatedResources() { buildRouteConfiguration( "some-route-to-foo.googleapis.com", ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost(ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com:443"), "an-updated-cluster.googleapis.com"))))); response = buildDiscoveryResponse("1", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0001"); responseObserver.onNext(response); @@ -943,13 +944,13 @@ public void notifyUpdatedResources() { */ @Test public void 
waitRdsResponsesForRequestedResource() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management sends back an LDS response telling client to do RDS. @@ -962,7 +963,7 @@ public void waitRdsResponsesForRequestedResource() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -971,7 +972,7 @@ public void waitRdsResponsesForRequestedResource() { // Client sends an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); // Client sends an (first) RDS request. 
@@ -993,7 +994,8 @@ public void waitRdsResponsesForRequestedResource() { buildRouteConfiguration( "some resource name does not match route-foo.googleapis.com", ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), "some more cluster"))))); response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); @@ -1014,11 +1016,13 @@ public void waitRdsResponsesForRequestedResource() { routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("something does not match"), + buildVirtualHost( + ImmutableList.of("something does not match"), "some cluster"), - buildVirtualHost(ImmutableList.of("foo.googleapis.com", "bar.googleapis.com"), + buildVirtualHost( // matching virtual host + ImmutableList.of(TARGET_AUTHORITY, "bar.googleapis.com:443"), "another-cluster.googleapis.com"))))); response = buildDiscoveryResponse("1", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0001"); responseObserver.onNext(response); @@ -1042,13 +1046,13 @@ public void waitRdsResponsesForRequestedResource() { */ @Test public void routeConfigurationRemovedNotifiedToWatcher() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); // Client sends an LDS request for the host name (with port) to management server. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management sends back an LDS response telling client to do RDS. 
@@ -1061,7 +1065,7 @@ public void routeConfigurationRemovedNotifiedToWatcher() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -1070,7 +1074,7 @@ public void routeConfigurationRemovedNotifiedToWatcher() { // Client sends an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); // Client sends an (first) RDS request. @@ -1082,9 +1086,10 @@ public void routeConfigurationRemovedNotifiedToWatcher() { List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), // matching virtual host "cluster.googleapis.com"))))); response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); @@ -1108,7 +1113,7 @@ public void routeConfigurationRemovedNotifiedToWatcher() { // Client sent an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "1", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "1", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0001"))); // Notify config watcher with an error. 
@@ -1124,7 +1129,7 @@ public void routeConfigurationRemovedNotifiedToWatcher() { */ @Test public void updateRdsRequestResourceWhileInitialResourceFetchInProgress() { - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); StreamObserver responseObserver = responseObservers.poll(); StreamObserver requestObserver = requestObservers.poll(); @@ -1138,7 +1143,7 @@ public void updateRdsRequestResourceWhileInitialResourceFetchInProgress() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -1166,8 +1171,10 @@ public void updateRdsRequestResourceWhileInitialResourceFetchInProgress() { .build(); listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ - Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) + Any.pack( + buildListener( + TARGET_AUTHORITY, /* matching resource */ + Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); response = buildDiscoveryResponse("1", listeners, XdsClientImpl.ADS_TYPE_URL_LDS, "0001"); responseObserver.onNext(response); @@ -1187,9 +1194,10 @@ public void updateRdsRequestResourceWhileInitialResourceFetchInProgress() { List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-bar.googleapis.com", + "route-bar.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), // matching virtual host "cluster.googleapis.com"))))); response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); @@ -2439,7 +2447,7 @@ public void 
streamClosedAndRetryWhenResolvingConfig() { InOrder inOrder = Mockito.inOrder(mockedDiscoveryService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); ArgumentCaptor> responseObserverCaptor = ArgumentCaptor.forClass(null); @@ -2451,7 +2459,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { // Client sends an LDS request for the host name (with port) to management server. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management server closes the RPC stream immediately. @@ -2471,7 +2479,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { // Client retried by sending an LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management server closes the RPC stream with an error. @@ -2491,7 +2499,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { // Client retried again by sending an LDS. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management server responses with a listener for the requested resource. 
@@ -2503,7 +2511,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse ldsResponse = @@ -2512,7 +2520,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { // Client sent back an ACK LDS request. verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "0", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "0", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, "0000"))); // Client sent an RDS request based on the received listener. @@ -2531,7 +2539,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { responseObserver = responseObserverCaptor.getValue(); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // RPC stream closed immediately @@ -2548,7 +2556,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { responseObserver = responseObserverCaptor.getValue(); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management server sends an LDS response. 
@@ -2559,9 +2567,10 @@ public void streamClosedAndRetryWhenResolvingConfig() { List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), // matching virtual host "cluster.googleapis.com"))))); DiscoveryResponse rdsResponse = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); @@ -2581,7 +2590,7 @@ public void streamClosedAndRetryWhenResolvingConfig() { fakeClock.runDueTasks(); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verifyNoMoreInteractions(backoffPolicyProvider, backoffPolicy1, backoffPolicy2); @@ -2595,7 +2604,7 @@ public void streamClosedAndRetry() { InOrder inOrder = Mockito.inOrder(mockedDiscoveryService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); ArgumentCaptor> responseObserverCaptor = ArgumentCaptor.forClass(null); @@ -2643,7 +2652,7 @@ public void streamClosedAndRetry() { // Retry resumes requests for all wanted resources. 
verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2672,7 +2681,7 @@ public void streamClosedAndRetry() { responseObserver = responseObserverCaptor.getValue(); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2701,7 +2710,7 @@ public void streamClosedAndRetry() { responseObserver = responseObserverCaptor.getValue(); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2734,7 +2743,7 @@ public void streamClosedAndRetry() { requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2762,7 +2771,7 @@ public void streamClosedAndRetry() { .streamAggregatedResources(responseObserverCaptor.capture()); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) 
.onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2783,7 +2792,7 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { InOrder inOrder = Mockito.inOrder(mockedDiscoveryService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); ArgumentCaptor> responseObserverCaptor = ArgumentCaptor.forClass(null); @@ -2807,7 +2816,7 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { StreamObserver requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); // Management server becomes unreachable. @@ -2828,7 +2837,7 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2852,7 +2861,7 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2895,7 +2904,7 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { responseObserver = responseObserverCaptor.getValue(); requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + 
.onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2922,7 +2931,7 @@ public void streamClosedAndRetryRaceWithAddingAndRemovingWatchers() { requestObserver = requestObservers.poll(); verify(requestObserver) - .onNext(eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + .onNext(eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); verify(requestObserver, never()) .onNext(eq(buildDiscoveryRequest(NODE, "", "cluster.googleapis.com", @@ -2940,7 +2949,7 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { InOrder inOrder = Mockito.inOrder(mockedDiscoveryService, backoffPolicyProvider, backoffPolicy1, backoffPolicy2); - xdsClient.watchConfigData(HOSTNAME, PORT, configWatcher); + xdsClient.watchConfigData(TARGET_AUTHORITY, configWatcher); ArgumentCaptor> responseObserverCaptor = ArgumentCaptor.forClass(null); @@ -2959,7 +2968,7 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse response = @@ -2994,7 +3003,7 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { // Client resumed requests and management server sends back LDS resources again. 
verify(requestObserver).onNext( - eq(buildDiscoveryRequest(NODE, "", "foo.googleapis.com:8080", + eq(buildDiscoveryRequest(NODE, "", TARGET_AUTHORITY, XdsClientImpl.ADS_TYPE_URL_LDS, ""))); responseObserver.onNext(response); @@ -3012,9 +3021,10 @@ public void streamClosedAndRetryReschedulesAllResourceFetchTimer() { List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), // matching virtual host "cluster-foo.googleapis.com"))))); response = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); responseObserver.onNext(response); @@ -3119,7 +3129,7 @@ public void reportLoadStatsToServer() { // Simulates the use case of watching clusters/endpoints based on service config resolved by // LDS/RDS. private void waitUntilConfigResolved(StreamObserver responseObserver) { - // Client sent an LDS request for resource "foo.googleapis.com:8080" (Omitted). + // Client sent an LDS request for resource TARGET_AUTHORITY (Omitted). // Management server responses with a listener telling client to do RDS. 
Rds rdsConfig = @@ -3130,7 +3140,7 @@ private void waitUntilConfigResolved(StreamObserver responseO .build(); List listeners = ImmutableList.of( - Any.pack(buildListener("foo.googleapis.com:8080", /* matching resource */ + Any.pack(buildListener(TARGET_AUTHORITY, /* matching resource */ Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build()))) ); DiscoveryResponse ldsResponse = @@ -3144,9 +3154,10 @@ private void waitUntilConfigResolved(StreamObserver responseO List routeConfigs = ImmutableList.of( Any.pack( buildRouteConfiguration( - "route-foo.googleapis.com", + "route-foo.googleapis.com", // target route configuration ImmutableList.of( - buildVirtualHost(ImmutableList.of("foo.googleapis.com"), + buildVirtualHost( + ImmutableList.of(TARGET_AUTHORITY), // matching virtual host "cluster.googleapis.com"))))); DiscoveryResponse rdsResponse = buildDiscoveryResponse("0", routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, "0000"); diff --git a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java index 7ce286d8c99..81571fc066d 100644 --- a/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java +++ b/xds/src/test/java/io/grpc/xds/XdsNameResolverTest.java @@ -78,8 +78,7 @@ // TODO(creamsoup) use parsed service config @SuppressWarnings("deprecation") public class XdsNameResolverTest { - private static final String HOST_NAME = "foo.googleapis.com"; - private static final int PORT = 443; + private static final String AUTHORITY = "foo.googleapis.com:80"; private static final Node FAKE_BOOTSTRAP_NODE = Node.newBuilder().setId("XdsNameResolverTest").build(); @@ -166,7 +165,7 @@ public BootstrapInfo readBootstrap() { }; xdsNameResolver = new XdsNameResolver( - HOST_NAME + ":" + PORT, + AUTHORITY, args, backoffPolicyProvider, fakeClock.getStopwatchSupplier(), @@ -191,7 +190,7 @@ public BootstrapInfo readBootstrap() { XdsNameResolver resolver = new XdsNameResolver( - HOST_NAME + ":" + PORT, + AUTHORITY, args, 
backoffPolicyProvider, fakeClock.getStopwatchSupplier(), @@ -216,7 +215,7 @@ public BootstrapInfo readBootstrap() throws IOException { XdsNameResolver resolver = new XdsNameResolver( - HOST_NAME + ":" + PORT, + AUTHORITY, args, backoffPolicyProvider, fakeClock.getStopwatchSupplier(), @@ -240,7 +239,7 @@ public void resolve_passXdsClientPoolInResult() { // Simulate receiving an LDS response that contains cluster resolution directly in-line. String clusterName = "cluster-foo.googleapis.com"; responseObserver.onNext( - buildLdsResponseForCluster("0", HOST_NAME, PORT, clusterName, "0000")); + buildLdsResponseForCluster("0", AUTHORITY, clusterName, "0000")); ArgumentCaptor resolutionResultCaptor = ArgumentCaptor.forClass(null); verify(mockListener).onResult(resolutionResultCaptor.capture()); @@ -258,7 +257,7 @@ public void resolve_foundResource() { // Simulate receiving an LDS response that contains cluster resolution directly in-line. String clusterName = "cluster-foo.googleapis.com"; responseObserver.onNext( - buildLdsResponseForCluster("0", HOST_NAME, PORT, clusterName, "0000")); + buildLdsResponseForCluster("0", AUTHORITY, clusterName, "0000")); ArgumentCaptor resolutionResultCaptor = ArgumentCaptor.forClass(null); verify(mockListener).onResult(resolutionResultCaptor.capture()); @@ -286,7 +285,7 @@ public void resolve_ResourceNotFound() { // Simulate receiving an LDS response that does not contain requested resource. String clusterName = "cluster-bar.googleapis.com"; responseObserver.onNext( - buildLdsResponseForCluster("0", "bar.googleapis.com", 80, clusterName, "0000")); + buildLdsResponseForCluster("0", "bar.googleapis.com", clusterName, "0000")); fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); ArgumentCaptor resolutionResultCaptor = ArgumentCaptor.forClass(null); @@ -305,7 +304,7 @@ public void resolve_resourceUpdated() { // Simulate receiving an LDS response that contains cluster resolution directly in-line. 
responseObserver.onNext( - buildLdsResponseForCluster("0", HOST_NAME, PORT, "cluster-foo.googleapis.com", "0000")); + buildLdsResponseForCluster("0", AUTHORITY, "cluster-foo.googleapis.com", "0000")); ArgumentCaptor resolutionResultCaptor = ArgumentCaptor.forClass(null); verify(mockListener).onResult(resolutionResultCaptor.capture()); @@ -325,14 +324,14 @@ public void resolve_resourceUpdated() { // Simulate receiving another LDS response that tells client to do RDS. String routeConfigName = "route-foo.googleapis.com"; responseObserver.onNext( - buildLdsResponseForRdsResource("1", HOST_NAME, PORT, routeConfigName, "0001")); + buildLdsResponseForRdsResource("1", AUTHORITY, routeConfigName, "0001")); // Client sent an RDS request for resource "route-foo.googleapis.com" (Omitted in this test). // Simulate receiving an RDS response that contains the resource "route-foo.googleapis.com" // with cluster resolution for "foo.googleapis.com". responseObserver.onNext( - buildRdsResponseForCluster("0", routeConfigName, "foo.googleapis.com", + buildRdsResponseForCluster("0", routeConfigName, AUTHORITY, "cluster-blade.googleapis.com", "0000")); verify(mockListener, times(2)).onResult(resolutionResultCaptor.capture()); @@ -355,7 +354,7 @@ public void resolve_resourceNewlyAdded() { // Simulate receiving an LDS response that does not contain requested resource. responseObserver.onNext( - buildLdsResponseForCluster("0", "bar.googleapis.com", 80, + buildLdsResponseForCluster("0", "bar.googleapis.com", "cluster-bar.googleapis.com", "0000")); fakeClock.forwardTime(XdsClientImpl.INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS); @@ -367,7 +366,7 @@ public void resolve_resourceNewlyAdded() { // Simulate receiving another LDS response that contains cluster resolution directly in-line. 
responseObserver.onNext( - buildLdsResponseForCluster("1", HOST_NAME, PORT, "cluster-foo.googleapis.com", + buildLdsResponseForCluster("1", AUTHORITY, "cluster-foo.googleapis.com", "0001")); verify(mockListener, times(2)).onResult(resolutionResultCaptor.capture()); @@ -387,35 +386,33 @@ public void resolve_resourceNewlyAdded() { } /** - * Builds an LDS DiscoveryResponse containing the mapping of given host name (with port if any) to - * the given cluster name directly in-line. Clients receiving this response is able to resolve - * cluster name for the given hostname:port immediately. + * Builds an LDS DiscoveryResponse containing the mapping of given host to + * the given cluster name directly in-line. Clients receiving this response is + * able to resolve cluster name for the given host immediately. */ private static DiscoveryResponse buildLdsResponseForCluster( - String versionInfo, String hostName, int port, String clusterName, String nonce) { - String ldsResourceName = port == -1 ? hostName : hostName + ":" + port; + String versionInfo, String host, String clusterName, String nonce) { List listeners = ImmutableList.of( - Any.pack(buildListener(ldsResourceName, + Any.pack(buildListener(host, // target Listener resource Any.pack( HttpConnectionManager.newBuilder() .setRouteConfig( - buildRouteConfiguration("route-foo.googleapis.com", + buildRouteConfiguration("route-foo.googleapis.com", // doesn't matter ImmutableList.of( buildVirtualHost( - ImmutableList.of("foo.googleapis.com"), + ImmutableList.of(host), // exact match clusterName)))) .build())))); return buildDiscoveryResponse(versionInfo, listeners, XdsClientImpl.ADS_TYPE_URL_LDS, nonce); } /** - * Builds an LDS DiscoveryResponse containing the mapping of given host name (with port if any) to - * the given RDS resource name. Clients receiving this response is able to send an RDS request for - * resolving the cluster name for the given hostname:port. 
+ * Builds an LDS DiscoveryResponse containing the mapping of given host to + * the given RDS resource name. Clients receiving this response is able to + * send an RDS request for resolving the cluster name for the given host. */ private static DiscoveryResponse buildLdsResponseForRdsResource( - String versionInfo, String hostName, int port, String routeConfigName, String nonce) { - String ldsResourceName = port == -1 ? hostName : hostName + ":" + port; + String versionInfo, String host, String routeConfigName, String nonce) { Rds rdsConfig = Rds.newBuilder() // Must set to use ADS. @@ -425,19 +422,20 @@ private static DiscoveryResponse buildLdsResponseForRdsResource( .build(); List listeners = ImmutableList.of( - Any.pack(buildListener(ldsResourceName, - Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build())))); + Any.pack( + buildListener( + host, Any.pack(HttpConnectionManager.newBuilder().setRds(rdsConfig).build())))); return buildDiscoveryResponse(versionInfo, listeners, XdsClientImpl.ADS_TYPE_URL_LDS, nonce); } /** - * Builds an RDS DiscoveryResponse containing the mapping of given route config name to the given - * cluster name under. + * Builds an RDS DiscoveryResponse containing route configuration with the given name and a + * virtual host that matches the given host to the given cluster name. 
*/ private static DiscoveryResponse buildRdsResponseForCluster( String versionInfo, String routeConfigName, - String hostName, + String host, String clusterName, String nonce) { List routeConfigs = ImmutableList.of( @@ -445,7 +443,7 @@ private static DiscoveryResponse buildRdsResponseForCluster( buildRouteConfiguration( routeConfigName, ImmutableList.of( - buildVirtualHost(ImmutableList.of(hostName), clusterName))))); + buildVirtualHost(ImmutableList.of(host), clusterName))))); return buildDiscoveryResponse(versionInfo, routeConfigs, XdsClientImpl.ADS_TYPE_URL_RDS, nonce); } } From ab72c63e585e277d154f3fbf970aa34e96afe16b Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Mon, 9 Mar 2020 10:18:22 -0700 Subject: [PATCH 85/86] Update README etc to reference 1.28.0 --- README.md | 28 ++++++++++++------------ cronet/README.md | 2 +- documentation/android-channel-builder.md | 4 ++-- examples/example-xds/README.md | 4 ++-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9b379ff0c74..9fc11366bbc 100644 --- a/README.md +++ b/README.md @@ -30,8 +30,8 @@ For a guided tour, take a look at the [quick start guide](https://grpc.io/docs/quickstart/java.html) or the more explanatory [gRPC basics](https://grpc.io/docs/tutorials/basic/java.html). -The [examples](https://github.com/grpc/grpc-java/tree/v1.27.2/examples) and the -[Android example](https://github.com/grpc/grpc-java/tree/v1.27.2/examples/android) +The [examples](https://github.com/grpc/grpc-java/tree/v1.28.0/examples) and the +[Android example](https://github.com/grpc/grpc-java/tree/v1.28.0/examples/android) are standalone projects that showcase the usage of gRPC. Download @@ -42,37 +42,37 @@ Download [the JARs][]. 
Or for Maven with non-Android, add to your `pom.xml`: io.grpc grpc-netty-shaded - 1.27.2 + 1.28.0 io.grpc grpc-protobuf - 1.27.2 + 1.28.0 io.grpc grpc-stub - 1.27.2 + 1.28.0 ``` Or for Gradle with non-Android, add to your dependencies: ```gradle -implementation 'io.grpc:grpc-netty-shaded:1.27.2' -implementation 'io.grpc:grpc-protobuf:1.27.2' -implementation 'io.grpc:grpc-stub:1.27.2' +implementation 'io.grpc:grpc-netty-shaded:1.28.0' +implementation 'io.grpc:grpc-protobuf:1.28.0' +implementation 'io.grpc:grpc-stub:1.28.0' ``` For Android client, use `grpc-okhttp` instead of `grpc-netty-shaded` and `grpc-protobuf-lite` instead of `grpc-protobuf`: ```gradle -implementation 'io.grpc:grpc-okhttp:1.27.2' -implementation 'io.grpc:grpc-protobuf-lite:1.27.2' -implementation 'io.grpc:grpc-stub:1.27.2' +implementation 'io.grpc:grpc-okhttp:1.28.0' +implementation 'io.grpc:grpc-protobuf-lite:1.28.0' +implementation 'io.grpc:grpc-stub:1.28.0' ``` [the JARs]: -https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.27.2 +https://search.maven.org/search?q=g:io.grpc%20AND%20v:1.28.0 Development snapshots are available in [Sonatypes's snapshot repository](https://oss.sonatype.org/content/repositories/snapshots/). 
@@ -104,7 +104,7 @@ For protobuf-based codegen integrated with the Maven build system, you can use com.google.protobuf:protoc:3.11.0:exe:${os.detected.classifier} grpc-java - io.grpc:protoc-gen-grpc-java:1.27.2:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:1.28.0:exe:${os.detected.classifier} @@ -134,7 +134,7 @@ protobuf { } plugins { grpc { - artifact = 'io.grpc:protoc-gen-grpc-java:1.27.2' + artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0' } } generateProtoTasks { diff --git a/cronet/README.md b/cronet/README.md index 54c62084634..02466c5f9c5 100644 --- a/cronet/README.md +++ b/cronet/README.md @@ -26,7 +26,7 @@ In your app module's `build.gradle` file, include a dependency on both `grpc-cro Google Play Services Client Library for Cronet ``` -implementation 'io.grpc:grpc-cronet:1.27.2' +implementation 'io.grpc:grpc-cronet:1.28.0' implementation 'com.google.android.gms:play-services-cronet:16.0.0' ``` diff --git a/documentation/android-channel-builder.md b/documentation/android-channel-builder.md index 9337b9df895..203636e33d7 100644 --- a/documentation/android-channel-builder.md +++ b/documentation/android-channel-builder.md @@ -36,8 +36,8 @@ In your `build.gradle` file, include a dependency on both `grpc-android` and `grpc-okhttp`: ``` -implementation 'io.grpc:grpc-android:1.27.2' -implementation 'io.grpc:grpc-okhttp:1.27.2' +implementation 'io.grpc:grpc-android:1.28.0' +implementation 'io.grpc:grpc-okhttp:1.28.0' ``` You also need permission to access the device's network state in your diff --git a/examples/example-xds/README.md b/examples/example-xds/README.md index b0f388224f0..0f3f6fccdd1 100644 --- a/examples/example-xds/README.md +++ b/examples/example-xds/README.md @@ -19,7 +19,7 @@ encounter issues please consult [COMPILING.md](../../COMPILING.md). 1. The server does not use XDS, so recent releases work fine. 
Building using recent releases is much easier, so check out the most recent release tag: ``` -$ git checkout v1.27.2 +$ git checkout v1.28.0 ``` 2. Build the hello-world example server or the hostname example server. See @@ -40,7 +40,7 @@ $ git checkout master ``` To: ``` - grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.27.2" } + grpc { artifact = "io.grpc:protoc-gen-grpc-java:1.28.0" } ``` From 3dbd250eae2c5e4f4e5e7046c6573805cc0dcc29 Mon Sep 17 00:00:00 2001 From: Jihun Cho Date: Mon, 9 Mar 2020 10:22:12 -0700 Subject: [PATCH 86/86] Bump version to 1.28.0 --- android-interop-testing/app/build.gradle | 14 +++++++------- android/build.gradle | 6 +++--- build.gradle | 2 +- .../src/test/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/test/golden/TestService.java.txt | 2 +- .../testLite/golden/TestDeprecatedService.java.txt | 2 +- compiler/src/testLite/golden/TestService.java.txt | 2 +- core/src/main/java/io/grpc/internal/GrpcUtil.java | 2 +- examples/android/clientcache/app/build.gradle | 10 +++++----- examples/android/helloworld/app/build.gradle | 8 ++++---- examples/android/routeguide/app/build.gradle | 8 ++++---- examples/android/strictmode/app/build.gradle | 8 ++++---- examples/build.gradle | 2 +- examples/example-alts/build.gradle | 2 +- examples/example-gauth/build.gradle | 2 +- examples/example-gauth/pom.xml | 4 ++-- examples/example-hostname/build.gradle | 2 +- examples/example-hostname/pom.xml | 4 ++-- .../android/helloworld/app/build.gradle | 8 ++++---- examples/example-kotlin/build.gradle | 2 +- examples/example-tls/build.gradle | 2 +- examples/example-tls/pom.xml | 4 ++-- examples/example-xds/build.gradle | 2 +- examples/pom.xml | 4 ++-- 24 files changed, 52 insertions(+), 52 deletions(-) diff --git a/android-interop-testing/app/build.gradle b/android-interop-testing/app/build.gradle index 394f5319a7d..377a33aae0b 100644 --- a/android-interop-testing/app/build.gradle +++ b/android-interop-testing/app/build.gradle @@ -41,7 +41,7 @@ android { 
protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -72,12 +72,12 @@ dependencies { implementation 'junit:junit:4.12' // You need to build grpc-java to obtain the grpc libraries below. - implementation 'io.grpc:grpc-auth:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-census:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-testing:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-auth:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-census:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-testing:1.28.0' // CURRENT_GRPC_VERSION androidTestImplementation 'androidx.test:rules:1.1.0-alpha1' androidTestImplementation 'androidx.test:runner:1.1.0-alpha1' diff --git a/android/build.gradle b/android/build.gradle index 3234ec0612b..ec63362a203 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'com.android.library' group = "io.grpc" -version = "1.28.0-SNAPSHOT" // CURRENT_GRPC_VERSION +version = "1.28.0" // CURRENT_GRPC_VERSION description = 'gRPC: Android' buildscript { @@ -47,9 +47,9 @@ dependencies { errorprone 'com.google.errorprone:error_prone_core:2.3.4' errorproneJavac 'com.google.errorprone:javac:9+181-r4173-1' - implementation 
'io.grpc:grpc-core:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-core:1.28.0' // CURRENT_GRPC_VERSION - testImplementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION testImplementation 'junit:junit:4.12' testImplementation 'org.robolectric:robolectric:3.7.1' testImplementation 'com.google.truth:truth:1.0' diff --git a/build.gradle b/build.gradle index 2c1bae914d5..dcc6f5f1272 100644 --- a/build.gradle +++ b/build.gradle @@ -18,7 +18,7 @@ subprojects { apply plugin: "net.ltgt.errorprone" group = "io.grpc" - version = "1.28.0-SNAPSHOT" // CURRENT_GRPC_VERSION + version = "1.28.0" // CURRENT_GRPC_VERSION repositories { maven { // The google mirror is less flaky than mavenCentral() diff --git a/compiler/src/test/golden/TestDeprecatedService.java.txt b/compiler/src/test/golden/TestDeprecatedService.java.txt index 452fbb6aea9..c176dcaf283 100644 --- a/compiler/src/test/golden/TestDeprecatedService.java.txt +++ b/compiler/src/test/golden/TestDeprecatedService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0)", comments = "Source: grpc/testing/compiler/test.proto") @java.lang.Deprecated public final class TestDeprecatedServiceGrpc { diff --git a/compiler/src/test/golden/TestService.java.txt b/compiler/src/test/golden/TestService.java.txt index 79618564a48..28b5f28cc37 100644 --- a/compiler/src/test/golden/TestService.java.txt +++ b/compiler/src/test/golden/TestService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0)", comments = "Source: grpc/testing/compiler/test.proto") public 
final class TestServiceGrpc { diff --git a/compiler/src/testLite/golden/TestDeprecatedService.java.txt b/compiler/src/testLite/golden/TestDeprecatedService.java.txt index d5566f0ee5f..eda0aa5faa6 100644 --- a/compiler/src/testLite/golden/TestDeprecatedService.java.txt +++ b/compiler/src/testLite/golden/TestDeprecatedService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0)", comments = "Source: grpc/testing/compiler/test.proto") @java.lang.Deprecated public final class TestDeprecatedServiceGrpc { diff --git a/compiler/src/testLite/golden/TestService.java.txt b/compiler/src/testLite/golden/TestService.java.txt index e238eecd955..4002b7bebd7 100644 --- a/compiler/src/testLite/golden/TestService.java.txt +++ b/compiler/src/testLite/golden/TestService.java.txt @@ -21,7 +21,7 @@ import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; * */ @javax.annotation.Generated( - value = "by gRPC proto compiler (version 1.28.0-SNAPSHOT)", + value = "by gRPC proto compiler (version 1.28.0)", comments = "Source: grpc/testing/compiler/test.proto") public final class TestServiceGrpc { diff --git a/core/src/main/java/io/grpc/internal/GrpcUtil.java b/core/src/main/java/io/grpc/internal/GrpcUtil.java index e65aa9ff655..b8ca1c3844a 100644 --- a/core/src/main/java/io/grpc/internal/GrpcUtil.java +++ b/core/src/main/java/io/grpc/internal/GrpcUtil.java @@ -196,7 +196,7 @@ public byte[] parseAsciiString(byte[] serialized) { public static final Splitter ACCEPT_ENCODING_SPLITTER = Splitter.on(',').trimResults(); - private static final String IMPLEMENTATION_VERSION = "1.28.0-SNAPSHOT"; // CURRENT_GRPC_VERSION + private static final String IMPLEMENTATION_VERSION = "1.28.0"; // CURRENT_GRPC_VERSION /** * The default timeout in nanos for a keepalive ping request. 
diff --git a/examples/android/clientcache/app/build.gradle b/examples/android/clientcache/app/build.gradle index c7dcbb4742e..cc76cd269d9 100644 --- a/examples/android/clientcache/app/build.gradle +++ b/examples/android/clientcache/app/build.gradle @@ -30,7 +30,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -50,12 +50,12 @@ dependencies { implementation 'com.android.support:appcompat-v7:27.0.2' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' testImplementation 'junit:junit:4.12' testImplementation 'com.google.truth:truth:1.0' - testImplementation 'io.grpc:grpc-testing:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + testImplementation 'io.grpc:grpc-testing:1.28.0' // CURRENT_GRPC_VERSION } diff --git a/examples/android/helloworld/app/build.gradle b/examples/android/helloworld/app/build.gradle index 0c6e7d93b9e..65500df3581 100644 --- a/examples/android/helloworld/app/build.gradle +++ b/examples/android/helloworld/app/build.gradle @@ -29,7 +29,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0' // CURRENT_GRPC_VERSION } 
} generateProtoTasks { @@ -49,8 +49,8 @@ dependencies { implementation 'com.android.support:appcompat-v7:27.0.2' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' } diff --git a/examples/android/routeguide/app/build.gradle b/examples/android/routeguide/app/build.gradle index 871c06db7ff..4e0834c4ec1 100644 --- a/examples/android/routeguide/app/build.gradle +++ b/examples/android/routeguide/app/build.gradle @@ -28,7 +28,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -48,8 +48,8 @@ dependencies { implementation 'com.android.support:appcompat-v7:27.0.2' // You need to build grpc-java to obtain these libraries below. 
- implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' } diff --git a/examples/android/strictmode/app/build.gradle b/examples/android/strictmode/app/build.gradle index c364f747f57..27bf732927e 100644 --- a/examples/android/strictmode/app/build.gradle +++ b/examples/android/strictmode/app/build.gradle @@ -29,7 +29,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -49,8 +49,8 @@ dependencies { implementation 'com.android.support:appcompat-v7:28.0.0' // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0' // CURRENT_GRPC_VERSION implementation 'javax.annotation:javax.annotation-api:1.2' } diff --git a/examples/build.gradle b/examples/build.gradle index 56a80523fc7..566cd928cca 100644 --- a/examples/build.gradle +++ b/examples/build.gradle @@ -22,7 +22,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. 
It is just for safely // updating the version in our release process. -def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.11.0' def protocVersion = protobufVersion diff --git a/examples/example-alts/build.gradle b/examples/example-alts/build.gradle index 0284bf3bde0..1f66f328648 100644 --- a/examples/example-alts/build.gradle +++ b/examples/example-alts/build.gradle @@ -23,7 +23,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION def protocVersion = '3.11.0' dependencies { diff --git a/examples/example-gauth/build.gradle b/examples/example-gauth/build.gradle index 053be337fac..3936801d1da 100644 --- a/examples/example-gauth/build.gradle +++ b/examples/example-gauth/build.gradle @@ -23,7 +23,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.11.0' def protocVersion = protobufVersion diff --git a/examples/example-gauth/pom.xml b/examples/example-gauth/pom.xml index bf3498e1b1a..3c046cf7280 100644 --- a/examples/example-gauth/pom.xml +++ b/examples/example-gauth/pom.xml @@ -6,13 +6,13 @@ jar - 1.28.0-SNAPSHOT + 1.28.0 example-gauth https://github.com/grpc/grpc-java UTF-8 - 1.28.0-SNAPSHOT + 1.28.0 3.11.0 1.7 diff --git a/examples/example-hostname/build.gradle b/examples/example-hostname/build.gradle index 5a23ee13def..a9db2c72a6b 100644 --- a/examples/example-hostname/build.gradle +++ b/examples/example-hostname/build.gradle @@ -20,7 +20,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION def protobufVersion = '3.11.0' dependencies { diff --git a/examples/example-hostname/pom.xml b/examples/example-hostname/pom.xml index 63b8b158bc1..91a933c0ec1 100644 --- a/examples/example-hostname/pom.xml +++ b/examples/example-hostname/pom.xml @@ -6,13 +6,13 @@ jar - 1.28.0-SNAPSHOT + 1.28.0 example-hostname https://github.com/grpc/grpc-java UTF-8 - 1.28.0-SNAPSHOT + 1.28.0 3.11.0 1.7 diff --git a/examples/example-kotlin/android/helloworld/app/build.gradle b/examples/example-kotlin/android/helloworld/app/build.gradle index bc258da7954..f02c8806390 100644 --- a/examples/example-kotlin/android/helloworld/app/build.gradle +++ b/examples/example-kotlin/android/helloworld/app/build.gradle @@ -51,7 +51,7 @@ android { protobuf { protoc { artifact = 'com.google.protobuf:protoc:3.11.0' } plugins { - grpc { artifact = 'io.grpc:protoc-gen-grpc-java:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + grpc { artifact = 
'io.grpc:protoc-gen-grpc-java:1.28.0' // CURRENT_GRPC_VERSION } } generateProtoTasks { @@ -73,9 +73,9 @@ dependencies { implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" // You need to build grpc-java to obtain these libraries below. - implementation 'io.grpc:grpc-okhttp:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-protobuf-lite:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION - implementation 'io.grpc:grpc-stub:1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-okhttp:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-protobuf-lite:1.28.0' // CURRENT_GRPC_VERSION + implementation 'io.grpc:grpc-stub:1.28.0' // CURRENT_GRPC_VERSION } repositories { mavenCentral() } diff --git a/examples/example-kotlin/build.gradle b/examples/example-kotlin/build.gradle index d0395b66aac..91a51f6e1fa 100644 --- a/examples/example-kotlin/build.gradle +++ b/examples/example-kotlin/build.gradle @@ -25,7 +25,7 @@ repositories { // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION dependencies { def kotlinVersion = plugins.findPlugin("org.jetbrains.kotlin.jvm").kotlinPluginVersion diff --git a/examples/example-tls/build.gradle b/examples/example-tls/build.gradle index 4e81a126790..c9563a44dd6 100644 --- a/examples/example-tls/build.gradle +++ b/examples/example-tls/build.gradle @@ -23,7 +23,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. 
-def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION def nettyTcNativeVersion = '2.0.28.Final' def protocVersion = '3.11.0' diff --git a/examples/example-tls/pom.xml b/examples/example-tls/pom.xml index 9fdd9255882..7cfe2edf959 100644 --- a/examples/example-tls/pom.xml +++ b/examples/example-tls/pom.xml @@ -6,13 +6,13 @@ jar - 1.28.0-SNAPSHOT + 1.28.0 example-tls https://github.com/grpc/grpc-java UTF-8 - 1.28.0-SNAPSHOT + 1.28.0 3.11.0 2.0.25.Final diff --git a/examples/example-xds/build.gradle b/examples/example-xds/build.gradle index b9b03a4a5aa..f06d8b1dac4 100644 --- a/examples/example-xds/build.gradle +++ b/examples/example-xds/build.gradle @@ -18,7 +18,7 @@ targetCompatibility = 1.7 // Feel free to delete the comment at the next line. It is just for safely // updating the version in our release process. -def grpcVersion = '1.28.0-SNAPSHOT' // CURRENT_GRPC_VERSION +def grpcVersion = '1.28.0' // CURRENT_GRPC_VERSION dependencies { // This example's client is the same as the helloworld client. We depend on the helloworld diff --git a/examples/pom.xml b/examples/pom.xml index bf7b1e534ff..bab47a0932f 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,13 +6,13 @@ jar - 1.28.0-SNAPSHOT + 1.28.0 examples https://github.com/grpc/grpc-java UTF-8 - 1.28.0-SNAPSHOT + 1.28.0 3.11.0 3.11.0