Compare commits

1 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | c725fd1ba4 |  |

1  .gitignore (vendored)

@@ -23,4 +23,3 @@ _site/
dump.rdb
.apt_generated
artifacts
.sts4-cache

51  .mvn/wrapper/MavenWrapperDownloader.java (vendored, Normal file → Executable file)
@@ -1,18 +1,22 @@
/*
 * Copyright 2007-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/

import java.net.*;
import java.io.*;
import java.nio.channels.*;
@@ -20,12 +24,11 @@ import java.util.Properties;

public class MavenWrapperDownloader {

    private static final String WRAPPER_VERSION = "0.5.6";
    /**
     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
     */
    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
        + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
    private static final String DEFAULT_DOWNLOAD_URL =
        "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar";

    /**
     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
@@ -73,13 +76,13 @@ public class MavenWrapperDownloader {
            }
        }
    }
    System.out.println("- Downloading from: " + url);
    System.out.println("- Downloading from: : " + url);

    File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
    if(!outputFile.getParentFile().exists()) {
        if(!outputFile.getParentFile().mkdirs()) {
            System.out.println(
                "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
                "- ERROR creating output direcrory '" + outputFile.getParentFile().getAbsolutePath() + "'");
        }
    }
    System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
@@ -95,16 +98,6 @@ public class MavenWrapperDownloader {
    }

    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
            String username = System.getenv("MVNW_USERNAME");
            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
            Authenticator.setDefault(new Authenticator() {
                @Override
                protected PasswordAuthentication getPasswordAuthentication() {
                    return new PasswordAuthentication(username, password);
                }
            });
        }
        URL website = new URL(urlString);
        ReadableByteChannel rbc;
        rbc = Channels.newChannel(website.openStream());
BIN  .mvn/wrapper/maven-wrapper.jar (vendored, Normal file → Executable file)
Binary file not shown.

3  .mvn/wrapper/maven-wrapper.properties (vendored, Normal file → Executable file)
@@ -1,2 +1 @@
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.5.4/apache-maven-3.5.4-bin.zip
574  README.adoc
@@ -4,7 +4,6 @@ Manual changes to this file will be lost when it is generated again.
Edit the files in the src/main/asciidoc/ directory instead.
////

:jdkversion: 1.8
:github-tag: master
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
@@ -18,6 +17,14 @@ image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.sv

// ======================================================================================

//= Overview
[partintro]
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
--

== Apache Kafka Binder

=== Usage
@@ -32,7 +39,7 @@ To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` a
</dependency>
----

Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown inn the following example for Maven:

[source,xml]
----
@@ -42,20 +49,565 @@ Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown
</dependency>
----

== Apache Kafka Streams Binder
=== Overview

=== Usage
The following image shows a simplified diagram of how the Apache Kafka binder operates:

To use Apache Kafka Streams binder, you need to add `spring-cloud-stream-binder-kafka-streams` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
.Kafka Binder
image::{github-raw}/docs/src/main/asciidoc/images/kafka-binder.png[width=300,scaledwidth="50%"]

[source,xml]
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
The consumer group maps directly to the same Apache Kafka concept.
Partitioning also maps directly to Apache Kafka partitions.

The binder currently uses the Apache Kafka `kafka-clients` 1.0.0 jar and is designed to be used with a broker of at least that version.
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
For example, with versions earlier than 0.11.x.x, native headers are not supported.
Also, 0.11.x.x does not support the `autoAddPartitions` property.
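
As a minimal sketch of how these concepts line up in configuration (the `input` binding name, `orders` topic, and `fulfillment` group below are illustrative assumptions, not taken from the original text):

[source]
----
# the 'input' binding consumes the Kafka topic 'orders'
spring.cloud.stream.bindings.input.destination=orders
# the Spring Cloud Stream group becomes the Kafka consumer group 'fulfillment'
spring.cloud.stream.bindings.input.group=fulfillment
----
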
=== Configuration Options

This section contains the configuration options used by the Apache Kafka binder.

For common configuration options and properties pertaining to binders, see the <<binding-properties,core documentation>>.

==== Kafka Binder Properties

spring.cloud.stream.kafka.binder.brokers::
A list of brokers to which the Kafka binder connects.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultBrokerPort::
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
This sets the default port when no port is configured in the broker list.
+
Default: `9092`.
spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumers) passed to all clients created by the binder.
Because these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
Unknown Kafka producer or consumer properties provided through this configuration are filtered out and not allowed to propagate.
Properties here supersede any properties set in boot.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.consumerProperties::
Key/Value map of arbitrary Kafka client consumer properties.
In addition to supporting known Kafka consumer properties, unknown consumer properties are allowed here as well.
Properties here supersede any properties set in boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
The list of custom headers that are transported by the binder.
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
+
Default: empty.
spring.cloud.stream.kafka.binder.healthTimeout::
The time to wait to get partition information, in seconds.
Health reports as down if this timer expires.
+
Default: 10.
spring.cloud.stream.kafka.binder.requiredAcks::
The number of required acks on the broker.
See the Kafka documentation for the producer `acks` property.
+
Default: `1`.
spring.cloud.stream.kafka.binder.minPartitionCount::
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.producerProperties::
Key/Value map of arbitrary Kafka client producer properties.
In addition to supporting known Kafka producer properties, unknown producer properties are allowed here as well.
Properties here supersede any properties set in boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
Can be overridden on each binding.
+
Default: `1`.
spring.cloud.stream.kafka.binder.autoCreateTopics::
If set to `true`, the binder creates new topics automatically.
If set to `false`, the binder relies on the topics being already configured.
In the latter case, if the topics do not exist, the binder fails to start.
+
NOTE: This setting is independent of the `auto.topic.create.enable` setting of the broker and does not influence it.
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+
Default: `true`.
spring.cloud.stream.kafka.binder.autoAddPartitions::
If set to `true`, the binder creates new partitions if required.
If set to `false`, the binder relies on the partition size of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
+
Default: `false`.
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
+
Default: `null` (no transactions).
spring.cloud.stream.kafka.binder.transaction.producer.*::
Global producer properties for producers in a transactional binder.
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
+
Default: See individual producer properties.

spring.cloud.stream.kafka.binder.headerMapperBeanName::
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
Use this, for example, if you wish to customize the trusted packages in a `DefaultKafkaHeaderMapper` that uses JSON deserialization for the headers.
+
Default: none.
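
As a minimal sketch, a few of the binder properties above could be combined in `application.properties` as follows (the broker addresses and values are illustrative assumptions):

[source]
----
spring.cloud.stream.kafka.binder.brokers=kafka1:9092,kafka2:9092
spring.cloud.stream.kafka.binder.minPartitionCount=4
spring.cloud.stream.kafka.binder.autoAddPartitions=true
spring.cloud.stream.kafka.binder.replicationFactor=2
# a common client property passed to both producers and consumers
spring.cloud.stream.kafka.binder.configuration.security.protocol=SSL
----
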
[[kafka-consumer-properties]]
==== Kafka Consumer Properties

The following properties are available for Kafka consumers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.

admin.configuration::
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.

admin.replicas-assignment::
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.

admin.replication-factor::
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.

autoRebalanceEnabled::
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
+
Default: `true`.
ackEachRecord::
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
+
Default: `false`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` is present in the inbound message.
Applications may use this header for acknowledging messages.
See the examples section for details.
When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
Also see `ackEachRecord`.
+
Default: `true`.
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
If set to `true`, it always auto-commits (if auto-commit is enabled).
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
+
Default: not set.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
Must be false if a `KafkaRebalanceListener` is provided; see <<rebalance-listener>>.
+
Default: `false`.
startOffset::
The starting offset for new groups.
Allowed values: `earliest` and `latest`.
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
Also see `resetOffsets` (earlier in this list).
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to true, it enables DLQ behavior for the consumer.
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name is configurable by setting the `dlqName` property.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
See <<kafka-dlq-processing>> for more information.
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
**Not allowed when `destinationIsPattern` is `true`.**
+
Default: `false`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
In addition to having Kafka consumer properties, other configuration properties can be passed here.
For example, some properties needed by the application, such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
dlqProducerProperties::
Using this, DLQ-specific producer properties can be set.
All the properties available through Kafka producer properties can be set through this property.
+
Default: Default Kafka producer properties.
standardHeaders::
Indicates which standard headers are populated by the inbound channel adapter.
Allowed values: `none`, `id`, `timestamp`, or `both`.
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
+
Default: `none`
converterBeanName::
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
+
Default: `null`
idleEventInterval::
The interval, in milliseconds, between events indicating that no messages have recently been received.
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
See <<pause-resume>> for a usage example.
+
Default: `30000`
destinationIsPattern::
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
This can be configured using the `configuration` property above.
+
Default: `false`
topic.properties::
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.topic.properties.message.format.version=0.9.0.0`
+
Default: none.
topic.replicas-assignment::
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See the `NewTopic` Javadocs in the `kafka-clients` jar.
+
Default: none.
topic.replication-factor::
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
Ignored if `replicas-assignments` is present.
+
Default: none (the binder-wide default of 1 is used).
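
A minimal sketch combining a few of the consumer properties above (the `input` binding name and the DLQ topic name are illustrative assumptions):

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.startOffset=earliest
spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.input.consumer.dlqName=orders-dlq
# a generic Kafka consumer property passed through 'configuration'
spring.cloud.stream.kafka.bindings.input.consumer.configuration.max.poll.records=100
----
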
[[kafka-producer-properties]]
==== Kafka Producer Properties

The following properties are available for Kafka producers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.

admin.configuration::
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.

admin.replicas-assignment::
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.

admin.replication-factor::
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.

bufferSize::
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
+
Default: `false`.
batchTimeout::
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
The payload cannot be used because, by the time this expression is evaluated, the payload is already in the form of a `byte[]`.
+
Default: `none`.
headerPatterns::
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
Patterns can begin or end with the wildcard character (asterisk).
Patterns can be negated by prefixing with `!`.
Matching stops after the first match (positive or negative).
For example, `!ask,as*` will pass `ash` but not `ask`.
`id` and `timestamp` are never mapped.
+
Default: `*` (all headers - except the `id` and `timestamp`)
configuration::
Map with a key/value pair containing generic Kafka producer properties.
+
Default: Empty map.
topic.properties::
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.topic.properties.message.format.version=0.9.0.0`
+
topic.replicas-assignment::
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See the `NewTopic` Javadocs in the `kafka-clients` jar.
+
Default: none.
topic.replication-factor::
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
Ignored if `replicas-assignments` is present.
+
Default: none (the binder-wide default of 1 is used).


NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.

compression::
Set the `compression.type` producer property.
Supported values are `none`, `gzip`, `snappy` and `lz4`.
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
+
Default: `none`.
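
A minimal sketch combining a few of the producer properties above (the `output` binding name and the header name are illustrative assumptions):

[source]
----
spring.cloud.stream.kafka.bindings.output.producer.compression=gzip
spring.cloud.stream.kafka.bindings.output.producer.messageKeyExpression=headers['partitionKey']
# map all headers except those starting with 'internal'
spring.cloud.stream.kafka.bindings.output.producer.headerPatterns=!internal*,*
----
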
==== Usage examples

In this section, we show the use of the preceding properties for specific scenarios.

===== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking

This example illustrates how one may manually acknowledge offsets in a consumer application.

This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
Use the corresponding input channel name for your example.

[source]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
</dependency>
@SpringBootApplication
@EnableBinding(Sink.class)
public class ManuallyAcknowdledgingConsumer {

    public static void main(String[] args) {
        SpringApplication.run(ManuallyAcknowdledgingConsumer.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void process(Message<?> message) {
        Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        if (acknowledgment != null) {
            System.out.println("Acknowledgment provided");
            acknowledgment.acknowledge();
        }
    }
}
----

===== Example: Security Configuration

Apache Kafka 0.9 supports secure connections between clients and brokers.
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.

For example, to set `security.protocol` to `SASL_SSL`, set the following property:

[source]
----
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
----

All the other security properties can be set in a similar manner.

When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.

Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.

====== Using JAAS Configuration Files

The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:

[source,bash]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

====== Using Spring Boot Properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.

The following properties can be used to configure the login context of the Kafka client:

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. Not necessary to be set in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map with a key/value pair containing the login module options.
+
Default: Empty map.

The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:

[source,bash]
----
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
   --spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
   --spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
   --spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----

The preceding example represents the equivalent of the following JAAS file:

[source]
----
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client-1@EXAMPLE.COM";
};
----

If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.

NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.

NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` properties with Kerberos.
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.

[[pause-resume]]
===== Example: Pausing and Resuming the Consumer

If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
The frequency at which events are published is controlled by the `idleEventInterval` property.
Since the consumer is not thread-safe, you must call these methods on the calling thread.

The following simple application shows how to pause and resume:

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
        System.out.println(in);
        consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
    }

    @Bean
    public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
        return event -> {
            System.out.println(event);
            if (event.getConsumer().paused().size() > 0) {
                event.getConsumer().resume(event.getConsumer().paused());
            }
        };
    }

}
----

[[kafka-error-channels]]
=== Error Channels

Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
See <<spring-cloud-stream-overview-error-handling>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:

* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`

There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
You can consume these exceptions with your own Spring Integration flow.
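
The documentation leaves that flow to the application; a minimal sketch, assuming a producer binding whose destination is `topic` (so the error channel is assumed to be named `topic.errors`), might look like this:

[source, java]
----
@ServiceActivator(inputChannel = "topic.errors")
public void handleSendFailure(ErrorMessage errorMessage) {
    // for an async producer send failure, the payload is a KafkaSendFailureException
    KafkaSendFailureException failure = (KafkaSendFailureException) errorMessage.getPayload();
    System.out.println("Failed record: " + failure.getRecord());
    System.out.println("Failed message: " + failure.getFailedMessage());
}
----
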
[[kafka-metrics]]
=== Kafka Metrics

The Kafka binder module exposes the following metrics:

`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
The metrics provided are based on the Micrometer metrics library. The metric contains the consumer group information, the topic, and the actual lag in committed offset from the latest offset on the topic.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
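
A minimal sketch of reading this metric through Micrometer (the injected `MeterRegistry` is an assumption for illustration; the metric is looked up by name and read only if it is registered as a gauge):

[source, java]
----
@Autowired
private MeterRegistry meterRegistry;

public double consumerLag() {
    // returns the registered gauge, or null if the metric is not present
    Gauge offsetGauge = meterRegistry.find("spring.cloud.stream.binder.kafka.offset").gauge();
    return offsetGauge == null ? 0.0 : offsetGauge.value();
}
----
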
[[kafka-tombstones]]
=== Tombstone Records (null record values)

When using compacted topics, a record with a `null` value (also called a tombstone record) represents the deletion of a key.
To receive such messages in a `@StreamListener` method, the parameter must be marked as not required to receive a `null` value argument.

====
[source, java]
----
@StreamListener(Sink.INPUT)
public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
               @Payload(required = false) Customer customer) {
    // customer is null if a tombstone record
    ...
}
----
====

[[rebalance-listener]]
=== Using a KafkaRebalanceListener

Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
Starting with version 2.1, if you provide a single `KafkaRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.

====
[source, java]
----
public interface KafkaBindingRebalanceListener {

    /**
     * Invoked by the container before any pending offsets are committed.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     */
    default void onPartitionsRevokedBeforeCommit(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions) {

    }

    /**
     * Invoked by the container after any pending offsets are committed.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     */
    default void onPartitionsRevokedAfterCommit(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {

    }

    /**
     * Invoked when partitions are initially assigned or after a rebalance.
     * Applications might only want to perform seek operations on an initial assignment.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     * @param initial true if this is the initial assignment.
     */
    default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions,
            boolean initial) {

    }

}
----
====

You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.
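
A minimal sketch of providing such a bean and seeking only on the initial assignment (the fixed offset used here is an illustrative assumption; a real application would look the offset up somewhere):

[source, java]
----
@Bean
public KafkaBindingRebalanceListener rebalanceListener() {
    return new KafkaBindingRebalanceListener() {

        @Override
        public void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
                Collection<TopicPartition> partitions, boolean initial) {
            if (initial) {
                // seek each newly assigned partition to an externally stored offset
                partitions.forEach(tp -> consumer.seek(tp, 0L));
            }
        }

    };
}
----
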
= Appendices
[appendix]
[[building]]
@@ -179,4 +731,4 @@ added after the original pull request but before a merge.
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
message (where XXXX is the issue number).

// ======================================================================================
// ======================================================================================
311  docs/pom.xml
@@ -7,56 +7,319 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>3.2.1</version>
<version>2.2.0.M1</version>
</parent>
<packaging>jar</packaging>
<packaging>pom</packaging>
<name>spring-cloud-stream-binder-kafka-docs</name>
<description>Spring Cloud Stream Kafka Binder Docs</description>
<properties>
<docs.main>spring-cloud-stream-binder-kafka</docs.main>
<main.basedir>${basedir}/..</main.basedir>
<maven.plugin.plugin.version>3.4</maven.plugin.plugin.version>
<configprops.inclusionPattern>.*stream.*</configprops.inclusionPattern>
<upload-docs-zip.phase>deploy</upload-docs-zip.phase>
<spring-doc-resources.version>0.1.1.RELEASE</spring-doc-resources.version>
<spring-asciidoctor-extensions.version>0.1.0.RELEASE
</spring-asciidoctor-extensions.version>
<asciidoctorj-pdf.version>1.5.0-alpha.16</asciidoctorj-pdf.version>
</properties>
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<sourceDirectory>src/main/asciidoc</sourceDirectory>
</build>
<profiles>
<profile>
<id>docs</id>
<build>
<plugins>
<plugin>
<groupId>pl.project13.maven</groupId>
<artifactId>git-commit-id-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
<inherited>false</inherited>
<executions>
<execution>
<id>unpack-docs</id>
<phase>generate-resources</phase>
<goals>
<goal>unpack</goal>
</goals>
<configuration>
<artifactItems>
<artifactItem>
<groupId>org.springframework.cloud
</groupId>
<artifactId>spring-cloud-build-docs
</artifactId>
<version>${spring-cloud-build.version}
</version>
<classifier>sources</classifier>
<type>jar</type>
<overWrite>false</overWrite>
<outputDirectory>${docs.resources.dir}
</outputDirectory>
</artifactItem>
</artifactItems>
</configuration>
</execution>
<execution>
<id>unpack-docs-resources</id>
<phase>generate-resources</phase>
<goals>
<goal>unpack</goal>
</goals>
<configuration>
<artifactItems>
<artifactItem>
<groupId>io.spring.docresources</groupId>
<artifactId>spring-doc-resources</artifactId>
<version>${spring-doc-resources.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
<outputDirectory>${project.build.directory}/refdocs/</outputDirectory>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<executions>
<execution>
<id>copy-asciidoc-resources</id>
<phase>generate-resources</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/refdocs/</outputDirectory>
<resources>
<resource>
<directory>src/main/asciidoc</directory>
<filtering>false</filtering>
<excludes>
<exclude>ghpages.sh</exclude>
</excludes>
</resource>
</resources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
</plugin>

<plugin>
<groupId>org.asciidoctor</groupId>
<artifactId>asciidoctor-maven-plugin</artifactId>
<version>${asciidoctor-maven-plugin.version}</version>
<inherited>false</inherited>
<dependencies>
<dependency>
<groupId>io.spring.asciidoctor</groupId>
<artifactId>spring-asciidoctor-extensions</artifactId>
<version>${spring-asciidoctor-extensions.version}</version>
</dependency>
<dependency>
<groupId>org.asciidoctor</groupId>
<artifactId>asciidoctorj-pdf</artifactId>
<version>${asciidoctorj-pdf.version}</version>
</dependency>
</dependencies>
<configuration>
<sourceDirectory>${project.build.directory}/refdocs/</sourceDirectory>
<sourceDocumentName>${docs.main}.adoc</sourceDocumentName>
<attributes>
<spring-cloud-stream-version>${project.version}</spring-cloud-stream-version>
<!-- <docs-url>https://cloud.spring.io/</docs-url> -->
<!-- <docs-version></docs-version> -->
<docs-version>${project.version}/</docs-version>
<docs-url>https://cloud.spring.io/spring-cloud-static/</docs-url>
</attributes>
</configuration>
<executions>
<execution>
<id>generate-html-documentation</id>
<phase>prepare-package</phase>
<goals>
<goal>process-asciidoc</goal>
</goals>
<configuration>
<backend>html5</backend>
<sourceHighlighter>highlight.js</sourceHighlighter>
<doctype>book</doctype>
<attributes>
// these attributes are required to use the doc resources
<docinfo>shared</docinfo>
<stylesdir>css/</stylesdir>
<stylesheet>spring.css</stylesheet>
<linkcss>true</linkcss>
<icons>font</icons>
<highlightjsdir>js/highlight</highlightjsdir>
<highlightjs-theme>atom-one-dark-reasonable</highlightjs-theme>
<allow-uri-read>true</allow-uri-read>
<nofooter />
<toc>left</toc>
<toc-levels>4</toc-levels>
<spring-cloud-version>${project.version}</spring-cloud-version>
<sectlinks>true</sectlinks>
</attributes>
<outputFile>${docs.main}.html</outputFile>
</configuration>
</execution>
<execution>
<id>generate-docbook</id>
<phase>none</phase>
<goals>
<goal>process-asciidoc</goal>
</goals>
</execution>
<execution>
<id>generate-index</id>
<phase>none</phase>
<goals>
<goal>process-asciidoc</goal>
</goals>
</execution>

</executions>
</plugin>

<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>${maven-antrun-plugin.version}</version>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
<dependency>
<groupId>org.tigris.antelope</groupId>
<artifactId>antelopetasks</artifactId>
<version>3.2.10</version>
</dependency>
<dependency>
<groupId>org.jruby</groupId>
<artifactId>jruby-complete</artifactId>
<version>1.7.17</version>
</dependency>
<dependency>
<groupId>org.asciidoctor</groupId>
<artifactId>asciidoctorj</artifactId>
<version>1.5.8</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>readme</id>
<phase>process-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<java classname="org.jruby.Main" failonerror="yes">
<arg
value="${docs.resources.dir}/ruby/generate_readme.sh" />
<arg value="-o" />
<arg value="${main.basedir}/README.adoc" />
</java>
</target>
</configuration>
</execution>
<execution>
<id>assert-no-unresolved-links</id>
<phase>prepare-package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<fileset id="unresolved.file"
dir="${basedir}/target/generated-docs/" includes="**/*.html">
<contains text="Unresolved" />
</fileset>
<fail message="[Unresolved] Found...failing">
<condition>
<resourcecount when="greater" count="0"
refid="unresolved.file" />
</condition>
</fail>
</target>
</configuration>
</execution>
<execution>
<id>setup-maven-properties</id>
<phase>validate</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<exportAntProperties>true</exportAntProperties>
<target>
<taskdef
resource="net/sf/antcontrib/antcontrib.properties" />
<taskdef name="stringutil"
classname="ise.antelope.tasks.StringUtilTask" />
<var name="version-type" value="${project.version}" />
<propertyregex property="version-type"
override="true" input="${version-type}" regexp=".*\.(.*)"
replace="\1" />
<propertyregex property="version-type"
override="true" input="${version-type}" regexp="(M)\d+"
replace="MILESTONE" />
<propertyregex property="version-type"
override="true" input="${version-type}" regexp="(RC)\d+"
replace="MILESTONE" />
<propertyregex property="version-type"
override="true" input="${version-type}" regexp="BUILD-(.*)"
replace="SNAPSHOT" />
<stringutil string="${version-type}"
property="spring-cloud-repo">
<lowercase />
</stringutil>
<var name="github-tag" value="v${project.version}" />
<propertyregex property="github-tag"
override="true" input="${github-tag}" regexp=".*SNAPSHOT"
replace="master" />
</target>
</configuration>
</execution>
<execution>
<id>copy-css</id>
<phase>none</phase>
<goals>
<goal>run</goal>
</goals>
</execution>
<execution>
<id>generate-documentation-index</id>
<phase>none</phase>
<goals>
<goal>run</goal>
</goals>
</execution>
<execution>
<id>copy-generated-html</id>
<phase>none</phase>
<goals>
<goal>run</goal>
</goals>
</execution>

</executions>
</plugin>

<plugin>
<artifactId>maven-deploy-plugin</artifactId>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<inherited>false</inherited>
</plugin>
</plugins>
</build>

@@ -11,43 +11,8 @@ image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.sv
// ======================================================================================

== Apache Kafka Binder

=== Usage

To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
----

Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-starter-stream-kafka</artifactId>
</dependency>
----

== Apache Kafka Streams Binder

=== Usage

To use Apache Kafka Streams binder, you need to add `spring-cloud-stream-binder-kafka-streams` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
</dependency>
----
//= Overview
include::overview.adoc[]

= Appendices
[appendix]
@@ -1,62 +0,0 @@
|
||||
|===
|
||||
|Name | Default | Description
|
||||
|
||||
|spring.cloud.stream.binders | | Additional per-binder properties (see {@link BinderProperties}) if more then one binder of the same type is used (i.e., connect to multiple instances of RabbitMq). Here you can specify multiple binder configurations, each with different environment settings. For example; spring.cloud.stream.binders.rabbit1.environment. . . , spring.cloud.stream.binders.rabbit2.environment. . .
|
||||
|spring.cloud.stream.binding-retry-interval | `30` | Retry interval (in seconds) used to schedule binding attempts. Default: 30 sec.
|
||||
|spring.cloud.stream.bindings | | Additional binding properties (see {@link BinderProperties}) per binding name (e.g., 'input`). For example; This sets the content-type for the 'input' binding of a Sink application: 'spring.cloud.stream.bindings.input.contentType=text/plain'
|
||||
|spring.cloud.stream.default-binder | | The name of the binder to use by all bindings in the event multiple binders available (e.g., 'rabbit').
|
||||
|spring.cloud.stream.dynamic-destination-cache-size | `10` | The maximum size of Least Recently Used (LRU) cache of dynamic destinations. Once this size is reached, new destinations will trigger the removal of old destinations. Default: 10
|
||||
|spring.cloud.stream.dynamic-destinations | `[]` | A list of destinations that can be bound dynamically. If set, only listed destinations can be bound.
|
||||
|spring.cloud.stream.function.batch-mode | `false` |
|
||||
|spring.cloud.stream.function.bindings | |
|
||||
|spring.cloud.stream.instance-count | `1` | The number of deployed instances of an application. Default: 1. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-count" where 'foo' is the name of the binding.
|
||||
|spring.cloud.stream.instance-index | `0` | The instance id of the application: a number from 0 to instanceCount-1. Used for partitioning and with Kafka. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-index" where 'foo' is the name of the binding.
|
||||
|spring.cloud.stream.instance-index-list | | A list of instance id's from 0 to instanceCount-1. Used for partitioning and with Kafka. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-index-list" where 'foo' is the name of the binding. This setting will override the one set in 'spring.cloud.stream.instance-index'
|
||||
|spring.cloud.stream.integration.message-handler-not-propagated-headers | | Message header names that will NOT be copied from the inbound message.
|
||||
|spring.cloud.stream.kafka.binder.authorization-exception-retry-interval | | Time between retries after AuthorizationException is caught in the ListenerContainer; defalt is null which disables retries. For more info see: {@link org.springframework.kafka.listener.ConsumerProperties#setAuthorizationExceptionRetryInterval(java.time.Duration)}
|
||||
|spring.cloud.stream.kafka.binder.auto-add-partitions | `false` |
|
||||
|spring.cloud.stream.kafka.binder.auto-alter-topics | `false` |
|
||||
|spring.cloud.stream.kafka.binder.auto-create-topics | `true` |
|
||||
|spring.cloud.stream.kafka.binder.brokers | `[localhost]` |
|
||||
|spring.cloud.stream.kafka.binder.certificate-store-directory | | When a certificate store location is given as classpath URL (classpath:), then the binder moves the resource from the classpath location inside the JAR to a location on the filesystem. If this value is set, then this location is used, otherwise, the certificate file is copied to the directory returned by java.io.tmpdir.
|
||||
|spring.cloud.stream.kafka.binder.configuration | | Arbitrary kafka properties that apply to both producers and consumers.
|
||||
|spring.cloud.stream.kafka.binder.consider-down-when-any-partition-has-no-leader | `false` |
|
||||
|spring.cloud.stream.kafka.binder.consumer-properties | | Arbitrary kafka consumer properties.
|
||||
|spring.cloud.stream.kafka.binder.header-mapper-bean-name | | The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
|spring.cloud.stream.kafka.binder.headers | `[]` |
|
||||
|spring.cloud.stream.kafka.binder.health-timeout | `60` | Time to wait to get partition information in seconds; default 60.
|
||||
|spring.cloud.stream.kafka.binder.jaas | |
|
||||
|spring.cloud.stream.kafka.binder.min-partition-count | `1` |
|
||||
|spring.cloud.stream.kafka.binder.producer-properties | | Arbitrary kafka producer properties.
|
||||
|spring.cloud.stream.kafka.binder.replication-factor | `-1` |
|
||||
|spring.cloud.stream.kafka.binder.required-acks | `1` |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.batch-timeout | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.buffer-size | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.compression-type | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.configuration | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.error-channel-enabled | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.header-mode | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.header-patterns | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.message-key-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-count | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-key-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-key-extractor-name | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-selector-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-selector-name | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.required-groups | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.sync | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.topic | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.use-native-encoding | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix | |
|
||||
|spring.cloud.stream.kafka.bindings | |
|
||||
|spring.cloud.stream.metrics.export-properties | | List of properties that are going to be appended to each message. This gets populated by onApplicationEvent, once the context refreshes, to avoid the overhead of doing it on a per-message basis.
|
||||
|spring.cloud.stream.metrics.key | | The name of the metric being emitted. Should be a unique value per application. Defaults to: ${spring.application.name:${vcap.application.name:${spring.config.name:application}}}.
|
||||
|spring.cloud.stream.metrics.meter-filter | | Pattern to control the 'meters' one wants to capture. By default all 'meters' will be captured. For example, 'spring.integration.*' will only capture metric information for meters whose name starts with 'spring.integration'.
|
||||
|spring.cloud.stream.metrics.properties | | Application properties that should be added to the metrics payload. For example: `spring.application**`.
|
||||
|spring.cloud.stream.metrics.schedule-interval | `60s` | Interval expressed as Duration for scheduling metrics snapshots publishing. Defaults to 60 seconds
|
||||
|spring.cloud.stream.override-cloud-connectors | `false` | This property is only applicable when the cloud profile is active and Spring Cloud Connectors are provided with the application. If the property is false (the default), the binder detects a suitable bound service (for example, a RabbitMQ service bound in Cloud Foundry for the RabbitMQ binder) and uses it for creating connections (usually through Spring Cloud Connectors). When set to true, this property instructs binders to completely ignore the bound services and rely on Spring Boot properties (for example, relying on the spring.rabbitmq.* properties provided in the environment for the RabbitMQ binder). The typical usage of this property is to be nested in a customized environment when connecting to multiple systems.
|
||||
|spring.cloud.stream.pollable-source | `none` | A semi-colon delimited list of binding names of pollable sources. Binding names follow the same naming convention as functions. For example, name '...pollable-source=foobar' will be accessible as the 'foobar-in-0' binding
|
||||
|spring.cloud.stream.sendto.destination | `none` | The name of the header used to determine the name of the output destination
|
||||
|spring.cloud.stream.source | | A colon delimited string representing the names of the sources based on which source bindings will be created. This is primarily to support cases where source binding may be required without providing a corresponding Supplier. (e.g., for cases where the actual source of data is outside of scope of spring-cloud-stream - HTTP -> Stream)
|
||||
|
||||
|===
|
||||
@@ -1,65 +1,12 @@
|
||||
[[kafka-dlq-processing]]
|
||||
=== Dead-Letter Topic Processing
|
||||
|
||||
[[dlq-partition-selection]]
|
||||
==== Dead-Letter Topic Partition Selection
|
||||
|
||||
By default, records are published to the Dead-Letter topic using the same partition as the original record.
|
||||
This means the Dead-Letter topic must have at least as many partitions as the original topic.
|
||||
|
||||
To change this behavior, add a `DlqPartitionFunction` implementation as a `@Bean` to the application context.
|
||||
Only one such bean can be present.
|
||||
The function is provided with the consumer group, the failed `ConsumerRecord` and the exception.
|
||||
For example, if you always want to route to partition 0, you might use:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public DlqPartitionFunction partitionFunction() {
|
||||
return (group, record, ex) -> 0;
|
||||
}
|
||||
----
|
||||
====
|
||||
NOTE: If you set a consumer binding's `dlqPartitions` property to 1 (and the binder's `minPartitionCount` is equal to `1`), there is no need to supply a `DlqPartitionFunction`; the framework will always use partition 0.
|
||||
If you set a consumer binding's `dlqPartitions` property to a value greater than `1` (or the binder's `minPartitionCount` is greater than `1`), you **must** provide a `DlqPartitionFunction` bean, even if the partition count is the same as the original topic's.
|
||||
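Similarly, when the dead-letter topic has fewer partitions than the original topic, one option is a function that wraps the original record's partition around the DLQ partition count. The following is only a sketch; the partition count of `3` is an assumption for illustration:

====
[source, java]
----
@Bean
public DlqPartitionFunction dlqPartitionFunction() {
    // wrap the original record's partition around an assumed DLQ partition count of 3
    return (group, record, ex) -> record.partition() % 3;
}
----
====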
|
||||
It is also possible to define a custom name for the DLQ topic.
|
||||
In order to do so, create an implementation of `DlqDestinationResolver` and add it as a `@Bean` to the application context.
|
||||
When the binder detects such a bean, it takes precedence; otherwise, the binder uses the `dlqName` property.
|
||||
If neither of these are found, it will default to `error.<destination>.<group>`.
|
||||
Here is an example of `DlqDestinationResolver` as a `@Bean`.
|
||||
|
||||
====
|
||||
[source]
|
||||
----
|
||||
@Bean
|
||||
public DlqDestinationResolver dlqDestinationResolver() {
|
||||
return (rec, ex) -> {
|
||||
if (rec.topic().equals("word1")) {
|
||||
return "topic1-dlq";
|
||||
}
|
||||
else {
|
||||
return "topic2-dlq";
|
||||
}
|
||||
};
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
One important thing to keep in mind when providing an implementation for `DlqDestinationResolver` is that the provisioner in the binder will not auto create topics for the application.
|
||||
This is because there is no way for the binder to infer the names of all the DLQ topics the implementation might send to.
|
||||
Therefore, if you provide DLQ names using this strategy, it is the application's responsibility to ensure that those topics are created beforehand.
|
||||
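For example, for the resolver shown above that routes to `topic1-dlq` and `topic2-dlq`, the application could declare those topics itself. The following is only a sketch; it assumes Spring Kafka's `KafkaAdmin`/`NewTopic` provisioning is available, and the partition and replica counts are illustrative:

====
[source, java]
----
@Bean
public NewTopic topic1Dlq() {
    // 1 partition, replication factor 1 -- adjust to your environment
    return new NewTopic("topic1-dlq", 1, (short) 1);
}

@Bean
public NewTopic topic2Dlq() {
    return new NewTopic("topic2-dlq", 1, (short) 1);
}
----
====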
|
||||
[[dlq-handling]]
|
||||
==== Handling Records in a Dead-Letter Topic
|
||||
|
||||
Because the framework cannot anticipate how users would want to dispose of dead-lettered messages, it does not provide any standard mechanism to handle them.
|
||||
Because you cannot anticipate how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
|
||||
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
|
||||
However, if the problem is a permanent issue, that could cause an infinite loop.
|
||||
The sample Spring Boot application within this topic is an example of how to route those messages back to the original topic, but it moves them to a "`parking lot`" topic after three attempts.
|
||||
The application is another spring-cloud-stream application that reads from the dead-letter topic.
|
||||
It exits when no messages are received for 5 seconds.
|
||||
It terminates when no messages are received for 5 seconds.
|
||||
|
||||
The examples assume the original destination is `so8400out` and the consumer group is `so8400`.
|
||||
|
||||
@@ -141,7 +88,7 @@ public class ReRouteDlqKApplication implements CommandLineRunner {
|
||||
int count = this.processed.get();
|
||||
Thread.sleep(5000);
|
||||
if (count == this.processed.get()) {
|
||||
System.out.println("Idle, exiting");
|
||||
System.out.println("Idle, terminating");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ set -e
|
||||
|
||||
# Set default props like MAVEN_PATH, ROOT_FOLDER etc.
|
||||
function set_default_props() {
|
||||
# The script should be run from the root folder
|
||||
# The script should be executed from the root folder
|
||||
ROOT_FOLDER=`pwd`
|
||||
echo "Current folder is ${ROOT_FOLDER}"
|
||||
|
||||
@@ -65,7 +65,7 @@ function build_docs_if_applicable() {
|
||||
}
|
||||
|
||||
# Get the name of the `docs.main` property
|
||||
# Get allowed branches - assumes that a `docs` module is available under `docs` profile
|
||||
# Get whitelisted branches - assumes that a `docs` module is available under `docs` profile
|
||||
function retrieve_doc_properties() {
|
||||
MAIN_ADOC_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
@@ -75,14 +75,14 @@ function retrieve_doc_properties() {
|
||||
echo "Extracted 'main.adoc' from Maven build [${MAIN_ADOC_VALUE}]"
|
||||
|
||||
|
||||
ALLOW_PROPERTY=${ALLOW_PROPERTY:-"docs.allowed.branches"}
|
||||
ALLOWED_BRANCHES_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
WHITELIST_PROPERTY=${WHITELIST_PROPERTY:-"docs.whitelisted.branches"}
|
||||
WHITELISTED_BRANCHES_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args="\${${ALLOW_PROPERTY}}" \
|
||||
-Dexec.args="\${${WHITELIST_PROPERTY}}" \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec \
|
||||
-P docs \
|
||||
-pl docs)
|
||||
echo "Extracted '${ALLOW_PROPERTY}' from Maven build [${ALLOWED_BRANCHES_VALUE}]"
|
||||
echo "Extracted '${WHITELIST_PROPERTY}' from Maven build [${WHITELISTED_BRANCHES_VALUE}]"
|
||||
}
|
||||
|
||||
# Stash any outstanding changes
|
||||
@@ -148,9 +148,9 @@ function copy_docs_for_current_version() {
|
||||
else
|
||||
echo -e "Current branch is [${CURRENT_BRANCH}]"
|
||||
# https://stackoverflow.com/questions/29300806/a-bash-script-to-check-if-a-string-is-present-in-a-comma-separated-list-of-strin
|
||||
if [[ ",${ALLOWED_BRANCHES_VALUE}," = *",${CURRENT_BRANCH},"* ]] ; then
|
||||
if [[ ",${WHITELISTED_BRANCHES_VALUE}," = *",${CURRENT_BRANCH},"* ]] ; then
|
||||
mkdir -p ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is allowed! Will copy the current docs to the [${CURRENT_BRANCH}] folder"
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is whitelisted! Will copy the current docs to the [${CURRENT_BRANCH}] folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
@@ -169,7 +169,7 @@ function copy_docs_for_current_version() {
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is not on the allow list! Check out the Maven [${ALLOW_PROPERTY}] property in
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is not on the white list! Check out the Maven [${WHITELIST_PROPERTY}] property in
|
||||
[docs] module available under [docs] profile. Won't commit any changes to gh-pages for this branch."
|
||||
fi
|
||||
fi
|
||||
@@ -250,10 +250,10 @@ the script will work in the following manner:
|
||||
|
||||
- if there's no gh-pages / target for docs module then the script ends
|
||||
- for master branch the generated docs are copied to the root of gh-pages branch
|
||||
- for any other branch (if that branch is allowed) a subfolder with branch name is created
|
||||
- for any other branch (if that branch is whitelisted) a subfolder with branch name is created
|
||||
and docs are copied there
|
||||
- if the version switch is passed (-v) then a tag with (v) prefix will be retrieved and a folder
|
||||
with that version number will be created in the gh-pages branch. WARNING! No allow verification will take place
|
||||
with that version number will be created in the gh-pages branch. WARNING! No whitelist verification will take place
|
||||
- if the destination switch is passed (-d) then the script will check if the provided dir is a git repo and then will
|
||||
switch to gh-pages of that repo and copy the generated docs to `docs/<project-name>/<version>`
|
||||
- if the destination switch is passed (-d) then the script will check if the provided dir is a git repo and then will
|
||||
@@ -327,4 +327,4 @@ build_docs_if_applicable
|
||||
retrieve_doc_properties
|
||||
stash_changes
|
||||
add_docs_from_target
|
||||
checkout_previous_branch
|
||||
checkout_previous_branch
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 233 KiB |
File diff suppressed because it is too large
Load Diff
@@ -19,7 +19,7 @@ To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` a
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown inn the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
@@ -40,7 +40,7 @@ The Apache Kafka Binder implementation maps each destination to an Apache Kafka
|
||||
The consumer group maps directly to the same Apache Kafka concept.
|
||||
Partitioning also maps directly to Apache Kafka partitions as well.
|
||||
|
||||
The binder currently uses the Apache Kafka `kafka-clients` version `2.3.1`.
|
||||
The binder currently uses the Apache Kafka `kafka-clients` 1.0.0 jar and is designed to be used with a broker of at least that version.
|
||||
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
|
||||
For example, with versions earlier than 0.11.x.x, native headers are not supported.
|
||||
Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
@@ -49,7 +49,7 @@ Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
|
||||
This section contains the configuration options used by the Apache Kafka binder.
|
||||
|
||||
For common configuration options and properties pertaining to the binder, see the https://cloud.spring.io/spring-cloud-static/spring-cloud-stream/current/reference/html/spring-cloud-stream.html#binding-properties[binding properties] in core documentation.
|
||||
For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
|
||||
|
||||
==== Kafka Binder Properties
|
||||
|
||||
@@ -106,17 +106,13 @@ spring.cloud.stream.kafka.binder.replicationFactor::
|
||||
The replication factor of auto-created topics if `autoCreateTopics` is active.
|
||||
Can be overridden on each binding.
|
||||
+
|
||||
NOTE: If you are using Kafka broker versions prior to 2.4, then this value should be set to at least `1`.
|
||||
Starting with version 3.0.8, the binder uses `-1` as the default value, which indicates that the broker 'default.replication.factor' property will be used to determine the number of replicas.
|
||||
Check with your Kafka broker admins to see if there is a policy in place that requires a minimum replication factor, if that's the case then, typically, the `default.replication.factor` will match that value and `-1` should be used, unless you need a replication factor greater than the minimum.
|
||||
+
|
||||
Default: `-1`.
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.autoCreateTopics::
|
||||
If set to `true`, the binder creates new topics automatically.
|
||||
If set to `false`, the binder relies on the topics being already configured.
|
||||
In the latter case, if the topics do not exist, the binder fails to start.
|
||||
+
|
||||
NOTE: This setting is independent of the `auto.create.topics.enable` setting of the broker and does not influence it.
|
||||
NOTE: This setting is independent of the `auto.topic.create.enable` setting of the broker and does not influence it.
|
||||
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
|
||||
+
|
||||
Default: `true`.
|
||||
@@ -139,33 +135,13 @@ Default: See individual producer properties.
|
||||
|
||||
spring.cloud.stream.kafka.binder.headerMapperBeanName::
|
||||
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `BinderHeaderMapper` bean that uses JSON deserialization for the headers.
|
||||
If this custom `BinderHeaderMapper` bean is not made available to the binder using this property, then the binder will look for a header mapper bean with the name `kafkaBinderHeaderMapper` that is of type `BinderHeaderMapper` before falling back to a default `BinderHeaderMapper` created by the binder.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
spring.cloud.stream.kafka.binder.considerDownWhenAnyPartitionHasNoLeader::
|
||||
Flag to set the binder health as `down` when any partition on the topic, regardless of the consumer that is receiving data from it, is found without a leader.
|
||||
+
|
||||
Default: `false`.
|
||||
|
||||
spring.cloud.stream.kafka.binder.certificateStoreDirectory::
|
||||
When the truststore or keystore certificate location is given as a classpath URL (`classpath:...`), the binder copies the resource from the classpath location inside the JAR file to a location on the filesystem.
|
||||
This is true for both broker level certificates (`ssl.truststore.location` and `ssl.keystore.location`) and certificates intended for schema registry (`schema.registry.ssl.truststore.location` and `schema.registry.ssl.keystore.location`).
|
||||
Keep in mind that the truststore and keystore classpath locations must be provided under `spring.cloud.stream.kafka.binder.configuration...`.
|
||||
For example, `spring.cloud.stream.kafka.binder.configuration.ssl.truststore.location`, `spring.cloud.stream.kafka.binder.configuration.schema.registry.ssl.truststore.location`, etc.
|
||||
The file will be moved to the location specified as the value for this property which must be an existing directory on the filesystem that is writable by the process running the application.
|
||||
If this value is not set and the certificate file is a classpath resource, then it will be moved to System's temp directory as returned by `System.getProperty("java.io.tmpdir")`.
|
||||
This is also true if this value is present but the directory cannot be found on the filesystem or is not writable.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `DefaultKafkaHeaderMapper` that uses JSON deserialization for the headers.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[kafka-consumer-properties]]
|
||||
==== Kafka Consumer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.consumer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
|
||||
|
||||
@@ -191,15 +167,9 @@ By default, offsets are committed after all records in the batch of records retu
|
||||
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
|
||||
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
|
||||
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
|
||||
This property is deprecated as of 3.1 in favor of using `ackMode`.
|
||||
If the `ackMode` is not set and batch mode is not enabled, `RECORD` ackMode will be used.
|
||||
+
|
||||
Default: `false`.
|
||||
|
||||
autoCommitOffset::
|
||||
|
||||
Starting with version 3.1, this property is deprecated.
|
||||
See `ackMode` for more details on alternatives.
|
||||
Whether to autocommit offsets when a message has been processed.
|
||||
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header is present in the inbound message.
|
||||
Applications may use this header for acknowledging messages.
|
||||
@@ -208,56 +178,39 @@ When this property is set to `false`, Kafka binder sets the ack mode to `org.spr
|
||||
Also see `ackEachRecord`.
|
||||
+
|
||||
Default: `true`.
|
||||
ackMode::
|
||||
Specify the container ack mode.
|
||||
This is based on the AckMode enumeration defined in Spring Kafka.
|
||||
If the `ackEachRecord` property is set to `true` and the consumer is not in batch mode, the `RECORD` ack mode is used; otherwise, the ack mode provided by this property is used.
|
||||
|
||||
autoCommitOnError::
|
||||
In pollable consumers, if set to `true`, it always auto commits on error.
|
||||
If not set (the default) or false, it will not auto commit in pollable consumers.
|
||||
Note that this property is only applicable for pollable consumers.
|
||||
Effective only if `autoCommitOffset` is set to `true`.
|
||||
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
|
||||
If set to `true`, it always auto-commits (if auto-commit is enabled).
|
||||
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
|
||||
+
|
||||
Default: not set.
|
||||
resetOffsets::
|
||||
Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
Must be false if a `KafkaBindingRebalanceListener` is provided; see <<rebalance-listener>>.
|
||||
See <<reset-offsets>> for more information about this property.
|
||||
Must be false if a `KafkaRebalanceListener` is provided; see <<rebalance-listener>>.
|
||||
+
|
||||
Default: `false`.
|
||||
startOffset::
|
||||
The starting offset for new groups.
|
||||
Allowed values: `earliest` and `latest`.
|
||||
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
|
||||
See <<reset-offsets>> for more information about this property.
|
||||
Also see `resetOffsets` (earlier in this list).
|
||||
+
|
||||
Default: null (equivalent to `earliest`).
|
||||
enableDlq::
|
||||
When set to true, it enables DLQ behavior for the consumer.
|
||||
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configurable by setting the `dlqName` property or by defining a `@Bean` of type `DlqDestinationResolver`.
|
||||
The DLQ topic name can be configurable by setting the `dlqName` property.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> processing for more information.
|
||||
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
|
||||
By default, a failed record is sent to the same partition number in the DLQ topic as the original record.
|
||||
See <<dlq-partition-selection>> for how to change that behavior.
|
||||
**Not allowed when `destinationIsPattern` is `true`.**
|
||||
+
|
||||
Default: `false`.
|
||||
dlqPartitions::
|
||||
When `enableDlq` is true, and this property is not set, a dead letter topic with the same number of partitions as the primary topic(s) is created.
|
||||
Usually, dead-letter records are sent to the same partition in the dead-letter topic as the original record.
|
||||
This behavior can be changed; see <<dlq-partition-selection>>.
|
||||
If this property is set to `1` and there is no `DlqPartitionFunction` bean, all dead-letter records will be written to partition `0`.
|
||||
If this property is greater than `1`, you **MUST** provide a `DlqPartitionFunction` bean.
|
||||
Note that the actual partition count is affected by the binder's `minPartitionCount` property.
|
||||
+
|
||||
Default: `none`
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka consumer properties.
|
||||
In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
For example, some properties needed by the application, such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
|
||||
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
|
||||
+
|
||||
Default: Empty map.
|
||||
dlqName::
|
||||
@@ -267,8 +220,6 @@ Default: null (If not specified, messages that result in errors are forwarded to
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available through kafka producer properties can be set through this property.
|
||||
When native decoding is enabled on the consumer (i.e., `useNativeDecoding: true`), the application must provide corresponding key/value serializers for the DLQ.
|
||||
This must be provided in the form of `dlqProducerProperties.configuration.key.serializer` and `dlqProducerProperties.configuration.value.serializer`.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
@@ -308,75 +259,11 @@ topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of -1 is used).
|
||||
pollTimeout::
|
||||
Timeout used for polling in pollable consumers.
|
||||
+
|
||||
Default: 5 seconds.
|
||||
transactionManager::
|
||||
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
|
||||
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
|
||||
To achieve exactly once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
|
||||
+
|
||||
Default: none.
|
||||
txCommitRecovered::
|
||||
When using a transactional binder, the offset of a recovered record (e.g. when retries are exhausted and the record is sent to a dead letter topic) will be committed via a new transaction, by default.
|
||||
Setting this property to `false` suppresses committing the offset of a recovered record.
|
||||
+
|
||||
Default: true.
|
||||
commonErrorHandlerBeanName::
|
||||
`CommonErrorHandler` bean name to use per consumer binding.
|
||||
When present, this user provided `CommonErrorHandler` takes precedence over any other error handlers defined by the binder.
|
||||
This is a handy way to express error handlers, if the application does not want to use a `ListenerContainerCustomizer` and then check the destination/group combination to set an error handler.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[reset-offsets]]
|
||||
==== Resetting Offsets
|
||||
|
||||
When an application starts, the initial position in each assigned partition depends on two properties: `startOffset` and `resetOffsets`.
|
||||
If `resetOffsets` is `false`, normal Kafka consumer https://kafka.apache.org/documentation/#consumerconfigs_auto.offset.reset[`auto.offset.reset`] semantics apply.
|
||||
i.e., if there is no committed offset for a partition for the binding's consumer group, the position is `earliest` or `latest`.
|
||||
By default, bindings with an explicit `group` use `earliest`, and anonymous bindings (with no `group`) use `latest`.
|
||||
These defaults can be overridden by setting the `startOffset` binding property.
|
||||
There will be no committed offset(s) the first time the binding is started with a particular `group`.
|
||||
The other condition where no committed offset exists is if the offset has been expired.
|
||||
With modern brokers (since 2.1), and default broker properties, the offsets are expired 7 days after the last member leaves the group.
|
||||
See the https://kafka.apache.org/documentation/#brokerconfigs_offsets.retention.minutes[`offsets.retention.minutes`] broker property for more information.
|
||||
|
||||
When `resetOffsets` is `true`, the binder applies similar semantics to those that apply when there is no committed offset on the broker, as if this binding has never consumed from the topic; i.e. any current committed offset is ignored.
|
||||
|
||||
Following are two use cases when this might be used.
|
||||
|
||||
1. Consuming from a compacted topic containing key/value pairs.
|
||||
Set `resetOffsets` to `true` and `startOffset` to `earliest`; the binding will perform a `seekToBeginning` on all newly assigned partitions.
|
||||
|
||||
2. Consuming from a topic containing events, where you are only interested in events that occur while this binding is running.
|
||||
Set `resetOffsets` to `true` and `startOffset` to `latest`; the binding will perform a `seekToEnd` on all newly assigned partitions.
|
||||
|
||||
IMPORTANT: If a rebalance occurs after the initial assignment, the seeks will only be performed on any newly assigned partitions that were not assigned during the initial assignment.
|
||||
|
||||
For more control over topic offsets, see <<rebalance-listener>>; when a listener is provided, `resetOffsets` should not be set to `true`, otherwise, that will cause an error.
|
||||
|
||||
==== Consuming Batches
|
||||
|
||||
Starting with version 3.0, when `spring.cloud.stream.bindings.<name>.consumer.batch-mode` is set to `true`, all of the records received by polling the Kafka `Consumer` will be presented as a `List<?>` to the listener method.
|
||||
Otherwise, the method will be called with one record at a time.
|
||||
The size of the batch is controlled by Kafka consumer properties `max.poll.records`, `fetch.min.bytes`, `fetch.max.wait.ms`; refer to the Kafka documentation for more information.
|
||||
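For example, a batch listener using the functional model might look like the following sketch (the binding name `process-in-0` and the `byte[]` payload type are assumptions; batch mode would be enabled for that binding with `spring.cloud.stream.bindings.process-in-0.consumer.batch-mode=true`):

====
[source, java]
----
@Bean
public Consumer<List<byte[]>> process() {
    // each invocation receives the whole batch returned by the poll
    return records -> records.forEach(record -> {
        // ... handle each record ...
    });
}
----
====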
|
||||
Bear in mind that batch mode is not supported with `@StreamListener` - it only works with the newer functional programming model.
|
||||
|
||||
IMPORTANT: Retry within the binder is not supported when using batch mode, so `maxAttempts` will be overridden to 1.
|
||||
You can configure a `SeekToCurrentBatchErrorHandler` (using a `ListenerContainerCustomizer`) to achieve similar functionality to retry in the binder.
|
||||
You can also use a manual `AckMode` and call `Acknowledgment.nack(index, sleep)` to commit the offsets for a partial batch and have the remaining records redelivered.
|
||||
Refer to the https://docs.spring.io/spring-kafka/docs/2.3.0.BUILD-SNAPSHOT/reference/html/#committing-offsets[Spring for Apache Kafka documentation] for more information about these techniques.
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
|
||||
[[kafka-producer-properties]]
|
||||
==== Kafka Producer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.producer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
|
||||
|
||||
@@ -397,13 +284,6 @@ sync::
|
||||
Whether the producer is synchronous.
|
||||
+
|
||||
Default: `false`.
|
||||
sendTimeoutExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to evaluate the time to wait for ack when synchronous publish is enabled -- for example, `headers['mySendTimeout']`.
|
||||
The value of the timeout is in milliseconds.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
batchTimeout::
|
||||
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
|
||||
@@ -411,13 +291,7 @@ How long the producer waits to allow more messages to accumulate in the same bat
|
||||
Default: `0`.
|
||||
messageKeyExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
In the case of a regular processor (`Function<String, String>` or `Function<Message<?>, Message<?>>`), if the produced key needs to be the same as the incoming key from the topic, this property can be set as below.
|
||||
`spring.cloud.stream.kafka.bindings.<output-binding-name>.producer.messageKeyExpression: headers['kafka_receivedMessageKey']`
|
||||
There is an important caveat to keep in mind for reactive functions.
|
||||
In that case, it is up to the application to manually copy the headers from the incoming messages to outbound messages.
|
||||
You can set the header, e.g. `myKey` and use `headers['myKey']` as suggested above or, for convenience, simply set the `KafkaHeaders.MESSAGE_KEY` header, and you do not need to set this property at all.
|
||||
The payload cannot be used because, by the time this expression is evaluated, the payload is already in the form of a `byte[]`.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
@@ -431,7 +305,6 @@ For example `!ask,as*` will pass `ash` but not `ask`.
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
|
||||
+
|
||||
Default: Empty map.
|
||||
topic.properties::
|
||||
@@ -447,22 +320,8 @@ topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of -1 is used).
|
||||
useTopicHeader::
|
||||
Set to `true` to override the default binding destination (topic name) with the value of the `KafkaHeaders.TOPIC` message header in the outbound message.
|
||||
If the header is not present, the default binding destination is used.
|
||||
+
|
||||
Default: `false`.
|
||||
recordMetadataChannel::
|
||||
The bean name of a `MessageChannel` to which successful send results should be sent; the bean must exist in the application context.
|
||||
The message sent to the channel is the sent message (after conversion, if any) with an additional header `KafkaHeaders.RECORD_METADATA`.
|
||||
The header contains a `RecordMetadata` object provided by the Kafka client; it includes the partition and offset where the record was written in the topic.
|
||||
+
|
||||
`ResultMetadata meta = sendResultMsg.getHeaders().get(KafkaHeaders.RECORD_METADATA, RecordMetadata.class)`
|
||||
+
|
||||
Failed sends go the producer error channel (if configured); see <<kafka-error-channels>>.
|
||||
+
|
||||
Default: null.
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
|
||||
|
||||
NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
|
||||
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
|
||||
@@ -472,38 +331,20 @@ If a topic already exists with a larger number of partitions than the maximum of
|
||||
|
||||
compression::
|
||||
Set the `compression.type` producer property.
|
||||
Supported values are `none`, `gzip`, `snappy`, `lz4` and `zstd`.
|
||||
Supported values are `none`, `gzip`, `snappy` and `lz4`.
|
||||
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
|
||||
+
|
||||
Default: `none`.
|
||||
transactionManager::
|
||||
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
|
||||
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
|
||||
To achieve exactly once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
closeTimeout::
|
||||
Timeout in number of seconds to wait for when closing the producer.
|
||||
+
|
||||
Default: `30`
|
||||
|
||||
allowNonTransactional::
|
||||
Normally, all output bindings associated with a transactional binder will publish in a new transaction, if one is not already in process.
|
||||
This property allows you to override that behavior.
|
||||
If set to true, records published to this output binding will not be run in a transaction, unless one is already in process.
|
||||
+
|
||||
Default: `false`
|
||||
|
||||
==== Usage examples
|
||||
|
||||
In this section, we show the use of the preceding properties for specific scenarios.
|
||||
|
||||
===== Example: Setting `ackMode` to `MANUAL` and Relying on Manual Acknowledgement
|
||||
===== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking
|
||||
|
||||
This example illustrates how one may manually acknowledge offsets in a consumer application.
|
||||
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.ackMode` be set to `MANUAL`.
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
|
||||
Use the corresponding input channel name for your example.
|
||||
|
||||
[source]
|
||||
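----
// Illustrative sketch only; the original sample is not shown in this view.
// It assumes the functional programming model and relies on the
// `kafka_acknowledgment` header (org.springframework.kafka.support.Acknowledgment)
// that is present when the ack mode is MANUAL, as described earlier.
@Bean
public Consumer<Message<?>> input() {
    return message -> {
        // ... process the payload ...
        Acknowledgment ack = message.getHeaders()
                .get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        if (ack != null) {
            ack.acknowledge(); // commit the offset once processing succeeds
        }
    };
}
----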
@@ -615,47 +456,6 @@ Usually, applications may use principals that do not have administrative rights
|
||||
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
|
||||
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
|
||||
|
||||
====== Multi-binder configuration and JAAS
|
||||
|
||||
When connecting to multiple clusters, each of which requires a separate JAAS configuration, set the JAAS configuration by using the `sasl.jaas.config` property.
|
||||
When this property is present in the application, it takes precedence over the other strategies mentioned above.
|
||||
See this https://cwiki.apache.org/confluence/display/KAFKA/KIP-85%3A+Dynamic+JAAS+configuration+for+Kafka+clients[KIP-85] for more details.
|
||||
|
||||
For example, if you have two clusters in your application with separate JAAS configuration, then the following is a template that you can use:
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
binders:
|
||||
kafka1:
|
||||
type: kafka
|
||||
environment:
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
kafka:
|
||||
binder:
|
||||
brokers: localhost:9092
|
||||
configuration.sasl.jaas.config: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-secret\";"
|
||||
kafka2:
|
||||
type: kafka
|
||||
environment:
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
kafka:
|
||||
binder:
|
||||
brokers: localhost:9093
|
||||
configuration.sasl.jaas.config: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"user1\" password=\"user1-secret\";"
|
||||
kafka.binder:
|
||||
configuration:
|
||||
security.protocol: SASL_PLAINTEXT
|
||||
sasl.mechanism: PLAIN
|
||||
```
|
||||
|
||||
Note that both the Kafka clusters and the `sasl.jaas.config` values for each of them are different in the above configuration.
|
||||
|
||||
See this https://github.com/spring-cloud/spring-cloud-stream-samples/tree/main/multi-binder-samples/kafka-multi-binder-jaas[sample application] for more details on how to set up and run such an application.
|
||||
|
||||
[[pause-resume]]
|
||||
===== Example: Pausing and Resuming the Consumer
|
||||
|
||||
@@ -696,65 +496,11 @@ public class Application {
|
||||
}
|
||||
----
|
||||
|
||||
[[kafka-transactional-binder]]
|
||||
=== Transactional Binder
|
||||
|
||||
Enable transactions by setting `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` to a non-empty value, e.g. `tx-`.
|
||||
When used in a processor application, the consumer starts the transaction; any records sent on the consumer thread participate in the same transaction.
|
||||
When the listener exits normally, the listener container will send the offset to the transaction and commit it.
|
||||
A common producer factory is used for all producer bindings configured using `spring.cloud.stream.kafka.binder.transaction.producer.*` properties; individual binding Kafka producer properties are ignored.
|
||||
|
||||
IMPORTANT: Normal binder retries (and dead lettering) are not supported with transactions because the retries will run in the original transaction, which may be rolled back and any published records will be rolled back too.
|
||||
When retries are enabled (the common property `maxAttempts` is greater than zero) the retry properties are used to configure a `DefaultAfterRollbackProcessor` to enable retries at the container level.
|
||||
Similarly, instead of publishing dead-letter records within the transaction, this functionality is moved to the listener container, again via the `DefaultAfterRollbackProcessor` which runs after the main transaction has rolled back.
|
||||
|
||||
If you wish to use transactions in a source application, or from some arbitrary thread for producer-only transaction (e.g. `@Scheduled` method), you must get a reference to the transactional producer factory and define a `KafkaTransactionManager` bean using it.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public PlatformTransactionManager transactionManager(BinderFactory binders,
|
||||
@Value("${unique.tx.id.per.instance}") String txId) {
|
||||
|
||||
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
|
||||
MessageChannel.class)).getTransactionalProducerFactory();
|
||||
KafkaTransactionManager tm = new KafkaTransactionManager<>(pf);
|
||||
tm.setTransactionIdPrefix(txId);
|
||||
return tm;
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
Notice that we get a reference to the binder using the `BinderFactory`; use `null` in the first argument when there is only one binder configured.
|
||||
If more than one binder is configured, use the binder name to get the reference.
|
||||
Once we have a reference to the binder, we can obtain a reference to the `ProducerFactory` and create a transaction manager.
|
||||
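For instance, with multiple binders configured, the lookup might look like the following sketch (the binder name `kafka1` is an assumption for illustration):

====
[source, java]
----
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder("kafka1",
        MessageChannel.class)).getTransactionalProducerFactory();
----
====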
|
||||
Then you would use normal Spring transaction support, e.g. `TransactionTemplate` or `@Transactional`, for example:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public static class Sender {
|
||||
|
||||
@Transactional
|
||||
public void doInTransaction(MessageChannel output, List<String> stuffToSend) {
|
||||
stuffToSend.forEach(stuff -> output.send(new GenericMessage<>(stuff)));
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
If you wish to synchronize producer-only transactions with those from some other transaction manager, use a `ChainedTransactionManager`.
|
||||
|
||||
IMPORTANT: If you deploy multiple instances of your application, each instance needs a unique `transactionIdPrefix`.
|
||||
|
||||
[[kafka-error-channels]]
|
||||
=== Error Channels
|
||||
|
||||
Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
|
||||
See https://cloud.spring.io/spring-cloud-static/spring-cloud-stream/current/reference/html/spring-cloud-stream.html#spring-cloud-stream-overview-error-handling[this section on error handling] for more information.
|
||||
See <<spring-cloud-stream-overview-error-handling>> for more information.
|
||||
|
||||
The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
|
||||
|
||||
@@ -770,25 +516,9 @@ You can consume these exceptions with your own Spring Integration flow.
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not been yet consumed from a given binder's topic by a given consumer group.
|
||||
The metrics provided are based on the Micrometer library.
|
||||
The binder creates the `KafkaBinderMetrics` bean if Micrometer is on the classpath and no other such beans are provided by the application.
|
||||
The metric contains the consumer group information, topic and the actual lag in committed offset from the latest offset on the topic.
|
||||
The metrics provided are based on the Mircometer metrics library. The metric contains the consumer group information, topic and the actual lag in committed offset from the latest offset on the topic.
|
||||
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
|
||||
|
||||
You can prevent `KafkaBinderMetrics` from creating the necessary infrastructure (such as consumers) and reporting the metrics by providing the following component in the application.
|
||||
|
||||
```
|
||||
@Component
|
||||
class NoOpBindingMeters {
|
||||
NoOpBindingMeters(MeterRegistry registry) {
|
||||
registry.config().meterFilter(
|
||||
MeterFilter.denyNameStartsWith(KafkaBinderMetrics.OFFSET_LAG_METRIC_NAME));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
More details on how to suppress meters selectively can be found https://micrometer.io/docs/concepts#_meter_filters[here].
|
||||
|
||||
[[kafka-tombstones]]
|
||||
=== Tombstone Records (null record values)
|
||||
|
||||
@@ -808,10 +538,10 @@ public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
|
||||
====
|
||||
|
||||
[[rebalance-listener]]
|
||||
=== Using a KafkaBindingRebalanceListener
|
||||
=== Using a KafkaRebalanceListener
|
||||
|
||||
Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
|
||||
Starting with version 2.1, if you provide a single `KafkaBindingRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.
|
||||
Starting with version 2.1, if you provide a single `KafkaRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
@@ -857,117 +587,3 @@ public interface KafkaBindingRebalanceListener {
|
||||
====
|
||||
|
||||
You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.
|
||||
|
||||
[[retry-and-dlq-processing]]
|
||||
=== Retry and Dead Letter Processing
|
||||
|
||||
By default, when you configure retry (e.g. `maxAttempts`) and `enableDlq` in a consumer binding, these functions are performed within the binder, with no participation by the listener container or Kafka consumer.
|
||||
|
||||
There are situations where it is preferable to move this functionality to the listener container, such as:
|
||||
|
||||
* The aggregate of retries and delays will exceed the consumer's `max.poll.interval.ms` property, potentially causing a partition rebalance.
|
||||
* You wish to publish the dead letter to a different Kafka cluster.
|
||||
* You wish to add retry listeners to the error handler.
|
||||
* ...
|
||||
|
||||
To configure moving this functionality from the binder to the container, define a `@Bean` of type `ListenerContainerWithDlqAndRetryCustomizer`.
|
||||
This interface has the following methods:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
/**
|
||||
* Configure the container.
|
||||
* @param container the container.
|
||||
* @param destinationName the destination name.
|
||||
* @param group the group.
|
||||
* @param dlqDestinationResolver a destination resolver for the dead letter topic (if
|
||||
* enableDlq).
|
||||
* @param backOff the backOff using retry properties (if configured).
|
||||
* @see #retryAndDlqInBinding(String, String)
|
||||
*/
|
||||
void configure(AbstractMessageListenerContainer<?, ?> container, String destinationName, String group,
|
||||
@Nullable BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> dlqDestinationResolver,
|
||||
@Nullable BackOff backOff);
|
||||
|
||||
/**
|
||||
* Return false to move retries and DLQ from the binding to a customized error handler
|
||||
* using the retry metadata and/or a {@code DeadLetterPublishingRecoverer} when
|
||||
* configured via
|
||||
* {@link #configure(AbstractMessageListenerContainer, String, String, BiFunction, BackOff)}.
|
||||
* @param destinationName the destination name.
|
||||
* @param group the group.
|
||||
* @return true to disable retries in the binding
|
||||
*/
|
||||
default boolean retryAndDlqInBinding(String destinationName, String group) {
|
||||
return true;
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
The destination resolver and `BackOff` are created from the binding properties (if configured).
|
||||
You can then use these to create a custom error handler and dead letter publisher; for example:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
ListenerContainerWithDlqAndRetryCustomizer cust(KafkaTemplate<?, ?> template) {
|
||||
return new ListenerContainerWithDlqAndRetryCustomizer() {
|
||||
|
||||
@Override
|
||||
public void configure(AbstractMessageListenerContainer<?, ?> container, String destinationName,
|
||||
String group,
|
||||
@Nullable BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> dlqDestinationResolver,
|
||||
@Nullable BackOff backOff) {
|
||||
|
||||
if (destinationName.equals("topicWithLongTotalRetryConfig")) {
|
||||
ConsumerRecordRecoverer dlpr = new DeadLetterPublishingRecoverer(template,
|
||||
dlqDestinationResolver);
|
||||
container.setCommonErrorHandler(new DefaultErrorHandler(dlpr, backOff));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean retryAndDlqInBinding(String destinationName, String group) {
|
||||
return !destinationName.contains("topicWithLongTotalRetryConfig");
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
Now, only a single retry delay needs to be greater than the consumer's `max.poll.interval.ms` property.
|
||||
|
||||
[[consumer-producer-config-customizer]]
|
||||
=== Customizing Consumer and Producer configuration
|
||||
|
||||
If you want advanced customization of consumer and producer configuration that is used for creating `ConsumerFactory` and `ProducerFactory` in Kafka,
|
||||
you can implement the following customizers.
|
||||
|
||||
* ConsumerConfigCustomizer
|
||||
* ProducerConfigCustomizer
|
||||
|
||||
Both of these interfaces provide a way to configure the config map used for consumer and producer properties.
|
||||
For example, if you want to gain access to a bean that is defined at the application level, you can inject that in the implementation of the `configure` method.
|
||||
When the binder discovers that these customizers are available as beans, it will invoke the `configure` method right before creating the consumer and producer factories.
|
||||
|
||||
Both of these interfaces also provide access to both the binding and destination names so that they can be accessed while customizing producer and consumer properties.
|
||||
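Here is a rough sketch of such a customizer, assuming the `configure` callback receives the configuration map along with the binding and destination names as described above (the binding name `process-in-0` is an assumption for illustration):

```
@Bean
public ConsumerConfigCustomizer consumerConfigCustomizer() {
    return (consumerProperties, bindingName, destination) -> {
        // only adjust the consumer used by one particular binding
        if ("process-in-0".equals(bindingName)) {
            consumerProperties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
        }
    };
}
```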
|
||||
[[admin-client-config-customization]]
|
||||
=== Customizing AdminClient Configuration
|
||||
|
||||
As with consumer and producer config customization above, applications can also customize the configuration for admin clients by providing an `AdminClientConfigCustomizer`.
|
||||
The `configure` method of `AdminClientConfigCustomizer` provides access to the admin client properties, which you can use to define further customization.
|
||||
The binder's Kafka topic provisioner gives the highest precedence to the properties provided through this customizer.
|
||||
Here is an example of providing this customizer bean.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public AdminClientConfigCustomizer adminClientConfigCustomizer() {
|
||||
return props -> {
|
||||
props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
@@ -32,8 +32,10 @@ Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinat
|
||||
// ======================================================================================
|
||||
|
||||
|
||||
*{project-version}*
|
||||
*{spring-cloud-stream-version}*
|
||||
|
||||
[#index-link]
|
||||
{docs-url}spring-cloud-stream/{docs-version}home.html
|
||||
|
||||
= Reference Guide
|
||||
include::overview.adoc[]
|
||||
@@ -44,8 +46,6 @@ include::partitions.adoc[]
|
||||
|
||||
include::kafka-streams.adoc[]
|
||||
|
||||
include::tips.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
|
||||
@@ -1,865 +0,0 @@
|
||||
== Tips, Tricks and Recipes
|
||||
|
||||
=== Simple DLQ with Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
As a developer, I want to write a consumer application that processes records from a Kafka topic.
|
||||
However, if some error occurs in processing, I don't want the application to stop completely.
|
||||
Instead, I want to send the record in error to a DLT (Dead-Letter-Topic) and then continue processing new records.
|
||||
|
||||
==== Solution
|
||||
|
||||
The solution for this problem is to use the DLQ feature in Spring Cloud Stream.
|
||||
For the purposes of this discussion, let us assume that the following is our processor function.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<byte[]> processData() {
|
||||
return s -> {
|
||||
throw new RuntimeException();
|
||||
};
}
|
||||
```
|
||||
|
||||
This is a very trivial function that throws an exception for all the records that it processes, but you can take this function and extend it to any other similar situations.
|
||||
|
||||
In order to send the records in error to a DLT, we need to provide the following configuration.
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
group: my-group
|
||||
destination: input-topic
|
||||
kafka:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
consumer:
|
||||
enableDlq: true
|
||||
dlqName: input-topic-dlq
|
||||
```
|
||||
|
||||
In order to activate DLQ, the application must provide a group name.
|
||||
Anonymous consumers cannot use the DLQ facilities.
|
||||
We also need to enable DLQ by setting the `enableDlq` property on the Kafka consumer binding to `true`.
|
||||
Finally, we can optionally provide the DLT name by setting the `dlqName` property on the Kafka consumer binding; otherwise it defaults to `error.input-topic.my-group` in this case.
|
||||
|
||||
Note that in the example consumer provided above, the type of the payload is `byte[]`.
|
||||
By default, the DLQ producer in the Kafka binder expects a payload of type `byte[]`.
|
||||
If that is not the case, then we need to provide the configuration for the proper serializer.
|
||||
For example, let us re-write the consumer function as below:
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<String> processData() {
|
||||
return s -> {
|
||||
throw new RuntimeException();
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Now, we need to tell Spring Cloud Stream how we want to serialize the data when writing to the DLT.
|
||||
Here is the modified configuration for this scenario:
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
group: my-group
|
||||
destination: input-topic
|
||||
kafka:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
consumer:
|
||||
enableDlq: true
|
||||
dlqName: input-topic-dlq
|
||||
dlqProducerProperties:
|
||||
configuration:
|
||||
value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
|
||||
```
|
||||
|
||||
=== DLQ with Advanced Retry Options
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
This is similar to the recipe above, but as a developer I would like to configure the way retries are handled.
|
||||
|
||||
==== Solution
|
||||
|
||||
If you followed the above recipe, then you get the default retry options built into the Kafka binder when the processing encounters an error.
|
||||
|
||||
By default, the binder retries for a maximum of 3 attempts, with a one-second initial delay, a 2.0 back-off multiplier, and a maximum delay of 10 seconds.
|
||||
You can change all of these settings using the following properties:
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.maxAttempts
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.backOffInitialInterval
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.backOffMultiplier
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.backOffMaxInterval
|
||||
```
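For example, to retry up to 5 times with a 2-second initial delay, a 3.0 multiplier, and a 20-second cap, the configuration could look like this (the values here are purely illustrative):

```
spring.cloud.stream.bindings.processData-in-0.consumer.maxAttempts: 5
spring.cloud.stream.bindings.processData-in-0.consumer.backOffInitialInterval: 2000
spring.cloud.stream.bindings.processData-in-0.consumer.backOffMultiplier: 3.0
spring.cloud.stream.bindings.processData-in-0.consumer.backOffMaxInterval: 20000
```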
|
||||
|
||||
If you want, you can also provide a list of retryable exceptions by providing a map of boolean values.
|
||||
For example,
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.retryableExceptions.java.lang.IllegalStateException=true
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.retryableExceptions.java.lang.IllegalArgumentException=false
|
||||
```
|
||||
|
||||
By default, any exceptions not listed in the map above will be retried.
|
||||
If that is not desired, you can disable it by setting the following property:
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.defaultRetryable=false
|
||||
```
|
||||
|
||||
You can also provide your own `RetryTemplate` and annotate it with `@StreamRetryTemplate`, which will be detected and used by the binder.
|
||||
This is useful when you want more sophisticated retry strategies and policies.
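For example, here is a minimal sketch of such a bean, using Spring Retry's `RetryTemplate`, `SimpleRetryPolicy`, and `ExponentialBackOffPolicy` (the policy values are only illustrative):

```
@StreamRetryTemplate
public RetryTemplate myRetryTemplate() {
    RetryTemplate retryTemplate = new RetryTemplate();
    // Retry a maximum of 5 times.
    retryTemplate.setRetryPolicy(new SimpleRetryPolicy(5));
    // Exponential back off: 1 second initial delay, doubling up to 10 seconds.
    ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
    backOffPolicy.setInitialInterval(1000);
    backOffPolicy.setMultiplier(2.0);
    backOffPolicy.setMaxInterval(10000);
    retryTemplate.setBackOffPolicy(backOffPolicy);
    return retryTemplate;
}
```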
|
||||
|
||||
If you have multiple `@StreamRetryTemplate` beans, then you can specify which one your binding should use with the following property:
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.retry-template-name=<your-retry-template-bean-name>
|
||||
```
|
||||
|
||||
=== Handling Deserialization errors with DLQ
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I have a processor that encounters a deserialization exception in the Kafka consumer.
|
||||
I would expect that the Spring Cloud Stream DLQ mechanism will catch that scenario, but it does not.
|
||||
How can I handle this?
|
||||
|
||||
==== Solution
|
||||
|
||||
The normal DLQ mechanism offered by Spring Cloud Stream will not help when the Kafka consumer throws an irrecoverable deserialization exception.
|
||||
This is because this exception happens even before the consumer's `poll()` method returns.
|
||||
The Spring for Apache Kafka project offers some great ways to help the binder with this situation.
|
||||
Let us explore those.
|
||||
|
||||
Assuming this is our function:
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<String> functionName() {
|
||||
return s -> {
|
||||
System.out.println(s);
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
It is a trivial function that takes a `String` parameter.
|
||||
|
||||
We want to bypass the message converters provided by Spring Cloud Stream and use native deserializers instead.
|
||||
In the case of `String` types, it does not make much sense, but for more complex types such as Avro you have to rely on external deserializers and therefore want to delegate the conversion to Kafka.
|
||||
|
||||
Now, when the consumer receives the data, let us assume that there is a bad record that causes a deserialization error, for example because someone passed an `Integer` instead of a `String`.
|
||||
In that case, if you don't do something in the application, the exception will be propagated through the chain and your application will eventually exit.
|
||||
|
||||
In order to handle this, you can add a `ListenerContainerCustomizer` `@Bean` that configures a `SeekToCurrentErrorHandler`.
|
||||
This `SeekToCurrentErrorHandler` is configured with a `DeadLetterPublishingRecoverer`.
|
||||
We also need to configure an `ErrorHandlingDeserializer` for the consumer.
|
||||
That sounds like a lot of complexity, but in reality it boils down to these three beans.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public ListenerContainerCustomizer<AbstractMessageListenerContainer<byte[], byte[]>> customizer(SeekToCurrentErrorHandler errorHandler) {
|
||||
return (container, dest, group) -> {
|
||||
container.setErrorHandler(errorHandler);
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
@Bean
|
||||
public SeekToCurrentErrorHandler errorHandler(DeadLetterPublishingRecoverer deadLetterPublishingRecoverer) {
|
||||
return new SeekToCurrentErrorHandler(deadLetterPublishingRecoverer);
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
@Bean
|
||||
public DeadLetterPublishingRecoverer publisher(KafkaOperations bytesTemplate) {
|
||||
return new DeadLetterPublishingRecoverer(bytesTemplate);
|
||||
}
|
||||
```
|
||||
|
||||
Let us analyze each of them.
|
||||
The first one is the `ListenerContainerCustomizer` bean that takes a `SeekToCurrentErrorHandler`.
|
||||
The container is now customized with that particular error handler.
|
||||
You can learn more about container customization https://docs.spring.io/spring-cloud-stream/docs/current/reference/html/spring-cloud-stream.html#_advanced_consumer_configuration[here].
|
||||
|
||||
The second bean is the `SeekToCurrentErrorHandler` that is configured with a publishing to a `DLT`.
|
||||
See https://docs.spring.io/spring-kafka/docs/current/reference/html/#seek-to-current[here] for more details on `SeekToCurrentErrorHandler`.
|
||||
|
||||
The third bean is the `DeadLetterPublishingRecoverer` that is ultimately responsible for sending to the `DLT`.
|
||||
By default, the `DLT` topic is named after the original topic with a `.DLT` suffix.
|
||||
You can change that though.
|
||||
See the https://docs.spring.io/spring-kafka/docs/current/reference/html/#dead-letters[docs] for more details.
|
||||
|
||||
|
||||
We also need to configure an https://docs.spring.io/spring-kafka/docs/current/reference/html/#error-handling-deserializer[ErrorHandlingDeserializer] through application config.
|
||||
|
||||
The `ErrorHandlingDeserializer` delegates to the actual deserializer.
|
||||
In case of errors, it sets the key/value of the record to null and includes the raw bytes of the message.
|
||||
It then sets the exception in a header and passes this record to the listener, which then calls the registered error handler.
|
||||
|
||||
Following is the configuration required:
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
function:
|
||||
definition: functionName
|
||||
bindings:
|
||||
functionName-in-0:
|
||||
group: group-name
|
||||
destination: input-topic
|
||||
consumer:
|
||||
use-native-decoding: true
|
||||
kafka:
|
||||
bindings:
|
||||
functionName-in-0:
|
||||
consumer:
|
||||
enableDlq: true
|
||||
dlqName: dlq-topic
|
||||
dlqProducerProperties:
|
||||
configuration:
|
||||
value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
configuration:
|
||||
value.deserializer: org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
|
||||
spring.deserializer.value.delegate.class: org.apache.kafka.common.serialization.StringDeserializer
|
||||
```
|
||||
|
||||
We are providing the `ErrorHandlingDeserializer` through the `configuration` property on the binding.
|
||||
We are also indicating that the actual deserializer to delegate to is the `StringDeserializer`.
|
||||
|
||||
Keep in mind that the DLQ properties above are not relevant for the discussion in this recipe.
|
||||
They are purely meant for addressing application-level errors.
|
||||
|
||||
=== Basic offset management in Kafka binder
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I want to write a Spring Cloud Stream Kafka consumer application, and I am not sure how it manages Kafka consumer offsets.
|
||||
Can you explain?
|
||||
|
||||
==== Solution
|
||||
|
||||
We encourage you to read the https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current/reference/html/spring-cloud-stream-binder-kafka.html#reset-offsets[docs] section on this topic to get a thorough understanding of it.
|
||||
|
||||
Here it is in a nutshell:
|
||||
|
||||
Kafka supports two types of starting offsets by default - `earliest` and `latest`.
|
||||
Their semantics are self-explanatory from their names.
|
||||
|
||||
Assume that you are running the consumer for the first time.
|
||||
If you omit the `group.id` in your Spring Cloud Stream application, then it becomes an anonymous consumer.
|
||||
Whenever you have an anonymous consumer, the Spring Cloud Stream application by default starts from the `latest` available offset in the topic partition.
|
||||
On the other hand, if you explicitly specify a `group.id`, then by default, the Spring Cloud Stream application starts from the `earliest` available offset in the topic partition.
|
||||
|
||||
In both cases above (consumers with explicit groups and anonymous groups), the starting offset can be switched by using the property `spring.cloud.stream.kafka.bindings.<binding-name>.consumer.startOffset` and setting it to either `earliest` or `latest`.
|
||||
|
||||
Now, assume that you have already run the consumer before and are starting it again.
|
||||
In this case, the starting offset semantics above do not apply, as the consumer finds an already committed offset for the consumer group (in the case of an anonymous consumer, although the application does not provide a `group.id`, the binder auto-generates one for you).
|
||||
It simply picks up from the last committed offset onward.
|
||||
This is true even when you provide a `startOffset` value.
|
||||
|
||||
However, you can override the default behavior where the consumer starts from the last committed offset by using the `resetOffsets` property.
|
||||
In order to do that, set the property `spring.cloud.stream.kafka.bindings.<binding-name>.consumer.resetOffsets` to `true` (which is `false` by default).
|
||||
Then make sure you provide the `startOffset` value (either `earliest` or `latest`).
|
||||
When you do that, each time the consumer application starts, it starts as if it is running for the first time and ignores any committed offsets for the partition.
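For example, assuming a consumer binding named `processData-in-0`, a sketch of such a configuration could look like this:

```
spring.cloud.stream:
  kafka:
    bindings:
      processData-in-0:
        consumer:
          resetOffsets: true
          startOffset: earliest
```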
|
||||
|
||||
=== Seeking to arbitrary offsets in Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I know that the Kafka binder can set the starting offset to either `earliest` or `latest`, but I have a requirement to seek to something in the middle, an arbitrary offset.
|
||||
Is there a way to achieve this using the Spring Cloud Stream Kafka binder?
|
||||
|
||||
==== Solution
|
||||
|
||||
Previously, we saw how the Kafka binder allows you to tackle basic offset management.
|
||||
By default, the binder does not allow you to rewind to an arbitrary offset, at least through the mechanism we saw in that recipe.
|
||||
However, there are some low-level strategies that the binder provides to achieve this use case.
|
||||
Let's explore them.
|
||||
|
||||
First of all, when you want to reset to an arbitrary offset other than `earliest` or `latest`, make sure to leave the `resetOffsets` configuration at its default, which is `false`.
|
||||
Then you have to provide a custom bean of type `KafkaBindingRebalanceListener`, which will be injected into all consumer bindings.
|
||||
It is an interface that comes with a few default methods, but here is the method that we are interested in:
|
||||
|
||||
```
|
||||
/**
|
||||
* Invoked when partitions are initially assigned or after a rebalance. Applications
|
||||
* might only want to perform seek operations on an initial assignment. While the
|
||||
* 'initial' argument is true for each thread (when concurrency is greater than 1),
|
||||
* implementations should keep track of exactly which partitions have been sought.
|
||||
* There is a race in that a rebalance could occur during startup and so a topic/
|
||||
* partition that has been sought on one thread may be re-assigned to another
|
||||
* thread and you may not wish to re-seek it at that time.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
* @param initial true if this is the initial assignment on the current thread.
|
||||
*/
|
||||
default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> partitions, boolean initial) {
|
||||
// do nothing
|
||||
}
|
||||
```
|
||||
|
||||
Let us look at the details.
|
||||
|
||||
In essence, this method will be invoked each time during the initial assignment for a topic partition or after a rebalance.
|
||||
For better illustration, let us assume that our topic is `foo` and it has 4 partitions.
|
||||
Initially, we are only starting a single consumer in the group and this consumer will consume from all partitions.
|
||||
When the consumer starts for the first time, all 4 partitions are initially assigned to it.
|
||||
However, we do not want the partitions to start consuming at the default offset (`earliest`, since we define a group); rather, we want each partition to consume after seeking to an arbitrary offset.
|
||||
Imagine that you have a business case to consume from certain offsets as below.
|
||||
|
||||
```
|
||||
Partition start offset
|
||||
|
||||
0 1000
|
||||
1 2000
|
||||
2 2000
|
||||
3 1000
|
||||
```
|
||||
|
||||
This could be achieved by implementing the above method as below.
|
||||
|
||||
```
|
||||
|
||||
@Override
|
||||
public void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions, boolean initial) {
|
||||
|
||||
Map<TopicPartition, Long> topicPartitionOffset = new HashMap<>();
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 0), 1000L);
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 1), 2000L);
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 2), 2000L);
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 3), 1000L);
|
||||
|
||||
if (initial) {
|
||||
partitions.forEach(tp -> {
|
||||
if (topicPartitionOffset.containsKey(tp)) {
|
||||
final Long offset = topicPartitionOffset.get(tp);
|
||||
try {
|
||||
consumer.seek(tp, offset);
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Handle exceptions carefully.
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This is just a rudimentary implementation.
|
||||
Real world use cases are much more complex than this and you need to adjust accordingly, but this certainly gives you a basic sketch.
|
||||
When the consumer `seek` call fails, it may throw runtime exceptions, and you need to decide what to do in those cases.
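To register this implementation with the binder, you expose it as a bean of type `KafkaBindingRebalanceListener`; here is a minimal sketch (the seek logic from the snippet above would go inside the overridden method):

```
@Bean
public KafkaBindingRebalanceListener rebalanceListener() {
    return new KafkaBindingRebalanceListener() {

        @Override
        public void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
                Collection<TopicPartition> partitions, boolean initial) {
            // Seek logic (as shown in the implementation above) goes here.
        }
    };
}
```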
|
||||
|
||||
==== What if we start a second consumer with the same group id?
|
||||
|
||||
When we add a second consumer, a rebalance will occur and some partitions will be moved around.
|
||||
Let's say that the new consumer gets partitions `2` and `3`.
|
||||
When this new Spring Cloud Stream consumer calls this `onPartitionsAssigned` method, it will see that this is the initial assignment for partitions `2` and `3` on this consumer.
|
||||
Therefore, it will do the seek operation because of the conditional check on the `initial` argument.
|
||||
In the case of the first consumer, it now only has partitions `0` and `1`.
|
||||
However, for this consumer it was simply a rebalance event and not considered an initial assignment.
|
||||
Thus, it will not re-seek to the given offsets because of the conditional check on the `initial` argument.
|
||||
|
||||
=== How do I manually acknowledge using Kafka binder?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Using Kafka binder, I want to manually acknowledge messages in my consumer.
|
||||
How do I do that?
|
||||
|
||||
==== Solution
|
||||
|
||||
By default, the Kafka binder delegates to the default commit settings of the Spring for Apache Kafka project.
|
||||
The default `ackMode` in Spring Kafka is `batch`.
|
||||
See https://docs.spring.io/spring-kafka/docs/current/reference/html/#committing-offsets[here] for more details on that.
|
||||
|
||||
There are situations in which you want to disable this default commit behavior and rely on manual commits.
|
||||
The following steps allow you to do that.
|
||||
|
||||
Set the property `spring.cloud.stream.kafka.bindings.<binding-name>.consumer.ackMode` to either `MANUAL` or `MANUAL_IMMEDIATE`.
|
||||
When it is set like that, a header called `kafka_acknowledgment` (from `KafkaHeaders.ACKNOWLEDGMENT`) is present in the message received by the consumer method.
|
||||
|
||||
For example, imagine this as your consumer method.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<Message<String>> myConsumer() {
|
||||
return msg -> {
|
||||
Acknowledgment acknowledgment = msg.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Then you set the property `spring.cloud.stream.kafka.bindings.myConsumer-in-0.consumer.ackMode` to `MANUAL` or `MANUAL_IMMEDIATE`.
|
||||
|
||||
=== How do I override the default binding names in Spring Cloud Stream?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Spring Cloud Stream creates default bindings based on the function definition and signature, but how do I override these with more domain-friendly names?
|
||||
|
||||
==== Solution
|
||||
|
||||
Assume that the following is your function signature.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Function<String, String> uppercase() {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
By default, Spring Cloud Stream will create the bindings as below.
|
||||
|
||||
1. uppercase-in-0
|
||||
2. uppercase-out-0
|
||||
|
||||
You can override these binding names by using the following properties.
|
||||
|
||||
```
|
||||
spring.cloud.stream.function.bindings.uppercase-in-0=my-transformer-in
|
||||
spring.cloud.stream.function.bindings.uppercase-out-0=my-transformer-out
|
||||
```
|
||||
|
||||
After this, all binding properties must be set on the new names, `my-transformer-in` and `my-transformer-out`.
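For example, a destination now has to be configured against the new names (the topic names below are only placeholders):

```
spring.cloud.stream.bindings.my-transformer-in.destination=my-input-topic
spring.cloud.stream.bindings.my-transformer-out.destination=my-output-topic
```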
|
||||
|
||||
Here is another example with Kafka Streams and multiple inputs.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public BiFunction<KStream<String, Order>, KTable<String, Account>, KStream<String, EnrichedOrder>> processOrder() {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
By default, Spring Cloud Stream will create three different binding names for this function.
|
||||
|
||||
1. processOrder-in-0
|
||||
2. processOrder-in-1
|
||||
3. processOrder-out-0
|
||||
|
||||
You have to use these binding names each time you want to set some configuration on these bindings.
|
||||
Suppose you don't like that and want to use more domain-friendly and readable binding names, for example something like the following.
|
||||
|
||||
1. orders
|
||||
2. accounts
|
||||
3. enrichedOrders
|
||||
|
||||
You can easily do that by setting these three properties:
|
||||
|
||||
1. spring.cloud.stream.function.bindings.processOrder-in-0=orders
|
||||
2. spring.cloud.stream.function.bindings.processOrder-in-1=accounts
|
||||
3. spring.cloud.stream.function.bindings.processOrder-out-0=enrichedOrders
|
||||
|
||||
Once you do that, the default binding names are overridden, and any properties you want to set must use these new binding names.
|
||||
|
||||
=== How do I send a message key as part of my record?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I need to send a key along with the payload of the record. Is there a way to do that in Spring Cloud Stream?
|
||||
|
||||
==== Solution
|
||||
|
||||
It is often necessary to send a record with both a key and a value, similar to an associative data structure such as a map.
|
||||
Spring Cloud Stream allows you to do that in a straightforward manner.
|
||||
The following is a basic blueprint for doing this, but you may want to adapt it to your particular use case.
|
||||
|
||||
Here is a sample producer method (that is, a `Supplier`).
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supplier() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader(KafkaHeaders.MESSAGE_KEY, "my-foo").build();
|
||||
}
|
||||
```
|
||||
|
||||
This is a trivial function that sends a message with a `String` payload, but also with a key.
|
||||
Note that we set the key as a message header using `KafkaHeaders.MESSAGE_KEY`.
|
||||
|
||||
If we want to use a header other than the default `kafka_messageKey` for the key, we need to specify this property in the configuration:
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.supplier-out-0.producer.messageKeyExpression=headers['my-special-key']
|
||||
```
|
||||
|
||||
Note that we use the binding name `supplier-out-0`, since that is derived from our function name; update it accordingly for your application.
|
||||
|
||||
Then, we set this new header when we produce the message, as shown below.
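Here is a sketch of the supplier adjusted to set that hypothetical `my-special-key` header:

```
@Bean
public Supplier<Message<String>> supplier() {
    return () -> MessageBuilder.withPayload("foo")
            .setHeader("my-special-key", "my-foo")
            .build();
}
```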
|
||||
|
||||
=== How do I use native serializer and deserializer instead of message conversion done by Spring Cloud Stream?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Instead of using the message converters in Spring Cloud Stream, I want to use native Serializer and Deserializer in Kafka.
|
||||
By default, Spring Cloud Stream takes care of this conversion using its internal built-in message converters.
|
||||
How can I bypass this and delegate the responsibility to Kafka?
|
||||
|
||||
==== Solution
|
||||
|
||||
This is really easy to do.
|
||||
|
||||
All you have to do is provide the following property to enable native serialization.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.producer.useNativeEncoding: true
|
||||
```
|
||||
|
||||
Then, you also need to set the serializers.
|
||||
There are a couple of ways to do this.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.key.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
```
|
||||
|
||||
or using the binder configuration.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.binder.configuration.key.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
spring.cloud.stream.kafka.binder.configuration.value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
```
|
||||
|
||||
When set at the binder level, the configuration applies to all bindings, whereas setting it on a binding applies only to that binding.
|
||||
|
||||
On the deserializing side, you just need to provide the deserializers as configuration.
|
||||
|
||||
For example,
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.configuration.key.deserializer: org.apache.kafka.common.serialization.StringDeserializer
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.configuration.value.deserializer: org.apache.kafka.common.serialization.StringDeserializer
|
||||
```
|
||||
|
||||
You can also set them at the binder level.
|
||||
|
||||
There is an optional property that you can set to force native decoding.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.useNativeDecoding: true
|
||||
```
|
||||
|
||||
However, in the case of the Kafka binder, this is unnecessary, because by the time the record reaches the binder, Kafka has already deserialized it using the configured deserializers.
|
||||
|
||||
=== Explain how offset resetting works in Kafka Streams binder
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
By default, the Kafka Streams binder always starts from the `earliest` offset for a new consumer.
|
||||
Sometimes, it is beneficial or required by the application to start from the latest offset.
|
||||
Kafka Streams binder allows you to do that.
|
||||
|
||||
==== Solution
|
||||
|
||||
Before we look at the solution, let us look at the following scenario.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public BiConsumer<KStream<Object, Object>, KTable<Object, Object>> myBiConsumer() {
|
||||
(s, t) -> s.join(t, ...)
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
We have a `BiConsumer` bean that requires two input bindings.
|
||||
In this case, the first binding is for a `KStream` and the second one is for a `KTable`.
|
||||
When running this application for the first time, by default, both bindings start from the `earliest` offset.
|
||||
What if I want to start from the `latest` offset due to some requirement?
|
||||
You can do this by setting the following properties.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.streams.bindings.myBiConsumer-in-0.consumer.startOffset: latest
|
||||
spring.cloud.stream.kafka.streams.bindings.myBiConsumer-in-1.consumer.startOffset: latest
|
||||
```
|
||||
|
||||
If you want only one binding to start from the `latest` offset and the other to consume from the default `earliest`, then leave the latter binding out of the configuration.
|
||||
|
||||
Keep in mind that, once there are committed offsets, these settings are *not* honored and the committed offsets take precedence.
|
||||
|
||||
=== Keeping track of successful sending of records (producing) in Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I have a Kafka producer application, and I want to keep track of all my successful sends.
|
||||
|
||||
==== Solution
|
||||
|
||||
Let us assume that we have the following supplier in the application.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supplier() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader(KafkaHeaders.MESSAGE_KEY, "my-foo").build();
|
||||
}
|
||||
```
|
||||
|
||||
Then, we need to define a new `MessageChannel` bean to capture all the successful send information.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public MessageChannel fooRecordChannel() {
|
||||
return new DirectChannel();
|
||||
}
|
||||
```
|
||||
|
||||
Next, define this property in the application configuration to provide the bean name for the `recordMetadataChannel`.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.supplier-out-0.producer.recordMetadataChannel: fooRecordChannel
|
||||
```
|
||||
|
||||
At this point, information about successful sends will be sent to the `fooRecordChannel`.
|
||||
|
||||
You can write an `IntegrationFlow` as below to see the information.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public IntegrationFlow integrationFlow() {
|
||||
return f -> f.channel("fooRecordChannel")
|
||||
.handle((payload, messageHeaders) -> payload);
|
||||
}
|
||||
```
|
||||
|
||||
In the `handle` method, the payload is what got sent to Kafka and the message headers contain a special key called `kafka_recordMetadata`.
|
||||
Its value is a `RecordMetadata` that contains information about the topic partition, the offset of the record, and so on.
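For example, the flow above could be expanded to log that metadata (a sketch; `RecordMetadata` comes from the Kafka clients library):

```
@Bean
public IntegrationFlow integrationFlow() {
    return f -> f.channel("fooRecordChannel")
            .handle((payload, messageHeaders) -> {
                // The binder puts the RecordMetadata of the sent record into this header.
                RecordMetadata metadata = (RecordMetadata) messageHeaders.get("kafka_recordMetadata");
                System.out.println(metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());
                return payload;
            });
}
```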
|
||||
|
||||
=== Adding custom header mapper in Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I have a Kafka producer application that sets some headers, but they are missing in the consumer application. Why is that?
|
||||
|
||||
==== Solution
|
||||
|
||||
Under normal circumstances, this should be fine.
|
||||
|
||||
Imagine you have the following producer.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supply() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader("foo", "bar").build();
|
||||
}
|
||||
```
|
||||
|
||||
On the consumer side, you should still see the header "foo", and the following should not give you any issues.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<Message<String>> consume() {
|
||||
return s -> {
|
||||
final String foo = (String)s.getHeaders().get("foo");
|
||||
System.out.println(foo);
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
If you provide a https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/3.1.3/reference/html/spring-cloud-stream-binder-kafka.html#_kafka_binder_properties[custom header mapper] in the application, then this won't work.
|
||||
Let's say you have an empty `KafkaHeaderMapper` in the application.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public KafkaHeaderMapper kafkaBinderHeaderMapper() {
|
||||
return new KafkaHeaderMapper() {
|
||||
@Override
|
||||
public void fromHeaders(MessageHeaders headers, Headers target) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> target) {
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
If that is your implementation, then you will miss the `foo` header on the consumer.
|
||||
Chances are that you have some logic inside those `KafkaHeaderMapper` methods.
|
||||
You need the following to populate the `foo` header.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public KafkaHeaderMapper kafkaBinderHeaderMapper() {
|
||||
return new KafkaHeaderMapper() {
|
||||
@Override
|
||||
public void fromHeaders(MessageHeaders headers, Headers target) {
|
||||
final String foo = (String) headers.get("foo");
|
||||
target.add("foo", foo.getBytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> target) {
|
||||
final Header foo = source.lastHeader("foo");
|
||||
target.put("foo", new String(foo.value()));
|
||||
}
|
||||
    };
}
|
||||
```
|
||||
|
||||
That will properly populate the `foo` header from the producer to the consumer.
|
||||
|
||||
==== Special note on the id header
|
||||
|
||||
In Spring Cloud Stream, the `id` header is a special header, but some applications may want to have special custom id headers - something like `custom-id` or `ID` or `Id`.
|
||||
The first one (`custom-id`) will propagate without any custom header mapper from producer to consumer.
|
||||
However, if you produce with a variant of the framework-reserved `id` header - such as `ID`, `Id`, or `iD` - then you will run into issues with the internals of the framework.
|
||||
See this https://stackoverflow.com/questions/68412600/change-the-behaviour-in-spring-cloud-stream-make-header-matcher-case-sensitive[StackOverflow thread] for more context on this use case.
|
||||
In that case, you must use a custom `KafkaHeaderMapper` to map the case-sensitive id header.
|
||||
For example, let's say you have the following producer.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supply() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader("Id", "my-id").build();
|
||||
}
|
||||
```
|
||||
|
||||
The header `Id` above will be gone from the consuming side as it clashes with the framework `id` header.
|
||||
You can provide a custom `KafkaHeaderMapper` to solve this issue.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public KafkaHeaderMapper kafkaBinderHeaderMapper1() {
|
||||
return new KafkaHeaderMapper() {
|
||||
@Override
|
||||
public void fromHeaders(MessageHeaders headers, Headers target) {
|
||||
final String myId = (String) headers.get("Id");
|
||||
target.add("Id", myId.getBytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> target) {
|
||||
final Header Id = source.lastHeader("Id");
|
||||
target.put("Id", new String(Id.value()));
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
By doing this, both `id` and `Id` headers will be available from the producer to the consumer side.
|
||||
|
||||
=== Producing to multiple topics in a transaction
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
How do I produce transactional messages to multiple Kafka topics?
|
||||
|
||||
For more context, see this https://stackoverflow.com/questions/68928091/dlq-bounded-retry-and-eos-when-producing-to-multiple-topics-using-spring-cloud[StackOverflow question].
|
||||
|
||||
==== Solution
|
||||
|
||||
Use the transactional support in the Kafka binder and then provide an `AfterRollbackProcessor`.
|
||||
In order to produce to multiple topics, use the `StreamBridge` API.
|
||||
|
||||
Below are the code snippets for this:
|
||||
|
||||
```
|
||||
@Autowired
|
||||
StreamBridge bridge;
|
||||
|
||||
@Bean
|
||||
Consumer<String> input() {
|
||||
return str -> {
|
||||
System.out.println(str);
|
||||
this.bridge.send("left", str.toUpperCase());
|
||||
this.bridge.send("right", str.toLowerCase());
|
||||
if (str.equals("Fail")) {
|
||||
throw new RuntimeException("test");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> customizer(BinderFactory binders) {
|
||||
return (container, dest, group) -> {
|
||||
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
|
||||
MessageChannel.class)).getTransactionalProducerFactory();
|
||||
KafkaTemplate<byte[], byte[]> template = new KafkaTemplate<>(pf);
|
||||
DefaultAfterRollbackProcessor rollbackProcessor = rollbackProcessor(template);
|
||||
container.setAfterRollbackProcessor(rollbackProcessor);
|
||||
};
|
||||
}
|
||||
|
||||
DefaultAfterRollbackProcessor rollbackProcessor(KafkaTemplate<byte[], byte[]> template) {
|
||||
return new DefaultAfterRollbackProcessor<>(
|
||||
new DeadLetterPublishingRecoverer(template), new FixedBackOff(2000L, 2L), template, true);
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
==== Required Configuration
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix: tx-
|
||||
spring.cloud.stream.kafka.binder.required-acks=all
|
||||
spring.cloud.stream.bindings.input-in-0.group=foo
|
||||
spring.cloud.stream.bindings.input-in-0.destination=input
|
||||
spring.cloud.stream.bindings.left.destination=left
|
||||
spring.cloud.stream.bindings.right.destination=right
|
||||
|
||||
spring.cloud.stream.kafka.bindings.input-in-0.consumer.maxAttempts=1
|
||||
```
|
||||
|
||||
In order to test, you can use the following:
|
||||
|
||||
```
|
||||
@Bean
|
||||
public ApplicationRunner runner(KafkaTemplate<byte[], byte[]> template) {
|
||||
return args -> {
|
||||
System.in.read();
|
||||
template.send("input", "Fail".getBytes());
|
||||
template.send("input", "Good".getBytes());
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Some important notes:
|
||||
|
||||
Please ensure that you don't have any DLQ settings in the application configuration, as we manually configure the DLT (by default, records are published to a topic named `input.DLT`, based on the initial consumer function).
|
||||
Also, set `maxAttempts` on the consumer binding to `1` in order to avoid retries by the binder.
|
||||
A record will then be tried a maximum of 3 times in the example above (the initial try plus the 2 attempts in the `FixedBackOff`).
|
||||
|
||||
See the https://stackoverflow.com/questions/68928091/dlq-bounded-retry-and-eos-when-producing-to-multiple-topics-using-spring-cloud[StackOverflow thread] for more details on how to test this code.
|
||||
If you are using Spring Cloud Stream to test it by adding more consumer functions, make sure to set the `isolation-level` on the consumer binding to `read-committed`.
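One way to do that is to set the Kafka `isolation.level` consumer property through the binding configuration (a sketch; replace `<binding-name>` with the name of your additional consumer binding):

```
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.configuration.isolation.level: read_committed
```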
|
||||
|
||||
This https://stackoverflow.com/questions/68941306/spring-cloud-stream-database-transaction-does-not-roll-back[StackOverflow thread] is also related to this discussion.
|
||||
|
||||
=== Pitfalls to avoid when running multiple pollable consumers
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
How can I run multiple instances of the pollable consumers and generate unique `client.id` for each instance?
|
||||
|
||||
==== Solution
|
||||
|
||||
Assume that I have the following definition:
|
||||
|
||||
```
|
||||
spring.cloud.stream.pollable-source: foo
|
||||
spring.cloud.stream.bindings.foo-in-0.group: my-group
|
||||
```
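For context, such a pollable source is not message-driven; the application polls it, for example from a scheduled method. Here is a minimal sketch, assuming scheduling is enabled and the auto-configured `PollableMessageSource` is injected into a component:

```
@Autowired
private PollableMessageSource source;

@Scheduled(fixedDelay = 5000)
public void poll() {
    // Poll one message (if available) and hand it to the handler.
    source.poll(message -> {
        System.out.println(message.getPayload());
    });
}
```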
|
||||
|
||||
When running the application, the Kafka consumer generates a client.id (something like `consumer-my-group-1`).
|
||||
For each instance of the application that is running, this `client.id` will be the same, causing unexpected issues.
|
||||
|
||||
In order to fix this, you can add the following property on each instance of the application:
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.foo-in-0.consumer.configuration.client.id=${client.id}
|
||||
```
|
||||
|
||||
See this https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1139[GitHub issue] for more details.
|
||||
|
||||
36 mvnw vendored
@@ -8,7 +8,7 @@
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
@@ -19,7 +19,7 @@
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Maven Start Up Batch script
|
||||
# Maven2 Start Up Batch script
|
||||
#
|
||||
# Required ENV vars:
|
||||
# ------------------
|
||||
@@ -114,6 +114,7 @@ if $mingw ; then
|
||||
M2_HOME="`(cd "$M2_HOME"; pwd)`"
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
|
||||
# TODO classpath?
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
@@ -211,11 +212,7 @@ else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
|
||||
fi
|
||||
if [ -n "$MVNW_REPOURL" ]; then
|
||||
jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
else
|
||||
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
fi
|
||||
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
|
||||
while IFS="=" read key value; do
|
||||
case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
|
||||
esac
|
||||
@@ -224,38 +221,22 @@ else
|
||||
echo "Downloading from: $jarUrl"
|
||||
fi
|
||||
wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
|
||||
if $cygwin; then
|
||||
wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
|
||||
fi
|
||||
|
||||
if command -v wget > /dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found wget ... using wget"
|
||||
fi
|
||||
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||
wget "$jarUrl" -O "$wrapperJarPath"
|
||||
else
|
||||
wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
|
||||
fi
|
||||
wget "$jarUrl" -O "$wrapperJarPath"
|
||||
elif command -v curl > /dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found curl ... using curl"
|
||||
fi
|
||||
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||
curl -o "$wrapperJarPath" "$jarUrl" -f
|
||||
else
|
||||
curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
|
||||
fi
|
||||
|
||||
curl -o "$wrapperJarPath" "$jarUrl"
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Falling back to using Java to download"
|
||||
fi
|
||||
javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
|
||||
# For Cygwin, switch paths to Windows format before running javac
|
||||
if $cygwin; then
|
||||
javaClass=`cygpath --path --windows "$javaClass"`
|
||||
fi
|
||||
if [ -e "$javaClass" ]; then
|
||||
if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
@@ -296,11 +277,6 @@ if $cygwin; then
|
||||
MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
|
||||
fi
|
||||
|
||||
# Provide a "standardized" way to retrieve the CLI args that will
|
||||
# work with both Windows and non-Windows executions.
|
||||
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
|
||||
export MAVEN_CMD_LINE_ARGS
|
||||
|
||||
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
exec "$JAVACMD" \
|
||||
|
||||
343 mvnw.cmd vendored Normal file → Executable file
@@ -1,182 +1,161 @@
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Licensed to the Apache Software Foundation (ASF) under one
|
||||
@REM or more contributor license agreements. See the NOTICE file
|
||||
@REM distributed with this work for additional information
|
||||
@REM regarding copyright ownership. The ASF licenses this file
|
||||
@REM to you under the Apache License, Version 2.0 (the
|
||||
@REM "License"); you may not use this file except in compliance
|
||||
@REM with the License. You may obtain a copy of the License at
|
||||
@REM
|
||||
@REM http://www.apache.org/licenses/LICENSE-2.0
|
||||
@REM
|
||||
@REM Unless required by applicable law or agreed to in writing,
|
||||
@REM software distributed under the License is distributed on an
|
||||
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
@REM KIND, either express or implied. See the License for the
|
||||
@REM specific language governing permissions and limitations
|
||||
@REM under the License.
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Maven Start Up Batch script
|
||||
@REM
|
||||
@REM Required ENV vars:
|
||||
@REM JAVA_HOME - location of a JDK home dir
|
||||
@REM
|
||||
@REM Optional ENV vars
|
||||
@REM M2_HOME - location of maven2's installed home dir
|
||||
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
|
||||
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
|
||||
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
@REM e.g. to debug Maven itself, use
|
||||
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
|
||||
@echo off
|
||||
@REM set title of command window
|
||||
title %0
|
||||
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
|
||||
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
|
||||
|
||||
@REM set %HOME% to equivalent of $HOME
|
||||
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
|
||||
|
||||
@REM Execute a user defined script before this one
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
|
||||
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
|
||||
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
|
||||
:skipRcPre
|
||||
|
||||
@setlocal
|
||||
|
||||
set ERROR_CODE=0
|
||||
|
||||
@REM To isolate internal variables from possible post scripts, we use another setlocal
|
||||
@setlocal
|
||||
|
||||
@REM ==== START VALIDATION ====
|
||||
if not "%JAVA_HOME%" == "" goto OkJHome
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME not found in your environment. >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
:OkJHome
|
||||
if exist "%JAVA_HOME%\bin\java.exe" goto init
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME is set to an invalid directory. >&2
|
||||
echo JAVA_HOME = "%JAVA_HOME%" >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
@REM ==== END VALIDATION ====
|
||||
|
||||
:init
|
||||
|
||||
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
|
||||
@REM Fallback to current working directory if not found.
|
||||
|
||||
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
|
||||
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
|
||||
|
||||
set EXEC_DIR=%CD%
|
||||
set WDIR=%EXEC_DIR%
|
||||
:findBaseDir
|
||||
IF EXIST "%WDIR%"\.mvn goto baseDirFound
|
||||
cd ..
|
||||
IF "%WDIR%"=="%CD%" goto baseDirNotFound
|
||||
set WDIR=%CD%
|
||||
goto findBaseDir
|
||||
|
||||
:baseDirFound
|
||||
set MAVEN_PROJECTBASEDIR=%WDIR%
|
||||
cd "%EXEC_DIR%"
|
||||
goto endDetectBaseDir
|
||||
|
||||
:baseDirNotFound
|
||||
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
|
||||
cd "%EXEC_DIR%"
|
||||
|
||||
:endDetectBaseDir
|
||||
|
||||
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
|
||||
|
||||
@setlocal EnableExtensions EnableDelayedExpansion
|
||||
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
|
||||
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
|
||||
|
||||
:endReadAdditionalConfig
|
||||
|
||||
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
|
||||
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
|
||||
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
|
||||
FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
|
||||
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
|
||||
)
|
||||
|
||||
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
if exist %WRAPPER_JAR% (
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Found %WRAPPER_JAR%
|
||||
)
|
||||
) else (
|
||||
if not "%MVNW_REPOURL%" == "" (
|
||||
SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
)
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Couldn't find %WRAPPER_JAR%, downloading it ...
|
||||
echo Downloading from: %DOWNLOAD_URL%
|
||||
)
|
||||
|
||||
powershell -Command "&{"^
|
||||
"$webclient = new-object System.Net.WebClient;"^
|
||||
"if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
|
||||
"$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
|
||||
"}"^
|
||||
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
|
||||
"}"
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Finished downloading %WRAPPER_JAR%
|
||||
)
|
||||
)
|
||||
@REM End of extension
|
||||
|
||||
@REM Provide a "standardized" way to retrieve the CLI args that will
|
||||
@REM work with both Windows and non-Windows executions.
|
||||
set MAVEN_CMD_LINE_ARGS=%*
|
||||
|
||||
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
|
||||
if ERRORLEVEL 1 goto error
|
||||
goto end
|
||||
|
||||
:error
|
||||
set ERROR_CODE=1
|
||||
|
||||
:end
|
||||
@endlocal & set ERROR_CODE=%ERROR_CODE%
|
||||
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
|
||||
@REM check for post script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
|
||||
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
|
||||
:skipRcPost
|
||||
|
||||
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
|
||||
if "%MAVEN_BATCH_PAUSE%" == "on" pause
|
||||
|
||||
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
|
||||
|
||||
exit /B %ERROR_CODE%
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Licensed to the Apache Software Foundation (ASF) under one
|
||||
@REM or more contributor license agreements. See the NOTICE file
|
||||
@REM distributed with this work for additional information
|
||||
@REM regarding copyright ownership. The ASF licenses this file
|
||||
@REM to you under the Apache License, Version 2.0 (the
|
||||
@REM "License"); you may not use this file except in compliance
|
||||
@REM with the License. You may obtain a copy of the License at
|
||||
@REM
|
||||
@REM https://www.apache.org/licenses/LICENSE-2.0
|
||||
@REM
|
||||
@REM Unless required by applicable law or agreed to in writing,
|
||||
@REM software distributed under the License is distributed on an
|
||||
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
@REM KIND, either express or implied. See the License for the
|
||||
@REM specific language governing permissions and limitations
|
||||
@REM under the License.
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Maven2 Start Up Batch script
|
||||
@REM
|
||||
@REM Required ENV vars:
|
||||
@REM JAVA_HOME - location of a JDK home dir
|
||||
@REM
|
||||
@REM Optional ENV vars
|
||||
@REM M2_HOME - location of maven2's installed home dir
|
||||
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
|
||||
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
|
||||
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
@REM e.g. to debug Maven itself, use
|
||||
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
|
||||
@echo off
|
||||
@REM set title of command window
|
||||
title %0
|
||||
@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on'
|
||||
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
|
||||
|
||||
@REM set %HOME% to equivalent of $HOME
|
||||
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
|
||||
|
||||
@REM Execute a user defined script before this one
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
|
||||
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
|
||||
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
|
||||
:skipRcPre
|
||||
|
||||
@setlocal
|
||||
|
||||
set ERROR_CODE=0
|
||||
|
||||
@REM To isolate internal variables from possible post scripts, we use another setlocal
|
||||
@setlocal
|
||||
|
||||
@REM ==== START VALIDATION ====
|
||||
if not "%JAVA_HOME%" == "" goto OkJHome
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME not found in your environment. >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
:OkJHome
|
||||
if exist "%JAVA_HOME%\bin\java.exe" goto init
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME is set to an invalid directory. >&2
|
||||
echo JAVA_HOME = "%JAVA_HOME%" >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
@REM ==== END VALIDATION ====
|
||||
|
||||
:init
|
||||
|
||||
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
|
||||
@REM Fallback to current working directory if not found.
|
||||
|
||||
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
|
||||
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
|
||||
|
||||
set EXEC_DIR=%CD%
|
||||
set WDIR=%EXEC_DIR%
|
||||
:findBaseDir
|
||||
IF EXIST "%WDIR%"\.mvn goto baseDirFound
|
||||
cd ..
|
||||
IF "%WDIR%"=="%CD%" goto baseDirNotFound
|
||||
set WDIR=%CD%
|
||||
goto findBaseDir
|
||||
|
||||
:baseDirFound
|
||||
set MAVEN_PROJECTBASEDIR=%WDIR%
|
||||
cd "%EXEC_DIR%"
|
||||
goto endDetectBaseDir
|
||||
|
||||
:baseDirNotFound
|
||||
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
|
||||
cd "%EXEC_DIR%"
|
||||
|
||||
:endDetectBaseDir
|
||||
|
||||
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
|
||||
|
||||
@setlocal EnableExtensions EnableDelayedExpansion
|
||||
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
|
||||
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
|
||||
|
||||
:endReadAdditionalConfig
|
||||
|
||||
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
|
||||
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
|
||||
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"
|
||||
FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO (
|
||||
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
|
||||
)
|
||||
|
||||
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
if exist %WRAPPER_JAR% (
|
||||
echo Found %WRAPPER_JAR%
|
||||
) else (
|
||||
echo Couldn't find %WRAPPER_JAR%, downloading it ...
|
||||
echo Downloading from: %DOWNLOAD_URL%
|
||||
powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
|
||||
echo Finished downloading %WRAPPER_JAR%
|
||||
)
|
||||
@REM End of extension
|
||||
|
||||
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
|
||||
if ERRORLEVEL 1 goto error
|
||||
goto end
|
||||
|
||||
:error
|
||||
set ERROR_CODE=1
|
||||
|
||||
:end
|
||||
@endlocal & set ERROR_CODE=%ERROR_CODE%
|
||||
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
|
||||
@REM check for post script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
|
||||
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
|
||||
:skipRcPost
|
||||
|
||||
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
|
||||
if "%MAVEN_BATCH_PAUSE%" == "on" pause
|
||||
|
||||
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
|
||||
|
||||
exit /B %ERROR_CODE%
|
||||
|
||||
205 pom.xml
@@ -2,29 +2,20 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.2.1</version>
|
||||
<version>2.2.0.M1</version>
|
||||
<packaging>pom</packaging>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-build</artifactId>
|
||||
<version>3.1.0</version>
|
||||
<version>2.1.4.RELEASE</version>
|
||||
<relativePath />
|
||||
</parent>
|
||||
<scm>
|
||||
<url>https://github.com/spring-cloud/spring-cloud-stream-binder-kafka</url>
|
||||
<connection>scm:git:git://github.com/spring-cloud/spring-cloud-stream-binder-kafka.git
|
||||
</connection>
|
||||
<developerConnection>
|
||||
scm:git:ssh://git@github.com/spring-cloud/spring-cloud-stream-binder-kafka.git
|
||||
</developerConnection>
|
||||
<tag>HEAD</tag>
|
||||
</scm>
|
||||
<properties>
|
||||
<java.version>1.8</java.version>
|
||||
<spring-kafka.version>2.8.0</spring-kafka.version>
|
||||
<spring-integration-kafka.version>5.5.5</spring-integration-kafka.version>
|
||||
<kafka.version>3.0.0</kafka.version>
|
||||
<spring-cloud-stream.version>3.2.1</spring-cloud-stream.version>
|
||||
<spring-kafka.version>2.2.2.RELEASE</spring-kafka.version>
|
||||
<spring-integration-kafka.version>3.1.0.RELEASE</spring-integration-kafka.version>
|
||||
<kafka.version>2.0.0</kafka.version>
|
||||
<spring-cloud-stream.version>2.2.0.M1</spring-cloud-stream.version>
|
||||
<maven-checkstyle-plugin.failsOnError>true</maven-checkstyle-plugin.failsOnError>
|
||||
<maven-checkstyle-plugin.failsOnViolation>true</maven-checkstyle-plugin.failsOnViolation>
|
||||
<maven-checkstyle-plugin.includeTestSourceDirectory>true</maven-checkstyle-plugin.includeTestSourceDirectory>
|
||||
@@ -35,7 +26,7 @@
|
||||
<module>spring-cloud-stream-binder-kafka-core</module>
|
||||
<module>spring-cloud-stream-binder-kafka-streams</module>
|
||||
<module>docs</module>
|
||||
</modules>
|
||||
</modules>
|
||||
|
||||
<dependencyManagement>
|
||||
<dependencies>
|
||||
@@ -59,7 +50,13 @@
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
@@ -95,13 +92,7 @@
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.13</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.13</artifactId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
<version>${kafka.version}</version>
|
||||
@@ -121,23 +112,14 @@
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<classifier>test</classifier>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-schema</artifactId>
|
||||
<version>${spring-cloud-stream.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.junit.vintage</groupId>
|
||||
<artifactId>junit-vintage-engine</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<pluginManagement>
|
||||
<plugins>
|
||||
@@ -163,16 +145,6 @@
|
||||
</plugins>
|
||||
</pluginManagement>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>${maven-compiler-plugin.version}</version>
|
||||
<configuration>
|
||||
<source>${java.version}</source>
|
||||
<target>${java.version}</target>
|
||||
<compilerArgument>-parameters</compilerArgument>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||
@@ -183,91 +155,66 @@
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>spring</id>
|
||||
|
||||
|
||||
</profile>
|
||||
<profile>
|
||||
<id>coverage</id>
|
||||
<activation>
|
||||
<property>
|
||||
<name>env.TRAVIS</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
</activation>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.jacoco</groupId>
|
||||
<artifactId>jacoco-maven-plugin</artifactId>
|
||||
<version>0.7.9</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>agent</id>
|
||||
<goals>
|
||||
<goal>prepare-agent</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>report</id>
|
||||
<phase>test</phase>
|
||||
<goals>
|
||||
<goal>report</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>https://repo.spring.io/libs-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
<releases>
|
||||
<enabled>false</enabled>
|
||||
</releases>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>https://repo.spring.io/libs-milestone-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>https://repo.spring.io/release</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</repository>
|
||||
</repositories>
|
||||
<pluginRepositories>
|
||||
<pluginRepository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>https://repo.spring.io/libs-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
<releases>
|
||||
<enabled>false</enabled>
|
||||
</releases>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>https://repo.spring.io/libs-milestone-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>https://repo.spring.io/libs-release-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
</pluginRepositories>
|
||||
</profile>
|
||||
</profiles>
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>https://repo.spring.io/libs-snapshot-local</url>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring milestones</name>
|
||||
<url>https://repo.spring.io/libs-milestone-local</url>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>rsocket-snapshots</id>
|
||||
<name>RSocket Snapshots</name>
|
||||
<url>https://oss.jfrog.org/oss-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>https://repo.spring.io/release</url>
|
||||
</repository>
|
||||
</repositories>
|
||||
<pluginRepositories>
|
||||
<pluginRepository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>https://repo.spring.io/snapshot</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>https://repo.spring.io/milestone</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>https://repo.spring.io/release</url>
|
||||
</pluginRepository>
|
||||
</pluginRepositories>
|
||||
<reporting>
|
||||
<plugins>
|
||||
<plugin>
|
||||
|
||||
@@ -4,7 +4,7 @@
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>3.2.1</version>
		<version>2.2.0.M1</version>
	</parent>
	<artifactId>spring-cloud-starter-stream-kafka</artifactId>
	<description>Spring Cloud Starter Stream Kafka</description>

@@ -0,0 +1 @@
provides: spring-cloud-starter-stream-kafka
@@ -5,7 +5,7 @@
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>3.2.1</version>
		<version>2.2.0.M1</version>
	</parent>
	<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
	<description>Spring Cloud Stream Kafka Binder Core</description>

@@ -1,5 +1,5 @@
/*
 * Copyright 2020-2020 the original author or authors.
 * Copyright 2018-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,24 +14,26 @@
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.config;
package org.springframework.cloud.stream.binder.kafka.properties;

import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.ProducerFactory;
import java.util.Map;

/**
 * Called by the binder to customize the factories.
 * Properties for configuring topics.
 *
 * @author Gary Russell
 * @since 3.0.6
 *
 * @since 2.0
 * @deprecated in favor of {@link KafkaTopicProperties}
 */
public interface ClientFactoryCustomizer {
@Deprecated
public class KafkaAdminProperties extends KafkaTopicProperties {

	default void configure(ProducerFactory<?, ?> pf) {
	public Map<String, String> getConfiguration() {
		return getProperties();
	}

	default void configure(ConsumerFactory<?, ?> cf) {
	public void setConfiguration(Map<String, String> configuration) {
		setProperties(configuration);
	}

}
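Since the deprecated KafkaAdminProperties shown above now extends KafkaTopicProperties, its configuration map is only an alias for the inherited topic properties. A minimal sketch of that delegation (editor's illustration; cleanup.policy is just an example topic config key, not taken from this change):

    KafkaAdminProperties admin = new KafkaAdminProperties();
    admin.setConfiguration(java.util.Collections.singletonMap("cleanup.policy", "compact"));
    // setConfiguration delegates to setProperties, so the same value is visible through the topic view
    String policy = admin.getProperties().get("cleanup.policy"); // "compact"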
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2015-2021 the original author or authors.
|
||||
* Copyright 2015-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,34 +16,24 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.time.Duration;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.validation.constraints.AssertTrue;
|
||||
import javax.validation.constraints.Min;
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
|
||||
import org.springframework.cloud.stream.binder.HeaderMode;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties.CompressionType;
|
||||
import org.springframework.core.io.DefaultResourceLoader;
|
||||
import org.springframework.core.io.Resource;
|
||||
import org.springframework.expression.Expression;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
@@ -60,20 +50,18 @@ import org.springframework.util.StringUtils;
|
||||
* @author Gary Russell
|
||||
* @author Rafal Zukowski
|
||||
* @author Aldo Sinanaj
|
||||
* @author Lukasz Kaminski
|
||||
* @author Chukwubuikem Ume-Ugwa
|
||||
*/
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
|
||||
public class KafkaBinderConfigurationProperties {
|
||||
|
||||
private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final Transaction transaction = new Transaction();
|
||||
|
||||
private final KafkaProperties kafkaProperties;
|
||||
|
||||
private String[] zkNodes = new String[] { "localhost" };
|
||||
|
||||
/**
|
||||
* Arbitrary kafka properties that apply to both producers and consumers.
|
||||
*/
|
||||
@@ -89,26 +77,48 @@ public class KafkaBinderConfigurationProperties {
|
||||
*/
|
||||
private Map<String, String> producerProperties = new HashMap<>();
|
||||
|
||||
private String defaultZkPort = "2181";
|
||||
|
||||
private String[] brokers = new String[] { "localhost" };
|
||||
|
||||
private String defaultBrokerPort = "9092";
|
||||
|
||||
private String[] headers = new String[] {};
|
||||
|
||||
private boolean autoCreateTopics = true;
|
||||
private int offsetUpdateTimeWindow = 10000;
|
||||
|
||||
private boolean autoAlterTopics;
|
||||
private int offsetUpdateCount;
|
||||
|
||||
private int offsetUpdateShutdownTimeout = 2000;
|
||||
|
||||
private int maxWait = 100;
|
||||
|
||||
private boolean autoCreateTopics = true;
|
||||
|
||||
private boolean autoAddPartitions;
|
||||
|
||||
private boolean considerDownWhenAnyPartitionHasNoLeader;
|
||||
private int socketBufferSize = 2097152;
|
||||
|
||||
/**
|
||||
* ZK session timeout in milliseconds.
|
||||
*/
|
||||
private int zkSessionTimeout = 10000;
|
||||
|
||||
/**
|
||||
* ZK Connection timeout in milliseconds.
|
||||
*/
|
||||
private int zkConnectionTimeout = 10000;
|
||||
|
||||
private String requiredAcks = "1";
|
||||
|
||||
private short replicationFactor = -1;
|
||||
private short replicationFactor = 1;
|
||||
|
||||
private int fetchSize = 1024 * 1024;
|
||||
|
||||
private int minPartitionCount = 1;
|
||||
|
||||
private int queueSize = 8192;
|
||||
|
||||
/**
|
||||
* Time to wait to get partition information in seconds; default 60.
|
||||
*/
|
||||
@@ -122,21 +132,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
*/
|
||||
private String headerMapperBeanName;
|
||||
|
||||
/**
|
||||
* Time between retries after AuthorizationException is caught in
|
||||
* the ListenerContainer; default is null, which disables retries.
|
||||
* For more info see: {@link org.springframework.kafka.listener.ConsumerProperties#setAuthorizationExceptionRetryInterval(java.time.Duration)}
|
||||
*/
|
||||
private Duration authorizationExceptionRetryInterval;
|
||||
|
||||
/**
|
||||
* When a certificate store location is given as classpath URL (classpath:), then the binder
|
||||
* moves the resource from the classpath location inside the JAR to a location on
|
||||
* the filesystem. If this value is set, then this location is used, otherwise, the
|
||||
* certificate file is copied to the directory returned by java.io.tmpdir.
|
||||
*/
|
||||
private String certificateStoreDirectory;
|
||||
|
||||
public KafkaBinderConfigurationProperties(KafkaProperties kafkaProperties) {
|
||||
Assert.notNull(kafkaProperties, "'kafkaProperties' cannot be null");
|
||||
this.kafkaProperties = kafkaProperties;
|
||||
@@ -150,83 +145,21 @@ public class KafkaBinderConfigurationProperties {
|
||||
return this.transaction;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the connection String
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
@Deprecated
|
||||
public String getZkConnectionString() {
|
||||
return toConnectionString(this.zkNodes, this.defaultZkPort);
|
||||
}
|
||||
|
||||
public String getKafkaConnectionString() {
|
||||
// We need to do a check on certificate file locations to see if they are given as classpath resources.
|
||||
// If that is the case, then we will move them to a file system location and use those as the certificate locations.
|
||||
// This is due to a limitation in Kafka itself in which it doesn't allow reading certificate resources from the classpath.
|
||||
// See this: https://issues.apache.org/jira/browse/KAFKA-7685
|
||||
// and this: https://cwiki.apache.org/confluence/display/KAFKA/KIP-398%3A+Support+reading+trust+store+from+classpath
|
||||
moveCertsToFileSystemIfNecessary();
|
||||
|
||||
return toConnectionString(this.brokers, this.defaultBrokerPort);
|
||||
}
|
||||
|
||||
private void moveCertsToFileSystemIfNecessary() {
|
||||
try {
|
||||
moveBrokerCertsIfApplicable();
|
||||
moveSchemaRegistryCertsIfApplicable();
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private void moveBrokerCertsIfApplicable() throws IOException {
|
||||
final String trustStoreLocation = this.configuration.get("ssl.truststore.location");
|
||||
if (trustStoreLocation != null && trustStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(trustStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("ssl.truststore.location", fileSystemLocation);
|
||||
}
|
||||
final String keyStoreLocation = this.configuration.get("ssl.keystore.location");
|
||||
if (keyStoreLocation != null && keyStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(keyStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("ssl.keystore.location", fileSystemLocation);
|
||||
}
|
||||
}
|
||||
|
||||
private void moveSchemaRegistryCertsIfApplicable() throws IOException {
|
||||
String trustStoreLocation = this.configuration.get("schema.registry.ssl.truststore.location");
|
||||
if (trustStoreLocation != null && trustStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(trustStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("schema.registry.ssl.truststore.location", fileSystemLocation);
|
||||
}
|
||||
final String keyStoreLocation = this.configuration.get("schema.registry.ssl.keystore.location");
|
||||
if (keyStoreLocation != null && keyStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(keyStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("schema.registry.ssl.keystore.location", fileSystemLocation);
|
||||
}
|
||||
}
|
||||
|
||||
private String moveCertToFileSystem(String classpathLocation, String fileSystemLocation) throws IOException {
|
||||
File targetFile;
|
||||
final String tempDir = System.getProperty("java.io.tmpdir");
|
||||
Resource resource = new DefaultResourceLoader().getResource(classpathLocation);
|
||||
if (StringUtils.hasText(fileSystemLocation)) {
|
||||
final Path path = Paths.get(fileSystemLocation);
|
||||
if (!Files.exists(path) || !Files.isDirectory(path) || !Files.isWritable(path)) {
|
||||
logger.warn("The filesystem location to move the cert files (" + fileSystemLocation + ") " +
|
||||
"is not found or a directory that is writable. The system temp folder (java.io.tmpdir) will be used instead.");
|
||||
targetFile = new File(Paths.get(tempDir, resource.getFilename()).toString());
|
||||
}
|
||||
else {
|
||||
// the given location is verified to be a writable directory.
|
||||
targetFile = new File(Paths.get(fileSystemLocation, resource.getFilename()).toString());
|
||||
}
|
||||
}
|
||||
else {
|
||||
targetFile = new File(Paths.get(tempDir, resource.getFilename()).toString());
|
||||
}
|
||||
|
||||
try (InputStream inputStream = resource.getInputStream()) {
|
||||
Files.copy(inputStream, targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
|
||||
}
|
||||
return targetFile.getAbsolutePath();
|
||||
}
|
||||
|
||||
public String getDefaultKafkaConnectionString() {
|
||||
return DEFAULT_KAFKA_CONNECTION_STRING;
|
||||
}
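// Editor's sketch, not part of the original source: how the classpath-certificate handling above is
// typically exercised. The directory, the truststore name and the no-arg KafkaProperties instance are
// illustrative assumptions.
//   KafkaBinderConfigurationProperties binderProps =
//       new KafkaBinderConfigurationProperties(new org.springframework.boot.autoconfigure.kafka.KafkaProperties());
//   binderProps.setCertificateStoreDirectory("/var/kafka-certs");              // writable target directory
//   binderProps.getConfiguration().put("ssl.truststore.location", "classpath:kafka.truststore.jks");
//   String servers = binderProps.getKafkaConnectionString();                   // copies the cert to the filesystem first
//   // afterwards the configuration map holds the absolute path instead of the classpath: URL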
|
||||
@@ -235,6 +168,72 @@ public class KafkaBinderConfigurationProperties {
|
||||
return this.headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the window.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getOffsetUpdateTimeWindow() {
|
||||
return this.offsetUpdateTimeWindow;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the count.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getOffsetUpdateCount() {
|
||||
return this.offsetUpdateCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the timeout.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getOffsetUpdateShutdownTimeout() {
|
||||
return this.offsetUpdateShutdownTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper nodes.
|
||||
* @return the nodes.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public String[] getZkNodes() {
|
||||
return this.zkNodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper nodes.
|
||||
* @param zkNodes the nodes.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setZkNodes(String... zkNodes) {
|
||||
this.zkNodes = zkNodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper port.
|
||||
* @param defaultZkPort the port.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setDefaultZkPort(String defaultZkPort) {
|
||||
this.defaultZkPort = defaultZkPort;
|
||||
}
|
||||
|
||||
public String[] getBrokers() {
|
||||
return this.brokers;
|
||||
}
|
||||
@@ -251,6 +250,83 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.headers = headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param offsetUpdateTimeWindow the window.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
|
||||
this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param offsetUpdateCount the count.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setOffsetUpdateCount(int offsetUpdateCount) {
|
||||
this.offsetUpdateCount = offsetUpdateCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param offsetUpdateShutdownTimeout the timeout.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
|
||||
this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper session timeout.
|
||||
* @return the timeout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public int getZkSessionTimeout() {
|
||||
return this.zkSessionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper session timeout.
|
||||
* @param zkSessionTimeout the timeout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setZkSessionTimeout(int zkSessionTimeout) {
|
||||
this.zkSessionTimeout = zkSessionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper connection timeout.
|
||||
* @return the timeout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public int getZkConnectionTimeout() {
|
||||
return this.zkConnectionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper connection timeout.
|
||||
* @param zkConnectionTimeout the timeout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setZkConnectionTimeout(int zkConnectionTimeout) {
|
||||
this.zkConnectionTimeout = zkConnectionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts an array of host values to a comma-separated String. It will append the
|
||||
* default port value, if not already specified.
|
||||
@@ -271,6 +347,28 @@ public class KafkaBinderConfigurationProperties {
|
||||
return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the wait.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getMaxWait() {
|
||||
return this.maxWait;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param maxWait the wait.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setMaxWait(int maxWait) {
|
||||
this.maxWait = maxWait;
|
||||
}
|
||||
|
||||
public String getRequiredAcks() {
|
||||
return this.requiredAcks;
|
||||
}
|
||||
@@ -287,6 +385,28 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.replicationFactor = replicationFactor;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the size.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getFetchSize() {
|
||||
return this.fetchSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param fetchSize the size.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setFetchSize(int fetchSize) {
|
||||
this.fetchSize = fetchSize;
|
||||
}
|
||||
|
||||
public int getMinPartitionCount() {
|
||||
return this.minPartitionCount;
|
||||
}
|
||||
@@ -303,6 +423,28 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.healthTimeout = healthTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the queue size.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getQueueSize() {
|
||||
return this.queueSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param queueSize the queue size.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setQueueSize(int queueSize) {
|
||||
this.queueSize = queueSize;
|
||||
}
|
||||
|
||||
public boolean isAutoCreateTopics() {
|
||||
return this.autoCreateTopics;
|
||||
}
|
||||
@@ -311,14 +453,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.autoCreateTopics = autoCreateTopics;
|
||||
}
|
||||
|
||||
public boolean isAutoAlterTopics() {
|
||||
return autoAlterTopics;
|
||||
}
|
||||
|
||||
public void setAutoAlterTopics(boolean autoAlterTopics) {
|
||||
this.autoAlterTopics = autoAlterTopics;
|
||||
}
|
||||
|
||||
public boolean isAutoAddPartitions() {
|
||||
return this.autoAddPartitions;
|
||||
}
|
||||
@@ -327,6 +461,30 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.autoAddPartitions = autoAddPartitions;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used; set properties such as this via {@link #getConfiguration()
|
||||
* configuration}.
|
||||
* @return the size.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
|
||||
public int getSocketBufferSize() {
|
||||
return this.socketBufferSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used; set properties such as this via {@link #getConfiguration()
|
||||
* configuration}.
|
||||
* @param socketBufferSize the size.
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
|
||||
public void setSocketBufferSize(int socketBufferSize) {
|
||||
this.socketBufferSize = socketBufferSize;
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
@@ -371,7 +529,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
}
|
||||
}
|
||||
consumerConfiguration.putAll(this.consumerProperties);
|
||||
filterStreamManagedConfiguration(consumerConfiguration);
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
return getConfigurationWithBootstrapServer(consumerConfiguration,
|
||||
@@ -402,31 +559,22 @@ public class KafkaBinderConfigurationProperties {
|
||||
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
}
|
||||
|
||||
private void filterStreamManagedConfiguration(Map<String, Object> configuration) {
|
||||
if (configuration.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)
|
||||
&& configuration.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG).equals(true)) {
|
||||
logger.warn(constructIgnoredConfigMessage(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) +
|
||||
ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG + "=true is not supported by the Kafka binder");
|
||||
configuration.remove(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
|
||||
}
|
||||
if (configuration.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
|
||||
logger.warn(constructIgnoredConfigMessage(ConsumerConfig.GROUP_ID_CONFIG) +
|
||||
"Use spring.cloud.stream.default.group or spring.cloud.stream.binding.<name>.group to specify " +
|
||||
"the group instead of " + ConsumerConfig.GROUP_ID_CONFIG);
|
||||
configuration.remove(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
}
|
||||
|
||||
private String constructIgnoredConfigMessage(String config) {
|
||||
return String.format("Ignoring provided value(s) for '%s'. ", config);
|
||||
}
|
||||
|
||||
private Map<String, Object> getConfigurationWithBootstrapServer(
|
||||
Map<String, Object> configuration, String bootstrapServersConfig) {
|
||||
final String kafkaConnectionString = getKafkaConnectionString();
|
||||
if (ObjectUtils.isEmpty(configuration.get(bootstrapServersConfig)) ||
|
||||
!kafkaConnectionString.equals("localhost:9092")) {
|
||||
configuration.put(bootstrapServersConfig, kafkaConnectionString);
|
||||
if (ObjectUtils.isEmpty(configuration.get(bootstrapServersConfig))) {
|
||||
configuration.put(bootstrapServersConfig, getKafkaConnectionString());
|
||||
}
|
||||
else {
|
||||
Object boostrapServersConfig = configuration.get(bootstrapServersConfig);
|
||||
if (boostrapServersConfig instanceof List) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<String> bootStrapServers = (List<String>) configuration
|
||||
.get(bootstrapServersConfig);
|
||||
if (bootStrapServers.size() == 1
|
||||
&& bootStrapServers.get(0).equals("localhost:9092")) {
|
||||
configuration.put(bootstrapServersConfig, getKafkaConnectionString());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Collections.unmodifiableMap(configuration);
|
||||
}
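// Editor's note, not in the original source: the logic above reconciles an explicitly configured
// bootstrap.servers entry in the consumer/producer configuration maps with the binder's own broker
// list before the map is returned as unmodifiable. Given the prefix declared on this class, that
// broker list is typically supplied as, for example:
//   spring.cloud.stream.kafka.binder.brokers=broker1:9092,broker2:9092
// (property name flattened from the 'brokers' field above; an illustrative assumption).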
|
||||
@@ -447,30 +595,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.headerMapperBeanName = headerMapperBeanName;
|
||||
}
|
||||
|
||||
public Duration getAuthorizationExceptionRetryInterval() {
|
||||
return authorizationExceptionRetryInterval;
|
||||
}
|
||||
|
||||
public void setAuthorizationExceptionRetryInterval(Duration authorizationExceptionRetryInterval) {
|
||||
this.authorizationExceptionRetryInterval = authorizationExceptionRetryInterval;
|
||||
}
|
||||
|
||||
public boolean isConsiderDownWhenAnyPartitionHasNoLeader() {
|
||||
return this.considerDownWhenAnyPartitionHasNoLeader;
|
||||
}
|
||||
|
||||
public void setConsiderDownWhenAnyPartitionHasNoLeader(boolean considerDownWhenAnyPartitionHasNoLeader) {
|
||||
this.considerDownWhenAnyPartitionHasNoLeader = considerDownWhenAnyPartitionHasNoLeader;
|
||||
}
|
||||
|
||||
public String getCertificateStoreDirectory() {
|
||||
return this.certificateStoreDirectory;
|
||||
}
|
||||
|
||||
public void setCertificateStoreDirectory(String certificateStoreDirectory) {
|
||||
this.certificateStoreDirectory = certificateStoreDirectory;
|
||||
}
|
||||
|
||||
/**
|
||||
* Domain class that models transaction capabilities in Kafka.
|
||||
*/
|
||||
@@ -652,6 +776,16 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.kafkaProducerProperties.setConfiguration(configuration);
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
public KafkaAdminProperties getAdmin() {
|
||||
return this.kafkaProducerProperties.getAdmin();
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
public void setAdmin(KafkaAdminProperties admin) {
|
||||
this.kafkaProducerProperties.setAdmin(admin);
|
||||
}
|
||||
|
||||
public KafkaTopicProperties getTopic() {
|
||||
return this.kafkaProducerProperties.getTopic();
|
||||
}
|
||||
|
||||
@@ -26,20 +26,10 @@ import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
 */
public class KafkaBindingProperties implements BinderSpecificPropertiesProvider {

	/**
	 * Consumer specific binding properties. @see {@link KafkaConsumerProperties}.
	 */
	private KafkaConsumerProperties consumer = new KafkaConsumerProperties();

	/**
	 * Producer specific binding properties. @see {@link KafkaProducerProperties}.
	 */
	private KafkaProducerProperties producer = new KafkaProducerProperties();

	/**
	 * @return {@link KafkaConsumerProperties}
	 * Consumer specific binding properties. @see {@link KafkaConsumerProperties}.
	 */
	public KafkaConsumerProperties getConsumer() {
		return this.consumer;
	}
@@ -48,10 +38,6 @@ public class KafkaBindingProperties implements BinderSpecificPropertiesProvider
		this.consumer = consumer;
	}

	/**
	 * @return {@link KafkaProducerProperties}
	 * Producer specific binding properties. @see {@link KafkaProducerProperties}.
	 */
	public KafkaProducerProperties getProducer() {
		return this.producer;
	}

@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2016-2021 the original author or authors.
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -19,7 +19,7 @@ package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.kafka.listener.ContainerProperties;
|
||||
import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
|
||||
|
||||
/**
|
||||
* Extended consumer properties for Kafka binder.
|
||||
@@ -86,199 +86,56 @@ public class KafkaConsumerProperties {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* When true the offset is committed after each record, otherwise the offsets for the complete set of records
|
||||
* received from the poll() are committed after all records have been processed.
|
||||
*/
|
||||
@Deprecated
|
||||
private boolean ackEachRecord;
|
||||
|
||||
/**
|
||||
* When true, topic partitions are automatically rebalanced between the members of a consumer group.
|
||||
* When false, each consumer is assigned a fixed set of partitions based on spring.cloud.stream.instanceCount and spring.cloud.stream.instanceIndex.
|
||||
*/
|
||||
private boolean autoRebalanceEnabled = true;
|
||||
|
||||
/**
|
||||
* Whether to autocommit offsets when a message has been processed.
|
||||
* If set to false, a header with the key kafka_acknowledgment of the type org.springframework.kafka.support.Acknowledgment header
|
||||
* is present in the inbound message. Applications may use this header for acknowledging messages.
|
||||
*/
|
||||
@Deprecated
|
||||
private boolean autoCommitOffset = true;
|
||||
|
||||
/**
|
||||
* Controlling the container acknowledgement mode. This is the preferred way to control the ack mode on the
|
||||
* container instead of the deprecated autoCommitOffset property.
|
||||
*/
|
||||
private ContainerProperties.AckMode ackMode;
|
||||
|
||||
/**
|
||||
* Flag to enable auto commit on error in polled consumers.
|
||||
*/
|
||||
private Boolean autoCommitOnError;
|
||||
|
||||
/**
|
||||
* The starting offset for new groups. Allowed values: earliest and latest.
|
||||
*/
|
||||
private StartOffset startOffset;
|
||||
|
||||
/**
|
||||
* Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
* Must be false if a KafkaRebalanceListener is provided.
|
||||
*/
|
||||
private boolean resetOffsets;
|
||||
|
||||
/**
|
||||
* When set to true, it enables DLQ behavior for the consumer.
|
||||
* By default, messages that result in errors are forwarded to a topic named error.name-of-destination.name-of-group.
|
||||
* The DLQ topic name can be configurable by setting the dlqName property.
|
||||
*/
|
||||
private boolean enableDlq;
|
||||
|
||||
/**
|
||||
* The name of the DLQ topic to receive the error messages.
|
||||
*/
|
||||
private String dlqName;
|
||||
|
||||
/**
|
||||
* Number of partitions to use on the DLQ.
|
||||
*/
|
||||
private Integer dlqPartitions;
|
||||
|
||||
/**
|
||||
* Using this, DLQ-specific producer properties can be set.
|
||||
* All the properties available through kafka producer properties can be set through this property.
|
||||
*/
|
||||
private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();
|
||||
|
||||
/**
|
||||
* @deprecated No longer used by the binder.
|
||||
*/
|
||||
@Deprecated
|
||||
private int recoveryInterval = 5000;
|
||||
|
||||
/**
|
||||
* List of trusted packages to provide the header mapper.
|
||||
*/
|
||||
private String[] trustedPackages;
|
||||
|
||||
/**
|
||||
* Indicates which standard headers are populated by the inbound channel adapter.
|
||||
* Allowed values: none, id, timestamp, or both.
|
||||
*/
|
||||
private StandardHeaders standardHeaders = StandardHeaders.none;
|
||||
|
||||
/**
|
||||
* The name of a bean that implements RecordMessageConverter.
|
||||
*/
|
||||
private String converterBeanName;
|
||||
|
||||
/**
|
||||
* The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
*/
|
||||
private long idleEventInterval = 30_000;
|
||||
|
||||
/**
|
||||
* When true, the destination is treated as a regular expression Pattern used to match topic names by the broker.
|
||||
*/
|
||||
private boolean destinationIsPattern;
|
||||
|
||||
/**
|
||||
* Map with a key/value pair containing generic Kafka consumer properties.
|
||||
* In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
*/
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
|
||||
*/
|
||||
private KafkaTopicProperties topic = new KafkaTopicProperties();
|
||||
|
||||
/**
|
||||
* Timeout used for polling in pollable consumers.
|
||||
*/
|
||||
private long pollTimeout = org.springframework.kafka.listener.ConsumerProperties.DEFAULT_POLL_TIMEOUT;
|
||||
|
||||
/**
|
||||
* Transaction manager bean name - overrides the binder's transaction configuration.
|
||||
*/
|
||||
private String transactionManager;
|
||||
|
||||
/**
|
||||
* Set to false to NOT commit the offset of a successfully recovered record in the after rollback processor.
|
||||
*/
|
||||
private boolean txCommitRecovered = true;
|
||||
|
||||
/**
|
||||
* CommonErrorHandler bean name per consumer binding.
|
||||
* @since 3.2
|
||||
*/
|
||||
private String commonErrorHandlerBeanName;
|
||||
|
||||
/**
|
||||
* @return if each record needs to be acknowledged.
|
||||
*
|
||||
* When true the offset is committed after each record, otherwise the offsets for the complete set of records
|
||||
* received from the poll() are committed after all records have been processed.
|
||||
*
|
||||
* @deprecated since 3.1 in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean isAckEachRecord() {
|
||||
return this.ackEachRecord;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param ackEachRecord
|
||||
*
|
||||
* @deprecated in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public void setAckEachRecord(boolean ackEachRecord) {
|
||||
this.ackEachRecord = ackEachRecord;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is autocommit offset enabled
|
||||
*
|
||||
* Whether to autocommit offsets when a message has been processed.
|
||||
* If set to false, a header with the key kafka_acknowledgment of the type org.springframework.kafka.support.Acknowledgment header
|
||||
* is present in the inbound message. Applications may use this header for acknowledging messages.
|
||||
*
|
||||
* @deprecated since 3.1 in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean isAutoCommitOffset() {
|
||||
return this.autoCommitOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param autoCommitOffset
|
||||
*
|
||||
* @deprecated in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public void setAutoCommitOffset(boolean autoCommitOffset) {
|
||||
this.autoCommitOffset = autoCommitOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Container's ack mode.
|
||||
*/
|
||||
public ContainerProperties.AckMode getAckMode() {
|
||||
return this.ackMode;
|
||||
}
|
||||
|
||||
public void setAckMode(ContainerProperties.AckMode ackMode) {
|
||||
this.ackMode = ackMode;
|
||||
}
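// Editor's sketch, not part of the original source: with ackEachRecord and autoCommitOffset deprecated
// above, per-record acknowledgement is expressed through ackMode instead, e.g.
//   KafkaConsumerProperties consumerProps = new KafkaConsumerProperties();
//   consumerProps.setAckMode(ContainerProperties.AckMode.RECORD); // commit after each record,
//   // roughly the successor of the old ackEachRecord=true flag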
|
||||
|
||||
/**
|
||||
* @return start offset
|
||||
*
|
||||
* The starting offset for new groups. Allowed values: earliest and latest.
|
||||
*/
|
||||
public StartOffset getStartOffset() {
|
||||
return this.startOffset;
|
||||
}
|
||||
@@ -287,12 +144,6 @@ public class KafkaConsumerProperties {
|
||||
this.startOffset = startOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return if resetting offset is enabled
|
||||
*
|
||||
* Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
* Must be false if a KafkaRebalanceListener is provided.
|
||||
*/
|
||||
public boolean isResetOffsets() {
|
||||
return this.resetOffsets;
|
||||
}
|
||||
@@ -301,13 +152,6 @@ public class KafkaConsumerProperties {
|
||||
this.resetOffsets = resetOffsets;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is DLQ enabled.
|
||||
*
|
||||
* When set to true, it enables DLQ behavior for the consumer.
|
||||
* By default, messages that result in errors are forwarded to a topic named error.name-of-destination.name-of-group.
|
||||
* The DLQ topic name can be configurable by setting the dlqName property.
|
||||
*/
|
||||
public boolean isEnableDlq() {
|
||||
return this.enableDlq;
|
||||
}
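// Editor's sketch of the DLQ settings documented above (values are illustrative assumptions, not
// taken from this change):
//   KafkaConsumerProperties dlqExample = new KafkaConsumerProperties();
//   dlqExample.setEnableDlq(true);          // route failed records to a DLQ topic
//   dlqExample.setDlqName("orders.DLT");    // otherwise error.<destination>.<group> is used
//   dlqExample.setDlqPartitions(1);
//   dlqExample.getDlqProducerProperties().setCompressionType(KafkaProducerProperties.CompressionType.gzip);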
|
||||
@@ -316,20 +160,10 @@ public class KafkaConsumerProperties {
|
||||
this.enableDlq = enableDlq;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is autocommit on error in polled consumers.
|
||||
*
|
||||
* This property accessor is only used in polled consumers.
|
||||
*/
|
||||
public Boolean getAutoCommitOnError() {
|
||||
return this.autoCommitOnError;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param autoCommitOnError commit on error in polled consumers.
|
||||
*
|
||||
*/
|
||||
public void setAutoCommitOnError(Boolean autoCommitOnError) {
|
||||
this.autoCommitOnError = autoCommitOnError;
|
||||
}
|
||||
@@ -354,12 +188,6 @@ public class KafkaConsumerProperties {
|
||||
this.recoveryInterval = recoveryInterval;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is auto rebalance enabled
|
||||
*
|
||||
* When true, topic partitions are automatically rebalanced between the members of a consumer group.
|
||||
* When false, each consumer is assigned a fixed set of partitions based on spring.cloud.stream.instanceCount and spring.cloud.stream.instanceIndex.
|
||||
*/
|
||||
public boolean isAutoRebalanceEnabled() {
|
||||
return this.autoRebalanceEnabled;
|
||||
}
|
||||
@@ -368,12 +196,6 @@ public class KafkaConsumerProperties {
|
||||
this.autoRebalanceEnabled = autoRebalanceEnabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a map of configuration
|
||||
*
|
||||
* Map with a key/value pair containing generic Kafka consumer properties.
|
||||
* In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
*/
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
@@ -382,11 +204,6 @@ public class KafkaConsumerProperties {
|
||||
this.configuration = configuration;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return dlq name
|
||||
*
|
||||
* The name of the DLQ topic to receive the error messages.
|
||||
*/
|
||||
public String getDlqName() {
|
||||
return this.dlqName;
|
||||
}
|
||||
@@ -395,24 +212,6 @@ public class KafkaConsumerProperties {
|
||||
this.dlqName = dlqName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return number of partitions on the DLQ topic
|
||||
*
|
||||
* Number of partitions to use on the DLQ.
|
||||
*/
|
||||
public Integer getDlqPartitions() {
|
||||
return this.dlqPartitions;
|
||||
}
|
||||
|
||||
public void setDlqPartitions(Integer dlqPartitions) {
|
||||
this.dlqPartitions = dlqPartitions;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return trusted packages
|
||||
*
|
||||
* List of trusted packages to provide the header mapper.
|
||||
*/
|
||||
public String[] getTrustedPackages() {
|
||||
return this.trustedPackages;
|
||||
}
|
||||
@@ -421,12 +220,6 @@ public class KafkaConsumerProperties {
|
||||
this.trustedPackages = trustedPackages;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return dlq producer properties
|
||||
*
|
||||
* Using this, DLQ-specific producer properties can be set.
|
||||
* All the properties available through kafka producer properties can be set through this property.
|
||||
*/
|
||||
public KafkaProducerProperties getDlqProducerProperties() {
|
||||
return this.dlqProducerProperties;
|
||||
}
|
||||
@@ -435,12 +228,6 @@ public class KafkaConsumerProperties {
|
||||
this.dlqProducerProperties = dlqProducerProperties;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return standard headers
|
||||
*
|
||||
* Indicates which standard headers are populated by the inbound channel adapter.
|
||||
* Allowed values: none, id, timestamp, or both.
|
||||
*/
|
||||
public StandardHeaders getStandardHeaders() {
|
||||
return this.standardHeaders;
|
||||
}
|
||||
@@ -449,11 +236,6 @@ public class KafkaConsumerProperties {
|
||||
this.standardHeaders = standardHeaders;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return converter bean name
|
||||
*
|
||||
* The name of a bean that implements RecordMessageConverter.
|
||||
*/
|
||||
public String getConverterBeanName() {
|
||||
return this.converterBeanName;
|
||||
}
|
||||
@@ -462,11 +244,6 @@ public class KafkaConsumerProperties {
|
||||
this.converterBeanName = converterBeanName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return idle event interval
|
||||
*
|
||||
* The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
*/
|
||||
public long getIdleEventInterval() {
|
||||
return this.idleEventInterval;
|
||||
}
|
||||
@@ -475,11 +252,6 @@ public class KafkaConsumerProperties {
|
||||
this.idleEventInterval = idleEventInterval;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is destination given through a pattern
|
||||
*
|
||||
* When true, the destination is treated as a regular expression Pattern used to match topic names by the broker.
|
||||
*/
|
||||
public boolean isDestinationIsPattern() {
|
||||
return this.destinationIsPattern;
|
||||
}
|
||||
@@ -489,10 +261,28 @@ public class KafkaConsumerProperties {
|
||||
}
|
||||
|
||||
/**
|
||||
* @return topic properties
|
||||
*
|
||||
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
|
||||
* No longer used; get properties such as this via {@link #getTopic()}.
|
||||
* @return Kafka admin properties
|
||||
* @deprecated No longer used
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.1.1, set properties such as this via 'topic'")
|
||||
@SuppressWarnings("deprecation")
|
||||
public KafkaAdminProperties getAdmin() {
|
||||
// Temporary workaround to copy the topic properties to the admin one.
|
||||
final KafkaAdminProperties kafkaAdminProperties = new KafkaAdminProperties();
|
||||
kafkaAdminProperties.setReplicationFactor(this.topic.getReplicationFactor());
|
||||
kafkaAdminProperties.setReplicasAssignments(this.topic.getReplicasAssignments());
|
||||
kafkaAdminProperties.setConfiguration(this.topic.getProperties());
|
||||
return kafkaAdminProperties;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@SuppressWarnings("deprecation")
|
||||
public void setAdmin(KafkaAdminProperties admin) {
|
||||
this.topic = admin;
|
||||
}
|
||||
|
||||
public KafkaTopicProperties getTopic() {
|
||||
return this.topic;
|
||||
}
|
||||
@@ -501,45 +291,4 @@ public class KafkaConsumerProperties {
|
||||
this.topic = topic;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return timeout in pollable consumers
|
||||
*
|
||||
* Timeout used for polling in pollable consumers.
|
||||
*/
|
||||
public long getPollTimeout() {
|
||||
return this.pollTimeout;
|
||||
}
|
||||
|
||||
public void setPollTimeout(long pollTimeout) {
|
||||
this.pollTimeout = pollTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the transaction manager bean name.
|
||||
*
|
||||
* Transaction manager bean name (must be a {@code KafkaAwareTransactionManager}).
|
||||
*/
|
||||
public String getTransactionManager() {
|
||||
return this.transactionManager;
|
||||
}
|
||||
|
||||
public void setTransactionManager(String transactionManager) {
|
||||
this.transactionManager = transactionManager;
|
||||
}
|
||||
|
||||
public boolean isTxCommitRecovered() {
|
||||
return this.txCommitRecovered;
|
||||
}
|
||||
|
||||
public void setTxCommitRecovered(boolean txCommitRecovered) {
|
||||
this.txCommitRecovered = txCommitRecovered;
|
||||
}
|
||||
|
||||
public String getCommonErrorHandlerBeanName() {
|
||||
return commonErrorHandlerBeanName;
|
||||
}
|
||||
|
||||
public void setCommonErrorHandlerBeanName(String commonErrorHandlerBeanName) {
|
||||
this.commonErrorHandlerBeanName = commonErrorHandlerBeanName;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import java.util.Map;
|
||||
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
|
||||
import org.springframework.expression.Expression;
|
||||
|
||||
/**
|
||||
@@ -33,88 +34,22 @@ import org.springframework.expression.Expression;
|
||||
*/
|
||||
public class KafkaProducerProperties {
|
||||
|
||||
/**
|
||||
* Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
*/
|
||||
private int bufferSize = 16384;
|
||||
|
||||
/**
|
||||
* Set the compression.type producer property. Supported values are none, gzip, snappy and lz4.
|
||||
* See {@link CompressionType} for more details.
|
||||
*/
|
||||
private CompressionType compressionType = CompressionType.none;
|
||||
|
||||
/**
|
||||
* Whether the producer is synchronous.
|
||||
*/
|
||||
private boolean sync;
|
||||
|
||||
/**
|
||||
* A SpEL expression evaluated against the outgoing message used to evaluate the time to wait
|
||||
* for ack when synchronous publish is enabled.
|
||||
*/
|
||||
private Expression sendTimeoutExpression;
|
||||
|
||||
/**
|
||||
 * How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
 */
private int batchTimeout;

/**
 * A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
 */
private Expression messageKeyExpression;

/**
 * A comma-delimited list of simple patterns to match Spring messaging headers
 * to be mapped to the Kafka Headers in the ProducerRecord.
 */
private String[] headerPatterns;

/**
 * Map with a key/value pair containing generic Kafka producer properties.
 */
private Map<String, String> configuration = new HashMap<>();

/**
 * Various topic level properties. @see {@link KafkaTopicProperties} for more details.
 */
private KafkaTopicProperties topic = new KafkaTopicProperties();

/**
 * Set to true to override the default binding destination (topic name) with the value of the
 * KafkaHeaders.TOPIC message header in the outbound message. If the header is not present,
 * the default binding destination is used.
 */
private boolean useTopicHeader;

/**
 * The bean name of a MessageChannel to which successful send results should be sent;
 * the bean must exist in the application context.
 */
private String recordMetadataChannel;

/**
 * Transaction manager bean name - overrides the binder's transaction configuration.
 */
private String transactionManager;

/*
 * Timeout value in seconds for the duration to wait when closing the producer.
 * If not set this defaults to 30 seconds.
 */
private int closeTimeout;

/**
 * Set to true to disable transactions.
 */
private boolean allowNonTransactional;

/**
 * @return buffer size
 *
 * Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
 */
public int getBufferSize() {
return this.bufferSize;
}
@@ -123,12 +58,6 @@ public class KafkaProducerProperties {
this.bufferSize = bufferSize;
}

/**
 * @return compression type {@link CompressionType}
 *
 * Set the compression.type producer property. Supported values are none, gzip, snappy, lz4 and zstd.
 * See {@link CompressionType} for more details.
 */
@NotNull
public CompressionType getCompressionType() {
return this.compressionType;
@@ -138,11 +67,6 @@ public class KafkaProducerProperties {
this.compressionType = compressionType;
}

/**
 * @return if synchronous sending is enabled
 *
 * Whether the producer is synchronous.
 */
public boolean isSync() {
return this.sync;
}
@@ -151,25 +75,6 @@ public class KafkaProducerProperties {
this.sync = sync;
}

/**
 * @return timeout expression for send
 *
 * A SpEL expression evaluated against the outgoing message used to evaluate the time to wait
 * for ack when synchronous publish is enabled.
 */
public Expression getSendTimeoutExpression() {
return this.sendTimeoutExpression;
}

public void setSendTimeoutExpression(Expression sendTimeoutExpression) {
this.sendTimeoutExpression = sendTimeoutExpression;
}

/**
 * @return batch timeout
 *
 * How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
 */
public int getBatchTimeout() {
return this.batchTimeout;
}
@@ -178,11 +83,6 @@ public class KafkaProducerProperties {
this.batchTimeout = batchTimeout;
}

/**
 * @return message key expression
 *
 * A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
 */
public Expression getMessageKeyExpression() {
return this.messageKeyExpression;
}
@@ -191,12 +91,6 @@ public class KafkaProducerProperties {
this.messageKeyExpression = messageKeyExpression;
}

/**
 * @return header patterns
 *
 * A comma-delimited list of simple patterns to match Spring messaging headers
 * to be mapped to the Kafka Headers in the ProducerRecord.
 */
public String[] getHeaderPatterns() {
return this.headerPatterns;
}
@@ -205,11 +99,6 @@ public class KafkaProducerProperties {
this.headerPatterns = headerPatterns;
}

/**
 * @return map of configuration
 *
 * Map with a key/value pair containing generic Kafka producer properties.
 */
public Map<String, String> getConfiguration() {
return this.configuration;
}
@@ -219,10 +108,28 @@ public class KafkaProducerProperties {
}

/**
 * @return topic properties
 *
 * Various topic level properties. @see {@link KafkaTopicProperties} for more details.
 * No longer used; get properties such as this via {@link #getTopic()}.
 * @return Kafka admin properties
 * @deprecated No longer used
 */
@Deprecated
@DeprecatedConfigurationProperty(reason = "Not used since 2.1.1, set properties such as this via 'topic'")
@SuppressWarnings("deprecation")
public KafkaAdminProperties getAdmin() {
// Temporary workaround to copy the topic properties to the admin one.
final KafkaAdminProperties kafkaAdminProperties = new KafkaAdminProperties();
kafkaAdminProperties.setReplicationFactor(this.topic.getReplicationFactor());
kafkaAdminProperties.setReplicasAssignments(this.topic.getReplicasAssignments());
kafkaAdminProperties.setConfiguration(this.topic.getProperties());
return kafkaAdminProperties;
}

@Deprecated
@SuppressWarnings("deprecation")
public void setAdmin(KafkaAdminProperties admin) {
this.topic = admin;
}

public KafkaTopicProperties getTopic() {
return this.topic;
}
@@ -231,67 +138,6 @@ public class KafkaProducerProperties {
this.topic = topic;
}

/**
 * @return if using topic header
 *
 * Set to true to override the default binding destination (topic name) with the value of the
 * KafkaHeaders.TOPIC message header in the outbound message. If the header is not present,
 * the default binding destination is used.
 */
public boolean isUseTopicHeader() {
return this.useTopicHeader;
}

public void setUseTopicHeader(boolean useTopicHeader) {
this.useTopicHeader = useTopicHeader;
}

/**
 * @return record metadata channel
 *
 * The bean name of a MessageChannel to which successful send results should be sent;
 * the bean must exist in the application context.
 */
public String getRecordMetadataChannel() {
return this.recordMetadataChannel;
}

public void setRecordMetadataChannel(String recordMetadataChannel) {
this.recordMetadataChannel = recordMetadataChannel;
}

/**
 * @return the transaction manager bean name.
 *
 * Transaction manager bean name (must be {@code KafkaAwareTransactionManager}.
 */
public String getTransactionManager() {
return this.transactionManager;
}

public void setTransactionManager(String transactionManager) {
this.transactionManager = transactionManager;
}

/*
 * @return timeout in seconds for closing the producer
 */
public int getCloseTimeout() {
return this.closeTimeout;
}

public void setCloseTimeout(int closeTimeout) {
this.closeTimeout = closeTimeout;
}

public boolean isAllowNonTransactional() {
return this.allowNonTransactional;
}

public void setAllowNonTransactional(boolean allowNonTransactional) {
this.allowNonTransactional = allowNonTransactional;
}

/**
 * Enumeration for compression types.
 */
@@ -317,10 +163,11 @@ public class KafkaProducerProperties {
 */
lz4,

/**
 * zstd compression.
 */
zstd,
// /** // TODO: uncomment and fix docs when kafka-clients 2.1.0 or newer is the
// default
// * zstd compression
// */
// zstd

}
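The extension properties shown in the hunks above are plain JavaBean properties, so they can also be populated programmatically. The following is a minimal illustrative sketch only, not part of this diff; it assumes the setters implied by the hunks above and uses Spring's SpelExpressionParser to build the Expression values.

import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.expression.spel.standard.SpelExpressionParser;

public class ProducerPropertiesSketch {

    public static KafkaProducerProperties sampleProducerProperties() {
        KafkaProducerProperties producerProperties = new KafkaProducerProperties();
        // Wait up to 50 ms for more records to accumulate in a batch before sending.
        producerProperties.setBatchTimeout(50);
        // Derive the Kafka record key from a message header via SpEL ('orderId' is a placeholder).
        producerProperties.setMessageKeyExpression(
                new SpelExpressionParser().parseExpression("headers['orderId']"));
        // Honor a KafkaHeaders.TOPIC header on outbound messages, if present.
        producerProperties.setUseTopicHeader(true);
        // Generic native producer properties pass through the configuration map.
        producerProperties.getConfiguration().put("linger.ms", "10");
        return producerProperties;
    }
}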

@@ -1,31 +0,0 @@
/*
 * Copyright 2021-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Map;

/**
 * Customizer for configuring AdminClient.
 *
 * @author Soby Chacko
 * @since 3.1.2
 */
@FunctionalInterface
public interface AdminClientConfigCustomizer {

void configure(Map<String, Object> adminClientProperties);
}
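As a usage illustration (not part of the diff): because the interface is a functional interface over the AdminClient property map, an application can contribute a customizer as a simple lambda, typically registered as a Spring bean. The property key below is the standard Kafka AdminClientConfig constant; the surrounding class and method names are placeholders.

import java.util.Map;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminClientCustomizerSketch {

    // Runs after the binder builds its AdminClient properties, so it can override them.
    public AdminClientConfigCustomizer adminClientConfigCustomizer() {
        return (Map<String, Object> props) ->
                props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "60000");
    }
}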

@@ -18,27 +18,20 @@ package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.AlterConfigsResult;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.DescribeConfigsResult;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.NewPartitions;
@@ -46,7 +39,6 @@ import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.TopicExistsException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

@@ -89,10 +81,10 @@ public class KafkaTopicProvisioner implements
// @checkstyle:on
InitializingBean {

private static final Log logger = LogFactory.getLog(KafkaTopicProvisioner.class);

private static final int DEFAULT_OPERATION_TIMEOUT = 30;

private final Log logger = LogFactory.getLog(getClass());

private final KafkaBinderConfigurationProperties configurationProperties;

private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;
@@ -101,27 +93,14 @@ public class KafkaTopicProvisioner implements

private RetryOperations metadataRetryOperations;

/**
 * Create an instance.
 * @param kafkaBinderConfigurationProperties the binder configuration properties.
 * @param kafkaProperties the boot Kafka properties used to build the
 * @param adminClientConfigCustomizer to customize {@link AdminClient}.
 * {@link AdminClient}.
 */
public KafkaTopicProvisioner(
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
KafkaProperties kafkaProperties,
AdminClientConfigCustomizer adminClientConfigCustomizer) {
KafkaProperties kafkaProperties) {
Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
this.configurationProperties = kafkaBinderConfigurationProperties;
this.adminClientProperties = kafkaProperties.buildAdminProperties();
this.configurationProperties = kafkaBinderConfigurationProperties;
normalalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties,
kafkaBinderConfigurationProperties);
// If the application provides an AdminConfig customizer
// and overrides properties, that takes precedence.
if (adminClientConfigCustomizer != null) {
adminClientConfigCustomizer.configure(this.adminClientProperties);
}
}

/**
@@ -133,7 +112,7 @@ public class KafkaTopicProvisioner implements
}

@Override
public void afterPropertiesSet() {
public void afterPropertiesSet() throws Exception {
if (this.metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();

@@ -154,35 +133,29 @@ public class KafkaTopicProvisioner implements
public ProducerDestination provisionProducerDestination(final String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {

if (logger.isInfoEnabled()) {
logger.info("Using kafka topic for outbound: " + name);
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
KafkaTopicUtils.validateTopicName(name);
try (AdminClient adminClient = createAdminClient()) {
try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
createTopic(adminClient, name, properties.getPartitionCount(), false,
properties.getExtension().getTopic());
int partitions = 0;
Map<String, TopicDescription> topicDescriptions = new HashMap<>();
if (this.configurationProperties.isAutoCreateTopics()) {
this.metadataRetryOperations.execute(context -> {
try {
if (logger.isDebugEnabled()) {
logger.debug("Attempting to retrieve the description for the topic: " + name);
}
DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(name));
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
.all();
topicDescriptions.putAll(all.get(this.operationTimeout, TimeUnit.SECONDS));
}
catch (Exception ex) {
throw new ProvisioningException("Problems encountered with partitions finding for: " + name, ex);
}
return null;
});
}
TopicDescription topicDescription = topicDescriptions.get(name);
if (topicDescription != null) {
DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(name));
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
.all();

Map<String, TopicDescription> topicDescriptions = null;
try {
topicDescriptions = all.get(this.operationTimeout, TimeUnit.SECONDS);
}
catch (Exception ex) {
throw new ProvisioningException(
"Problems encountered with partitions finding", ex);
}
TopicDescription topicDescription = topicDescriptions.get(name);
partitions = topicDescription.partitions().size();
}
return new KafkaProducerDestination(name, partitions);
@@ -212,8 +185,8 @@ public class KafkaTopicProvisioner implements
if (properties.getExtension().isDestinationIsPattern()) {
Assert.isTrue(!properties.getExtension().isEnableDlq(),
"enableDLQ is not allowed when listening to topic patterns");
if (logger.isDebugEnabled()) {
logger.debug("Listening to a topic pattern - " + name
if (this.logger.isDebugEnabled()) {
this.logger.debug("Listening to a topic pattern - " + name
+ " - no provisioning performed");
}
return new KafkaConsumerDestination(name);
@@ -249,7 +222,7 @@ public class KafkaTopicProvisioner implements
}
}
catch (Exception ex) {
throw new ProvisioningException("Provisioning exception encountered for " + name, ex);
throw new ProvisioningException("provisioning exception", ex);
}
}
}
@@ -269,14 +242,14 @@ public class KafkaTopicProvisioner implements
 * @param bootProps the boot kafka properties.
 * @param binderProps the binder kafka properties.
 */
public static void normalalizeBootPropsWithBinder(Map<String, Object> adminProps,
private void normalalizeBootPropsWithBinder(Map<String, Object> adminProps,
KafkaProperties bootProps, KafkaBinderConfigurationProperties binderProps) {
// First deal with the outlier
String kafkaConnectionString = binderProps.getKafkaConnectionString();
if (ObjectUtils
.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
|| !kafkaConnectionString
.equals(binderProps.getDefaultKafkaConnectionString())) {
.equals(binderProps.getDefaultKafkaConnectionString())) {
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
kafkaConnectionString);
}
@@ -290,8 +263,8 @@ public class KafkaTopicProvisioner implements
}
if (adminConfigNames.contains(key)) {
Object replaced = adminProps.put(key, value);
if (replaced != null && KafkaTopicProvisioner.logger.isDebugEnabled()) {
KafkaTopicProvisioner.logger.debug("Overrode boot property: [" + key + "], from: ["
if (replaced != null && this.logger.isDebugEnabled()) {
this.logger.debug("Overrode boot property: [" + key + "], from: ["
+ replaced + "] to: [" + value + "]");
}
}
@@ -301,26 +274,21 @@ public class KafkaTopicProvisioner implements
private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name,
String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties,
boolean anonymous, int partitions) {

if (properties.getExtension().isEnableDlq() && !anonymous) {
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName())
? properties.getExtension().getDlqName()
: "error." + name + "." + group;
int dlqPartitions = properties.getExtension().getDlqPartitions() == null
? partitions
: properties.getExtension().getDlqPartitions();
try {
final KafkaProducerProperties dlqProducerProperties = properties.getExtension().getDlqProducerProperties();
createTopicAndPartitions(adminClient, dlqTopic, dlqPartitions,
createTopicAndPartitions(adminClient, dlqTopic, partitions,
properties.getExtension().isAutoRebalanceEnabled(),
dlqProducerProperties.getTopic());
properties.getExtension().getTopic());
}
catch (Throwable throwable) {
if (throwable instanceof Error) {
throw (Error) throwable;
}
else {
throw new ProvisioningException("Provisioning exception encountered for " + name, throwable);
throw new ProvisioningException("provisioning exception", throwable);
}
}
return new KafkaConsumerDestination(name, partitions, dlqTopic);
@@ -344,7 +312,7 @@ public class KafkaTopicProvisioner implements
else {
// TODO:
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
throw new ProvisioningException("Provisioning exception encountered for " + name, throwable);
throw new ProvisioningException("Provisioning exception", throwable);
}
}
}
@@ -358,7 +326,7 @@ public class KafkaTopicProvisioner implements
tolerateLowerPartitionsOnBroker, properties);
}
else {
logger.info("Auto creation of topics is disabled.");
this.logger.info("Auto creation of topics is disabled.");
}
}

@@ -382,17 +350,13 @@ public class KafkaTopicProvisioner implements

Set<String> names = namesFutures.get(this.operationTimeout, TimeUnit.SECONDS);
if (names.contains(topicName)) {
//check if topic.properties are different from Topic Configuration in Kafka
if (this.configurationProperties.isAutoAlterTopics()) {
alterTopicConfigsIfNecessary(adminClient, topicName, topicProperties);
}
// only consider minPartitionCount for resizing if autoAddPartitions is true
int effectivePartitionCount = this.configurationProperties
.isAutoAddPartitions()
? Math.max(
this.configurationProperties.getMinPartitionCount(),
partitionCount)
: partitionCount;
? Math.max(
this.configurationProperties.getMinPartitionCount(),
partitionCount)
: partitionCount;
DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(topicName));
KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult
@@ -409,7 +373,7 @@ public class KafkaTopicProvisioner implements
partitions.all().get(this.operationTimeout, TimeUnit.SECONDS);
}
else if (tolerateLowerPartitionsOnBroker) {
logger.warn("The number of expected partitions was: "
this.logger.warn("The number of expected partitions was: "
+ partitionCount + ", but " + partitionSize
+ (partitionSize > 1 ? " have " : " has ")
+ "been found instead." + "There will be "
@@ -445,7 +409,7 @@ public class KafkaTopicProvisioner implements
topicProperties.getReplicationFactor() != null
? topicProperties.getReplicationFactor()
: this.configurationProperties
.getReplicationFactor());
.getReplicationFactor());
}
if (topicProperties.getProperties().size() > 0) {
newTopic.configs(topicProperties.getProperties());
@@ -458,18 +422,18 @@ public class KafkaTopicProvisioner implements
catch (Exception ex) {
if (ex instanceof ExecutionException) {
if (ex.getCause() instanceof TopicExistsException) {
if (logger.isWarnEnabled()) {
logger.warn("Attempt to create topic: " + topicName
if (this.logger.isWarnEnabled()) {
this.logger.warn("Attempt to create topic: " + topicName
+ ". Topic already exists.");
}
}
else {
logger.error("Failed to create topics", ex.getCause());
this.logger.error("Failed to create topics", ex.getCause());
throw ex.getCause();
}
}
else {
logger.error("Failed to create topics", ex.getCause());
this.logger.error("Failed to create topics", ex.getCause());
throw ex.getCause();
}
}
@@ -478,51 +442,6 @@ public class KafkaTopicProvisioner implements
}
}

private void alterTopicConfigsIfNecessary(AdminClient adminClient,
String topicName,
KafkaTopicProperties topicProperties)
throws InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
ConfigResource topicConfigResource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
DescribeConfigsResult describeConfigsResult = adminClient
.describeConfigs(Collections.singletonList(topicConfigResource));
KafkaFuture<Map<ConfigResource, Config>> topicConfigurationFuture = describeConfigsResult.all();
Map<ConfigResource, Config> topicConfigMap = topicConfigurationFuture
.get(this.operationTimeout, TimeUnit.SECONDS);
Config config = topicConfigMap.get(topicConfigResource);
final List<AlterConfigOp> updatedConfigEntries = topicProperties.getProperties().entrySet().stream()
.filter(propertiesEntry -> {
// Property is new and should be added
if (config.get(propertiesEntry.getKey()) == null) {
return true;
}
else {
// Property changed and should be updated
return !config.get(propertiesEntry.getKey()).value().equals(propertiesEntry.getValue());
}

})
.map(propertyEntry -> new ConfigEntry(propertyEntry.getKey(), propertyEntry.getValue()))
.map(configEntry -> new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))
.collect(Collectors.toList());
if (!updatedConfigEntries.isEmpty()) {
if (logger.isDebugEnabled()) {
logger.debug("Attempting to alter configs " + updatedConfigEntries + " for the topic:" + topicName);
}
Map<ConfigResource, Collection<AlterConfigOp>> alterConfigForTopics = new HashMap<>();
alterConfigForTopics.put(topicConfigResource, updatedConfigEntries);
AlterConfigsResult alterConfigsResult = adminClient.incrementalAlterConfigs(alterConfigForTopics);
alterConfigsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
}
}

/**
 * Check that the topic has the expected number of partitions and return the partition information.
 * @param partitionCount the expected count.
 * @param tolerateLowerPartitionsOnBroker if false, throw an exception if there are not enough partitions.
 * @param callable a Callable that will provide the partition information.
 * @param topicName the topic./
 * @return the partition information.
 */
public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
final boolean tolerateLowerPartitionsOnBroker,
final Callable<Collection<PartitionInfo>> callable, final String topicName) {
@@ -542,16 +461,16 @@ public class KafkaTopicProvisioner implements
if (ex instanceof UnknownTopicOrPartitionException) {
throw ex;
}
logger.error("Failed to obtain partition information", ex);
this.logger.error("Failed to obtain partition information", ex);
}
// In some cases, the above partition query may not throw an UnknownTopic..Exception for various reasons.
// For that, we are forcing another query to ensure that the topic is present on the server.
if (CollectionUtils.isEmpty(partitions)) {
try (AdminClient adminClient = AdminClient
.create(this.adminClientProperties)) {
final DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(topicName));

final AdminClient adminClient = AdminClient
.create(this.adminClientProperties);
final DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(topicName));
try {
describeTopicsResult.all().get();
}
catch (ExecutionException ex) {
@@ -569,7 +488,7 @@ public class KafkaTopicProvisioner implements
int partitionSize = CollectionUtils.isEmpty(partitions) ? 0 : partitions.size();
if (partitionSize < partitionCount) {
if (tolerateLowerPartitionsOnBroker) {
logger.warn("The number of expected partitions was: "
this.logger.warn("The number of expected partitions was: "
+ partitionCount + ", but " + partitionSize
+ (partitionSize > 1 ? " have " : " has ")
+ "been found instead." + "There will be "
@@ -587,7 +506,7 @@ public class KafkaTopicProvisioner implements
});
}
catch (Exception ex) {
logger.error("Cannot initialize Binder", ex);
this.logger.error("Cannot initialize Binder", ex);
throw new BinderException("Cannot initialize binder:", ex);
}
}

@@ -1,35 +0,0 @@
/*
 * Copyright 2020-2020 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.utils;

import java.util.function.BiFunction;

import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * A {@link BiFunction} extension for defining DLQ destination resolvers.
 *
 * The BiFunction takes the {@link ConsumerRecord} and the exception as inputs
 * and returns a topic name as the DLQ.
 *
 * @author Soby Chacko
 * @since 3.0.9
 */
@FunctionalInterface
public interface DlqDestinationResolver extends BiFunction<ConsumerRecord<?, ?>, Exception, String> {

}
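Illustration only (not from the diff): since the interface is just a BiFunction over the failed ConsumerRecord and the exception, a resolver can be supplied as a one-line lambda. The ".dlq" suffix and the "poison-records" topic below are arbitrary naming choices, not conventions defined by the binder.

public class DlqDestinationResolverSketch {

    // Route each failed record to a DLQ named after its source topic.
    static final DlqDestinationResolver BY_SOURCE_TOPIC =
            (record, exception) -> record.topic() + ".dlq";

    // Branch on the failure type (IllegalStateException is only an example).
    static final DlqDestinationResolver BY_ERROR_TYPE = (record, exception) ->
            exception instanceof IllegalStateException ? "poison-records" : record.topic() + ".dlq";
}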

@@ -1,76 +0,0 @@
/*
 * Copyright 2019-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.utils;

import org.apache.commons.logging.Log;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import org.springframework.lang.Nullable;

/**
 * A TriFunction that takes a consumer group, consumer record, and throwable and returns
 * which partition to publish to the dead letter topic. Returning {@code null} means Kafka
 * will choose the partition.
 *
 * @author Gary Russell
 * @since 3.0
 *
 */
@FunctionalInterface
public interface DlqPartitionFunction {

/**
 * Returns the same partition as the original recor.
 */
DlqPartitionFunction ORIGINAL_PARTITION = (group, rec, ex) -> rec.partition();

/**
 * Returns 0.
 */
DlqPartitionFunction PARTITION_ZERO = (group, rec, ex) -> 0;

/**
 * Apply the function.
 * @param group the consumer group.
 * @param record the consumer record.
 * @param throwable the exception.
 * @return the DLQ partition, or null.
 */
@Nullable
Integer apply(String group, ConsumerRecord<?, ?> record, Throwable throwable);

/**
 * Determine the fallback function to use based on the dlq partition count if no
 * {@link DlqPartitionFunction} bean is provided.
 * @param dlqPartitions the partition count.
 * @param logger the logger.
 * @return the fallback.
 */
static DlqPartitionFunction determineFallbackFunction(@Nullable Integer dlqPartitions, Log logger) {
if (dlqPartitions == null) {
return ORIGINAL_PARTITION;
}
else if (dlqPartitions > 1) {
logger.error("'dlqPartitions' is > 1 but a custom DlqPartitionFunction bean is not provided");
return ORIGINAL_PARTITION;
}
else {
return PARTITION_ZERO;
}
}

}
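A custom implementation, for illustration only: the contract is a nullable partition per group, record, and exception, so an application-provided bean can, for example, pin one consumer group's DLQ records to partition 0 while keeping the original partition for everyone else. The "audit" group name is a placeholder.

public class DlqPartitionFunctionSketch {

    // Send DLQ records for the "audit" group to partition 0 and keep the original
    // partition otherwise; returning null instead would let Kafka pick the partition.
    static final DlqPartitionFunction GROUP_AWARE =
            (group, record, throwable) -> "audit".equals(group) ? 0 : record.partition();
}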

@@ -1,163 +0,0 @@
/*
 * Copyright 2018-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.properties;

import java.nio.file.Paths;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.assertj.core.util.Files;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;

import static org.assertj.core.api.Assertions.assertThat;

public class KafkaBinderConfigurationPropertiesTest {

@Test
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaProperties() {
KafkaProperties kafkaProperties = new KafkaProperties();
kafkaProperties.getConsumer().setGroupId("group1");
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);

Map<String, Object> mergedConsumerConfiguration =
kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
}

@Test
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaProperties() {
KafkaProperties kafkaProperties = new KafkaProperties();
kafkaProperties.getConsumer().setEnableAutoCommit(true);
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);

Map<String, Object> mergedConsumerConfiguration =
kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
}

@Test
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConfiguration() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
kafkaBinderConfigurationProperties
.setConfiguration(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));

Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
}

@Test
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConfiguration() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
kafkaBinderConfigurationProperties
.setConfiguration(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));

Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
}

@Test
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConsumerProperties() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
kafkaBinderConfigurationProperties
.setConsumerProperties(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));

Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
}

@Test
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConsumerProps() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
kafkaBinderConfigurationProperties
.setConsumerProperties(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));

Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
}

@Test
public void testCertificateFilesAreConvertedToAbsolutePathsFromClassPathResources() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
final Map<String, String> configuration = kafkaBinderConfigurationProperties.getConfiguration();
configuration.put("ssl.truststore.location", "classpath:testclient.truststore");
configuration.put("ssl.keystore.location", "classpath:testclient.keystore");

kafkaBinderConfigurationProperties.getKafkaConnectionString();

assertThat(configuration.get("ssl.truststore.location"))
.isEqualTo(Paths.get(System.getProperty("java.io.tmpdir"), "testclient.truststore").toString());
assertThat(configuration.get("ssl.keystore.location"))
.isEqualTo(Paths.get(System.getProperty("java.io.tmpdir"), "testclient.keystore").toString());
}

@Test
public void testCertificateFilesAreConvertedToGivenAbsolutePathsFromClassPathResources() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
final Map<String, String> configuration = kafkaBinderConfigurationProperties.getConfiguration();
configuration.put("ssl.truststore.location", "classpath:testclient.truststore");
configuration.put("ssl.keystore.location", "classpath:testclient.keystore");
kafkaBinderConfigurationProperties.setCertificateStoreDirectory("target");

kafkaBinderConfigurationProperties.getKafkaConnectionString();

assertThat(configuration.get("ssl.truststore.location")).isEqualTo(
Paths.get(Files.currentFolder().toString(), "target", "testclient.truststore").toString());
assertThat(configuration.get("ssl.keystore.location")).isEqualTo(
Paths.get(Files.currentFolder().toString(), "target", "testclient.keystore").toString());
}

@Test
public void testCertificateFilesAreMovedForSchemaRegistryConfiguration() {
KafkaProperties kafkaProperties = new KafkaProperties();
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
final Map<String, String> configuration = kafkaBinderConfigurationProperties.getConfiguration();
configuration.put("schema.registry.ssl.truststore.location", "classpath:testclient.truststore");
configuration.put("schema.registry.ssl.keystore.location", "classpath:testclient.keystore");
kafkaBinderConfigurationProperties.setCertificateStoreDirectory("target");

kafkaBinderConfigurationProperties.getKafkaConnectionString();

assertThat(configuration.get("schema.registry.ssl.truststore.location")).isEqualTo(
Paths.get(Files.currentFolder().toString(), "target", "testclient.truststore").toString());
assertThat(configuration.get("schema.registry.ssl.keystore.location")).isEqualTo(
Paths.get(Files.currentFolder().toString(), "target", "testclient.keystore").toString());
}
}

@@ -42,8 +42,6 @@ import static org.assertj.core.api.Assertions.fail;
 */
public class KafkaTopicProvisionerTests {

AdminClientConfigCustomizer adminClientConfigCustomizer = adminClientProperties -> adminClientProperties.put("foo", "bar");

@SuppressWarnings("rawtypes")
@Test
public void bootPropertiesOverriddenExceptServers() throws Exception {
@@ -60,7 +58,7 @@ public class KafkaTopicProvisionerTests {
ts.getFile().getAbsolutePath());
binderConfig.setBrokers("localhost:9092");
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
bootConfig, adminClientConfigCustomizer);
bootConfig);
AdminClient adminClient = provisioner.createAdminClient();
assertThat(KafkaTestUtils.getPropertyValue(adminClient,
"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
@@ -69,7 +67,6 @@ public class KafkaTopicProvisionerTests {
assertThat(
((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
.isEqualTo("localhost:1234");
assertThat(configs.get("foo")).isEqualTo("bar");
adminClient.close();
}

@@ -89,7 +86,7 @@ public class KafkaTopicProvisionerTests {
ts.getFile().getAbsolutePath());
binderConfig.setBrokers("localhost:1234");
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
bootConfig, adminClientConfigCustomizer);
bootConfig);
AdminClient adminClient = provisioner.createAdminClient();
assertThat(KafkaTestUtils.getPropertyValue(adminClient,
"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
@@ -109,7 +106,7 @@ public class KafkaTopicProvisionerTests {
binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
"localhost:1234");
try {
new KafkaTopicProvisioner(binderConfig, bootConfig, adminClientConfigCustomizer);
new KafkaTopicProvisioner(binderConfig, bootConfig);
fail("Expected illegal state");
}
catch (IllegalStateException e) {

@@ -10,7 +10,7 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>3.2.1</version>
<version>2.2.0.M1</version>
</parent>

<properties>
@@ -54,6 +54,13 @@
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
</dependency>
<!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
@@ -64,16 +71,26 @@
<artifactId>spring-boot-autoconfigure-processor</artifactId>
<optional>true</optional>
</dependency>
<!-- Following dependencies are needed to support Kafka 1.1.0 client-->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.13</artifactId>
<artifactId>kafka_2.11</artifactId>
<version>${kafka.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.13</artifactId>
<artifactId>kafka_2.11</artifactId>
<version>${kafka.version}</version>
<classifier>test</classifier>
<scope>test</scope>
</dependency>
<!-- Following dependencies are only provided for testing and won't be packaged with the binder apps-->
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-schema</artifactId>
<scope>test</scope>
</dependency>
<!-- Following dependency is only provided for testing and won't be packaged with the binder apps-->
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
@@ -104,4 +121,4 @@
</plugin>
</plugins>
</build>
</project>
</project>
|
||||
@@ -1,616 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.header.Header;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.Topology;
|
||||
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
|
||||
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
|
||||
import org.apache.kafka.streams.kstream.Consumed;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.processor.Processor;
|
||||
import org.apache.kafka.streams.processor.ProcessorContext;
|
||||
import org.apache.kafka.streams.processor.TimestampExtractor;
|
||||
import org.apache.kafka.streams.state.KeyValueStore;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.boot.context.properties.bind.BindContext;
|
||||
import org.springframework.boot.context.properties.bind.BindHandler;
|
||||
import org.springframework.boot.context.properties.bind.Bindable;
|
||||
import org.springframework.boot.context.properties.bind.PropertySourcesPlaceholdersResolver;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.core.env.ConfigurableEnvironment;
|
||||
import org.springframework.core.env.MutablePropertySources;
|
||||
import org.springframework.integration.support.utils.IntegrationUtils;
|
||||
import org.springframework.kafka.config.KafkaStreamsConfiguration;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.streams.RecoveringDeserializationExceptionHandler;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public abstract class AbstractKafkaStreamsBinderProcessor implements ApplicationContextAware {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(AbstractKafkaStreamsBinderProcessor.class);
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
|
||||
private final CleanupConfig cleanupConfig;
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
protected ConfigurableApplicationContext applicationContext;
|
||||
|
||||
public AbstractKafkaStreamsBinderProcessor(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver, CleanupConfig cleanupConfig) {
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
this.cleanupConfig = cleanupConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void setApplicationContext(ApplicationContext applicationContext)
|
||||
throws BeansException {
|
||||
this.applicationContext = (ConfigurableApplicationContext) applicationContext;
|
||||
}
|
||||
|
||||
protected Topology.AutoOffsetReset getAutoOffsetReset(String inboundName, KafkaStreamsConsumerProperties extendedConsumerProperties) {
|
||||
final KafkaConsumerProperties.StartOffset startOffset = extendedConsumerProperties
|
||||
.getStartOffset();
|
||||
Topology.AutoOffsetReset autoOffsetReset = null;
|
||||
if (startOffset != null) {
|
||||
switch (startOffset) {
|
||||
case earliest:
|
||||
autoOffsetReset = Topology.AutoOffsetReset.EARLIEST;
|
||||
break;
|
||||
case latest:
|
||||
autoOffsetReset = Topology.AutoOffsetReset.LATEST;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (extendedConsumerProperties.isResetOffsets()) {
|
||||
AbstractKafkaStreamsBinderProcessor.LOG.warn("Detected resetOffsets configured on binding "
|
||||
+ inboundName + ". "
|
||||
+ "Setting resetOffsets in Kafka Streams binder does not have any effect.");
|
||||
}
|
||||
return autoOffsetReset;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
protected void handleKTableGlobalKTableInputs(Object[] arguments, int index, String input, Class<?> parameterType, Object targetBean,
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean, StreamsBuilder streamsBuilder,
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties,
|
||||
Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
|
||||
if (firstBuild) {
|
||||
addStateStoreBeans(streamsBuilder);
|
||||
}
|
||||
if (parameterType.isAssignableFrom(KTable.class)) {
|
||||
String materializedAs = extendedConsumerProperties.getMaterializedAs();
|
||||
String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
|
||||
KTable<?, ?> table = getKTable(extendedConsumerProperties, streamsBuilder, keySerde, valueSerde, materializedAs,
|
||||
bindingDestination, autoOffsetReset);
|
||||
KTableBoundElementFactory.KTableWrapper kTableWrapper =
|
||||
(KTableBoundElementFactory.KTableWrapper) targetBean;
|
||||
//wrap the proxy created during the initial target type binding with real object (KTable)
|
||||
kTableWrapper.wrap((KTable<Object, Object>) table);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(input, streamsBuilderFactoryBean);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addConsumerPropertiesPerSbfb(streamsBuilderFactoryBean,
|
||||
bindingServiceProperties.getConsumerProperties(input));
|
||||
arguments[index] = table;
|
||||
}
|
||||
else if (parameterType.isAssignableFrom(GlobalKTable.class)) {
|
||||
String materializedAs = extendedConsumerProperties.getMaterializedAs();
|
||||
String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
|
||||
GlobalKTable<?, ?> table = getGlobalKTable(extendedConsumerProperties, streamsBuilder, keySerde, valueSerde, materializedAs,
|
||||
bindingDestination, autoOffsetReset);
|
||||
GlobalKTableBoundElementFactory.GlobalKTableWrapper globalKTableWrapper =
|
||||
(GlobalKTableBoundElementFactory.GlobalKTableWrapper) targetBean;
|
||||
//wrap the proxy created during the initial target type binding with real object (KTable)
|
||||
globalKTableWrapper.wrap((GlobalKTable<Object, Object>) table);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(input, streamsBuilderFactoryBean);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addConsumerPropertiesPerSbfb(streamsBuilderFactoryBean,
|
||||
bindingServiceProperties.getConsumerProperties(input));
|
||||
arguments[index] = table;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({ "unchecked" })
|
||||
protected StreamsBuilderFactoryBean buildStreamsBuilderAndRetrieveConfig(String beanNamePostPrefix,
|
||||
ApplicationContext applicationContext, String inboundName,
|
||||
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
StreamsBuilderFactoryBeanConfigurer customizer,
|
||||
ConfigurableEnvironment environment, BindingProperties bindingProperties) {
|
||||
ConfigurableListableBeanFactory beanFactory = this.applicationContext
|
||||
.getBeanFactory();
|
||||
|
||||
Map<String, Object> streamConfigGlobalProperties = applicationContext
|
||||
.getBean("streamConfigGlobalProperties", Map.class);
|
||||
|
||||
// Use a copy because the global configuration will be shared by multiple processors.
|
||||
Map<String, Object> streamConfiguration = new HashMap<>(streamConfigGlobalProperties);
|
||||
|
||||
if (kafkaStreamsBinderConfigurationProperties != null) {
|
||||
final Map<String, KafkaStreamsBinderConfigurationProperties.Functions> functionConfigMap = kafkaStreamsBinderConfigurationProperties.getFunctions();
|
||||
if (!CollectionUtils.isEmpty(functionConfigMap)) {
|
||||
final KafkaStreamsBinderConfigurationProperties.Functions functionConfig = functionConfigMap.get(beanNamePostPrefix);
|
||||
if (functionConfig != null) {
|
||||
final Map<String, String> functionSpecificConfig = functionConfig.getConfiguration();
|
||||
if (!CollectionUtils.isEmpty(functionSpecificConfig)) {
|
||||
streamConfiguration.putAll(functionSpecificConfig);
|
||||
}
|
||||
|
||||
String applicationId = functionConfig.getApplicationId();
|
||||
if (!StringUtils.isEmpty(applicationId)) {
|
||||
streamConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final MutablePropertySources propertySources = environment.getPropertySources();
|
||||
|
||||
if (!StringUtils.isEmpty(bindingProperties.getBinder())) {
|
||||
final KafkaStreamsBinderConfigurationProperties multiBinderKafkaStreamsBinderConfigurationProperties =
|
||||
applicationContext.getBean(bindingProperties.getBinder() + "-KafkaStreamsBinderConfigurationProperties", KafkaStreamsBinderConfigurationProperties.class);
|
||||
String connectionString = multiBinderKafkaStreamsBinderConfigurationProperties.getKafkaConnectionString();
|
||||
if (StringUtils.isEmpty(connectionString)) {
|
||||
connectionString = (String) propertySources.get(bindingProperties.getBinder() + "-kafkaStreamsBinderEnv").getProperty("spring.cloud.stream.kafka.binder.brokers");
|
||||
}
|
||||
|
||||
streamConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, connectionString);
|
||||
|
||||
String binderProvidedApplicationId = multiBinderKafkaStreamsBinderConfigurationProperties.getApplicationId();
|
||||
if (StringUtils.hasText(binderProvidedApplicationId)) {
|
||||
streamConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
binderProvidedApplicationId);
|
||||
}
|
||||
|
||||
if (multiBinderKafkaStreamsBinderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.logAndContinue) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndContinueExceptionHandler.class);
|
||||
}
|
||||
else if (multiBinderKafkaStreamsBinderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.logAndFail) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndFailExceptionHandler.class);
|
||||
}
|
||||
else if (multiBinderKafkaStreamsBinderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.sendToDlq) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
RecoveringDeserializationExceptionHandler.class);
|
||||
SendToDlqAndContinue sendToDlqAndContinue = applicationContext.getBean(SendToDlqAndContinue.class);
|
||||
streamConfiguration.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, sendToDlqAndContinue);
|
||||
}
|
||||
|
||||
if (!ObjectUtils.isEmpty(multiBinderKafkaStreamsBinderConfigurationProperties.getConfiguration())) {
|
||||
streamConfiguration.putAll(multiBinderKafkaStreamsBinderConfigurationProperties.getConfiguration());
|
||||
}
|
||||
if (!streamConfiguration.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG)) {
|
||||
streamConfiguration.put(StreamsConfig.REPLICATION_FACTOR_CONFIG,
|
||||
(int) multiBinderKafkaStreamsBinderConfigurationProperties.getReplicationFactor());
|
||||
}
|
||||
}
|
||||
|
||||
//This is used primarily for StreamListener based processors. Although functions can technically use it as well,
//functions should ideally use the approach in the preceding if block, i.e. a property such as
//spring.cloud.stream.kafka.streams.binder.functions.process.configuration.num.threads (assuming that process is the function name).
KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(inboundName);
|
||||
Map<String, String> bindingConfig = extendedConsumerProperties.getConfiguration();
|
||||
Assert.state(!bindingConfig.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG),
|
||||
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + " cannot be overridden at the binding level; "
|
||||
+ "use multiple binders instead");
|
||||
// We will only add the per binding configuration to the current streamConfiguration and not the global one.
|
||||
streamConfiguration.putAll(bindingConfig);
|
||||
|
||||
String bindingLevelApplicationId = extendedConsumerProperties.getApplicationId();
|
||||
// Override application.id if it is set at the individual binding level.
// This is provided for backward compatibility with StreamListener based processors.
// For function-based processors, see the approach used above
// (i.e. use a property like spring.cloud.stream.kafka.streams.binder.functions.process.applicationId).
|
||||
if (StringUtils.hasText(bindingLevelApplicationId)) {
|
||||
streamConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
bindingLevelApplicationId);
|
||||
}
|
||||
|
||||
//If the application id is not set by any mechanism, then generate it.
|
||||
streamConfiguration.computeIfAbsent(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
k -> {
|
||||
String generatedApplicationID = beanNamePostPrefix + "-applicationId";
|
||||
LOG.info("Binder Generated Kafka Streams Application ID: " + generatedApplicationID);
|
||||
LOG.info("Use the binder generated application ID only for development and testing. ");
|
||||
LOG.info("For production deployments, please consider explicitly setting an application ID using a configuration property.");
|
||||
LOG.info("The generated applicationID is static and will be preserved over application restarts.");
|
||||
return generatedApplicationID;
|
||||
});
|
||||
|
||||
handleConcurrency(applicationContext, inboundName, streamConfiguration);
|
||||
|
||||
// Override deserialization exception handlers per binding
|
||||
final DeserializationExceptionHandler deserializationExceptionHandler =
|
||||
extendedConsumerProperties.getDeserializationExceptionHandler();
|
||||
if (deserializationExceptionHandler == DeserializationExceptionHandler.logAndFail) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndFailExceptionHandler.class);
|
||||
}
|
||||
else if (deserializationExceptionHandler == DeserializationExceptionHandler.logAndContinue) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndContinueExceptionHandler.class);
|
||||
}
|
||||
else if (deserializationExceptionHandler == DeserializationExceptionHandler.sendToDlq) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
RecoveringDeserializationExceptionHandler.class);
|
||||
streamConfiguration.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER,
|
||||
applicationContext.getBean(SendToDlqAndContinue.class));
|
||||
}
|
||||
else if (deserializationExceptionHandler == DeserializationExceptionHandler.skipAndContinue) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
SkipAndContinueExceptionHandler.class);
|
||||
}
|
||||
|
||||
KafkaStreamsConfiguration kafkaStreamsConfiguration = new KafkaStreamsConfiguration(streamConfiguration);
|
||||
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.cleanupConfig == null
|
||||
? new StreamsBuilderFactoryBean(kafkaStreamsConfiguration)
|
||||
: new StreamsBuilderFactoryBean(kafkaStreamsConfiguration,
|
||||
this.cleanupConfig);
|
||||
|
||||
streamsBuilderFactoryBean.setAutoStartup(false);
|
||||
BeanDefinition streamsBuilderBeanDefinition = BeanDefinitionBuilder
|
||||
.genericBeanDefinition(
|
||||
(Class<StreamsBuilderFactoryBean>) streamsBuilderFactoryBean.getClass(),
|
||||
() -> streamsBuilderFactoryBean)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(
|
||||
"stream-builder-" + beanNamePostPrefix, streamsBuilderBeanDefinition);
|
||||
|
||||
extendedConsumerProperties.setApplicationId((String) streamConfiguration.get(StreamsConfig.APPLICATION_ID_CONFIG));
|
||||
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBeanFromContext = applicationContext.getBean(
|
||||
"&stream-builder-" + beanNamePostPrefix, StreamsBuilderFactoryBean.class);
|
||||
//At this point, the StreamsBuilderFactoryBean is created. If users call getObject()
//in the customizer, that gives them access to the StreamsBuilder.
|
||||
if (customizer != null) {
|
||||
customizer.configure(streamsBuilderFactoryBean);
|
||||
}
|
||||
return streamsBuilderFactoryBeanFromContext;
|
||||
}
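/*
 * A sketch of the function-level overrides consumed above (property names taken from the comments in this
 * method; the function name "process" is assumed for illustration):
 *
 *   spring.cloud.stream.kafka.streams.binder.functions.process.applicationId=my-process-application
 *   spring.cloud.stream.kafka.streams.binder.functions.process.configuration.num.threads=2
 *
 * Both values end up in streamConfiguration before the StreamsBuilderFactoryBean for this processor is built,
 * so each function can carry its own application.id and Kafka Streams settings.
 */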
private void handleConcurrency(ApplicationContext applicationContext, String inboundName,
|
||||
Map<String, Object> streamConfiguration) {
|
||||
// This rebinding is necessary to capture the concurrency explicitly set by the application.
|
||||
// This is added to fix this issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/899
|
||||
org.springframework.boot.context.properties.bind.Binder explicitConcurrencyResolver =
|
||||
new org.springframework.boot.context.properties.bind.Binder(ConfigurationPropertySources.get(applicationContext.getEnvironment()),
|
||||
new PropertySourcesPlaceholdersResolver(applicationContext.getEnvironment()),
|
||||
IntegrationUtils.getConversionService(this.applicationContext.getBeanFactory()), null);
|
||||
|
||||
boolean[] concurrencyExplicitlyProvided = new boolean[] {false};
|
||||
BindHandler handler = new BindHandler() {
|
||||
|
||||
@Override
|
||||
public Object onSuccess(ConfigurationPropertyName name, Bindable<?> target,
|
||||
BindContext context, Object result) {
|
||||
if (!concurrencyExplicitlyProvided[0]) {
|
||||
|
||||
concurrencyExplicitlyProvided[0] = name.getLastElement(ConfigurationPropertyName.Form.UNIFORM)
|
||||
.equals("concurrency") &&
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.bindings." + inboundName + ".consumer").isAncestorOf(name);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
};
|
||||
//Re-bind spring.cloud.stream properties to check if the application explicitly provided concurrency.
|
||||
try {
|
||||
explicitConcurrencyResolver.bind("spring.cloud.stream",
|
||||
Bindable.ofInstance(new BindingServiceProperties()), handler);
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Ignore this exception
|
||||
}
|
||||
|
||||
int concurrency = this.bindingServiceProperties.getConsumerProperties(inboundName)
|
||||
.getConcurrency();
|
||||
// Override concurrency if it is set at the individual binding level.
// Concurrency is mapped to num.stream.threads.
// This conditional also honors an explicit concurrency setting that happens to equal the default of 1,
// which matters for concurrency behavior in applications with multiple processors.
// See this GH issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/844
|
||||
if (concurrency >= 1 && concurrencyExplicitlyProvided[0]) {
|
||||
streamConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG,
|
||||
concurrency);
|
||||
}
|
||||
}
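/*
 * Effect of the re-binding above, as a sketch (binding name "process-in-0" assumed): an application that
 * explicitly sets
 *
 *   spring.cloud.stream.bindings.process-in-0.consumer.concurrency=3
 *
 * gets num.stream.threads=3 (StreamsConfig.NUM_STREAM_THREADS_CONFIG) for this processor, whereas a
 * concurrency that was simply left at its implicit default of 1 does not override num.stream.threads.
 */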
protected Serde<?> getValueSerde(String inboundName, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, ResolvableType resolvableType) {
|
||||
if (bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding()) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties
|
||||
.getBindingProperties(inboundName);
|
||||
return this.keyValueSerdeResolver.getInboundValueSerde(
|
||||
bindingProperties.getConsumer(), kafkaStreamsConsumerProperties, resolvableType);
|
||||
}
|
||||
else {
|
||||
return Serdes.ByteArray();
|
||||
}
|
||||
}
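/*
 * In other words (a simplification): with native decoding, the Serde resolved from the consumer properties
 * and the declared payload type deserializes records inside Kafka Streams itself; with useNativeDecoding=false
 * the records stay as byte[] here and Spring Cloud Stream's message conversion takes over later (see the
 * getkStream method below, which wraps the payload with its content type).
 */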
@SuppressWarnings({"rawtypes", "unchecked"})
|
||||
protected KStream<?, ?> getKStream(String inboundName, BindingProperties bindingProperties, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
|
||||
if (firstBuild) {
|
||||
addStateStoreBeans(streamsBuilder);
|
||||
}
|
||||
|
||||
final boolean nativeDecoding = this.bindingServiceProperties
|
||||
.getConsumerProperties(inboundName).isUseNativeDecoding();
|
||||
if (nativeDecoding) {
|
||||
LOG.info("Native decoding is enabled for " + inboundName
|
||||
+ ". Inbound deserialization done at the broker.");
|
||||
}
|
||||
else {
|
||||
LOG.info("Native decoding is disabled for " + inboundName
|
||||
+ ". Inbound message conversion done by Spring Cloud Stream.");
|
||||
}
|
||||
|
||||
KStream<?, ?> stream;
|
||||
final Serde<?> valueSerdeToUse = StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes()) ?
|
||||
new Serdes.BytesSerde() : valueSerde;
|
||||
final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerdeToUse, autoOffsetReset);
|
||||
|
||||
if (this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(inboundName).isDestinationIsPattern()) {
|
||||
final Pattern pattern = Pattern.compile(this.bindingServiceProperties.getBindingDestination(inboundName));
|
||||
|
||||
stream = streamsBuilder.stream(pattern, consumed);
|
||||
}
|
||||
else {
|
||||
String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
|
||||
this.bindingServiceProperties.getBindingDestination(inboundName));
|
||||
stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
|
||||
consumed);
|
||||
}
|
||||
//Check to see if event type based routing is enabled.
|
||||
//See this issue for more context: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1003
|
||||
if (StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes())) {
|
||||
AtomicBoolean matched = new AtomicBoolean();
|
||||
// Processor to retrieve the header value.
|
||||
stream.process(() -> eventTypeProcessor(kafkaStreamsConsumerProperties, matched));
|
||||
// Branching based on event type match.
|
||||
final KStream<?, ?>[] branch = stream.branch((key, value) -> matched.getAndSet(false));
|
||||
// Deserialize if we have a branch from above.
|
||||
final KStream<?, Object> deserializedKStream = branch[0].mapValues(value -> valueSerde.deserializer().deserialize(null, ((Bytes) value).get()));
|
||||
return getkStream(bindingProperties, deserializedKStream, nativeDecoding);
|
||||
}
|
||||
return getkStream(bindingProperties, stream, nativeDecoding);
|
||||
}
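/*
 * Event-type routing sketch (binding name, header key and types assumed for illustration): with properties
 * roughly like
 *
 *   spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.eventTypes=created,updated
 *   spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.eventTypeHeaderKey=event_type
 *
 * the topic is first read with a Bytes serde, eventTypeProcessor() compares the record header against the
 * configured event types, and only matching records are deserialized with the real value Serde and passed
 * on; records with a non-matching header never reach the returned KStream.
 */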
private KStream<?, ?> getkStream(BindingProperties bindingProperties, KStream<?, ?> stream, boolean nativeDecoding) {
|
||||
if (!nativeDecoding) {
|
||||
stream = stream.mapValues((value) -> {
|
||||
Object returnValue;
|
||||
String contentType = bindingProperties.getContentType();
|
||||
if (value != null && !StringUtils.isEmpty(contentType)) {
|
||||
returnValue = MessageBuilder.withPayload(value)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
|
||||
}
|
||||
else {
|
||||
returnValue = value;
|
||||
}
|
||||
return returnValue;
|
||||
});
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
private void addStateStoreBeans(StreamsBuilder streamsBuilder) {
|
||||
try {
|
||||
final Map<String, StoreBuilder> storeBuilders = applicationContext.getBeansOfType(StoreBuilder.class);
|
||||
if (!CollectionUtils.isEmpty(storeBuilders)) {
|
||||
storeBuilders.values().forEach(storeBuilder -> {
|
||||
streamsBuilder.addStateStore(storeBuilder);
|
||||
if (LOG.isInfoEnabled()) {
|
||||
LOG.info("state store " + storeBuilder.name() + " added to topology");
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Pass through.
|
||||
}
|
||||
}
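/*
 * Any StoreBuilder bean in the application context is added to the topology here before the first KStream
 * is built. A minimal sketch of such a bean (store name and serdes chosen for illustration):
 *
 *   import org.apache.kafka.common.serialization.Serdes;
 *   import org.apache.kafka.streams.state.KeyValueStore;
 *   import org.apache.kafka.streams.state.StoreBuilder;
 *   import org.apache.kafka.streams.state.Stores;
 *
 *   @Bean
 *   public StoreBuilder<KeyValueStore<String, Long>> myStore() {
 *       // persistent RocksDB-backed store named "my-store"
 *       return Stores.keyValueStoreBuilder(
 *               Stores.persistentKeyValueStore("my-store"), Serdes.String(), Serdes.Long());
 *   }
 *
 * A processor or transformer that declares "my-store" can then read and write it at runtime.
 */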
private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder, String destination, String storeName,
|
||||
Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
|
||||
|
||||
final Consumed<K, V> consumed = getConsumed(kafkaStreamsConsumerProperties, k, v, autoOffsetReset);
|
||||
return streamsBuilder.table(this.bindingServiceProperties.getBindingDestination(destination),
|
||||
consumed, getMaterialized(storeName, k, v));
|
||||
}
|
||||
|
||||
private <K, V> Materialized<K, V, KeyValueStore<Bytes, byte[]>> getMaterialized(
|
||||
String storeName, Serde<K> k, Serde<V> v) {
|
||||
return Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
|
||||
.withKeySerde(k).withValueSerde(v);
|
||||
}
|
||||
|
||||
private <K, V> GlobalKTable<K, V> materializedAsGlobalKTable(
|
||||
StreamsBuilder streamsBuilder, String destination, String storeName,
|
||||
Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
|
||||
final Consumed<K, V> consumed = getConsumed(kafkaStreamsConsumerProperties, k, v, autoOffsetReset);
|
||||
return streamsBuilder.globalTable(
|
||||
this.bindingServiceProperties.getBindingDestination(destination),
|
||||
consumed,
|
||||
getMaterialized(storeName, k, v));
|
||||
}
|
||||
|
||||
private GlobalKTable<?, ?> getGlobalKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
StreamsBuilder streamsBuilder,
|
||||
Serde<?> keySerde, Serde<?> valueSerde, String materializedAs,
|
||||
String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
|
||||
final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
|
||||
return materializedAs != null
|
||||
? materializedAsGlobalKTable(streamsBuilder, bindingDestination,
|
||||
materializedAs, keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
|
||||
: streamsBuilder.globalTable(bindingDestination,
|
||||
consumed);
|
||||
}
|
||||
|
||||
private KTable<?, ?> getKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
StreamsBuilder streamsBuilder, Serde<?> keySerde,
|
||||
Serde<?> valueSerde, String materializedAs, String bindingDestination,
|
||||
Topology.AutoOffsetReset autoOffsetReset) {
|
||||
|
||||
final Serde<?> valueSerdeToUse = StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes()) ?
|
||||
new Serdes.BytesSerde() : valueSerde;
|
||||
|
||||
final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerdeToUse, autoOffsetReset);
|
||||
|
||||
final KTable<?, ?> kTable = materializedAs != null
|
||||
? materializedAs(streamsBuilder, bindingDestination, materializedAs,
|
||||
keySerde, valueSerdeToUse, autoOffsetReset, kafkaStreamsConsumerProperties)
|
||||
: streamsBuilder.table(bindingDestination,
|
||||
consumed);
|
||||
if (StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes())) {
|
||||
AtomicBoolean matched = new AtomicBoolean();
|
||||
final KStream<?, ?> stream = kTable.toStream();
|
||||
|
||||
// Processor to retrieve the header value.
|
||||
stream.process(() -> eventTypeProcessor(kafkaStreamsConsumerProperties, matched));
|
||||
// Branching based on event type match.
|
||||
final KStream<?, ?>[] branch = stream.branch((key, value) -> matched.getAndSet(false));
|
||||
// Deserialize if we have a branch from above.
|
||||
final KStream<?, Object> deserializedKStream = branch[0].mapValues(value -> valueSerde.deserializer().deserialize(null, ((Bytes) value).get()));
|
||||
|
||||
return deserializedKStream.toTable();
|
||||
}
|
||||
return kTable;
|
||||
}
|
||||
|
||||
private <K, V> Consumed<K, V> getConsumed(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
Serde<K> keySerde, Serde<V> valueSerde, Topology.AutoOffsetReset autoOffsetReset) {
|
||||
TimestampExtractor timestampExtractor = null;
|
||||
if (!StringUtils.isEmpty(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName())) {
|
||||
timestampExtractor = applicationContext.getBean(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName(),
|
||||
TimestampExtractor.class);
|
||||
}
|
||||
final Consumed<K, V> consumed = Consumed.with(keySerde, valueSerde)
|
||||
.withOffsetResetPolicy(autoOffsetReset);
|
||||
if (timestampExtractor != null) {
|
||||
consumed.withTimestampExtractor(timestampExtractor);
|
||||
}
|
||||
if (StringUtils.hasText(kafkaStreamsConsumerProperties.getConsumedAs())) {
|
||||
consumed.withName(kafkaStreamsConsumerProperties.getConsumedAs());
|
||||
}
|
||||
return consumed;
|
||||
}
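/*
 * The timestampExtractorBeanName property lets a binding supply its own TimestampExtractor by bean name.
 * A minimal sketch (bean name and fallback logic assumed for illustration):
 *
 *   import org.apache.kafka.clients.consumer.ConsumerRecord;
 *   import org.apache.kafka.streams.processor.TimestampExtractor;
 *
 *   @Bean
 *   public TimestampExtractor recordTimestampExtractor() {
 *       // simply reuse the record's own timestamp
 *       return (ConsumerRecord<Object, Object> record, long partitionTime) -> record.timestamp();
 *   }
 *
 * referenced from the binding via a property such as
 * spring.cloud.stream.kafka.streams.bindings.<binding>.consumer.timestampExtractorBeanName=recordTimestampExtractor
 */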
private <K, V> Processor<K, V> eventTypeProcessor(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, AtomicBoolean matched) {
|
||||
return new Processor() {
|
||||
|
||||
ProcessorContext context;
|
||||
|
||||
@Override
|
||||
public void init(ProcessorContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process(Object key, Object value) {
|
||||
final Headers headers = this.context.headers();
|
||||
final Iterable<Header> eventTypeHeader = headers.headers(kafkaStreamsConsumerProperties.getEventTypeHeaderKey());
|
||||
if (eventTypeHeader != null && eventTypeHeader.iterator().hasNext()) {
|
||||
String eventTypeFromHeader = new String(eventTypeHeader.iterator().next().value());
|
||||
final String[] eventTypesFromBinding = StringUtils.commaDelimitedListToStringArray(kafkaStreamsConsumerProperties.getEventTypes());
|
||||
for (String eventTypeFromBinding : eventTypesFromBinding) {
|
||||
if (eventTypeFromHeader.equals(eventTypeFromBinding)) {
|
||||
matched.set(true);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
/**
|
||||
* Enumeration for various {@link org.apache.kafka.streams.errors.DeserializationExceptionHandler} types.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public enum DeserializationExceptionHandler {
|
||||
|
||||
/**
|
||||
* Deserialization error handler with log and continue.
|
||||
* See {@link org.apache.kafka.streams.errors.LogAndContinueExceptionHandler}
|
||||
*/
|
||||
logAndContinue,
|
||||
/**
|
||||
* Deserialization error handler with log and fail.
|
||||
* See {@link org.apache.kafka.streams.errors.LogAndFailExceptionHandler}
|
||||
*/
|
||||
logAndFail,
|
||||
/**
|
||||
* Deserialization error handler with DLQ send.
|
||||
* See {@link org.springframework.kafka.streams.RecoveringDeserializationExceptionHandler}
|
||||
*/
|
||||
sendToDlq,
|
||||
/**
|
||||
* Deserialization error handler that silently skips the error and continues.
|
||||
*/
|
||||
skipAndContinue;
|
||||
|
||||
}
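/*
 * Each constant maps onto one of the handler classes wired into
 * DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG above (LogAndContinueExceptionHandler,
 * LogAndFailExceptionHandler, RecoveringDeserializationExceptionHandler, SkipAndContinueExceptionHandler).
 * As a sketch (property names assumed from the binder and bindings prefixes used elsewhere in this change):
 *
 *   spring.cloud.stream.kafka.streams.binder.deserializationExceptionHandler=logAndContinue
 *   spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.deserializationExceptionHandler=sendToDlq
 *
 * where the binding-level value, when present, overrides the binder-wide one for that binding.
 */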
@@ -1,72 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationPropertiesBindHandlerAdvisor;
|
||||
import org.springframework.boot.context.properties.bind.AbstractBindHandler;
|
||||
import org.springframework.boot.context.properties.bind.BindContext;
|
||||
import org.springframework.boot.context.properties.bind.BindHandler;
|
||||
import org.springframework.boot.context.properties.bind.BindResult;
|
||||
import org.springframework.boot.context.properties.bind.Bindable;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
|
||||
/**
|
||||
* {@link ConfigurationPropertiesBindHandlerAdvisor} to detect nativeEncoding/Decoding settings
|
||||
* provided by the application explicitly.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class EncodingDecodingBindAdviceHandler implements ConfigurationPropertiesBindHandlerAdvisor {
|
||||
|
||||
private boolean encodingSettingProvided;
|
||||
private boolean decodingSettingProvided;
|
||||
|
||||
public boolean isDecodingSettingProvided() {
|
||||
return decodingSettingProvided;
|
||||
}
|
||||
|
||||
public boolean isEncodingSettingProvided() {
|
||||
return this.encodingSettingProvided;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BindHandler apply(BindHandler bindHandler) {
|
||||
BindHandler handler = new AbstractBindHandler(bindHandler) {
|
||||
@Override
|
||||
public <T> Bindable<T> onStart(ConfigurationPropertyName name,
|
||||
Bindable<T> target, BindContext context) {
|
||||
final String configName = name.toString();
|
||||
if (configName.contains("use") && configName.contains("native") &&
|
||||
(configName.contains("encoding") || configName.contains("decoding"))) {
|
||||
BindResult<T> result = context.getBinder().bind(name, target);
|
||||
if (result.isBound()) {
|
||||
if (configName.contains("encoding")) {
|
||||
EncodingDecodingBindAdviceHandler.this.encodingSettingProvided = true;
|
||||
}
|
||||
else {
|
||||
EncodingDecodingBindAdviceHandler.this.decodingSettingProvided = true;
|
||||
}
|
||||
return target.withExistingValue(result.get());
|
||||
}
|
||||
}
|
||||
return bindHandler.onStart(name, target, context);
|
||||
}
|
||||
};
|
||||
return handler;
|
||||
}
|
||||
}
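/*
 * The advisor only records whether the application itself bound a use-native-encoding/decoding flag, so the
 * binder can default those flags to true without clobbering an explicit choice. For example (binding name
 * assumed), setting
 *
 *   spring.cloud.stream.bindings.process-in-0.consumer.useNativeDecoding=false
 *
 * marks decodingSettingProvided=true, and the bound element factories then leave native decoding off for
 * that binding instead of forcing it to true.
 */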
@@ -1,51 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
import org.springframework.cloud.stream.config.BindingHandlerAdvise.MappingsProvider;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* {@link EnableAutoConfiguration Auto-configuration} for extended binding metadata for Kafka Streams.
|
||||
*
|
||||
* @author Chris Bono
|
||||
* @since 3.2
|
||||
*/
|
||||
@Configuration(proxyBeanMethods = false)
|
||||
public class ExtendedBindingHandlerMappingsProviderAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
public MappingsProvider kafkaStreamsExtendedPropertiesDefaultMappingsProvider() {
|
||||
return () -> {
|
||||
Map<ConfigurationPropertyName, ConfigurationPropertyName> mappings = new HashMap<>();
|
||||
mappings.put(
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams"),
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams.default"));
|
||||
mappings.put(
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams.bindings"),
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams.default"));
|
||||
return mappings;
|
||||
};
|
||||
}
|
||||
|
||||
}
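/*
 * With these mappings, defaults declared once under "spring.cloud.stream.kafka.streams.default" apply to
 * every Kafka Streams binding unless overridden per binding, e.g. (binding name assumed):
 *
 *   spring.cloud.stream.kafka.streams.default.consumer.deserializationExceptionHandler=logAndContinue
 *   spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.deserializationExceptionHandler=sendToDlq
 *
 * The second, more specific property wins for that one binding; all others fall back to the default.
 */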
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,8 +16,8 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
@@ -32,8 +32,6 @@ import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStr
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -56,22 +54,20 @@ public class GlobalKTableBinder extends
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
private final Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers;
|
||||
|
||||
// @checkstyle:off
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
// @checkstyle:on
|
||||
|
||||
public GlobalKTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue, KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.kafkaStreamsDlqDispatchers = kafkaStreamsDlqDispatchers;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -80,55 +76,13 @@ public class GlobalKTableBinder extends
|
||||
String group, GlobalKTable<Object, Object> inputTarget,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = properties.getExtension().getApplicationId();
|
||||
group = this.binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
final RetryTemplate retryTemplate = buildRetryTemplate(properties);
|
||||
|
||||
final String bindingName = this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget);
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeanPerBinding().get(bindingName);
|
||||
|
||||
KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
|
||||
getApplicationContext(), this.kafkaTopicProvisioner,
|
||||
this.binderConfigurationProperties, properties, retryTemplate, getBeanFactory(),
|
||||
this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget),
|
||||
this.kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
|
||||
return new DefaultBinding<GlobalKTable<Object, Object>>(bindingName, group, inputTarget, streamsBuilderFactoryBean) {
|
||||
|
||||
@Override
|
||||
public boolean isInput() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void start() {
|
||||
if (!streamsBuilderFactoryBean.isRunning()) {
|
||||
super.start();
|
||||
GlobalKTableBinder.this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
|
||||
//If we cached the previous KafkaStreams object (from a binding stop on the actuator), remove it.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
if (kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams().containsKey(applicationId)) {
|
||||
kafkaStreamsBindingInformationCatalogue.removePreviousKafkaStreamsForApplicationId(applicationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void stop() {
|
||||
if (streamsBuilderFactoryBean.isRunning()) {
|
||||
final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
super.stop();
|
||||
GlobalKTableBinder.this.kafkaStreamsRegistry.unregisterKafkaStreams(kafkaStreams);
|
||||
KafkaStreamsBinderUtils.closeDlqProducerFactories(kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
//Caching the stopped KafkaStreams for health indicator purposes on the underlying processor.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
GlobalKTableBinder.this.kafkaStreamsBindingInformationCatalogue.addPreviousKafkaStreamsForApplicationId(
|
||||
(String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG), kafkaStreams);
|
||||
}
|
||||
}
|
||||
};
|
||||
this.binderConfigurationProperties, properties,
|
||||
this.kafkaStreamsDlqDispatchers);
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -164,9 +118,4 @@ public class GlobalKTableBinder extends
|
||||
.getExtendedPropertiesEntryClass();
|
||||
}
|
||||
|
||||
public void setKafkaStreamsExtendedBindingProperties(
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -18,20 +18,16 @@ package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.AdminClientConfigCustomizer;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
|
||||
* Configuration for GlobalKTable binder.
|
||||
@@ -40,55 +36,41 @@ import org.springframework.context.annotation.Import;
|
||||
* @since 2.1.0
|
||||
*/
|
||||
@Configuration
|
||||
@Import({ KafkaAutoConfiguration.class,
|
||||
MultiBinderPropertiesConfiguration.class,
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class,
|
||||
KafkaStreamsJaasConfiguration.class})
|
||||
public class GlobalKTableBinderConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties, ObjectProvider<AdminClientConfigCustomizer> adminClientConfigCustomizer) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties, adminClientConfigCustomizer.getIfUnique());
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GlobalKTableBinder GlobalKTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
@Qualifier("streamConfigGlobalProperties") Map<String, Object> streamConfigGlobalProperties,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
|
||||
GlobalKTableBinder globalKTableBinder = new GlobalKTableBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, kafkaStreamsBindingInformationCatalogue, kafkaStreamsRegistry);
|
||||
globalKTableBinder.setKafkaStreamsExtendedBindingProperties(
|
||||
kafkaStreamsExtendedBindingProperties);
|
||||
return globalKTableBinder;
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public static BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
|
||||
return (beanFactory) -> {
|
||||
// It is safe to call getBean("outerContext") here, because this bean is
// registered first and is independent of the parent context.
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory
|
||||
.getBean("outerContext");
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
KafkaStreamsBinderConfigurationProperties.class.getSimpleName(),
|
||||
outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBindingInformationCatalogue.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsBindingInformationCatalogue.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsRegistry.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsRegistry.class));
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GlobalKTableBinder GlobalKTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
@Qualifier("kafkaStreamsDlqDispatchers") Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
return new GlobalKTableBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, kafkaStreamsDlqDispatchers);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2020 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -23,7 +23,6 @@ import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
@@ -40,31 +39,16 @@ public class GlobalKTableBoundElementFactory
|
||||
extends AbstractBindingTargetFactory<GlobalKTable> {
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
private final EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler;
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
GlobalKTableBoundElementFactory(BindingServiceProperties bindingServiceProperties,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
|
||||
GlobalKTableBoundElementFactory(BindingServiceProperties bindingServiceProperties) {
|
||||
super(GlobalKTable.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.encodingDecodingBindAdviceHandler = encodingDecodingBindAdviceHandler;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public GlobalKTable createInput(String name) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ConsumerProperties consumerProperties = bindingProperties.getConsumer();
|
||||
if (consumerProperties == null) {
|
||||
consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
}
|
||||
ConsumerProperties consumerProperties = this.bindingServiceProperties
|
||||
.getConsumerProperties(name);
|
||||
// Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
|
||||
@@ -76,9 +60,7 @@ public class GlobalKTableBoundElementFactory
|
||||
GlobalKTable.class);
|
||||
proxyFactory.addAdvice(wrapper);
|
||||
|
||||
final GlobalKTable proxy = (GlobalKTable) proxyFactory.getProxy();
|
||||
this.kafkaStreamsBindingInformationCatalogue.addBindingNamePerTarget(proxy, name);
|
||||
return proxy;
|
||||
return (GlobalKTable) proxyFactory.getProxy();
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -103,9 +85,8 @@ public class GlobalKTableBoundElementFactory
|
||||
|
||||
public void wrap(GlobalKTable<Object, Object> delegate) {
|
||||
Assert.notNull(delegate, "delegate cannot be null");
|
||||
if (this.delegate == null) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,32 +16,17 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serializer;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.KeyQueryMetadata;
|
||||
import org.apache.kafka.streams.StoreQueryParameters;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.InvalidStateStoreException;
|
||||
import org.apache.kafka.streams.state.HostInfo;
|
||||
import org.apache.kafka.streams.state.QueryableStoreType;
|
||||
import org.apache.kafka.streams.state.StreamsMetadata;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.retry.RetryPolicy;
|
||||
import org.springframework.retry.backoff.FixedBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -52,14 +37,10 @@ import org.springframework.util.StringUtils;
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Renwei Han
|
||||
* @author Serhii Siryi
|
||||
* @author Nico Pommerening
|
||||
* @since 2.1.0
|
||||
*/
|
||||
public class InteractiveQueryService {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(InteractiveQueryService.class);
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
@@ -83,84 +64,18 @@ public class InteractiveQueryService {
|
||||
* @return queryable store.
|
||||
*/
|
||||
public <T> T getQueryableStore(String storeName, QueryableStoreType<T> storeType) {
|
||||
|
||||
RetryTemplate retryTemplate = new RetryTemplate();
|
||||
|
||||
KafkaStreamsBinderConfigurationProperties.StateStoreRetry stateStoreRetry = this.binderConfigurationProperties.getStateStoreRetry();
|
||||
RetryPolicy retryPolicy = new SimpleRetryPolicy(stateStoreRetry.getMaxAttempts());
|
||||
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
|
||||
backOffPolicy.setBackOffPeriod(stateStoreRetry.getBackoffPeriod());
|
||||
|
||||
retryTemplate.setBackOffPolicy(backOffPolicy);
|
||||
retryTemplate.setRetryPolicy(retryPolicy);
|
||||
|
||||
KafkaStreams contextSpecificKafkaStreams = getThreadContextSpecificKafkaStreams();
|
||||
|
||||
return retryTemplate.execute(context -> {
|
||||
T store = null;
|
||||
Throwable throwable = null;
|
||||
if (contextSpecificKafkaStreams != null) {
|
||||
try {
|
||||
store = contextSpecificKafkaStreams.store(
|
||||
StoreQueryParameters.fromNameAndType(
|
||||
storeName, storeType));
|
||||
}
|
||||
catch (InvalidStateStoreException e) {
|
||||
// pass through..
|
||||
throwable = e;
|
||||
for (KafkaStreams kafkaStream : this.kafkaStreamsRegistry.getKafkaStreams()) {
|
||||
try {
|
||||
T store = kafkaStream.store(storeName, storeType);
|
||||
if (store != null) {
|
||||
return store;
|
||||
}
|
||||
}
|
||||
if (store != null) {
|
||||
return store;
|
||||
catch (InvalidStateStoreException ignored) {
|
||||
// pass through
|
||||
}
|
||||
else if (contextSpecificKafkaStreams != null) {
|
||||
LOG.warn("Store " + storeName
|
||||
+ " could not be found in Streams context, falling back to all known Streams instances");
|
||||
}
|
||||
final Set<KafkaStreams> kafkaStreams = kafkaStreamsRegistry.getKafkaStreams();
|
||||
final Iterator<KafkaStreams> iterator = kafkaStreams.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
try {
|
||||
store = iterator.next()
|
||||
.store(StoreQueryParameters.fromNameAndType(
|
||||
storeName, storeType));
|
||||
}
|
||||
catch (InvalidStateStoreException e) {
|
||||
// pass through..
|
||||
throwable = e;
|
||||
}
|
||||
}
|
||||
if (store != null) {
|
||||
return store;
|
||||
}
|
||||
throw new IllegalStateException(
|
||||
"Error when retrieving state store: " + storeName,
|
||||
throwable);
|
||||
});
|
||||
}
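/*
 * Typical use of the retrying lookup above, as a sketch (an injected InteractiveQueryService plus a store
 * name and value types are assumed):
 *
 *   import org.apache.kafka.streams.state.QueryableStoreTypes;
 *   import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
 *
 *   ReadOnlyKeyValueStore<String, Long> counts =
 *           interactiveQueryService.getQueryableStore("counts", QueryableStoreTypes.keyValueStore());
 *   Long count = counts.get("some-key");
 *
 * The retry template keeps re-trying while the store is still restoring or rebalancing, using the binder's
 * stateStoreRetry maxAttempts and backoffPeriod settings configured above.
 */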
/**
|
||||
* Retrieves the current {@link KafkaStreams} context if the executing Thread was created by a Streams App (i.e. the Thread's name contains a matching application id).
|
||||
*
|
||||
* @return KafkaStreams instance associated with Thread
|
||||
*/
|
||||
private KafkaStreams getThreadContextSpecificKafkaStreams() {
|
||||
return this.kafkaStreamsRegistry.getKafkaStreams().stream()
|
||||
.filter(this::filterByThreadName).findAny().orElse(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the supplied {@link KafkaStreams} instance belongs to the calling Thread by matching the Thread's name with the Streams Application Id.
|
||||
*
|
||||
* @param streams {@link KafkaStreams} instance to filter
|
||||
* @return true if Streams Instance is associated with Thread
|
||||
*/
|
||||
private boolean filterByThreadName(KafkaStreams streams) {
|
||||
String applicationId = kafkaStreamsRegistry.streamBuilderFactoryBean(
|
||||
streams).getStreamsConfiguration()
|
||||
.getProperty(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
// TODO: is there some better way to find out if a Stream App created the Thread?
|
||||
return Thread.currentThread().getName().contains(applicationId);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -198,74 +113,15 @@ public class InteractiveQueryService {
|
||||
* @param store store name
|
||||
* @param key key to look for
|
||||
* @param serializer {@link Serializer} for the key
|
||||
* @return the {@link HostInfo} where the key for the provided store is hosted currently
|
||||
* @return the {@link HostInfo} where the key for the provided store is hosted
|
||||
* currently
|
||||
*/
|
||||
public <K> HostInfo getHostInfo(String store, K key, Serializer<K> serializer) {
|
||||
final KeyQueryMetadata keyQueryMetadata = this.kafkaStreamsRegistry.getKafkaStreams()
|
||||
StreamsMetadata streamsMetadata = this.kafkaStreamsRegistry.getKafkaStreams()
|
||||
.stream()
|
||||
.map((k) -> Optional.ofNullable(k.queryMetadataForKey(store, key, serializer)))
|
||||
.map((k) -> Optional.ofNullable(k.metadataForKey(store, key, serializer)))
|
||||
.filter(Optional::isPresent).map(Optional::get).findFirst().orElse(null);
|
||||
return keyQueryMetadata != null ? keyQueryMetadata.getActiveHost() : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves and returns the {@link KeyQueryMetadata} associated with the given combination of
|
||||
* key and state store. If none found, it will return null.
|
||||
*
|
||||
* @param <K> generic type for key
|
||||
* @param store store name
|
||||
* @param key key to look for
|
||||
* @param serializer {@link Serializer} for the key
|
||||
* @return the {@link KeyQueryMetadata} if available, null otherwise.
|
||||
*/
|
||||
public <K> KeyQueryMetadata getKeyQueryMetadata(String store, K key, Serializer<K> serializer) {
|
||||
return this.kafkaStreamsRegistry.getKafkaStreams()
|
||||
.stream()
|
||||
.map((k) -> Optional.ofNullable(k.queryMetadataForKey(store, key, serializer)))
|
||||
.filter(Optional::isPresent).map(Optional::get).findFirst().orElse(null);
|
||||
}
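/*
 * These metadata lookups support routing interactive queries across instances, for example (a sketch;
 * currentHostInfo and the forwarding step are application specific):
 *
 *   import org.apache.kafka.common.serialization.StringSerializer;
 *   import org.apache.kafka.streams.state.HostInfo;
 *
 *   HostInfo host = interactiveQueryService.getHostInfo("counts", "some-key", new StringSerializer());
 *   if (host != null && !host.equals(currentHostInfo)) {
 *       // forward the query to host.host() + ":" + host.port() instead of reading the local store
 *   }
 *
 * This only works when every instance sets the application.server property, which is what Kafka Streams
 * uses to advertise host metadata.
 */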
/**
|
||||
* Retrieves and returns the {@link KafkaStreams} object that is associated with the given combination of
|
||||
* key and state store. If none found, it will return null.
|
||||
*
|
||||
* @param <K> generic type for key
|
||||
* @param store store name
|
||||
* @param key key to look for
|
||||
* @param serializer {@link Serializer} for the key
|
||||
* @return {@link KafkaStreams} object associated with this combination of store and key
|
||||
*/
|
||||
public <K> KafkaStreams getKafkaStreams(String store, K key, Serializer<K> serializer) {
|
||||
final AtomicReference<KafkaStreams> kafkaStreamsAtomicReference = new AtomicReference<>();
|
||||
this.kafkaStreamsRegistry.getKafkaStreams()
|
||||
.forEach(k -> {
|
||||
final KeyQueryMetadata keyQueryMetadata = k.queryMetadataForKey(store, key, serializer);
|
||||
if (keyQueryMetadata != null) {
|
||||
kafkaStreamsAtomicReference.set(k);
|
||||
}
|
||||
});
|
||||
return kafkaStreamsAtomicReference.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the list of {@link HostInfo} where the provided store is hosted.
* It can also include the current host's info.
* Kafka Streams looks through all the consumer instances under the same application id
* and retrieves the host info for each of them.
|
||||
*
|
||||
* Note that the end-user applications must provide `application.server` as a configuration property
|
||||
* for all the application instances when calling this method. If this is not available, then an empty list will be returned.
|
||||
*
|
||||
* @param store store name
|
||||
* @return the list of {@link HostInfo} where provided store is hosted on
|
||||
*/
|
||||
public List<HostInfo> getAllHostsInfo(String store) {
|
||||
return kafkaStreamsRegistry.getKafkaStreams()
|
||||
.stream()
|
||||
.flatMap(k -> k.allMetadataForStore(store).stream())
|
||||
.filter(Objects::nonNull)
|
||||
.map(StreamsMetadata::hostInfo)
|
||||
.collect(Collectors.toList());
|
||||
return streamsMetadata != null ? streamsMetadata.hostInfo() : null;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017-2021 the original author or authors.
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,19 +16,14 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Properties;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Produced;
|
||||
import org.apache.kafka.streams.processor.StreamPartitioner;
|
||||
|
||||
import org.springframework.aop.framework.Advised;
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
@@ -42,8 +37,6 @@ import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStr
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -80,190 +73,76 @@ class KStreamBinder extends
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
private final Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers;
|
||||
|
||||
KStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver, KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
|
||||
this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.kafkaStreamsDlqDispatchers = kafkaStreamsDlqDispatchers;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
|
||||
KStream<Object, Object> inputTarget,
|
||||
// @checkstyle:off
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
|
||||
KStream<Object, Object> delegate = ((KStreamBoundElementFactory.KStreamWrapperHandler)
|
||||
((Advised) inputTarget).getAdvisors()[0].getAdvice()).getDelegate();
|
||||
|
||||
this.kafkaStreamsBindingInformationCatalogue.registerConsumerProperties(delegate, properties.getExtension());
|
||||
|
||||
// @checkstyle:on
|
||||
this.kafkaStreamsBindingInformationCatalogue
|
||||
.registerConsumerProperties(inputTarget, properties.getExtension());
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = properties.getExtension().getApplicationId();
|
||||
group = this.binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
|
||||
final RetryTemplate retryTemplate = buildRetryTemplate(properties);
|
||||
|
||||
final String bindingName = this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget);
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeanPerBinding().get(bindingName);
|
||||
|
||||
KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
|
||||
getApplicationContext(), this.kafkaTopicProvisioner,
|
||||
this.binderConfigurationProperties, properties, retryTemplate, getBeanFactory(),
|
||||
this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget),
|
||||
this.kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
this.binderConfigurationProperties, properties,
|
||||
this.kafkaStreamsDlqDispatchers);
|
||||
|
||||
|
||||
return new DefaultBinding<KStream<Object, Object>>(bindingName, group,
|
||||
inputTarget, streamsBuilderFactoryBean) {
|
||||
|
||||
@Override
|
||||
public boolean isInput() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void start() {
|
||||
if (!streamsBuilderFactoryBean.isRunning()) {
|
||||
super.start();
|
||||
KStreamBinder.this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
|
||||
//If we cached the previous KafkaStreams object (from a binding stop on the actuator), remove it.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
if (kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams().containsKey(applicationId)) {
|
||||
kafkaStreamsBindingInformationCatalogue.removePreviousKafkaStreamsForApplicationId(applicationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void stop() {
|
||||
if (streamsBuilderFactoryBean.isRunning()) {
|
||||
final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
super.stop();
|
||||
KStreamBinder.this.kafkaStreamsRegistry.unregisterKafkaStreams(kafkaStreams);
|
||||
KafkaStreamsBinderUtils.closeDlqProducerFactories(kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
//Caching the stopped KafkaStreams for health indicator purposes on the underlying processor.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
KStreamBinder.this.kafkaStreamsBindingInformationCatalogue.addPreviousKafkaStreamsForApplicationId(
|
||||
(String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG), kafkaStreams);
|
||||
}
|
||||
}
|
||||
};
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<KStream<Object, Object>> doBindProducer(String name,
|
||||
KStream<Object, Object> outboundBindTarget,
|
||||
// @checkstyle:off
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
|
||||
ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties =
|
||||
(ExtendedProducerProperties) properties;
|
||||
|
||||
this.kafkaTopicProvisioner.provisionProducerDestination(name, extendedProducerProperties);
|
||||
// @checkstyle:on
|
||||
ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties = new ExtendedProducerProperties<>(
|
||||
new KafkaProducerProperties());
|
||||
this.kafkaTopicProvisioner.provisionProducerDestination(name,
|
||||
extendedProducerProperties);
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver
|
||||
.getOuboundKeySerde(properties.getExtension(), kafkaStreamsBindingInformationCatalogue.getOutboundKStreamResolvable(outboundBindTarget));
|
||||
LOG.info("Key Serde used for (outbound) " + name + ": " + keySerde.getClass().getName());
|
||||
|
||||
Serde<?> valueSerde;
|
||||
if (properties.isUseNativeEncoding()) {
|
||||
valueSerde = this.keyValueSerdeResolver.getOutboundValueSerde(properties,
|
||||
properties.getExtension(), kafkaStreamsBindingInformationCatalogue.getOutboundKStreamResolvable(outboundBindTarget));
|
||||
}
|
||||
else {
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
LOG.info("Value Serde used for (outbound) " + name + ": " + valueSerde.getClass().getName());
|
||||
|
||||
.getOuboundKeySerde(properties.getExtension());
|
||||
Serde<?> valueSerde = this.keyValueSerdeResolver.getOutboundValueSerde(properties,
|
||||
properties.getExtension());
|
||||
to(properties.isUseNativeEncoding(), name, outboundBindTarget,
|
||||
(Serde<Object>) keySerde, (Serde<Object>) valueSerde, properties.getExtension());
|
||||
|
||||
final String bindingName = this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(outboundBindTarget);
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeanPerBinding().get(bindingName);
|
||||
|
||||
// We need the application id to pass to DefaultBinding so that it won't be interpreted as an anonymous group.
|
||||
// In case, if we can't find application.id (which is unlikely), we just default to bindingName.
|
||||
// This will only be used for lifecycle management through actuator endpoints.
|
||||
final Properties streamsConfiguration = streamsBuilderFactoryBean.getStreamsConfiguration();
|
||||
final String applicationId = streamsConfiguration != null ? (String) streamsConfiguration.get("application.id") : bindingName;
|
||||
|
||||
return new DefaultBinding<KStream<Object, Object>>(bindingName,
|
||||
applicationId, outboundBindTarget, streamsBuilderFactoryBean) {
|
||||
|
||||
@Override
|
||||
public boolean isInput() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void start() {
|
||||
if (!streamsBuilderFactoryBean.isRunning()) {
|
||||
super.start();
|
||||
KStreamBinder.this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
|
||||
//If we cached the previous KafkaStreams object (from a binding stop on the actuator), remove it.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
if (kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams().containsKey(applicationId)) {
|
||||
kafkaStreamsBindingInformationCatalogue.removePreviousKafkaStreamsForApplicationId(applicationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void stop() {
|
||||
if (streamsBuilderFactoryBean.isRunning()) {
|
||||
final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
super.stop();
|
||||
KStreamBinder.this.kafkaStreamsRegistry.unregisterKafkaStreams(kafkaStreams);
|
||||
KafkaStreamsBinderUtils.closeDlqProducerFactories(kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
//Caching the stopped KafkaStreams for health indicator purposes on the underlying processor
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
KStreamBinder.this.kafkaStreamsBindingInformationCatalogue.addPreviousKafkaStreamsForApplicationId(
|
||||
(String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG), kafkaStreams);
|
||||
}
|
||||
}
|
||||
};
|
||||
(Serde<Object>) keySerde, (Serde<Object>) valueSerde);
|
||||
return new DefaultBinding<>(name, null, outboundBindTarget, null);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void to(boolean isNativeEncoding, String name,
|
||||
KStream<Object, Object> outboundBindTarget, Serde<Object> keySerde,
|
||||
Serde<Object> valueSerde, KafkaStreamsProducerProperties properties) {
|
||||
final Produced<Object, Object> produced = Produced.with(keySerde, valueSerde);
|
||||
if (StringUtils.hasText(properties.getProducedAs())) {
|
||||
produced.withName(properties.getProducedAs());
|
||||
}
|
||||
StreamPartitioner streamPartitioner = null;
|
||||
if (!StringUtils.isEmpty(properties.getStreamPartitionerBeanName())) {
|
||||
streamPartitioner = getApplicationContext().getBean(properties.getStreamPartitionerBeanName(),
|
||||
StreamPartitioner.class);
|
||||
}
|
||||
if (streamPartitioner != null) {
|
||||
produced.withStreamPartitioner(streamPartitioner);
|
||||
}
|
||||
KStream<Object, Object> outboundBindTarget, Serde<Object> keySerde,
|
||||
Serde<Object> valueSerde) {
|
||||
if (!isNativeEncoding) {
|
||||
LOG.info("Native encoding is disabled for " + name
|
||||
+ ". Outbound message conversion done by Spring Cloud Stream.");
|
||||
outboundBindTarget.filter((k, v) -> v == null)
|
||||
.to(name, produced);
|
||||
this.kafkaStreamsMessageConversionDelegate
|
||||
.serializeOnOutbound(outboundBindTarget)
|
||||
.to(name, produced);
|
||||
.to(name, Produced.with(keySerde, valueSerde));
|
||||
}
|
||||
else {
|
||||
LOG.info("Native encoding is enabled for " + name
|
||||
+ ". Outbound serialization done at the broker.");
|
||||
outboundBindTarget.to(name, produced);
|
||||
outboundBindTarget.to(name, Produced.with(keySerde, valueSerde));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017-2021 the original author or authors.
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,12 +16,13 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.AdminClientConfigCustomizer;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
@@ -39,17 +40,15 @@ import org.springframework.context.annotation.Import;
|
||||
*/
|
||||
@Configuration
|
||||
@Import({ KafkaAutoConfiguration.class,
|
||||
MultiBinderPropertiesConfiguration.class,
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class,
|
||||
KafkaStreamsJaasConfiguration.class})
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class })
|
||||
public class KStreamBinderConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties, ObjectProvider<AdminClientConfigCustomizer> adminClientConfigCustomizer) {
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(kafkaStreamsBinderConfigurationProperties,
|
||||
kafkaProperties, adminClientConfigCustomizer.getIfUnique());
|
||||
kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
@@ -59,10 +58,12 @@ public class KStreamBinderConfiguration {
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties, KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
@Qualifier("kafkaStreamsDlqDispatchers") Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
KStreamBinder kStreamBinder = new KStreamBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, KafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue, keyValueSerdeResolver, kafkaStreamsRegistry);
|
||||
KafkaStreamsBindingInformationCatalogue, keyValueSerdeResolver,
|
||||
kafkaStreamsDlqDispatchers);
|
||||
kStreamBinder.setKafkaStreamsExtendedBindingProperties(
|
||||
kafkaStreamsExtendedBindingProperties);
|
||||
return kStreamBinder;
|
||||
@@ -78,6 +79,10 @@ public class KStreamBinderConfiguration {
|
||||
// and as independent from the parent context.
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory
|
||||
.getBean("outerContext");
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBinderConfigurationProperties.class.getSimpleName(),
|
||||
outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsMessageConversionDelegate.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsMessageConversionDelegate.class));
|
||||
@@ -89,9 +94,6 @@ public class KStreamBinderConfiguration {
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsRegistry.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsRegistry.class));
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017-2020 the original author or authors.
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -22,7 +22,6 @@ import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
@@ -43,30 +42,18 @@ class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
private final EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler;
|
||||
|
||||
KStreamBoundElementFactory(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
super(KStream.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
this.encodingDecodingBindAdviceHandler = encodingDecodingBindAdviceHandler;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KStream createInput(String name) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ConsumerProperties consumerProperties = bindingProperties.getConsumer();
|
||||
if (consumerProperties == null) {
|
||||
consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
}
|
||||
ConsumerProperties consumerProperties = this.bindingServiceProperties
|
||||
.getConsumerProperties(name);
|
||||
// Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
return createProxyForKStream(name);
|
||||
@@ -75,18 +62,6 @@ class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public KStream createOutput(final String name) {
|
||||
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ProducerProperties producerProperties = bindingProperties.getProducer();
|
||||
if (producerProperties == null) {
|
||||
producerProperties = this.bindingServiceProperties.getProducerProperties(name);
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isEncodingSettingProvided()) {
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
}
|
||||
}
|
||||
return createProxyForKStream(name);
|
||||
}
|
||||
|
||||
@@ -103,7 +78,6 @@ class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
.getBindingProperties(name);
|
||||
this.kafkaStreamsBindingInformationCatalogue.registerBindingProperties(proxy,
|
||||
bindingProperties);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addBindingNamePerTarget(proxy, name);
|
||||
return proxy;
|
||||
}
|
||||
|
||||
@@ -116,16 +90,15 @@ class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
|
||||
}
|
||||
|
||||
static class KStreamWrapperHandler
|
||||
private static class KStreamWrapperHandler
|
||||
implements KStreamWrapper, MethodInterceptor {
|
||||
|
||||
private KStream<Object, Object> delegate;
|
||||
|
||||
public void wrap(KStream<Object, Object> delegate) {
|
||||
Assert.notNull(delegate, "delegate cannot be null");
|
||||
if (this.delegate == null) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -148,9 +121,6 @@ class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
}
|
||||
}
|
||||
|
||||
KStream<Object, Object> getDelegate() {
|
||||
return delegate;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -44,7 +44,8 @@ class KStreamStreamListenerParameterAdapter
|
||||
|
||||
@Override
|
||||
public boolean supports(Class bindingTargetType, MethodParameter methodParameter) {
|
||||
return KafkaStreamsBinderUtils.supportsKStream(methodParameter, bindingTargetType);
|
||||
return KStream.class.isAssignableFrom(bindingTargetType)
|
||||
&& KStream.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,8 +16,8 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
@@ -32,8 +32,6 @@ import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStr
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -46,30 +44,30 @@ import org.springframework.util.StringUtils;
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KTableBinder extends
|
||||
// @checkstyle:off
|
||||
AbstractBinder<KTable<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
|
||||
implements
|
||||
ExtendedPropertiesBinder<KTable<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {
|
||||
|
||||
// @checkstyle:on
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
private Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers;
|
||||
|
||||
// @checkstyle:off
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
|
||||
|
||||
// @checkstyle:on
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
KTableBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue, KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.kafkaStreamsDlqDispatchers = kafkaStreamsDlqDispatchers;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -80,62 +78,21 @@ class KTableBinder extends
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
// @checkstyle:on
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = properties.getExtension().getApplicationId();
|
||||
group = this.binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
|
||||
final RetryTemplate retryTemplate = buildRetryTemplate(properties);
|
||||
|
||||
final String bindingName = this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget);
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeanPerBinding().get(bindingName);
|
||||
|
||||
KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
|
||||
getApplicationContext(), this.kafkaTopicProvisioner,
|
||||
this.binderConfigurationProperties, properties, retryTemplate, getBeanFactory(),
|
||||
this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget),
|
||||
this.kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
|
||||
return new DefaultBinding<KTable<Object, Object>>(bindingName, group, inputTarget, streamsBuilderFactoryBean) {
|
||||
|
||||
@Override
|
||||
public boolean isInput() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void start() {
|
||||
if (!streamsBuilderFactoryBean.isRunning()) {
|
||||
super.start();
|
||||
KTableBinder.this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
|
||||
//If we cached the previous KafkaStreams object (from a binding stop on the actuator), remove it.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
if (kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams().containsKey(applicationId)) {
|
||||
kafkaStreamsBindingInformationCatalogue.removePreviousKafkaStreamsForApplicationId(applicationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void stop() {
|
||||
if (streamsBuilderFactoryBean.isRunning()) {
|
||||
final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
super.stop();
|
||||
KTableBinder.this.kafkaStreamsRegistry.unregisterKafkaStreams(kafkaStreams);
|
||||
KafkaStreamsBinderUtils.closeDlqProducerFactories(kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
|
||||
//Caching the stopped KafkaStreams for health indicator purposes on the underlying processor.
|
||||
//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
KTableBinder.this.kafkaStreamsBindingInformationCatalogue.addPreviousKafkaStreamsForApplicationId(
|
||||
(String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG), kafkaStreams);
|
||||
}
|
||||
}
|
||||
};
|
||||
this.binderConfigurationProperties, properties,
|
||||
this.kafkaStreamsDlqDispatchers);
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<KTable<Object, Object>> doBindProducer(String name,
|
||||
KTable<Object, Object> outboundBindTarget,
|
||||
// @checkstyle:off
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
// @checkstyle:on
|
||||
throw new UnsupportedOperationException(
|
||||
"No producer level binding is allowed for KTable");
|
||||
}
|
||||
@@ -165,8 +122,4 @@ class KTableBinder extends
|
||||
.getExtendedPropertiesEntryClass();
|
||||
}
|
||||
|
||||
public void setKafkaStreamsExtendedBindingProperties(
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -18,20 +18,16 @@ package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.AdminClientConfigCustomizer;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
|
||||
* Configuration for KTable binder.
|
||||
@@ -40,54 +36,42 @@ import org.springframework.context.annotation.Import;
|
||||
*/
|
||||
@SuppressWarnings("ALL")
|
||||
@Configuration
|
||||
@Import({ KafkaAutoConfiguration.class,
|
||||
MultiBinderPropertiesConfiguration.class,
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class,
|
||||
KafkaStreamsJaasConfiguration.class})
|
||||
public class KTableBinderConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties, ObjectProvider<AdminClientConfigCustomizer> adminClientConfigCustomizer) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties, adminClientConfigCustomizer.getIfUnique());
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KTableBinder kTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
@Qualifier("streamConfigGlobalProperties") Map<String, Object> streamConfigGlobalProperties,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
KTableBinder kTableBinder = new KTableBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, kafkaStreamsBindingInformationCatalogue, kafkaStreamsRegistry);
|
||||
kTableBinder.setKafkaStreamsExtendedBindingProperties(kafkaStreamsExtendedBindingProperties);
|
||||
return kTableBinder;
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public static BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
|
||||
return (beanFactory) -> {
|
||||
// It is safe to call getBean("outerContext") here, because this bean is
|
||||
// registered as first
|
||||
// and as independent from the parent context.
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory
|
||||
.getBean("outerContext");
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
KafkaStreamsBinderConfigurationProperties.class.getSimpleName(),
|
||||
outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBindingInformationCatalogue.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsBindingInformationCatalogue.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsRegistry.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsRegistry.class));
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KTableBinder kTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
@Qualifier("kafkaStreamsDlqDispatchers") Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
KTableBinder kStreamBinder = new KTableBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, kafkaStreamsDlqDispatchers);
|
||||
return kStreamBinder;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2020 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -23,7 +23,6 @@ import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
@@ -38,31 +37,16 @@ import org.springframework.util.Assert;
|
||||
class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
private final EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler;
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
KTableBoundElementFactory(BindingServiceProperties bindingServiceProperties,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
|
||||
KTableBoundElementFactory(BindingServiceProperties bindingServiceProperties) {
|
||||
super(KTable.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.encodingDecodingBindAdviceHandler = encodingDecodingBindAdviceHandler;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KTable createInput(String name) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(name);
|
||||
ConsumerProperties consumerProperties = bindingProperties.getConsumer();
|
||||
if (consumerProperties == null) {
|
||||
consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
else {
|
||||
if (!encodingDecodingBindAdviceHandler.isDecodingSettingProvided()) {
|
||||
consumerProperties.setUseNativeDecoding(true);
|
||||
}
|
||||
}
|
||||
ConsumerProperties consumerProperties = this.bindingServiceProperties
|
||||
.getConsumerProperties(name);
|
||||
// Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
|
||||
@@ -71,9 +55,7 @@ class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {
|
||||
KTableBoundElementFactory.KTableWrapper.class, KTable.class);
|
||||
proxyFactory.addAdvice(wrapper);
|
||||
|
||||
final KTable proxy = (KTable) proxyFactory.getProxy();
|
||||
this.kafkaStreamsBindingInformationCatalogue.addBindingNamePerTarget(proxy, name);
|
||||
return proxy;
|
||||
return (KTable) proxyFactory.getProxy();
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -99,9 +81,8 @@ class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {
|
||||
|
||||
public void wrap(KTable<Object, Object> delegate) {
|
||||
Assert.notNull(delegate, "delegate cannot be null");
|
||||
if (this.delegate == null) {
|
||||
this.delegate = delegate;
|
||||
}
|
||||
Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2017-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,25 +16,31 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* Application support configuration for Kafka Streams binder.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.2
|
||||
*/
|
||||
@Configuration
|
||||
public class MultiBinderPropertiesConfiguration {
|
||||
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
|
||||
public class KafkaStreamsApplicationSupportAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.streams.binder")
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public KafkaBinderConfigurationProperties binderConfigurationProperties(KafkaProperties kafkaProperties) {
|
||||
return new KafkaStreamsBinderConfigurationProperties(kafkaProperties);
|
||||
@ConditionalOnProperty("spring.cloud.stream.kafka.streams.timeWindow.length")
|
||||
public TimeWindows configuredTimeWindow(
|
||||
KafkaStreamsApplicationSupportProperties processorProperties) {
|
||||
return processorProperties.getTimeWindow().getAdvanceBy() > 0
|
||||
? TimeWindows.of(processorProperties.getTimeWindow().getLength())
|
||||
.advanceBy(processorProperties.getTimeWindow().getAdvanceBy())
|
||||
: TimeWindows.of(processorProperties.getTimeWindow().getLength());
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,182 +16,52 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.time.Duration;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.Lock;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.ListTopicsResult;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.TaskMetadata;
|
||||
import org.apache.kafka.streams.ThreadMetadata;
|
||||
import org.apache.kafka.streams.processor.TaskMetadata;
|
||||
import org.apache.kafka.streams.processor.ThreadMetadata;
|
||||
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.boot.actuate.health.AbstractHealthIndicator;
|
||||
import org.springframework.boot.actuate.health.Health;
|
||||
import org.springframework.boot.actuate.health.Status;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* Health indicator for Kafka Streams.
|
||||
*
|
||||
* @author Arnaud Jardiné
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsBinderHealthIndicator extends AbstractHealthIndicator implements DisposableBean {
|
||||
|
||||
/**
|
||||
* Static initialization for detecting whether the application is using Kafka client 2.5 vs lower versions.
|
||||
*/
|
||||
private static ClassLoader CLASS_LOADER = KafkaStreamsBinderHealthIndicator.class.getClassLoader();
|
||||
private static boolean isKafkaStreams25 = true;
|
||||
private static Method methodForIsRunning;
|
||||
|
||||
static {
|
||||
try {
|
||||
Class<?> KAFKA_STREAMS_STATE_CLASS = CLASS_LOADER.loadClass("org.apache.kafka.streams.KafkaStreams$State");
|
||||
|
||||
Method[] declaredMethods = KAFKA_STREAMS_STATE_CLASS.getDeclaredMethods();
|
||||
for (Method m : declaredMethods) {
|
||||
if (m.getName().equals("isRunning")) {
|
||||
isKafkaStreams25 = false;
|
||||
methodForIsRunning = m;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (ClassNotFoundException e) {
|
||||
throw new IllegalStateException("KafkaStreams$State class not found", e);
|
||||
}
|
||||
}
|
||||
class KafkaStreamsBinderHealthIndicator extends AbstractHealthIndicator {
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private final Map<String, Object> adminClientProperties;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private AdminClient adminClient;
|
||||
|
||||
private final Lock lock = new ReentrantLock();
|
||||
|
||||
KafkaStreamsBinderHealthIndicator(KafkaStreamsRegistry kafkaStreamsRegistry,
|
||||
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
|
||||
KafkaStreamsBinderHealthIndicator(KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
super("Kafka-streams health check failed");
|
||||
kafkaProperties.buildAdminProperties();
|
||||
this.configurationProperties = kafkaStreamsBinderConfigurationProperties;
|
||||
this.adminClientProperties = kafkaProperties.buildAdminProperties();
|
||||
KafkaTopicProvisioner.normalalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties,
|
||||
kafkaStreamsBinderConfigurationProperties);
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doHealthCheck(Health.Builder builder) throws Exception {
|
||||
try {
|
||||
this.lock.lock();
|
||||
if (this.adminClient == null) {
|
||||
this.adminClient = AdminClient.create(this.adminClientProperties);
|
||||
}
|
||||
|
||||
final ListTopicsResult listTopicsResult = this.adminClient.listTopics();
|
||||
listTopicsResult.listings().get(this.configurationProperties.getHealthTimeout(), TimeUnit.SECONDS);
|
||||
|
||||
if (this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans().isEmpty()) {
|
||||
builder.withDetail("No Kafka Streams bindings have been established", "Kafka Streams binder did not detect any processors");
|
||||
builder.status(Status.UNKNOWN);
|
||||
}
|
||||
else {
|
||||
boolean up = true;
|
||||
final Set<KafkaStreams> kafkaStreams = kafkaStreamsRegistry.getKafkaStreams();
|
||||
Set<KafkaStreams> allKafkaStreams = new HashSet<>(kafkaStreams);
|
||||
if (this.configurationProperties.isIncludeStoppedProcessorsForHealthCheck()) {
|
||||
allKafkaStreams.addAll(kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams().values());
|
||||
}
|
||||
for (KafkaStreams kStream : allKafkaStreams) {
|
||||
if (isKafkaStreams25) {
|
||||
up &= kStream.state().isRunningOrRebalancing();
|
||||
}
|
||||
else {
|
||||
// if Kafka client version is lower than 2.5, then call the method reflectively.
|
||||
final boolean isRunningInvokedResult = (boolean) methodForIsRunning.invoke(kStream.state());
|
||||
up &= isRunningInvokedResult;
|
||||
}
|
||||
builder.withDetails(buildDetails(kStream));
|
||||
}
|
||||
builder.status(up ? Status.UP : Status.DOWN);
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
builder.withDetail("No topic information available", "Kafka broker is not reachable");
|
||||
builder.status(Status.DOWN);
|
||||
builder.withException(e);
|
||||
}
|
||||
finally {
|
||||
this.lock.unlock();
|
||||
boolean up = true;
|
||||
for (KafkaStreams kStream : kafkaStreamsRegistry.getKafkaStreams()) {
|
||||
up &= kStream.state().isRunning();
|
||||
builder.withDetails(buildDetails(kStream));
|
||||
}
|
||||
builder.status(up ? Status.UP : Status.DOWN);
|
||||
}
|
||||
|
||||
private Map<String, Object> buildDetails(KafkaStreams kafkaStreams) throws Exception {
|
||||
private static Map<String, Object> buildDetails(KafkaStreams kStreams) {
|
||||
final Map<String, Object> details = new HashMap<>();
|
||||
final Map<String, Object> perAppdIdDetails = new HashMap<>();
|
||||
|
||||
boolean isRunningResult;
|
||||
if (isKafkaStreams25) {
|
||||
isRunningResult = kafkaStreams.state().isRunningOrRebalancing();
|
||||
}
|
||||
else {
|
||||
// if Kafka client version is lower than 2.5, then call the method reflectively.
|
||||
isRunningResult = (boolean) methodForIsRunning.invoke(kafkaStreams.state());
|
||||
}
|
||||
|
||||
if (isRunningResult) {
|
||||
final Set<ThreadMetadata> threadMetadata = kafkaStreams.metadataForLocalThreads();
|
||||
for (ThreadMetadata metadata : threadMetadata) {
|
||||
perAppdIdDetails.put("threadName", metadata.threadName());
|
||||
perAppdIdDetails.put("threadState", metadata.threadState());
|
||||
perAppdIdDetails.put("adminClientId", metadata.adminClientId());
|
||||
perAppdIdDetails.put("consumerClientId", metadata.consumerClientId());
|
||||
perAppdIdDetails.put("restoreConsumerClientId", metadata.restoreConsumerClientId());
|
||||
perAppdIdDetails.put("producerClientIds", metadata.producerClientIds());
|
||||
perAppdIdDetails.put("activeTasks", taskDetails(metadata.activeTasks()));
|
||||
perAppdIdDetails.put("standbyTasks", taskDetails(metadata.standbyTasks()));
|
||||
if (kStreams.state().isRunning()) {
|
||||
for (ThreadMetadata metadata : kStreams.localThreadsMetadata()) {
|
||||
details.put("threadName", metadata.threadName());
|
||||
details.put("threadState", metadata.threadState());
|
||||
details.put("activeTasks", taskDetails(metadata.activeTasks()));
|
||||
details.put("standbyTasks", taskDetails(metadata.standbyTasks()));
|
||||
}
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsRegistry.streamBuilderFactoryBean(kafkaStreams);
|
||||
final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
details.put(applicationId, perAppdIdDetails);
|
||||
}
|
||||
else {
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsRegistry.streamBuilderFactoryBean(kafkaStreams);
|
||||
String applicationId = null;
|
||||
if (streamsBuilderFactoryBean != null) {
|
||||
applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
}
|
||||
else {
|
||||
final Map<String, KafkaStreams> stoppedKafkaStreamsPerBinding = kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams();
|
||||
for (String appId : stoppedKafkaStreamsPerBinding.keySet()) {
|
||||
if (stoppedKafkaStreamsPerBinding.get(appId).equals(kafkaStreams)) {
|
||||
applicationId = appId;
|
||||
}
|
||||
}
|
||||
}
|
||||
details.put(applicationId, String.format("The processor with application.id %s is down. Current state: %s", applicationId, kafkaStreams.state()));
|
||||
}
|
||||
return details;
|
||||
}
|
||||
@@ -200,29 +70,12 @@ public class KafkaStreamsBinderHealthIndicator extends AbstractHealthIndicator i
|
||||
final Map<String, Object> details = new HashMap<>();
|
||||
for (TaskMetadata metadata : taskMetadata) {
|
||||
details.put("taskId", metadata.taskId());
|
||||
if (details.containsKey("partitions")) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<String> partitionsInfo = (List<String>) details.get("partitions");
|
||||
partitionsInfo.addAll(addPartitionsInfo(metadata));
|
||||
}
|
||||
else {
|
||||
details.put("partitions",
|
||||
addPartitionsInfo(metadata));
|
||||
}
|
||||
details.put("partitions",
|
||||
metadata.topicPartitions().stream().map(
|
||||
p -> "partition=" + p.partition() + ", topic=" + p.topic())
|
||||
.collect(Collectors.toList()));
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
private static List<String> addPartitionsInfo(TaskMetadata metadata) {
|
||||
return metadata.topicPartitions().stream().map(
|
||||
p -> "partition=" + p.partition() + ", topic=" + p.topic())
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() throws Exception {
|
||||
if (adminClient != null) {
|
||||
adminClient.close(Duration.ofSeconds(0));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,12 +16,9 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.actuate.autoconfigure.health.ConditionalOnEnabledHealthIndicator;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
@@ -33,18 +30,13 @@ import org.springframework.context.annotation.Configuration;
|
||||
@Configuration
|
||||
@ConditionalOnClass(name = "org.springframework.boot.actuate.health.HealthIndicator")
|
||||
@ConditionalOnEnabledHealthIndicator("binders")
|
||||
public class KafkaStreamsBinderHealthIndicatorConfiguration {
|
||||
class KafkaStreamsBinderHealthIndicatorConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaStreamsBinderHealthIndicator kafkaStreamsBinderHealthIndicator(
|
||||
ObjectProvider<KafkaStreamsRegistry> kafkaStreamsRegistry,
|
||||
@Qualifier("binderConfigurationProperties")KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties, KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
|
||||
if (kafkaStreamsRegistry.getIfUnique() != null) {
|
||||
return new KafkaStreamsBinderHealthIndicator(kafkaStreamsRegistry.getIfUnique(), kafkaStreamsBinderConfigurationProperties,
|
||||
kafkaProperties, kafkaStreamsBindingInformationCatalogue);
|
||||
}
|
||||
return null;
|
||||
@ConditionalOnBean(KafkaStreamsRegistry.class)
|
||||
KafkaStreamsBinderHealthIndicator kafkaStreamsBinderHealthIndicator(
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
return new KafkaStreamsBinderHealthIndicator(kafkaStreamsRegistry);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,240 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.function.ToDoubleFunction;
|
||||
|
||||
import io.micrometer.core.instrument.FunctionCounter;
|
||||
import io.micrometer.core.instrument.Gauge;
|
||||
import io.micrometer.core.instrument.Meter;
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import io.micrometer.core.instrument.Tag;
|
||||
import io.micrometer.core.instrument.binder.MeterBinder;
|
||||
import org.apache.kafka.common.Metric;
|
||||
import org.apache.kafka.common.MetricName;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* Kafka Streams binder metrics implementation that exports the metrics available
|
||||
* through {@link KafkaStreams#metrics()} into a micrometer {@link io.micrometer.core.instrument.MeterRegistry}.
|
||||
*
|
||||
* Boot 2.2 users need to rely on this class for the metrics instead of direct support from Micrometer.
|
||||
* Micrometer added Kafka Streams metrics support in 1.4.0 which Boot 2.3 includes.
|
||||
* Therefore, the users who are on Boot 2.2, need to rely on these metrics.
|
||||
* For users who are on 2.3 Boot, this class won't be activated (See the configuration for the various
|
||||
* conditionals used).
|
||||
*
|
||||
* For the most part, this class is a copy of the Micrometer Kafka Streams support that was added in version 1.4.0.
|
||||
* We will keep this class, as long as we support Boot 2.2.x.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class KafkaStreamsBinderMetrics {
|
||||
|
||||
static final String DEFAULT_VALUE = "unknown";
|
||||
|
||||
static final String CLIENT_ID_TAG_NAME = "client-id";
|
||||
|
||||
static final String METRIC_GROUP_APP_INFO = "app-info";
|
||||
|
||||
static final String VERSION_METRIC_NAME = "version";
|
||||
|
||||
static final String START_TIME_METRIC_NAME = "start-time-ms";
|
||||
|
||||
static final String KAFKA_VERSION_TAG_NAME = "kafka-version";
|
||||
|
||||
static final String METRIC_NAME_PREFIX = "kafka.";
|
||||
|
||||
static final String METRIC_GROUP_METRICS_COUNT = "kafka-metrics-count";
|
||||
|
||||
private String kafkaVersion = DEFAULT_VALUE;
|
||||
|
||||
private String clientId = DEFAULT_VALUE;
|
||||
|
||||
private final MeterRegistry meterRegistry;
|
||||
|
||||
private MeterBinder meterBinder;
|
||||
|
||||
private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
|
||||
|
||||
private volatile Set<MetricName> currentMeters = new HashSet<>();
|
||||
|
||||
public KafkaStreamsBinderMetrics(MeterRegistry meterRegistry) {
|
||||
this.meterRegistry = meterRegistry;
|
||||
}
|
||||
|
||||
public void bindTo(Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans) {
|
||||
if (this.meterBinder == null) {
|
||||
this.meterBinder = registry -> {
|
||||
if (streamsBuilderFactoryBeans != null) {
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
if (streamsBuilderFactoryBean.isRunning()) {
|
||||
KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
final Map<MetricName, ? extends Metric> metrics = kafkaStreams.metrics();
|
||||
|
||||
prepareToBindMetrics(registry, metrics);
|
||||
checkAndBindMetrics(registry, metrics);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
this.meterBinder.bindTo(this.meterRegistry);
|
||||
}
|
||||
|
||||
public void addMetrics(Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans) {
|
||||
synchronized (KafkaStreamsBinderMetrics.this) {
|
||||
this.bindTo(streamsBuilderFactoryBeans);
|
||||
}
|
||||
}
|
||||
|
||||
void prepareToBindMetrics(MeterRegistry registry, Map<MetricName, ? extends Metric> metrics) {
|
||||
Metric startTime = null;
|
||||
for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
|
||||
MetricName name = entry.getKey();
|
||||
if (clientId.equals(DEFAULT_VALUE) && name.tags().get(CLIENT_ID_TAG_NAME) != null) {
|
||||
clientId = name.tags().get(CLIENT_ID_TAG_NAME);
|
||||
}
|
||||
if (METRIC_GROUP_APP_INFO.equals(name.group())) {
|
||||
if (VERSION_METRIC_NAME.equals(name.name())) {
|
||||
kafkaVersion = (String) entry.getValue().metricValue();
|
||||
}
|
||||
else if (START_TIME_METRIC_NAME.equals(name.name())) {
|
||||
startTime = entry.getValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (startTime != null) {
|
||||
bindMeter(registry, startTime, meterName(startTime), meterTags(startTime));
|
||||
}
|
||||
}
|
||||
|
||||
private void bindMeter(MeterRegistry registry, Metric metric, String name, Iterable<Tag> tags) {
|
||||
if (name.endsWith("total") || name.endsWith("count")) {
|
||||
registerCounter(registry, metric, name, tags);
|
||||
}
|
||||
else {
|
||||
registerGauge(registry, metric, name, tags);
|
||||
}
|
||||
}
|
||||
|
||||
private void registerCounter(MeterRegistry registry, Metric metric, String name, Iterable<Tag> tags) {
|
||||
FunctionCounter.builder(name, metric, toMetricValue())
|
||||
.tags(tags)
|
||||
.description(metric.metricName().description())
|
||||
.register(registry);
|
||||
}
|
||||
|
||||
private ToDoubleFunction<Metric> toMetricValue() {
|
||||
return metric -> ((Number) metric.metricValue()).doubleValue();
|
||||
}
|
||||
|
||||
private void registerGauge(MeterRegistry registry, Metric metric, String name, Iterable<Tag> tags) {
|
||||
Gauge.builder(name, metric, toMetricValue())
|
||||
.tags(tags)
|
||||
.description(metric.metricName().description())
|
||||
.register(registry);
|
||||
}
|
||||
|
||||
private List<Tag> meterTags(Metric metric) {
|
||||
return meterTags(metric, false);
|
||||
}
|
||||
|
||||
private String meterName(Metric metric) {
|
||||
String name = METRIC_NAME_PREFIX + metric.metricName().group() + "." + metric.metricName().name();
|
||||
return name.replaceAll("-metrics", "").replaceAll("-", ".");
|
||||
}
|
||||
|
||||
private List<Tag> meterTags(Metric metric, boolean includeCommonTags) {
|
||||
List<Tag> tags = new ArrayList<>();
|
||||
metric.metricName().tags().forEach((key, value) -> tags.add(Tag.of(key, value)));
|
||||
tags.add(Tag.of(KAFKA_VERSION_TAG_NAME, kafkaVersion));
|
||||
return tags;
|
||||
}
|
||||
|
||||
private boolean differentClient(List<Tag> tags) {
|
||||
for (Tag tag : tags) {
|
||||
if (tag.getKey().equals(CLIENT_ID_TAG_NAME)) {
|
||||
if (!clientId.equals(tag.getValue())) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void checkAndBindMetrics(MeterRegistry registry, Map<MetricName, ? extends Metric> metrics) {
|
||||
if (!currentMeters.equals(metrics.keySet())) {
|
||||
currentMeters = new HashSet<>(metrics.keySet());
|
||||
metrics.forEach((name, metric) -> {
|
||||
//Filter out non-numeric values
|
||||
if (!(metric.metricValue() instanceof Number)) {
|
||||
return;
|
||||
}
|
||||
|
||||
//Filter out metrics from groups that include metadata
|
||||
if (METRIC_GROUP_APP_INFO.equals(name.group())) {
|
||||
return;
|
||||
}
|
||||
if (METRIC_GROUP_METRICS_COUNT.equals(name.group())) {
|
||||
return;
|
||||
}
|
||||
String meterName = meterName(metric);
|
||||
List<Tag> meterTagsWithCommonTags = meterTags(metric, true);
|
||||
//Kafka has metrics with a lower number of tags (e.g. with/without the topic or partition tag)
//Remove meters with a lower number of tags
|
||||
boolean hasLessTags = false;
|
||||
for (Meter other : registry.find(meterName).meters()) {
|
||||
List<Tag> tags = other.getId().getTags();
|
||||
// Only consider meters from the same client before filtering
|
||||
if (differentClient(tags)) {
|
||||
break;
|
||||
}
|
||||
if (tags.size() < meterTagsWithCommonTags.size()) {
|
||||
registry.remove(other);
|
||||
}
|
||||
// Check if already exists
|
||||
else if (tags.size() == meterTagsWithCommonTags.size()) {
|
||||
if (tags.equals(meterTagsWithCommonTags)) {
|
||||
return;
|
||||
}
|
||||
else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
else {
|
||||
hasLessTags = true;
|
||||
}
|
||||
}
|
||||
if (hasLessTags) {
|
||||
return;
|
||||
}
|
||||
bindMeter(registry, metric, meterName, meterTags(metric));
|
||||
});
|
||||
}
|
||||
}
|
||||
}
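
A minimal usage sketch for the metrics support above (not part of the diff; it assumes an already-running StreamsBuilderFactoryBean and uses a Micrometer SimpleMeterRegistry):

// Sketch only: exercising KafkaStreamsBinderMetrics against a simple Micrometer registry.
package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Collections;

import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

import org.springframework.kafka.config.StreamsBuilderFactoryBean;

class KafkaStreamsBinderMetricsSketch {

	void bind(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
		SimpleMeterRegistry registry = new SimpleMeterRegistry();
		KafkaStreamsBinderMetrics metrics = new KafkaStreamsBinderMetrics(registry);
		// Registers gauges and counters named "kafka.<group>.<name>" with the "-metrics"
		// suffix stripped and dashes converted to dots, e.g. kafka.stream.thread.poll.total.
		metrics.addMetrics(Collections.singleton(streamsBuilderFactoryBean));
	}
}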
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017-2021 the original author or authors.
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,67 +16,44 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
|
||||
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
|
||||
|
||||
import org.springframework.beans.BeanUtils;
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingClass;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.bind.BindResult;
|
||||
import org.springframework.boot.context.properties.bind.Bindable;
|
||||
import org.springframework.boot.context.properties.bind.Binder;
|
||||
import org.springframework.boot.context.properties.bind.PropertySourcesPlaceholdersResolver;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
|
||||
import org.springframework.cloud.function.context.FunctionCatalog;
|
||||
import org.springframework.cloud.stream.binder.BinderConfiguration;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.function.FunctionDetectorCondition;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.serde.CompositeNonNativeSerde;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.serde.MessageConverterDelegateSerde;
|
||||
import org.springframework.cloud.stream.binding.BindableProxyFactory;
|
||||
import org.springframework.cloud.stream.binding.BindingService;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
import org.springframework.cloud.stream.config.BinderProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceConfiguration;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Conditional;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.env.ConfigurableEnvironment;
|
||||
import org.springframework.core.env.Environment;
|
||||
import org.springframework.core.env.MapPropertySource;
|
||||
import org.springframework.integration.context.IntegrationContextUtils;
|
||||
import org.springframework.integration.support.utils.IntegrationUtils;
|
||||
import org.springframework.kafka.config.KafkaStreamsConfiguration;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.streams.KafkaStreamsMicrometerListener;
|
||||
import org.springframework.kafka.streams.RecoveringDeserializationExceptionHandler;
|
||||
import org.springframework.lang.Nullable;
|
||||
import org.springframework.messaging.converter.CompositeMessageConverter;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.ReflectionUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -86,7 +63,6 @@ import org.springframework.util.StringUtils;
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
@Configuration
|
||||
@EnableConfigurationProperties(KafkaStreamsExtendedBindingProperties.class)
|
||||
@ConditionalOnBean(BindingService.class)
|
||||
@AutoConfigureAfter(BindingServiceConfiguration.class)
|
||||
@@ -98,14 +74,11 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
|
||||
private static final String GLOBALKTABLE_BINDER_TYPE = "globalktable";
|
||||
|
||||
private static final String CONSUMER_PROPERTIES_PREFIX = "consumer.";
|
||||
private static final String PRODUCER_PROPERTIES_PREFIX = "producer.";
|
||||
|
||||
@Bean
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.streams.binder")
|
||||
public KafkaStreamsBinderConfigurationProperties binderConfigurationProperties(
|
||||
KafkaProperties kafkaProperties, ConfigurableEnvironment environment,
|
||||
BindingServiceProperties properties, ConfigurableApplicationContext context) throws Exception {
|
||||
BindingServiceProperties properties) {
|
||||
final Map<String, BinderConfiguration> binderConfigurations = getBinderConfigurations(
|
||||
properties);
|
||||
for (Map.Entry<String, BinderConfiguration> entry : binderConfigurations
|
||||
@@ -118,19 +91,7 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
Map<String, Object> binderProperties = new HashMap<>();
|
||||
this.flatten(null, binderConfiguration.getProperties(), binderProperties);
|
||||
environment.getPropertySources().addFirst(
|
||||
new MapPropertySource(entry.getKey() + "-kafkaStreamsBinderEnv", binderProperties));
|
||||
|
||||
Binder binder = new Binder(ConfigurationPropertySources.get(environment),
|
||||
new PropertySourcesPlaceholdersResolver(environment),
|
||||
IntegrationUtils.getConversionService(context.getBeanFactory()), null);
|
||||
final Constructor<KafkaStreamsBinderConfigurationProperties> kafkaStreamsBinderConfigurationPropertiesConstructor =
|
||||
ReflectionUtils.accessibleConstructor(KafkaStreamsBinderConfigurationProperties.class, KafkaProperties.class);
|
||||
final KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties =
|
||||
BeanUtils.instantiateClass(kafkaStreamsBinderConfigurationPropertiesConstructor, kafkaProperties);
|
||||
final BindResult<KafkaStreamsBinderConfigurationProperties> bind = binder.bind("spring.cloud.stream.kafka.streams.binder", Bindable.ofInstance(kafkaStreamsBinderConfigurationProperties));
|
||||
context.getBeanFactory().registerSingleton(
|
||||
entry.getKey() + "-KafkaStreamsBinderConfigurationProperties",
|
||||
bind.get());
|
||||
new MapPropertySource("kafkaStreamsBinderEnv", binderProperties));
|
||||
}
|
||||
}
|
||||
return new KafkaStreamsBinderConfigurationProperties(kafkaProperties);
|
||||
@@ -171,7 +132,7 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaStreamsConfiguration kafkaStreamsConfiguration(
|
||||
@Qualifier("binderConfigurationProperties") KafkaStreamsBinderConfigurationProperties properties,
|
||||
KafkaStreamsBinderConfigurationProperties properties,
|
||||
Environment environment) {
|
||||
KafkaProperties kafkaProperties = properties.getKafkaProperties();
|
||||
Map<String, Object> streamsProperties = kafkaProperties.buildStreamsProperties();
|
||||
@@ -187,32 +148,15 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
|
||||
@Bean("streamConfigGlobalProperties")
|
||||
public Map<String, Object> streamConfigGlobalProperties(
|
||||
@Qualifier("binderConfigurationProperties") KafkaStreamsBinderConfigurationProperties configProperties,
|
||||
KafkaStreamsConfiguration kafkaStreamsConfiguration, ConfigurableEnvironment environment,
|
||||
SendToDlqAndContinue sendToDlqAndContinue) {
|
||||
KafkaStreamsBinderConfigurationProperties configProperties,
|
||||
KafkaStreamsConfiguration kafkaStreamsConfiguration) {
|
||||
|
||||
Properties properties = kafkaStreamsConfiguration.asProperties();
|
||||
|
||||
String kafkaConnectionString = configProperties.getKafkaConnectionString();
|
||||
|
||||
if (kafkaConnectionString != null && kafkaConnectionString.equals("localhost:9092")) {
|
||||
//Making sure that the application indeed set a property.
|
||||
String kafkaStreamsBinderBroker = environment.getProperty("spring.cloud.stream.kafka.streams.binder.brokers");
|
||||
|
||||
if (StringUtils.isEmpty(kafkaStreamsBinderBroker)) {
|
||||
//Kafka Streams binder specific property for brokers is not set by the application.
|
||||
//See if there is one configured at the kafka binder level.
|
||||
String kafkaBinderBroker = environment.getProperty("spring.cloud.stream.kafka.binder.brokers");
|
||||
if (!StringUtils.isEmpty(kafkaBinderBroker)) {
|
||||
kafkaConnectionString = kafkaBinderBroker;
|
||||
configProperties.setBrokers(kafkaConnectionString);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
if (ObjectUtils.isEmpty(properties.get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
kafkaConnectionString);
|
||||
configProperties.getKafkaConnectionString());
|
||||
}
|
||||
else {
|
||||
Object bootstrapServerConfig = properties
|
||||
@@ -223,14 +167,7 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
.get(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (bootStrapServers.equals("localhost:9092")) {
|
||||
properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
kafkaConnectionString);
|
||||
}
|
||||
}
|
||||
else if (bootstrapServerConfig instanceof List) {
|
||||
List bootStrapCollection = (List) bootstrapServerConfig;
|
||||
if (bootStrapCollection.size() == 1 && bootStrapCollection.get(0).equals("localhost:9092")) {
|
||||
properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
kafkaConnectionString);
|
||||
configProperties.getKafkaConnectionString());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -247,55 +184,31 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
Serdes.ByteArraySerde.class.getName());
|
||||
|
||||
if (configProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.logAndContinue) {
|
||||
.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
|
||||
properties.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndContinueExceptionHandler.class);
|
||||
LogAndContinueExceptionHandler.class.getName());
|
||||
}
|
||||
else if (configProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.logAndFail) {
|
||||
.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
|
||||
properties.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndFailExceptionHandler.class);
|
||||
LogAndFailExceptionHandler.class.getName());
|
||||
}
|
||||
else if (configProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.sendToDlq) {
|
||||
.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
|
||||
properties.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
RecoveringDeserializationExceptionHandler.class);
|
||||
properties.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, sendToDlqAndContinue);
|
||||
SendToDlqAndContinue.class.getName());
|
||||
}
|
||||
|
||||
if (!ObjectUtils.isEmpty(configProperties.getConfiguration())) {
|
||||
properties.putAll(configProperties.getConfiguration());
|
||||
}
|
||||
|
||||
Map<String, Object> mergedConsumerConfig = new HashMap<>(configProperties.mergedConsumerConfiguration());
|
||||
//Add the consumer. prefix if it is missing (to differentiate these properties from other categories such as streams and producer properties)
|
||||
addPrefix(properties, mergedConsumerConfig, CONSUMER_PROPERTIES_PREFIX);
|
||||
|
||||
Map<String, Object> mergedProducerConfig = new HashMap<>(configProperties.mergedProducerConfiguration());
|
||||
//Add the producer. prefix if it is missing (to differentiate these properties from other categories such as streams and consumer properties)
|
||||
addPrefix(properties, mergedProducerConfig, PRODUCER_PROPERTIES_PREFIX);
|
||||
|
||||
if (!properties.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG)) {
|
||||
properties.put(StreamsConfig.REPLICATION_FACTOR_CONFIG,
|
||||
(int) configProperties.getReplicationFactor());
|
||||
}
|
||||
return properties.entrySet().stream().collect(
|
||||
Collectors.toMap((e) -> String.valueOf(e.getKey()), Map.Entry::getValue));
|
||||
}
|
||||
|
||||
private void addPrefix(Properties properties, Map<String, Object> mergedConsProdConfig, String prefix) {
|
||||
Map<String, Object> mergedConfigs = new HashMap<>();
|
||||
for (String key : mergedConsProdConfig.keySet()) {
|
||||
mergedConfigs.put(key.startsWith(prefix) ? key : prefix + key, mergedConsProdConfig.get(key));
|
||||
}
|
||||
if (!ObjectUtils.isEmpty(mergedConfigs)) {
|
||||
properties.putAll(mergedConfigs);
|
||||
}
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KStreamStreamListenerResultAdapter kstreamStreamListenerResultAdapter() {
|
||||
return new KStreamStreamListenerResultAdapter();
|
||||
@@ -318,61 +231,63 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KStreamStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter,
|
||||
Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
|
||||
ObjectProvider<CleanupConfig> cleanupConfig,
|
||||
ObjectProvider<StreamsBuilderFactoryBeanConfigurer> customizerProvider, ConfigurableEnvironment environment) {
|
||||
ObjectProvider<CleanupConfig> cleanupConfig) {
|
||||
return new KafkaStreamsStreamListenerSetupMethodOrchestrator(
|
||||
bindingServiceProperties, kafkaStreamsExtendedBindingProperties,
|
||||
keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue,
|
||||
kafkaStreamListenerParameterAdapter, streamListenerResultAdapters,
|
||||
cleanupConfig.getIfUnique(), customizerProvider.getIfUnique(), environment);
|
||||
cleanupConfig.getIfUnique());
|
||||
}
|
||||
|
||||
@Bean
|
||||
// @ConditionalOnProperty("spring.cloud.stream.kafka.streams.function.definition")
|
||||
@ConditionalOnProperty("spring.cloud.stream.function.definition")
|
||||
public KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
ObjectProvider<CleanupConfig> cleanupConfig,
|
||||
FunctionCatalog functionCatalog, BindableProxyFactory bindableProxyFactory) {
|
||||
return new KafkaStreamsFunctionProcessor(bindingServiceProperties, kafkaStreamsExtendedBindingProperties,
|
||||
keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue, kafkaStreamsMessageConversionDelegate,
|
||||
cleanupConfig.getIfUnique(), functionCatalog, bindableProxyFactory);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaStreamsMessageConversionDelegate messageConversionDelegate(
|
||||
@Qualifier(IntegrationContextUtils.ARGUMENT_RESOLVER_MESSAGE_CONVERTER_BEAN_NAME)
|
||||
CompositeMessageConverter compositeMessageConverter,
|
||||
SendToDlqAndContinue sendToDlqAndContinue,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
@Qualifier("binderConfigurationProperties") KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
return new KafkaStreamsMessageConversionDelegate(compositeMessageConverter, sendToDlqAndContinue,
|
||||
CompositeMessageConverterFactory compositeMessageConverterFactory,
|
||||
SendToDlqAndContinue sendToDlqAndContinue,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
return new KafkaStreamsMessageConversionDelegate(compositeMessageConverterFactory, sendToDlqAndContinue,
|
||||
KafkaStreamsBindingInformationCatalogue, binderConfigurationProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public MessageConverterDelegateSerde messageConverterDelegateSerde(
|
||||
@Qualifier(IntegrationContextUtils.ARGUMENT_RESOLVER_MESSAGE_CONVERTER_BEAN_NAME)
|
||||
CompositeMessageConverter compositeMessageConverterFactory) {
|
||||
return new MessageConverterDelegateSerde(compositeMessageConverterFactory);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CompositeNonNativeSerde compositeNonNativeSerde(
|
||||
@Qualifier(IntegrationContextUtils.ARGUMENT_RESOLVER_MESSAGE_CONVERTER_BEAN_NAME)
|
||||
CompositeMessageConverter compositeMessageConverterFactory) {
|
||||
CompositeMessageConverterFactory compositeMessageConverterFactory) {
|
||||
return new CompositeNonNativeSerde(compositeMessageConverterFactory);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KStreamBoundElementFactory kStreamBoundElementFactory(
|
||||
BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler) {
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
return new KStreamBoundElementFactory(bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue, encodingDecodingBindAdviceHandler);
|
||||
KafkaStreamsBindingInformationCatalogue);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KTableBoundElementFactory kTableBoundElementFactory(
|
||||
BindingServiceProperties bindingServiceProperties, EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
return new KTableBoundElementFactory(bindingServiceProperties, encodingDecodingBindAdviceHandler, KafkaStreamsBindingInformationCatalogue);
|
||||
BindingServiceProperties bindingServiceProperties) {
|
||||
return new KTableBoundElementFactory(bindingServiceProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GlobalKTableBoundElementFactory globalKTableBoundElementFactory(
|
||||
BindingServiceProperties properties, EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
return new GlobalKTableBoundElementFactory(properties, encodingDecodingBindAdviceHandler, KafkaStreamsBindingInformationCatalogue);
|
||||
BindingServiceProperties properties) {
|
||||
return new GlobalKTableBoundElementFactory(properties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
@@ -387,18 +302,23 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
@SuppressWarnings("unchecked")
|
||||
@ConditionalOnMissingBean
|
||||
public KeyValueSerdeResolver keyValueSerdeResolver(
|
||||
@Qualifier("streamConfigGlobalProperties") Object streamConfigGlobalProperties,
|
||||
@Qualifier("binderConfigurationProperties")KafkaStreamsBinderConfigurationProperties properties) {
|
||||
KafkaStreamsBinderConfigurationProperties properties) {
|
||||
return new KeyValueSerdeResolver(
|
||||
(Map<String, Object>) streamConfigGlobalProperties, properties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public QueryableStoreRegistry queryableStoreTypeRegistry(
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
return new QueryableStoreRegistry(kafkaStreamsRegistry);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public InteractiveQueryService interactiveQueryServices(
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry,
|
||||
@Qualifier("binderConfigurationProperties")KafkaStreamsBinderConfigurationProperties properties) {
|
||||
KafkaStreamsBinderConfigurationProperties properties) {
|
||||
return new InteractiveQueryService(kafkaStreamsRegistry, properties);
|
||||
}
|
||||
|
||||
@@ -410,84 +330,13 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
@Bean
|
||||
public StreamsBuilderFactoryManager streamsBuilderFactoryManager(
|
||||
KafkaStreamsBindingInformationCatalogue catalogue,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry,
|
||||
@Nullable KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics,
|
||||
@Nullable KafkaStreamsMicrometerListener listener, KafkaProperties kafkaProperties) {
|
||||
return new StreamsBuilderFactoryManager(catalogue, kafkaStreamsRegistry, kafkaStreamsBinderMetrics, listener, kafkaProperties);
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
return new StreamsBuilderFactoryManager(catalogue, kafkaStreamsRegistry);
|
||||
}
|
||||
|
||||
@Bean
|
||||
@Conditional(FunctionDetectorCondition.class)
|
||||
public KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
ObjectProvider<CleanupConfig> cleanupConfig,
|
||||
StreamFunctionProperties streamFunctionProperties,
|
||||
@Qualifier("binderConfigurationProperties") KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
ObjectProvider<StreamsBuilderFactoryBeanConfigurer> customizerProvider, ConfigurableEnvironment environment) {
|
||||
return new KafkaStreamsFunctionProcessor(bindingServiceProperties, kafkaStreamsExtendedBindingProperties,
|
||||
keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue, kafkaStreamsMessageConversionDelegate,
|
||||
cleanupConfig.getIfUnique(), streamFunctionProperties, kafkaStreamsBinderConfigurationProperties,
|
||||
customizerProvider.getIfUnique(), environment);
|
||||
@Bean("kafkaStreamsDlqDispatchers")
|
||||
public Map<String, KafkaStreamsDlqDispatch> dlqDispatchers() {
|
||||
return new HashMap<>();
|
||||
}
|
||||
|
||||
@Bean
|
||||
public EncodingDecodingBindAdviceHandler encodingDecodingBindAdviceHandler() {
|
||||
return new EncodingDecodingBindAdviceHandler();
|
||||
}
|
||||
|
||||
@Configuration
|
||||
@ConditionalOnMissingBean(value = KafkaStreamsBinderMetrics.class, name = "outerContext")
|
||||
@ConditionalOnClass(name = "io.micrometer.core.instrument.MeterRegistry")
|
||||
protected class KafkaStreamsBinderMetricsConfiguration {
|
||||
|
||||
@Bean
|
||||
@ConditionalOnBean(MeterRegistry.class)
|
||||
@ConditionalOnMissingBean(KafkaStreamsBinderMetrics.class)
|
||||
@ConditionalOnMissingClass("org.springframework.kafka.core.MicrometerConsumerListener")
|
||||
public KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics(MeterRegistry meterRegistry) {
|
||||
|
||||
return new KafkaStreamsBinderMetrics(meterRegistry);
|
||||
}
|
||||
|
||||
@ConditionalOnClass(name = "org.springframework.kafka.core.MicrometerConsumerListener")
|
||||
@ConditionalOnBean(MeterRegistry.class)
|
||||
protected class KafkaMicrometer {
|
||||
|
||||
@Bean
|
||||
@ConditionalOnMissingBean(name = "binderStreamsListener")
|
||||
public KafkaStreamsMicrometerListener binderStreamsListener(MeterRegistry meterRegistry) {
|
||||
return new KafkaStreamsMicrometerListener(meterRegistry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Configuration
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
@ConditionalOnMissingBean(KafkaStreamsBinderMetrics.class)
|
||||
@ConditionalOnClass(name = "io.micrometer.core.instrument.MeterRegistry")
|
||||
protected class KafkaStreamsBinderMetricsConfigurationWithMultiBinder {
|
||||
|
||||
@Bean
|
||||
@ConditionalOnMissingClass("org.springframework.kafka.core.MicrometerConsumerListener")
|
||||
public KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics(ConfigurableApplicationContext context) {
|
||||
|
||||
MeterRegistry meterRegistry = context.getBean("outerContext", ApplicationContext.class)
|
||||
.getBean(MeterRegistry.class);
|
||||
return new KafkaStreamsBinderMetrics(meterRegistry);
|
||||
}
|
||||
|
||||
@ConditionalOnClass(name = "org.springframework.kafka.core.MicrometerConsumerListener")
|
||||
@ConditionalOnBean(MeterRegistry.class)
|
||||
protected class KafkaMicrometer {
|
||||
|
||||
@Bean
|
||||
@ConditionalOnMissingBean(name = "binderStreamsListener")
|
||||
public KafkaStreamsMicrometerListener binderStreamsListener(MeterRegistry meterRegistry) {
|
||||
return new KafkaStreamsMicrometerListener(meterRegistry);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
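
The orchestrator and function-processor beans in this hunk take an ObjectProvider<StreamsBuilderFactoryBeanConfigurer>; a minimal sketch of an application-supplied configurer bean they would pick up via getIfUnique() (not part of the diff; the close-timeout value is illustrative):

// Sketch only: an application-level StreamsBuilderFactoryBeanConfigurer consumed by the
// configuration above.
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;

@Configuration
class StreamsCustomizerConfiguration {

	@Bean
	StreamsBuilderFactoryBeanConfigurer streamsBuilderFactoryBeanConfigurer() {
		// Called for each StreamsBuilderFactoryBean the binder creates.
		return factoryBean -> factoryBean.setCloseTimeout(10);
	}
}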
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,82 +16,36 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.utils.DlqDestinationResolver;
|
||||
import org.springframework.cloud.stream.binder.kafka.utils.DlqPartitionFunction;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaOperations;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Common methods used by various Kafka Streams types across the binders.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
final class KafkaStreamsBinderUtils {
|
||||
|
||||
private static final Log LOGGER = LogFactory.getLog(KafkaStreamsBinderUtils.class);
|
||||
|
||||
private KafkaStreamsBinderUtils() {
|
||||
|
||||
}
|
||||
|
||||
static void prepareConsumerBinding(String name, String group,
|
||||
ApplicationContext context, KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties,
|
||||
RetryTemplate retryTemplate,
|
||||
ConfigurableListableBeanFactory beanFactory, String bindingName,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties =
|
||||
(ExtendedConsumerProperties) properties;
|
||||
|
||||
ApplicationContext context, KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties,
|
||||
Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers) {
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<>(
|
||||
properties.getExtension());
|
||||
if (binderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.sendToDlq) {
|
||||
extendedConsumerProperties.getExtension().setEnableDlq(true);
|
||||
}
|
||||
// check for deserialization handler at the consumer binding, as that takes precedence.
|
||||
final DeserializationExceptionHandler deserializationExceptionHandler =
|
||||
properties.getExtension().getDeserializationExceptionHandler();
|
||||
if (deserializationExceptionHandler == DeserializationExceptionHandler.sendToDlq) {
|
||||
.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
|
||||
extendedConsumerProperties.getExtension().setEnableDlq(true);
|
||||
}
|
||||
|
||||
@@ -102,125 +56,29 @@ final class KafkaStreamsBinderUtils {
|
||||
}
|
||||
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
|
||||
Map<String, DlqPartitionFunction> partitionFunctions =
|
||||
context.getBeansOfType(DlqPartitionFunction.class, false, false);
|
||||
boolean oneFunctionPresent = partitionFunctions.size() == 1;
|
||||
Integer dlqPartitions = extendedConsumerProperties.getExtension().getDlqPartitions();
|
||||
DlqPartitionFunction partitionFunction = oneFunctionPresent
|
||||
? partitionFunctions.values().iterator().next()
|
||||
: DlqPartitionFunction.determineFallbackFunction(dlqPartitions, LOGGER);
|
||||
|
||||
ProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
|
||||
new ExtendedProducerProperties<>(
|
||||
extendedConsumerProperties.getExtension().getDlqProducerProperties()),
|
||||
binderConfigurationProperties);
|
||||
kafkaStreamsBindingInformationCatalogue.addDlqProducerFactory(streamsBuilderFactoryBean, producerFactory);
|
||||
|
||||
KafkaOperations<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFactory);
|
||||
|
||||
Map<String, DlqDestinationResolver> dlqDestinationResolvers =
|
||||
context.getBeansOfType(DlqDestinationResolver.class, false, false);
|
||||
|
||||
BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver =
|
||||
dlqDestinationResolvers.isEmpty() ? (cr, e) -> new TopicPartition(extendedConsumerProperties.getExtension().getDlqName(),
|
||||
partitionFunction.apply(group, cr, e)) :
|
||||
(cr, e) -> new TopicPartition(dlqDestinationResolvers.values().iterator().next().apply(cr, e),
|
||||
partitionFunction.apply(group, cr, e));
|
||||
|
||||
DeadLetterPublishingRecoverer kafkaStreamsBinderDlqRecoverer = !dlqDestinationResolvers.isEmpty() || !StringUtils
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = !StringUtils
|
||||
.isEmpty(extendedConsumerProperties.getExtension().getDlqName())
|
||||
? new DeadLetterPublishingRecoverer(kafkaTemplate, destinationResolver)
|
||||
: null;
|
||||
? new KafkaStreamsDlqDispatch(
|
||||
extendedConsumerProperties.getExtension()
|
||||
.getDlqName(),
|
||||
binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension())
|
||||
: null;
|
||||
for (String inputTopic : inputTopics) {
|
||||
if (StringUtils.isEmpty(
|
||||
extendedConsumerProperties.getExtension().getDlqName()) && dlqDestinationResolvers.isEmpty()) {
|
||||
destinationResolver = (cr, e) -> new TopicPartition("error." + inputTopic + "." + group,
|
||||
partitionFunction.apply(group, cr, e));
|
||||
kafkaStreamsBinderDlqRecoverer = new DeadLetterPublishingRecoverer(kafkaTemplate,
|
||||
destinationResolver);
|
||||
extendedConsumerProperties.getExtension().getDlqName())) {
|
||||
String dlqName = "error." + inputTopic + "." + group;
|
||||
kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName,
|
||||
binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
}
|
||||
|
||||
SendToDlqAndContinue sendToDlqAndContinue = context
|
||||
.getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(inputTopic,
|
||||
kafkaStreamsBinderDlqRecoverer);
|
||||
}
|
||||
}
|
||||
kafkaStreamsDlqDispatch);
|
||||
|
||||
if (!StringUtils.hasText(properties.getRetryTemplateName())) {
|
||||
@SuppressWarnings("unchecked")
|
||||
BeanDefinition retryTemplateBeanDefinition = BeanDefinitionBuilder
|
||||
.genericBeanDefinition(
|
||||
(Class<RetryTemplate>) retryTemplate.getClass(),
|
||||
() -> retryTemplate)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(bindingName + "-RetryTemplate", retryTemplateBeanDefinition);
|
||||
}
|
||||
}
|
||||
|
||||
private static DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
|
||||
KafkaBinderConfigurationProperties configurationProperties) {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ProducerConfig.RETRIES_CONFIG, 0);
|
||||
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
|
||||
props.put(ProducerConfig.ACKS_CONFIG, configurationProperties.getRequiredAcks());
|
||||
Map<String, Object> mergedConfig = configurationProperties
|
||||
.mergedProducerConfiguration();
|
||||
if (!ObjectUtils.isEmpty(mergedConfig)) {
|
||||
props.putAll(mergedConfig);
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
configurationProperties.getKafkaConnectionString());
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
|
||||
props.put(ProducerConfig.BATCH_SIZE_CONFIG,
|
||||
String.valueOf(producerProperties.getExtension().getBufferSize()));
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
|
||||
props.put(ProducerConfig.LINGER_MS_CONFIG,
|
||||
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
|
||||
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
|
||||
producerProperties.getExtension().getCompressionType().toString());
|
||||
}
|
||||
Map<String, String> configs = producerProperties.getExtension().getConfiguration();
|
||||
Assert.state(!configs.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG),
|
||||
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " cannot be overridden at the binding level; "
|
||||
+ "use multiple binders instead");
|
||||
if (!ObjectUtils.isEmpty(configs)) {
|
||||
props.putAll(configs);
|
||||
}
|
||||
// Always send as byte[] on dlq (the same byte[] that the consumer received)
|
||||
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
|
||||
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
|
||||
ByteArraySerializer.class);
|
||||
|
||||
return new DefaultKafkaProducerFactory<>(props);
|
||||
}
|
||||
|
||||
|
||||
static boolean supportsKStream(MethodParameter methodParameter, Class<?> targetBeanClass) {
|
||||
return KStream.class.isAssignableFrom(targetBeanClass)
|
||||
&& KStream.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
}
|
||||
|
||||
static void closeDlqProducerFactories(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
|
||||
final List<ProducerFactory<byte[], byte[]>> dlqProducerFactories =
|
||||
kafkaStreamsBindingInformationCatalogue.getDlqProducerFactory(streamsBuilderFactoryBean);
|
||||
|
||||
if (!CollectionUtils.isEmpty(dlqProducerFactories)) {
|
||||
for (ProducerFactory<byte[], byte[]> producerFactory : dlqProducerFactories) {
|
||||
try {
|
||||
((DisposableBean) producerFactory).destroy();
|
||||
}
|
||||
catch (Exception exception) {
|
||||
throw new IllegalStateException(exception);
|
||||
}
|
||||
kafkaStreamsDlqDispatchers.put(inputTopic, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
}
|
||||
}
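
The DLQ wiring above resolves optional DlqPartitionFunction and DlqDestinationResolver beans from the application context; a minimal sketch of supplying them (not part of the diff; the topic naming and partition choice are illustrative):

// Sketch only: custom DLQ routing beans; when absent, the binder falls back to the
// "error.<topic>.<group>" destination and its default partition function.
import org.springframework.cloud.stream.binder.kafka.utils.DlqDestinationResolver;
import org.springframework.cloud.stream.binder.kafka.utils.DlqPartitionFunction;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
class DlqRoutingConfiguration {

	@Bean
	DlqPartitionFunction dlqPartitionFunction() {
		// Send every DLQ record to partition 0 of the resolved DLQ topic.
		return (group, record, exception) -> 0;
	}

	@Bean
	DlqDestinationResolver dlqDestinationResolver() {
		// Derive the DLQ topic from the failed record's source topic.
		return (record, exception) -> record.topic() + ".dlq";
	}
}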
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,27 +16,18 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
/**
|
||||
* A catalogue that provides binding information for Kafka Streams target types such as
|
||||
@@ -47,25 +38,13 @@ import org.springframework.util.CollectionUtils;
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsBindingInformationCatalogue {
|
||||
class KafkaStreamsBindingInformationCatalogue {
|
||||
|
||||
private final Map<KStream<?, ?>, BindingProperties> bindingProperties = new ConcurrentHashMap<>();
|
||||
|
||||
private final Map<KStream<?, ?>, KafkaStreamsConsumerProperties> consumerProperties = new ConcurrentHashMap<>();
|
||||
|
||||
private final Map<String, StreamsBuilderFactoryBean> streamsBuilderFactoryBeanPerBinding = new HashMap<>();
|
||||
|
||||
private final Map<StreamsBuilderFactoryBean, List<ConsumerProperties>> consumerPropertiesPerSbfb = new HashMap<>();
|
||||
|
||||
private final Map<Object, ResolvableType> outboundKStreamResolvables = new HashMap<>();
|
||||
|
||||
private final Map<KStream<?, ?>, Serde<?>> keySerdeInfo = new HashMap<>();
|
||||
|
||||
private final Map<Object, String> bindingNamesPerTarget = new HashMap<>();
|
||||
|
||||
private final Map<String, KafkaStreams> previousKafkaStreamsPerApplicationId = new HashMap<>();
|
||||
|
||||
private final Map<StreamsBuilderFactoryBean, List<ProducerFactory<byte[], byte[]>>> dlqProducerFactories = new HashMap<>();
|
||||
private final Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = new HashSet<>();
|
||||
|
||||
/**
|
||||
* For a given bound {@link KStream}, retrieve its corresponding destination on the
|
||||
@@ -117,9 +96,7 @@ public class KafkaStreamsBindingInformationCatalogue {
|
||||
*/
|
||||
void registerBindingProperties(KStream<?, ?> bindingTarget,
|
||||
BindingProperties bindingProperties) {
|
||||
if (bindingProperties != null) {
|
||||
this.bindingProperties.put(bindingTarget, bindingProperties);
|
||||
}
|
||||
this.bindingProperties.put(bindingTarget, bindingProperties);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -129,122 +106,20 @@ public class KafkaStreamsBindingInformationCatalogue {
|
||||
*/
|
||||
void registerConsumerProperties(KStream<?, ?> bindingTarget,
|
||||
KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
|
||||
if (kafkaStreamsConsumerProperties != null) {
|
||||
this.consumerProperties.put(bindingTarget, kafkaStreamsConsumerProperties);
|
||||
}
|
||||
this.consumerProperties.put(bindingTarget, kafkaStreamsConsumerProperties);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a mapping for KStream -> {@link StreamsBuilderFactoryBean}.
|
||||
* @param streamsBuilderFactoryBean provides the {@link StreamsBuilderFactoryBean}
|
||||
* mapped to the KStream
|
||||
*/
|
||||
void addStreamBuilderFactory(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
this.streamsBuilderFactoryBeans.add(streamsBuilderFactoryBean);
|
||||
}
|
||||
|
||||
Set<StreamsBuilderFactoryBean> getStreamsBuilderFactoryBeans() {
|
||||
return new HashSet<>(this.streamsBuilderFactoryBeanPerBinding.values());
|
||||
return this.streamsBuilderFactoryBeans;
|
||||
}
|
||||
|
||||
void addStreamBuilderFactoryPerBinding(String binding, StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
this.streamsBuilderFactoryBeanPerBinding.put(binding, streamsBuilderFactoryBean);
|
||||
}
|
||||
|
||||
void addConsumerPropertiesPerSbfb(StreamsBuilderFactoryBean streamsBuilderFactoryBean, ConsumerProperties consumerProperties) {
|
||||
this.consumerPropertiesPerSbfb.computeIfAbsent(streamsBuilderFactoryBean, k -> new ArrayList<>());
|
||||
this.consumerPropertiesPerSbfb.get(streamsBuilderFactoryBean).add(consumerProperties);
|
||||
}
|
||||
|
||||
public Map<StreamsBuilderFactoryBean, List<ConsumerProperties>> getConsumerPropertiesPerSbfb() {
|
||||
return this.consumerPropertiesPerSbfb;
|
||||
}
|
||||
|
||||
Map<String, StreamsBuilderFactoryBean> getStreamsBuilderFactoryBeanPerBinding() {
|
||||
return this.streamsBuilderFactoryBeanPerBinding;
|
||||
}
|
||||
|
||||
void addOutboundKStreamResolvable(Object key, ResolvableType outboundResolvable) {
|
||||
this.outboundKStreamResolvables.put(key, outboundResolvable);
|
||||
}
|
||||
|
||||
ResolvableType getOutboundKStreamResolvable(Object key) {
|
||||
return outboundKStreamResolvables.get(key);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a mapping from a KStream target to its corresponding key Serde.
|
||||
* This is used for sending to DLQ when deserialization fails. See {@link KafkaStreamsMessageConversionDelegate}
|
||||
* for details.
|
||||
*
|
||||
* @param kStreamTarget target KStream
|
||||
* @param keySerde Serde used for the key
|
||||
*/
|
||||
void addKeySerde(KStream<?, ?> kStreamTarget, Serde<?> keySerde) {
|
||||
this.keySerdeInfo.put(kStreamTarget, keySerde);
|
||||
}
|
||||
|
||||
Serde<?> getKeySerde(KStream<?, ?> kStreamTarget) {
|
||||
return this.keySerdeInfo.get(kStreamTarget);
|
||||
}
|
||||
|
||||
|
||||
Map<KStream<?, ?>, BindingProperties> getBindingProperties() {
|
||||
return bindingProperties;
|
||||
}
|
||||
|
||||
Map<KStream<?, ?>, KafkaStreamsConsumerProperties> getConsumerProperties() {
|
||||
return consumerProperties;
|
||||
}
|
||||
|
||||
void addBindingNamePerTarget(Object target, String bindingName) {
|
||||
this.bindingNamesPerTarget.put(target, bindingName);
|
||||
}
|
||||
|
||||
String bindingNamePerTarget(Object target) {
|
||||
return this.bindingNamesPerTarget.get(target);
|
||||
}
|
||||
|
||||
public List<ProducerFactory<byte[], byte[]>> getDlqProducerFactories() {
|
||||
return this.dlqProducerFactories.values()
|
||||
.stream()
|
||||
.flatMap(List::stream)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public List<ProducerFactory<byte[], byte[]>> getDlqProducerFactory(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
return this.dlqProducerFactories.get(streamsBuilderFactoryBean);
|
||||
}
|
||||
|
||||
public void addDlqProducerFactory(StreamsBuilderFactoryBean streamsBuilderFactoryBean,
|
||||
ProducerFactory<byte[], byte[]> producerFactory) {
|
||||
List<ProducerFactory<byte[], byte[]>> producerFactories = this.dlqProducerFactories.get(streamsBuilderFactoryBean);
|
||||
if (CollectionUtils.isEmpty(producerFactories)) {
|
||||
producerFactories = new ArrayList<>();
|
||||
this.dlqProducerFactories.put(streamsBuilderFactoryBean, producerFactories);
|
||||
}
|
||||
producerFactories.add(producerFactory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Caches the previous KafkaStreams for the application.id when the binding is stopped through the actuator.
|
||||
* See https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
*
|
||||
* @param applicationId application.id
|
||||
* @param kafkaStreams {@link KafkaStreams} object
|
||||
*/
|
||||
public void addPreviousKafkaStreamsForApplicationId(String applicationId, KafkaStreams kafkaStreams) {
|
||||
this.previousKafkaStreamsPerApplicationId.put(applicationId, kafkaStreams);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the previously cached KafkaStreams object.
|
||||
* See https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
*
|
||||
* @param applicationId application.id
|
||||
*/
|
||||
public void removePreviousKafkaStreamsForApplicationId(String applicationId) {
|
||||
this.previousKafkaStreamsPerApplicationId.remove(applicationId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all stopped KafkaStreams objects through actuator binding stop.
|
||||
* See https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
|
||||
*
|
||||
* @return stopped KafkaStreams objects map
|
||||
*/
|
||||
public Map<String, KafkaStreams> getStoppedKafkaStreams() {
|
||||
return this.previousKafkaStreamsPerApplicationId;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,152 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.kafka.support.SendResult;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.concurrent.ListenableFuture;
|
||||
import org.springframework.util.concurrent.ListenableFutureCallback;
|
||||
|
||||
/**
|
||||
* Send records in error to a DLQ.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Rafal Zukowski
|
||||
* @author Gary Russell
|
||||
*/
|
||||
class KafkaStreamsDlqDispatch {
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final KafkaTemplate<byte[], byte[]> kafkaTemplate;
|
||||
|
||||
private final String dlqName;
|
||||
|
||||
KafkaStreamsDlqDispatch(String dlqName,
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
|
||||
KafkaConsumerProperties kafkaConsumerProperties) {
|
||||
ProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
|
||||
new ExtendedProducerProperties<>(
|
||||
kafkaConsumerProperties.getDlqProducerProperties()),
|
||||
kafkaBinderConfigurationProperties);
|
||||
|
||||
this.kafkaTemplate = new KafkaTemplate<>(producerFactory);
|
||||
this.dlqName = dlqName;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void sendToDlq(byte[] key, byte[] value, int partition) {
ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(this.dlqName,
partition, key, value, null);
|
||||
|
||||
StringBuilder sb = new StringBuilder().append(" a message with key='")
|
||||
.append(toDisplayString(ObjectUtils.nullSafeToString(key))).append("'")
|
||||
.append(" and payload='")
|
||||
.append(toDisplayString(ObjectUtils.nullSafeToString(value))).append("'")
|
||||
.append(" received from ").append(partittion);
|
||||
ListenableFuture<SendResult<byte[], byte[]>> sentDlq = null;
|
||||
try {
|
||||
sentDlq = this.kafkaTemplate.send(producerRecord);
|
||||
sentDlq.addCallback(
|
||||
new ListenableFutureCallback<SendResult<byte[], byte[]>>() {
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable ex) {
|
||||
KafkaStreamsDlqDispatch.this.logger
|
||||
.error("Error sending to DLQ " + sb.toString(), ex);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onSuccess(SendResult<byte[], byte[]> result) {
|
||||
if (KafkaStreamsDlqDispatch.this.logger.isDebugEnabled()) {
|
||||
KafkaStreamsDlqDispatch.this.logger
|
||||
.debug("Sent to DLQ " + sb.toString());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
catch (Exception ex) {
|
||||
if (sentDlq == null) {
|
||||
KafkaStreamsDlqDispatch.this.logger
|
||||
.error("Error sending to DLQ " + sb.toString(), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
|
||||
KafkaBinderConfigurationProperties configurationProperties) {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ProducerConfig.RETRIES_CONFIG, 0);
|
||||
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
|
||||
props.put(ProducerConfig.ACKS_CONFIG, configurationProperties.getRequiredAcks());
|
||||
Map<String, Object> mergedConfig = configurationProperties
|
||||
.mergedProducerConfiguration();
|
||||
if (!ObjectUtils.isEmpty(mergedConfig)) {
|
||||
props.putAll(mergedConfig);
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
configurationProperties.getKafkaConnectionString());
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
|
||||
props.put(ProducerConfig.BATCH_SIZE_CONFIG,
|
||||
String.valueOf(producerProperties.getExtension().getBufferSize()));
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
|
||||
props.put(ProducerConfig.LINGER_MS_CONFIG,
|
||||
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
|
||||
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
|
||||
producerProperties.getExtension().getCompressionType().toString());
|
||||
}
|
||||
if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
|
||||
props.putAll(producerProperties.getExtension().getConfiguration());
|
||||
}
|
||||
// Always send as byte[] on dlq (the same byte[] that the consumer received)
|
||||
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
|
||||
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
|
||||
ByteArraySerializer.class);
|
||||
|
||||
return new DefaultKafkaProducerFactory<>(props);
|
||||
}
|
||||
|
||||
private String toDisplayString(String original) {
|
||||
if (original.length() <= 50) {
|
||||
return original;
|
||||
}
|
||||
return original.substring(0, 50) + "...";
|
||||
}
|
||||
|
||||
}
|
||||
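The dispatcher above republishes the exact bytes the consumer received onto the DLQ topic, preserving the original partition. A minimal standalone sketch of that pattern using the plain kafka-clients producer follows; the broker address, topic name and record contents are hypothetical and are not taken from this commit.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class DlqForwardingSketch {

	public static void main(String[] args) {
		Properties props = new Properties();
		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
		// Forward the same byte[] key/value the consumer received, as the dispatcher above does.
		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

		try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
			byte[] key = "some-key".getBytes();      // hypothetical key of the failed record
			byte[] value = "unparseable".getBytes(); // hypothetical payload of the failed record
			int partition = 0;                       // keep the original partition, as sendToDlq does
			producer.send(new ProducerRecord<>("orders.DLT", partition, key, value));
		}
	}
}

Keeping key and value as raw byte[] (ByteArraySerializer) is what allows a dead-lettered record to be replayed later without re-encoding.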
@@ -1,5 +1,5 @@
/*
 * Copyright 2019-2021 the original author or authors.
 * Copyright 2019-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,62 +16,65 @@

package org.springframework.cloud.stream.binder.kafka.streams;

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.GlobalKTable;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.cloud.stream.binder.kafka.streams.function.KafkaStreamsBindableProxyFactory;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.function.context.FunctionCatalog;
import org.springframework.cloud.function.core.FluxedFunction;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binding.BindableProxyFactory;
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.function.FunctionConstants;
import org.springframework.cloud.stream.function.StreamFunctionProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.core.ResolvableType;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.kafka.config.KafkaStreamsConfiguration;
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
import org.springframework.kafka.core.CleanupConfig;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;

/**
 * @author Soby Chacko
 * @since 2.2.0
 */
public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderProcessor implements BeanFactoryAware {
public class KafkaStreamsFunctionProcessor implements ApplicationContextAware {

private static final Log LOG = LogFactory.getLog(KafkaStreamsFunctionProcessor.class);
private static final String OUTBOUND = "outbound";

private final BindingServiceProperties bindingServiceProperties;
private final Map<String, StreamsBuilderFactoryBean> methodStreamsBuilderFactoryBeanMap = new HashMap<>();
@@ -79,12 +82,12 @@ public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderPro
private final KeyValueSerdeResolver keyValueSerdeResolver;
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;
private final CleanupConfig cleanupConfig;
private final FunctionCatalog functionCatalog;
private final BindableProxyFactory bindableProxyFactory;

private ConfigurableApplicationContext applicationContext;

private BeanFactory beanFactory;
private StreamFunctionProperties streamFunctionProperties;
private KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties;
StreamsBuilderFactoryBeanConfigurer customizer;
ConfigurableEnvironment environment;

public KafkaStreamsFunctionProcessor(BindingServiceProperties bindingServiceProperties,
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
@@ -92,397 +95,113 @@ public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderPro
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
CleanupConfig cleanupConfig,
StreamFunctionProperties streamFunctionProperties,
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
StreamsBuilderFactoryBeanConfigurer customizer, ConfigurableEnvironment environment) {
super(bindingServiceProperties, kafkaStreamsBindingInformationCatalogue, kafkaStreamsExtendedBindingProperties,
keyValueSerdeResolver, cleanupConfig);
FunctionCatalog functionCatalog,
BindableProxyFactory bindableProxyFactory) {
this.bindingServiceProperties = bindingServiceProperties;
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
this.keyValueSerdeResolver = keyValueSerdeResolver;
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
this.streamFunctionProperties = streamFunctionProperties;
this.kafkaStreamsBinderConfigurationProperties = kafkaStreamsBinderConfigurationProperties;
this.customizer = customizer;
this.environment = environment;
this.cleanupConfig = cleanupConfig;
this.functionCatalog = functionCatalog;
this.bindableProxyFactory = bindableProxyFactory;
}

private Map<String, ResolvableType> buildTypeMap(ResolvableType resolvableType,
KafkaStreamsBindableProxyFactory kafkaStreamsBindableProxyFactory,
Method method, String functionName) {
Map<String, ResolvableType> resolvableTypeMap = new LinkedHashMap<>();
if (method != null) { // Component functional bean.
final ResolvableType firstMethodParameter = ResolvableType.forMethodParameter(method, 0);
ResolvableType currentOutputGeneric = ResolvableType.forMethodReturnType(method);
private Map<String, ResolvableType> buildTypeMap(ResolvableType resolvableType) {
final Set<String> inputs = new TreeSet<>(this.bindableProxyFactory.getInputs());

final Set<String> inputs = new LinkedHashSet<>(kafkaStreamsBindableProxyFactory.getInputs());
final Iterator<String> iterator = inputs.iterator();
populateResolvableTypeMap(firstMethodParameter, resolvableTypeMap, iterator, method, functionName);
Map<String, ResolvableType> map = new LinkedHashMap<>();
final Iterator<String> iterator = inputs.iterator();

final Class<?> outputRawclass = currentOutputGeneric.getRawClass();
traverseReturnTypeForComponentBeans(resolvableTypeMap, currentOutputGeneric, inputs, iterator, outputRawclass);
if (iterator.hasNext()) {
map.put(iterator.next(), resolvableType.getGeneric(0));
ResolvableType generic = resolvableType.getGeneric(1);

while (iterator.hasNext() && generic != null) {
if (generic.getRawClass() != null &&
(generic.getRawClass().equals(Function.class) ||
generic.getRawClass().equals(Consumer.class))) {
map.put(iterator.next(), generic.getGeneric(0));
}
generic = generic.getGeneric(1);
}
}
else if (resolvableType != null && resolvableType.getRawClass() != null) {
int inputCount = 1;

ResolvableType currentOutputGeneric;
if (resolvableType.getRawClass().isAssignableFrom(BiFunction.class) ||
resolvableType.getRawClass().isAssignableFrom(BiConsumer.class)) {
inputCount = 2;
currentOutputGeneric = resolvableType.getGeneric(2);
return map;
}

@SuppressWarnings("unchecked")
public void orchestrateStreamListenerSetupMethod(ResolvableType resolvableType, String functionName) {
final Set<String> outputs = new TreeSet<>(this.bindableProxyFactory.getOutputs());

String[] methodAnnotatedOutboundNames = new String[outputs.size()];
int j = 0;
for (String output : outputs) {
methodAnnotatedOutboundNames[j++] = output;
}

final Map<String, ResolvableType> stringResolvableTypeMap = buildTypeMap(resolvableType);
Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(stringResolvableTypeMap, "foobar");
try {
if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(Consumer.class)) {
Consumer<Object> consumer = functionCatalog.lookup(Consumer.class, functionName);
consumer.accept(adaptedInboundArguments[0]);
}
else {
currentOutputGeneric = resolvableType.getGeneric(1);
}
while (currentOutputGeneric.getRawClass() != null && functionOrConsumerFound(currentOutputGeneric)) {
inputCount++;
currentOutputGeneric = currentOutputGeneric.getGeneric(1);
}
final Set<String> inputs = new LinkedHashSet<>(kafkaStreamsBindableProxyFactory.getInputs());

final Iterator<String> iterator = inputs.iterator();

populateResolvableTypeMap(resolvableType, resolvableTypeMap, iterator);

ResolvableType iterableResType = resolvableType;
int i = resolvableType.getRawClass().isAssignableFrom(BiFunction.class) ||
resolvableType.getRawClass().isAssignableFrom(BiConsumer.class) ? 2 : 1;
ResolvableType outboundResolvableType;
if (i == inputCount) {
outboundResolvableType = iterableResType.getGeneric(i);
}
else {
while (i < inputCount && iterator.hasNext()) {
iterableResType = iterableResType.getGeneric(1);
if (iterableResType.getRawClass() != null &&
functionOrConsumerFound(iterableResType)) {
populateResolvableTypeMap(iterableResType, resolvableTypeMap, iterator);
Function<Object, Object> function = functionCatalog.lookup(Function.class, functionName);
Object target = null;
if (function instanceof FluxedFunction) {
target = ((FluxedFunction) function).getTarget();
}
function = (Function) target;
Object result = function.apply(adaptedInboundArguments[0]);
int i = 1;
while (result instanceof Function || result instanceof Consumer) {
if (result instanceof Function) {
result = ((Function) result).apply(adaptedInboundArguments[i]);
}
else {
((Consumer) result).accept(adaptedInboundArguments[i]);
result = null;
}
i++;
}
outboundResolvableType = iterableResType.getGeneric(1);
}
resolvableTypeMap.put(OUTBOUND, outboundResolvableType);
}
return resolvableTypeMap;
}
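// Illustration (hypothetical types, not part of this commit): buildTypeMap() walks the nested generics of
// curried function beans. For a bean declared as
//   Function<KStream<String, Order>, Function<KTable<String, Customer>, KStream<String, EnrichedOrder>>>
// the first input binding is mapped to KStream<String, Order>, the second to KTable<String, Customer>, and
// the trailing generic (KStream<String, EnrichedOrder>) is recorded under the OUTBOUND key. Order, Customer
// and EnrichedOrder are placeholder types used only for illustration.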
private void traverseReturnTypeForComponentBeans(Map<String, ResolvableType> resolvableTypeMap, ResolvableType currentOutputGeneric,
Set<String> inputs, Iterator<String> iterator, Class<?> outputRawclass) {
if (outputRawclass != null && !outputRawclass.equals(Void.TYPE)) {
ResolvableType iterableResType = currentOutputGeneric;
int i = 1;
// Traverse through the return signature.
while (i < inputs.size() && iterator.hasNext()) {
if (iterableResType.getRawClass() != null &&
functionOrConsumerFound(iterableResType)) {
populateResolvableTypeMap(iterableResType, resolvableTypeMap, iterator);
}
iterableResType = iterableResType.getGeneric(1);
i++;
}
if (iterableResType.getRawClass() != null && KStream.class.isAssignableFrom(iterableResType.getRawClass())) {
resolvableTypeMap.put(OUTBOUND, iterableResType);
}
}
}

private boolean functionOrConsumerFound(ResolvableType iterableResType) {
return iterableResType.getRawClass().equals(Function.class) ||
iterableResType.getRawClass().equals(Consumer.class);
}

private void populateResolvableTypeMap(ResolvableType resolvableType, Map<String, ResolvableType> resolvableTypeMap,
Iterator<String> iterator) {
final String next = iterator.next();
resolvableTypeMap.put(next, resolvableType.getGeneric(0));
if (resolvableType.getRawClass() != null &&
(resolvableType.getRawClass().isAssignableFrom(BiFunction.class) ||
resolvableType.getRawClass().isAssignableFrom(BiConsumer.class))
&& iterator.hasNext()) {
resolvableTypeMap.put(iterator.next(), resolvableType.getGeneric(1));
}
}

private void populateResolvableTypeMap(ResolvableType resolvableType, Map<String, ResolvableType> resolvableTypeMap,
Iterator<String> iterator, Method method, String functionName) {
final String next = iterator.next();
resolvableTypeMap.put(next, resolvableType);
if (method != null) {
final Object bean = beanFactory.getBean(functionName);
if (BiFunction.class.isAssignableFrom(bean.getClass()) || BiConsumer.class.isAssignableFrom(bean.getClass())) {
resolvableTypeMap.put(iterator.next(), ResolvableType.forMethodParameter(method, 1));
}
}
}

private ResolvableType checkOutboundForComposedFunctions(
ResolvableType outputResolvableType) {

ResolvableType currentOutputGeneric;

if (outputResolvableType.getRawClass() != null && outputResolvableType.getRawClass().isAssignableFrom(BiFunction.class)) {
currentOutputGeneric = outputResolvableType.getGeneric(2);
}
else {
currentOutputGeneric = outputResolvableType.getGeneric(1);
}
while (currentOutputGeneric.getRawClass() != null && functionOrConsumerFound(currentOutputGeneric)) {
currentOutputGeneric = currentOutputGeneric.getGeneric(1);
}
return currentOutputGeneric;
}

/**
 * This method must be kept stateless. In the case of multiple function beans in an application,
 * isolated {@link KafkaStreamsBindableProxyFactory} instances are passed in separately for those functions. If the
 * state is shared between invocations, that will create potential race conditions. Hence, invocations of this method
 * should not be dependent on state modified by a previous invocation.
 *
 * @param resolvableType type of the binding
 * @param functionName bean name of the function
 * @param kafkaStreamsBindableProxyFactory bindable proxy factory for the Kafka Streams type
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public void setupFunctionInvokerForKafkaStreams(ResolvableType resolvableType, String functionName,
KafkaStreamsBindableProxyFactory kafkaStreamsBindableProxyFactory, Method method,
ResolvableType outputResolvableType,
String... composedFunctionNames) {
final Map<String, ResolvableType> resolvableTypes = buildTypeMap(resolvableType,
kafkaStreamsBindableProxyFactory, method, functionName);

ResolvableType outboundResolvableType;
if (outputResolvableType != null) {
outboundResolvableType = checkOutboundForComposedFunctions(outputResolvableType);
resolvableTypes.remove(OUTBOUND);
}
else {
outboundResolvableType = resolvableTypes.remove(OUTBOUND);
}

Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(resolvableTypes, functionName);
try {
if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(Consumer.class)) {
Consumer<Object> consumer = (Consumer) this.beanFactory.getBean(functionName);
consumer.accept(adaptedInboundArguments[0]);
}
else if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(BiConsumer.class)) {
BiConsumer<Object, Object> biConsumer = (BiConsumer) this.beanFactory.getBean(functionName);
biConsumer.accept(adaptedInboundArguments[0], adaptedInboundArguments[1]);
}
else if (method != null) { // Handling component functional beans
final Object bean = beanFactory.getBean(functionName);
if (Consumer.class.isAssignableFrom(bean.getClass())) {
((Consumer) bean).accept(adaptedInboundArguments[0]);
}
else if (BiConsumer.class.isAssignableFrom(bean.getClass())) {
((BiConsumer) bean).accept(adaptedInboundArguments[0], adaptedInboundArguments[1]);
}
else if (Function.class.isAssignableFrom(bean.getClass()) || BiFunction.class.isAssignableFrom(bean.getClass())) {
Object result;
if (BiFunction.class.isAssignableFrom(bean.getClass())) {
result = ((BiFunction) bean).apply(adaptedInboundArguments[0], adaptedInboundArguments[1]);
}
else {
result = ((Function) bean).apply(adaptedInboundArguments[0]);
}
result = handleCurriedFunctions(adaptedInboundArguments, result);
if (result != null) {
final Set<String> outputs = new TreeSet<>(kafkaStreamsBindableProxyFactory.getOutputs());
final Iterator<String> outboundDefinitionIterator = outputs.iterator();
if (result.getClass().isArray()) {
final String initialInput = resolvableTypes.keySet().iterator().next();
final StreamsBuilderFactoryBean streamsBuilderFactoryBean =
this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeanPerBinding().get(initialInput);
handleKStreamArrayOutbound(resolvableType, functionName, kafkaStreamsBindableProxyFactory,
outboundResolvableType, (Object[]) result, streamsBuilderFactoryBean);
}
else {
if (KTable.class.isAssignableFrom(result.getClass())) {
handleSingleKStreamOutbound(resolvableTypes, outboundResolvableType != null ?
outboundResolvableType : resolvableType.getGeneric(1), ((KTable) result).toStream(), outboundDefinitionIterator);
}
else {
handleSingleKStreamOutbound(resolvableTypes, outboundResolvableType, (KStream) result, outboundDefinitionIterator);
}
}
}
}
}
else {
Object result = null;
if (resolvableType.getRawClass() != null && resolvableType.getRawClass().equals(BiFunction.class)) {
if (composedFunctionNames != null && composedFunctionNames.length > 0) {
result = handleComposedFunctions(adaptedInboundArguments, result, composedFunctionNames);
}
else {
BiFunction<Object, Object, Object> biFunction = (BiFunction) beanFactory.getBean(functionName);
result = biFunction.apply(adaptedInboundArguments[0], adaptedInboundArguments[1]);
result = handleCurriedFunctions(adaptedInboundArguments, result);
}
}
else {
if (composedFunctionNames != null && composedFunctionNames.length > 0) {
result = handleComposedFunctions(adaptedInboundArguments, result, composedFunctionNames);
}
else {
Function<Object, Object> function = (Function) beanFactory.getBean(functionName);
result = function.apply(adaptedInboundArguments[0]);
result = handleCurriedFunctions(adaptedInboundArguments, result);
}
}
if (result != null) {
final Set<String> outputs = new TreeSet<>(kafkaStreamsBindableProxyFactory.getOutputs());
final Iterator<String> outboundDefinitionIterator = outputs.iterator();

if (result.getClass().isArray()) {
final String initialInput = resolvableTypes.keySet().iterator().next();
final StreamsBuilderFactoryBean streamsBuilderFactoryBean =
this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeanPerBinding().get(initialInput);
handleKStreamArrayOutbound(resolvableType, functionName, kafkaStreamsBindableProxyFactory,
outboundResolvableType, (Object[]) result, streamsBuilderFactoryBean);
Assert.isTrue(methodAnnotatedOutboundNames.length == ((Object[]) result).length,
"Result does not match with the number of declared outbounds");
}
else {
if (KTable.class.isAssignableFrom(result.getClass())) {
handleSingleKStreamOutbound(resolvableTypes, outboundResolvableType != null ?
outboundResolvableType : resolvableType.getGeneric(1), ((KTable) result).toStream(), outboundDefinitionIterator);
}
else {
handleSingleKStreamOutbound(resolvableTypes, outboundResolvableType != null ?
outboundResolvableType : resolvableType.getGeneric(1), (KStream) result, outboundDefinitionIterator);
Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
"Result does not match with the number of declared outbounds");
}
if (result.getClass().isArray()) {
Object[] outboundKStreams = (Object[]) result;
int k = 0;
for (Object outboundKStream : outboundKStreams) {
Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[k++]);

KStreamBoundElementFactory.KStreamWrapper
boundElement = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
boundElement.wrap((KStream) outboundKStream);
}
}
else {
Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[0]);

KStreamBoundElementFactory.KStreamWrapper
boundElement = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
boundElement.wrap((KStream) result);
}
}
}
}
catch (Exception ex) {
throw new BeanInitializationException("Cannot setup function invoker for this Kafka Streams function.", ex);
throw new BeanInitializationException("Cannot setup StreamListener for foobar", ex);
}
}

@SuppressWarnings({"unchecked"})
private Object handleComposedFunctions(Object[] adaptedInboundArguments, Object result, String... composedFunctionNames) {
Object bean = beanFactory.getBean(composedFunctionNames[0]);
if (BiFunction.class.isAssignableFrom(bean.getClass())) {
result = ((BiFunction<Object, Object, Object>) bean).apply(adaptedInboundArguments[0], adaptedInboundArguments[1]);
}
else if (Function.class.isAssignableFrom(bean.getClass())) {
result = ((Function<Object, Object>) bean).apply(adaptedInboundArguments[0]);
}
// If the return is a curried function, apply it
result = handleCurriedFunctions(adaptedInboundArguments, result);
// Apply composed functions
return applyComposedFunctions(result, composedFunctionNames);
}

@SuppressWarnings({"unchecked"})
private Object applyComposedFunctions(Object result, String[] composedFunctionNames) {
for (int i = 1; i < composedFunctionNames.length; i++) {
final Object bean = beanFactory.getBean(composedFunctionNames[i]);
if (Consumer.class.isAssignableFrom(bean.getClass())) {
((Consumer<Object>) bean).accept(result);
result = null;
}
else if (Function.class.isAssignableFrom(bean.getClass())) {
result = ((Function<Object, Object>) bean).apply(result);
}
else {
throw new IllegalStateException("You can only compose functions of type either java.util.function.Function or java.util.function.Consumer.");
}
}
return result;
}

@SuppressWarnings({"unchecked"})
private Object handleCurriedFunctions(Object[] adaptedInboundArguments, Object result) {
int i = 1;
while (result instanceof Function || result instanceof Consumer) {
if (result instanceof Function) {
result = ((Function<Object, Object>) result).apply(adaptedInboundArguments[i]);
}
else {
((Consumer<Object>) result).accept(adaptedInboundArguments[i]);
result = null;
}
i++;
}
return result;
}

private void handleSingleKStreamOutbound(Map<String, ResolvableType> resolvableTypes, ResolvableType outboundResolvableType,
KStream<Object, Object> result, Iterator<String> outboundDefinitionIterator) {
if (outboundDefinitionIterator.hasNext()) {
String outbound = outboundDefinitionIterator.next();
Object targetBean = handleSingleKStreamOutbound(result, outbound);
kafkaStreamsBindingInformationCatalogue.addOutboundKStreamResolvable(targetBean,
outboundResolvableType);

final String next = resolvableTypes.keySet().iterator().next();
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
.getStreamsBuilderFactoryBeanPerBinding().get(next);
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(outbound, streamsBuilderFactoryBean);
}
}

private Object handleSingleKStreamOutbound(KStream<Object, Object> result, String next) {
Object targetBean = this.applicationContext.getBean(next);
KStreamBoundElementFactory.KStreamWrapper
boundElement = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
boundElement.wrap(result);
return targetBean;
}

@SuppressWarnings({ "unchecked", "rawtypes" })
private void handleKStreamArrayOutbound(ResolvableType resolvableType, String functionName,
KafkaStreamsBindableProxyFactory kafkaStreamsBindableProxyFactory,
ResolvableType outboundResolvableType, Object[] result,
StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
// Binding target as the output bindings were deferred in the KafkaStreamsBindableProxyFactory
// due to the fact that it didn't know the returned array size. At this point in the execution,
// we know exactly the number of outbound components (from the array length), so do the binding.
final int length = result.length;

List<String> outputBindings = getOutputBindings(functionName, length);
Iterator<String> iterator = outputBindings.iterator();
BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;

for (Object o : result) {
String next = iterator.next();
kafkaStreamsBindableProxyFactory.addOutputBinding(next, KStream.class);
RootBeanDefinition rootBeanDefinition1 = new RootBeanDefinition();
rootBeanDefinition1.setInstanceSupplier(() -> kafkaStreamsBindableProxyFactory.getOutputHolders().get(next).getBoundTarget());
registry.registerBeanDefinition(next, rootBeanDefinition1);

Object targetBean = this.applicationContext.getBean(next);

KStreamBoundElementFactory.KStreamWrapper
boundElement = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
boundElement.wrap((KStream) o);

kafkaStreamsBindingInformationCatalogue.addOutboundKStreamResolvable(
targetBean, outboundResolvableType != null ? outboundResolvableType : resolvableType.getGeneric(1));

this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(next, streamsBuilderFactoryBean);
}
}

private List<String> getOutputBindings(String functionName, int outputs) {
List<String> outputBindings = this.streamFunctionProperties.getOutputBindings(functionName);
List<String> outputBindingNames = new ArrayList<>();
if (!CollectionUtils.isEmpty(outputBindings)) {
outputBindingNames.addAll(outputBindings);
return outputBindingNames;
}
else {
for (int i = 0; i < outputs; i++) {
outputBindingNames.add(String.format("%s-%s-%d", functionName, FunctionConstants.DEFAULT_OUTPUT_SUFFIX, i));
}
}
return outputBindingNames;

}
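// Illustration (hypothetical names, not part of this commit): when no explicit outputBindings are configured,
// getOutputBindings() above derives names as functionName + "-" + FunctionConstants.DEFAULT_OUTPUT_SUFFIX + "-" + index,
// so a function bean named "process" returning a KStream[] of length 2 would end up with bindings such as
// "process-out-0" and "process-out-1" (assuming the default suffix resolves to "out").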
@SuppressWarnings({"unchecked"})
private Object[] adaptAndRetrieveInboundArguments(Map<String, ResolvableType> stringResolvableTypeMap,
String functionName) {
@@ -492,52 +211,61 @@ public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderPro
Class<?> parameterType = stringResolvableTypeMap.get(input).getRawClass();

if (input != null) {
Assert.isInstanceOf(String.class, input, "Annotation value must be a String");
Object targetBean = applicationContext.getBean(input);
BindingProperties bindingProperties = this.bindingServiceProperties.getBindingProperties(input);
enableNativeDecodingForKTableAlways(parameterType, bindingProperties);
//Retrieve the StreamsConfig created for this method if available.
//Otherwise, create the StreamsBuilderFactory and get the underlying config.
if (!this.methodStreamsBuilderFactoryBeanMap.containsKey(functionName)) {
StreamsBuilderFactoryBean streamsBuilderFactoryBean = buildStreamsBuilderAndRetrieveConfig(functionName, applicationContext,
input, kafkaStreamsBinderConfigurationProperties, customizer, this.environment, bindingProperties);
this.methodStreamsBuilderFactoryBeanMap.put(functionName, streamsBuilderFactoryBean);
buildStreamsBuilderAndRetrieveConfig(functionName, applicationContext, input);
}
try {
StreamsBuilderFactoryBean streamsBuilderFactoryBean =
this.methodStreamsBuilderFactoryBeanMap.get(functionName);
StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
final String applicationId = streamsBuilderFactoryBean.getStreamsConfiguration().getProperty(StreamsConfig.APPLICATION_ID_CONFIG);
KafkaStreamsConsumerProperties extendedConsumerProperties =
this.kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(input);
extendedConsumerProperties.setApplicationId(applicationId);
//get state store spec
//KafkaStreamsStateStoreProperties spec = buildStateStoreSpec(method);
Serde<?> keySerde = this.keyValueSerdeResolver.getInboundKeySerde(extendedConsumerProperties);
Serde<?> valueSerde = this.keyValueSerdeResolver.getInboundValueSerde(
bindingProperties.getConsumer(), extendedConsumerProperties);

Serde<?> keySerde = this.keyValueSerdeResolver.getInboundKeySerde(extendedConsumerProperties, stringResolvableTypeMap.get(input));
LOG.info("Key Serde used for " + input + ": " + keySerde.getClass().getName());
Serde<?> valueSerde = bindingServiceProperties.getConsumerProperties(input).isUseNativeDecoding() ?
getValueSerde(input, extendedConsumerProperties, stringResolvableTypeMap.get(input)) : Serdes.ByteArray();
LOG.info("Value Serde used for " + input + ": " + valueSerde.getClass().getName());
final Topology.AutoOffsetReset autoOffsetReset = getAutoOffsetReset(input, extendedConsumerProperties);
final KafkaConsumerProperties.StartOffset startOffset = extendedConsumerProperties.getStartOffset();
Topology.AutoOffsetReset autoOffsetReset = null;
if (startOffset != null) {
switch (startOffset) {
case earliest:
autoOffsetReset = Topology.AutoOffsetReset.EARLIEST;
break;
case latest:
autoOffsetReset = Topology.AutoOffsetReset.LATEST;
break;
default:
break;
}
}
if (extendedConsumerProperties.isResetOffsets()) {
LOG.warn("Detected resetOffsets configured on binding " + input + ". "
+ "Setting resetOffsets in Kafka Streams binder does not have any effect.");
}

if (parameterType.isAssignableFrom(KStream.class)) {
KStream<?, ?> stream = getKStream(input, bindingProperties, extendedConsumerProperties,
streamsBuilder, keySerde, valueSerde, autoOffsetReset, i == 0);
KStream<?, ?> stream = getkStream(input, bindingProperties,
streamsBuilder, keySerde, valueSerde, autoOffsetReset);
KStreamBoundElementFactory.KStreamWrapper kStreamWrapper =
(KStreamBoundElementFactory.KStreamWrapper) targetBean;
//wrap the proxy created during the initial target type binding with real object (KStream)
kStreamWrapper.wrap((KStream<Object, Object>) stream);

this.kafkaStreamsBindingInformationCatalogue.addKeySerde((KStream<?, ?>) kStreamWrapper, keySerde);

this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(input, streamsBuilderFactoryBean);
this.kafkaStreamsBindingInformationCatalogue.addConsumerPropertiesPerSbfb(streamsBuilderFactoryBean,
bindingServiceProperties.getConsumerProperties(input));
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);

if (KStream.class.isAssignableFrom(stringResolvableTypeMap.get(input).getRawClass())) {
final Class<?> valueClass =
(stringResolvableTypeMap.get(input).getGeneric(1).getRawClass() != null)
? (stringResolvableTypeMap.get(input).getGeneric(1).getRawClass()) : Object.class;
if (this.kafkaStreamsBindingInformationCatalogue.isUseNativeDecoding(
(KStream<?, ?>) kStreamWrapper)) {
(KStream) kStreamWrapper)) {
arguments[i] = stream;
}
else {
@@ -549,11 +277,31 @@ public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderPro
if (arguments[i] == null) {
arguments[i] = stream;
}
Assert.notNull(arguments[i], "Problems encountered while adapting the function argument.");
Assert.notNull(arguments[i], "problems..");
}
else {
handleKTableGlobalKTableInputs(arguments, i, input, parameterType, targetBean, streamsBuilderFactoryBean,
streamsBuilder, extendedConsumerProperties, keySerde, valueSerde, autoOffsetReset, i == 0);
else if (parameterType.isAssignableFrom(KTable.class)) {
String materializedAs = extendedConsumerProperties.getMaterializedAs();
String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
KTable<?, ?> table = getKTable(streamsBuilder, keySerde, valueSerde, materializedAs,
bindingDestination, autoOffsetReset);
KTableBoundElementFactory.KTableWrapper kTableWrapper =
(KTableBoundElementFactory.KTableWrapper) targetBean;
//wrap the proxy created during the initial target type binding with real object (KTable)
kTableWrapper.wrap((KTable<Object, Object>) table);
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
arguments[i] = table;
}
else if (parameterType.isAssignableFrom(GlobalKTable.class)) {
String materializedAs = extendedConsumerProperties.getMaterializedAs();
String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
GlobalKTable<?, ?> table = getGlobalKTable(streamsBuilder, keySerde, valueSerde, materializedAs,
bindingDestination, autoOffsetReset);
GlobalKTableBoundElementFactory.GlobalKTableWrapper globalKTableWrapper =
(GlobalKTableBoundElementFactory.GlobalKTableWrapper) targetBean;
//wrap the proxy created during the initial target type binding with real object (KTable)
globalKTableWrapper.wrap((GlobalKTable<Object, Object>) table);
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
arguments[i] = table;
}
i++;
}
@@ -568,8 +316,151 @@ public class KafkaStreamsFunctionProcessor extends AbstractKafkaStreamsBinderPro
return arguments;
}

private GlobalKTable<?, ?> getGlobalKTable(StreamsBuilder streamsBuilder,
Serde<?> keySerde, Serde<?> valueSerde, String materializedAs,
String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
return materializedAs != null ?
materializedAsGlobalKTable(streamsBuilder, bindingDestination, materializedAs,
keySerde, valueSerde, autoOffsetReset) :
streamsBuilder.globalTable(bindingDestination,
Consumed.with(keySerde, valueSerde).withOffsetResetPolicy(autoOffsetReset));
}

private KTable<?, ?> getKTable(StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde,
String materializedAs,
String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
return materializedAs != null ?
materializedAs(streamsBuilder, bindingDestination, materializedAs, keySerde, valueSerde,
autoOffsetReset) :
streamsBuilder.table(bindingDestination,
Consumed.with(keySerde, valueSerde).withOffsetResetPolicy(autoOffsetReset));
}

private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder, String destination,
String storeName, Serde<K> k, Serde<V> v,
Topology.AutoOffsetReset autoOffsetReset) {
return streamsBuilder.table(this.bindingServiceProperties.getBindingDestination(destination),
Consumed.with(k, v).withOffsetResetPolicy(autoOffsetReset),
getMaterialized(storeName, k, v));
}

private <K, V> GlobalKTable<K, V> materializedAsGlobalKTable(StreamsBuilder streamsBuilder,
String destination, String storeName,
Serde<K> k, Serde<V> v,
Topology.AutoOffsetReset autoOffsetReset) {
return streamsBuilder.globalTable(this.bindingServiceProperties.getBindingDestination(destination),
Consumed.with(k, v).withOffsetResetPolicy(autoOffsetReset),
getMaterialized(storeName, k, v));
}

private <K, V> Materialized<K, V, KeyValueStore<Bytes, byte[]>> getMaterialized(String storeName,
Serde<K> k, Serde<V> v) {
return Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
.withKeySerde(k)
.withValueSerde(v);
}
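// Illustration (hypothetical store name, not part of this commit): getMaterialized() above produces a
// key/value state store spec along the lines of
//   Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("orders-store").withKeySerde(keySerde).withValueSerde(valueSerde)
// where "orders-store" would come from the materializedAs consumer property on the binding.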
private KStream<?, ?> getkStream(String inboundName,
BindingProperties bindingProperties,
StreamsBuilder streamsBuilder,
Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset) {
String[] bindingTargets = StringUtils
.commaDelimitedListToStringArray(this.bindingServiceProperties.getBindingDestination(inboundName));

KStream<?, ?> stream =
streamsBuilder.stream(Arrays.asList(bindingTargets),
Consumed.with(keySerde, valueSerde)
.withOffsetResetPolicy(autoOffsetReset));
final boolean nativeDecoding = this.bindingServiceProperties.getConsumerProperties(inboundName)
.isUseNativeDecoding();
if (nativeDecoding) {
LOG.info("Native decoding is enabled for " + inboundName + ". " +
"Inbound deserialization done at the broker.");
}
else {
LOG.info("Native decoding is disabled for " + inboundName + ". " +
"Inbound message conversion done by Spring Cloud Stream.");
}

stream = stream.mapValues((value) -> {
Object returnValue;
String contentType = bindingProperties.getContentType();
if (value != null && !StringUtils.isEmpty(contentType) && !nativeDecoding) {
returnValue = MessageBuilder.withPayload(value)
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
}
else {
returnValue = value;
}
return returnValue;
});
return stream;
}
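// Illustration (hypothetical topics, not part of this commit): getkStream() above splits the binding
// destination on commas, so a destination configured as "orders,orders-audit" yields a single KStream that
// consumes from both topics via streamsBuilder.stream(Arrays.asList("orders", "orders-audit"), ...).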
private void enableNativeDecodingForKTableAlways(Class<?> parameterType, BindingProperties bindingProperties) {
if (parameterType.isAssignableFrom(KTable.class) || parameterType.isAssignableFrom(GlobalKTable.class)) {
if (bindingProperties.getConsumer() == null) {
bindingProperties.setConsumer(new ConsumerProperties());
}
//No framework level message conversion provided for KTable/GlobalKTable, it's done by the broker.
bindingProperties.getConsumer().setUseNativeDecoding(true);
}
}

@SuppressWarnings({"unchecked"})
private void buildStreamsBuilderAndRetrieveConfig(String functionName, ApplicationContext applicationContext,
String inboundName) {
ConfigurableListableBeanFactory beanFactory = this.applicationContext.getBeanFactory();

Map<String, Object> streamConfigGlobalProperties = applicationContext.getBean("streamConfigGlobalProperties",
Map.class);

KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
.getExtendedConsumerProperties(inboundName);
streamConfigGlobalProperties.putAll(extendedConsumerProperties.getConfiguration());

String applicationId = extendedConsumerProperties.getApplicationId();
//override application.id if set at the individual binding level.
if (StringUtils.hasText(applicationId)) {
streamConfigGlobalProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
}

int concurrency = this.bindingServiceProperties.getConsumerProperties(inboundName).getConcurrency();
// override concurrency if set at the individual binding level.
if (concurrency > 1) {
streamConfigGlobalProperties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, concurrency);
}

Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers = applicationContext.getBean(
"kafkaStreamsDlqDispatchers", Map.class);

KafkaStreamsConfiguration kafkaStreamsConfiguration =
new KafkaStreamsConfiguration(streamConfigGlobalProperties) {
@Override
public Properties asProperties() {
Properties properties = super.asProperties();
properties.put(SendToDlqAndContinue.KAFKA_STREAMS_DLQ_DISPATCHERS, kafkaStreamsDlqDispatchers);
return properties;
}
};

StreamsBuilderFactoryBean streamsBuilder = this.cleanupConfig == null
? new StreamsBuilderFactoryBean(kafkaStreamsConfiguration)
: new StreamsBuilderFactoryBean(kafkaStreamsConfiguration, this.cleanupConfig);
streamsBuilder.setAutoStartup(false);
BeanDefinition streamsBuilderBeanDefinition =
BeanDefinitionBuilder.genericBeanDefinition(
(Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(), () -> streamsBuilder)
.getRawBeanDefinition();
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("stream-builder-" +
functionName, streamsBuilderBeanDefinition);
StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean("&stream-builder-" +
functionName, StreamsBuilderFactoryBean.class);
this.methodStreamsBuilderFactoryBeanMap.put(functionName, streamsBuilderX);
}
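// Illustration (hypothetical function name, not part of this commit): the StreamsBuilderFactoryBean
// registered above is named "stream-builder-" + functionName, so for a function bean named "process" the
// factory bean itself can be retrieved as applicationContext.getBean("&stream-builder-process",
// StreamsBuilderFactoryBean.class), the "&" prefix addressing the FactoryBean rather than the StreamsBuilder
// it produces.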
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
this.beanFactory = beanFactory;
public final void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = (ConfigurableApplicationContext) applicationContext;
}
}
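The processor above is the piece that binds user-declared java.util.function beans to Kafka Streams types. A minimal sketch of the kind of application-side bean it wires is shown below; the bean name, input/output types and transformation are hypothetical and only illustrate the shape the processor expects.

import java.util.function.Function;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class UppercaseProcessorSketch {

	// A single-input, single-output function: the binder binds the argument to the input binding's topic
	// and the returned KStream to the output binding's topic.
	@Bean
	public Function<KStream<String, String>, KStream<String, String>> process() {
		return input -> input.mapValues(value -> value.toUpperCase());
	}
}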
@@ -1,57 +0,0 @@
/*
 * Copyright 2019-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.io.IOException;

import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;

/**
 * Jaas configuration bean for Kafka Streams binder types.
 *
 * @author Soby Chacko
 * @since 3.1.4
 */
@Configuration
public class KafkaStreamsJaasConfiguration {

@Bean
@ConditionalOnMissingBean(KafkaJaasLoginModuleInitializer.class)
public KafkaJaasLoginModuleInitializer jaasInitializer(
KafkaBinderConfigurationProperties configurationProperties)
throws IOException {
KafkaJaasLoginModuleInitializer kafkaJaasLoginModuleInitializer = new KafkaJaasLoginModuleInitializer();
JaasLoginModuleConfiguration jaas = configurationProperties.getJaas();
if (jaas != null) {
kafkaJaasLoginModuleInitializer.setLoginModule(jaas.getLoginModule());

KafkaJaasLoginModuleInitializer.ControlFlag controlFlag = jaas
.getControlFlag();

if (controlFlag != null) {
kafkaJaasLoginModuleInitializer.setControlFlag(controlFlag);
}
kafkaJaasLoginModuleInitializer.setOptions(jaas.getOptions());
}
return kafkaJaasLoginModuleInitializer;
}
}
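Because the bean above is guarded by @ConditionalOnMissingBean, an application can supply its own KafkaJaasLoginModuleInitializer and the binder backs off. A minimal sketch is shown below; the login module and option values are hypothetical placeholders.

import java.io.IOException;
import java.util.Collections;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;

@Configuration
public class CustomJaasConfigSketch {

	@Bean
	public KafkaJaasLoginModuleInitializer jaasInitializer() throws IOException {
		KafkaJaasLoginModuleInitializer initializer = new KafkaJaasLoginModuleInitializer();
		// Same setters the binder-provided bean uses above; the values here are placeholders.
		initializer.setLoginModule("com.sun.security.auth.module.Krb5LoginModule");
		initializer.setControlFlag(KafkaJaasLoginModuleInitializer.ControlFlag.REQUIRED);
		initializer.setOptions(Collections.singletonMap("useKeyTab", "true"));
		return initializer;
	}
}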
@@ -19,24 +19,19 @@ package org.springframework.cloud.stream.binder.kafka.streams;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.CompositeMessageConverter;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
@@ -58,7 +53,7 @@ public class KafkaStreamsMessageConversionDelegate {

private static final ThreadLocal<KeyValue<Object, Object>> keyValueThreadLocal = new ThreadLocal<>();

private final CompositeMessageConverter compositeMessageConverter;
private final CompositeMessageConverterFactory compositeMessageConverterFactory;

private final SendToDlqAndContinue sendToDlqAndContinue;

@@ -66,14 +61,12 @@ public class KafkaStreamsMessageConversionDelegate {

private final KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties;

Exception[] failedWithDeserException = new Exception[1];

KafkaStreamsMessageConversionDelegate(
CompositeMessageConverter compositeMessageConverter,
CompositeMessageConverterFactory compositeMessageConverterFactory,
SendToDlqAndContinue sendToDlqAndContinue,
KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue,
KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties) {
this.compositeMessageConverter = compositeMessageConverter;
this.compositeMessageConverterFactory = compositeMessageConverterFactory;
this.sendToDlqAndContinue = sendToDlqAndContinue;
this.kstreamBindingInformationCatalogue = kstreamBindingInformationCatalogue;
this.kstreamBinderConfigurationProperties = kstreamBinderConfigurationProperties;
@@ -84,63 +77,24 @@ public class KafkaStreamsMessageConversionDelegate {
 * @param outboundBindTarget outbound KStream target
 * @return serialized KStream
 */
@SuppressWarnings({"rawtypes", "unchecked"})
@SuppressWarnings("rawtypes")
public KStream serializeOnOutbound(KStream<?, ?> outboundBindTarget) {
String contentType = this.kstreamBindingInformationCatalogue
.getContentType(outboundBindTarget);
MessageConverter messageConverter = this.compositeMessageConverter;
final PerRecordContentTypeHolder perRecordContentTypeHolder = new PerRecordContentTypeHolder();

final KStream<?, ?> kStreamWithEnrichedHeaders = outboundBindTarget
.filter((k, v) -> v != null)
.mapValues((v) -> {
Message<?> message = v instanceof Message<?> ? (Message<?>) v
: MessageBuilder.withPayload(v).build();
Map<String, Object> headers = new HashMap<>(message.getHeaders());
if (!StringUtils.isEmpty(contentType)) {
headers.put(MessageHeaders.CONTENT_TYPE, contentType);
}
MessageHeaders messageHeaders = new MessageHeaders(headers);
final Message<?> convertedMessage = messageConverter.toMessage(message.getPayload(), messageHeaders);
perRecordContentTypeHolder.setContentType((String) messageHeaders.get(MessageHeaders.CONTENT_TYPE));
return convertedMessage.getPayload();
});

kStreamWithEnrichedHeaders.process(() -> new Processor() {

ProcessorContext context;

@Override
public void init(ProcessorContext context) {
this.context = context;
}

@Override
public void process(Object key, Object value) {
if (perRecordContentTypeHolder.contentType != null) {
this.context.headers().remove(MessageHeaders.CONTENT_TYPE);
final Header header;
try {
header = new RecordHeader(MessageHeaders.CONTENT_TYPE,
new ObjectMapper().writeValueAsBytes(perRecordContentTypeHolder.contentType));
this.context.headers().add(header);
}
catch (Exception e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not add content type header");
}
}
perRecordContentTypeHolder.unsetContentType();
}
}

@Override
public void close() {
MessageConverter messageConverter = this.compositeMessageConverterFactory
.getMessageConverterForAllRegistered();

return outboundBindTarget.mapValues((v) -> {
Message<?> message = v instanceof Message<?> ? (Message<?>) v
: MessageBuilder.withPayload(v).build();
Map<String, Object> headers = new HashMap<>(message.getHeaders());
if (!StringUtils.isEmpty(contentType)) {
headers.put(MessageHeaders.CONTENT_TYPE, contentType);
}
MessageHeaders messageHeaders = new MessageHeaders(headers);
return messageConverter.toMessage(message.getPayload(), messageHeaders)
.getPayload();
});

return kStreamWithEnrichedHeaders;
}

/**
@@ -152,7 +106,8 @@ public class KafkaStreamsMessageConversionDelegate {
@SuppressWarnings({ "unchecked", "rawtypes" })
public KStream deserializeOnInbound(Class<?> valueClass,
KStream<?, ?> bindingTarget) {
MessageConverter messageConverter = this.compositeMessageConverter;
MessageConverter messageConverter = this.compositeMessageConverterFactory
.getMessageConverterForAllRegistered();
final PerRecordContentTypeHolder perRecordContentTypeHolder = new PerRecordContentTypeHolder();

resolvePerRecordContentType(bindingTarget, perRecordContentTypeHolder);
@@ -205,7 +160,6 @@ public class KafkaStreamsMessageConversionDelegate {
"Deserialization has failed. This will be skipped from further processing.",
e);
// pass through
failedWithDeserException[0] = e;
}
return isValidRecord;
},
@@ -213,7 +167,7 @@ public class KafkaStreamsMessageConversionDelegate {
// in the first filter above.
(k, v) -> true);
// process errors from the second filter in the branch above.
processErrorFromDeserialization(bindingTarget, branch[1], failedWithDeserException);
processErrorFromDeserialization(bindingTarget, branch[1]);

// first branch above is the branch where the messages are converted, let it go
// through further processing.
@@ -270,7 +224,7 @@ public class KafkaStreamsMessageConversionDelegate {

@SuppressWarnings({ "unchecked", "rawtypes" })
private void processErrorFromDeserialization(KStream<?, ?> bindingTarget,
KStream<?, ?> branch, Exception[] exception) {
KStream<?, ?> branch) {
branch.process(() -> new Processor() {
ProcessorContext context;

@@ -285,25 +239,18 @@ public class KafkaStreamsMessageConversionDelegate {
if (o2 != null) {
if (KafkaStreamsMessageConversionDelegate.this.kstreamBindingInformationCatalogue
.isDlqEnabled(bindingTarget)) {
String destination = this.context.topic();
if (o2 instanceof Message) {
Message message = (Message) o2;

// We need to convert the key to a byte[] before sending to DLQ.
Serde keySerde = kstreamBindingInformationCatalogue.getKeySerde(bindingTarget);
Serializer keySerializer = keySerde.serializer();
byte[] keyBytes = keySerializer.serialize(null, o);

ConsumerRecord consumerRecord = new ConsumerRecord(this.context.topic(), this.context.partition(), this.context.offset(),
keyBytes, message.getPayload());

KafkaStreamsMessageConversionDelegate.this.sendToDlqAndContinue
.sendToDlq(consumerRecord, exception[0]);
.sendToDlq(destination, (byte[]) o,
(byte[]) message.getPayload(),
this.context.partition());
}
else {
ConsumerRecord consumerRecord = new ConsumerRecord(this.context.topic(), this.context.partition(), this.context.offset(),
o, o2);
KafkaStreamsMessageConversionDelegate.this.sendToDlqAndContinue
.sendToDlq(consumerRecord, exception[0]);
.sendToDlq(destination, (byte[]) o, (byte[]) o2,
this.context.partition());
}
}
else if (KafkaStreamsMessageConversionDelegate.this.kstreamBinderConfigurationProperties
@@ -336,10 +283,6 @@ public class KafkaStreamsMessageConversionDelegate {
this.contentType = contentType;
}

void unsetContentType() {
this.contentType = null;
}

}

}
@@ -16,78 +16,31 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
|
||||
/**
|
||||
* An internal registry for holding {@link KafkaStreams} objects maintained through
|
||||
* An internal registry for holding {@KafkaStreams} objects maintained through
|
||||
* {@link StreamsBuilderFactoryManager}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsRegistry {
|
||||
class KafkaStreamsRegistry {
|
||||
|
||||
private final Map<KafkaStreams, StreamsBuilderFactoryBean> streamsBuilderFactoryBeanMap = new ConcurrentHashMap<>();
|
||||
|
||||
private final Set<KafkaStreams> kafkaStreams = ConcurrentHashMap.newKeySet();
|
||||
private final Set<KafkaStreams> kafkaStreams = new HashSet<>();
|
||||
|
||||
Set<KafkaStreams> getKafkaStreams() {
|
||||
Set<KafkaStreams> currentlyRunningKafkaStreams = new HashSet<>();
|
||||
for (KafkaStreams ks : this.kafkaStreams) {
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = streamsBuilderFactoryBeanMap.get(ks);
|
||||
if (streamsBuilderFactoryBean.isRunning()) {
|
||||
currentlyRunningKafkaStreams.add(ks);
|
||||
}
|
||||
}
|
||||
return currentlyRunningKafkaStreams;
|
||||
return this.kafkaStreams;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the {@link KafkaStreams} object created in the application.
|
||||
* @param streamsBuilderFactoryBean {@link StreamsBuilderFactoryBean}
|
||||
* @param kafkaStreams {@link KafkaStreams} object created in the application
|
||||
*/
|
||||
void registerKafkaStreams(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
|
||||
final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
void registerKafkaStreams(KafkaStreams kafkaStreams) {
|
||||
this.kafkaStreams.add(kafkaStreams);
|
||||
this.streamsBuilderFactoryBeanMap.put(kafkaStreams, streamsBuilderFactoryBean);
|
||||
}
|
||||
|
||||
void unregisterKafkaStreams(KafkaStreams kafkaStreams) {
|
||||
this.kafkaStreams.remove(kafkaStreams);
|
||||
this.streamsBuilderFactoryBeanMap.remove(kafkaStreams);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param kafkaStreams {@link KafkaStreams} object
|
||||
* @return Corresponding {@link StreamsBuilderFactoryBean}.
|
||||
*/
|
||||
StreamsBuilderFactoryBean streamBuilderFactoryBean(KafkaStreams kafkaStreams) {
|
||||
return this.streamsBuilderFactoryBeanMap.get(kafkaStreams);
|
||||
}
|
||||
|
||||
public StreamsBuilderFactoryBean streamsBuilderFactoryBean(String applicationId) {
|
||||
final Optional<StreamsBuilderFactoryBean> first = this.streamsBuilderFactoryBeanMap.values()
|
||||
.stream()
|
||||
.filter(streamsBuilderFactoryBean -> streamsBuilderFactoryBean.isRunning() && streamsBuilderFactoryBean
|
||||
.getStreamsConfiguration().getProperty(StreamsConfig.APPLICATION_ID_CONFIG)
|
||||
.equals(applicationId))
|
||||
.findFirst();
|
||||
return first.orElse(null);
|
||||
}
|
||||
|
||||
public List<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans() {
|
||||
return new ArrayList<>(this.streamsBuilderFactoryBeanMap.values());
|
||||
}
|
||||
|
||||
}
|
||||
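A possible usage sketch for the registry above, for example from a custom health check. The wiring and the application id are assumptions for illustration, not part of the binder.

import org.springframework.kafka.config.StreamsBuilderFactoryBean;

public class KafkaStreamsHealthSketch {

    private final KafkaStreamsRegistry kafkaStreamsRegistry;

    public KafkaStreamsHealthSketch(KafkaStreamsRegistry kafkaStreamsRegistry) {
        this.kafkaStreamsRegistry = kafkaStreamsRegistry;
    }

    public boolean isProcessorRunning(String applicationId) {
        // streamsBuilderFactoryBean(applicationId) only matches running factory beans
        // and returns null when none matches the given application.id.
        StreamsBuilderFactoryBean factoryBean = this.kafkaStreamsRegistry
                .streamsBuilderFactoryBean(applicationId);
        return factoryBean != null;
    }
}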
|
||||
@@ -17,29 +17,40 @@
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.Topology;
|
||||
import org.apache.kafka.streams.kstream.Consumed;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.state.KeyValueStore;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
import org.apache.kafka.streams.state.Stores;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanInitializationException;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
@@ -51,14 +62,16 @@ import org.springframework.cloud.stream.binding.StreamListenerSetupMethodOrchest
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.core.annotation.AnnotationUtils;
|
||||
import org.springframework.core.env.ConfigurableEnvironment;
|
||||
import org.springframework.kafka.config.KafkaStreamsConfiguration;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.ReflectionUtils;
|
||||
@@ -79,8 +92,8 @@ import org.springframework.util.StringUtils;
|
||||
* @author Lei Chen
|
||||
* @author Gary Russell
|
||||
*/
|
||||
class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStreamsBinderProcessor
|
||||
implements StreamListenerSetupMethodOrchestrator {
|
||||
class KafkaStreamsStreamListenerSetupMethodOrchestrator
|
||||
implements StreamListenerSetupMethodOrchestrator, ApplicationContextAware {
|
||||
|
||||
private static final Log LOG = LogFactory
|
||||
.getLog(KafkaStreamsStreamListenerSetupMethodOrchestrator.class);
|
||||
@@ -97,13 +110,13 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final Map<Method, List<String>> registeredStoresPerMethod = new HashMap<>();
|
||||
|
||||
private final Map<Method, StreamsBuilderFactoryBean> methodStreamsBuilderFactoryBeanMap = new HashMap<>();
|
||||
|
||||
StreamsBuilderFactoryBeanConfigurer customizer;
|
||||
private final Map<Method, List<String>> registeredStoresPerMethod = new HashMap<>();
|
||||
|
||||
private final ConfigurableEnvironment environment;
|
||||
private final CleanupConfig cleanupConfig;
|
||||
|
||||
private ConfigurableApplicationContext applicationContext;
|
||||
|
||||
KafkaStreamsStreamListenerSetupMethodOrchestrator(
|
||||
BindingServiceProperties bindingServiceProperties,
|
||||
@@ -112,18 +125,14 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
KafkaStreamsBindingInformationCatalogue bindingInformationCatalogue,
|
||||
StreamListenerParameterAdapter streamListenerParameterAdapter,
|
||||
Collection<StreamListenerResultAdapter> listenerResultAdapters,
|
||||
CleanupConfig cleanupConfig,
|
||||
StreamsBuilderFactoryBeanConfigurer customizer,
|
||||
ConfigurableEnvironment environment) {
|
||||
super(bindingServiceProperties, bindingInformationCatalogue, extendedBindingProperties, keyValueSerdeResolver, cleanupConfig);
|
||||
CleanupConfig cleanupConfig) {
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsExtendedBindingProperties = extendedBindingProperties;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
this.kafkaStreamsBindingInformationCatalogue = bindingInformationCatalogue;
|
||||
this.streamListenerParameterAdapter = streamListenerParameterAdapter;
|
||||
this.streamListenerResultAdapters = listenerResultAdapters;
|
||||
this.customizer = customizer;
|
||||
this.environment = environment;
|
||||
this.cleanupConfig = cleanupConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -173,46 +182,40 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
else {
|
||||
Object result = method.invoke(bean, adaptedInboundArguments);
|
||||
|
||||
if (methodAnnotatedOutboundNames != null && methodAnnotatedOutboundNames.length > 0) {
|
||||
if (result.getClass().isArray()) {
|
||||
Assert.isTrue(
|
||||
methodAnnotatedOutboundNames.length == ((Object[]) result).length,
|
||||
"Result does not match with the number of declared outbounds");
|
||||
}
|
||||
else {
|
||||
Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
|
||||
"Result does not match with the number of declared outbounds");
|
||||
}
|
||||
if (result.getClass().isArray()) {
|
||||
Assert.isTrue(
|
||||
methodAnnotatedOutboundNames.length == ((Object[]) result).length,
|
||||
"Result does not match with the number of declared outbounds");
|
||||
}
|
||||
|
||||
if (methodAnnotatedOutboundNames != null && methodAnnotatedOutboundNames.length > 0) {
|
||||
methodAnnotatedInboundName = populateInboundIfMissing(method, methodAnnotatedInboundName);
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeanPerBinding().get(methodAnnotatedInboundName);
|
||||
|
||||
if (result.getClass().isArray()) {
|
||||
Object[] outboundKStreams = (Object[]) result;
|
||||
int i = 0;
|
||||
for (Object outboundKStream : outboundKStreams) {
|
||||
final String methodAnnotatedOutboundName = methodAnnotatedOutboundNames[i++];
|
||||
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(
|
||||
methodAnnotatedOutboundName, streamsBuilderFactoryBean);
|
||||
|
||||
Object targetBean = this.applicationContext
|
||||
.getBean(methodAnnotatedOutboundName);
|
||||
kafkaStreamsBindingInformationCatalogue.addOutboundKStreamResolvable(targetBean, ResolvableType.forMethodReturnType(method));
|
||||
adaptStreamListenerResult(outboundKStream, targetBean);
|
||||
else {
|
||||
Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
|
||||
"Result does not match with the number of declared outbounds");
|
||||
}
|
||||
if (result.getClass().isArray()) {
|
||||
Object[] outboundKStreams = (Object[]) result;
|
||||
int i = 0;
|
||||
for (Object outboundKStream : outboundKStreams) {
|
||||
Object targetBean = this.applicationContext
|
||||
.getBean(methodAnnotatedOutboundNames[i++]);
|
||||
for (StreamListenerResultAdapter streamListenerResultAdapter : this.streamListenerResultAdapters) {
|
||||
if (streamListenerResultAdapter.supports(
|
||||
outboundKStream.getClass(), targetBean.getClass())) {
|
||||
streamListenerResultAdapter.adapt(outboundKStream,
|
||||
targetBean);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(
|
||||
methodAnnotatedOutboundNames[0], streamsBuilderFactoryBean);
|
||||
|
||||
Object targetBean = this.applicationContext
|
||||
.getBean(methodAnnotatedOutboundNames[0]);
|
||||
kafkaStreamsBindingInformationCatalogue.addOutboundKStreamResolvable(targetBean, ResolvableType.forMethodReturnType(method));
|
||||
adaptStreamListenerResult(result, targetBean);
|
||||
}
|
||||
else {
|
||||
Object targetBean = this.applicationContext
|
||||
.getBean(methodAnnotatedOutboundNames[0]);
|
||||
for (StreamListenerResultAdapter streamListenerResultAdapter : this.streamListenerResultAdapters) {
|
||||
if (streamListenerResultAdapter.supports(result.getClass(),
|
||||
targetBean.getClass())) {
|
||||
streamListenerResultAdapter.adapt(result, targetBean);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -223,33 +226,6 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
}
|
||||
}
|
||||
|
||||
private String populateInboundIfMissing(Method method, String methodAnnotatedInboundName) {
|
||||
if (!StringUtils.hasText(methodAnnotatedInboundName)) {
|
||||
Object[] arguments = new Object[method.getParameterTypes().length];
|
||||
if (arguments.length > 0) {
|
||||
MethodParameter methodParameter = MethodParameter.forExecutable(method, 0);
|
||||
if (methodParameter.hasParameterAnnotation(Input.class)) {
|
||||
Input methodAnnotation = methodParameter
|
||||
.getParameterAnnotation(Input.class);
|
||||
methodAnnotatedInboundName = methodAnnotation.value();
|
||||
}
|
||||
}
|
||||
}
|
||||
return methodAnnotatedInboundName;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void adaptStreamListenerResult(Object outboundKStream, Object targetBean) {
|
||||
for (StreamListenerResultAdapter streamListenerResultAdapter : this.streamListenerResultAdapters) {
|
||||
if (streamListenerResultAdapter.supports(
|
||||
outboundKStream.getClass(), targetBean.getClass())) {
|
||||
streamListenerResultAdapter.adapt(outboundKStream,
|
||||
targetBean);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings({"unchecked"})
|
||||
public Object[] adaptAndRetrieveInboundArguments(Method method, String inboundName,
|
||||
@@ -278,57 +254,63 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
.getBean((String) targetReferenceValue);
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties
|
||||
.getBindingProperties(inboundName);
|
||||
enableNativeDecodingForKTableAlways(parameterType, bindingProperties);
|
||||
// Retrieve the StreamsConfig created for this method if available.
|
||||
// Otherwise, create the StreamsBuilderFactory and get the underlying
|
||||
// config.
|
||||
if (!this.methodStreamsBuilderFactoryBeanMap.containsKey(method)) {
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = buildStreamsBuilderAndRetrieveConfig(method.getDeclaringClass().getSimpleName() + "-" + method.getName(),
|
||||
applicationContext,
|
||||
inboundName, null, customizer, this.environment, bindingProperties);
|
||||
this.methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderFactoryBean);
|
||||
buildStreamsBuilderAndRetrieveConfig(method, applicationContext,
|
||||
inboundName);
|
||||
}
|
||||
try {
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.methodStreamsBuilderFactoryBeanMap
|
||||
.get(method);
|
||||
StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
|
||||
final String applicationId = streamsBuilderFactoryBean.getStreamsConfiguration().getProperty(StreamsConfig.APPLICATION_ID_CONFIG);
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(inboundName);
|
||||
extendedConsumerProperties.setApplicationId(applicationId);
|
||||
// get state store spec
|
||||
KafkaStreamsStateStoreProperties spec = buildStateStoreSpec(method);
|
||||
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver
|
||||
.getInboundKeySerde(extendedConsumerProperties, ResolvableType.forMethodParameter(methodParameter));
|
||||
LOG.info("Key Serde used for " + targetReferenceValue + ": " + keySerde.getClass().getName());
|
||||
.getInboundKeySerde(extendedConsumerProperties);
|
||||
Serde<?> valueSerde = this.keyValueSerdeResolver.getInboundValueSerde(
|
||||
bindingProperties.getConsumer(), extendedConsumerProperties);
|
||||
|
||||
Serde<?> valueSerde = bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding() ?
|
||||
getValueSerde(inboundName, extendedConsumerProperties, ResolvableType.forMethodParameter(methodParameter)) : Serdes.ByteArray();
|
||||
LOG.info("Value Serde used for " + targetReferenceValue + ": " + valueSerde.getClass().getName());
|
||||
|
||||
Topology.AutoOffsetReset autoOffsetReset = getAutoOffsetReset(inboundName, extendedConsumerProperties);
|
||||
final KafkaConsumerProperties.StartOffset startOffset = extendedConsumerProperties
|
||||
.getStartOffset();
|
||||
Topology.AutoOffsetReset autoOffsetReset = null;
|
||||
if (startOffset != null) {
|
||||
switch (startOffset) {
|
||||
case earliest:
|
||||
autoOffsetReset = Topology.AutoOffsetReset.EARLIEST;
|
||||
break;
|
||||
case latest:
|
||||
autoOffsetReset = Topology.AutoOffsetReset.LATEST;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (extendedConsumerProperties.isResetOffsets()) {
|
||||
LOG.warn("Detected resetOffsets configured on binding "
|
||||
+ inboundName + ". "
|
||||
+ "Setting resetOffsets in Kafka Streams binder does not have any effect.");
|
||||
}
|
||||
|
||||
if (parameterType.isAssignableFrom(KStream.class)) {
|
||||
KStream<?, ?> stream = getkStream(inboundName, spec,
|
||||
bindingProperties, extendedConsumerProperties, streamsBuilder, keySerde, valueSerde,
|
||||
autoOffsetReset, parameterIndex == 0);
|
||||
bindingProperties, streamsBuilder, keySerde, valueSerde,
|
||||
autoOffsetReset);
|
||||
KStreamBoundElementFactory.KStreamWrapper kStreamWrapper = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
|
||||
// wrap the proxy created during the initial target type binding
|
||||
// with real object (KStream)
|
||||
kStreamWrapper.wrap((KStream<Object, Object>) stream);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addKeySerde(stream, keySerde);
|
||||
BindingProperties bindingProperties1 = this.kafkaStreamsBindingInformationCatalogue.getBindingProperties().get(kStreamWrapper);
|
||||
this.kafkaStreamsBindingInformationCatalogue.registerBindingProperties(stream, bindingProperties1);
|
||||
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(inboundName, streamsBuilderFactoryBean);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addConsumerPropertiesPerSbfb(streamsBuilderFactoryBean,
|
||||
bindingServiceProperties.getConsumerProperties(inboundName));
|
||||
|
||||
this.kafkaStreamsBindingInformationCatalogue
|
||||
.addStreamBuilderFactory(streamsBuilderFactoryBean);
|
||||
for (StreamListenerParameterAdapter streamListenerParameterAdapter : adapters) {
|
||||
if (streamListenerParameterAdapter.supports(stream.getClass(),
|
||||
methodParameter)) {
|
||||
arguments[parameterIndex] = streamListenerParameterAdapter
|
||||
.adapt(stream, methodParameter);
|
||||
.adapt(kStreamWrapper, methodParameter);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -341,9 +323,39 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
+ method + "from " + stream.getClass() + " to "
|
||||
+ parameterType);
|
||||
}
|
||||
else {
|
||||
handleKTableGlobalKTableInputs(arguments, parameterIndex, inboundName, parameterType, targetBean, streamsBuilderFactoryBean,
|
||||
streamsBuilder, extendedConsumerProperties, keySerde, valueSerde, autoOffsetReset, parameterIndex == 0);
|
||||
else if (parameterType.isAssignableFrom(KTable.class)) {
|
||||
String materializedAs = extendedConsumerProperties
|
||||
.getMaterializedAs();
|
||||
String bindingDestination = this.bindingServiceProperties
|
||||
.getBindingDestination(inboundName);
|
||||
KTable<?, ?> table = getKTable(streamsBuilder, keySerde,
|
||||
valueSerde, materializedAs, bindingDestination,
|
||||
autoOffsetReset);
|
||||
KTableBoundElementFactory.KTableWrapper kTableWrapper = (KTableBoundElementFactory.KTableWrapper) targetBean;
|
||||
// wrap the proxy created during the initial target type binding
|
||||
// with real object (KTable)
|
||||
kTableWrapper.wrap((KTable<Object, Object>) table);
|
||||
this.kafkaStreamsBindingInformationCatalogue
|
||||
.addStreamBuilderFactory(streamsBuilderFactoryBean);
|
||||
arguments[parameterIndex] = table;
|
||||
}
|
||||
else if (parameterType.isAssignableFrom(GlobalKTable.class)) {
|
||||
String materializedAs = extendedConsumerProperties
|
||||
.getMaterializedAs();
|
||||
String bindingDestination = this.bindingServiceProperties
|
||||
.getBindingDestination(inboundName);
|
||||
GlobalKTable<?, ?> table = getGlobalKTable(streamsBuilder,
|
||||
keySerde, valueSerde, materializedAs, bindingDestination,
|
||||
autoOffsetReset);
|
||||
// @checkstyle:off
|
||||
GlobalKTableBoundElementFactory.GlobalKTableWrapper globalKTableWrapper = (GlobalKTableBoundElementFactory.GlobalKTableWrapper) targetBean;
|
||||
// @checkstyle:on
|
||||
// wrap the proxy created during the initial target type binding
|
||||
// with real object (KTable)
|
||||
globalKTableWrapper.wrap((GlobalKTable<Object, Object>) table);
|
||||
this.kafkaStreamsBindingInformationCatalogue
|
||||
.addStreamBuilderFactory(streamsBuilderFactoryBean);
|
||||
arguments[parameterIndex] = table;
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
@@ -358,6 +370,52 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
return arguments;
|
||||
}
|
||||
|
||||
private GlobalKTable<?, ?> getGlobalKTable(StreamsBuilder streamsBuilder,
|
||||
Serde<?> keySerde, Serde<?> valueSerde, String materializedAs,
|
||||
String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
|
||||
return materializedAs != null
|
||||
? materializedAsGlobalKTable(streamsBuilder, bindingDestination,
|
||||
materializedAs, keySerde, valueSerde, autoOffsetReset)
|
||||
: streamsBuilder.globalTable(bindingDestination,
|
||||
Consumed.with(keySerde, valueSerde)
|
||||
.withOffsetResetPolicy(autoOffsetReset));
|
||||
}
|
||||
|
||||
private KTable<?, ?> getKTable(StreamsBuilder streamsBuilder, Serde<?> keySerde,
|
||||
Serde<?> valueSerde, String materializedAs, String bindingDestination,
|
||||
Topology.AutoOffsetReset autoOffsetReset) {
|
||||
return materializedAs != null
|
||||
? materializedAs(streamsBuilder, bindingDestination, materializedAs,
|
||||
keySerde, valueSerde, autoOffsetReset)
|
||||
: streamsBuilder.table(bindingDestination,
|
||||
Consumed.with(keySerde, valueSerde)
|
||||
.withOffsetResetPolicy(autoOffsetReset));
|
||||
}
|
||||
|
||||
private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder,
|
||||
String destination, String storeName, Serde<K> k, Serde<V> v,
|
||||
Topology.AutoOffsetReset autoOffsetReset) {
|
||||
return streamsBuilder.table(
|
||||
this.bindingServiceProperties.getBindingDestination(destination),
|
||||
Consumed.with(k, v).withOffsetResetPolicy(autoOffsetReset),
|
||||
getMaterialized(storeName, k, v));
|
||||
}
|
||||
|
||||
private <K, V> GlobalKTable<K, V> materializedAsGlobalKTable(
|
||||
StreamsBuilder streamsBuilder, String destination, String storeName,
|
||||
Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset) {
|
||||
return streamsBuilder.globalTable(
|
||||
this.bindingServiceProperties.getBindingDestination(destination),
|
||||
Consumed.with(k, v).withOffsetResetPolicy(autoOffsetReset),
|
||||
getMaterialized(storeName, k, v));
|
||||
}
|
||||
|
||||
private <K, V> Materialized<K, V, KeyValueStore<Bytes, byte[]>> getMaterialized(
|
||||
String storeName, Serde<K> k, Serde<V> v) {
|
||||
return Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
|
||||
.withKeySerde(k).withValueSerde(v);
|
||||
}
|
||||
|
||||
private StoreBuilder buildStateStore(KafkaStreamsStateStoreProperties spec) {
|
||||
try {
|
||||
|
||||
@@ -376,12 +434,12 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
builder = Stores
|
||||
.windowStoreBuilder(
|
||||
Stores.persistentWindowStore(spec.getName(),
|
||||
Duration.ofMillis(spec.getRetention()), Duration.ofMillis(3), false),
|
||||
spec.getRetention(), 3, spec.getLength(), false),
|
||||
keySerde, valueSerde);
|
||||
break;
|
||||
case SESSION:
|
||||
builder = Stores.sessionStoreBuilder(Stores.persistentSessionStore(
|
||||
spec.getName(), Duration.ofMillis(spec.getRetention())), keySerde, valueSerde);
|
||||
spec.getName(), spec.getRetention()), keySerde, valueSerde);
|
||||
break;
|
||||
default:
|
||||
throw new UnsupportedOperationException(
|
||||
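A standalone sketch of the Duration-based Stores API that the new code in this hunk relies on; the store name, retention, window size and serdes are illustrative assumptions.

import java.time.Duration;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.state.WindowStore;

public class WindowStoreSketch {

    public static StoreBuilder<WindowStore<String, Long>> windowStore() {
        return Stores.windowStoreBuilder(
                Stores.persistentWindowStore("clicks-per-window",
                        Duration.ofDays(1),      // retention
                        Duration.ofMinutes(5),   // window size
                        false),                  // retainDuplicates
                Serdes.String(), Serdes.Long());
    }
}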
@@ -403,10 +461,9 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
|
||||
private KStream<?, ?> getkStream(String inboundName,
|
||||
KafkaStreamsStateStoreProperties storeSpec,
|
||||
BindingProperties bindingProperties,
|
||||
KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
|
||||
BindingProperties bindingProperties, StreamsBuilder streamsBuilder,
|
||||
Serde<?> keySerde, Serde<?> valueSerde,
|
||||
Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
|
||||
Topology.AutoOffsetReset autoOffsetReset) {
|
||||
if (storeSpec != null) {
|
||||
StoreBuilder storeBuilder = buildStateStore(storeSpec);
|
||||
streamsBuilder.addStateStore(storeBuilder);
|
||||
@@ -414,8 +471,115 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
LOG.info("state store " + storeBuilder.name() + " added to topology");
|
||||
}
|
||||
}
|
||||
return getKStream(inboundName, bindingProperties, kafkaStreamsConsumerProperties, streamsBuilder,
|
||||
keySerde, valueSerde, autoOffsetReset, firstBuild);
|
||||
String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
|
||||
this.bindingServiceProperties.getBindingDestination(inboundName));
|
||||
|
||||
KStream<?, ?> stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
|
||||
Consumed.with(keySerde, valueSerde)
|
||||
.withOffsetResetPolicy(autoOffsetReset));
|
||||
final boolean nativeDecoding = this.bindingServiceProperties
|
||||
.getConsumerProperties(inboundName).isUseNativeDecoding();
|
||||
if (nativeDecoding) {
|
||||
LOG.info("Native decoding is enabled for " + inboundName
|
||||
+ ". Inbound deserialization done at the broker.");
|
||||
}
|
||||
else {
|
||||
LOG.info("Native decoding is disabled for " + inboundName
|
||||
+ ". Inbound message conversion done by Spring Cloud Stream.");
|
||||
}
|
||||
|
||||
stream = stream.mapValues((value) -> {
|
||||
Object returnValue;
|
||||
String contentType = bindingProperties.getContentType();
|
||||
if (value != null && !StringUtils.isEmpty(contentType) && !nativeDecoding) {
|
||||
returnValue = MessageBuilder.withPayload(value)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
|
||||
}
|
||||
else {
|
||||
returnValue = value;
|
||||
}
|
||||
return returnValue;
|
||||
});
|
||||
return stream;
|
||||
}
|
||||
|
||||
private void enableNativeDecodingForKTableAlways(Class<?> parameterType,
|
||||
BindingProperties bindingProperties) {
|
||||
if (parameterType.isAssignableFrom(KTable.class)
|
||||
|| parameterType.isAssignableFrom(GlobalKTable.class)) {
|
||||
if (bindingProperties.getConsumer() == null) {
|
||||
bindingProperties.setConsumer(new ConsumerProperties());
|
||||
}
|
||||
// No framework-level message conversion is provided for KTable/GlobalKTable; it is
// done by the broker.
|
||||
bindingProperties.getConsumer().setUseNativeDecoding(true);
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked"})
|
||||
private void buildStreamsBuilderAndRetrieveConfig(Method method,
|
||||
ApplicationContext applicationContext, String inboundName) {
|
||||
ConfigurableListableBeanFactory beanFactory = this.applicationContext
|
||||
.getBeanFactory();
|
||||
|
||||
Map<String, Object> streamConfigGlobalProperties = applicationContext
|
||||
.getBean("streamConfigGlobalProperties", Map.class);
|
||||
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(inboundName);
|
||||
streamConfigGlobalProperties
|
||||
.putAll(extendedConsumerProperties.getConfiguration());
|
||||
|
||||
String applicationId = extendedConsumerProperties.getApplicationId();
|
||||
// override application.id if set at the individual binding level.
|
||||
if (StringUtils.hasText(applicationId)) {
|
||||
streamConfigGlobalProperties.put(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
applicationId);
|
||||
}
|
||||
|
||||
int concurrency = this.bindingServiceProperties.getConsumerProperties(inboundName)
|
||||
.getConcurrency();
|
||||
// override concurrency if set at the individual binding level.
|
||||
if (concurrency > 1) {
|
||||
streamConfigGlobalProperties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG,
|
||||
concurrency);
|
||||
}
|
||||
|
||||
Map<String, KafkaStreamsDlqDispatch> kafkaStreamsDlqDispatchers = applicationContext
|
||||
.getBean("kafkaStreamsDlqDispatchers", Map.class);
|
||||
|
||||
KafkaStreamsConfiguration kafkaStreamsConfiguration = new KafkaStreamsConfiguration(
|
||||
streamConfigGlobalProperties) {
|
||||
@Override
|
||||
public Properties asProperties() {
|
||||
Properties properties = super.asProperties();
|
||||
properties.put(SendToDlqAndContinue.KAFKA_STREAMS_DLQ_DISPATCHERS,
|
||||
kafkaStreamsDlqDispatchers);
|
||||
return properties;
|
||||
}
|
||||
};
|
||||
|
||||
StreamsBuilderFactoryBean streamsBuilder = this.cleanupConfig == null
|
||||
? new StreamsBuilderFactoryBean(kafkaStreamsConfiguration)
|
||||
: new StreamsBuilderFactoryBean(kafkaStreamsConfiguration,
|
||||
this.cleanupConfig);
|
||||
streamsBuilder.setAutoStartup(false);
|
||||
BeanDefinition streamsBuilderBeanDefinition = BeanDefinitionBuilder
|
||||
.genericBeanDefinition(
|
||||
(Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(),
|
||||
() -> streamsBuilder)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(
|
||||
"stream-builder-" + method.getName(), streamsBuilderBeanDefinition);
|
||||
StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean(
|
||||
"&stream-builder-" + method.getName(), StreamsBuilderFactoryBean.class);
|
||||
this.methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderX);
|
||||
}
|
||||
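The dynamic registration of the StreamsBuilderFactoryBean above uses the supplier-based BeanDefinitionBuilder API; here is a reduced, hypothetical example of that pattern on its own (bean name and type are assumptions).

import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;

public class BeanRegistrationSketch {

    public static void register(BeanDefinitionRegistry registry, MyService instance) {
        // Register an already-constructed instance through an instance supplier.
        BeanDefinition definition = BeanDefinitionBuilder
                .genericBeanDefinition(MyService.class, () -> instance)
                .getRawBeanDefinition();
        registry.registerBeanDefinition("my-service", definition);
    }

    public static class MyService {
    }
}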
|
||||
@Override
|
||||
public final void setApplicationContext(ApplicationContext applicationContext)
|
||||
throws BeansException {
|
||||
this.applicationContext = (ConfigurableApplicationContext) applicationContext;
|
||||
}
|
||||
|
||||
private void validateStreamListenerMethod(StreamListener streamListener,
|
||||
@@ -466,7 +630,8 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator extends AbstractKafkaStr
|
||||
&& this.applicationContext.containsBean(targetBeanName)) {
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
if (targetBeanClass != null) {
|
||||
boolean supports = KafkaStreamsBinderUtils.supportsKStream(methodParameter, targetBeanClass);
|
||||
boolean supports = KStream.class.isAssignableFrom(targetBeanClass)
|
||||
&& KStream.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
if (!supports) {
|
||||
supports = KTable.class.isAssignableFrom(targetBeanClass)
|
||||
&& KTable.class.isAssignableFrom(methodParameter.getParameterType());
|
||||
|
||||
@@ -16,34 +16,17 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
import org.springframework.util.ClassUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -70,16 +53,12 @@ import org.springframework.util.StringUtils;
|
||||
* @author Soby Chacko
|
||||
* @author Lei Chen
|
||||
*/
|
||||
public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(KeyValueSerdeResolver.class);
|
||||
class KeyValueSerdeResolver {
|
||||
|
||||
private final Map<String, Object> streamConfigGlobalProperties;
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private ConfigurableApplicationContext context;
|
||||
|
||||
KeyValueSerdeResolver(Map<String, Object> streamConfigGlobalProperties,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
this.streamConfigGlobalProperties = streamConfigGlobalProperties;
|
||||
@@ -99,13 +78,6 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
return getKeySerde(keySerdeString);
|
||||
}
|
||||
|
||||
public Serde<?> getInboundKeySerde(
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties, ResolvableType resolvableType) {
|
||||
String keySerdeString = extendedConsumerProperties.getKeySerde();
|
||||
|
||||
return getKeySerde(keySerdeString, resolvableType);
|
||||
}
|
||||
|
||||
/**
|
||||
* Provide the {@link Serde} for inbound value.
|
||||
* @param consumerProperties {@link ConsumerProperties} on binding
|
||||
@@ -125,26 +97,7 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
else {
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
}
|
||||
catch (ClassNotFoundException ex) {
|
||||
throw new IllegalStateException("Serde class not found: ", ex);
|
||||
}
|
||||
return valueSerde;
|
||||
}
|
||||
|
||||
public Serde<?> getInboundValueSerde(ConsumerProperties consumerProperties,
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties,
|
||||
ResolvableType resolvableType) {
|
||||
Serde<?> valueSerde;
|
||||
|
||||
String valueSerdeString = extendedConsumerProperties.getValueSerde();
|
||||
try {
|
||||
if (consumerProperties != null && consumerProperties.isUseNativeDecoding()) {
|
||||
valueSerde = getValueSerde(valueSerdeString, resolvableType);
|
||||
}
|
||||
else {
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
valueSerde.configure(this.streamConfigGlobalProperties, false);
|
||||
}
|
||||
catch (ClassNotFoundException ex) {
|
||||
throw new IllegalStateException("Serde class not found: ", ex);
|
||||
@@ -161,11 +114,6 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
return getKeySerde(properties.getKeySerde());
|
||||
}
|
||||
|
||||
public Serde<?> getOuboundKeySerde(KafkaStreamsProducerProperties properties, ResolvableType resolvableType) {
|
||||
return getKeySerde(properties.getKeySerde(), resolvableType);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Provide the {@link Serde} for outbound value.
|
||||
* @param producerProperties {@link ProducerProperties} on binding
|
||||
@@ -184,24 +132,7 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
else {
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
}
|
||||
catch (ClassNotFoundException ex) {
|
||||
throw new IllegalStateException("Serde class not found: ", ex);
|
||||
}
|
||||
return valueSerde;
|
||||
}
|
||||
|
||||
public Serde<?> getOutboundValueSerde(ProducerProperties producerProperties,
|
||||
KafkaStreamsProducerProperties kafkaStreamsProducerProperties, ResolvableType resolvableType) {
|
||||
Serde<?> valueSerde;
|
||||
try {
|
||||
if (producerProperties.isUseNativeEncoding()) {
|
||||
valueSerde = getValueSerde(
|
||||
kafkaStreamsProducerProperties.getValueSerde(), resolvableType);
|
||||
}
|
||||
else {
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
valueSerde.configure(this.streamConfigGlobalProperties, false);
|
||||
}
|
||||
catch (ClassNotFoundException ex) {
|
||||
throw new IllegalStateException("Serde class not found: ", ex);
|
||||
@@ -239,7 +170,12 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
keySerde = Utils.newInstance(keySerdeString, Serde.class);
|
||||
}
|
||||
else {
|
||||
keySerde = getFallbackSerde("default.key.serde");
|
||||
keySerde = this.binderConfigurationProperties.getConfiguration()
|
||||
.containsKey("default.key.serde")
|
||||
? Utils.newInstance(this.binderConfigurationProperties
|
||||
.getConfiguration().get("default.key.serde"),
|
||||
Serde.class)
|
||||
: Serdes.ByteArray();
|
||||
}
|
||||
keySerde.configure(this.streamConfigGlobalProperties, true);
|
||||
|
||||
@@ -250,136 +186,6 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
return keySerde;
|
||||
}
|
||||
|
||||
private Serde<?> getKeySerde(String keySerdeString, ResolvableType resolvableType) {
|
||||
Serde<?> keySerde = null;
|
||||
try {
|
||||
if (StringUtils.hasText(keySerdeString)) {
|
||||
keySerde = Utils.newInstance(keySerdeString, Serde.class);
|
||||
}
|
||||
else {
|
||||
if (resolvableType != null &&
|
||||
(isResolvalbeKafkaStreamsType(resolvableType) || isResolvableKStreamArrayType(resolvableType))) {
|
||||
ResolvableType generic = resolvableType.isArray() ? resolvableType.getComponentType().getGeneric(0) : resolvableType.getGeneric(0);
|
||||
Serde<?> fallbackSerde = getFallbackSerde("default.key.serde");
|
||||
keySerde = getSerde(generic, fallbackSerde);
|
||||
}
|
||||
if (keySerde == null) {
|
||||
keySerde = Serdes.ByteArray();
|
||||
}
|
||||
}
|
||||
keySerde.configure(this.streamConfigGlobalProperties, true);
|
||||
}
|
||||
catch (ClassNotFoundException ex) {
|
||||
throw new IllegalStateException("Serde class not found: ", ex);
|
||||
}
|
||||
return keySerde;
|
||||
}
|
||||
|
||||
private boolean isResolvableKStreamArrayType(ResolvableType resolvableType) {
|
||||
return resolvableType.isArray() &&
|
||||
KStream.class.isAssignableFrom(resolvableType.getComponentType().getRawClass());
|
||||
}
|
||||
|
||||
private boolean isResolvalbeKafkaStreamsType(ResolvableType resolvableType) {
|
||||
return resolvableType.getRawClass() != null && (KStream.class.isAssignableFrom(resolvableType.getRawClass()) || KTable.class.isAssignableFrom(resolvableType.getRawClass()) ||
|
||||
GlobalKTable.class.isAssignableFrom(resolvableType.getRawClass()));
|
||||
}
|
||||
|
||||
private Serde<?> getSerde(ResolvableType generic, Serde<?> fallbackSerde) {
|
||||
Serde<?> serde = null;
|
||||
|
||||
Map<String, Serde> beansOfType = context.getBeansOfType(Serde.class);
|
||||
Serde<?>[] serdeBeans = new Serde<?>[1];
|
||||
|
||||
final Class<?> genericRawClazz = generic.getRawClass();
|
||||
beansOfType.forEach((k, v) -> {
|
||||
final Class<?> classObj = ClassUtils.resolveClassName(((AnnotatedBeanDefinition)
|
||||
context.getBeanFactory().getBeanDefinition(k))
|
||||
.getMetadata().getClassName(),
|
||||
ClassUtils.getDefaultClassLoader());
|
||||
try {
|
||||
Method[] methods = classObj.getMethods();
|
||||
Optional<Method> serdeBeanMethod = Arrays.stream(methods).filter(m -> m.getName().equals(k)).findFirst();
|
||||
if (serdeBeanMethod.isPresent()) {
|
||||
Method method = serdeBeanMethod.get();
|
||||
ResolvableType resolvableType = ResolvableType.forMethodReturnType(method, classObj);
|
||||
ResolvableType serdeBeanGeneric = resolvableType.getGeneric(0);
|
||||
Class<?> serdeGenericRawClazz = serdeBeanGeneric.getRawClass();
|
||||
if (serdeGenericRawClazz != null && genericRawClazz != null) {
|
||||
if (serdeGenericRawClazz.isAssignableFrom(genericRawClazz)) {
|
||||
serdeBeans[0] = v;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Pass through...
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
if (serdeBeans[0] != null) {
|
||||
return serdeBeans[0];
|
||||
}
|
||||
|
||||
if (genericRawClazz != null) {
|
||||
if (Integer.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.Integer();
|
||||
}
|
||||
else if (Long.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.Long();
|
||||
}
|
||||
else if (Short.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.Short();
|
||||
}
|
||||
else if (Double.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.Double();
|
||||
}
|
||||
else if (Float.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.Float();
|
||||
}
|
||||
else if (byte[].class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.ByteArray();
|
||||
}
|
||||
else if (String.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.String();
|
||||
}
|
||||
else if (UUID.class.isAssignableFrom(genericRawClazz)) {
|
||||
serde = Serdes.UUID();
|
||||
}
|
||||
else if (!isSerdeFromStandardDefaults(fallbackSerde)) {
|
||||
//User purposely set a default serde that is not one of the above
|
||||
serde = fallbackSerde;
|
||||
}
|
||||
else {
|
||||
// If the type is Object, then skip assigning the JsonSerde and let the fallback mechanism take precedence.
|
||||
if (!genericRawClazz.isAssignableFrom((Object.class))) {
|
||||
serde = new JsonSerde(genericRawClazz);
|
||||
}
|
||||
}
|
||||
}
|
||||
return serde;
|
||||
}
|
||||
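Given the bean-based Serde matching implemented above, an application could expose a matching Serde bean roughly like the following sketch; the Order type, bean name and use of JsonSerde are assumptions for illustration.

import org.apache.kafka.common.serialization.Serde;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.support.serializer.JsonSerde;

@Configuration
public class OrderSerdeConfiguration {

    @Bean
    public Serde<Order> orderSerde() {
        // The generic on this @Bean method's return type (Order) is what the resolver
        // compares against the KStream<K, V> type parameters.
        return new JsonSerde<>(Order.class);
    }

    // Illustrative domain type.
    public static class Order {
        public String id;
        public double amount;
    }
}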
|
||||
private boolean isSerdeFromStandardDefaults(Serde<?> serde) {
|
||||
if (serde != null) {
|
||||
if (Number.class.isAssignableFrom(serde.getClass())) {
|
||||
return true;
|
||||
}
|
||||
else if (Serdes.ByteArray().getClass().isAssignableFrom(serde.getClass())) {
|
||||
return true;
|
||||
}
|
||||
else if (Serdes.String().getClass().isAssignableFrom(serde.getClass())) {
|
||||
return true;
|
||||
}
|
||||
else if (Serdes.UUID().getClass().isAssignableFrom(serde.getClass())) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
private Serde<?> getValueSerde(String valueSerdeString)
|
||||
throws ClassNotFoundException {
|
||||
Serde<?> valueSerde;
|
||||
@@ -387,47 +193,14 @@ public class KeyValueSerdeResolver implements ApplicationContextAware {
|
||||
valueSerde = Utils.newInstance(valueSerdeString, Serde.class);
|
||||
}
|
||||
else {
|
||||
valueSerde = getFallbackSerde("default.value.serde");
|
||||
valueSerde = this.binderConfigurationProperties.getConfiguration()
|
||||
.containsKey("default.value.serde")
|
||||
? Utils.newInstance(this.binderConfigurationProperties
|
||||
.getConfiguration().get("default.value.serde"),
|
||||
Serde.class)
|
||||
: Serdes.ByteArray();
|
||||
}
|
||||
valueSerde.configure(this.streamConfigGlobalProperties, false);
|
||||
return valueSerde;
|
||||
}
|
||||
|
||||
private Serde<?> getFallbackSerde(String s) throws ClassNotFoundException {
|
||||
return this.binderConfigurationProperties.getConfiguration()
|
||||
.containsKey(s)
|
||||
? Utils.newInstance(this.binderConfigurationProperties
|
||||
.getConfiguration().get(s),
|
||||
Serde.class)
|
||||
: Serdes.ByteArray();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private Serde<?> getValueSerde(String valueSerdeString, ResolvableType resolvableType)
|
||||
throws ClassNotFoundException {
|
||||
Serde<?> valueSerde = null;
|
||||
if (StringUtils.hasText(valueSerdeString)) {
|
||||
valueSerde = Utils.newInstance(valueSerdeString, Serde.class);
|
||||
}
|
||||
else {
|
||||
|
||||
if (resolvableType != null && ((isResolvalbeKafkaStreamsType(resolvableType)) ||
|
||||
(isResolvableKStreamArrayType(resolvableType)))) {
|
||||
Serde<?> fallbackSerde = getFallbackSerde("default.value.serde");
|
||||
ResolvableType generic = resolvableType.isArray() ? resolvableType.getComponentType().getGeneric(1) : resolvableType.getGeneric(1);
|
||||
valueSerde = getSerde(generic, fallbackSerde);
|
||||
}
|
||||
if (valueSerde == null) {
|
||||
|
||||
valueSerde = Serdes.ByteArray();
|
||||
}
|
||||
}
|
||||
valueSerde.configure(streamConfigGlobalProperties, false);
|
||||
return valueSerde;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
|
||||
context = (ConfigurableApplicationContext) applicationContext;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.errors.InvalidStateStoreException;
|
||||
import org.apache.kafka.streams.state.QueryableStoreType;
|
||||
|
||||
/**
 * Registry that contains the {@link QueryableStoreType}s created by the user
 * applications.
 *
 * @author Soby Chacko
 * @author Renwei Han
 * @since 2.0.0
 * @deprecated in favor of {@link InteractiveQueryService}
 */
public class QueryableStoreRegistry {
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
public QueryableStoreRegistry(KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve and return a queryable store by name created in the application.
|
||||
* @param storeName name of the queryable store
|
||||
* @param storeType type of the queryable store
|
||||
* @param <T> generic queryable store
|
||||
* @return queryable store.
|
||||
* @deprecated in favor of
|
||||
* {@link InteractiveQueryService#getQueryableStore(String, QueryableStoreType)}
|
||||
*/
|
||||
public <T> T getQueryableStoreType(String storeName,
|
||||
QueryableStoreType<T> storeType) {
|
||||
|
||||
for (KafkaStreams kafkaStream : this.kafkaStreamsRegistry.getKafkaStreams()) {
|
||||
try {
|
||||
T store = kafkaStream.store(storeName, storeType);
|
||||
if (store != null) {
|
||||
return store;
|
||||
}
|
||||
}
|
||||
catch (InvalidStateStoreException ignored) {
|
||||
// pass through
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
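A usage sketch for the (deprecated) registry above; the store name and value types are assumptions for the example.

import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class StoreLookupSketch {

    private final QueryableStoreRegistry queryableStoreRegistry;

    public StoreLookupSketch(QueryableStoreRegistry queryableStoreRegistry) {
        this.queryableStoreRegistry = queryableStoreRegistry;
    }

    public Long countFor(String key) {
        // Returns null if no running KafkaStreams instance hosts the store yet.
        ReadOnlyKeyValueStore<String, Long> store = this.queryableStoreRegistry
                .getQueryableStoreType("word-counts", QueryableStoreTypes.keyValueStore());
        return store != null ? store.get(key) : null;
    }
}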
@@ -16,48 +16,109 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
|
||||
import org.apache.kafka.streams.processor.ProcessorContext;
|
||||
import org.apache.kafka.streams.processor.internals.ProcessorContextImpl;
|
||||
import org.apache.kafka.streams.processor.internals.StreamTask;
|
||||
|
||||
import org.springframework.kafka.listener.ConsumerRecordRecoverer;
|
||||
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
|
||||
import org.springframework.util.ReflectionUtils;
|
||||
|
||||
/**
 * Custom implementation for {@link ConsumerRecordRecoverer} that keeps a collection of
 * recoverer objects per input topic. These topics might be per input binding or multiplexed
 * topics in a single binding.
 * Custom implementation for {@link DeserializationExceptionHandler} that sends the
 * records in error to a DLQ topic, then continues stream processing on new records.
 *
 * @author Soby Chacko
 * @since 2.0.0
 */
public class SendToDlqAndContinue implements ConsumerRecordRecoverer {
public class SendToDlqAndContinue implements DeserializationExceptionHandler {
|
||||
|
||||
/**
|
||||
* Key used for DLQ dispatchers.
|
||||
*/
|
||||
public static final String KAFKA_STREAMS_DLQ_DISPATCHERS = "spring.cloud.stream.kafka.streams.dlq.dispatchers";
|
||||
|
||||
/**
|
||||
* DLQ dispatcher per topic in the application context. The key here is not the actual
|
||||
* DLQ topic but the incoming topic that caused the error.
|
||||
*/
|
||||
private Map<String, DeadLetterPublishingRecoverer> dlqDispatchers = new HashMap<>();
|
||||
private Map<String, KafkaStreamsDlqDispatch> dlqDispatchers = new HashMap<>();
|
||||
|
||||
/**
|
||||
* For a given topic, send the key/value record to DLQ topic.
|
||||
*
|
||||
* @param consumerRecord consumer record
|
||||
* @param exception exception
|
||||
* @param topic incoming topic that caused the error
|
||||
* @param key to send
|
||||
* @param value to send
|
||||
* @param partition for the topic where this record should be sent
|
||||
*/
|
||||
public void sendToDlq(ConsumerRecord<?, ?> consumerRecord, Exception exception) {
|
||||
DeadLetterPublishingRecoverer kafkaStreamsDlqDispatch = this.dlqDispatchers.get(consumerRecord.topic());
|
||||
kafkaStreamsDlqDispatch.accept(consumerRecord, exception);
|
||||
}
|
||||
|
||||
void addKStreamDlqDispatch(String topic,
|
||||
DeadLetterPublishingRecoverer kafkaStreamsDlqDispatch) {
|
||||
this.dlqDispatchers.put(topic, kafkaStreamsDlqDispatch);
|
||||
public void sendToDlq(String topic, byte[] key, byte[] value, int partition) {
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = this.dlqDispatchers.get(topic);
|
||||
kafkaStreamsDlqDispatch.sendToDlq(key, value, partition);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(ConsumerRecord<?, ?> consumerRecord, Exception e) {
|
||||
this.dlqDispatchers.get(consumerRecord.topic()).accept(consumerRecord, e);
|
||||
@SuppressWarnings("unchecked")
|
||||
public DeserializationHandlerResponse handle(ProcessorContext context,
|
||||
ConsumerRecord<byte[], byte[]> record, Exception exception) {
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = this.dlqDispatchers
|
||||
.get(record.topic());
|
||||
kafkaStreamsDlqDispatch.sendToDlq(record.key(), record.value(),
|
||||
record.partition());
|
||||
context.commit();
|
||||
|
||||
// The following conditional block should be reconsidered when we have a solution
// for this SO problem:
// https://stackoverflow.com/questions/48470899/kafka-streams-deserialization-handler
// Currently, it seems that when a deserialization error happens, no commits happen,
// so the following code uses reflection to get access to the underlying KafkaConsumer.
// It works with Kafka 1.0.0, but there is no guarantee it will work in future
// versions of Kafka, since we access private fields by name using reflection;
// it is a temporary fix.
|
||||
if (context instanceof ProcessorContextImpl) {
|
||||
ProcessorContextImpl processorContextImpl = (ProcessorContextImpl) context;
|
||||
Field task = ReflectionUtils.findField(ProcessorContextImpl.class, "task");
|
||||
ReflectionUtils.makeAccessible(task);
|
||||
Object taskField = ReflectionUtils.getField(task, processorContextImpl);
|
||||
|
||||
if (taskField.getClass().isAssignableFrom(StreamTask.class)) {
|
||||
StreamTask streamTask = (StreamTask) taskField;
|
||||
Field consumer = ReflectionUtils.findField(StreamTask.class, "consumer");
|
||||
ReflectionUtils.makeAccessible(consumer);
|
||||
Object kafkaConsumerField = ReflectionUtils.getField(consumer,
|
||||
streamTask);
|
||||
if (kafkaConsumerField.getClass().isAssignableFrom(KafkaConsumer.class)) {
|
||||
KafkaConsumer kafkaConsumer = (KafkaConsumer) kafkaConsumerField;
|
||||
final Map<TopicPartition, OffsetAndMetadata> consumedOffsetsAndMetadata = new HashMap<>();
|
||||
TopicPartition tp = new TopicPartition(record.topic(),
|
||||
record.partition());
|
||||
OffsetAndMetadata oam = new OffsetAndMetadata(record.offset() + 1);
|
||||
consumedOffsetsAndMetadata.put(tp, oam);
|
||||
kafkaConsumer.commitSync(consumedOffsetsAndMetadata);
|
||||
}
|
||||
}
|
||||
}
|
||||
return DeserializationHandlerResponse.CONTINUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void configure(Map<String, ?> configs) {
|
||||
this.dlqDispatchers = (Map<String, KafkaStreamsDlqDispatch>) configs
|
||||
.get(KAFKA_STREAMS_DLQ_DISPATCHERS);
|
||||
}
|
||||
|
||||
void addKStreamDlqDispatch(String topic,
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch) {
|
||||
this.dlqDispatchers.put(topic, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
|
||||
}
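For orientation, the hunk above belongs to the binder's SendToDlqAndContinue support (the class declaration itself sits outside the hunk). The following is a rough, hypothetical sketch of how an application-level Kafka Streams DeserializationExceptionHandler could delegate to it, using only the sendToDlq(ConsumerRecord, Exception) signature shown above; the class name MyDlqDelegatingHandler and the injection style are illustrative assumptions, not part of this change.

public class MyDlqDelegatingHandler implements DeserializationExceptionHandler {

	// Assumed to be the binder-provided bean whose sendToDlq(...) method appears in the hunk above.
	private final SendToDlqAndContinue sendToDlqAndContinue;

	public MyDlqDelegatingHandler(SendToDlqAndContinue sendToDlqAndContinue) {
		this.sendToDlqAndContinue = sendToDlqAndContinue;
	}

	@Override
	public DeserializationHandlerResponse handle(ProcessorContext context,
			ConsumerRecord<byte[], byte[]> record, Exception exception) {
		// Route the unreadable record to the DLQ, then keep the stream thread running.
		this.sendToDlqAndContinue.sendToDlq(record, exception);
		return DeserializationHandlerResponse.CONTINUE;
	}

	@Override
	public void configure(Map<String, ?> configs) {
		// no-op
	}
}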
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
/*
|
||||
* Copyright 2021-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
|
||||
import org.apache.kafka.streams.processor.ProcessorContext;
|
||||
|
||||
/**
|
||||
*
|
||||
* {@link DeserializationExceptionHandler} that silently skips
* deserialization exceptions and continues processing.
*
* @author Soby Chacko
|
||||
* @since 3.1.2
|
||||
*/
|
||||
public class SkipAndContinueExceptionHandler implements DeserializationExceptionHandler {
|
||||
|
||||
@Override
|
||||
public DeserializationExceptionHandler.DeserializationHandlerResponse handle(final ProcessorContext context,
|
||||
final ConsumerRecord<byte[], byte[]> record,
|
||||
final Exception exception) {
|
||||
return DeserializationExceptionHandler.DeserializationHandlerResponse.CONTINUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(final Map<String, ?> configs) {
|
||||
// ignore
|
||||
}
|
||||
}
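Usage note (illustrative, not part of this change): a handler like the SkipAndContinueExceptionHandler removed above can also be wired into a plain Kafka Streams application through the standard Streams configuration key; a minimal sketch, with placeholder application id and bootstrap servers:

Properties streamsConfig = new Properties();
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");        // placeholder
streamsConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
// Skip records that cannot be deserialized instead of failing the stream thread.
streamsConfig.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
		SkipAndContinueExceptionHandler.class);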
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,25 +16,16 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler;
|
||||
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.context.SmartLifecycle;
|
||||
import org.springframework.kafka.KafkaException;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.kafka.streams.KafkaStreamsMicrometerListener;
|
||||
|
||||
/**
|
||||
* Iterate through all {@link StreamsBuilderFactoryBean} in the application context and
|
||||
* start them. As each one completes starting, register the associated KafkaStreams object
|
||||
* into {@link InteractiveQueryService}.
|
||||
* into {@link QueryableStoreRegistry}.
|
||||
*
|
||||
* This {@link SmartLifecycle} class ensures that the bean created from it is started very
|
||||
* late through the bootstrap process by setting the phase value closer to
|
||||
@@ -44,35 +35,24 @@ import org.springframework.kafka.streams.KafkaStreamsMicrometerListener;
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
private final KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics;
|
||||
|
||||
private final KafkaStreamsMicrometerListener listener;
|
||||
|
||||
private volatile boolean running;
|
||||
|
||||
private final KafkaProperties kafkaProperties;
|
||||
|
||||
StreamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry,
|
||||
KafkaStreamsBinderMetrics kafkaStreamsBinderMetrics,
|
||||
KafkaStreamsMicrometerListener listener,
|
||||
KafkaProperties kafkaProperties) {
|
||||
StreamsBuilderFactoryManager(
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
this.kafkaStreamsBinderMetrics = kafkaStreamsBinderMetrics;
|
||||
this.listener = listener;
|
||||
this.kafkaProperties = kafkaProperties;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAutoStartup() {
|
||||
return this.kafkaProperties == null || this.kafkaProperties.getStreams().isAutoStartup();
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -90,26 +70,9 @@ public class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeans();
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
if (this.listener != null) {
|
||||
streamsBuilderFactoryBean.addListener(this.listener);
|
||||
}
|
||||
// By default, we shut down the client if there is an uncaught exception in the application.
|
||||
// Users can override this by customizing the StreamsBuilderFactoryBean (SBFB); a configurer sketch follows this hunk. See this issue for more details:
|
||||
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1110
|
||||
streamsBuilderFactoryBean.setStreamsUncaughtExceptionHandler(exception ->
|
||||
StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT);
|
||||
// Starting the stream.
|
||||
final Map<StreamsBuilderFactoryBean, List<ConsumerProperties>> bindingServicePropertiesPerSbfb =
|
||||
this.kafkaStreamsBindingInformationCatalogue.getConsumerPropertiesPerSbfb();
|
||||
final List<ConsumerProperties> consumerProperties = bindingServicePropertiesPerSbfb.get(streamsBuilderFactoryBean);
|
||||
final boolean autoStartupDisabledOnAtLeastOneConsumerBinding = consumerProperties.stream().anyMatch(consumerProperties1 -> !consumerProperties1.isAutoStartup());
|
||||
if (!autoStartupDisabledOnAtLeastOneConsumerBinding) {
|
||||
streamsBuilderFactoryBean.start();
|
||||
this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
|
||||
}
|
||||
}
|
||||
if (this.kafkaStreamsBinderMetrics != null) {
|
||||
this.kafkaStreamsBinderMetrics.addMetrics(streamsBuilderFactoryBeans);
|
||||
streamsBuilderFactoryBean.start();
|
||||
this.kafkaStreamsRegistry.registerKafkaStreams(
|
||||
streamsBuilderFactoryBean.getKafkaStreams());
|
||||
}
|
||||
this.running = true;
|
||||
}
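The comment in start() above notes that applications can override the shutdown-on-exception default by customizing the StreamsBuilderFactoryBean. A minimal sketch of such a customization, assuming the StreamsBuilderFactoryBeanConfigurer callback from spring-kafka is available; the REPLACE_THREAD choice is only an example, not the binder default:

@Bean
public StreamsBuilderFactoryBeanConfigurer uncaughtExceptionHandlerConfigurer() {
	// Replace the failed stream thread instead of shutting down the whole client.
	return factoryBean -> factoryBean.setStreamsUncaughtExceptionHandler(
			exception -> StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.REPLACE_THREAD);
}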
|
||||
@@ -125,14 +88,9 @@ public class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
try {
|
||||
Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue
|
||||
.getStreamsBuilderFactoryBeans();
|
||||
int n = 0;
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
streamsBuilderFactoryBean.removeListener(this.listener);
|
||||
streamsBuilderFactoryBean.stop();
|
||||
}
|
||||
for (ProducerFactory<byte[], byte[]> dlqProducerFactory : this.kafkaStreamsBindingInformationCatalogue.getDlqProducerFactories()) {
|
||||
((DisposableBean) dlqProducerFactory).destroy();
|
||||
}
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new IllegalStateException(ex);
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
/*
|
||||
* Copyright 2020-2020 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.endpoint;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.springframework.boot.actuate.endpoint.annotation.Endpoint;
|
||||
import org.springframework.boot.actuate.endpoint.annotation.ReadOperation;
|
||||
import org.springframework.boot.actuate.endpoint.annotation.Selector;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsRegistry;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Actuator endpoint for topology description.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.4
|
||||
*/
|
||||
@Endpoint(id = "kafkastreamstopology")
|
||||
public class KafkaStreamsTopologyEndpoint {
|
||||
|
||||
/**
|
||||
* Topology not found message.
|
||||
*/
|
||||
public static final String NO_TOPOLOGY_FOUND_MSG = "No topology found for the given application ID";
|
||||
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
public KafkaStreamsTopologyEndpoint(KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
}
|
||||
|
||||
@ReadOperation
|
||||
public List<String> kafkaStreamsTopologies() {
|
||||
final List<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsRegistry.streamsBuilderFactoryBeans();
|
||||
final StringBuilder topologyDescription = new StringBuilder();
|
||||
final List<String> descs = new ArrayList<>();
|
||||
streamsBuilderFactoryBeans.stream()
|
||||
.forEach(streamsBuilderFactoryBean ->
|
||||
descs.add(streamsBuilderFactoryBean.getTopology().describe().toString()));
|
||||
return descs;
|
||||
}
|
||||
|
||||
@ReadOperation
|
||||
public String kafkaStreamsTopology(@Selector String applicationId) {
|
||||
if (!StringUtils.isEmpty(applicationId)) {
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsRegistry.streamsBuilderFactoryBean(applicationId);
|
||||
if (streamsBuilderFactoryBean != null) {
|
||||
return streamsBuilderFactoryBean.getTopology().describe().toString();
|
||||
}
|
||||
else {
|
||||
return NO_TOPOLOGY_FOUND_MSG;
|
||||
}
|
||||
}
|
||||
return NO_TOPOLOGY_FOUND_MSG;
|
||||
}
|
||||
}
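Usage note (illustrative, assuming the endpoint is exposed over the web actuator): the topology descriptions served by the endpoint above can be read with simple HTTP calls; the host, port, and application id below are placeholders:

RestTemplate restTemplate = new RestTemplate();
// All known topologies:
String allTopologies = restTemplate.getForObject(
		"http://localhost:8080/actuator/kafkastreamstopology", String.class);
// Topology for a single Kafka Streams application id:
String singleTopology = restTemplate.getForObject(
		"http://localhost:8080/actuator/kafkastreamstopology/my-application-id", String.class);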
|
||||
@@ -1,43 +0,0 @@
|
||||
/*
|
||||
* Copyright 2020-2020 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.endpoint;
|
||||
|
||||
import org.springframework.boot.actuate.autoconfigure.endpoint.EndpointAutoConfiguration;
|
||||
import org.springframework.boot.actuate.autoconfigure.endpoint.condition.ConditionalOnAvailableEndpoint;
|
||||
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsBinderSupportAutoConfiguration;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsRegistry;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.4
|
||||
*/
|
||||
@Configuration
|
||||
@ConditionalOnClass(name = {
|
||||
"org.springframework.boot.actuate.endpoint.annotation.Endpoint" })
|
||||
@AutoConfigureAfter({EndpointAutoConfiguration.class, KafkaStreamsBinderSupportAutoConfiguration.class})
|
||||
public class KafkaStreamsTopologyEndpointAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
@ConditionalOnAvailableEndpoint
|
||||
public KafkaStreamsTopologyEndpoint topologyEndpoint(KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
return new KafkaStreamsTopologyEndpoint(kafkaStreamsRegistry);
|
||||
}
|
||||
}
|
||||
@@ -1,140 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.factory.BeanFactoryUtils;
|
||||
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionOutcome;
|
||||
import org.springframework.boot.autoconfigure.condition.SpringBootCondition;
|
||||
import org.springframework.context.annotation.ConditionContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.core.type.AnnotatedTypeMetadata;
|
||||
import org.springframework.util.ClassUtils;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
/**
|
||||
* Custom {@link org.springframework.context.annotation.Condition} that detects the presence
* of java.util.function.Function, Consumer, BiFunction, or BiConsumer beans. Used for Kafka Streams function support.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.2.0
|
||||
*/
|
||||
public class FunctionDetectorCondition extends SpringBootCondition {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(FunctionDetectorCondition.class);
|
||||
|
||||
@SuppressWarnings({ "unchecked", "rawtypes" })
|
||||
@Override
|
||||
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
|
||||
if (context != null && context.getBeanFactory() != null) {
|
||||
|
||||
String[] functionTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), Function.class, true, false);
|
||||
String[] consumerTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), Consumer.class, true, false);
|
||||
String[] biFunctionTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), BiFunction.class, true, false);
|
||||
String[] biConsumerTypes = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(context.getBeanFactory(), BiConsumer.class, true, false);
|
||||
|
||||
List<String> functionComponents = new ArrayList<>();
|
||||
|
||||
functionComponents.addAll(Arrays.asList(functionTypes));
|
||||
functionComponents.addAll(Arrays.asList(consumerTypes));
|
||||
functionComponents.addAll(Arrays.asList(biFunctionTypes));
|
||||
functionComponents.addAll(Arrays.asList(biConsumerTypes));
|
||||
|
||||
List<String> kafkaStreamsFunctions = pruneFunctionBeansForKafkaStreams(functionComponents, context);
|
||||
if (!CollectionUtils.isEmpty(kafkaStreamsFunctions)) {
|
||||
return ConditionOutcome.match("Matched. Function/BiFunction/Consumer beans found");
|
||||
}
|
||||
else {
|
||||
return ConditionOutcome.noMatch("No match. No Function/BiFunction/Consumer beans found");
|
||||
}
|
||||
}
|
||||
return ConditionOutcome.noMatch("No match. No Function/BiFunction/Consumer beans found");
|
||||
}
|
||||
|
||||
private static List<String> pruneFunctionBeansForKafkaStreams(List<String> functionComponents,
|
||||
ConditionContext context) {
|
||||
final List<String> prunedList = new ArrayList<>();
|
||||
|
||||
for (String key : functionComponents) {
|
||||
final Class<?> classObj = ClassUtils.resolveClassName(((AnnotatedBeanDefinition)
|
||||
context.getBeanFactory().getBeanDefinition(key))
|
||||
.getMetadata().getClassName(),
|
||||
ClassUtils.getDefaultClassLoader());
|
||||
try {
|
||||
|
||||
Method[] methods = classObj.getMethods();
|
||||
Optional<Method> kafkaStreamMethod = Arrays.stream(methods).filter(m -> m.getName().equals(key)).findFirst();
|
||||
// check if the bean name is overridden.
|
||||
if (!kafkaStreamMethod.isPresent()) {
|
||||
final BeanDefinition beanDefinition = context.getBeanFactory().getBeanDefinition(key);
|
||||
final String factoryMethodName = beanDefinition.getFactoryMethodName();
|
||||
kafkaStreamMethod = Arrays.stream(methods).filter(m -> m.getName().equals(factoryMethodName)).findFirst();
|
||||
}
|
||||
|
||||
if (kafkaStreamMethod.isPresent()) {
|
||||
Method method = kafkaStreamMethod.get();
|
||||
ResolvableType resolvableType = ResolvableType.forMethodReturnType(method, classObj);
|
||||
final Class<?> rawClass = resolvableType.getGeneric(0).getRawClass();
|
||||
if (rawClass == KStream.class || rawClass == KTable.class || rawClass == GlobalKTable.class) {
|
||||
prunedList.add(key);
|
||||
}
|
||||
}
|
||||
else {
|
||||
//check if it is a @Component bean.
|
||||
Optional<Method> componentBeanMethod = Arrays.stream(methods).filter(
|
||||
m -> (m.getName().equals("apply") || m.getName().equals("accept"))
|
||||
&& isKafkaStreamsTypeFound(m)).findFirst();
|
||||
if (componentBeanMethod.isPresent()) {
|
||||
Method method = componentBeanMethod.get();
|
||||
final ResolvableType resolvableType1 = ResolvableType.forMethodParameter(method, 0);
|
||||
final Class<?> rawClass = resolvableType1.getRawClass();
|
||||
if (rawClass == KStream.class || rawClass == KTable.class || rawClass == GlobalKTable.class) {
|
||||
prunedList.add(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOG.error("Function not found: " + key, e);
|
||||
}
|
||||
}
|
||||
return prunedList;
|
||||
}
|
||||
|
||||
private static boolean isKafkaStreamsTypeFound(Method method) {
|
||||
return KStream.class.isAssignableFrom(method.getParameters()[0].getType()) ||
|
||||
KTable.class.isAssignableFrom(method.getParameters()[0].getType()) ||
|
||||
GlobalKTable.class.isAssignableFrom(method.getParameters()[0].getType());
|
||||
}
|
||||
}
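For reference, the kind of bean this condition matches is an ordinary java.util.function bean whose generic parameters resolve to Kafka Streams types, for example (illustrative only):

@Bean
public Function<KStream<String, String>, KStream<String, String>> process() {
	// The first generic parameter resolves to KStream, so FunctionDetectorCondition reports a match.
	return input -> input.filter((key, value) -> value != null);
}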
|
||||
@@ -1,286 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanFactory;
|
||||
import org.springframework.beans.factory.BeanFactoryAware;
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.beans.factory.support.RootBeanDefinition;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindableProxyFactory;
|
||||
import org.springframework.cloud.stream.binding.BoundTargetHolder;
|
||||
import org.springframework.cloud.stream.function.FunctionConstants;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
/**
|
||||
* Kafka Streams specific target bindings proxy factory. See {@link AbstractBindableProxyFactory} for more details.
|
||||
* <p>
|
||||
* Targets bound by this factory:
|
||||
* <p>
|
||||
* {@link KStream}
|
||||
* {@link KTable}
|
||||
* {@link GlobalKTable}
|
||||
* <p>
|
||||
* This class looks at the Function bean's return signature as {@link ResolvableType} and introspect the individual types,
|
||||
* binding them on the way.
|
||||
* <p>
|
||||
* All types on the {@link ResolvableType} are bound except for KStream[] array types on the outbound, which are
* deferred for binding at a later stage. This is because, in this class, there is no way to know the actual size of
* the returned array; binding has to wait until the function is invoked and a result is available.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class KafkaStreamsBindableProxyFactory extends AbstractBindableProxyFactory implements InitializingBean, BeanFactoryAware {
|
||||
|
||||
@Autowired
|
||||
private StreamFunctionProperties streamFunctionProperties;
|
||||
|
||||
private ResolvableType[] types;
|
||||
|
||||
private Method method;
|
||||
|
||||
private final String functionName;
|
||||
|
||||
private BeanFactory beanFactory;
|
||||
|
||||
public KafkaStreamsBindableProxyFactory(ResolvableType[] types, String functionName, Method method) {
|
||||
super(types[0].getType().getClass());
|
||||
this.types = types;
|
||||
this.functionName = functionName;
|
||||
this.method = method;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterPropertiesSet() {
|
||||
populateBindingTargetFactories(beanFactory);
|
||||
Assert.notEmpty(KafkaStreamsBindableProxyFactory.this.bindingTargetFactories,
|
||||
"'bindingTargetFactories' cannot be empty");
|
||||
|
||||
int resolvableTypeDepthCounter = 0;
|
||||
boolean isKafkaStreamsType = this.types[0].getRawClass().isAssignableFrom(KStream.class) ||
|
||||
this.types[0].getRawClass().isAssignableFrom(KTable.class) ||
|
||||
this.types[0].getRawClass().isAssignableFrom(GlobalKTable.class);
|
||||
ResolvableType argument = isKafkaStreamsType ? this.types[0] : this.types[0].getGeneric(resolvableTypeDepthCounter++);
|
||||
List<String> inputBindings = buildInputBindings();
|
||||
Iterator<String> iterator = inputBindings.iterator();
|
||||
String next = iterator.next();
|
||||
bindInput(argument, next);
|
||||
|
||||
// Check if its a component style bean.
|
||||
if (method != null) {
|
||||
final Object bean = beanFactory.getBean(functionName);
|
||||
if (BiFunction.class.isAssignableFrom(bean.getClass()) || BiConsumer.class.isAssignableFrom(bean.getClass())) {
|
||||
argument = ResolvableType.forMethodParameter(method, 1);
|
||||
next = iterator.next();
|
||||
bindInput(argument, next);
|
||||
}
|
||||
}
|
||||
// Normal functional bean
|
||||
if (this.types[0].getRawClass() != null &&
|
||||
(this.types[0].getRawClass().isAssignableFrom(BiFunction.class) ||
|
||||
this.types[0].getRawClass().isAssignableFrom(BiConsumer.class))) {
|
||||
argument = this.types[0].getGeneric(resolvableTypeDepthCounter++);
|
||||
next = iterator.next();
|
||||
bindInput(argument, next);
|
||||
}
|
||||
ResolvableType outboundArgument;
|
||||
if (method != null) {
|
||||
outboundArgument = ResolvableType.forMethodReturnType(method);
|
||||
}
|
||||
else {
|
||||
outboundArgument = this.types[0].getGeneric(resolvableTypeDepthCounter);
|
||||
}
|
||||
|
||||
while (isAnotherFunctionOrConsumerFound(outboundArgument)) {
|
||||
//The function is a curried function. We should introspect the partial function chain hierarchy.
|
||||
argument = outboundArgument.getGeneric(0);
|
||||
String next1 = iterator.next();
|
||||
bindInput(argument, next1);
|
||||
outboundArgument = outboundArgument.getGeneric(1);
|
||||
}
|
||||
|
||||
|
||||
final int lastTypeIndex = this.types.length - 1;
|
||||
if (this.types.length > 1 && this.types[lastTypeIndex] != null && this.types[lastTypeIndex].getRawClass() != null) {
|
||||
if (this.types[lastTypeIndex].getRawClass().isAssignableFrom(Function.class) ||
|
||||
this.types[lastTypeIndex].getRawClass().isAssignableFrom(Consumer.class)) {
|
||||
outboundArgument = this.types[lastTypeIndex].getGeneric(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (outboundArgument != null && outboundArgument.getRawClass() != null && (!outboundArgument.isArray() &&
|
||||
(outboundArgument.getRawClass().isAssignableFrom(KStream.class) ||
|
||||
outboundArgument.getRawClass().isAssignableFrom(KTable.class)))) { //Allowing both KStream and KTable on the outbound.
|
||||
// if the type is array, we need to do a late binding as we don't know the number of
|
||||
// output bindings at this point in the flow.
|
||||
|
||||
List<String> outputBindings = streamFunctionProperties.getOutputBindings(this.functionName);
|
||||
String outputBinding = null;
|
||||
|
||||
if (!CollectionUtils.isEmpty(outputBindings)) {
|
||||
Iterator<String> outputBindingsIter = outputBindings.iterator();
|
||||
if (outputBindingsIter.hasNext()) {
|
||||
outputBinding = outputBindingsIter.next();
|
||||
}
|
||||
}
|
||||
else {
|
||||
outputBinding = String.format("%s-%s-0", this.functionName, FunctionConstants.DEFAULT_OUTPUT_SUFFIX);
|
||||
}
|
||||
Assert.isTrue(outputBinding != null, "output binding is not inferred.");
|
||||
// We will only allow KStream targets on the outbound. If the user provides a KTable,
|
||||
// we still use the KStreamBinder to send it through the outbound.
|
||||
// In that case before sending, we do a cast from KTable to KStream.
|
||||
// See KafkaStreamsFunctionsProcessor#setupFunctionInvokerForKafkaStreams for details.
|
||||
KafkaStreamsBindableProxyFactory.this.outputHolders.put(outputBinding,
|
||||
new BoundTargetHolder(getBindingTargetFactory(KStream.class)
|
||||
.createOutput(outputBinding), true));
|
||||
String outputBinding1 = outputBinding;
|
||||
RootBeanDefinition rootBeanDefinition1 = new RootBeanDefinition();
|
||||
rootBeanDefinition1.setInstanceSupplier(() -> outputHolders.get(outputBinding1).getBoundTarget());
|
||||
BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;
|
||||
registry.registerBeanDefinition(outputBinding1, rootBeanDefinition1);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isAnotherFunctionOrConsumerFound(ResolvableType arg1) {
|
||||
return arg1 != null && !arg1.isArray() && arg1.getRawClass() != null &&
|
||||
(arg1.getRawClass().isAssignableFrom(Function.class) || arg1.getRawClass().isAssignableFrom(Consumer.class));
|
||||
}
|
||||
|
||||
/**
|
||||
* If the application provides the property spring.cloud.stream.function.inputBindings.functionName,
|
||||
* that gets precedence. Otherwise, use functionName-input or functionName-input-0, functionName-input-1 and so on
|
||||
* for multiple inputs.
|
||||
*
|
||||
* @return an ordered collection of input bindings to use
|
||||
*/
|
||||
private List<String> buildInputBindings() {
|
||||
List<String> inputs = new ArrayList<>();
|
||||
List<String> inputBindings = streamFunctionProperties.getInputBindings(this.functionName);
|
||||
if (!CollectionUtils.isEmpty(inputBindings)) {
|
||||
inputs.addAll(inputBindings);
|
||||
return inputs;
|
||||
}
|
||||
int numberOfInputs = this.types[0].getRawClass() != null &&
|
||||
(this.types[0].getRawClass().isAssignableFrom(BiFunction.class) ||
|
||||
this.types[0].getRawClass().isAssignableFrom(BiConsumer.class)) ? 2 : getNumberOfInputs();
|
||||
|
||||
// For @Component style beans.
|
||||
if (method != null) {
|
||||
final ResolvableType returnType = ResolvableType.forMethodReturnType(method);
|
||||
Object bean = beanFactory.containsBean(functionName) ? beanFactory.getBean(functionName) : null;
|
||||
|
||||
if (bean != null && (BiFunction.class.isAssignableFrom(bean.getClass()) || BiConsumer.class.isAssignableFrom(bean.getClass()))) {
|
||||
numberOfInputs = 2;
|
||||
}
|
||||
else if (returnType.getRawClass().isAssignableFrom(Function.class) || returnType.getRawClass().isAssignableFrom(Consumer.class)) {
|
||||
numberOfInputs = 1;
|
||||
ResolvableType arg1 = returnType;
|
||||
|
||||
while (isAnotherFunctionOrConsumerFound(arg1)) {
|
||||
arg1 = arg1.getGeneric(1);
|
||||
numberOfInputs++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int i = 0;
|
||||
while (i < numberOfInputs) {
|
||||
inputs.add(String.format("%s-%s-%d", this.functionName, FunctionConstants.DEFAULT_INPUT_SUFFIX, i++));
|
||||
}
|
||||
return inputs;
|
||||
}
|
||||
|
||||
private int getNumberOfInputs() {
|
||||
int numberOfInputs = 1;
|
||||
ResolvableType arg1 = this.types[0].getGeneric(1);
|
||||
|
||||
while (isAnotherFunctionOrConsumerFound(arg1)) {
|
||||
arg1 = arg1.getGeneric(1);
|
||||
numberOfInputs++;
|
||||
}
|
||||
return numberOfInputs;
|
||||
}
|
||||
|
||||
private void bindInput(ResolvableType arg0, String inputName) {
|
||||
if (arg0.getRawClass() != null) {
|
||||
KafkaStreamsBindableProxyFactory.this.inputHolders.put(inputName,
|
||||
new BoundTargetHolder(getBindingTargetFactory(arg0.getRawClass())
|
||||
.createInput(inputName), true));
|
||||
}
|
||||
BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;
|
||||
RootBeanDefinition rootBeanDefinition = new RootBeanDefinition();
|
||||
rootBeanDefinition.setInstanceSupplier(() -> inputHolders.get(inputName).getBoundTarget());
|
||||
registry.registerBeanDefinition(inputName, rootBeanDefinition);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> getInputs() {
|
||||
Set<String> ins = new LinkedHashSet<>();
|
||||
this.inputHolders.forEach((s, BoundTargetHolder) -> ins.add(s));
|
||||
return ins;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> getOutputs() {
|
||||
Set<String> outs = new LinkedHashSet<>();
|
||||
this.outputHolders.forEach((s, BoundTargetHolder) -> outs.add(s));
|
||||
return outs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
|
||||
this.beanFactory = beanFactory;
|
||||
}
|
||||
|
||||
public void addOutputBinding(String output, Class<?> clazz) {
|
||||
KafkaStreamsBindableProxyFactory.this.outputHolders.put(output,
|
||||
new BoundTargetHolder(getBindingTargetFactory(clazz)
|
||||
.createOutput(output), true));
|
||||
}
|
||||
|
||||
public String getFunctionName() {
|
||||
return functionName;
|
||||
}
|
||||
|
||||
public Map<String, BoundTargetHolder> getOutputHolders() {
|
||||
return outputHolders;
|
||||
}
|
||||
}
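To illustrate the binding derivation performed by this proxy factory, here is a sketch of a two-input function bean and the binding names that would be derived for it, assuming the usual "in"/"out" suffix constants and no custom inputBindings/outputBindings properties:

@Bean
public BiFunction<KStream<String, Long>, KTable<String, String>, KStream<String, Long>> enrich() {
	// Derived bindings (assumed defaults): enrich-in-0 (KStream), enrich-in-1 (KTable), enrich-out-0 (KStream).
	return (stream, table) -> stream.join(table, (count, name) -> count);
}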
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,36 +16,34 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsFunctionProcessor;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Conditional;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @since 2.2.0
|
||||
*/
|
||||
@Configuration
|
||||
@ConditionalOnProperty("spring.cloud.stream.function.definition")
|
||||
@EnableConfigurationProperties(StreamFunctionProperties.class)
|
||||
public class KafkaStreamsFunctionAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
@Conditional(FunctionDetectorCondition.class)
|
||||
public KafkaStreamsFunctionProcessorInvoker kafkaStreamsFunctionProcessorInvoker(
|
||||
KafkaStreamsFunctionBeanPostProcessor kafkaStreamsFunctionBeanPostProcessor,
|
||||
KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor,
|
||||
KafkaStreamsBindableProxyFactory[] kafkaStreamsBindableProxyFactories,
|
||||
StreamFunctionProperties streamFunctionProperties) {
|
||||
return new KafkaStreamsFunctionProcessorInvoker(kafkaStreamsFunctionBeanPostProcessor.getResolvableTypes(),
|
||||
kafkaStreamsFunctionProcessor, kafkaStreamsBindableProxyFactories, kafkaStreamsFunctionBeanPostProcessor.getMethods(),
|
||||
streamFunctionProperties);
|
||||
StreamFunctionProperties properties) {
|
||||
return new KafkaStreamsFunctionProcessorInvoker(kafkaStreamsFunctionBeanPostProcessor.getResolvableType(),
|
||||
properties.getDefinition(), kafkaStreamsFunctionProcessor);
|
||||
}
|
||||
|
||||
@Bean
|
||||
@Conditional(FunctionDetectorCondition.class)
|
||||
public KafkaStreamsFunctionBeanPostProcessor kafkaStreamsFunctionBeanPostProcessor(StreamFunctionProperties streamFunctionProperties) {
|
||||
return new KafkaStreamsFunctionBeanPostProcessor(streamFunctionProperties);
|
||||
public KafkaStreamsFunctionBeanPostProcessor kafkaStreamsFunctionBeanPostProcessor(
|
||||
StreamFunctionProperties properties) {
|
||||
return new KafkaStreamsFunctionBeanPostProcessor(properties);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -17,255 +17,53 @@
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanFactory;
|
||||
import org.springframework.beans.factory.BeanFactoryAware;
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.beans.factory.annotation.AnnotatedBeanDefinition;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.beans.factory.support.RootBeanDefinition;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.util.ClassUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.2.0
|
||||
* @since 2.1.0
|
||||
*
|
||||
*/
|
||||
public class KafkaStreamsFunctionBeanPostProcessor implements InitializingBean, BeanFactoryAware {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(KafkaStreamsFunctionBeanPostProcessor.class);
|
||||
|
||||
private static final String[] EXCLUDE_FUNCTIONS = new String[]{"functionRouter", "sendToDlqAndContinue"};
|
||||
class KafkaStreamsFunctionBeanPostProcessor implements InitializingBean, BeanFactoryAware {
|
||||
|
||||
private final StreamFunctionProperties kafkaStreamsFunctionProperties;
|
||||
private ConfigurableListableBeanFactory beanFactory;
|
||||
private boolean onlySingleFunction;
|
||||
private Map<String, ResolvableType> resolvableTypeMap = new TreeMap<>();
|
||||
private Map<String, Method> methods = new TreeMap<>();
|
||||
private ResolvableType resolvableType;
|
||||
|
||||
private final StreamFunctionProperties streamFunctionProperties;
|
||||
|
||||
private Map<String, ResolvableType> kafkaStreamsOnlyResolvableTypes = new HashMap<>();
|
||||
private Map<String, Method> kafakStreamsOnlyMethods = new HashMap<>();
|
||||
|
||||
public KafkaStreamsFunctionBeanPostProcessor(StreamFunctionProperties streamFunctionProperties) {
|
||||
this.streamFunctionProperties = streamFunctionProperties;
|
||||
KafkaStreamsFunctionBeanPostProcessor(StreamFunctionProperties properties) {
|
||||
this.kafkaStreamsFunctionProperties = properties;
|
||||
}
|
||||
|
||||
public Map<String, ResolvableType> getResolvableTypes() {
|
||||
return this.resolvableTypeMap;
|
||||
}
|
||||
|
||||
public Map<String, Method> getMethods() {
|
||||
return methods;
|
||||
public ResolvableType getResolvableType() {
|
||||
return this.resolvableType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterPropertiesSet() {
|
||||
String[] functionNames = this.beanFactory.getBeanNamesForType(Function.class);
|
||||
String[] biFunctionNames = this.beanFactory.getBeanNamesForType(BiFunction.class);
|
||||
String[] consumerNames = this.beanFactory.getBeanNamesForType(Consumer.class);
|
||||
String[] biConsumerNames = this.beanFactory.getBeanNamesForType(BiConsumer.class);
|
||||
|
||||
final Stream<String> concat = Stream.concat(
|
||||
Stream.concat(Stream.of(functionNames), Stream.of(consumerNames)),
|
||||
Stream.concat(Stream.of(biFunctionNames), Stream.of(biConsumerNames)));
|
||||
final List<String> collect = concat.collect(Collectors.toList());
|
||||
collect.removeIf(s -> Arrays.stream(EXCLUDE_FUNCTIONS).anyMatch(t -> t.equals(s)));
|
||||
collect.removeIf(Pattern.compile(".*_registration").asPredicate());
|
||||
|
||||
onlySingleFunction = collect.size() == 1;
|
||||
collect.stream()
|
||||
.forEach(this::extractResolvableTypes);
|
||||
|
||||
kafkaStreamsOnlyResolvableTypes.keySet().forEach(k -> addResolvableTypeInfo(k, kafkaStreamsOnlyResolvableTypes.get(k)));
|
||||
kafakStreamsOnlyMethods.keySet().forEach(k -> addResolvableTypeInfo(k, kafakStreamsOnlyMethods.get(k)));
|
||||
|
||||
BeanDefinitionRegistry registry = (BeanDefinitionRegistry) beanFactory;
|
||||
|
||||
final String definition = streamFunctionProperties.getDefinition();
|
||||
final String[] functionUnits = StringUtils.hasText(definition) ? definition.split(";") : new String[]{};
|
||||
|
||||
final Set<String> kafkaStreamsMethodNames = new HashSet<>(kafkaStreamsOnlyResolvableTypes.keySet());
|
||||
kafkaStreamsMethodNames.addAll(this.resolvableTypeMap.keySet());
|
||||
|
||||
if (functionUnits.length == 0) {
|
||||
for (String s : getResolvableTypes().keySet()) {
|
||||
ResolvableType[] resolvableTypes = new ResolvableType[]{getResolvableTypes().get(s)};
|
||||
RootBeanDefinition rootBeanDefinition = new RootBeanDefinition(
|
||||
KafkaStreamsBindableProxyFactory.class);
|
||||
registerKakaStreamsProxyFactory(registry, s, resolvableTypes, rootBeanDefinition);
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (String functionUnit : functionUnits) {
|
||||
if (functionUnit.contains("|")) {
|
||||
final String[] composedFunctions = functionUnit.split("\\|");
|
||||
String derivedNameFromComposed = "";
|
||||
ResolvableType[] resolvableTypes = new ResolvableType[composedFunctions.length];
|
||||
|
||||
int i = 0;
|
||||
boolean nonKafkaStreamsFunctionsFound = false;
|
||||
|
||||
for (String split : composedFunctions) {
|
||||
derivedNameFromComposed = derivedNameFromComposed.concat(split);
|
||||
resolvableTypes[i++] = getResolvableTypes().get(split);
|
||||
if (!kafkaStreamsMethodNames.contains(split)) {
|
||||
nonKafkaStreamsFunctionsFound = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!nonKafkaStreamsFunctionsFound) {
|
||||
RootBeanDefinition rootBeanDefinition = new RootBeanDefinition(
|
||||
KafkaStreamsBindableProxyFactory.class);
|
||||
registerKakaStreamsProxyFactory(registry, derivedNameFromComposed, resolvableTypes, rootBeanDefinition);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Ensure that the function unit is a Kafka Streams function
|
||||
if (kafkaStreamsMethodNames.contains(functionUnit)) {
|
||||
ResolvableType[] resolvableTypes = new ResolvableType[]{getResolvableTypes().get(functionUnit)};
|
||||
RootBeanDefinition rootBeanDefinition = new RootBeanDefinition(
|
||||
KafkaStreamsBindableProxyFactory.class);
|
||||
registerKakaStreamsProxyFactory(registry, functionUnit, resolvableTypes, rootBeanDefinition);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void registerKakaStreamsProxyFactory(BeanDefinitionRegistry registry, String s, ResolvableType[] resolvableTypes, RootBeanDefinition rootBeanDefinition) {
|
||||
rootBeanDefinition.getConstructorArgumentValues()
|
||||
.addGenericArgumentValue(resolvableTypes);
|
||||
rootBeanDefinition.getConstructorArgumentValues()
|
||||
.addGenericArgumentValue(s);
|
||||
rootBeanDefinition.getConstructorArgumentValues()
|
||||
.addGenericArgumentValue(getMethods().get(s));
|
||||
registry.registerBeanDefinition("kafkaStreamsBindableProxyFactory-" + s, rootBeanDefinition);
|
||||
}
|
||||
|
||||
private void extractResolvableTypes(String key) {
|
||||
public void afterPropertiesSet() throws Exception {
|
||||
final Class<?> classObj = ClassUtils.resolveClassName(((AnnotatedBeanDefinition)
|
||||
this.beanFactory.getBeanDefinition(key))
|
||||
this.beanFactory.getBeanDefinition(kafkaStreamsFunctionProperties.getDefinition()))
|
||||
.getMetadata().getClassName(),
|
||||
ClassUtils.getDefaultClassLoader());
|
||||
|
||||
try {
|
||||
Method[] methods = classObj.getMethods();
|
||||
Optional<Method> functionalBeanMethods = Arrays.stream(methods).filter(m -> m.getName().equals(key)).findFirst();
|
||||
if (!functionalBeanMethods.isPresent()) {
|
||||
final BeanDefinition beanDefinition = this.beanFactory.getBeanDefinition(key);
|
||||
final String factoryMethodName = beanDefinition.getFactoryMethodName();
|
||||
functionalBeanMethods = Arrays.stream(methods).filter(m -> m.getName().equals(factoryMethodName)).findFirst();
|
||||
}
|
||||
|
||||
if (functionalBeanMethods.isPresent()) {
|
||||
Method method = functionalBeanMethods.get();
|
||||
ResolvableType resolvableType = ResolvableType.forMethodReturnType(method, classObj);
|
||||
final Class<?> rawClass = resolvableType.getGeneric(0).getRawClass();
|
||||
if (rawClass == KStream.class || rawClass == KTable.class || rawClass == GlobalKTable.class) {
|
||||
if (onlySingleFunction) {
|
||||
resolvableTypeMap.put(key, resolvableType);
|
||||
}
|
||||
else {
|
||||
discoverOnlyKafkaStreamsResolvableTypes(key, resolvableType);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
Optional<Method> componentBeanMethods = Arrays.stream(methods)
|
||||
.filter(m -> m.getName().equals("apply") && isKafkaStreamsTypeFound(m) ||
|
||||
m.getName().equals("accept") && isKafkaStreamsTypeFound(m)).findFirst();
|
||||
if (componentBeanMethods.isPresent()) {
|
||||
Method method = componentBeanMethods.get();
|
||||
final ResolvableType resolvableType = ResolvableType.forMethodParameter(method, 0);
|
||||
final Class<?> rawClass = resolvableType.getRawClass();
|
||||
if (rawClass == KStream.class || rawClass == KTable.class || rawClass == GlobalKTable.class) {
|
||||
if (onlySingleFunction) {
|
||||
resolvableTypeMap.put(key, resolvableType);
|
||||
this.methods.put(key, method);
|
||||
}
|
||||
else {
|
||||
discoverOnlyKafkaStreamsResolvableTypesAndMethods(key, resolvableType, method);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Method method = classObj.getMethod(this.kafkaStreamsFunctionProperties.getDefinition());
|
||||
this.resolvableType = ResolvableType.forMethodReturnType(method, classObj);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOG.error("Function activation issues while mapping the function: " + key, e);
|
||||
catch (NoSuchMethodException e) {
|
||||
//ignore
|
||||
}
|
||||
}
|
||||
|
||||
private void addResolvableTypeInfo(String key, ResolvableType resolvableType) {
|
||||
if (kafkaStreamsOnlyResolvableTypes.size() == 1) {
|
||||
resolvableTypeMap.put(key, resolvableType);
|
||||
}
|
||||
else {
|
||||
final String definition = streamFunctionProperties.getDefinition();
|
||||
if (definition == null) {
|
||||
throw new IllegalStateException("Multiple functions found, but function definition property is not set.");
|
||||
}
|
||||
else if (definition.contains(key)) {
|
||||
resolvableTypeMap.put(key, resolvableType);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void discoverOnlyKafkaStreamsResolvableTypes(String key, ResolvableType resolvableType) {
|
||||
kafkaStreamsOnlyResolvableTypes.put(key, resolvableType);
|
||||
}
|
||||
|
||||
private void discoverOnlyKafkaStreamsResolvableTypesAndMethods(String key, ResolvableType resolvableType, Method method) {
|
||||
kafkaStreamsOnlyResolvableTypes.put(key, resolvableType);
|
||||
kafakStreamsOnlyMethods.put(key, method);
|
||||
}
|
||||
|
||||
private void addResolvableTypeInfo(String key, Method method) {
|
||||
if (kafakStreamsOnlyMethods.size() == 1) {
|
||||
this.methods.put(key, method);
|
||||
}
|
||||
else {
|
||||
final String definition = streamFunctionProperties.getDefinition();
|
||||
if (definition == null) {
|
||||
throw new IllegalStateException("Multiple functions found, but function definition property is not set.");
|
||||
}
|
||||
else if (definition.contains(key)) {
|
||||
this.methods.put(key, method);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isKafkaStreamsTypeFound(Method method) {
|
||||
return KStream.class.isAssignableFrom(method.getParameters()[0].getType()) ||
|
||||
KTable.class.isAssignableFrom(method.getParameters()[0].getType()) ||
|
||||
GlobalKTable.class.isAssignableFrom(method.getParameters()[0].getType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
|
||||
this.beanFactory = (ConfigurableListableBeanFactory) beanFactory;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,74 +16,31 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsFunctionProcessor;
|
||||
import org.springframework.cloud.stream.function.StreamFunctionProperties;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.1.0
|
||||
*/
|
||||
public class KafkaStreamsFunctionProcessorInvoker {
|
||||
class KafkaStreamsFunctionProcessorInvoker {
|
||||
|
||||
private final KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor;
|
||||
private final Map<String, ResolvableType> resolvableTypeMap;
|
||||
private final KafkaStreamsBindableProxyFactory[] kafkaStreamsBindableProxyFactories;
|
||||
private final Map<String, Method> methods;
|
||||
private final StreamFunctionProperties streamFunctionProperties;
|
||||
private final ResolvableType resolvableType;
|
||||
private final String functionName;
|
||||
|
||||
public KafkaStreamsFunctionProcessorInvoker(Map<String, ResolvableType> resolvableTypeMap,
|
||||
KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor,
|
||||
KafkaStreamsBindableProxyFactory[] kafkaStreamsBindableProxyFactories,
|
||||
Map<String, Method> methods, StreamFunctionProperties streamFunctionProperties) {
|
||||
KafkaStreamsFunctionProcessorInvoker(ResolvableType resolvableType, String functionName,
|
||||
KafkaStreamsFunctionProcessor kafkaStreamsFunctionProcessor) {
|
||||
this.kafkaStreamsFunctionProcessor = kafkaStreamsFunctionProcessor;
|
||||
this.resolvableTypeMap = resolvableTypeMap;
|
||||
this.kafkaStreamsBindableProxyFactories = kafkaStreamsBindableProxyFactories;
|
||||
this.methods = methods;
|
||||
this.streamFunctionProperties = streamFunctionProperties;
|
||||
this.resolvableType = resolvableType;
|
||||
this.functionName = functionName;
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
void invoke() {
|
||||
final String definition = streamFunctionProperties.getDefinition();
|
||||
final String[] functionUnits = StringUtils.hasText(definition) ? definition.split(";") : new String[]{};
|
||||
|
||||
if (functionUnits.length == 0) {
|
||||
resolvableTypeMap.forEach((key, value) -> {
|
||||
Optional<KafkaStreamsBindableProxyFactory> proxyFactory =
|
||||
Arrays.stream(kafkaStreamsBindableProxyFactories).filter(p -> p.getFunctionName().equals(key)).findFirst();
|
||||
this.kafkaStreamsFunctionProcessor.setupFunctionInvokerForKafkaStreams(value, key, proxyFactory.get(), methods.get(key), null);
|
||||
});
|
||||
}
|
||||
|
||||
for (String functionUnit : functionUnits) {
|
||||
if (functionUnit.contains("|")) {
|
||||
final String[] composedFunctions = functionUnit.split("\\|");
|
||||
String[] derivedNameFromComposed = new String[]{""};
|
||||
for (String split : composedFunctions) {
|
||||
derivedNameFromComposed[0] = derivedNameFromComposed[0].concat(split);
|
||||
}
|
||||
Optional<KafkaStreamsBindableProxyFactory> proxyFactory =
|
||||
Arrays.stream(kafkaStreamsBindableProxyFactories).filter(p -> p.getFunctionName().equals(derivedNameFromComposed[0])).findFirst();
|
||||
proxyFactory.ifPresent(kafkaStreamsBindableProxyFactory ->
|
||||
this.kafkaStreamsFunctionProcessor.setupFunctionInvokerForKafkaStreams(resolvableTypeMap.get(composedFunctions[0]),
|
||||
derivedNameFromComposed[0], kafkaStreamsBindableProxyFactory, methods.get(derivedNameFromComposed[0]), resolvableTypeMap.get(composedFunctions[composedFunctions.length - 1]), composedFunctions));
|
||||
}
|
||||
else {
|
||||
Optional<KafkaStreamsBindableProxyFactory> proxyFactory =
|
||||
Arrays.stream(kafkaStreamsBindableProxyFactories).filter(p -> p.getFunctionName().equals(functionUnit)).findFirst();
|
||||
proxyFactory.ifPresent(kafkaStreamsBindableProxyFactory ->
|
||||
this.kafkaStreamsFunctionProcessor.setupFunctionInvokerForKafkaStreams(resolvableTypeMap.get(functionUnit), functionUnit,
|
||||
kafkaStreamsBindableProxyFactory, methods.get(functionUnit), null));
|
||||
}
|
||||
}
|
||||
this.kafkaStreamsFunctionProcessor.orchestrateStreamListenerSetupMethod(resolvableType, functionName);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.lang.reflect.Type;
|
||||
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.cloud.function.context.WrapperDetector;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsFunctionWrapperDetector implements WrapperDetector {
|
||||
@Override
|
||||
public boolean isWrapper(Type type) {
|
||||
if (type instanceof Class<?>) {
|
||||
Class<?> cls = (Class<?>) type;
|
||||
return KStream.class.isAssignableFrom(cls) ||
|
||||
KTable.class.isAssignableFrom(cls) ||
|
||||
GlobalKTable.class.isAssignableFrom(cls);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright 2017-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.properties;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
|
||||
/**
|
||||
* {@link ConfigurationProperties} that can be used by end-user Kafka Streams applications.
* This class provides convenient ways to access commonly used Kafka Streams properties
* from the user application. For example, windowing operations are a common use case in
* stream processing; window-specific properties can be supplied at runtime and consumed
* in the application through this class.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@ConfigurationProperties("spring.cloud.stream.kafka.streams")
|
||||
public class KafkaStreamsApplicationSupportProperties {
|
||||
|
||||
private TimeWindow timeWindow;
|
||||
|
||||
public TimeWindow getTimeWindow() {
|
||||
return this.timeWindow;
|
||||
}
|
||||
|
||||
public void setTimeWindow(TimeWindow timeWindow) {
|
||||
this.timeWindow = timeWindow;
|
||||
}
|
||||
|
||||
/**
|
||||
* Properties required by time windows.
|
||||
*/
|
||||
public static class TimeWindow {
|
||||
|
||||
private int length;
|
||||
|
||||
private int advanceBy;
|
||||
|
||||
public int getLength() {
|
||||
return this.length;
|
||||
}
|
||||
|
||||
public void setLength(int length) {
|
||||
this.length = length;
|
||||
}
|
||||
|
||||
public int getAdvanceBy() {
|
||||
return this.advanceBy;
|
||||
}
|
||||
|
||||
public void setAdvanceBy(int advanceBy) {
|
||||
this.advanceBy = advanceBy;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
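
The javadoc above mentions supplying window sizes at runtime; the following is a minimal sketch, under the assumption that the accompanying auto-configuration exposes this properties class as an injectable bean, of one way a functional binding can consume it. The bean, types and values are illustrative only, and TimeWindows.of/advanceBy follow the pre-3.x Kafka Streams API.

import java.time.Duration;
import java.util.function.Function;

import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class WindowedCountSketch {

    // Driven by, for example:
    //   spring.cloud.stream.kafka.streams.timeWindow.length=30000
    //   spring.cloud.stream.kafka.streams.timeWindow.advanceBy=10000
    @Bean
    public Function<KStream<String, String>, KStream<Windowed<String>, Long>> windowedCount(
            KafkaStreamsApplicationSupportProperties props) {
        Duration size = Duration.ofMillis(props.getTimeWindow().getLength());
        Duration advance = Duration.ofMillis(props.getTimeWindow().getAdvanceBy());
        return input -> input
                .groupByKey()                                        // group records by key
                .windowedBy(TimeWindows.of(size).advanceBy(advance)) // hopping window built from the properties above
                .count()                                             // windowed count per key
                .toStream();
    }
}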
|
||||
@@ -16,12 +16,8 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.DeserializationExceptionHandler;
|
||||
|
||||
/**
|
||||
* Kafka Streams binder configuration properties.
|
||||
@@ -38,10 +34,7 @@ public class KafkaStreamsBinderConfigurationProperties
|
||||
|
||||
/**
|
||||
* Enumeration for various Serde errors.
|
||||
*
|
||||
* @deprecated in favor of {@link DeserializationExceptionHandler}.
|
||||
*/
|
||||
@Deprecated
|
||||
public enum SerdeError {
|
||||
|
||||
/**
|
||||
@@ -61,37 +54,6 @@ public class KafkaStreamsBinderConfigurationProperties
|
||||
|
||||
private String applicationId;
|
||||
|
||||
private StateStoreRetry stateStoreRetry = new StateStoreRetry();
|
||||
|
||||
private Map<String, Functions> functions = new HashMap<>();
|
||||
|
||||
private KafkaStreamsBinderConfigurationProperties.SerdeError serdeError;
|
||||
|
||||
/**
|
||||
* {@link org.apache.kafka.streams.errors.DeserializationExceptionHandler} to use when
|
||||
* there is a deserialization exception. This handler will be applied against all input bindings
|
||||
* unless overridden at the consumer binding.
|
||||
*/
|
||||
private DeserializationExceptionHandler deserializationExceptionHandler;
|
||||
|
||||
private boolean includeStoppedProcessorsForHealthCheck;
|
||||
|
||||
public Map<String, Functions> getFunctions() {
|
||||
return functions;
|
||||
}
|
||||
|
||||
public void setFunctions(Map<String, Functions> functions) {
|
||||
this.functions = functions;
|
||||
}
|
||||
|
||||
public StateStoreRetry getStateStoreRetry() {
|
||||
return stateStoreRetry;
|
||||
}
|
||||
|
||||
public void setStateStoreRetry(StateStoreRetry stateStoreRetry) {
|
||||
this.stateStoreRetry = stateStoreRetry;
|
||||
}
|
||||
|
||||
public String getApplicationId() {
|
||||
return this.applicationId;
|
||||
}
|
||||
@@ -100,92 +62,21 @@ public class KafkaStreamsBinderConfigurationProperties
|
||||
this.applicationId = applicationId;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
/**
|
||||
* {@link org.apache.kafka.streams.errors.DeserializationExceptionHandler} to use when
|
||||
* there is a Serde error.
|
||||
* {@link KafkaStreamsBinderConfigurationProperties.SerdeError} values are used to
|
||||
* provide the exception handler on consumer binding.
|
||||
*/
|
||||
private KafkaStreamsBinderConfigurationProperties.SerdeError serdeError;
|
||||
|
||||
public KafkaStreamsBinderConfigurationProperties.SerdeError getSerdeError() {
|
||||
return this.serdeError;
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
public void setSerdeError(
|
||||
KafkaStreamsBinderConfigurationProperties.SerdeError serdeError) {
|
||||
this.serdeError = serdeError;
|
||||
if (serdeError == SerdeError.logAndContinue) {
|
||||
this.deserializationExceptionHandler = DeserializationExceptionHandler.logAndContinue;
|
||||
}
|
||||
else if (serdeError == SerdeError.logAndFail) {
|
||||
this.deserializationExceptionHandler = DeserializationExceptionHandler.logAndFail;
|
||||
}
|
||||
else if (serdeError == SerdeError.sendToDlq) {
|
||||
this.deserializationExceptionHandler = DeserializationExceptionHandler.sendToDlq;
|
||||
}
|
||||
}
|
||||
|
||||
public DeserializationExceptionHandler getDeserializationExceptionHandler() {
|
||||
return deserializationExceptionHandler;
|
||||
}
|
||||
|
||||
public void setDeserializationExceptionHandler(DeserializationExceptionHandler deserializationExceptionHandler) {
|
||||
this.deserializationExceptionHandler = deserializationExceptionHandler;
|
||||
}
|
||||
|
||||
public boolean isIncludeStoppedProcessorsForHealthCheck() {
|
||||
return includeStoppedProcessorsForHealthCheck;
|
||||
}
|
||||
|
||||
public void setIncludeStoppedProcessorsForHealthCheck(boolean includeStoppedProcessorsForHealthCheck) {
|
||||
this.includeStoppedProcessorsForHealthCheck = includeStoppedProcessorsForHealthCheck;
|
||||
}
|
||||
|
||||
public static class StateStoreRetry {
|
||||
|
||||
private int maxAttempts = 1;
|
||||
|
||||
private long backoffPeriod = 1000;
|
||||
|
||||
public int getMaxAttempts() {
|
||||
return maxAttempts;
|
||||
}
|
||||
|
||||
public void setMaxAttempts(int maxAttempts) {
|
||||
this.maxAttempts = maxAttempts;
|
||||
}
|
||||
|
||||
public long getBackoffPeriod() {
|
||||
return backoffPeriod;
|
||||
}
|
||||
|
||||
public void setBackoffPeriod(long backoffPeriod) {
|
||||
this.backoffPeriod = backoffPeriod;
|
||||
}
|
||||
}
|
||||
|
||||
public static class Functions {
|
||||
|
||||
/**
|
||||
* Function specific application id.
|
||||
*/
|
||||
private String applicationId;
|
||||
|
||||
/**
|
||||
* Function specific configuration to use.
|
||||
*/
|
||||
private Map<String, String> configuration;
|
||||
|
||||
public String getApplicationId() {
|
||||
return applicationId;
|
||||
}
|
||||
|
||||
public void setApplicationId(String applicationId) {
|
||||
this.applicationId = applicationId;
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return configuration;
|
||||
}
|
||||
|
||||
public void setConfiguration(Map<String, String> configuration) {
|
||||
this.configuration = configuration;
|
||||
}
|
||||
this.serdeError = serdeError;
|
||||
}
|
||||
|
||||
}
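
The per-function application id and configuration map above are bound from binder-level properties. A hedged sketch of the typical keys follows; the function name process and all values are placeholders, the exact relaxed-binding forms are inferred from the field names, and the binder prefix matches the tests further below.

# binder-wide defaults
spring.cloud.stream.kafka.streams.binder.applicationId=my-app-id
spring.cloud.stream.kafka.streams.binder.deserializationExceptionHandler=logAndContinue
spring.cloud.stream.kafka.streams.binder.stateStoreRetry.maxAttempts=3
spring.cloud.stream.kafka.streams.binder.stateStoreRetry.backoffPeriod=2000
# per-function overrides through the Functions map declared above
spring.cloud.stream.kafka.streams.binder.functions.process.applicationId=process-app-id
spring.cloud.stream.kafka.streams.binder.functions.process.configuration.max.poll.records=100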
|
||||
|
||||
@@ -17,7 +17,6 @@
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.properties;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.DeserializationExceptionHandler;
|
||||
|
||||
/**
|
||||
* Extended properties for Kafka Streams consumer.
|
||||
@@ -44,33 +43,6 @@ public class KafkaStreamsConsumerProperties extends KafkaConsumerProperties {
|
||||
*/
|
||||
private String materializedAs;
|
||||
|
||||
/**
|
||||
* Per input binding deserialization handler.
|
||||
*/
|
||||
private DeserializationExceptionHandler deserializationExceptionHandler;
|
||||
|
||||
/**
|
||||
* {@link org.apache.kafka.streams.processor.TimestampExtractor} bean name to use for this consumer.
|
||||
*/
|
||||
private String timestampExtractorBeanName;
|
||||
|
||||
/**
|
||||
* Comma separated list of supported event types for this binding.
|
||||
*/
|
||||
private String eventTypes;
|
||||
|
||||
/**
|
||||
* Record level header key for event type.
|
||||
* If the default value is overridden, the overriding key is expected on each record header when
* eventType based routing is enabled on this binding (by setting eventTypes).
|
||||
*/
|
||||
private String eventTypeHeaderKey = "event_type";
|
||||
|
||||
/**
|
||||
* Custom name for the source component from which the processor is consuming.
|
||||
*/
|
||||
private String consumedAs;
|
||||
|
||||
public String getApplicationId() {
|
||||
return this.applicationId;
|
||||
}
|
||||
@@ -103,43 +75,4 @@ public class KafkaStreamsConsumerProperties extends KafkaConsumerProperties {
|
||||
this.materializedAs = materializedAs;
|
||||
}
|
||||
|
||||
public String getTimestampExtractorBeanName() {
|
||||
return timestampExtractorBeanName;
|
||||
}
|
||||
|
||||
public void setTimestampExtractorBeanName(String timestampExtractorBeanName) {
|
||||
this.timestampExtractorBeanName = timestampExtractorBeanName;
|
||||
}
|
||||
|
||||
public DeserializationExceptionHandler getDeserializationExceptionHandler() {
|
||||
return deserializationExceptionHandler;
|
||||
}
|
||||
|
||||
public void setDeserializationExceptionHandler(DeserializationExceptionHandler deserializationExceptionHandler) {
|
||||
this.deserializationExceptionHandler = deserializationExceptionHandler;
|
||||
}
|
||||
|
||||
public String getEventTypes() {
|
||||
return eventTypes;
|
||||
}
|
||||
|
||||
public void setEventTypes(String eventTypes) {
|
||||
this.eventTypes = eventTypes;
|
||||
}
|
||||
|
||||
public String getEventTypeHeaderKey() {
|
||||
return this.eventTypeHeaderKey;
|
||||
}
|
||||
|
||||
public void setEventTypeHeaderKey(String eventTypeHeaderKey) {
|
||||
this.eventTypeHeaderKey = eventTypeHeaderKey;
|
||||
}
|
||||
|
||||
public String getConsumedAs() {
|
||||
return consumedAs;
|
||||
}
|
||||
|
||||
public void setConsumedAs(String consumedAs) {
|
||||
this.consumedAs = consumedAs;
|
||||
}
|
||||
}
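
A hedged sketch of binding-level keys for the extended consumer properties above; the binding name process-in-0 and all values are placeholders, and the per-binding prefix mirrors the one used by the tests further below.

spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.applicationId=my-consumer-app
spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.materializedAs=incoming-store
spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.timestampExtractorBeanName=myTimestampExtractor
spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.deserializationExceptionHandler=sendToDlq
# event-type routing: only records whose event_type header matches a listed type reach this binding
spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.eventTypes=foo,bar
spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.eventTypeHeaderKey=event_type
spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.consumedAs=orders-source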
|
||||
|
||||
@@ -36,16 +36,6 @@ public class KafkaStreamsProducerProperties extends KafkaProducerProperties {
|
||||
*/
|
||||
private String valueSerde;
|
||||
|
||||
/**
|
||||
* {@link org.apache.kafka.streams.processor.StreamPartitioner} to be used on Kafka Streams producer.
|
||||
*/
|
||||
private String streamPartitionerBeanName;
|
||||
|
||||
/**
|
||||
* Custom name for the sink component to which the processor is producing.
|
||||
*/
|
||||
private String producedAs;
|
||||
|
||||
public String getKeySerde() {
|
||||
return this.keySerde;
|
||||
}
|
||||
@@ -62,19 +52,4 @@ public class KafkaStreamsProducerProperties extends KafkaProducerProperties {
|
||||
this.valueSerde = valueSerde;
|
||||
}
|
||||
|
||||
public String getStreamPartitionerBeanName() {
|
||||
return this.streamPartitionerBeanName;
|
||||
}
|
||||
|
||||
public void setStreamPartitionerBeanName(String streamPartitionerBeanName) {
|
||||
this.streamPartitionerBeanName = streamPartitionerBeanName;
|
||||
}
|
||||
|
||||
public String getProducedAs() {
|
||||
return producedAs;
|
||||
}
|
||||
|
||||
public void setProducedAs(String producedAs) {
|
||||
this.producedAs = producedAs;
|
||||
}
|
||||
}
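
And the producer-side counterparts, again as a hedged sketch with placeholder binding name and values:

spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.valueSerde=org.springframework.kafka.support.serializer.JsonSerde
spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.streamPartitionerBeanName=myStreamPartitioner
spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.producedAs=orders-sink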
|
||||
|
||||
@@ -1,245 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.serde;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.Map;
|
||||
import java.util.PriorityQueue;
|
||||
|
||||
import org.apache.kafka.common.serialization.Deserializer;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.serialization.Serializer;
|
||||
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
|
||||
/**
|
||||
* A convenient {@link Serde} for {@link java.util.Collection} implementations.
|
||||
*
|
||||
* Whenever a Kafka Stream application needs to collect data into a container object like
|
||||
* {@link java.util.Collection}, then this Serde class can be used as a convenience for
|
||||
* serialization needs. An example of where this may be handy is when the application
|
||||
* needs to do aggregation or reduction operations where it needs to simply hold an
|
||||
* {@link Iterable} type.
|
||||
*
|
||||
* By default, this Serde will use {@link JsonSerde} for serializing the inner objects.
|
||||
* This can be changed by providing an explicit Serde during creation of this object.
|
||||
*
|
||||
* Here is an example of a possible use case:
|
||||
*
|
||||
* <pre class="code">
|
||||
* .aggregate(ArrayList::new,
|
||||
* (k, v, aggregates) -> {
|
||||
* aggregates.add(v);
|
||||
* return aggregates;
|
||||
* },
|
||||
* Materialized.<String, Collection<Foo>, WindowStore<Bytes, byte[]>>as(
|
||||
* "foo-store")
|
||||
* .withKeySerde(Serdes.String())
|
||||
* .withValueSerde(new CollectionSerde<>(Foo.class, ArrayList.class)))
|
||||
* </pre>
|
||||
*
|
||||
* Supported Collection types by this Serde are - {@link java.util.ArrayList}, {@link java.util.LinkedList},
|
||||
* {@link java.util.PriorityQueue} and {@link java.util.HashSet}. The deserializer will throw an exception
* if any other Collection type is used.
|
||||
*
|
||||
* @param <E> type of the underlying object that the collection holds
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class CollectionSerde<E> implements Serde<Collection<E>> {
|
||||
|
||||
/**
|
||||
* Serde used for serializing the inner object.
|
||||
*/
|
||||
private final Serde<Collection<E>> inner;
|
||||
|
||||
/**
|
||||
* Type of the collection class. This has to be a class that is
|
||||
* implementing the {@link java.util.Collection} interface.
|
||||
*/
|
||||
private final Class<?> collectionClass;
|
||||
|
||||
/**
|
||||
* Constructor to use when the application wants to specify the type
|
||||
* of the Serde used for the inner object.
|
||||
*
|
||||
* @param serde specify an explicit Serde
|
||||
* @param collectionsClass type of the Collection class
|
||||
*/
|
||||
public CollectionSerde(Serde<E> serde, Class<?> collectionsClass) {
|
||||
this.collectionClass = collectionsClass;
|
||||
this.inner =
|
||||
Serdes.serdeFrom(
|
||||
new CollectionSerializer<>(serde.serializer()),
|
||||
new CollectionDeserializer<>(serde.deserializer(), collectionsClass));
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor to delegate serialization operations for the inner objects
|
||||
* to {@link JsonSerde}.
|
||||
*
|
||||
* @param targetTypeForJsonSerde target type used by the JsonSerde
|
||||
* @param collectionsClass type of the Collection class
|
||||
*/
|
||||
public CollectionSerde(Class<?> targetTypeForJsonSerde, Class<?> collectionsClass) {
|
||||
this.collectionClass = collectionsClass;
|
||||
try (JsonSerde<E> jsonSerde = new JsonSerde(targetTypeForJsonSerde)) {
|
||||
|
||||
this.inner = Serdes.serdeFrom(
|
||||
new CollectionSerializer<>(jsonSerde.serializer()),
|
||||
new CollectionDeserializer<>(jsonSerde.deserializer(), collectionsClass));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Serializer<Collection<E>> serializer() {
|
||||
return inner.serializer();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer<Collection<E>> deserializer() {
|
||||
return inner.deserializer();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
inner.serializer().configure(configs, isKey);
|
||||
inner.deserializer().configure(configs, isKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
inner.serializer().close();
|
||||
inner.deserializer().close();
|
||||
}
|
||||
|
||||
private static class CollectionSerializer<E> implements Serializer<Collection<E>> {
|
||||
|
||||
|
||||
private Serializer<E> inner;
|
||||
|
||||
CollectionSerializer(Serializer<E> inner) {
|
||||
this.inner = inner;
|
||||
}
|
||||
|
||||
CollectionSerializer() { }
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] serialize(String topic, Collection<E> collection) {
|
||||
final int size = collection.size();
|
||||
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
|
||||
final DataOutputStream dos = new DataOutputStream(baos);
|
||||
final Iterator<E> iterator = collection.iterator();
|
||||
try {
|
||||
dos.writeInt(size);
|
||||
while (iterator.hasNext()) {
|
||||
final byte[] bytes = inner.serialize(topic, iterator.next());
|
||||
dos.writeInt(bytes.length);
|
||||
dos.write(bytes);
|
||||
}
|
||||
}
|
||||
catch (IOException e) {
|
||||
throw new RuntimeException("Unable to serialize the provided collection", e);
|
||||
}
|
||||
return baos.toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
inner.close();
|
||||
}
|
||||
}
|
||||
|
||||
private static class CollectionDeserializer<E> implements Deserializer<Collection<E>> {
|
||||
private final Deserializer<E> valueDeserializer;
|
||||
private final Class<?> collectionClass;
|
||||
|
||||
CollectionDeserializer(final Deserializer<E> valueDeserializer, Class<?> collectionClass) {
|
||||
this.valueDeserializer = valueDeserializer;
|
||||
this.collectionClass = collectionClass;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<E> deserialize(String topic, byte[] bytes) {
|
||||
if (bytes == null || bytes.length == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Collection<E> collection = getCollection();
|
||||
final DataInputStream dataInputStream = new DataInputStream(new ByteArrayInputStream(bytes));
|
||||
|
||||
try {
|
||||
final int records = dataInputStream.readInt();
|
||||
for (int i = 0; i < records; i++) {
|
||||
final byte[] valueBytes = new byte[dataInputStream.readInt()];
|
||||
final int read = dataInputStream.read(valueBytes);
|
||||
if (read != -1) {
|
||||
collection.add(valueDeserializer.deserialize(topic, valueBytes));
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (IOException e) {
|
||||
throw new RuntimeException("Unable to deserialize collection", e);
|
||||
}
|
||||
|
||||
return collection;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
|
||||
private Collection<E> getCollection() {
|
||||
Collection<E> collection;
|
||||
if (this.collectionClass.isAssignableFrom(ArrayList.class)) {
|
||||
collection = new ArrayList<>();
|
||||
}
|
||||
else if (this.collectionClass.isAssignableFrom(HashSet.class)) {
|
||||
collection = new HashSet<>();
|
||||
}
|
||||
else if (this.collectionClass.isAssignableFrom(LinkedList.class)) {
|
||||
collection = new LinkedList<>();
|
||||
}
|
||||
else if (this.collectionClass.isAssignableFrom(PriorityQueue.class)) {
|
||||
collection = new PriorityQueue<>();
|
||||
}
|
||||
else {
|
||||
throw new IllegalArgumentException("Unsupported collection type - " + this.collectionClass);
|
||||
}
|
||||
return collection;
|
||||
}
|
||||
}
|
||||
}
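
The javadoc example above uses the JsonSerde-backed constructor; the other constructor wires an explicit inner Serde instead. A small hedged sketch (topic name and element type are illustrative; imports as in the class above plus java.util.Arrays):

// A CollectionSerde backed by the plain String Serde, collecting into a LinkedList.
Serde<Collection<String>> listOfStrings =
        new CollectionSerde<>(Serdes.String(), LinkedList.class);

// Round trip: serialize three elements and read them back as a Collection<String>.
byte[] bytes = listOfStrings.serializer()
        .serialize("some-topic", new LinkedList<>(Arrays.asList("a", "b", "c")));
Collection<String> roundTripped = listOfStrings.deserializer().deserialize("some-topic", bytes);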
|
||||
@@ -16,22 +16,213 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.serde;
|
||||
|
||||
import org.springframework.messaging.converter.CompositeMessageConverter;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.common.serialization.Deserializer;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serializer;
|
||||
|
||||
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.converter.MessageConverter;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.MimeType;
|
||||
import org.springframework.util.MimeTypeUtils;
|
||||
|
||||
/**
|
||||
* This class provides the same functionality as {@link MessageConverterDelegateSerde} and is deprecated.
|
||||
* It is kept for backward compatibility reasons and will be removed in version 3.1.
|
||||
* A {@link Serde} implementation that wraps the list of {@link MessageConverter}s from
|
||||
* {@link CompositeMessageConverterFactory}.
|
||||
*
|
||||
* The primary motivation for this class is to provide an avro based {@link Serde} that is
|
||||
* compatible with the schema registry that Spring Cloud Stream provides. When using the
|
||||
* schema registry support from Spring Cloud Stream in a Kafka Streams binder based
|
||||
* application, the applications can deserialize the incoming Kafka Streams records using
|
||||
* the built in Avro {@link MessageConverter}. However, this same message conversion
|
||||
* approach will not work downstream in other operations in the topology for Kafka Streams
|
||||
* as some of them need a {@link Serde} instance that can talk to the Spring Cloud Stream
|
||||
* provided Schema Registry. This implementation will solve that problem.
|
||||
*
|
||||
* Only Avro and JSON based converters are exposed as binder provided {@link Serde}
|
||||
* implementations currently.
|
||||
*
|
||||
* Users of this class must call the
|
||||
* {@link CompositeNonNativeSerde#configure(Map, boolean)} method to configure the
|
||||
* {@link Serde} object. At the very least the configuration map must include a key called
|
||||
* "valueClass" to indicate the type of the target object for deserialization. If any
|
||||
* other content type other than JSON is needed (only Avro is available now other than
|
||||
* JSON), that needs to be included in the configuration map with the key "contentType".
|
||||
* For example,
|
||||
*
|
||||
* <pre class="code">
|
||||
* Map<String, Object> config = new HashMap<>();
|
||||
* config.put("valueClass", Foo.class);
|
||||
* config.put("contentType", "application/avro");
|
||||
* </pre>
|
||||
*
|
||||
* Then use the above map when calling the configure method.
|
||||
*
|
||||
* This class is only intended to be used when writing a Spring Cloud Stream Kafka Streams
|
||||
* application that uses Spring Cloud Stream schema registry for schema evolution.
|
||||
*
|
||||
* An instance of this class is provided as a bean by the binder configuration and
|
||||
* typically the applications can autowire that bean. This is the expected usage pattern
|
||||
* of this class.
|
||||
*
|
||||
* @param <T> type of the object to marshall
|
||||
* @author Soby Chacko
|
||||
* @since 2.1
|
||||
*
|
||||
* @deprecated in favor of {@link MessageConverterDelegateSerde}
|
||||
*/
|
||||
@Deprecated
|
||||
public class CompositeNonNativeSerde extends MessageConverterDelegateSerde {
|
||||
public class CompositeNonNativeSerde<T> implements Serde<T> {
|
||||
|
||||
private static final String VALUE_CLASS_HEADER = "valueClass";
|
||||
|
||||
private static final String AVRO_FORMAT = "avro";
|
||||
|
||||
private static final MimeType DEFAULT_AVRO_MIME_TYPE = new MimeType("application",
|
||||
"*+" + AVRO_FORMAT);
|
||||
|
||||
private final CompositeNonNativeDeserializer<T> compositeNonNativeDeserializer;
|
||||
|
||||
private final CompositeNonNativeSerializer<T> compositeNonNativeSerializer;
|
||||
|
||||
public CompositeNonNativeSerde(
|
||||
CompositeMessageConverterFactory compositeMessageConverterFactory) {
|
||||
this.compositeNonNativeDeserializer = new CompositeNonNativeDeserializer<>(
|
||||
compositeMessageConverterFactory);
|
||||
this.compositeNonNativeSerializer = new CompositeNonNativeSerializer<>(
|
||||
compositeMessageConverterFactory);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
this.compositeNonNativeDeserializer.configure(configs, isKey);
|
||||
this.compositeNonNativeSerializer.configure(configs, isKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public Serializer<T> serializer() {
|
||||
return this.compositeNonNativeSerializer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer<T> deserializer() {
|
||||
return this.compositeNonNativeDeserializer;
|
||||
}
|
||||
|
||||
private static MimeType resolveMimeType(Map<String, ?> configs) {
|
||||
if (configs.containsKey(MessageHeaders.CONTENT_TYPE)) {
|
||||
String contentType = (String) configs.get(MessageHeaders.CONTENT_TYPE);
|
||||
if (DEFAULT_AVRO_MIME_TYPE.equals(MimeTypeUtils.parseMimeType(contentType))) {
|
||||
return DEFAULT_AVRO_MIME_TYPE;
|
||||
}
|
||||
else if (contentType.contains("avro")) {
|
||||
return MimeTypeUtils.parseMimeType("application/avro");
|
||||
}
|
||||
else {
|
||||
return new MimeType("application", "json", StandardCharsets.UTF_8);
|
||||
}
|
||||
}
|
||||
else {
|
||||
return new MimeType("application", "json", StandardCharsets.UTF_8);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom {@link Deserializer} that uses the {@link CompositeMessageConverterFactory}.
|
||||
*
|
||||
* @param <U> parameterized target type for deserialization
|
||||
*/
|
||||
private static class CompositeNonNativeDeserializer<U> implements Deserializer<U> {
|
||||
|
||||
private final MessageConverter messageConverter;
|
||||
|
||||
private MimeType mimeType;
|
||||
|
||||
private Class<?> valueClass;
|
||||
|
||||
CompositeNonNativeDeserializer(
|
||||
CompositeMessageConverterFactory compositeMessageConverterFactory) {
|
||||
this.messageConverter = compositeMessageConverterFactory
|
||||
.getMessageConverterForAllRegistered();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
Assert.isTrue(configs.containsKey(VALUE_CLASS_HEADER),
|
||||
"Deserializers must provide a configuration for valueClass.");
|
||||
final Object valueClass = configs.get(VALUE_CLASS_HEADER);
|
||||
Assert.isTrue(valueClass instanceof Class,
|
||||
"Deserializers must provide a valid value for valueClass.");
|
||||
this.valueClass = (Class<?>) valueClass;
|
||||
this.mimeType = resolveMimeType(configs);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public U deserialize(String topic, byte[] data) {
|
||||
Message<?> message = MessageBuilder.withPayload(data)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, this.mimeType.toString())
|
||||
.build();
|
||||
U messageConverted = (U) this.messageConverter.fromMessage(message,
|
||||
this.valueClass);
|
||||
Assert.notNull(messageConverted, "Deserialization failed.");
|
||||
return messageConverted;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom {@link Serializer} that uses the {@link CompositeMessageConverterFactory}.
|
||||
*
|
||||
* @param <V> parameterized type for serialization
|
||||
*/
|
||||
private static class CompositeNonNativeSerializer<V> implements Serializer<V> {
|
||||
|
||||
private final MessageConverter messageConverter;
|
||||
|
||||
private MimeType mimeType;
|
||||
|
||||
CompositeNonNativeSerializer(
|
||||
CompositeMessageConverterFactory compositeMessageConverterFactory) {
|
||||
this.messageConverter = compositeMessageConverterFactory
|
||||
.getMessageConverterForAllRegistered();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
this.mimeType = resolveMimeType(configs);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] serialize(String topic, V data) {
|
||||
Message<?> message = MessageBuilder.withPayload(data).build();
|
||||
Map<String, Object> headers = new HashMap<>(message.getHeaders());
|
||||
headers.put(MessageHeaders.CONTENT_TYPE, this.mimeType.toString());
|
||||
MessageHeaders messageHeaders = new MessageHeaders(headers);
|
||||
final Object payload = this.messageConverter
|
||||
.toMessage(message.getPayload(), messageHeaders).getPayload();
|
||||
return (byte[]) payload;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
public CompositeNonNativeSerde(CompositeMessageConverter compositeMessageConverter) {
|
||||
super(compositeMessageConverter);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,228 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.serde;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.common.serialization.Deserializer;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serializer;
|
||||
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.converter.CompositeMessageConverter;
|
||||
import org.springframework.messaging.converter.MessageConverter;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.MimeType;
|
||||
import org.springframework.util.MimeTypeUtils;
|
||||
|
||||
/**
|
||||
* A {@link Serde} implementation that wraps the list of {@link MessageConverter}s from
|
||||
* {@link CompositeMessageConverter}.
|
||||
*
|
||||
* The primary motivation for this class is to provide an avro based {@link Serde} that is
|
||||
* compatible with the schema registry that Spring Cloud Stream provides. When using the
|
||||
* schema registry support from Spring Cloud Stream in a Kafka Streams binder based
|
||||
* application, the applications can deserialize the incoming Kafka Streams records using
|
||||
* the built in Avro {@link MessageConverter}. However, this same message conversion
|
||||
* approach will not work downstream in other operations in the topology for Kafka Streams
|
||||
* as some of them need a {@link Serde} instance that can talk to the Spring Cloud Stream
|
||||
* provided Schema Registry. This implementation will solve that problem.
|
||||
*
|
||||
* Only Avro and JSON based converters are exposed as binder provided {@link Serde}
|
||||
* implementations currently.
|
||||
*
|
||||
* Users of this class must call the
|
||||
* {@link MessageConverterDelegateSerde#configure(Map, boolean)} method to configure the
|
||||
* {@link Serde} object. At the very least the configuration map must include a key called
|
||||
* "valueClass" to indicate the type of the target object for deserialization. If any
|
||||
* other content type other than JSON is needed (only Avro is available now other than
|
||||
* JSON), that needs to be included in the configuration map with the key "contentType".
|
||||
* For example,
|
||||
*
|
||||
* <pre class="code">
|
||||
* Map<String, Object> config = new HashMap<>();
|
||||
* config.put("valueClass", Foo.class);
|
||||
* config.put("contentType", "application/avro");
|
||||
* </pre>
|
||||
*
|
||||
* Then use the above map when calling the configure method.
|
||||
*
|
||||
* This class is only intended to be used when writing a Spring Cloud Stream Kafka Streams
|
||||
* application that uses Spring Cloud Stream schema registry for schema evolution.
|
||||
*
|
||||
* An instance of this class is provided as a bean by the binder configuration and
|
||||
* typically the applications can autowire that bean. This is the expected usage pattern
|
||||
* of this class.
|
||||
*
|
||||
* @param <T> type of the object to marshall
|
||||
* @author Soby Chacko
|
||||
* @since 3.0
|
||||
* @deprecated in favor of other schema registry providers instead of Spring Cloud Schema Registry. See its motivation above.
|
||||
*/
|
||||
@Deprecated
|
||||
public class MessageConverterDelegateSerde<T> implements Serde<T> {
|
||||
|
||||
private static final String VALUE_CLASS_HEADER = "valueClass";
|
||||
|
||||
private static final String AVRO_FORMAT = "avro";
|
||||
|
||||
private static final MimeType DEFAULT_AVRO_MIME_TYPE = new MimeType("application",
|
||||
"*+" + AVRO_FORMAT);
|
||||
|
||||
private final MessageConverterDelegateDeserializer<T> messageConverterDelegateDeserializer;
|
||||
|
||||
private final MessageConverterDelegateSerializer<T> messageConverterDelegateSerializer;
|
||||
|
||||
public MessageConverterDelegateSerde(
|
||||
CompositeMessageConverter compositeMessageConverter) {
|
||||
this.messageConverterDelegateDeserializer = new MessageConverterDelegateDeserializer<>(
|
||||
compositeMessageConverter);
|
||||
this.messageConverterDelegateSerializer = new MessageConverterDelegateSerializer<>(
|
||||
compositeMessageConverter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
this.messageConverterDelegateDeserializer.configure(configs, isKey);
|
||||
this.messageConverterDelegateSerializer.configure(configs, isKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public Serializer<T> serializer() {
|
||||
return this.messageConverterDelegateSerializer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer<T> deserializer() {
|
||||
return this.messageConverterDelegateDeserializer;
|
||||
}
|
||||
|
||||
private static MimeType resolveMimeType(Map<String, ?> configs) {
|
||||
if (configs.containsKey(MessageHeaders.CONTENT_TYPE)) {
|
||||
String contentType = (String) configs.get(MessageHeaders.CONTENT_TYPE);
|
||||
if (DEFAULT_AVRO_MIME_TYPE.equals(MimeTypeUtils.parseMimeType(contentType))) {
|
||||
return DEFAULT_AVRO_MIME_TYPE;
|
||||
}
|
||||
else if (contentType.contains("avro")) {
|
||||
return MimeTypeUtils.parseMimeType("application/avro");
|
||||
}
|
||||
else {
|
||||
return new MimeType("application", "json", StandardCharsets.UTF_8);
|
||||
}
|
||||
}
|
||||
else {
|
||||
return new MimeType("application", "json", StandardCharsets.UTF_8);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom {@link Deserializer} that uses the {@link org.springframework.cloud.stream.converter.CompositeMessageConverterFactory}.
|
||||
*
|
||||
* @param <U> parameterized target type for deserialization
|
||||
*/
|
||||
private static class MessageConverterDelegateDeserializer<U> implements Deserializer<U> {
|
||||
|
||||
private final MessageConverter messageConverter;
|
||||
|
||||
private MimeType mimeType;
|
||||
|
||||
private Class<?> valueClass;
|
||||
|
||||
MessageConverterDelegateDeserializer(
|
||||
CompositeMessageConverter compositeMessageConverter) {
|
||||
this.messageConverter = compositeMessageConverter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
Assert.isTrue(configs.containsKey(VALUE_CLASS_HEADER),
|
||||
"Deserializers must provide a configuration for valueClass.");
|
||||
final Object valueClass = configs.get(VALUE_CLASS_HEADER);
|
||||
Assert.isTrue(valueClass instanceof Class,
|
||||
"Deserializers must provide a valid value for valueClass.");
|
||||
this.valueClass = (Class<?>) valueClass;
|
||||
this.mimeType = resolveMimeType(configs);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public U deserialize(String topic, byte[] data) {
|
||||
Message<?> message = MessageBuilder.withPayload(data)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, this.mimeType.toString())
|
||||
.build();
|
||||
U messageConverted = (U) this.messageConverter.fromMessage(message,
|
||||
this.valueClass);
|
||||
Assert.notNull(messageConverted, "Deserialization failed.");
|
||||
return messageConverted;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom {@link Serializer} that uses the {@link org.springframework.cloud.stream.converter.CompositeMessageConverterFactory}.
|
||||
*
|
||||
* @param <V> parameterized type for serialization
|
||||
*/
|
||||
private static class MessageConverterDelegateSerializer<V> implements Serializer<V> {
|
||||
|
||||
private final MessageConverter messageConverter;
|
||||
|
||||
private MimeType mimeType;
|
||||
|
||||
MessageConverterDelegateSerializer(
|
||||
CompositeMessageConverter compositeMessageConverter) {
|
||||
this.messageConverter = compositeMessageConverter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(Map<String, ?> configs, boolean isKey) {
|
||||
this.mimeType = resolveMimeType(configs);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] serialize(String topic, V data) {
|
||||
Message<?> message = MessageBuilder.withPayload(data).build();
|
||||
Map<String, Object> headers = new HashMap<>(message.getHeaders());
|
||||
headers.put(MessageHeaders.CONTENT_TYPE, this.mimeType.toString());
|
||||
MessageHeaders messageHeaders = new MessageHeaders(headers);
|
||||
final Object payload = this.messageConverter
|
||||
.toMessage(message.getPayload(), messageHeaders).getPayload();
|
||||
return (byte[]) payload;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
// No-op
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
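
A hedged sketch of the configure contract described in the javadoc above; the Foo type and the surrounding wiring are placeholders, while the map keys mirror the valueClass and contentType handling shown in this class:

// Typically the Serde comes from the binder configuration, e.g.
// @Autowired MessageConverterDelegateSerde<Foo> messageConverterDelegateSerde;

Map<String, Object> config = new HashMap<>();
config.put("valueClass", Foo.class);            // target type for deserialization (required)
config.put("contentType", "application/avro");  // omit for the JSON default
messageConverterDelegateSerde.configure(config, false);

// The configured instance can then back downstream operations, for example:
// Materialized.with(Serdes.String(), messageConverterDelegateSerde)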
|
||||
@@ -1,5 +1,9 @@
|
||||
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
org.springframework.cloud.stream.binder.kafka.streams.ExtendedBindingHandlerMappingsProviderAutoConfiguration,\
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsBinderSupportAutoConfiguration,\
org.springframework.cloud.stream.binder.kafka.streams.function.KafkaStreamsFunctionAutoConfiguration,\
org.springframework.cloud.stream.binder.kafka.streams.endpoint.KafkaStreamsTopologyEndpointAutoConfiguration
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsApplicationSupportAutoConfiguration,\
org.springframework.cloud.stream.binder.kafka.streams.function.KafkaStreamsFunctionAutoConfiguration

org.springframework.cloud.function.context.WrapperDetector=\
org.springframework.cloud.stream.binder.kafka.streams.function.KafkaStreamsFunctionWrapperDetector
@@ -1,83 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* Tests for {@link ExtendedBindingHandlerMappingsProviderAutoConfiguration}.
|
||||
*/
|
||||
class ExtendedBindingHandlerMappingsProviderAutoConfigurationTests {
|
||||
|
||||
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
|
||||
.withUserConfiguration(KafkaStreamsTestApp.class)
|
||||
.withPropertyValues(
|
||||
"spring.cloud.stream.kafka.streams.default.consumer.application-id: testApp123",
|
||||
"spring.cloud.stream.kafka.streams.default.consumer.consumed-as: default-consumer",
|
||||
"spring.cloud.stream.kafka.streams.default.consumer.materialized-as: default-materializer",
|
||||
"spring.cloud.stream.kafka.streams.default.producer.produced-as: default-producer",
|
||||
"spring.cloud.stream.kafka.streams.default.producer.key-serde: default-foo");
|
||||
|
||||
@Test
|
||||
void defaultsUsedWhenNoCustomBindingProperties() {
|
||||
this.contextRunner.run((context) -> {
|
||||
assertThat(context)
|
||||
.hasNotFailed()
|
||||
.hasSingleBean(KafkaStreamsExtendedBindingProperties.class);
|
||||
KafkaStreamsExtendedBindingProperties extendedBindingProperties = context.getBean(KafkaStreamsExtendedBindingProperties.class);
|
||||
assertThat(extendedBindingProperties.getExtendedConsumerProperties("process-in-0"))
|
||||
.hasFieldOrPropertyWithValue("applicationId", "testApp123")
|
||||
.hasFieldOrPropertyWithValue("consumedAs", "default-consumer")
|
||||
.hasFieldOrPropertyWithValue("materializedAs", "default-materializer");
|
||||
assertThat(extendedBindingProperties.getExtendedProducerProperties("process-out-0"))
|
||||
.hasFieldOrPropertyWithValue("producedAs", "default-producer")
|
||||
.hasFieldOrPropertyWithValue("keySerde", "default-foo");
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
void defaultsRespectedWhenCustomBindingProperties() {
|
||||
this.contextRunner
|
||||
.withPropertyValues(
|
||||
"spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.consumed-as: custom-consumer",
|
||||
"spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.produced-as: custom-producer")
|
||||
.run((context) -> {
|
||||
assertThat(context)
|
||||
.hasNotFailed()
|
||||
.hasSingleBean(KafkaStreamsExtendedBindingProperties.class);
|
||||
KafkaStreamsExtendedBindingProperties extendedBindingProperties = context.getBean(KafkaStreamsExtendedBindingProperties.class);
|
||||
assertThat(extendedBindingProperties.getExtendedConsumerProperties("process-in-0"))
|
||||
.hasFieldOrPropertyWithValue("applicationId", "testApp123")
|
||||
.hasFieldOrPropertyWithValue("consumedAs", "custom-consumer")
|
||||
.hasFieldOrPropertyWithValue("materializedAs", "default-materializer");
|
||||
assertThat(extendedBindingProperties.getExtendedProducerProperties("process-out-0"))
|
||||
.hasFieldOrPropertyWithValue("producedAs", "custom-producer")
|
||||
.hasFieldOrPropertyWithValue("keySerde", "default-foo");
|
||||
});
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
static class KafkaStreamsTestApp {
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,265 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2020 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.common.header.internals.RecordHeader;
|
||||
import org.apache.kafka.common.header.internals.RecordHeaders;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.support.serializer.JsonDeserializer;
|
||||
import org.springframework.kafka.support.serializer.JsonSerializer;
|
||||
import org.springframework.kafka.test.EmbeddedKafkaBroker;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class KafkaStreamsEventTypeRoutingTests {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
|
||||
"foo-1", "foo-2");
|
||||
|
||||
private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();
|
||||
|
||||
private static Consumer<Integer, Foo> consumer;
|
||||
|
||||
private static CountDownLatch LATCH = new CountDownLatch(3);
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() {
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("test-group-1", "false",
|
||||
embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put("value.deserializer", JsonDeserializer.class);
|
||||
consumerProps.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
|
||||
DefaultKafkaConsumerFactory<Integer, Foo> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromEmbeddedTopics(consumer, "foo-2");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
//See https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1003 for more context on this test.
|
||||
@Test
|
||||
public void testRoutingWorksBasedOnEventTypes() {
|
||||
SpringApplication app = new SpringApplication(EventTypeRoutingTestConfig.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=process",
|
||||
"--spring.cloud.stream.bindings.process-in-0.destination=foo-1",
|
||||
"--spring.cloud.stream.bindings.process-out-0.destination=foo-2",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.eventTypes=foo,bar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.process.applicationId=process-id-foo-0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put("value.serializer", JsonSerializer.class);
|
||||
DefaultKafkaProducerFactory<Integer, Foo> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, Foo> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foo-1");
|
||||
Foo foo1 = new Foo();
|
||||
foo1.setFoo("foo-1");
|
||||
Headers headers = new RecordHeaders();
|
||||
headers.add(new RecordHeader("event_type", "foo".getBytes()));
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord1 = new ProducerRecord<>("foo-1", 0, 56, foo1, headers);
|
||||
template.send(producerRecord1);
|
||||
|
||||
Foo foo2 = new Foo();
|
||||
foo2.setFoo("foo-2");
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord2 = new ProducerRecord<>("foo-1", 0, 57, foo2);
|
||||
template.send(producerRecord2);
|
||||
|
||||
Foo foo3 = new Foo();
|
||||
foo3.setFoo("foo-3");
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord3 = new ProducerRecord<>("foo-1", 0, 58, foo3, headers);
|
||||
template.send(producerRecord3);
|
||||
|
||||
Foo foo4 = new Foo();
|
||||
foo4.setFoo("foo-4");
|
||||
Headers headers1 = new RecordHeaders();
|
||||
headers1.add(new RecordHeader("event_type", "bar".getBytes()));
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord4 = new ProducerRecord<>("foo-1", 0, 59, foo4, headers1);
|
||||
template.send(producerRecord4);
|
||||
|
||||
final ConsumerRecords<Integer, Foo> records = KafkaTestUtils.getRecords(consumer);
|
||||
|
||||
assertThat(records.count()).isEqualTo(3);
|
||||
|
||||
List<Integer> keys = new ArrayList<>();
|
||||
List<Foo> values = new ArrayList<>();
|
||||
|
||||
records.forEach(integerFooConsumerRecord -> {
|
||||
keys.add(integerFooConsumerRecord.key());
|
||||
values.add(integerFooConsumerRecord.value());
|
||||
});
|
||||
|
||||
assertThat(keys).containsExactlyInAnyOrder(56, 58, 59);
|
||||
assertThat(values).containsExactlyInAnyOrder(foo1, foo3, foo4);
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRoutingWorksBasedOnEventTypesConsumer() throws Exception {
|
||||
SpringApplication app = new SpringApplication(EventTypeRoutingTestConfig.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=consumer",
|
||||
"--spring.cloud.stream.bindings.consumer-in-0.destination=foo-consumer-1",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.consumer-in-0.consumer.eventTypes=foo,bar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.consumer.applicationId=consumer-id-foo-0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put("value.serializer", JsonSerializer.class);
|
||||
DefaultKafkaProducerFactory<Integer, Foo> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, Foo> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foo-consumer-1");
|
||||
Foo foo1 = new Foo();
|
||||
foo1.setFoo("foo-1");
|
||||
Headers headers = new RecordHeaders();
|
||||
headers.add(new RecordHeader("event_type", "foo".getBytes()));
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord1 = new ProducerRecord<>("foo-consumer-1", 0, 56, foo1, headers);
|
||||
template.send(producerRecord1);
|
||||
|
||||
Foo foo2 = new Foo();
|
||||
foo2.setFoo("foo-2");
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord2 = new ProducerRecord<>("foo-consumer-1", 0, 57, foo2);
|
||||
template.send(producerRecord2);
|
||||
|
||||
Foo foo3 = new Foo();
|
||||
foo3.setFoo("foo-3");
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord3 = new ProducerRecord<>("foo-consumer-1", 0, 58, foo3, headers);
|
||||
template.send(producerRecord3);
|
||||
|
||||
Foo foo4 = new Foo();
|
||||
foo4.setFoo("foo-4");
|
||||
Headers headers1 = new RecordHeaders();
|
||||
headers1.add(new RecordHeader("event_type", "bar".getBytes()));
|
||||
|
||||
final ProducerRecord<Integer, Foo> producerRecord4 = new ProducerRecord<>("foo-consumer-1", 0, 59, foo4, headers1);
|
||||
template.send(producerRecord4);
|
||||
|
||||
Assert.isTrue(LATCH.await(10, TimeUnit.SECONDS), "Foo");
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class EventTypeRoutingTestConfig {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<Integer, Foo>, KStream<Integer, Foo>> process() {
|
||||
return input -> input;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public java.util.function.Consumer<KTable<Integer, Foo>> consumer() {
|
||||
return ktable -> ktable.toStream().foreach((key, value) -> {
|
||||
LATCH.countDown();
|
||||
});
|
||||
}
|
||||
|
||||
@Bean
|
||||
public java.util.function.Consumer<GlobalKTable<Integer, Foo>> global() {
|
||||
return ktable -> {
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class Foo {
|
||||
String foo;
|
||||
|
||||
public String getFoo() {
|
||||
return foo;
|
||||
}
|
||||
|
||||
public void setFoo(String foo) {
|
||||
this.foo = foo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
Foo foo1 = (Foo) o;
|
||||
return Objects.equals(foo, foo1.foo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(foo);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,468 +0,0 @@
|
||||
/*
|
||||
* Copyright 2021-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||
import org.apache.kafka.common.serialization.StringSerializer;
|
||||
import org.apache.kafka.streams.kstream.ForeachAction;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.EmbeddedKafkaBroker;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class KafkaStreamsFunctionCompositionTests {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
|
||||
"fooFuncanotherFooFunc-out-0", "bar");
|
||||
|
||||
private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
private static final CountDownLatch countDownLatch1 = new CountDownLatch(1);
|
||||
private static final CountDownLatch countDownLatch2 = new CountDownLatch(1);
|
||||
private static final CountDownLatch countDownLatch3 = new CountDownLatch(2);
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() {
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("fn-composition-group", "false",
|
||||
embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromEmbeddedTopics(consumer, "fooFuncanotherFooFunc-out-0", "bar");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBasicFunctionCompositionWithDefaultDestination() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig1.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=fooFunc|anotherFooFunc;anotherProcess",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("fooFuncanotherFooFunc-in-0");
|
||||
template.sendDefault("foobar!!");
|
||||
//Verify non-composed functions can be run standalone with composed function chains, i.e. foo|bar;buzz
|
||||
template.setDefaultTopic("anotherProcess-in-0");
|
||||
template.sendDefault("this is crazy!!!");
|
||||
Thread.sleep(1000);
|
||||
|
||||
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "fooFuncanotherFooFunc-out-0");
|
||||
assertThat(cr.value().contains("foobar!!")).isTrue();
|
||||
|
||||
Assert.isTrue(countDownLatch1.await(5, TimeUnit.SECONDS), "anotherProcess consumer didn't trigger.");
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBasicFunctionCompositionWithDestination() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig1.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=fooFunc|anotherFooFunc;anotherProcess",
|
||||
"--spring.cloud.stream.bindings.fooFuncanotherFooFunc-in-0.destination=foo",
|
||||
"--spring.cloud.stream.bindings.fooFuncanotherFooFunc-out-0.destination=bar",
|
||||
"--spring.cloud.stream.bindings.anotherProcess-in-0.destination=buzz",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foo");
|
||||
template.sendDefault("foobar!!");
|
||||
template.setDefaultTopic("buzz");
|
||||
template.sendDefault("this is crazy!!!");
|
||||
Thread.sleep(1000);
|
||||
|
||||
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "bar");
|
||||
assertThat(cr.value().contains("foobar!!")).isTrue();
|
||||
|
||||
Assert.isTrue(countDownLatch1.await(5, TimeUnit.SECONDS), "anotherProcess consumer didn't trigger.");
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFunctionToConsumerComposition() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig2.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=fooFunc|anotherProcess",
|
||||
"--spring.cloud.stream.bindings.fooFuncanotherProcess-in-0.destination=foo",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foo");
|
||||
template.sendDefault("foobar!!");
|
||||
|
||||
Thread.sleep(1000);
|
||||
|
||||
Assert.isTrue(countDownLatch2.await(5, TimeUnit.SECONDS), "anotherProcess consumer didn't trigger.");
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBiFunctionToConsumerComposition() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig3.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=fooBiFunc|anotherProcess",
|
||||
"--spring.cloud.stream.bindings.fooBiFuncanotherProcess-in-0.destination=foo",
|
||||
"--spring.cloud.stream.bindings.fooBiFuncanotherProcess-in-1.destination=foo-1",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foo");
|
||||
template.sendDefault("foobar!!");
|
||||
|
||||
template.setDefaultTopic("foo-1");
|
||||
template.sendDefault("another foobar!!");
|
||||
|
||||
Thread.sleep(1000);
|
||||
|
||||
Assert.isTrue(countDownLatch3.await(5, TimeUnit.SECONDS), "anotherProcess consumer didn't trigger.");
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testChainedFunctionsAsComposed() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig4.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=fooBiFunc|anotherFooFunc|yetAnotherFooFunc|lastFunctionInChain",
|
||||
"--spring.cloud.stream.function.bindings.fooBiFuncanotherFooFuncyetAnotherFooFunclastFunctionInChain-in-0=input1",
|
||||
"--spring.cloud.stream.function.bindings.fooBiFuncanotherFooFuncyetAnotherFooFunclastFunctionInChain-in-1=input2",
|
||||
"--spring.cloud.stream.function.bindings.fooBiFuncanotherFooFuncyetAnotherFooFunclastFunctionInChain-out-0=output",
|
||||
"--spring.cloud.stream.bindings.input1.destination=my-foo-1",
|
||||
"--spring.cloud.stream.bindings.input2.destination=my-foo-2",
|
||||
"--spring.cloud.stream.bindings.output.destination=bar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<String, String> template = new KafkaTemplate<>(pf, true);
|
||||
|
||||
template.setDefaultTopic("my-foo-2");
|
||||
template.sendDefault("foo-1", "foo2");
|
||||
|
||||
template.setDefaultTopic("my-foo-1");
|
||||
template.sendDefault("foo-1", "foo1");
|
||||
|
||||
Thread.sleep(1000);
|
||||
|
||||
final ConsumerRecords<String, String> records = KafkaTestUtils.getRecords(consumer);
|
||||
assertThat(records.iterator().hasNext()).isTrue();
|
||||
assertThat(records.iterator().next().value().equals("foo1foo2From-anotherFooFuncFrom-yetAnotherFooFuncFrom-lastFunctionInChain")).isTrue();
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFirstFunctionCurriedThenComposeWithOtherFunctions() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig5.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=curriedFunc|anotherFooFunc|yetAnotherFooFunc|lastFunctionInChain",
|
||||
"--spring.cloud.stream.function.bindings.curriedFuncanotherFooFuncyetAnotherFooFunclastFunctionInChain-in-0=input1",
|
||||
"--spring.cloud.stream.function.bindings.curriedFuncanotherFooFuncyetAnotherFooFunclastFunctionInChain-in-1=input2",
|
||||
"--spring.cloud.stream.function.bindings.curriedFuncanotherFooFuncyetAnotherFooFunclastFunctionInChain-out-0=output",
|
||||
"--spring.cloud.stream.bindings.input1.destination=my-foo-1",
|
||||
"--spring.cloud.stream.bindings.input2.destination=my-foo-2",
|
||||
"--spring.cloud.stream.bindings.output.destination=bar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<String, String> template = new KafkaTemplate<>(pf, true);
|
||||
|
||||
template.setDefaultTopic("my-foo-2");
|
||||
template.sendDefault("foo-1", "foo2");
|
||||
|
||||
Thread.sleep(1000);
|
||||
|
||||
template.setDefaultTopic("my-foo-1");
|
||||
template.sendDefault("foo-1", "foo1");
|
||||
|
||||
Thread.sleep(1000);
|
||||
|
||||
final ConsumerRecords<String, String> records = KafkaTestUtils.getRecords(consumer);
|
||||
assertThat(records.iterator().hasNext()).isTrue();
|
||||
assertThat(records.iterator().next().value().equals("foo1foo2From-anotherFooFuncFrom-yetAnotherFooFuncFrom-lastFunctionInChain")).isTrue();
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFunctionToConsumerCompositionWithFunctionProducesKTable() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(FunctionCompositionConfig6.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=fooFunc|anotherProcess",
|
||||
"--spring.cloud.stream.bindings.fooFuncanotherProcess-in-0.destination=foo",
|
||||
"--spring.cloud.stream.bindings.fooFuncanotherProcess-out-0.destination=bar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<String, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foo");
|
||||
template.sendDefault("foo", "foobar!!");
|
||||
|
||||
Thread.sleep(1000);
|
||||
|
||||
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "bar");
|
||||
assertThat(cr.value().contains("foobar!!")).isTrue();
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class FunctionCompositionConfig1 {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> fooFunc() {
|
||||
return input -> input.peek((s, s2) -> {
|
||||
System.out.println("hello: " + s2);
|
||||
});
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> anotherFooFunc() {
|
||||
return input -> input.peek((s, s2) -> System.out.println("hello Foo: " + s2));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public java.util.function.Consumer<KStream<String, String>> anotherProcess() {
|
||||
return c -> c.foreach((s, s2) -> {
|
||||
System.out.println("s2s2s2::" + s2);
|
||||
countDownLatch1.countDown();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class FunctionCompositionConfig2 {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> fooFunc() {
|
||||
return input -> input.peek((s, s2) -> {
|
||||
System.out.println("hello: " + s2);
|
||||
});
|
||||
}
|
||||
|
||||
@Bean
|
||||
public java.util.function.Consumer<KStream<String, String>> anotherProcess() {
|
||||
return c -> c.foreach((s, s2) -> {
|
||||
System.out.println("s2s2s2::" + s2);
|
||||
countDownLatch2.countDown();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class FunctionCompositionConfig3 {
|
||||
|
||||
@Bean
|
||||
public BiFunction<KStream<String, String>, KStream<String, String>, KStream<String, String>> fooBiFunc() {
|
||||
return KStream::merge;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public java.util.function.Consumer<KStream<String, String>> anotherProcess() {
|
||||
return c -> c.foreach((s, s2) -> {
|
||||
System.out.println("s2s2s2::" + s2);
|
||||
countDownLatch3.countDown();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class FunctionCompositionConfig4 {
|
||||
|
||||
@Bean
|
||||
public BiFunction<KStream<String, String>, KTable<String, String>, KStream<String, String>> fooBiFunc() {
|
||||
return (a, b) -> a.join(b, (value1, value2) -> value1 + value2);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> anotherFooFunc() {
|
||||
return input -> input.mapValues(value -> value + "From-anotherFooFunc");
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> yetAnotherFooFunc() {
|
||||
return input -> input.mapValues(value -> value + "From-yetAnotherFooFunc");
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> lastFunctionInChain() {
|
||||
return input -> input.mapValues(value -> value + "From-lastFunctionInChain");
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class FunctionCompositionConfig5 {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, Function<KTable<String, String>, KStream<String, String>>> curriedFunc() {
|
||||
return a -> b ->
|
||||
a.join(b, (value1, value2) -> value1 + value2);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> anotherFooFunc() {
|
||||
return input -> input.mapValues(value -> value + "From-anotherFooFunc");
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> yetAnotherFooFunc() {
|
||||
return input -> input.mapValues(value -> value + "From-yetAnotherFooFunc");
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>> lastFunctionInChain() {
|
||||
return input -> input.mapValues(value -> value + "From-lastFunctionInChain");
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class FunctionCompositionConfig6 {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KTable<String, String>> fooFunc() {
|
||||
return ks -> {
|
||||
ks.foreach(new ForeachAction<String, String>() {
|
||||
@Override
|
||||
public void apply(String key, String value) {
|
||||
System.out.println();
|
||||
}
|
||||
});
|
||||
return ks.toTable();
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KTable<String, String>, KStream<String, String>> anotherProcess() {
|
||||
return KTable::toStream;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,248 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.EmbeddedKafkaBroker;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ReflectionUtils;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class MultipleFunctionsInSameAppTests {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
|
||||
"coffee", "electronics");
|
||||
|
||||
private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
private static CountDownLatch countDownLatch = new CountDownLatch(2);
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() {
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("purchase-groups", "false",
|
||||
embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromEmbeddedTopics(consumer, "coffee", "electronics");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testMultiFunctionsInSameApp() throws InterruptedException {
|
||||
SpringApplication app = new SpringApplication(MultipleFunctionsInSameApp.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=process;analyze;anotherProcess;yetAnotherProcess",
|
||||
"--spring.cloud.stream.bindings.process-in-0.destination=purchases",
|
||||
"--spring.cloud.stream.bindings.process-out-0.destination=coffee",
|
||||
"--spring.cloud.stream.bindings.process-out-1.destination=electronics",
|
||||
"--spring.cloud.stream.bindings.analyze-in-0.destination=coffee",
|
||||
"--spring.cloud.stream.bindings.analyze-in-1.destination=electronics",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.analyze.applicationId=analyze-id-0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.process.applicationId=process-id-0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.bindings.process-in-0.consumer.concurrency=2",
|
||||
"--spring.cloud.stream.bindings.analyze-in-0.consumer.concurrency=1",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.num.stream.threads=3",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.process.configuration.client.id=process-client",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.analyze.configuration.client.id=analyze-client",
|
||||
"--spring.cloud.stream.kafka.streams.binder.functions.anotherProcess.configuration.client.id=anotherProcess-client",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
receiveAndValidate("purchases", "coffee", "electronics");
|
||||
|
||||
StreamsBuilderFactoryBean processStreamsBuilderFactoryBean = context
|
||||
.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
|
||||
|
||||
StreamsBuilderFactoryBean analyzeStreamsBuilderFactoryBean = context
|
||||
.getBean("&stream-builder-analyze", StreamsBuilderFactoryBean.class);
|
||||
|
||||
StreamsBuilderFactoryBean anotherProcessStreamsBuilderFactoryBean = context
|
||||
.getBean("&stream-builder-anotherProcess", StreamsBuilderFactoryBean.class);
|
||||
|
||||
final Properties processStreamsConfiguration = processStreamsBuilderFactoryBean.getStreamsConfiguration();
|
||||
final Properties analyzeStreamsConfiguration = analyzeStreamsBuilderFactoryBean.getStreamsConfiguration();
|
||||
final Properties anotherProcessStreamsConfiguration = anotherProcessStreamsBuilderFactoryBean.getStreamsConfiguration();
|
||||
|
||||
assertThat(processStreamsConfiguration.getProperty("client.id")).isEqualTo("process-client");
|
||||
assertThat(analyzeStreamsConfiguration.getProperty("client.id")).isEqualTo("analyze-client");
|
||||
|
||||
Integer concurrency = (Integer) processStreamsConfiguration.get(StreamsConfig.NUM_STREAM_THREADS_CONFIG);
|
||||
assertThat(concurrency).isEqualTo(2);
|
||||
concurrency = (Integer) analyzeStreamsConfiguration.get(StreamsConfig.NUM_STREAM_THREADS_CONFIG);
|
||||
assertThat(concurrency).isEqualTo(1);
|
||||
assertThat(anotherProcessStreamsConfiguration.get(StreamsConfig.NUM_STREAM_THREADS_CONFIG)).isEqualTo("3");
|
||||
|
||||
final KafkaStreamsBindingInformationCatalogue catalogue = context.getBean(KafkaStreamsBindingInformationCatalogue.class);
|
||||
Field field = ReflectionUtils.findField(KafkaStreamsBindingInformationCatalogue.class, "outboundKStreamResolvables", Map.class);
|
||||
ReflectionUtils.makeAccessible(field);
|
||||
final Map<Object, ResolvableType> outboundKStreamResolvables = (Map<Object, ResolvableType>) ReflectionUtils.getField(field, catalogue);
|
||||
// Since we have 2 functions with return types -- one is an array return type with 2 bindings -- assert that
|
||||
// the catalogue contains outbound type information for all three bindings.
|
||||
assertThat(outboundKStreamResolvables.size()).isEqualTo(3);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMultiFunctionsInSameAppWithMultiBinders() throws Exception {
|
||||
SpringApplication app = new SpringApplication(MultipleFunctionsInSameApp.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=process;analyze",
|
||||
"--spring.cloud.stream.bindings.process-in-0.destination=purchases",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.startOffset=latest",
|
||||
"--spring.cloud.stream.bindings.process-in-0.binder=kafka1",
|
||||
"--spring.cloud.stream.bindings.process-out-0.destination=coffee",
|
||||
"--spring.cloud.stream.bindings.process-out-0.binder=kafka1",
|
||||
"--spring.cloud.stream.bindings.process-out-1.destination=electronics",
|
||||
"--spring.cloud.stream.bindings.process-out-1.binder=kafka1",
|
||||
"--spring.cloud.stream.bindings.analyze-in-0.destination=coffee",
|
||||
"--spring.cloud.stream.bindings.analyze-in-0.binder=kafka2",
|
||||
"--spring.cloud.stream.bindings.analyze-in-1.destination=electronics",
|
||||
"--spring.cloud.stream.bindings.analyze-in-1.binder=kafka2",
|
||||
"--spring.cloud.stream.bindings.analyze-in-0.consumer.concurrency=2",
|
||||
"--spring.cloud.stream.binders.kafka1.type=kstream",
|
||||
"--spring.cloud.stream.binders.kafka1.environment.spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.binders.kafka1.environment.spring.cloud.stream.kafka.streams.binder.applicationId=my-app-1",
|
||||
"--spring.cloud.stream.binders.kafka1.environment.spring.cloud.stream.kafka.streams.binder.configuration.client.id=process-client",
|
||||
"--spring.cloud.stream.binders.kafka2.type=kstream",
|
||||
"--spring.cloud.stream.binders.kafka2.environment.spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.binders.kafka2.environment.spring.cloud.stream.kafka.streams.binder.applicationId=my-app-2",
|
||||
"--spring.cloud.stream.binders.kafka2.environment.spring.cloud.stream.kafka.streams.binder.configuration.client.id=analyze-client")) {
|
||||
|
||||
Thread.sleep(1000);
|
||||
receiveAndValidate("purchases", "coffee", "electronics");
|
||||
|
||||
StreamsBuilderFactoryBean processStreamsBuilderFactoryBean = context
|
||||
.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
|
||||
|
||||
StreamsBuilderFactoryBean analyzeStreamsBuilderFactoryBean = context
|
||||
.getBean("&stream-builder-analyze", StreamsBuilderFactoryBean.class);
|
||||
|
||||
final Properties processStreamsConfiguration = processStreamsBuilderFactoryBean.getStreamsConfiguration();
|
||||
final Properties analyzeStreamsConfiguration = analyzeStreamsBuilderFactoryBean.getStreamsConfiguration();
|
||||
|
||||
assertThat(processStreamsConfiguration.getProperty("application.id")).isEqualTo("my-app-1");
|
||||
assertThat(analyzeStreamsConfiguration.getProperty("application.id")).isEqualTo("my-app-2");
|
||||
assertThat(processStreamsConfiguration.getProperty("client.id")).isEqualTo("process-client");
|
||||
assertThat(analyzeStreamsConfiguration.getProperty("client.id")).isEqualTo("analyze-client");
|
||||
|
||||
Integer concurrency = (Integer) analyzeStreamsConfiguration.get(StreamsConfig.NUM_STREAM_THREADS_CONFIG);
|
||||
assertThat(concurrency).isEqualTo(2);
|
||||
|
||||
concurrency = (Integer) processStreamsConfiguration.get(StreamsConfig.NUM_STREAM_THREADS_CONFIG);
|
||||
assertThat(concurrency).isNull(); // thus defaults to 1 in Kafka Streams.
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidate(String in, String... out) throws InterruptedException {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
try {
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic(in);
|
||||
template.sendDefault("coffee");
|
||||
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, out[0]);
|
||||
assertThat(cr.value().contains("coffee")).isTrue();
|
||||
|
||||
template.sendDefault("electronics");
|
||||
cr = KafkaTestUtils.getSingleRecord(consumer, out[1]);
|
||||
assertThat(cr.value().contains("electronics")).isTrue();
|
||||
|
||||
Assert.isTrue(countDownLatch.await(5, TimeUnit.SECONDS), "Analyze (BiConsumer) method didn't receive all the expected records");
|
||||
}
|
||||
finally {
|
||||
pf.destroy();
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class MultipleFunctionsInSameApp {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, String>[]> process() {
|
||||
return input -> input.branch(
|
||||
(s, p) -> p.equalsIgnoreCase("coffee"),
|
||||
(s, p) -> p.equalsIgnoreCase("electronics"));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, String>, KStream<String, Long>> yetAnotherProcess() {
|
||||
return input -> input.map((k, v) -> new KeyValue<>("foo", 1L));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public BiConsumer<KStream<String, String>, KStream<String, String>> analyze() {
|
||||
return (coffee, electronics) -> {
|
||||
coffee.foreach((s, p) -> countDownLatch.countDown());
|
||||
electronics.foreach((s, p) -> countDownLatch.countDown());
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public java.util.function.Consumer<KStream<String, String>> anotherProcess() {
|
||||
return c -> {
|
||||
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,27 +16,18 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.bootstrap;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.junit.Before;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.boot.builder.SpringApplicationBuilder;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
|
||||
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
@@ -44,42 +35,23 @@ import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
|
||||
public class KafkaStreamsBinderBootstrapTest {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, 10);
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
|
||||
}
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
|
||||
|
||||
@Test
|
||||
public void testKStreamBinderWithCustomEnvironmentCanStart() {
|
||||
public void testKafkaStreamsBinderWithCustomEnvironmentCanStart() {
|
||||
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(
|
||||
SimpleKafkaStreamsApplication.class).web(WebApplicationType.NONE).run(
|
||||
"--spring.cloud.function.definition=input1;input2;input3",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input1-in-0.consumer.application-id"
|
||||
+ "=testKStreamBinderWithCustomEnvironmentCanStart",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input2-in-0.consumer.application-id"
|
||||
+ "=testKStreamBinderWithCustomEnvironmentCanStart-foo",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input3-in-0.consumer.application-id"
|
||||
+ "=testKStreamBinderWithCustomEnvironmentCanStart-foobar",
|
||||
"--spring.cloud.stream.bindings.input1-in-0.destination=foo",
|
||||
"--spring.cloud.stream.bindings.input1-in-0.binder=kstreamBinder",
|
||||
"--spring.cloud.stream.binders.kstreamBinder.type=kstream",
|
||||
"--spring.cloud.stream.binders.kstreamBinder.environment"
|
||||
SimpleApplication.class).web(WebApplicationType.NONE).run(
|
||||
"--spring.cloud.stream.kafka.streams.default.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithCustomEnvironmentCanStart",
|
||||
"--spring.cloud.stream.bindings.input.destination=foo",
|
||||
"--spring.cloud.stream.bindings.input.binder=kBind1",
|
||||
"--spring.cloud.stream.binders.kBind1.type=kstream",
|
||||
"--spring.cloud.stream.binders.kBind1.environment"
|
||||
+ ".spring.cloud.stream.kafka.streams.binder.brokers"
|
||||
+ "=" + embeddedKafka.getEmbeddedKafka().getBrokersAsString(),
|
||||
"--spring.cloud.stream.bindings.input2-in-0.destination=bar",
|
||||
"--spring.cloud.stream.bindings.input2-in-0.binder=ktableBinder",
|
||||
"--spring.cloud.stream.binders.ktableBinder.type=ktable",
|
||||
"--spring.cloud.stream.binders.ktableBinder.environment"
|
||||
+ ".spring.cloud.stream.kafka.streams.binder.brokers"
|
||||
+ "=" + embeddedKafka.getEmbeddedKafka().getBrokersAsString(),
|
||||
"--spring.cloud.stream.bindings.input3-in-0.destination=foobar",
|
||||
"--spring.cloud.stream.bindings.input3-in-0.binder=globalktableBinder",
|
||||
"--spring.cloud.stream.binders.globalktableBinder.type=globalktable",
|
||||
"--spring.cloud.stream.binders.globalktableBinder.environment"
|
||||
+ ".spring.cloud.stream.kafka.streams.binder.brokers"
|
||||
+ "=" + embeddedKafka.getEmbeddedKafka().getBrokersAsString());
|
||||
+ "=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.binders.kBind1.environment.spring"
|
||||
+ ".cloud.stream.kafka.streams.binder.zkNodes="
|
||||
+ embeddedKafka.getZookeeperConnectionString());
|
||||
|
||||
applicationContext.close();
|
||||
}
|
||||
@@ -87,79 +59,34 @@ public class KafkaStreamsBinderBootstrapTest {
|
||||
@Test
|
||||
public void testKafkaStreamsBinderWithStandardConfigurationCanStart() {
|
||||
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(
|
||||
SimpleKafkaStreamsApplication.class).web(WebApplicationType.NONE).run(
|
||||
"--spring.cloud.function.definition=input1;input2;input3",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input1-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input2-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart-foo",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input3-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart-foobar",
|
||||
SimpleApplication.class).web(WebApplicationType.NONE).run(
|
||||
"--spring.cloud.stream.kafka.streams.default.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart",
|
||||
"--spring.cloud.stream.bindings.input.destination=foo",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers="
|
||||
+ embeddedKafka.getEmbeddedKafka().getBrokersAsString());
|
||||
|
||||
applicationContext.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testStreamConfigGlobalProperties_GH1149() {
|
||||
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(
|
||||
SimpleKafkaStreamsApplication.class).web(WebApplicationType.NONE).run(
|
||||
"--spring.cloud.function.definition=input1;input2;input3",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input1-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input2-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart-foo",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input2-in-0.consumer.configuration.spring.json.value.type.method=com.test.MyClass",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input3-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderWithStandardConfigurationCanStart-foobar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers="
|
||||
+ embeddedKafka.getEmbeddedKafka().getBrokersAsString());
|
||||
|
||||
Map<String, Object> streamConfigGlobalProperties = applicationContext
|
||||
.getBean("streamConfigGlobalProperties", Map.class);
|
||||
// Make sure that the global stream configs do not contain the individual binding config set on the second function.
|
||||
assertThat(streamConfigGlobalProperties.containsKey("spring.json.value.type.method")).isFalse();
|
||||
|
||||
// Make sure that only the input2 function gets the specific binding property set on it.
|
||||
final StreamsBuilderFactoryBean input1SBFB = applicationContext.getBean("&stream-builder-input1", StreamsBuilderFactoryBean.class);
|
||||
final Properties streamsConfiguration1 = input1SBFB.getStreamsConfiguration();
|
||||
assertThat(streamsConfiguration1.containsKey("spring.json.value.type.method")).isFalse();
|
||||
|
||||
final StreamsBuilderFactoryBean input2SBFB = applicationContext.getBean("&stream-builder-input2", StreamsBuilderFactoryBean.class);
|
||||
final Properties streamsConfiguration2 = input2SBFB.getStreamsConfiguration();
|
||||
assertThat(streamsConfiguration2.containsKey("spring.json.value.type.method")).isTrue();
|
||||
|
||||
final StreamsBuilderFactoryBean input3SBFB = applicationContext.getBean("&stream-builder-input3", StreamsBuilderFactoryBean.class);
|
||||
final Properties streamsConfiguration3 = input3SBFB.getStreamsConfiguration();
|
||||
assertThat(streamsConfiguration3.containsKey("spring.json.value.type.method")).isFalse();
|
||||
+ embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes="
|
||||
+ embeddedKafka.getZookeeperConnectionString());
|
||||
|
||||
applicationContext.close();
|
||||
}
|
||||
|
||||
@SpringBootApplication
|
||||
static class SimpleKafkaStreamsApplication {
|
||||
@EnableBinding(StreamSourceProcessor.class)
|
||||
static class SimpleApplication {
|
||||
|
||||
@StreamListener
|
||||
public void handle(@Input("input") KStream<Object, String> stream) {
|
||||
|
||||
@Bean
|
||||
public Consumer<KStream<Object, String>> input1() {
|
||||
return s -> {
|
||||
// No-op consumer
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Consumer<KTable<Object, String>> input2() {
|
||||
return s -> {
|
||||
// No-op consumer
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Consumer<GlobalKTable<Object, String>> input3() {
|
||||
return s -> {
|
||||
// No-op consumer
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
interface StreamSourceProcessor {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> inputStream();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
/*
|
||||
* Copyright 2021-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.bootstrap;
|
||||
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import javax.security.auth.login.AppConfigurationEntry;
|
||||
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.boot.builder.SpringApplicationBuilder;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class KafkaStreamsBinderJaasInitTests {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, 10);
|
||||
|
||||
private static String JAVA_LOGIN_CONFIG_PARAM_VALUE;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeAll() {
|
||||
JAVA_LOGIN_CONFIG_PARAM_VALUE = System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
|
||||
System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterAll() {
|
||||
if (JAVA_LOGIN_CONFIG_PARAM_VALUE != null) {
|
||||
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, JAVA_LOGIN_CONFIG_PARAM_VALUE);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testKafkaStreamsBinderJaasInitialization() {
|
||||
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(
|
||||
KafkaStreamsBinderJaasInitTestsApplication.class).web(WebApplicationType.NONE).run(
|
||||
"--spring.cloud.function.definition=foo",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.foo-in-0.consumer.application-id"
|
||||
+ "=testKafkaStreamsBinderJaasInitialization-jaas-id",
|
||||
"--spring.cloud.stream.kafka.streams.binder.jaas.loginModule=org.apache.kafka.common.security.plain.PlainLoginModule",
|
||||
"--spring.cloud.stream.kafka.streams.binder.jaas.options.username=foo",
|
||||
"--spring.cloud.stream.kafka.streams.binder.jaas.options.password=bar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers="
|
||||
+ embeddedKafka.getEmbeddedKafka().getBrokersAsString());
|
||||
javax.security.auth.login.Configuration configuration = javax.security.auth.login.Configuration
|
||||
.getConfiguration();
|
||||
final AppConfigurationEntry[] kafkaConfiguration = configuration
|
||||
.getAppConfigurationEntry("KafkaClient");
|
||||
assertThat(kafkaConfiguration).hasSize(1);
|
||||
assertThat(kafkaConfiguration[0].getOptions().get("username")).isEqualTo("foo");
|
||||
assertThat(kafkaConfiguration[0].getOptions().get("password")).isEqualTo("bar");
|
||||
assertThat(kafkaConfiguration[0].getControlFlag())
|
||||
.isEqualTo(AppConfigurationEntry.LoginModuleControlFlag.REQUIRED);
|
||||
applicationContext.close();
|
||||
}
|
||||
|
||||
@SpringBootApplication
|
||||
static class KafkaStreamsBinderJaasInitTestsApplication {
|
||||
|
||||
@Bean
|
||||
public Consumer<KStream<Object, String>> foo() {
|
||||
return s -> {
|
||||
// No-op consumer
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,7 +16,6 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Date;
|
||||
import java.util.Map;
|
||||
@@ -38,6 +37,11 @@ import org.junit.Test;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.Output;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
@@ -49,9 +53,6 @@ import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsBinderWordCountBranchesFunctionTests {
|
||||
|
||||
@ClassRule
|
||||
@@ -83,23 +84,26 @@ public class KafkaStreamsBinderWordCountBranchesFunctionTests {
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.cloud.stream.function.bindings.process-in-0=input",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=process",
|
||||
"--spring.cloud.stream.bindings.input.destination=words",
|
||||
"--spring.cloud.stream.function.bindings.process-out-0=output1",
|
||||
"--spring.cloud.stream.bindings.output1.destination=counts",
|
||||
"--spring.cloud.stream.function.bindings.process-out-1=output2",
|
||||
"--spring.cloud.stream.bindings.output1.contentType=application/json",
|
||||
"--spring.cloud.stream.bindings.output2.destination=foo",
|
||||
"--spring.cloud.stream.function.bindings.process-out-2=output3",
|
||||
"--spring.cloud.stream.bindings.output2.contentType=application/json",
|
||||
"--spring.cloud.stream.bindings.output3.destination=bar",
|
||||
|
||||
"--spring.cloud.stream.bindings.output3.contentType=application/json",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.applicationId" +
|
||||
"--spring.cloud.stream.kafka.streams.timeWindow.length=5000",
|
||||
"--spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input.consumer.applicationId" +
|
||||
"=KafkaStreamsBinderWordCountBranchesFunctionTests-abc",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString());
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidate(context);
|
||||
}
|
||||
@@ -179,35 +183,43 @@ public class KafkaStreamsBinderWordCountBranchesFunctionTests {
|
||||
}
|
||||
}
|
||||
|
||||
@EnableBinding(KStreamProcessorX.class)
|
||||
@EnableAutoConfiguration
|
||||
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
|
||||
public static class WordCountProcessorApplication {
|
||||
|
||||
@Bean
|
||||
@SuppressWarnings({"unchecked"})
|
||||
@SuppressWarnings("unchecked")
|
||||
public Function<KStream<Object, String>, KStream<?, WordCount>[]> process() {
|
||||
|
||||
Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
|
||||
Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
|
||||
Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");
|
||||
|
||||
return input -> {
|
||||
final Map<String, KStream<Object, WordCount>> stringKStreamMap = input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.windowedBy(TimeWindows.of(Duration.ofSeconds(5)))
|
||||
.count(Materialized.as("WordCounts-branch"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value,
|
||||
new Date(key.window().start()), new Date(key.window().end()))))
|
||||
.split()
|
||||
.branch(isEnglish)
|
||||
.branch(isFrench)
|
||||
.branch(isSpanish)
|
||||
.noDefaultBranch();
|
||||
|
||||
return stringKStreamMap.values().toArray(new KStream[0]);
|
||||
};
|
||||
return input -> input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.windowedBy(TimeWindows.of(5000))
|
||||
.count(Materialized.as("WordCounts-branch"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value,
|
||||
new Date(key.window().start()), new Date(key.window().end()))))
|
||||
.branch(isEnglish, isFrench, isSpanish);
|
||||
}
|
||||
}
|
||||
|
||||
interface KStreamProcessorX {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> input();
|
||||
|
||||
@Output("output1")
|
||||
KStream<?, ?> output1();
|
||||
|
||||
@Output("output2")
|
||||
KStream<?, ?> output2();
|
||||
|
||||
@Output("output3")
|
||||
KStream<?, ?> output3();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,58 +16,40 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.Grouped;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.apache.kafka.streams.processor.StreamPartitioner;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.beans.DirectFieldAccessor;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.InteractiveQueryService;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsRegistry;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.StreamsBuilderFactoryManager;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.endpoint.KafkaStreamsTopologyEndpoint;
|
||||
import org.springframework.cloud.stream.binding.InputBindingLifecycle;
|
||||
import org.springframework.cloud.stream.binding.OutputBindingLifecycle;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.Lifecycle;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.EmbeddedKafkaBroker;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@@ -75,23 +57,20 @@ public class KafkaStreamsBinderWordCountFunctionTests {

@ClassRule
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
"counts", "counts-1", "counts-2", "counts-5", "counts-6");
"counts");

private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();

private static Consumer<String, String> consumer;

private final static CountDownLatch LATCH = new CountDownLatch(1);

@BeforeClass
public static void setUp() {
public static void setUp() throws Exception {
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false",
embeddedKafka);
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
consumer = cf.createConsumer();
embeddedKafka.consumeFromEmbeddedTopics(consumer, "counts", "counts-1", "counts-2", "counts-5", "counts-6");
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
}

@AfterClass
@@ -100,236 +79,37 @@ public class KafkaStreamsBinderWordCountFunctionTests {
}

@Test
@SuppressWarnings("unchecked")
public void testBasicKStreamTopologyExecution() throws Exception {
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.process-in-0.destination=words",
"--spring.cloud.stream.bindings.process-out-0.destination=counts",
"--spring.cloud.stream.kafka.streams.binder.application-id=testKstreamWordCountFunction",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.consumerProperties.request.timeout.ms=29000", //for testing ...binder.consumerProperties
"--spring.cloud.stream.kafka.streams.binder.consumerProperties.consumer.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer",
"--spring.cloud.stream.kafka.streams.binder.producerProperties.max.block.ms=90000", //for testing ...binder.producerProperties
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.consumedAs=custom-consumer",
"--spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.producedAs=custom-producer",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
receiveAndValidate("words", "counts");

final MeterRegistry meterRegistry = context.getBean(MeterRegistry.class);
Thread.sleep(100);

assertThat(meterRegistry.getMeters().stream().anyMatch(m -> m.getId().getName().equals("kafka.stream.thread.poll.records.max"))).isTrue();
assertThat(meterRegistry.getMeters().stream().anyMatch(m -> m.getId().getName().equals("kafka.consumer.network.io.total"))).isTrue();
assertThat(meterRegistry.getMeters().stream().anyMatch(m -> m.getId().getName().equals("kafka.producer.record.send.total"))).isTrue();
assertThat(meterRegistry.getMeters().stream().anyMatch(m -> m.getId().getName().equals("kafka.admin.client.network.io.total"))).isTrue();

Assert.isTrue(LATCH.await(5, TimeUnit.SECONDS), "Failed to call customizers");
//Testing topology endpoint
final KafkaStreamsRegistry kafkaStreamsRegistry = context.getBean(KafkaStreamsRegistry.class);
final KafkaStreamsTopologyEndpoint kafkaStreamsTopologyEndpoint = new KafkaStreamsTopologyEndpoint(kafkaStreamsRegistry);
final List<String> topologies = kafkaStreamsTopologyEndpoint.kafkaStreamsTopologies();
final String topology1 = topologies.get(0);
final String topology2 = kafkaStreamsTopologyEndpoint.kafkaStreamsTopology("testKstreamWordCountFunction");
assertThat(topology1).isNotEmpty();
assertThat(topology1).isEqualTo(topology2);
assertThat(topology1.contains("Source: custom-consumer")).isTrue();
assertThat(topology1.contains("Sink: custom-producer")).isTrue();

//verify that ...binder.consumerProperties and ...binder.producerProperties work.
Map<String, Object> streamConfigGlobalProperties = (Map<String, Object>) context.getBean("streamConfigGlobalProperties");
assertThat(streamConfigGlobalProperties.get("consumer.request.timeout.ms")).isEqualTo("29000");
assertThat(streamConfigGlobalProperties.get("consumer.value.deserializer")).isEqualTo("org.apache.kafka.common.serialization.StringDeserializer");
assertThat(streamConfigGlobalProperties.get("producer.max.block.ms")).isEqualTo("90000");

InputBindingLifecycle inputBindingLifecycle = context.getBean(InputBindingLifecycle.class);
final Collection<Binding<Object>> inputBindings = (Collection<Binding<Object>>) new DirectFieldAccessor(inputBindingLifecycle)
.getPropertyValue("inputBindings");
assertThat(inputBindings).isNotNull();
final Optional<Binding<Object>> theOnlyInputBinding = inputBindings.stream().findFirst();
assertThat(theOnlyInputBinding.isPresent()).isTrue();
final DefaultBinding<Object> objectBinding = (DefaultBinding<Object>) theOnlyInputBinding.get();
assertThat(objectBinding.getBindingName()).isEqualTo("process-in-0");

final Lifecycle lifecycle = (Lifecycle) new DirectFieldAccessor(objectBinding).getPropertyValue("lifecycle");
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean(StreamsBuilderFactoryBean.class);
assertThat(lifecycle).isEqualTo(streamsBuilderFactoryBean);

OutputBindingLifecycle outputBindingLifecycle = context.getBean(OutputBindingLifecycle.class);
final Collection<Binding<Object>> outputBindings = (Collection<Binding<Object>>) new DirectFieldAccessor(outputBindingLifecycle)
.getPropertyValue("outputBindings");
assertThat(outputBindings).isNotNull();
final Optional<Binding<Object>> theOnlyOutputBinding = outputBindings.stream().findFirst();
assertThat(theOnlyOutputBinding.isPresent()).isTrue();
final DefaultBinding<Object> objectBinding1 = (DefaultBinding<Object>) theOnlyOutputBinding.get();
assertThat(objectBinding1.getBindingName()).isEqualTo("process-out-0");

final Lifecycle lifecycle1 = (Lifecycle) new DirectFieldAccessor(objectBinding1).getPropertyValue("lifecycle");
assertThat(lifecycle1).isEqualTo(streamsBuilderFactoryBean);
}
}

@Test
public void testKstreamWordCountWithApplicationIdSpecifiedAtDefaultConsumer() {
public void testKstreamWordCountFunction() throws Exception {
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run("--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.process-in-0.destination=words-5",
"--spring.cloud.stream.bindings.process-out-0.destination=counts-5",
"--spring.cloud.stream.kafka.streams.default.consumer.application-id=testKstreamWordCountWithApplicationIdSpecifiedAtDefaultConsumer",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.binder.brokers="
+ embeddedKafka.getBrokersAsString())) {
receiveAndValidate("words-5", "counts-5");
}
}

@Test
public void testKstreamWordCountFunctionWithCustomProducerStreamPartitioner() throws Exception {
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.kafka.streams.binder.application-id=testKstreamWordCountFunctionWithCustomProducerStreamPartitioner",
"--spring.cloud.stream.bindings.process-in-0.destination=words-2",
"--spring.cloud.stream.bindings.process-out-0.destination=counts-2",
"--spring.cloud.stream.bindings.process-out-0.producer.partitionCount=2",
"--spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.streamPartitionerBeanName" +
"=streamPartitioner",
"--spring.cloud.stream.function.definition=process",
"--spring.cloud.stream.bindings.input.destination=words",
"--spring.cloud.stream.bindings.output.destination=counts",
"--spring.cloud.stream.bindings.output.contentType=application/json",
"--spring.cloud.stream.kafka.streams.default.consumer.application-id=basic-word-count",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("words-2");
template.sendDefault("foo");
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-2");
assertThat(cr.value().contains("\"word\":\"foo\",\"count\":1")).isTrue();
assertThat(cr.partition() == 0).isTrue();
template.sendDefault("bar");
cr = KafkaTestUtils.getSingleRecord(consumer, "counts-2");
assertThat(cr.value().contains("\"word\":\"bar\",\"count\":1")).isTrue();
assertThat(cr.partition() == 1).isTrue();
}
finally {
pf.destroy();
}
receiveAndValidate(context);
}
}

@Test
public void testKstreamBinderAutoStartup() throws Exception {
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.kafka.streams.auto-startup=false",
"--spring.cloud.stream.bindings.process-in-0.destination=words-3",
"--spring.cloud.stream.bindings.process-out-0.destination=counts-3",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
final StreamsBuilderFactoryManager streamsBuilderFactoryManager = context.getBean(StreamsBuilderFactoryManager.class);
assertThat(streamsBuilderFactoryManager.isAutoStartup()).isFalse();
assertThat(streamsBuilderFactoryManager.isRunning()).isFalse();
}
}

@Test
public void testKstreamIndividualBindingAutoStartup() throws Exception {
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.process-in-0.destination=words-4",
"--spring.cloud.stream.bindings.process-in-0.consumer.auto-startup=false",
"--spring.cloud.stream.bindings.process-out-0.destination=counts-4",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
final StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean(StreamsBuilderFactoryBean.class);
assertThat(streamsBuilderFactoryBean.isRunning()).isFalse();
streamsBuilderFactoryBean.start();
assertThat(streamsBuilderFactoryBean.isRunning()).isTrue();
}
}

// The following test verifies the fixes made for this issue:
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/774
@Test
public void testOutboundNullValueIsHandledGracefully()
throws Exception {
SpringApplication app = new SpringApplication(OutboundNullApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run("--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.process-in-0.destination=words-6",
"--spring.cloud.stream.bindings.process-out-0.destination=counts-6",
"--spring.cloud.stream.bindings.process-out-0.producer.useNativeEncoding=false",
"--spring.cloud.stream.kafka.streams.default.consumer.application-id=testOutboundNullValueIsHandledGracefully",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.binder.brokers="
+ embeddedKafka.getBrokersAsString())) {

Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("words-6");
template.sendDefault("foobar");
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer,
"counts-6");
assertThat(cr.value() == null).isTrue();
}
finally {
pf.destroy();
}
}
}

private void receiveAndValidate(String in, String out) {
private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic(in);
template.setDefaultTopic("words");
template.sendDefault("foobar");
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, out);
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
assertThat(cr.value().contains("\"word\":\"foobar\",\"count\":1")).isTrue();
}
finally {
@@ -387,62 +167,24 @@ public class KafkaStreamsBinderWordCountFunctionTests {
}
}

@EnableBinding(KafkaStreamsProcessor.class)
@EnableAutoConfiguration
public static class WordCountProcessorApplication {

@Autowired
InteractiveQueryService interactiveQueryService;
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
static class WordCountProcessorApplication {

@Bean
public Function<KStream<Object, String>, KStream<String, WordCount>> process() {
public Function<KStream<Object, String>, KStream<?, WordCount>> process() {

return input -> input
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
.map((key, value) -> new KeyValue<>(value, value))
.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
.windowedBy(TimeWindows.of(Duration.ofMillis(5000)))
.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
.windowedBy(TimeWindows.of(5000))
.count(Materialized.as("foo-WordCounts"))
.toStream()
.map((key, value) -> new KeyValue<>(key.key(), new WordCount(key.key(), value,
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value,
new Date(key.window().start()), new Date(key.window().end()))));
}

@Bean
public StreamsBuilderFactoryBeanConfigurer customizer() {
return fb -> {
try {
fb.setStateListener((newState, oldState) -> {

});
fb.getObject(); //make sure no exception is thrown at this call.
KafkaStreamsBinderWordCountFunctionTests.LATCH.countDown();

}
catch (Exception e) {
//Nothing to do - when the exception is thrown above, the latch won't be counted down.
}
};
}

@Bean
public StreamPartitioner<String, WordCount> streamPartitioner() {
return (t, k, v, n) -> k.equals("foo") ? 0 : 1;
}
}

@EnableAutoConfiguration
static class OutboundNullApplication {

@Bean
public Function<KStream<Object, String>, KStream<?, WordCount>> process() {
return input -> input
.flatMapValues(
value -> Arrays.asList(value.toLowerCase().split("\\W+")))
.map((key, value) -> new KeyValue<>(value, value))
.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
.windowedBy(TimeWindows.of(Duration.ofSeconds(5))).count(Materialized.as("foobar-WordCounts"))
.toStream()
.map((key, value) -> new KeyValue<>(null, null));
}
}
}

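For context, the process() function in the test application above amounts to a small windowed word-count topology. A minimal standalone sketch of the same topology outside of Spring Cloud Stream follows; the class name, topic name, and store name are illustrative assumptions and are not part of this change, and it assumes a Kafka Streams version where the Grouped and Duration-based TimeWindows APIs used by the updated test are available.

import java.time.Duration;
import java.util.Arrays;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;

public class WordCountTopologySketch {

    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Read raw lines, split them into words, and count each word in 5-second windows.
        KStream<Object, String> input = builder.stream("words");
        input.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, value) -> new KeyValue<>(value, value))
            .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
            .windowedBy(TimeWindows.of(Duration.ofMillis(5000)))
            .count(Materialized.as("sketch-WordCounts"))
            .toStream()
            .foreach((windowedWord, count) -> System.out.println(windowedWord.key() + " -> " + count));
        // Printing the topology description mirrors what the KafkaStreamsTopologyEndpoint assertions inspect.
        System.out.println(builder.build().describe());
    }
}
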
@@ -1,347 +0,0 @@
/*
* Copyright 2021-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.streams.function;

import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.stereotype.Component;
import org.springframework.util.Assert;

import static org.assertj.core.api.Assertions.assertThat;

/**
* @author Soby Chacko
*/
public class KafkaStreamsComponentBeansTests {

@ClassRule
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
"testFunctionComponent-out", "testBiFunctionComponent-out", "testCurriedFunctionWithFunctionTerminal-out");

private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();

private static Consumer<String, String> consumer1;
private static Consumer<String, String> consumer2;
private static Consumer<String, String> consumer3;

private final static CountDownLatch LATCH_1 = new CountDownLatch(1);
private final static CountDownLatch LATCH_2 = new CountDownLatch(2);
private final static CountDownLatch LATCH_3 = new CountDownLatch(3);

@BeforeClass
public static void setUp() {
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false",
embeddedKafka);
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
consumer1 = cf.createConsumer();
embeddedKafka.consumeFromEmbeddedTopics(consumer1, "testFunctionComponent-out");

Map<String, Object> consumerProps1 = KafkaTestUtils.consumerProps("group-x", "false",
embeddedKafka);
consumerProps1.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerProps1.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
DefaultKafkaConsumerFactory<String, String> cf1 = new DefaultKafkaConsumerFactory<>(consumerProps1);
consumer2 = cf1.createConsumer();
embeddedKafka.consumeFromEmbeddedTopics(consumer2, "testBiFunctionComponent-out");

Map<String, Object> consumerProps2 = KafkaTestUtils.consumerProps("group-y", "false",
embeddedKafka);
consumerProps2.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerProps2.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
DefaultKafkaConsumerFactory<String, String> cf2 = new DefaultKafkaConsumerFactory<>(consumerProps2);
consumer3 = cf2.createConsumer();
embeddedKafka.consumeFromEmbeddedTopics(consumer3, "testCurriedFunctionWithFunctionTerminal-out");
}

@AfterClass
public static void tearDown() {
consumer1.close();
consumer2.close();
consumer3.close();
}

@Test
public void testFunctionComponent() {
SpringApplication app = new SpringApplication(FunctionAsComponent.class);
app.setWebApplicationType(WebApplicationType.NONE);
try (ConfigurableApplicationContext ignored = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.foo-in-0.destination=testFunctionComponent-in",
"--spring.cloud.stream.bindings.foo-out-0.destination=testFunctionComponent-out",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("testFunctionComponent-in");
template.sendDefault("foobar");
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1, "testFunctionComponent-out");
assertThat(cr.value().contains("foobarfoobar")).isTrue();
}
finally {
pf.destroy();
}
}
}

@Test
public void testConsumerComponent() throws Exception {
SpringApplication app = new SpringApplication(ConsumerAsComponent.class);
app.setWebApplicationType(WebApplicationType.NONE);
try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.bar-in-0.destination=testConsumerComponent-in",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("testConsumerComponent-in");
template.sendDefault("foobar");
Assert.isTrue(LATCH_1.await(10, TimeUnit.SECONDS), "bar");
}
finally {
pf.destroy();
}
}
}

@Test
public void testBiFunctionComponent() {
SpringApplication app = new SpringApplication(BiFunctionAsComponent.class);
app.setWebApplicationType(WebApplicationType.NONE);
try (ConfigurableApplicationContext ignored = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.bazz-in-0.destination=testBiFunctionComponent-in-0",
"--spring.cloud.stream.bindings.bazz-in-1.destination=testBiFunctionComponent-in-1",
"--spring.cloud.stream.bindings.bazz-out-0.destination=testBiFunctionComponent-out",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("testBiFunctionComponent-in-0");
template.sendDefault("foobar");
template.setDefaultTopic("testBiFunctionComponent-in-1");
template.sendDefault("foobar");
final ConsumerRecords<String, String> records = KafkaTestUtils.getRecords(consumer2, 10_000, 2);
assertThat(records.count()).isEqualTo(2);
records.forEach(stringStringConsumerRecord -> assertThat(stringStringConsumerRecord.value().contains("foobar")).isTrue());
}
finally {
pf.destroy();
}
}
}

@Test
public void testBiConsumerComponent() throws Exception {
SpringApplication app = new SpringApplication(BiConsumerAsComponent.class);
app.setWebApplicationType(WebApplicationType.NONE);
try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.buzz-in-0.destination=testBiConsumerComponent-in-0",
"--spring.cloud.stream.bindings.buzz-in-1.destination=testBiConsumerComponent-in-1",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("testBiConsumerComponent-in-0");
template.sendDefault("foobar");
template.setDefaultTopic("testBiConsumerComponent-in-1");
template.sendDefault("foobar");
Assert.isTrue(LATCH_2.await(10, TimeUnit.SECONDS), "bar");
}
finally {
pf.destroy();
}
}
}

@Test
public void testCurriedFunctionWithConsumerTerminal() throws Exception {
SpringApplication app = new SpringApplication(CurriedFunctionWithConsumerTerminal.class);
app.setWebApplicationType(WebApplicationType.NONE);
try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.curriedConsumer-in-0.destination=testCurriedFunctionWithConsumerTerminal-in-0",
"--spring.cloud.stream.bindings.curriedConsumer-in-1.destination=testCurriedFunctionWithConsumerTerminal-in-1",
"--spring.cloud.stream.bindings.curriedConsumer-in-2.destination=testCurriedFunctionWithConsumerTerminal-in-2",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("testCurriedFunctionWithConsumerTerminal-in-0");
template.sendDefault("foobar");
template.setDefaultTopic("testCurriedFunctionWithConsumerTerminal-in-1");
template.sendDefault("foobar");
template.setDefaultTopic("testCurriedFunctionWithConsumerTerminal-in-2");
template.sendDefault("foobar");
Assert.isTrue(LATCH_3.await(10, TimeUnit.SECONDS), "bar");
}
finally {
pf.destroy();
}
}
}

@Test
public void testCurriedFunctionWithFunctionTerminal() {
SpringApplication app = new SpringApplication(CurriedFunctionWithFunctionTerminal.class);
app.setWebApplicationType(WebApplicationType.NONE);
try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.curriedFunction-in-0.destination=testCurriedFunctionWithFunctionTerminal-in-0",
"--spring.cloud.stream.bindings.curriedFunction-in-1.destination=testCurriedFunctionWithFunctionTerminal-in-1",
"--spring.cloud.stream.bindings.curriedFunction-in-2.destination=testCurriedFunctionWithFunctionTerminal-in-2",
"--spring.cloud.stream.bindings.curriedFunction-out-0.destination=testCurriedFunctionWithFunctionTerminal-out",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("testCurriedFunctionWithFunctionTerminal-in-0");
template.sendDefault("foobar");
template.setDefaultTopic("testCurriedFunctionWithFunctionTerminal-in-1");
template.sendDefault("foobar");
template.setDefaultTopic("testCurriedFunctionWithFunctionTerminal-in-2");
template.sendDefault("foobar");
final ConsumerRecords<String, String> records = KafkaTestUtils.getRecords(consumer3, 10_000, 3);
assertThat(records.count()).isEqualTo(3);
records.forEach(stringStringConsumerRecord -> assertThat(stringStringConsumerRecord.value().contains("foobar")).isTrue());
}
finally {
pf.destroy();
}
}
}

@Component("foo")
@EnableAutoConfiguration
public static class FunctionAsComponent implements Function<KStream<Integer, String>,
KStream<String, String>> {

@Override
public KStream<String, String> apply(KStream<Integer, String> stringIntegerKStream) {
return stringIntegerKStream.map((integer, s) -> new KeyValue<>(s, s + s));
}
}

@Component("bar")
@EnableAutoConfiguration
public static class ConsumerAsComponent implements java.util.function.Consumer<KStream<Integer, String>> {

@Override
public void accept(KStream<Integer, String> integerStringKStream) {
integerStringKStream.foreach((integer, s) -> LATCH_1.countDown());
}
}

@Component("bazz")
@EnableAutoConfiguration
public static class BiFunctionAsComponent implements BiFunction<KStream<String, String>, KStream<String, String>, KStream<String, String>> {

@Override
public KStream<String, String> apply(KStream<String, String> stringStringKStream, KStream<String, String> stringStringKStream2) {
return stringStringKStream.merge(stringStringKStream2);
}
}

@Component("buzz")
@EnableAutoConfiguration
public static class BiConsumerAsComponent implements BiConsumer<KStream<String, String>, KStream<String, String>> {

@Override
public void accept(KStream<String, String> stringStringKStream, KStream<String, String> stringStringKStream2) {
final KStream<String, String> merged = stringStringKStream.merge(stringStringKStream2);
merged.foreach((s, s2) -> LATCH_2.countDown());
}
}

@Component("curriedConsumer")
@EnableAutoConfiguration
public static class CurriedFunctionWithConsumerTerminal implements Function<KStream<String, String>,
Function<KStream<String, String>,
java.util.function.Consumer<KStream<String, String>>>> {

@Override
public Function<KStream<String, String>, java.util.function.Consumer<KStream<String, String>>> apply(KStream<String, String> stringStringKStream) {
return stringStringKStream1 -> stringStringKStream2 -> {
final KStream<String, String> merge1 = stringStringKStream.merge(stringStringKStream1);
final KStream<String, String> merged2 = merge1.merge(stringStringKStream2);
merged2.foreach((s1, s2) -> LATCH_3.countDown());
};
}
}

@Component("curriedFunction")
@EnableAutoConfiguration
public static class CurriedFunctionWithFunctionTerminal implements Function<KStream<String, String>,
Function<KStream<String, String>,
java.util.function.Function<KStream<String, String>, KStream<String, String>>>> {

@Override
public Function<KStream<String, String>, Function<KStream<String, String>, KStream<String, String>>> apply(KStream<String, String> stringStringKStream) {
return stringStringKStream1 -> stringStringKStream2 -> {
final KStream<String, String> merge1 = stringStringKStream.merge(stringStringKStream1);
return merge1.merge(stringStringKStream2);
};
}
}
}
@@ -1,187 +0,0 @@
/*
* Copyright 2019-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.streams.function;

import java.time.Duration;
import java.util.Map;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.ProcessorSupplier;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;
import org.apache.kafka.streams.state.WindowStore;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.kafka.test.utils.KafkaTestUtils;

import static org.assertj.core.api.Assertions.assertThat;

public class KafkaStreamsFunctionStateStoreTests {

@ClassRule
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
"counts");

private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();

@Test
public void testKafkaStreamsFuncionWithMultipleStateStores() throws Exception {
SpringApplication app = new SpringApplication(StateStoreTestApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run("--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.function.definition=biConsumerBean;hello",
"--spring.cloud.stream.bindings.biConsumerBean-in-0.destination=words",
"--spring.cloud.stream.bindings.hello-in-0.destination=words",
"--spring.cloud.stream.kafka.streams.binder.functions.changed.applicationId=testKafkaStreamsFuncionWithMultipleStateStores-123",
"--spring.cloud.stream.kafka.streams.binder.functions.hello.applicationId=testKafkaStreamsFuncionWithMultipleStateStores-456",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
receiveAndValidate(context);
}
}

private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("words");
template.sendDefault(1, "foobar");
Thread.sleep(2000L);
StateStoreTestApplication processorApplication = context
.getBean(StateStoreTestApplication.class);

KeyValueStore<Long, Long> state1 = processorApplication.state1;
assertThat(processorApplication.processed1).isTrue();
assertThat(state1 != null).isTrue();
assertThat(state1.name()).isEqualTo("my-store");
WindowStore<Long, Long> state2 = processorApplication.state2;
assertThat(state2 != null).isTrue();
assertThat(state2.name()).isEqualTo("other-store");
assertThat(state2.persistent()).isTrue();

KeyValueStore<Long, Long> state3 = processorApplication.state1;
assertThat(processorApplication.processed2).isTrue();
assertThat(state3 != null).isTrue();
assertThat(state3.name()).isEqualTo("my-store");
WindowStore<Long, Long> state4 = processorApplication.state2;
assertThat(state4 != null).isTrue();
assertThat(state4.name()).isEqualTo("other-store");
assertThat(state4.persistent()).isTrue();
}
finally {
pf.destroy();
}
}

@EnableAutoConfiguration
public static class StateStoreTestApplication {

KeyValueStore<Long, Long> state1;
WindowStore<Long, Long> state2;

KeyValueStore<Long, Long> state3;
WindowStore<Long, Long> state4;

boolean processed1;
boolean processed2;

@Bean(name = "biConsumerBean")
public java.util.function.BiConsumer<KStream<Object, String>, KStream<Object, String>> process() {
return (input0, input1) ->
input0.process((ProcessorSupplier<Object, String>) () -> new Processor<Object, String>() {
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context) {
state1 = (KeyValueStore<Long, Long>) context.getStateStore("my-store");
state2 = (WindowStore<Long, Long>) context.getStateStore("other-store");
}

@Override
public void process(Object key, String value) {
processed1 = true;
}

@Override
public void close() {

}
}, "my-store", "other-store");
}

@Bean
public java.util.function.Consumer<KTable<Object, String>> hello() {
return input -> {
input.toStream().process(() -> new Processor<Object, String>() {
@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context) {
state3 = (KeyValueStore<Long, Long>) context.getStateStore("my-store");
state4 = (WindowStore<Long, Long>) context.getStateStore("other-store");
}

@Override
public void process(Object key, String value) {
processed2 = true;
}

@Override
public void close() {

}
}, "my-store", "other-store");
};
}

@Bean
public StoreBuilder myStore() {
return Stores.keyValueStoreBuilder(
Stores.persistentKeyValueStore("my-store"), Serdes.Long(),
Serdes.Long());
}

@Bean
public StoreBuilder otherStore() {
return Stores.windowStoreBuilder(
Stores.persistentWindowStore("other-store",
Duration.ofSeconds(3), Duration.ofSeconds(3), false), Serdes.Long(),
Serdes.Long());
}
}

}
@@ -1,216 +0,0 @@
/*
* Copyright 2020-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.streams.function;

import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

import org.apache.kafka.streams.kstream.GlobalKTable;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.StreamRetryTemplate;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Lazy;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.retry.RetryPolicy;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;

import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy;
import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;

public class KafkaStreamsRetryTests {

@ClassRule
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true);

private static final EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();

private final static CountDownLatch LATCH1 = new CountDownLatch(2);
private final static CountDownLatch LATCH2 = new CountDownLatch(4);

@Test
public void testRetryTemplatePerBindingOnKStream() throws Exception {
SpringApplication app = new SpringApplication(RetryTemplatePerConsumerBindingApp.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.function.definition=process",
"--spring.cloud.stream.bindings.process-in-0.destination=words",
"--spring.cloud.stream.bindings.process-in-0.consumer.max-attempts=2",
"--spring.cloud.stream.kafka.streams.default.consumer.application-id=testRetryTemplatePerBindingOnKStream",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
sendAndValidate(LATCH1);
}
}

@Test
public void testRetryTemplateOnTableTypes() throws Exception {
SpringApplication app = new SpringApplication(RetryTemplatePerConsumerBindingApp.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.function.definition=tableTypes",
"--spring.cloud.stream.kafka.streams.default.consumer.application-id=testRetryTemplateOnTableTypes",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {

assertThat(context.getBean("tableTypes-in-0-RetryTemplate", RetryTemplate.class)).isNotNull();
assertThat(context.getBean("tableTypes-in-1-RetryTemplate", RetryTemplate.class)).isNotNull();
}
}

@Test
public void testRetryTemplateBeanProvidedByTheApp() throws Exception {
SpringApplication app = new SpringApplication(CustomRetryTemplateApp.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.function.definition=process",
"--spring.cloud.stream.bindings.process-in-0.destination=words",
"--spring.cloud.stream.bindings.process-in-0.consumer.retry-template-name=fooRetryTemplate",
"--spring.cloud.stream.kafka.streams.default.consumer.application-id=testRetryTemplateBeanProvidedByTheApp",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
sendAndValidate(LATCH2);
assertThatThrownBy(() -> context.getBean("process-in-0-RetryTemplate", RetryTemplate.class)).isInstanceOf(NoSuchBeanDefinitionException.class);
}
}

private void sendAndValidate(CountDownLatch latch) throws InterruptedException {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
try {
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("words");
template.sendDefault("foobar");
Assert.isTrue(latch.await(10, TimeUnit.SECONDS), "Foo");
}
finally {
pf.destroy();
}
}

@EnableAutoConfiguration
public static class RetryTemplatePerConsumerBindingApp {

@Bean
public java.util.function.Consumer<KStream<Object, String>> process(@Lazy @Qualifier("process-in-0-RetryTemplate") RetryTemplate retryTemplate) {

return input -> input
.process(() -> new Processor<Object, String>() {
@Override
public void init(ProcessorContext processorContext) {
}

@Override
public void process(Object o, String s) {
retryTemplate.execute(context -> {
LATCH1.countDown();
throw new RuntimeException();
});
}

@Override
public void close() {
}
});
}

@Bean
public BiConsumer<KTable<?, ?>, GlobalKTable<?, ?>> tableTypes() {
return (t, g) -> {
};
}
}

@EnableAutoConfiguration
public static class CustomRetryTemplateApp {

@Bean
@StreamRetryTemplate
RetryTemplate fooRetryTemplate() {
RetryTemplate retryTemplate = new RetryTemplate();

RetryPolicy retryPolicy = new SimpleRetryPolicy(4);
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1);

retryTemplate.setBackOffPolicy(backOffPolicy);
retryTemplate.setRetryPolicy(retryPolicy);

return retryTemplate;
}

@Bean
public java.util.function.Consumer<KStream<Object, String>> process() {

return input -> input
.process(() -> new Processor<Object, String>() {
@Override
public void init(ProcessorContext processorContext) {
}

@Override
public void process(Object o, String s) {
fooRetryTemplate().execute(context -> {
LATCH2.countDown();
throw new RuntimeException();
});

}

@Override
public void close() {
}
});
}
}
}
@@ -1,121 +0,0 @@
/*
* Copyright 2019-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.streams.function;

import java.lang.reflect.Method;
import java.util.Date;
import java.util.function.Function;

import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.streams.kstream.KStream;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.ProducerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.KeyValueSerdeResolver;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.core.ResolvableType;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.util.Assert;

public class SerdesProvidedAsBeansTests {

@ClassRule
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true);

private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();

@Test
public void testKstreamWordCountFunction() throws NoSuchMethodException {
SpringApplication app = new SpringApplication(SerdeProvidedAsBeanApp.class);
app.setWebApplicationType(WebApplicationType.NONE);

try (ConfigurableApplicationContext context = app.run(
"--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.process-in-0.destination=purchases",
"--spring.cloud.stream.bindings.process-out-0.destination=coffee",
"--spring.cloud.stream.kafka.streams.binder.functions.process.applicationId=process-id-0",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {

final Method method = SerdeProvidedAsBeanApp.class.getMethod("process");

ResolvableType resolvableType = ResolvableType.forMethodReturnType(method, SerdeProvidedAsBeanApp.class);

final KeyValueSerdeResolver keyValueSerdeResolver = context.getBean(KeyValueSerdeResolver.class);
final BindingServiceProperties bindingServiceProperties = context.getBean(BindingServiceProperties.class);
final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = context.getBean(KafkaStreamsExtendedBindingProperties.class);

final ConsumerProperties consumerProperties = bindingServiceProperties.getBindingProperties("process-in-0").getConsumer();
final KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties = kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties("input");
kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties("input");
final Serde<?> inboundValueSerde = keyValueSerdeResolver.getInboundValueSerde(consumerProperties, kafkaStreamsConsumerProperties, resolvableType.getGeneric(0));

Assert.isTrue(inboundValueSerde instanceof FooSerde, "Inbound Value Serde is not matched");

final ProducerProperties producerProperties = bindingServiceProperties.getBindingProperties("process-out-0").getProducer();
final KafkaStreamsProducerProperties kafkaStreamsProducerProperties = kafkaStreamsExtendedBindingProperties.getExtendedProducerProperties("output");
kafkaStreamsExtendedBindingProperties.getExtendedProducerProperties("output");
final Serde<?> outboundValueSerde = keyValueSerdeResolver.getOutboundValueSerde(producerProperties, kafkaStreamsProducerProperties, resolvableType.getGeneric(1));

Assert.isTrue(outboundValueSerde instanceof FooSerde, "Outbound Value Serde is not matched");
}
}

static class FooSerde<T> implements Serde<T> {
@Override
public Serializer<T> serializer() {
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer<T> deserializer() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class SerdeProvidedAsBeanApp {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, Date>, KStream<String, Date>> process() {
|
||||
return input -> input;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Serde<Date> fooSerde() {
|
||||
return new FooSerde<>();
|
||||
}
|
||||
}
|
||||
}
|
||||
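The removed test above asserted that a Serde bean is resolved by KeyValueSerdeResolver for the matching KStream generic types even when default serdes are configured. On the application side that contract looks roughly like the following sketch; JsonSerde stands in for the test's no-op FooSerde and the names are illustrative:

    import java.util.Date;
    import java.util.function.Function;

    import org.apache.kafka.common.serialization.Serde;
    import org.apache.kafka.streams.kstream.KStream;

    import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
    import org.springframework.context.annotation.Bean;
    import org.springframework.kafka.support.serializer.JsonSerde;

    @EnableAutoConfiguration
    public class SerdeBeanSketch {

        // The binder infers key/value serdes from the KStream generics; a matching
        // Serde<Date> bean is preferred over the configured default serdes.
        @Bean
        public Function<KStream<String, Date>, KStream<String, Date>> process() {
            return input -> input;
        }

        @Bean
        public Serde<Date> dateSerde() {
            return new JsonSerde<>(Date.class);   // illustrative; the removed test used a no-op FooSerde
        }
    }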
@@ -32,30 +32,24 @@ import org.apache.kafka.common.serialization.LongSerializer;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.processor.TimestampExtractor;
|
||||
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.BinderFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.support.serializer.JsonDeserializer;
|
||||
import org.springframework.kafka.support.serializer.JsonSerializer;
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
import org.springframework.kafka.test.EmbeddedKafkaBroker;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
@@ -76,67 +70,51 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
public void testStreamToGlobalKTable() throws Exception {
|
||||
SpringApplication app = new SpringApplication(OrderEnricherApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
try (ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
try (ConfigurableApplicationContext ignored = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=process",
|
||||
"--spring.cloud.stream.function.bindings.process-in-0=order",
|
||||
"--spring.cloud.stream.function.bindings.process-in-1=customer",
|
||||
"--spring.cloud.stream.function.bindings.process-in-2=product",
|
||||
"--spring.cloud.stream.function.bindings.process-out-0=enriched-order",
|
||||
"--spring.cloud.stream.bindings.order.destination=orders",
|
||||
"--spring.cloud.stream.bindings.customer.destination=customers",
|
||||
"--spring.cloud.stream.bindings.product.destination=products",
|
||||
"--spring.cloud.stream.bindings.enriched-order.destination=enriched-order",
|
||||
|
||||
"--spring.cloud.stream.bindings.input.destination=orders",
|
||||
"--spring.cloud.stream.bindings.input-x.destination=customers",
|
||||
"--spring.cloud.stream.bindings.input-y.destination=products",
|
||||
"--spring.cloud.stream.bindings.output.destination=enriched-order",
|
||||
"--spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.input-x.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.input-y.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde" +
|
||||
"=org.springframework.cloud.stream.binder.kafka.streams.function" +
|
||||
".StreamToGlobalKTableFunctionTests$OrderSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-x.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-x.consumer.valueSerde" +
|
||||
"=org.springframework.cloud.stream.binder.kafka.streams.function" +
|
||||
".StreamToGlobalKTableFunctionTests$CustomerSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-y.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-y.consumer.valueSerde" +
|
||||
"=org.springframework.cloud.stream.binder.kafka.streams.function" +
|
||||
".StreamToGlobalKTableFunctionTests$ProductSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde" +
|
||||
"=org.springframework.cloud.stream.binder.kafka.streams." +
|
||||
"function.StreamToGlobalKTableFunctionTests$EnrichedOrderSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.order.consumer.applicationId=" +
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input.consumer.applicationId=" +
|
||||
"StreamToGlobalKTableJoinFunctionTests-abc",
|
||||
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.topic.properties.cleanup.policy=compact",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-1.consumer.topic.properties.cleanup.policy=compact",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-2.consumer.topic.properties.cleanup.policy=compact",
|
||||
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
// Testing certain ancillary configuration of GlobalKTable around topics creation.
|
||||
// See this issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/687
|
||||
|
||||
BinderFactory binderFactory = context.getBeanFactory()
|
||||
.getBean(BinderFactory.class);
|
||||
|
||||
Binder<KStream, ? extends ConsumerProperties, ? extends ProducerProperties> kStreamBinder = binderFactory
|
||||
.getBinder("kstream", KStream.class);
|
||||
|
||||
KafkaStreamsConsumerProperties input = (KafkaStreamsConsumerProperties) ((ExtendedPropertiesBinder) kStreamBinder)
|
||||
.getExtendedConsumerProperties("process-in-0");
|
||||
String cleanupPolicy = input.getTopic().getProperties().get("cleanup.policy");
|
||||
|
||||
assertThat(cleanupPolicy).isEqualTo("compact");
|
||||
|
||||
Binder<GlobalKTable, ? extends ConsumerProperties, ? extends ProducerProperties> globalKTableBinder = binderFactory
|
||||
.getBinder("globalktable", GlobalKTable.class);
|
||||
|
||||
KafkaStreamsConsumerProperties inputX = (KafkaStreamsConsumerProperties) ((ExtendedPropertiesBinder) globalKTableBinder)
|
||||
.getExtendedConsumerProperties("process-in-1");
|
||||
String cleanupPolicyX = inputX.getTopic().getProperties().get("cleanup.policy");
|
||||
|
||||
assertThat(cleanupPolicyX).isEqualTo("compact");
|
||||
|
||||
KafkaStreamsConsumerProperties inputY = (KafkaStreamsConsumerProperties) ((ExtendedPropertiesBinder) globalKTableBinder)
|
||||
.getExtendedConsumerProperties("process-in-2");
|
||||
String cleanupPolicyY = inputY.getTopic().getProperties().get("cleanup.policy");
|
||||
|
||||
assertThat(cleanupPolicyY).isEqualTo("compact");
|
||||
|
||||
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString())) {
|
||||
Map<String, Object> senderPropsCustomer = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderPropsCustomer.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
|
||||
CustomerSerde customerSerde = new CustomerSerde();
|
||||
senderPropsCustomer.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
|
||||
JsonSerializer.class);
|
||||
customerSerde.serializer().getClass());
|
||||
|
||||
DefaultKafkaProducerFactory<Long, Customer> pfCustomer =
|
||||
new DefaultKafkaProducerFactory<>(senderPropsCustomer);
|
||||
@@ -150,7 +128,8 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
|
||||
Map<String, Object> senderPropsProduct = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderPropsProduct.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
|
||||
senderPropsProduct.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
|
||||
ProductSerde productSerde = new ProductSerde();
|
||||
senderPropsProduct.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, productSerde.serializer().getClass());
|
||||
|
||||
DefaultKafkaProducerFactory<Long, Product> pfProduct =
|
||||
new DefaultKafkaProducerFactory<>(senderPropsProduct);
|
||||
@@ -165,7 +144,8 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
|
||||
Map<String, Object> senderPropsOrder = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderPropsOrder.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
|
||||
senderPropsOrder.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
|
||||
OrderSerde orderSerde = new OrderSerde();
|
||||
senderPropsOrder.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, orderSerde.serializer().getClass());
|
||||
|
||||
DefaultKafkaProducerFactory<Long, Order> pfOrder = new DefaultKafkaProducerFactory<>(senderPropsOrder);
|
||||
KafkaTemplate<Long, Order> orderTemplate = new KafkaTemplate<>(pfOrder, true);
|
||||
@@ -182,8 +162,9 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
|
||||
EnrichedOrderSerde enrichedOrderSerde = new EnrichedOrderSerde();
|
||||
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
|
||||
JsonDeserializer.class);
|
||||
enrichedOrderSerde.deserializer().getClass());
|
||||
consumerProps.put(JsonDeserializer.VALUE_DEFAULT_TYPE,
|
||||
"org.springframework.cloud.stream.binder.kafka.streams." +
|
||||
"function.StreamToGlobalKTableFunctionTests.EnrichedOrder");
|
||||
@@ -224,57 +205,18 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTimeExtractor() throws Exception {
|
||||
SpringApplication app = new SpringApplication(OrderEnricherApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
interface CustomGlobalKTableProcessor extends KafkaStreamsProcessor {
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run(
|
||||
"--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.function.definition=forTimeExtractorTest",
|
||||
"--spring.cloud.stream.bindings.forTimeExtractorTest-in-0.destination=orders",
|
||||
"--spring.cloud.stream.bindings.forTimeExtractorTest-in-1.destination=customers",
|
||||
"--spring.cloud.stream.bindings.forTimeExtractorTest-in-2.destination=products",
|
||||
"--spring.cloud.stream.bindings.forTimeExtractorTest-out-0.destination=enriched-order",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.forTimeExtractorTest-in-0.consumer.timestampExtractorBeanName" +
|
||||
"=timestampExtractor",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.forTimeExtractorTest-in-1.consumer.timestampExtractorBeanName" +
|
||||
"=timestampExtractor",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.forTimeExtractorTest-in-2.consumer.timestampExtractorBeanName" +
|
||||
"=timestampExtractor",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.order.consumer.applicationId=" +
|
||||
"testTimeExtractor-abc",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
@Input("input-x")
|
||||
GlobalKTable<?, ?> inputX();
|
||||
|
||||
final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties =
|
||||
context.getBean(KafkaStreamsExtendedBindingProperties.class);
|
||||
|
||||
final Map<String, KafkaStreamsBindingProperties> bindings = kafkaStreamsExtendedBindingProperties.getBindings();
|
||||
|
||||
final KafkaStreamsBindingProperties kafkaStreamsBindingProperties0 = bindings.get("forTimeExtractorTest-in-0");
|
||||
final String timestampExtractorBeanName0 = kafkaStreamsBindingProperties0.getConsumer().getTimestampExtractorBeanName();
|
||||
final TimestampExtractor timestampExtractor0 = context.getBean(timestampExtractorBeanName0, TimestampExtractor.class);
|
||||
assertThat(timestampExtractor0).isNotNull();
|
||||
|
||||
final KafkaStreamsBindingProperties kafkaStreamsBindingProperties1 = bindings.get("forTimeExtractorTest-in-1");
|
||||
final String timestampExtractorBeanName1 = kafkaStreamsBindingProperties1.getConsumer().getTimestampExtractorBeanName();
|
||||
final TimestampExtractor timestampExtractor1 = context.getBean(timestampExtractorBeanName1, TimestampExtractor.class);
|
||||
assertThat(timestampExtractor1).isNotNull();
|
||||
|
||||
final KafkaStreamsBindingProperties kafkaStreamsBindingProperties2 = bindings.get("forTimeExtractorTest-in-2");
|
||||
final String timestampExtractorBeanName2 = kafkaStreamsBindingProperties2.getConsumer().getTimestampExtractorBeanName();
|
||||
final TimestampExtractor timestampExtractor2 = context.getBean(timestampExtractorBeanName2, TimestampExtractor.class);
|
||||
assertThat(timestampExtractor2).isNotNull();
|
||||
}
|
||||
@Input("input-y")
|
||||
GlobalKTable<?, ?> inputY();
|
||||
}
|
||||
|
||||
@EnableBinding(CustomGlobalKTableProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
|
||||
public static class OrderEnricherApplication {
|
||||
|
||||
@Bean
|
||||
@@ -302,20 +244,6 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Function<KStream<Long, Order>,
|
||||
Function<KTable<Long, Customer>,
|
||||
Function<GlobalKTable<Long, Product>, KStream<Long, Order>>>> forTimeExtractorTest() {
|
||||
return orderStream ->
|
||||
customers ->
|
||||
products -> orderStream;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public TimestampExtractor timestampExtractor() {
|
||||
return new WallclockTimestampExtractor();
|
||||
}
|
||||
}
|
||||
|
||||
static class Order {
|
||||
@@ -411,4 +339,15 @@ public class StreamToGlobalKTableFunctionTests {
|
||||
}
|
||||
}
|
||||
|
||||
public static class OrderSerde extends JsonSerde<Order> {
|
||||
}
|
||||
|
||||
public static class CustomerSerde extends JsonSerde<Customer> {
|
||||
}
|
||||
|
||||
public static class ProductSerde extends JsonSerde<Product> {
|
||||
}
|
||||
|
||||
public static class EnrichedOrderSerde extends JsonSerde<EnrichedOrder> {
|
||||
}
|
||||
}
|
||||
|
||||
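The enricher above uses the curried-function style for more than two inputs: each extra input adds one Function level, and the binder maps the nesting order onto the process-in-0, process-in-1 and process-in-2 bindings. A stripped-down sketch with placeholder value types (the test's Order, Customer and Product classes are replaced by String):

    import java.util.function.Function;

    import org.apache.kafka.streams.kstream.GlobalKTable;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;

    import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
    import org.springframework.context.annotation.Bean;

    @EnableAutoConfiguration
    public class CurriedInputsSketch {

        // process-in-0 -> KStream, process-in-1 -> KTable, process-in-2 -> GlobalKTable,
        // process-out-0 -> the returned KStream.
        @Bean
        public Function<KStream<Long, String>,
                Function<KTable<Long, String>,
                        Function<GlobalKTable<Long, String>, KStream<Long, String>>>> process() {
            return orders -> customers -> products -> orders;
        }
    }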
@@ -16,16 +16,10 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.function;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
@@ -39,36 +33,29 @@ import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||
import org.apache.kafka.common.serialization.StringSerializer;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.Grouped;
|
||||
import org.apache.kafka.streams.kstream.JoinWindows;
|
||||
import org.apache.kafka.streams.kstream.Joined;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.StreamJoined;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.BinderFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.Output;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.EmbeddedKafkaBroker;
|
||||
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
@@ -76,12 +63,12 @@ public class StreamToTableJoinFunctionTests {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1,
|
||||
true, "output-topic-1", "output-topic-2", "user-clicks-2", "user-regions-2");
|
||||
true, "output-topic-1", "output-topic-2");
|
||||
|
||||
private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule.getEmbeddedKafka();
|
||||
|
||||
@Test
|
||||
public void testStreamToTable() {
|
||||
public void testStreamToTable() throws Exception {
|
||||
SpringApplication app = new SpringApplication(CountClicksPerRegionApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
@@ -95,113 +82,36 @@ public class StreamToTableJoinFunctionTests {
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "output-topic-1");
|
||||
|
||||
runTest(app, consumer);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStreamToTableBiFunction() {
|
||||
SpringApplication app = new SpringApplication(BiFunctionCountClicksPerRegionApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
Consumer<String, Long> consumer;
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-2",
|
||||
"false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
|
||||
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
|
||||
DefaultKafkaConsumerFactory<String, Long> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "output-topic-1");
|
||||
|
||||
runTest(app, consumer);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStreamToTableBiConsumer() throws Exception {
|
||||
SpringApplication app = new SpringApplication(BiConsumerApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
Consumer<String, Long> consumer;
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-2",
|
||||
"false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
|
||||
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
|
||||
DefaultKafkaConsumerFactory<String, Long> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "output-topic-1");
|
||||
|
||||
try (ConfigurableApplicationContext ignored = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.process-in-0.destination=user-clicks-1",
|
||||
"--spring.cloud.stream.bindings.process-in-1.destination=user-regions-1",
|
||||
"--spring.cloud.stream.function.definition=process1",
|
||||
"--spring.cloud.stream.bindings.input-1.destination=user-clicks-1",
|
||||
"--spring.cloud.stream.bindings.input-2.destination=user-regions-1",
|
||||
"--spring.cloud.stream.bindings.output.destination=output-topic-1",
|
||||
"--spring.cloud.stream.bindings.input-1.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.input-2.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.valueSerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-2.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-2.consumer.valueSerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.applicationId" +
|
||||
"=testStreamToTableBiConsumer",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
|
||||
// Input 1: Region per user (multiple records allowed per user).
|
||||
List<KeyValue<String, String>> userRegions = Arrays.asList(
|
||||
new KeyValue<>("alice", "asia")
|
||||
);
|
||||
|
||||
Map<String, Object> senderProps1 = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps1.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
senderProps1.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
|
||||
DefaultKafkaProducerFactory<String, String> pf1 = new DefaultKafkaProducerFactory<>(senderProps1);
|
||||
KafkaTemplate<String, String> template1 = new KafkaTemplate<>(pf1, true);
|
||||
template1.setDefaultTopic("user-regions-1");
|
||||
|
||||
for (KeyValue<String, String> keyValue : userRegions) {
|
||||
template1.sendDefault(keyValue.key, keyValue.value);
|
||||
}
|
||||
|
||||
// Input 2: Clicks per user (multiple records allowed per user).
|
||||
List<KeyValue<String, Long>> userClicks = Arrays.asList(
|
||||
new KeyValue<>("alice", 13L)
|
||||
);
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
|
||||
|
||||
DefaultKafkaProducerFactory<String, Long> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<String, Long> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("user-clicks-1");
|
||||
|
||||
for (KeyValue<String, Long> keyValue : userClicks) {
|
||||
template.sendDefault(keyValue.key, keyValue.value);
|
||||
}
|
||||
|
||||
Assert.isTrue(BiConsumerApplication.latch.await(10, TimeUnit.SECONDS), "Failed to receive message");
|
||||
|
||||
}
|
||||
finally {
|
||||
consumer.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void runTest(SpringApplication app, Consumer<String, Long> consumer) {
|
||||
try (ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.process-in-0.destination=user-clicks-1",
|
||||
"--spring.cloud.stream.bindings.process-in-1.destination=user-regions-1",
|
||||
"--spring.cloud.stream.bindings.process-out-0.destination=output-topic-1",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.applicationId" +
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.applicationId" +
|
||||
"=StreamToTableJoinFunctionTests-abc",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-1.consumer.topic.properties.cleanup.policy=compact",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-out-0.producer.topic.properties.cleanup.policy=compact",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString())) {
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString())) {
|
||||
|
||||
// Input 1: Region per user (multiple records allowed per user).
|
||||
List<KeyValue<String, String>> userRegions = Arrays.asList(
|
||||
@@ -270,43 +180,19 @@ public class StreamToTableJoinFunctionTests {
|
||||
|
||||
assertThat(count == expectedClicksPerRegion.size()).isTrue();
|
||||
assertThat(actualClicksPerRegion).hasSameElementsAs(expectedClicksPerRegion);
|
||||
|
||||
// Testing certain ancillary configuration of GlobalKTable around topics creation.
|
||||
// See this issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/687
|
||||
BinderFactory binderFactory = context.getBeanFactory()
|
||||
.getBean(BinderFactory.class);
|
||||
|
||||
Binder<KTable, ? extends ConsumerProperties, ? extends ProducerProperties> ktableBinder = binderFactory
|
||||
.getBinder("ktable", KTable.class);
|
||||
|
||||
KafkaStreamsConsumerProperties inputX = (KafkaStreamsConsumerProperties) ((ExtendedPropertiesBinder) ktableBinder)
|
||||
.getExtendedConsumerProperties("process-in-1");
|
||||
String cleanupPolicyX = inputX.getTopic().getProperties().get("cleanup.policy");
|
||||
|
||||
assertThat(cleanupPolicyX).isEqualTo("compact");
|
||||
|
||||
Binder<KStream, ? extends ConsumerProperties, ? extends ProducerProperties> kStreamBinder = binderFactory
|
||||
.getBinder("kstream", KStream.class);
|
||||
|
||||
KafkaStreamsProducerProperties producerProperties = (KafkaStreamsProducerProperties) ((ExtendedPropertiesBinder) kStreamBinder)
|
||||
.getExtendedProducerProperties("process-out-0");
|
||||
|
||||
String cleanupPolicyOutput = producerProperties.getTopic().getProperties().get("cleanup.policy");
|
||||
|
||||
assertThat(cleanupPolicyOutput).isEqualTo("compact");
|
||||
}
|
||||
finally {
|
||||
consumer.close();
|
||||
}
|
||||
}
|
||||
|
||||
// @Test
|
||||
@Test
|
||||
public void testGlobalStartOffsetWithLatestAndIndividualBindingWthEarliest() throws Exception {
|
||||
SpringApplication app = new SpringApplication(BiFunctionCountClicksPerRegionApplication.class);
|
||||
SpringApplication app = new SpringApplication(CountClicksPerRegionApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
Consumer<String, Long> consumer;
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-3",
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-2",
|
||||
"false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
|
||||
@@ -343,22 +229,35 @@ public class StreamToTableJoinFunctionTests {
|
||||
template.sendDefault(keyValue.key, keyValue.value);
|
||||
}
|
||||
|
||||
try (ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
try (ConfigurableApplicationContext ignored = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.process-in-0.destination=user-clicks-2",
|
||||
"--spring.cloud.stream.bindings.process-in-1.destination=user-regions-2",
|
||||
"--spring.cloud.stream.bindings.process-out-0.destination=output-topic-2",
|
||||
"--spring.cloud.stream.bindings.process-in-0.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.process-in-1.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.process-out-0.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.function.definition=process1",
|
||||
"--spring.cloud.stream.bindings.input-1.destination=user-clicks-2",
|
||||
"--spring.cloud.stream.bindings.input-2.destination=user-regions-2",
|
||||
"--spring.cloud.stream.bindings.output.destination=output-topic-2",
|
||||
"--spring.cloud.stream.bindings.input-1.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.input-2.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.auto.offset.reset=latest",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.startOffset=earliest",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.startOffset=earliest",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.valueSerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-2.consumer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-2.consumer.valueSerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde" +
|
||||
"=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.application-id" +
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input-1.consumer.application-id" +
|
||||
"=StreamToTableJoinFunctionTests-foobar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString())) {
|
||||
@@ -402,6 +301,7 @@ public class StreamToTableJoinFunctionTests {
|
||||
template.sendDefault(keyValue.key, keyValue.value);
|
||||
}
|
||||
|
||||
|
||||
List<KeyValue<String, Long>> expectedClicksPerRegion = Arrays.asList(
|
||||
new KeyValue<>("americas", 101L),
|
||||
new KeyValue<>("europe", 56L),
|
||||
@@ -424,46 +324,14 @@ public class StreamToTableJoinFunctionTests {
|
||||
}
|
||||
} while (count < expectedClicksPerRegion.size() && (System.currentTimeMillis() - start) < 30000);
|
||||
|
||||
// TODO: Matched count is 3 and not 4 (expectedClicksPerRegion.size()) when running with full suite. Investigate why.
|
||||
// TODO: This behavior is only observed after the Spring Kafka upgrade to 2.5.0 and kafka client to 2.5.
|
||||
// TODO: Note that the test passes fine as a single test.
|
||||
assertThat(count).matches(
|
||||
matchedCount -> matchedCount == expectedClicksPerRegion.size() - 1 || matchedCount == expectedClicksPerRegion.size());
|
||||
assertThat(actualClicksPerRegion).containsAnyElementsOf(expectedClicksPerRegion);
|
||||
assertThat(count).isEqualTo(expectedClicksPerRegion.size());
|
||||
assertThat(actualClicksPerRegion).hasSameElementsAs(expectedClicksPerRegion);
|
||||
}
|
||||
finally {
|
||||
consumer.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTrivialSingleKTableInputAsNonDeclarative() {
|
||||
SpringApplication app = new SpringApplication(TrivialKTableApp.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
app.run("--server.port=0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers="
|
||||
+ embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.bindings.process-in-0.consumer.application-id=" +
|
||||
"testTrivialSingleKTableInputAsNonDeclarative");
|
||||
//All we are verifying is that this application didn't throw any errors.
|
||||
//See this issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/536
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTwoKStreamsCanBeJoined() {
|
||||
SpringApplication app = new SpringApplication(
|
||||
JoinProcessor.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
app.run("--server.port=0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers="
|
||||
+ embeddedKafka.getBrokersAsString(),
|
||||
"--spring.application.name=" +
|
||||
"two-kstream-input-join-integ-test");
|
||||
//All we are verifying is that this application didn't throw any errors.
|
||||
//See this issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/701
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Tuple for a region and its associated number of clicks.
|
||||
*/
|
||||
@@ -493,87 +361,50 @@ public class StreamToTableJoinFunctionTests {
|
||||
|
||||
}
|
||||
|
||||
@EnableBinding(KStreamKTableProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
|
||||
public static class CountClicksPerRegionApplication {
|
||||
|
||||
@Bean
|
||||
public Function<KStream<String, Long>, Function<KTable<String, String>, KStream<String, Long>>> process() {
|
||||
public Function<KStream<String, Long>, Function<KTable<String, String>, KStream<String, Long>>> process1() {
|
||||
return userClicksStream -> (userRegionsTable -> (userClicksStream
|
||||
.leftJoin(userRegionsTable, (clicks, region) -> new RegionWithClicks(region == null ?
|
||||
"UNKNOWN" : region, clicks),
|
||||
Joined.with(Serdes.String(), Serdes.Long(), null))
|
||||
.map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(),
|
||||
regionWithClicks.getClicks()))
|
||||
.groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
|
||||
.reduce(Long::sum, Materialized.as("CountClicks-" + UUID.randomUUID()))
|
||||
.groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
|
||||
.reduce((firstClicks, secondClicks) -> firstClicks + secondClicks)
|
||||
.toStream()));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CleanupConfig cleanupConfig() {
|
||||
return new CleanupConfig(false, true);
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class BiFunctionCountClicksPerRegionApplication {
|
||||
interface KStreamKTableProcessor {
|
||||
|
||||
@Bean
|
||||
public BiFunction<KStream<String, Long>, KTable<String, String>, KStream<String, Long>> process() {
|
||||
return (userClicksStream, userRegionsTable) -> (userClicksStream
|
||||
.leftJoin(userRegionsTable, (clicks, region) -> new RegionWithClicks(region == null ?
|
||||
"UNKNOWN" : region, clicks),
|
||||
Joined.with(Serdes.String(), Serdes.Long(), null))
|
||||
.map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(),
|
||||
regionWithClicks.getClicks()))
|
||||
.groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
|
||||
.reduce(Long::sum, Materialized.as("CountClicks-" + UUID.randomUUID()))
|
||||
.toStream());
|
||||
}
|
||||
/**
|
||||
* Input binding.
|
||||
*
|
||||
* @return {@link Input} binding for {@link KStream} type.
|
||||
*/
|
||||
@Input("input-1")
|
||||
KStream<?, ?> input1();
|
||||
|
||||
/**
|
||||
* Input binding.
|
||||
*
|
||||
* @return {@link Input} binding for {@link KStream} type.
|
||||
*/
|
||||
@Input("input-2")
|
||||
KTable<?, ?> input2();
|
||||
|
||||
/**
|
||||
* Output binding.
|
||||
*
|
||||
* @return {@link Output} binding for {@link KStream} type.
|
||||
*/
|
||||
@Output("output")
|
||||
KStream<?, ?> output();
|
||||
|
||||
@Bean
|
||||
public CleanupConfig cleanupConfig() {
|
||||
return new CleanupConfig(false, true);
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class BiConsumerApplication {
|
||||
|
||||
static CountDownLatch latch = new CountDownLatch(2);
|
||||
|
||||
@Bean
|
||||
public BiConsumer<KStream<String, Long>, KTable<String, String>> process() {
|
||||
return (userClicksStream, userRegionsTable) -> {
|
||||
userClicksStream.foreach((key, value) -> latch.countDown());
|
||||
userRegionsTable.toStream().foreach((key, value) -> latch.countDown());
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class TrivialKTableApp {
|
||||
|
||||
public java.util.function.Consumer<KTable<String, String>> process() {
|
||||
return inputTable -> inputTable.toStream().foreach((key, value) -> System.out.println("key : value " + key + " : " + value));
|
||||
}
|
||||
}
|
||||
|
||||
@EnableAutoConfiguration
|
||||
public static class JoinProcessor {
|
||||
|
||||
public BiConsumer<KStream<String, String>, KStream<String, String>> testProcessor() {
|
||||
return (input1Stream, input2Stream) -> input1Stream
|
||||
.join(input2Stream,
|
||||
(event1, event2) -> null,
|
||||
JoinWindows.of(Duration.ofMillis(5)),
|
||||
StreamJoined.with(
|
||||
Serdes.String(),
|
||||
Serdes.String(),
|
||||
Serdes.String()
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
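For the two-input cases above, the same stream-table join can also be written as a plain BiFunction instead of a curried Function, which is what BiFunctionCountClicksPerRegionApplication does; its core reduces to roughly the following sketch (serde choices and names are illustrative):

    import java.util.function.BiFunction;

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.kstream.Grouped;
    import org.apache.kafka.streams.kstream.Joined;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;

    import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
    import org.springframework.context.annotation.Bean;

    @EnableAutoConfiguration
    public class StreamTableJoinSketch {

        // Left-joins clicks (KStream) against regions (KTable), re-keys by region
        // and sums the clicks per region.
        @Bean
        public BiFunction<KStream<String, Long>, KTable<String, String>, KStream<String, Long>> process() {
            return (clicks, regions) -> clicks
                    .leftJoin(regions,
                            (clickCount, region) -> new KeyValue<>(region == null ? "UNKNOWN" : region, clickCount),
                            Joined.with(Serdes.String(), Serdes.Long(), null))
                    .map((user, regionWithClicks) -> regionWithClicks)
                    .groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
                    .reduce(Long::sum)
                    .toStream();
        }
    }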
@@ -16,7 +16,6 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
|
||||
@@ -25,25 +24,25 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.Grouped;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.mock.mockito.SpyBean;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.utils.DlqPartitionFunction;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.annotation.PropertySource;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
@@ -71,13 +70,7 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
@ClassRule
|
||||
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
|
||||
"abc-DeserializationErrorHandlerByKafkaTests-In",
|
||||
"xyz-DeserializationErrorHandlerByKafkaTests-In",
|
||||
"DeserializationErrorHandlerByKafkaTests-out",
|
||||
"error.abc-DeserializationErrorHandlerByKafkaTests-In.group",
|
||||
"error.xyz-DeserializationErrorHandlerByKafkaTests-In.group",
|
||||
"error.word1.groupx",
|
||||
"error.word2.groupx");
|
||||
"counts", "error.words.group", "error.word1.groupx", "error.word2.groupx");
|
||||
|
||||
private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule
|
||||
.getEmbeddedKafka();
|
||||
@@ -88,9 +81,11 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() {
|
||||
public static void setUp() throws Exception {
|
||||
System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers",
|
||||
embeddedKafka.getBrokersAsString());
|
||||
System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes",
|
||||
embeddedKafka.getZookeeperConnectionString());
|
||||
|
||||
System.setProperty("server.port", "0");
|
||||
System.setProperty("spring.jmx.enabled", "false");
|
||||
@@ -101,37 +96,36 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(
|
||||
consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromEmbeddedTopics(consumer, "DeserializationErrorHandlerByKafkaTests-out", "DeserializationErrorHandlerByKafkaTests-out");
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
System.clearProperty("spring.cloud.stream.kafka.streams.binder.brokers");
|
||||
System.clearProperty("server.port");
|
||||
System.clearProperty("spring.jmx.enabled");
|
||||
}
|
||||
|
||||
// @checkstyle:off
|
||||
@SpringBootTest(properties = {
|
||||
"spring.cloud.stream.bindings.input.destination=abc-DeserializationErrorHandlerByKafkaTests-In",
|
||||
"spring.cloud.stream.bindings.output.destination=DeserializationErrorHandlerByKafkaTests-Out",
|
||||
"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
|
||||
"spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.application-id=deser-kafka-dlq",
|
||||
"spring.cloud.stream.bindings.input.group=group",
|
||||
"spring.cloud.stream.kafka.streams.binder.deserializationExceptionHandler=sendToDlq",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde="
|
||||
"spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
|
||||
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde="
|
||||
+ "org.apache.kafka.common.serialization.Serdes$IntegerSerde" }, webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
// @checkstyle:on
|
||||
public static class DeserializationByKafkaAndDlqTests
|
||||
extends DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
@Test
|
||||
@Ignore
|
||||
public void test() {
|
||||
@SuppressWarnings("unchecked")
|
||||
public void test() throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
|
||||
senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("abc-DeserializationErrorHandlerByKafkaTests-In");
|
||||
template.sendDefault(1, null, "foobar");
|
||||
template.setDefaultTopic("words");
|
||||
template.sendDefault("foobar");
|
||||
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar",
|
||||
"false", embeddedKafka);
|
||||
@@ -139,53 +133,11 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(
|
||||
consumerProps);
|
||||
Consumer<String, String> consumer1 = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.abc-DeserializationErrorHandlerByKafkaTests-In.group");
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.words.group");
|
||||
|
||||
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1,
|
||||
"error.abc-DeserializationErrorHandlerByKafkaTests-In.group");
|
||||
assertThat(cr.value()).isEqualTo("foobar");
|
||||
assertThat(cr.partition()).isEqualTo(0); // custom partition function
|
||||
|
||||
// Ensuring that the deserialization was indeed done by Kafka natively
|
||||
verify(conversionDelegate, never()).deserializeOnInbound(any(Class.class),
|
||||
any(KStream.class));
|
||||
verify(conversionDelegate, never()).serializeOnOutbound(any(KStream.class));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@SpringBootTest(properties = {
|
||||
"spring.cloud.stream.bindings.input.destination=xyz-DeserializationErrorHandlerByKafkaTests-In",
|
||||
"spring.cloud.stream.bindings.output.destination=DeserializationErrorHandlerByKafkaTests-Out",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.application-id=deser-kafka-dlq",
|
||||
"spring.cloud.stream.bindings.input.group=group",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.deserializationExceptionHandler=sendToDlq",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde="
|
||||
+ "org.apache.kafka.common.serialization.Serdes$IntegerSerde" }, webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
public static class DeserializationByKafkaAndDlqPerBindingTests
|
||||
extends DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
@Test
|
||||
public void test() {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
|
||||
senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("xyz-DeserializationErrorHandlerByKafkaTests-In");
|
||||
template.sendDefault(1, null, "foobar");
|
||||
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar",
|
||||
"false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(
|
||||
consumerProps);
|
||||
Consumer<String, String> consumer1 = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.xyz-DeserializationErrorHandlerByKafkaTests-In.group");
|
||||
|
||||
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1,
|
||||
"error.xyz-DeserializationErrorHandlerByKafkaTests-In.group");
|
||||
assertThat(cr.value()).isEqualTo("foobar");
|
||||
assertThat(cr.partition()).isEqualTo(0); // custom partition function
|
||||
"error.words.group");
|
||||
assertThat(cr.value().equals("foobar")).isTrue();
|
||||
|
||||
// Ensuring that the deserialization was indeed done by Kafka natively
|
||||
verify(conversionDelegate, never()).deserializeOnInbound(any(Class.class),
|
||||
@@ -195,18 +147,22 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
}
|
||||
|
||||
// @checkstyle:off
|
||||
@SpringBootTest(properties = {
|
||||
"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
|
||||
"spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"spring.cloud.stream.bindings.input.destination=word1,word2",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.application-id=deser-kafka-dlq-multi-input",
|
||||
"spring.cloud.stream.kafka.streams.default.consumer.applicationId=deser-kafka-dlq-multi-input",
|
||||
"spring.cloud.stream.bindings.input.group=groupx",
|
||||
"spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
|
||||
"spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde="
|
||||
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde="
|
||||
+ "org.apache.kafka.common.serialization.Serdes$IntegerSerde" }, webEnvironment = SpringBootTest.WebEnvironment.NONE)
|
||||
// @checkstyle:on
|
||||
public static class DeserializationByKafkaAndDlqTestsWithMultipleInputs
|
||||
extends DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void test() {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
|
||||
@@ -227,12 +183,14 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
embeddedKafka.consumeFromEmbeddedTopics(consumer1, "error.word1.groupx",
"error.word2.groupx");

// TODO: Investigate why the ordering matters below: i.e.
// if we consume from error.word1.groupx first, an exception is thrown.
ConsumerRecord<String, String> cr1 = KafkaTestUtils.getSingleRecord(consumer1,
"error.word1.groupx");
assertThat(cr1.value()).isEqualTo("foobar");
ConsumerRecord<String, String> cr2 = KafkaTestUtils.getSingleRecord(consumer1,
"error.word2.groupx");
assertThat(cr2.value()).isEqualTo("foobar");
assertThat(cr1.value().equals("foobar")).isTrue();
ConsumerRecord<String, String> cr2 = KafkaTestUtils.getSingleRecord(consumer1,
"error.word1.groupx");
assertThat(cr2.value().equals("foobar")).isTrue();

// Ensuring that the deserialization was indeed done by Kafka natively
verify(conversionDelegate, never()).deserializeOnInbound(any(Class.class),
@@ -245,8 +203,12 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
@EnableBinding(KafkaStreamsProcessor.class)
@EnableAutoConfiguration
@PropertySource("classpath:/org/springframework/cloud/stream/binder/kstream/integTest-1.properties")
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
public static class WordCountProcessorApplication {

@Autowired
private TimeWindows timeWindows;

@StreamListener("input")
@SendTo("output")
public KStream<?, String> process(KStream<Object, String> input) {
@@ -255,17 +217,12 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
.flatMapValues(
value -> Arrays.asList(value.toLowerCase().split("\\W+")))
.map((key, value) -> new KeyValue<>(value, value))
.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
.windowedBy(TimeWindows.of(Duration.ofMillis(5000))).count(Materialized.as("foo-WordCounts-x"))
.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
.windowedBy(timeWindows).count(Materialized.as("foo-WordCounts-x"))
.toStream().map((key, value) -> new KeyValue<>(null,
"Count for " + key.key() + " : " + value));
}

@Bean
public DlqPartitionFunction partitionFunction() {
return (group, rec, ex) -> 0;
}

}

}
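The DlqPartitionFunction bean above is what lets the tests assert cr.partition() == 0: it picks the partition of the error.<destination>.<group> DLQ topic that a failed record is written to. A minimal sketch of a slightly richer variant, assuming only the three-argument functional shape shown in this diff and a DLQ topic with at least two partitions (illustrative, not part of this commit):

@Bean
public DlqPartitionFunction dlqPartitionFunction() {
    // Keep deserialization failures on partition 0 (what the tests assert) and
    // route any other failure to partition 1; the commit itself simply returns 0.
    return (group, record, exception) ->
            exception instanceof org.apache.kafka.common.errors.SerializationException ? 0 : 1;
}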
@@ -16,21 +16,19 @@

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.time.Duration;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;

@@ -66,8 +64,7 @@ public abstract class DeserializtionErrorHandlerByBinderTests {

@ClassRule
public static EmbeddedKafkaRule embeddedKafkaRule = new EmbeddedKafkaRule(1, true,
"foos", "goos",
"counts-id", "error.foos.foobar-group", "error.goos.foobar-group", "error.foos1.fooz-group",
"counts-id", "error.foos.foobar-group", "error.foos1.fooz-group",
"error.foos2.fooz-group");

private static EmbeddedKafkaBroker embeddedKafka = embeddedKafkaRule
@@ -82,11 +79,16 @@ public abstract class DeserializtionErrorHandlerByBinderTests {
public static void setUp() throws Exception {
System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers",
embeddedKafka.getBrokersAsString());
System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes",
embeddedKafka.getZookeeperConnectionString());

System.setProperty("server.port", "0");
System.setProperty("spring.jmx.enabled", "false");

Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("kafka-streams-dlq-tests", "false",
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foob", "false",
embeddedKafka);
// consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
// Deserializer.class.getName());
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(
consumerProps);
@@ -97,38 +99,35 @@ public abstract class DeserializtionErrorHandlerByBinderTests {
@AfterClass
public static void tearDown() {
consumer.close();
System.clearProperty("spring.cloud.stream.kafka.streams.binder.brokers");
System.clearProperty("server.port");
System.clearProperty("spring.jmx.enabled");
}

@SpringBootTest(properties = {
"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=false",
"spring.cloud.stream.bindings.output.producer.useNativeEncoding=false",
"spring.cloud.stream.bindings.input.destination=foos",
@SpringBootTest(properties = { "spring.cloud.stream.bindings.input.destination=foos",
"spring.cloud.stream.bindings.output.destination=counts-id",
"spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde"
+ "=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"spring.cloud.stream.kafka.streams.binder.deserializationExceptionHandler=sendToDlq",
"spring.cloud.stream.bindings.output.producer.headerMode=raw",
"spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde"
+ "=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
"spring.cloud.stream.bindings.input.consumer.headerMode=raw",
"spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
"spring.cloud.stream.kafka.streams.bindings.input.consumer.application-id"
+ "=deserializationByBinderAndDlqTests",
"spring.cloud.stream.kafka.streams.bindings.input.consumer.dlqPartitions=1",
"spring.cloud.stream.bindings.input.group=foobar-group" }, webEnvironment = SpringBootTest.WebEnvironment.NONE)
public static class DeserializationByBinderAndDlqTests
extends DeserializtionErrorHandlerByBinderTests {

@Test
@Ignore
public void test() {
@SuppressWarnings("unchecked")
public void test() throws Exception {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
senderProps);
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("foos");
template.sendDefault(1, 7, "hello");
template.sendDefault("hello");

Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar",
"false", embeddedKafka);
@@ -141,65 +140,16 @@ public abstract class DeserializtionErrorHandlerByBinderTests {

ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1,
"error.foos.foobar-group");
assertThat(cr.value()).isEqualTo("hello");
assertThat(cr.partition()).isEqualTo(0);
assertThat(cr.value().equals("hello")).isTrue();

// Ensuring that the deserialization was indeed done by the binder
verify(conversionDelegate).deserializeOnInbound(any(Class.class),
any(KStream.class));
}

}

@SpringBootTest(properties = {
"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=false",
"spring.cloud.stream.bindings.output.producer.useNativeEncoding=false",
"spring.cloud.stream.bindings.input.destination=goos",
"spring.cloud.stream.bindings.output.destination=counts-id",
"spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde"
+ "=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"spring.cloud.stream.kafka.streams.bindings.input.consumer.deserializationExceptionHandler=sendToDlq",
"spring.cloud.stream.kafka.streams.bindings.input.consumer.application-id"
+ "=deserializationByBinderAndDlqTests",
"spring.cloud.stream.kafka.streams.bindings.input.consumer.dlqPartitions=1",
"spring.cloud.stream.bindings.input.group=foobar-group" }, webEnvironment = SpringBootTest.WebEnvironment.NONE)
public static class DeserializationByBinderAndDlqSetOnConsumerBindingTests
extends DeserializtionErrorHandlerByBinderTests {

@Test
public void test() {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
senderProps);
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("goos");
template.sendDefault(1, 7, "hello");

Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar",
"false", embeddedKafka);
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(
consumerProps);
Consumer<String, String> consumer1 = cf.createConsumer();
embeddedKafka.consumeFromAnEmbeddedTopic(consumer1,
"error.goos.foobar-group");

ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1,
"error.goos.foobar-group");
assertThat(cr.value()).isEqualTo("hello");
assertThat(cr.partition()).isEqualTo(0);

// Ensuring that the deserialization was indeed done by the binder
verify(conversionDelegate).deserializeOnInbound(any(Class.class),
any(KStream.class));
}
}

@SpringBootTest(properties = {
"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=false",
"spring.cloud.stream.bindings.output.producer.useNativeEncoding=false",
"spring.cloud.stream.bindings.input.destination=foos1,foos2",
"spring.cloud.stream.bindings.output.destination=counts-id",
"spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
@@ -207,6 +157,8 @@ public abstract class DeserializtionErrorHandlerByBinderTests {
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde"
+ "=org.apache.kafka.common.serialization.Serdes$StringSerde",
"spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde"
+ "=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
"spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
"spring.cloud.stream.kafka.streams.bindings.input.consumer.application-id"
+ "=deserializationByBinderAndDlqTestsWithMultipleInputs",
@@ -216,7 +168,7 @@ public abstract class DeserializtionErrorHandlerByBinderTests {

@Test
@SuppressWarnings("unchecked")
public void test() {
public void test() throws Exception {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(
senderProps);
@@ -260,9 +212,9 @@ public abstract class DeserializtionErrorHandlerByBinderTests {
public KStream<Integer, Long> process(KStream<Object, Product> input) {
return input.filter((key, product) -> product.getId() == 123)
.map((key, value) -> new KeyValue<>(value, value))
.groupByKey(Grouped.with(new JsonSerde<>(Product.class),
.groupByKey(Serialized.with(new JsonSerde<>(Product.class),
new JsonSerde<>(Product.class)))
.windowedBy(TimeWindows.of(Duration.ofMillis(5000)))
.windowedBy(TimeWindows.of(5000))
.count(Materialized.as("id-count-store-x")).toStream()
.map((key, value) -> new KeyValue<>(key.key().id, value));
}
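The Streams hunks above consistently swap the deprecated Serialized grouping and the long-millis window overload for Grouped and Duration-based windows. As a general sketch of the migration (stream and serdes illustrative, taken from the shapes used in this diff):

// Old form, deprecated in newer Kafka Streams clients:
stream.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(5000L));

// New form this change moves to:
stream.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMillis(5000)));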
Some files were not shown because too many files have changed in this diff.