Compare commits
39 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a9f40ac084 |  |
|  | db02abe531 |  |
|  | f1dc14b5c3 |  |
|  | 50ce8ca2ba |  |
|  | 6a312592a4 |  |
|  | 43d786f701 |  |
|  | 68811cad28 |  |
|  | 4382dab8f8 |  |
|  | 20a8158a56 |  |
|  | 3ad0d7c465 |  |
|  | 5b3974c932 |  |
|  | 3c7615f7a3 |  |
|  | 8ae0157135 |  |
|  | 08658ffa6c |  |
|  | 8d797deaf9 |  |
|  | 561b4b7e73 |  |
|  | 93fdd2ef0f |  |
|  | fd48a1d0eb |  |
|  | a07a0017bb |  |
|  | 62b40b852f |  |
|  | c396c5c756 |  |
|  | b20f4a0e08 |  |
|  | 77f4bc3fb8 |  |
|  | 2aa8e9eefa |  |
|  | e3460d6fce |  |
|  | 29bb8513c0 |  |
|  | 69227166c7 |  |
|  | 4ff4507741 |  |
|  | f2e1b63460 |  |
|  | 73f1ed9523 |  |
|  | dc7662e17d |  |
|  | b76fff31b8 |  |
|  | 1f4f0c3858 |  |
|  | 1aecd02404 |  |
|  | 6485bd2abd |  |
|  | 02913cd177 |  |
|  | 0865602141 |  |
|  | 790b141799 |  |
|  | 60e620e36e |  |
pom.xml (59 changes)
@@ -2,28 +2,25 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-  <version>1.3.0.M2</version>
+  <version>2.0.0.M3</version>
   <packaging>pom</packaging>
   <parent>
     <groupId>org.springframework.cloud</groupId>
     <artifactId>spring-cloud-build</artifactId>
-    <version>1.3.3.RELEASE</version>
+    <version>2.0.0.M4</version>
     <relativePath />
   </parent>
   <properties>
-    <java.version>1.7</java.version>
-    <kafka.version>0.10.1.1</kafka.version>
-    <spring-kafka.version>1.1.6.RELEASE</spring-kafka.version>
-    <spring-integration-kafka.version>2.1.1.RELEASE</spring-integration-kafka.version>
-    <spring-cloud-stream.version>1.3.0.M2</spring-cloud-stream.version>
-    <spring-cloud-build.version>1.3.3.RELEASE</spring-cloud-build.version>
+    <java.version>1.8</java.version>
+    <spring-kafka.version>2.0.0.RELEASE</spring-kafka.version>
+    <kafka.version>0.11.0.0</kafka.version>
+    <spring-integration-kafka.version>3.0.0.M2</spring-integration-kafka.version>
+    <spring-cloud-stream.version>2.0.0.M3</spring-cloud-stream.version>
   </properties>
   <modules>
     <module>spring-cloud-stream-binder-kafka</module>
     <module>spring-cloud-starter-stream-kafka</module>
     <module>spring-cloud-stream-binder-kafka-docs</module>
-    <module>spring-cloud-stream-binder-kafka-0.10.1-test</module>
-    <module>spring-cloud-stream-binder-kafka-0.10.2-test</module>
     <module>spring-cloud-stream-binder-kafka-core</module>
+    <module>spring-cloud-stream-binder-kstream</module>
   </modules>

@@ -45,11 +42,6 @@
       <artifactId>spring-cloud-stream</artifactId>
       <version>${spring-cloud-stream.version}</version>
     </dependency>
-    <dependency>
-      <groupId>org.springframework.cloud</groupId>
-      <artifactId>spring-cloud-stream-codec</artifactId>
-      <version>${spring-cloud-stream.version}</version>
-    </dependency>
     <dependency>
       <groupId>org.apache.kafka</groupId>
       <artifactId>kafka_2.11</artifactId>

@@ -124,18 +116,6 @@
       <artifactId>maven-antrun-plugin</artifactId>
       <version>1.7</version>
     </plugin>
-    <plugin>
-      <groupId>org.apache.maven.plugins</groupId>
-      <artifactId>maven-checkstyle-plugin</artifactId>
-      <version>2.17</version>
-      <dependencies>
-        <dependency>
-          <groupId>com.puppycrawl.tools</groupId>
-          <artifactId>checkstyle</artifactId>
-          <version>7.1</version>
-        </dependency>
-      </dependencies>
-    </plugin>
     <plugin>
       <groupId>org.apache.maven.plugins</groupId>
       <artifactId>maven-javadoc-plugin</artifactId>

@@ -159,26 +139,15 @@
       <dependencies>
         <dependency>
           <groupId>org.springframework.cloud</groupId>
-          <artifactId>spring-cloud-build-tools</artifactId>
-          <version>${spring-cloud-build.version}</version>
+          <artifactId>spring-cloud-stream-tools</artifactId>
+          <version>${spring-cloud-stream.version}</version>
         </dependency>
       </dependencies>
-      <executions>
-        <execution>
-          <id>checkstyle-validation</id>
-          <phase>validate</phase>
-          <configuration>
-            <configLocation>checkstyle.xml</configLocation>
-            <encoding>UTF-8</encoding>
-            <consoleOutput>true</consoleOutput>
-            <failsOnError>true</failsOnError>
-            <includeTestSourceDirectory>true</includeTestSourceDirectory>
-          </configuration>
-          <goals>
-            <goal>check</goal>
-          </goals>
-        </execution>
-      </executions>
+      <configuration>
+        <configLocation>checkstyle.xml</configLocation>
+        <headerLocation>checkstyle-header.txt</headerLocation>
+        <includeTestSourceDirectory>true</includeTestSourceDirectory>
+      </configuration>
     </plugin>
   </plugins>
 </build>
spring-cloud-starter-stream-kafka/.jdk8 (new normal file, 0 changes)
spring-cloud-starter-stream-kafka/pom.xml

@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.springframework.cloud</groupId>
     <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-    <version>1.3.0.M2</version>
+    <version>2.0.0.M3</version>
   </parent>
   <artifactId>spring-cloud-starter-stream-kafka</artifactId>
   <description>Spring Cloud Starter Stream Kafka</description>
spring-cloud-stream-binder-kafka-0.10.1-test/pom.xml (deleted)

@@ -1,126 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>1.3.0.M2</version>
  </parent>
  <artifactId>spring-cloud-stream-binder-kafka-0.10.1-test</artifactId>
  <description>Spring Cloud Stream Kafka Binder 0.10.1 Tests</description>
  <url>http://projects.spring.io/spring-cloud</url>
  <organization>
    <name>Pivotal Software, Inc.</name>
    <url>http://www.spring.io</url>
  </organization>
  <properties>
    <main.basedir>${basedir}/../..</main.basedir>
    <kafka.version>0.10.1.1</kafka.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka</artifactId>
      <version>${project.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
      <scope>test</scope>
      <exclusions>
        <exclusion>
          <groupId>org.slf4j</groupId>
          <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.integration</groupId>
      <artifactId>spring-integration-kafka</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka-0.10.2-test</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-schema</artifactId>
      <version>${spring-cloud-stream.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-avro-serializer</artifactId>
      <version>3.1.2</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-schema-registry</artifactId>
      <version>3.1.2</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <repositories>
    <repository>
      <id>confluent</id>
      <url>http://packages.confluent.io/maven/</url>
    </repository>
  </repositories>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <version>3.0.2</version>
        <executions>
          <execution>
            <goals>
              <goal>test-jar</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>

</project>
Kafka_0_10_1_BinderTests.java (deleted)

@@ -1,241 +0,0 @@
/*
 * Copyright 2014-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;

import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.assertj.core.api.Assertions;
import org.eclipse.jetty.server.Server;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.MessageBuilder;

import static org.junit.Assert.assertTrue;

/**
 * Integration tests for the {@link KafkaMessageChannelBinder}.
 *
 * This test specifically tests for the 0.10.1.x version of Kafka.
 *
 * @author Eric Bottard
 * @author Marius Bogoevici
 * @author Mark Fisher
 * @author Ilayaperumal Gopinathan
 */
public class Kafka_0_10_1_BinderTests extends Kafka_0_10_2_BinderTests {

    private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);

    private Kafka10TestBinder binder;

    private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();

    @Override
    protected void binderBindUnbindLatency() throws InterruptedException {
        Thread.sleep(500);
    }

    @Override
    protected Kafka10TestBinder getBinder() {
        if (binder == null) {
            KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
            binder = new Kafka10TestBinder(binderConfiguration);
        }
        return binder;
    }

    protected KafkaBinderConfigurationProperties createConfigurationProperties() {
        KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
        BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
        List<String> bAddresses = new ArrayList<>();
        for (BrokerAddress bAddress : brokerAddresses) {
            bAddresses.add(bAddress.toString());
        }
        String[] foo = new String[bAddresses.size()];
        binderConfiguration.setBrokers(bAddresses.toArray(foo));
        binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
        return binderConfiguration;
    }

    @Override
    protected int partitionSize(String topic) {
        return consumerFactory().createConsumer().partitionsFor(topic).size();
    }

    @Override
    protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
        final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
                kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
                ZKStringSerializer$.MODULE$);

        return new ZkUtils(zkClient, null, false);
    }

    @Override
    protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
        adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
    }

    @Override
    protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
        return adminUtilsOperation.partitionSize(topic, zkUtils);
    }

    @Override
    public String getKafkaOffsetHeaderKey() {
        return KafkaHeaders.OFFSET;
    }

    @Override
    protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
        return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
    }

    @Before
    public void init() {
        String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
        if (multiplier != null) {
            timeoutMultiplier = Double.parseDouble(multiplier);
        }
    }

    @Override
    protected boolean usesExplicitRouting() {
        return false;
    }

    @Override
    protected String getClassUnderTestName() {
        return CLASS_UNDER_TEST_NAME;
    }

    @Override
    public Spy spyOn(final String name) {
        throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
    }

    private ConsumerFactory<byte[], byte[]> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
        Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
        Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();

        return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testCustomAvroSerialization() throws Exception {
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
        final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
                configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
                ZKStringSerializer$.MODULE$);
        final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
        Map<String, Object> schemaRegistryProps = new HashMap<>();
        schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
        schemaRegistryProps.put("listeners", "http://0.0.0.0:8082");
        schemaRegistryProps.put("port", "8082");
        schemaRegistryProps.put("kafkastore.topic", "_schemas");
        SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
        SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
        Server server = app.createServer();
        server.start();
        long endTime = System.currentTimeMillis() + 5000;
        while (true) {
            if (server.isRunning()) {
                break;
            }
            else if (System.currentTimeMillis() > endTime) {
                Assertions.fail("Kafka Schema Registry Server failed to start");
            }
        }
        User1 firstOutboundFoo = new User1();
        String userName1 = "foo-name" + UUID.randomUUID().toString();
        String favColor1 = "foo-color" + UUID.randomUUID().toString();
        firstOutboundFoo.setName(userName1);
        firstOutboundFoo.setFavoriteColor(favColor1);
        Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
        SubscribableChannel moduleOutputChannel = new DirectChannel();
        String testTopicName = "existing" + System.currentTimeMillis();
        invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
        configurationProperties.setAutoAddPartitions(true);
        Binder binder = getBinder(configurationProperties);
        QueueChannel moduleInputChannel = new QueueChannel();
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
        producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
        producerProperties.setUseNativeEncoding(true);
        Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
        consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
        Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
        // Let the consumer actually bind to the producer before sending a msg
        binderBindUnbindLatency();
        moduleOutputChannel.send(message);
        Message<?> inbound = receive(moduleInputChannel);
        Assertions.assertThat(inbound).isNotNull();
        assertTrue(message.getPayload() instanceof User1);
        User1 receivedUser = (User1) message.getPayload();
        Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
        Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
        producerBinding.unbind();
        consumerBinding.unbind();
    }
}
spring-cloud-stream-binder-kafka-0.10.2-test/pom.xml (deleted)

@@ -1,120 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>1.3.0.M2</version>
  </parent>
  <artifactId>spring-cloud-stream-binder-kafka-0.10.2-test</artifactId>
  <description>Spring Cloud Stream Kafka Binder 0.10.2 Tests</description>
  <url>http://projects.spring.io/spring-cloud</url>
  <organization>
    <name>Pivotal Software, Inc.</name>
    <url>http://www.spring.io</url>
  </organization>
  <properties>
    <main.basedir>${basedir}/../..</main.basedir>
    <kafka.version>0.10.2.1</kafka.version>
    <spring-kafka.version>1.2.2.RELEASE</spring-kafka.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka</artifactId>
      <version>${project.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
      <scope>test</scope>
      <exclusions>
        <exclusion>
          <groupId>org.slf4j</groupId>
          <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.integration</groupId>
      <artifactId>spring-integration-kafka</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-schema</artifactId>
      <version>${spring-cloud-stream.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-avro-serializer</artifactId>
      <version>3.2.2</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-schema-registry</artifactId>
      <version>3.2.2</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <repositories>
    <repository>
      <id>confluent</id>
      <url>http://packages.confluent.io/maven/</url>
    </repository>
  </repositories>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <version>3.0.2</version>
        <executions>
          <execution>
            <goals>
              <goal>test-jar</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>

</project>
Kafka_0_10_2_BinderTests.java (deleted)

@@ -1,241 +0,0 @@
/*
 * Copyright 2014-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;

import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.assertj.core.api.Assertions;
import org.eclipse.jetty.server.Server;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.MessageBuilder;

import static org.junit.Assert.assertTrue;

/**
 * Integration tests for the {@link KafkaMessageChannelBinder}.
 *
 * This test specifically tests for the 0.10.2.x version of Kafka.
 *
 * @author Eric Bottard
 * @author Marius Bogoevici
 * @author Mark Fisher
 * @author Ilayaperumal Gopinathan
 */
public class Kafka_0_10_2_BinderTests extends KafkaBinderTests {

    private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);

    private Kafka10TestBinder binder;

    private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();

    @Override
    protected void binderBindUnbindLatency() throws InterruptedException {
        Thread.sleep(500);
    }

    @Override
    protected Kafka10TestBinder getBinder() {
        if (binder == null) {
            KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
            binder = new Kafka10TestBinder(binderConfiguration);
        }
        return binder;
    }

    protected KafkaBinderConfigurationProperties createConfigurationProperties() {
        KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
        BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
        List<String> bAddresses = new ArrayList<>();
        for (BrokerAddress bAddress : brokerAddresses) {
            bAddresses.add(bAddress.toString());
        }
        String[] foo = new String[bAddresses.size()];
        binderConfiguration.setBrokers(bAddresses.toArray(foo));
        binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
        return binderConfiguration;
    }

    @Override
    protected int partitionSize(String topic) {
        return consumerFactory().createConsumer().partitionsFor(topic).size();
    }

    @Override
    protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
        final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
                kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
                ZKStringSerializer$.MODULE$);

        return new ZkUtils(zkClient, null, false);
    }

    @Override
    protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
        adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
    }

    @Override
    protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
        return adminUtilsOperation.partitionSize(topic, zkUtils);
    }

    @Override
    public String getKafkaOffsetHeaderKey() {
        return KafkaHeaders.OFFSET;
    }

    @Override
    protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
        return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
    }

    @Before
    public void init() {
        String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
        if (multiplier != null) {
            timeoutMultiplier = Double.parseDouble(multiplier);
        }
    }

    @Override
    protected boolean usesExplicitRouting() {
        return false;
    }

    @Override
    protected String getClassUnderTestName() {
        return CLASS_UNDER_TEST_NAME;
    }

    @Override
    public Spy spyOn(final String name) {
        throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
    }

    private ConsumerFactory<byte[], byte[]> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
        Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
        Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();

        return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testCustomAvroSerialization() throws Exception {
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
        final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
                configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
                ZKStringSerializer$.MODULE$);
        final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
        Map<String, Object> schemaRegistryProps = new HashMap<>();
        schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
        schemaRegistryProps.put("listeners", "http://0.0.0.0:8082");
        schemaRegistryProps.put("port", "8082");
        schemaRegistryProps.put("kafkastore.topic", "_schemas");
        SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
        SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
        Server server = app.createServer();
        server.start();
        long endTime = System.currentTimeMillis() + 5000;
        while (true) {
            if (server.isRunning()) {
                break;
            }
            else if (System.currentTimeMillis() > endTime) {
                Assertions.fail("Kafka Schema Registry Server failed to start");
            }
        }
        User1 firstOutboundFoo = new User1();
        String userName1 = "foo-name" + UUID.randomUUID().toString();
        String favColor1 = "foo-color" + UUID.randomUUID().toString();
        firstOutboundFoo.setName(userName1);
        firstOutboundFoo.setFavoriteColor(favColor1);
        Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
        SubscribableChannel moduleOutputChannel = new DirectChannel();
        String testTopicName = "existing" + System.currentTimeMillis();
        invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
        configurationProperties.setAutoAddPartitions(true);
        Binder binder = getBinder(configurationProperties);
        QueueChannel moduleInputChannel = new QueueChannel();
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
        producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
        producerProperties.setUseNativeEncoding(true);
        Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
        consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
        Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
        // Let the consumer actually bind to the producer before sending a msg
        binderBindUnbindLatency();
        moduleOutputChannel.send(message);
        Message<?> inbound = receive(moduleInputChannel);
        Assertions.assertThat(inbound).isNotNull();
        assertTrue(message.getPayload() instanceof User1);
        User1 receivedUser = (User1) message.getPayload();
        Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
        Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
        producerBinding.unbind();
        consumerBinding.unbind();
    }
}
User1.java (deleted)

@@ -1,85 +0,0 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.reflect.Nullable;
import org.apache.avro.specific.SpecificRecordBase;

import org.springframework.core.io.ClassPathResource;

/**
 * @author Marius Bogoevici
 * @author Ilayaperumal Gopinathan
 */
public class User1 extends SpecificRecordBase {

    @Nullable
    private String name;

    @Nullable
    private String favoriteColor;

    public String getName() {
        return this.name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getFavoriteColor() {
        return this.favoriteColor;
    }

    public void setFavoriteColor(String favoriteColor) {
        this.favoriteColor = favoriteColor;
    }

    @Override
    public Schema getSchema() {
        try {
            return new Schema.Parser().parse(new ClassPathResource("schemas/users_v1.schema").getInputStream());
        }
        catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }

    @Override
    public Object get(int i) {
        if (i == 0) {
            return getName().toString();
        }
        if (i == 1) {
            return getFavoriteColor().toString();
        }
        return null;
    }

    @Override
    public void put(int i, Object o) {
        if (i == 0) {
            setName((String) o);
        }
        if (i == 1) {
            setFavoriteColor((String) o);
        }
    }
}
spring-cloud-stream-binder-kafka-core/.jdk8 (new normal file, 0 changes)
spring-cloud-stream-binder-kafka-core/pom.xml

@@ -1,10 +1,22 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.8</source>
+          <target>1.8</target>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
   <parent>
     <groupId>org.springframework.cloud</groupId>
     <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-    <version>1.3.0.M2</version>
+    <version>2.0.0.M3</version>
   </parent>
   <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
   <description>Spring Cloud Stream Kafka Binder Core</description>
Kafka09AdminUtilsOperation.java (deleted)

@@ -1,145 +0,0 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Properties;

import kafka.utils.ZkUtils;

import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;

/**
 * @author Soby Chacko
 */
public class Kafka09AdminUtilsOperation implements AdminUtilsOperation {

    private static Class<?> ADMIN_UTIL_CLASS;

    static {
        try {
            ADMIN_UTIL_CLASS = ClassUtils.forName("kafka.admin.AdminUtils", null);
        }
        catch (ClassNotFoundException e) {
            throw new IllegalStateException("AdminUtils class not found", e);
        }
    }

    public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
            String replicaAssignmentStr, boolean checkBrokerAvailable) {
        try {
            Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
            Method addPartitions = null;
            for (Method m : declaredMethods) {
                if (m.getName().equals("addPartitions")) {
                    addPartitions = m;
                }
            }
            if (addPartitions != null) {
                addPartitions.invoke(null, zkUtils, topic, numPartitions,
                        replicaAssignmentStr, checkBrokerAvailable);
            }
            else {
                throw new InvocationTargetException(
                        new RuntimeException("method not found"));
            }
        }
        catch (InvocationTargetException e) {
            ReflectionUtils.handleInvocationTargetException(e);
        }
        catch (IllegalAccessException e) {
            ReflectionUtils.handleReflectionException(e);
        }

    }

    public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
        try {
            Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
            Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
            Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
            Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "errorCode");
            return (short) errorCodeMethod.invoke(result);
        }
        catch (ClassNotFoundException e) {
            throw new IllegalStateException("AdminUtils class not found", e);
        }
        catch (InvocationTargetException e) {
            ReflectionUtils.handleInvocationTargetException(e);
        }
        catch (IllegalAccessException e) {
            ReflectionUtils.handleReflectionException(e);
        }
        return 0;
    }

    @SuppressWarnings("unchecked")
    public int partitionSize(String topic, ZkUtils zkUtils) {
        try {
            Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
            Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
            Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);

            Method partitionsMetadata = ReflectionUtils.findMethod(topicMetadataClass, "partitionsMetadata");
            scala.collection.Seq<kafka.api.PartitionMetadata> partitionSize =
                    (scala.collection.Seq<kafka.api.PartitionMetadata>) partitionsMetadata.invoke(result);

            return partitionSize.size();
        }
        catch (ClassNotFoundException e) {
            throw new IllegalStateException("AdminUtils class not found", e);
        }
        catch (InvocationTargetException e) {
            ReflectionUtils.handleInvocationTargetException(e);
        }
        catch (IllegalAccessException e) {
            ReflectionUtils.handleReflectionException(e);
        }
        return 0;

    }

    public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
            int replicationFactor, Properties topicConfig) {
        try {
            Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
            Method createTopic = null;
            for (Method m : declaredMethods) {
                if (m.getName().equals("createTopic")) {
                    createTopic = m;
                    break;
                }
            }
            if (createTopic != null) {
                createTopic.invoke(null, zkUtils, topic, partitions,
                        replicationFactor, topicConfig);
            }
            else {
                throw new InvocationTargetException(
                        new RuntimeException("method not found"));
            }
        }
        catch (InvocationTargetException e) {
            ReflectionUtils.handleInvocationTargetException(e);
        }
        catch (IllegalAccessException e) {
            ReflectionUtils.handleReflectionException(e);
        }
    }
}
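The deleted Kafka09AdminUtilsOperation called version-specific `kafka.admin.AdminUtils` methods reflectively, so the binder could compile against one Kafka version and run against another. A minimal, runnable sketch of that lookup-and-invoke pattern; `java.lang.Math` stands in here for the Kafka class, which may not be on the compile-time classpath:

```java
import java.lang.reflect.Method;

import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;

class ReflectiveCallSketch {
    public static void main(String[] args) throws Exception {
        // Resolve the target class by name at runtime instead of linking at compile time.
        Class<?> target = ClassUtils.forName("java.lang.Math", null); // stand-in for kafka.admin.AdminUtils
        // Locate the method by name and signature, then invoke it (static method => null receiver).
        Method abs = ReflectionUtils.findMethod(target, "abs", int.class);
        Object result = abs.invoke(null, -42);
        System.out.println(result); // prints 42
    }
}
```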
KafkaAdminUtilsOperation.java (class renamed from Kafka10AdminUtilsOperation)

@@ -25,7 +25,7 @@ import org.apache.kafka.common.requests.MetadataResponse;
 /**
  * @author Soby Chacko
  */
-public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
+public class KafkaAdminUtilsOperation implements AdminUtilsOperation {

     public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
             String replicaAssignmentStr, boolean checkBrokerAvailable) {
@@ -18,6 +18,7 @@ package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;

 import javax.security.auth.login.AppConfigurationEntry;

 import org.springframework.util.Assert;
KafkaBinderConfigurationProperties.java

@@ -1,5 +1,5 @@
 /*
- * Copyright 2015-2016 the original author or authors.
+ * Copyright 2015-2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,10 +35,13 @@ import org.springframework.util.StringUtils;
  * @author Ilayaperumal Gopinathan
  * @author Marius Bogoevici
  * @author Soby Chacko
+ * @author Gary Russell
  */
 @ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
 public class KafkaBinderConfigurationProperties {

+    private final Transaction transaction = new Transaction();
+
     @Autowired(required = false)
     private KafkaProperties kafkaProperties;
@@ -88,8 +91,22 @@ public class KafkaBinderConfigurationProperties {

     private int queueSize = 8192;

+    /**
+     * Time to wait to get partition information in seconds; default 60.
+     */
+    private int healthTimeout = 60;
+
     private JaasLoginModuleConfiguration jaas;

+    /**
+     * The bean name of a custom header mapper to use instead of a {@link DefaultKafkaHeaderMapper}.
+     */
+    private String headerMapperBeanName;
+
+    public Transaction getTransaction() {
+        return this.transaction;
+    }
+
     public String getZkConnectionString() {
         return toConnectionString(this.zkNodes, this.defaultZkPort);
     }
@@ -228,6 +245,14 @@ public class KafkaBinderConfigurationProperties {
         this.minPartitionCount = minPartitionCount;
     }

+    public int getHealthTimeout() {
+        return this.healthTimeout;
+    }
+
+    public void setHealthTimeout(int healthTimeout) {
+        this.healthTimeout = healthTimeout;
+    }
+
     public int getQueueSize() {
         return this.queueSize;
     }
@@ -338,4 +363,32 @@ public class KafkaBinderConfigurationProperties {
         this.jaas = jaas;
     }

+    public String getHeaderMapperBeanName() {
+        return this.headerMapperBeanName;
+    }
+
+    public void setHeaderMapperBeanName(String headerMapperBeanName) {
+        this.headerMapperBeanName = headerMapperBeanName;
+    }
+
+    public static class Transaction {
+
+        private final KafkaProducerProperties producer = new KafkaProducerProperties();
+
+        private String transactionIdPrefix;
+
+        public String getTransactionIdPrefix() {
+            return this.transactionIdPrefix;
+        }
+
+        public void setTransactionIdPrefix(String transactionIdPrefix) {
+            this.transactionIdPrefix = transactionIdPrefix;
+        }
+
+        public KafkaProducerProperties getProducer() {
+            return this.producer;
+        }
+
+    }
+
 }
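The hunks above give KafkaBinderConfigurationProperties programmatic accessors for the new `spring.cloud.stream.kafka.binder.*` settings documented later in this compare view. A brief sketch (not part of the diff) exercising them; the values are placeholders:

```java
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;

class BinderPropertiesSketch {
    public static void main(String[] args) {
        KafkaBinderConfigurationProperties binderProps = new KafkaBinderConfigurationProperties();
        binderProps.setHealthTimeout(120);                     // spring.cloud.stream.kafka.binder.healthTimeout
        binderProps.setHeaderMapperBeanName("myHeaderMapper"); // placeholder bean name for a custom header mapper
        System.out.println(binderProps.getHealthTimeout());    // prints 120
    }
}
```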
KafkaConsumerProperties.java

@@ -43,6 +43,8 @@ public class KafkaConsumerProperties {

     private int recoveryInterval = 5000;

+    private String[] trustedPackages;
+
     private Map<String, String> configuration = new HashMap<>();

     public boolean isAutoCommitOffset() {

@@ -123,4 +125,12 @@ public class KafkaConsumerProperties {
     public void setDlqName(String dlqName) {
         this.dlqName = dlqName;
     }

+    public String[] getTrustedPackages() {
+        return trustedPackages;
+    }
+
+    public void setTrustedPackages(String[] trustedPackages) {
+        this.trustedPackages = trustedPackages;
+    }
+
 }
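The new `trustedPackages` property mirrors the trusted-packages guard that spring-kafka's header mappers apply when materializing types from inbound records (the consumer-side wiring is outside this hunk). A hedged sketch, with a hypothetical package name:

```java
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;

class TrustedPackagesSketch {
    public static void main(String[] args) {
        KafkaConsumerProperties consumerProps = new KafkaConsumerProperties();
        // Whitelist the packages whose types may be instantiated when inbound
        // headers are deserialized; "com.example.events" is a placeholder.
        consumerProps.setTrustedPackages(new String[] { "com.example.events" });
    }
}
```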
KafkaProducerProperties.java

@@ -16,16 +16,17 @@

 package org.springframework.cloud.stream.binder.kafka.properties;

-import org.springframework.expression.Expression;
-
 import java.util.HashMap;
 import java.util.Map;

 import javax.validation.constraints.NotNull;

+import org.springframework.expression.Expression;
+
 /**
  * @author Marius Bogoevici
  * @author Henryk Konsek
+ * @author Gary Russell
  */
 public class KafkaProducerProperties {

@@ -39,6 +40,8 @@ public class KafkaProducerProperties {

     private Expression messageKeyExpression;

+    private String[] headerPatterns;
+
     private Map<String, String> configuration = new HashMap<>();

     public int getBufferSize() {

@@ -82,6 +85,14 @@ public class KafkaProducerProperties {
         this.messageKeyExpression = messageKeyExpression;
     }

+    public String[] getHeaderPatterns() {
+        return this.headerPatterns;
+    }
+
+    public void setHeaderPatterns(String[] headerPatterns) {
+        this.headerPatterns = headerPatterns;
+    }
+
     public Map<String, String> getConfiguration() {
         return this.configuration;
     }
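`headerPatterns` selects which message headers the producer maps onto outbound Kafka records. A sketch under the assumption that the patterns follow Spring's usual header-mapper conventions (a leading `!` negates, `*` matches all); neither the pattern syntax nor the header name is spelled out in this hunk:

```java
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;

class HeaderPatternsSketch {
    public static void main(String[] args) {
        KafkaProducerProperties producerProps = new KafkaProducerProperties();
        // Exclude a hypothetical internal header, then map everything else.
        producerProps.setHeaderPatterns(new String[] { "!internalId", "*" });
    }
}
```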
KafkaTopicProvisioner.java

@@ -40,8 +40,6 @@ import org.springframework.cloud.stream.provisioning.ConsumerDestination;
 import org.springframework.cloud.stream.provisioning.ProducerDestination;
 import org.springframework.cloud.stream.provisioning.ProvisioningException;
 import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
-import org.springframework.retry.RetryCallback;
-import org.springframework.retry.RetryContext;
 import org.springframework.retry.RetryOperations;
 import org.springframework.retry.backoff.ExponentialBackOffPolicy;
 import org.springframework.retry.policy.SimpleRetryPolicy;

@@ -203,29 +201,25 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
     final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
             partitionCount);

-    this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
-
-        @Override
-        public Object doWithRetry(RetryContext context) throws RuntimeException {
-
-            try {
-                adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
-                        configurationProperties.getReplicationFactor(), new Properties());
-            }
-            catch (Exception e) {
-                String exceptionClass = e.getClass().getName();
-                if (exceptionClass.equals("kafka.common.TopicExistsException")
-                        || exceptionClass.equals("org.apache.kafka.common.errors.TopicExistsException")) {
-                    if (logger.isWarnEnabled()) {
-                        logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
-                    }
-                }
-                else {
-                    throw e;
-                }
-            }
-            return null;
+    this.metadataRetryOperations.execute(context -> {
+        try {
+            adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
+                    configurationProperties.getReplicationFactor(), new Properties());
+        }
+        catch (Exception e) {
+            String exceptionClass = e.getClass().getName();
+            if (exceptionClass.equals("kafka.common.TopicExistsException")
+                    || exceptionClass.equals("org.apache.kafka.common.errors.TopicExistsException")) {
+                if (logger.isWarnEnabled()) {
+                    logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
+                }
+            }
+            else {
+                throw e;
+            }
+        }
+        return null;
     });
 }
 else {

@@ -243,27 +237,23 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
         final Callable<Collection<PartitionInfo>> callable) {
     try {
         return this.metadataRetryOperations
-                .execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {
-
-                    @Override
-                    public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
-                        Collection<PartitionInfo> partitions = callable.call();
-                        // do a sanity check on the partition set
-                        int partitionSize = partitions.size();
-                        if (partitionSize < partitionCount) {
-                            if (tolerateLowerPartitionsOnBroker) {
-                                logger.warn("The number of expected partitions was: " + partitionCount + ", but "
-                                        + partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
-                                        + "There will be " + (partitionCount - partitionSize) + " idle consumers");
-                            }
-                            else {
-                                throw new IllegalStateException("The number of expected partitions was: "
                                        + partitionCount + ", but " + partitionSize
-                                        + (partitionSize > 1 ? " have " : " has ") + "been found instead");
-                            }
-                        }
-                        return partitions;
-                    }
+                .execute(context -> {
+                    Collection<PartitionInfo> partitions = callable.call();
+                    // do a sanity check on the partition set
+                    int partitionSize = partitions.size();
+                    if (partitionSize < partitionCount) {
+                        if (tolerateLowerPartitionsOnBroker) {
+                            logger.warn("The number of expected partitions was: " + partitionCount + ", but "
+                                    + partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+                                    + "There will be " + (partitionCount - partitionSize) + " idle consumers");
+                        }
+                        else {
+                            throw new IllegalStateException("The number of expected partitions was: "
+                                    + partitionCount + ", but " + partitionSize
+                                    + (partitionSize > 1 ? " have " : " has ") + "been found instead");
+                        }
+                    }
+                    return partitions;
                 });
     }
     catch (Exception e) {
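Both provisioner hunks make the same mechanical change: with the Java 8 baseline set in the parent pom, each single-method RetryCallback anonymous class collapses into a lambda passed to RetryOperations.execute. A standalone sketch of the pattern, using spring-retry's stock RetryTemplate:

```java
import org.springframework.retry.RetryOperations;
import org.springframework.retry.support.RetryTemplate;

class RetryLambdaSketch {
    public static void main(String[] args) {
        RetryOperations retryOperations = new RetryTemplate(); // default policy: up to 3 attempts
        // RetryCallback has one doWithRetry(RetryContext) method, so a lambda suffices;
        // the body is re-run if it throws, until the retry policy gives up.
        String outcome = retryOperations.execute(context -> {
            System.out.println("attempt #" + (context.getRetryCount() + 1));
            return "provisioned";
        });
        System.out.println(outcome);
    }
}
```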
spring-cloud-stream-binder-kafka-docs/.jdk8 (new normal file, 0 changes)
spring-cloud-stream-binder-kafka-docs/pom.xml

@@ -5,7 +5,7 @@
   <parent>
     <groupId>org.springframework.cloud</groupId>
     <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-    <version>1.3.0.M2</version>
+    <version>2.0.0.M3</version>
   </parent>

   <artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
@@ -1,6 +1,6 @@
[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell
:doctype: book
:toc:
:toclevels: 4
@@ -23,12 +23,15 @@ Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinat

= Reference Guide
include::overview.adoc[]

include::dlq.adoc[]
include::metrics.adoc[]

include::partitions.adoc[]

= Appendices
[appendix]
include::building.adoc[]

include::contributing.adoc[]

// ======================================================================================
@@ -1,10 +0,0 @@
[[kafka-metrics]]
== Kafka metrics

Kafka binder module exposes the following metrics:

`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages
have not yet been consumed from the given binder's topic (`someTopic`) by the given consumer group (`someGroup`).
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then
consumer group `myGroup` has `1000` messages waiting to be consumed from topic `myTopic`. This metric is
particularly useful for providing auto-scaling feedback to the PaaS platform of your choice.
@@ -73,6 +73,11 @@ spring.cloud.stream.kafka.binder.headers::
The list of custom headers that will be transported by the binder.
+
Default: empty.
spring.cloud.stream.kafka.binder.healthTimeout::
The time, in seconds, to wait to get partition information.
Health will report as down if this timer expires.
+
Default: 10.
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
The frequency, in milliseconds, with which offsets are saved.
Ignored if `0`.
@@ -115,7 +120,18 @@ spring.cloud.stream.kafka.binder.socketBufferSize::
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
+
Default: `2097152`.
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
Enables transactions in the binder; see `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
+
Default: `null` (no transactions).
spring.cloud.stream.kafka.binder.transaction.producer.*::
Global producer properties for producers in a transactional binder.
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix`, <<kafka-producer-properties>>, and the general producer properties supported by all binders.
+
Default: See individual producer properties.
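
For example, a minimal `application.yml` fragment that turns on a transactional binder might look like the following sketch (the property names are taken from the entries above; the `retries` and `acks` overrides are illustrative assumptions, shown because transactional Kafka producers need them set appropriately):

[source, yaml]
----
spring:
  cloud:
    stream:
      kafka:
        binder:
          transaction:
            # any non-empty prefix enables transactions for this binder
            transaction-id-prefix: tx-
            producer:
              configuration:
                retries: 10
                acks: all
----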

[[kafka-consumer-properties]]
=== Kafka Consumer Properties

The following properties are available for Kafka consumers only and
@@ -169,6 +185,7 @@ dlqName::
+
Default: null (If not specified, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`).
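
As an illustrative sketch of the default described above (the binding name `input` and the topic name `my-dlq-topic` are assumptions for the example):

[source, yaml]
----
spring:
  cloud:
    stream:
      kafka:
        bindings:
          input:
            consumer:
              enable-dlq: true
              # without dlq-name, dead letters go to error.<destination>.<group>
              dlq-name: my-dlq-topic
----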

[[kafka-producer-properties]]
=== Kafka Producer Properties

The following properties are available for Kafka producers only and
@@ -192,6 +209,14 @@ messageKeyExpression::
For example `headers.key` or `payload.myKey`.
+
Default: `none`.
headerPatterns::
A comma-delimited list of simple patterns to match spring-messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
Patterns can begin or end with the wildcard character (asterisk).
Patterns can be negated by prefixing with `!`; matching stops after the first match (positive or negative).
For example, `!foo,fo*` will pass `fox` but not `foo`.
`id` and `timestamp` are never mapped.
+
Default: `*` (all headers, except the `id` and `timestamp`).
configuration::
Map with a key/value pair containing generic Kafka producer properties.
+
@@ -523,5 +548,24 @@ spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.c
spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde
----

[[kafka-error-channels]]
== Error Channels

Starting with _version 1.3_, the binder unconditionally sends exceptions to an error channel for each consumer destination, and it can also be configured to send async producer send failures to an error channel.
See <<binder-error-channels>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:

* `failedMessage` - the spring-messaging `Message<?>` that failed to be sent.
* `record` - the raw `ProducerRecord` that was created from the `failedMessage`.

There is no automatic handling of these exceptions (such as sending to a <<kafka-dlq-processing, dead-letter queue>>); you can consume these exceptions in your own Spring Integration flow, as in the sketch below.
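
A minimal sketch of such a flow, assuming a destination named `myTopic` and the binder's default producer error-channel naming (`<destination>.errors`); the handler class itself is hypothetical:

[source, java]
----
import org.springframework.cloud.stream.binder.kafka.KafkaSendFailureException;
import org.springframework.integration.annotation.ServiceActivator;
import org.springframework.messaging.support.ErrorMessage;

public class SendFailureHandler {

	@ServiceActivator(inputChannel = "myTopic.errors") // channel name assumed from the destination
	public void handleSendFailure(ErrorMessage message) {
		KafkaSendFailureException failure = (KafkaSendFailureException) message.getPayload();
		// failedMessage and record expose the properties listed above
		System.err.println("Send failed for " + failure.getRecord() + ": " + failure.getCause());
	}

}
----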

[[kafka-metrics]]
== Kafka Metrics

The Kafka binder module exposes the following metric:

`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages have not yet been consumed from the given binder's topic by the given consumer group.
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then consumer group `myGroup` has `1000` messages waiting to be consumed from topic `myTopic`.
This metric is particularly useful for providing auto-scaling feedback to the PaaS platform of your choice.
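
A hedged sketch of reading that gauge through Micrometer (the metric name comes from above; the `LagMonitor` class and the injected `MeterRegistry` are assumptions for the example):

[source, java]
----
import io.micrometer.core.instrument.MeterRegistry;

public class LagMonitor {

	private final MeterRegistry registry;

	public LagMonitor(MeterRegistry registry) {
		this.registry = registry;
	}

	// Returns the last recorded lag for consumer group 'myGroup' on topic 'myTopic'.
	public double myGroupMyTopicLag() {
		return this.registry.get("spring.cloud.stream.binder.kafka.myGroup.myTopic.lag").gauge().value();
	}

}
----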

@@ -0,0 +1,100 @@
== Partitioning with the Kafka Binder

Apache Kafka supports topic partitioning natively.

Sometimes it is advantageous to send data to specific partitions, for example when you want to strictly order message processing (all messages for a particular customer should go to the same partition).

The following illustrates how to configure the producer and consumer side:

[source, java]
----
@SpringBootApplication
@EnableBinding(Source.class)
public class KafkaPartitionProducerApplication {

	private static final Random RANDOM = new Random(System.currentTimeMillis());

	private static final String[] data = new String[] {
			"foo1", "bar1", "qux1",
			"foo2", "bar2", "qux2",
			"foo3", "bar3", "qux3",
			"foo4", "bar4", "qux4",
			};

	public static void main(String[] args) {
		new SpringApplicationBuilder(KafkaPartitionProducerApplication.class)
			.web(false)
			.run(args);
	}

	@InboundChannelAdapter(channel = Source.OUTPUT, poller = @Poller(fixedRate = "5000"))
	public Message<?> generate() {
		String value = data[RANDOM.nextInt(data.length)];
		System.out.println("Sending: " + value);
		return MessageBuilder.withPayload(value)
				.setHeader("partitionKey", value)
				.build();
	}

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        output:
          destination: partitioned.topic
          producer:
            partitioned: true
            partition-key-expression: headers['partitionKey']
            partition-count: 12
----

IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
The above configuration will support up to 12 consumer instances (or 6 if their `concurrency` is 2, etc.).
It is generally best to "over provision" the partitions to allow for future increases in consumers and/or concurrency.

NOTE: The above configuration uses the default partitioning (`key.hashCode() % partitionCount`).
This may or may not provide a suitably balanced algorithm, depending on the key values; you can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties, as in the sketch at the end of this section.

Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
Kafka will allocate partitions across the instances.

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class KafkaPartitionConsumerApplication {

	public static void main(String[] args) {
		new SpringApplicationBuilder(KafkaPartitionConsumerApplication.class)
			.web(false)
			.run(args);
	}

	@StreamListener(Sink.INPUT)
	public void listen(@Payload String in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
		System.out.println(in + " received from partition " + partition);
	}

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        input:
          destination: partitioned.topic
          group: myGroup
----

You can add instances as needed; Kafka will rebalance the partition allocations.
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers will be idle.
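
To make the override mentioned in the note above concrete, a hedged sketch (assuming, as in core Spring Cloud Stream, that `partition-selector-expression` is evaluated against the partition key and the result is taken modulo the partition count):

[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        output:
          producer:
            partition-key-expression: headers['partitionKey']
            # equivalent to the default key.hashCode() selection; shown only for the syntax
            partition-selector-expression: hashCode()
            partition-count: 12
----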
spring-cloud-stream-binder-kafka/.jdk8 (new file, 0 lines)
@@ -10,7 +10,7 @@
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>1.3.0.M2</version>
		<version>2.0.0.M3</version>
	</parent>

	<dependencies>
@@ -27,10 +27,6 @@
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-codec</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-autoconfigure</artifactId>
@@ -52,11 +48,6 @@
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka</artifactId>
			<version>${spring-kafka.version}</version>
		</dependency>
		<dependency>
			<groupId>org.springframework.integration</groupId>
			<artifactId>spring-integration-kafka</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
@@ -81,20 +72,4 @@
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-jar-plugin</artifactId>
				<version>3.0.2</version>
				<executions>
					<execution>
						<goals>
							<goal>test-jar</goal>
						</goals>
					</execution>
				</executions>
			</plugin>
		</plugins>
	</build>
</project>
@@ -19,6 +19,13 @@ package org.springframework.cloud.stream.binder.kafka;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
@@ -33,42 +40,91 @@ import org.springframework.kafka.core.ConsumerFactory;
 * @author Ilayaperumal Gopinathan
 * @author Marius Bogoevici
 * @author Henryk Konsek
 * @author Gary Russell
 * @author Laur Aliste
 */
public class KafkaBinderHealthIndicator implements HealthIndicator {

    private static final int DEFAULT_TIMEOUT = 60;

    private final KafkaMessageChannelBinder binder;

    private final ConsumerFactory<?, ?> consumerFactory;

    public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
            ConsumerFactory<?, ?> consumerFactory) {
    private int timeout = DEFAULT_TIMEOUT;

    private Consumer<?, ?> metadataConsumer;

    public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder, ConsumerFactory<?, ?> consumerFactory) {
        this.binder = binder;
        this.consumerFactory = consumerFactory;
    }

    /**
     * Set the timeout in seconds to retrieve health information.
     *
     * @param timeout the timeout - default 60.
     */
    public void setTimeout(int timeout) {
        this.timeout = timeout;
    }

    @Override
    public Health health() {
        try (Consumer<?, ?> metadataConsumer = consumerFactory.createConsumer()) {
            Set<String> downMessages = new HashSet<>();
            for (String topic : this.binder.getTopicsInUse().keySet()) {
                List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
                for (PartitionInfo partitionInfo : partitionInfos) {
                    if (this.binder.getTopicsInUse().get(topic).getPartitionInfos().contains(partitionInfo)
                            && partitionInfo.leader()
                                    .id() == -1) {
                        downMessages.add(partitionInfo.toString());
        ExecutorService exec = Executors.newSingleThreadExecutor();
        Future<Health> future = exec.submit(new Callable<Health>() {

            @Override
            public Health call() {
                try {
                    if (metadataConsumer == null) {
                        metadataConsumer = consumerFactory.createConsumer();
                    }
                    Set<String> downMessages = new HashSet<>();
                    for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
                        List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
                        for (PartitionInfo partitionInfo : partitionInfos) {
                            if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
                                    .contains(partitionInfo) && partitionInfo.leader().id() == -1) {
                                downMessages.add(partitionInfo.toString());
                            }
                        }
                    }
                    if (downMessages.isEmpty()) {
                        return Health.up().build();
                    }
                    else {
                        return Health.down()
                                .withDetail("Following partitions in use have no leaders: ", downMessages.toString())
                                .build();
                    }
                }
                catch (Exception e) {
                    return Health.down(e).build();
                }
            }
        if (downMessages.isEmpty()) {
            return Health.up().build();
        }
        return Health.down().withDetail("Following partitions in use have no leaders: ", downMessages.toString())

        });
        try {
            return future.get(this.timeout, TimeUnit.SECONDS);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Health.down()
                    .withDetail("Interrupted while waiting for partition information in", this.timeout + " seconds")
                    .build();
        }
        catch (Exception e) {
        catch (ExecutionException e) {
            return Health.down(e).build();
        }
        catch (TimeoutException e) {
            return Health.down()
                    .withDetail("Failed to retrieve partition information in", this.timeout + " seconds")
                    .build();
        }
        finally {
            exec.shutdownNow();
        }
    }

}
@@ -1,118 +0,0 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;

import org.apache.kafka.common.security.JaasUtils;

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.util.Assert;

/**
 * @author Marius Bogoevici
 */
public class KafkaBinderJaasInitializerListener implements ApplicationListener<ContextRefreshedEvent>,
        ApplicationContextAware, DisposableBean {

    public static final String DEFAULT_ZK_LOGIN_CONTEXT_NAME = "Client";

    private ApplicationContext applicationContext;

    private final boolean ignoreJavaLoginConfigParamSystemProperty;

    private final File placeholderJaasConfiguration;

    public KafkaBinderJaasInitializerListener() throws IOException {
        // we ignore the system property if it wasn't originally set at launch
        this.ignoreJavaLoginConfigParamSystemProperty =
                (System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) == null);
        this.placeholderJaasConfiguration = File.createTempFile("kafka-client-jaas-config-placeholder", "conf");
        this.placeholderJaasConfiguration.deleteOnExit();
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    @Override
    public void destroy() throws Exception {
        if (this.ignoreJavaLoginConfigParamSystemProperty) {
            System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
        }
    }

    @Override
    public void onApplicationEvent(ContextRefreshedEvent event) {
        if (event.getSource() == this.applicationContext) {
            KafkaBinderConfigurationProperties binderConfigurationProperties =
                    applicationContext.getBean(KafkaBinderConfigurationProperties.class);
            // only use programmatic support if a file is not set via system property
            if (ignoreJavaLoginConfigParamSystemProperty
                    && binderConfigurationProperties.getJaas() != null) {
                Map<String, AppConfigurationEntry[]> configurationEntries = new HashMap<>();
                AppConfigurationEntry kafkaClientConfigurationEntry = new AppConfigurationEntry
                        (binderConfigurationProperties.getJaas().getLoginModule(),
                        binderConfigurationProperties.getJaas().getControlFlagValue(),
                        binderConfigurationProperties.getJaas().getOptions() != null ?
                                binderConfigurationProperties.getJaas().getOptions() :
                                Collections.<String, Object>emptyMap());
                configurationEntries.put(JaasUtils.LOGIN_CONTEXT_CLIENT,
                        new AppConfigurationEntry[]{ kafkaClientConfigurationEntry });
                Configuration.setConfiguration(new InternalConfiguration(configurationEntries));
                // Workaround for a 0.9 client issue where even if the Configuration is set
                // a system property check is performed.
                // Since the Configuration already exists, this will be ignored.
                if (this.placeholderJaasConfiguration != null) {
                    System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, this.placeholderJaasConfiguration.getAbsolutePath());
                }
            }
        }
    }

    /**
     * A {@link Configuration} set up programmatically by the Kafka binder
     */
    public static class InternalConfiguration extends Configuration {

        private final Map<String, AppConfigurationEntry[]> configurationEntries;

        public InternalConfiguration(Map<String, AppConfigurationEntry[]> configurationEntries) {
            Assert.notNull(configurationEntries, " cannot be null");
            Assert.notEmpty(configurationEntries, " cannot be empty");
            this.configurationEntries = configurationEntries;
        }

        @Override
        public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
            return configurationEntries.get(name);
        }
    }
}
@@ -1,11 +1,11 @@
/*
 * Copyright 2017 the original author or authors.
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,25 +13,25 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.MeterBinder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.springframework.boot.actuate.endpoint.PublicMetrics;
import org.springframework.boot.actuate.metrics.Metric;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
@@ -42,9 +42,9 @@ import org.springframework.util.ObjectUtils;
 *
 * @author Henryk Konsek
 */
public class KafkaBinderMetrics implements PublicMetrics {
public class KafkaBinderMetrics implements MeterBinder {

    private final static Logger LOG = LoggerFactory.getLogger(KafkaBinderMetrics.class);
    private final static Log LOG = LogFactory.getLog(KafkaBinderMetrics.class);

    static final String METRIC_PREFIX = "spring.cloud.stream.binder.kafka";

@@ -68,8 +68,7 @@ public class KafkaBinderMetrics implements PublicMetrics {
    }

    @Override
    public Collection<Metric<?>> metrics() {
        List<Metric<?>> metrics = new LinkedList<>();
    public void bindTo(MeterRegistry registry) {
        for (Map.Entry<String, KafkaMessageChannelBinder.TopicInformation> topicInfo : this.binder.getTopicsInUse()
                .entrySet()) {
            if (!topicInfo.getValue().isConsumerTopic()) {
@@ -96,13 +95,12 @@ public class KafkaBinderMetrics implements PublicMetrics {
                    lag += endOffset.getValue();
                }
            }
            metrics.add(new Metric<>(String.format("%s.%s.%s.lag", METRIC_PREFIX, group, topic), lag));
            registry.gauge(String.format("%s.%s.%s.lag", METRIC_PREFIX, group, topic), lag);
        }
        catch (Exception e) {
            LOG.debug("Cannot generate metric for topic: " + topic, e);
        }
    }
    return metrics;
    }

    private ConsumerFactory<?, ?> createConsumerFactory(String group) {
@@ -123,4 +121,4 @@ public class KafkaBinderMetrics implements PublicMetrics {
        return new DefaultKafkaConsumerFactory<>(props);
    }

}
}
@@ -21,24 +21,30 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Utils;

import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
@@ -58,16 +64,21 @@ import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.config.ContainerProperties;
import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
import org.springframework.kafka.support.KafkaHeaderMapper;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionInitialOffset;
import org.springframework.messaging.Message;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.kafka.transaction.KafkaTransactionManager;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.MessageHeaders;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
@@ -76,7 +87,7 @@ import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * A {@link Binder} that uses Kafka as the underlying middleware.
 * A {@link org.springframework.cloud.stream.binder.Binder} that uses Kafka as the underlying middleware.
 *
 * @author Eric Bottard
 * @author Marius Bogoevici
@@ -89,22 +100,31 @@ import org.springframework.util.concurrent.ListenableFutureCallback;
 * @author Doug Saus
 */
public class KafkaMessageChannelBinder extends
        AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
                ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
        AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
        implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties> {

    private final KafkaBinderConfigurationProperties configurationProperties;

    private final Map<String, TopicInformation> topicsInUse = new HashMap<>();

    private final KafkaTransactionManager<byte[], byte[]> transactionManager;

    private ProducerListener<byte[], byte[]> producerListener;

    private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();

    public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
            KafkaTopicProvisioner provisioningProvider) {
        super(false, headersToMap(configurationProperties), provisioningProvider);
        super(headersToMap(configurationProperties), provisioningProvider);
        this.configurationProperties = configurationProperties;
        if (StringUtils.hasText(configurationProperties.getTransaction().getTransactionIdPrefix())) {
            this.transactionManager = new KafkaTransactionManager<>(
                    getProducerFactory(configurationProperties.getTransaction().getTransactionIdPrefix(),
                            new ExtendedProducerProperties<>(configurationProperties.getTransaction().getProducer())));
        }
        else {
            this.transactionManager = null;
        }
    }

    private static String[] headersToMap(KafkaBinderConfigurationProperties configurationProperties) {
@@ -147,16 +167,25 @@ public class KafkaMessageChannelBinder extends

    @Override
    protected MessageHandler createProducerMessageHandler(final ProducerDestination destination,
            ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
        final DefaultKafkaProducerFactory<byte[], byte[]> producerFB = getProducerFactory(producerProperties);
            ExtendedProducerProperties<KafkaProducerProperties> producerProperties, MessageChannel errorChannel)
            throws Exception {
        /*
         * IMPORTANT: With a transactional binder, individual producer properties for Kafka are
         * ignored; the global binder (spring.cloud.stream.kafka.binder.transaction.producer.*)
         * properties are used instead, for all producers. A binder is transactional when
         * 'spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix' has text.
         */
        final ProducerFactory<byte[], byte[]> producerFB = this.transactionManager != null
                ? this.transactionManager.getProducerFactory()
                : getProducerFactory(null, producerProperties);
        Collection<PartitionInfo> partitions = provisioningProvider.getPartitionsForTopic(
                producerProperties.getPartitionCount(),
                false,
                new Callable<Collection<PartitionInfo>>() {
                    @Override
                    public Collection<PartitionInfo> call() throws Exception {
                        return producerFB.createProducer().partitionsFor(destination.getName());
                    }
                producerProperties.getPartitionCount(), false,
                () -> {
                    Producer<byte[], byte[]> producer = producerFB.createProducer();
                    List<PartitionInfo> partitionsFor = producer.partitionsFor(destination.getName());
                    producer.close();
                    ((DisposableBean) producerFB).destroy();
                    return partitionsFor;
                });
        this.topicsInUse.put(destination.getName(), new TopicInformation(null, partitions));
        if (producerProperties.getPartitionCount() < partitions.size()) {
@@ -165,17 +194,60 @@ public class KafkaMessageChannelBinder extends
                    + producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
                    + partitions.size() + " of the topic. The larger number will be used instead.");
        }
        /*
         * This is dirty; it relies on the fact that we, and the partition interceptor, share a
         * hard reference to the producer properties instance. But I don't see another way to fix
         * it since the interceptor has already been added to the channel, and we don't have
         * access to the channel here; if we did, we could inject the proper partition count
         * there. TODO: Consider this when doing the 2.0 binder restructuring.
         */
        producerProperties.setPartitionCount(partitions.size());
    }

    KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFB);
    if (this.producerListener != null) {
        kafkaTemplate.setProducerListener(this.producerListener);
    }
    return new ProducerConfigurationMessageHandler(kafkaTemplate, destination.getName(), producerProperties,
            producerFB);
    ProducerConfigurationMessageHandler handler = new ProducerConfigurationMessageHandler(kafkaTemplate,
            destination.getName(), producerProperties, producerFB);
    if (errorChannel != null) {
        handler.setSendFailureChannel(errorChannel);
    }
    KafkaHeaderMapper mapper = null;
    if (this.configurationProperties.getHeaderMapperBeanName() != null) {
        mapper = getApplicationContext().getBean(this.configurationProperties.getHeaderMapperBeanName(),
                KafkaHeaderMapper.class);
    }
    /*
     * Even if the user configures a bean, we must not use it if the header
     * mode is not the default (headers); setting the mapper to null
     * disables populating headers in the message handler.
     */
    if (producerProperties.getHeaderMode() != null
            && !HeaderMode.headers.equals(producerProperties.getHeaderMode())) {
        mapper = null;
    }
    else if (mapper == null) {
        String[] headerPatterns = producerProperties.getExtension().getHeaderPatterns();
        if (headerPatterns != null && headerPatterns.length > 0) {
            List<String> patterns = new LinkedList<>(Arrays.asList(headerPatterns));
            if (!patterns.contains("!" + MessageHeaders.TIMESTAMP)) {
                patterns.add(0, "!" + MessageHeaders.TIMESTAMP);
            }
            if (!patterns.contains("!" + MessageHeaders.ID)) {
                patterns.add(0, "!" + MessageHeaders.ID);
            }
            mapper = new DefaultKafkaHeaderMapper(patterns.toArray(new String[patterns.size()]));
        }
        else {
            mapper = new DefaultKafkaHeaderMapper();
        }
    }
    handler.setHeaderMapper(mapper);
    return handler;
}

    private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
    private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(String transactionIdPrefix,
            ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
@@ -204,7 +276,11 @@ public class KafkaMessageChannelBinder extends
        if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
            props.putAll(producerProperties.getExtension().getConfiguration());
        }
        return new DefaultKafkaProducerFactory<>(props);
        DefaultKafkaProducerFactory<byte[], byte[]> producerFactory = new DefaultKafkaProducerFactory<>(props);
        if (transactionIdPrefix != null) {
            producerFactory.setTransactionIdPrefix(transactionIdPrefix);
        }
        return producerFactory;
    }

    @Override
@@ -223,11 +299,11 @@ public class KafkaMessageChannelBinder extends

        Collection<PartitionInfo> allPartitions = provisioningProvider.getPartitionsForTopic(partitionCount,
                extendedConsumerProperties.getExtension().isAutoRebalanceEnabled(),
                new Callable<Collection<PartitionInfo>>() {
                    @Override
                    public Collection<PartitionInfo> call() throws Exception {
                        return consumerFactory.createConsumer().partitionsFor(destination.getName());
                    }
                () -> {
                    Consumer<?, ?> consumer = consumerFactory.createConsumer();
                    List<PartitionInfo> partitionsFor = consumer.partitionsFor(destination.getName());
                    consumer.close();
                    return partitionsFor;
                });

        Collection<PartitionInfo> listenedPartitions;
@@ -256,11 +332,13 @@
                || extendedConsumerProperties.getExtension().isAutoRebalanceEnabled()
                        ? new ContainerProperties(destination.getName())
                        : new ContainerProperties(topicPartitionInitialOffsets);
        if (this.transactionManager != null) {
            containerProperties.setTransactionManager(this.transactionManager);
        }
        int concurrency = Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
        @SuppressWarnings("rawtypes")
        final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer =
                new ConcurrentMessageListenerContainer(
                        consumerFactory, containerProperties) {
        final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer = new ConcurrentMessageListenerContainer(
                consumerFactory, containerProperties) {

            @Override
            public void stop(Runnable callback) {
@@ -284,6 +362,32 @@
        }
        final KafkaMessageDrivenChannelAdapter<?, ?> kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter<>(
                messageListenerContainer);
        MessagingMessageConverter messageConverter = new MessagingMessageConverter();
        KafkaHeaderMapper mapper = null;
        if (this.configurationProperties.getHeaderMapperBeanName() != null) {
            mapper = getApplicationContext().getBean(this.configurationProperties.getHeaderMapperBeanName(),
                    KafkaHeaderMapper.class);
        }
        if (mapper == null) {
            DefaultKafkaHeaderMapper headerMapper = new DefaultKafkaHeaderMapper() {

                @Override
                public void toHeaders(Headers source, Map<String, Object> headers) {
                    super.toHeaders(source, headers);
                    if (headers.size() > 0) {
                        headers.put(BinderHeaders.NATIVE_HEADERS_PRESENT, Boolean.TRUE);
                    }
                }

            };
            String[] trustedPackages = extendedConsumerProperties.getExtension().getTrustedPackages();
            if (!StringUtils.isEmpty(trustedPackages)) {
                headerMapper.addTrustedPackages(trustedPackages);
            }
            mapper = headerMapper;
        }
        messageConverter.setHeaderMapper(mapper);
        kafkaMessageDrivenChannelAdapter.setMessageConverter(messageConverter);
        kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
        ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, consumerGroup,
                extendedConsumerProperties);
@@ -306,48 +410,47 @@
    protected MessageHandler getErrorMessageHandler(final ConsumerDestination destination, final String group,
            final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties) {
        if (extendedConsumerProperties.getExtension().isEnableDlq()) {
            DefaultKafkaProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
                    new ExtendedProducerProperties<>(new KafkaProducerProperties()));
            ProducerFactory<byte[], byte[]> producerFactory = this.transactionManager != null
                    ? this.transactionManager.getProducerFactory()
                    : getProducerFactory(null, new ExtendedProducerProperties<>(new KafkaProducerProperties()));
            final KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFactory);
            return new MessageHandler() {
            return message -> {
                final ConsumerRecord<?, ?> record = message.getHeaders()
                        .get(KafkaHeaders.RAW_DATA, ConsumerRecord.class);
                final byte[] key = record.key() != null ? Utils.toArray(ByteBuffer.wrap((byte[]) record.key()))
                        : null;
                final byte[] payload = record.value() != null
                        ? Utils.toArray(ByteBuffer.wrap((byte[]) record.value()))
                        : null;
                String dlqName = StringUtils.hasText(extendedConsumerProperties.getExtension().getDlqName())
                        ? extendedConsumerProperties.getExtension().getDlqName()
                        : "error." + destination.getName() + "." + group;
                ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(dlqName, record.partition(),
                        key, payload, record.headers());
                ListenableFuture<SendResult<byte[], byte[]>> sentDlq = kafkaTemplate.send(producerRecord);
                sentDlq.addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {
                    StringBuilder sb = new StringBuilder().append(" a message with key='")
                            .append(toDisplayString(ObjectUtils.nullSafeToString(key), 50)).append("'")
                            .append(" and payload='")
                            .append(toDisplayString(ObjectUtils.nullSafeToString(payload), 50))
                            .append("'").append(" received from ")
                            .append(record.partition());

                @Override
                public void handleMessage(Message<?> message) throws MessagingException {
                    final ConsumerRecord<?, ?> record = message.getHeaders()
                            .get(KafkaMessageDrivenChannelAdapter.KAFKA_RAW_DATA, ConsumerRecord.class);
                    final byte[] key = record.key() != null ? Utils.toArray(ByteBuffer.wrap((byte[]) record.key()))
                            : null;
                    final byte[] payload = record.value() != null
                            ? Utils.toArray(ByteBuffer.wrap((byte[]) record.value())) : null;
                    String dlqName = StringUtils.hasText(extendedConsumerProperties.getExtension().getDlqName())
                            ? extendedConsumerProperties.getExtension().getDlqName()
                            : "error." + destination.getName() + "." + group;
                    ListenableFuture<SendResult<byte[], byte[]>> sentDlq = kafkaTemplate.send(dlqName,
                            record.partition(), key, payload);
                    sentDlq.addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {
                        StringBuilder sb = new StringBuilder().append(" a message with key='")
                                .append(toDisplayString(ObjectUtils.nullSafeToString(key), 50)).append("'")
                                .append(" and payload='")
                                .append(toDisplayString(ObjectUtils.nullSafeToString(payload), 50))
                                .append("'").append(" received from ")
                                .append(record.partition());
                        @Override
                        public void onFailure(Throwable ex) {
                            KafkaMessageChannelBinder.this.logger.error(
                                    "Error sending to DLQ " + sb.toString(), ex);
                        }

                    @Override
                    public void onFailure(Throwable ex) {
                        KafkaMessageChannelBinder.this.logger.error(
                                "Error sending to DLQ " + sb.toString(), ex);
                        @Override
                        public void onSuccess(SendResult<byte[], byte[]> result) {
                            if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
                                KafkaMessageChannelBinder.this.logger.debug(
                                        "Sent to DLQ " + sb.toString());
                            }
                        }

                    @Override
                    public void onSuccess(SendResult<byte[], byte[]> result) {
                        if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
                            KafkaMessageChannelBinder.this.logger.debug(
                                    "Sent to DLQ " + sb.toString());
                        }
                    }

                });
            }
                });
            };
        }
        return null;
@@ -411,11 +514,11 @@

        private boolean running = true;

        private final DefaultKafkaProducerFactory<byte[], byte[]> producerFactory;
        private final ProducerFactory<byte[], byte[]> producerFactory;

        private ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
        ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
                ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
                DefaultKafkaProducerFactory<byte[], byte[]> producerFactory) {
                ProducerFactory<byte[], byte[]> producerFactory) {
            super(kafkaTemplate);
            setTopicExpression(new LiteralExpression(topic));
            setMessageKeyExpression(producerProperties.getExtension().getMessageKeyExpression());
@@ -443,7 +546,9 @@

        @Override
        public void stop() {
            producerFactory.stop();
            if (this.producerFactory instanceof Lifecycle) {
                ((Lifecycle) producerFactory).stop();
            }
            this.running = false;
        }
@@ -20,43 +20,34 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import io.micrometer.core.instrument.binder.MeterBinder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.utils.AppInfoParser;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.endpoint.PublicMetrics;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderJaasInitializerListener;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.KafkaAdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationListener;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.core.type.AnnotatedTypeMetadata;
import org.springframework.integration.codec.Codec;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.util.ObjectUtils;
@@ -68,18 +59,16 @@ import org.springframework.util.ObjectUtils;
 * @author Mark Fisher
 * @author Ilayaperumal Gopinathan
 * @author Henryk Konsek
 * @author Gary Russell
 */
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class})
@Import({ PropertyPlaceholderAutoConfiguration.class})
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {

    protected static final Log logger = LogFactory.getLog(KafkaBinderConfiguration.class);

    @Autowired
    private Codec codec;

    @Autowired
    private KafkaBinderConfigurationProperties configurationProperties;

@@ -104,7 +93,6 @@ public class KafkaBinderConfiguration {
    KafkaMessageChannelBinder kafkaMessageChannelBinder() {
        KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
                this.configurationProperties, provisioningProvider());
        kafkaMessageChannelBinder.setCodec(this.codec);
        kafkaMessageChannelBinder.setProducerListener(producerListener);
        kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
        return kafkaMessageChannelBinder;
@@ -128,49 +116,26 @@ public class KafkaBinderConfiguration {
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
        }
        ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
        return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, consumerFactory);
        KafkaBinderHealthIndicator indicator = new KafkaBinderHealthIndicator(kafkaMessageChannelBinder,
                consumerFactory);
        indicator.setTimeout(this.configurationProperties.getHealthTimeout());
        return indicator;
    }

    @Bean
    public PublicMetrics kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
    public MeterBinder kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
        return new KafkaBinderMetrics(kafkaMessageChannelBinder, configurationProperties);
    }

    @Bean(name = "adminUtilsOperation")
    @Conditional(Kafka09Present.class)
    @ConditionalOnClass(name = "kafka.admin.AdminUtils")
    public AdminUtilsOperation kafka09AdminUtilsOperation() {
        logger.info("AdminUtils selected: Kafka 0.9 AdminUtils");
        return new Kafka09AdminUtilsOperation();
    }

    @Bean(name = "adminUtilsOperation")
    @Conditional(Kafka10Present.class)
    @ConditionalOnClass(name = "kafka.admin.AdminUtils")
    public AdminUtilsOperation kafka10AdminUtilsOperation() {
        logger.info("AdminUtils selected: Kafka 0.10 AdminUtils");
        return new Kafka10AdminUtilsOperation();
        return new KafkaAdminUtilsOperation();
    }

    @Bean
    public ApplicationListener<?> jaasInitializer() throws IOException {
        return new KafkaBinderJaasInitializerListener();
    }

    static class Kafka10Present implements Condition {

        @Override
        public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
            return AppInfoParser.getVersion().startsWith("0.10");
        }
    }

    static class Kafka09Present implements Condition {

        @Override
        public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
            return AppInfoParser.getVersion().startsWith("0.9");
        }
    public KafkaJaasLoginModuleInitializer jaasInitializer() throws IOException {
        return new KafkaJaasLoginModuleInitializer();
    }

    public static class JaasConfigurationProperties {
@@ -13,25 +13,15 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.cloud.stream.binder.kafka;

import java.util.List;
package org.springframework.cloud.stream.binder.kafka;

import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.PublishSubscribeChannel;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
import org.springframework.integration.codec.kryo.PojoCodec;
import org.springframework.integration.context.IntegrationContextUtils;
import org.springframework.integration.tuple.TupleKryoRegistrar;

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
import org.springframework.context.ApplicationContext;

/**
 * @author Soby Chacko
@@ -40,34 +30,19 @@ import com.esotericsoftware.kryo.Registration;
public abstract class AbstractKafkaTestBinder extends
        AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {

    private ApplicationContext applicationContext;

    @Override
    public void cleanup() {
        // do nothing - the rule will take care of that
    }

    protected void addErrorChannel(GenericApplicationContext context) {
        PublishSubscribeChannel errorChannel = new PublishSubscribeChannel();
        context.getBeanFactory().initializeBean(errorChannel, IntegrationContextUtils.ERROR_CHANNEL_BEAN_NAME);
        context.getBeanFactory().registerSingleton(IntegrationContextUtils.ERROR_CHANNEL_BEAN_NAME, errorChannel);
    protected final void setApplicationContext(ApplicationContext context) {
        this.applicationContext = context;
    }

    protected static Codec getCodec() {
        return new PojoCodec(new TupleRegistrar());
    }

    private static class TupleRegistrar implements KryoRegistrar {
        private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();

        @Override
        public void registerTypes(Kryo kryo) {
            this.delegate.registerTypes(kryo);
        }

        @Override
        public List<Registration> getRegistrations() {
            return this.delegate.getRegistrations();
        }
    public ApplicationContext getApplicationContext() {
        return this.applicationContext;
    }

}
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
@@ -66,10 +67,10 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(
|
||||
new KafkaProducerProperties());
|
||||
Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory",
|
||||
ExtendedProducerProperties.class);
|
||||
String.class, ExtendedProducerProperties.class);
|
||||
getProducerFactoryMethod.setAccessible(true);
|
||||
DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod
|
||||
.invoke(this.kafkaMessageChannelBinder, producerProperties);
|
||||
.invoke(this.kafkaMessageChannelBinder, "foo", producerProperties);
|
||||
Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs",
|
||||
Map.class);
|
||||
ReflectionUtils.makeAccessible(producerFactoryConfigField);
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
@@ -63,10 +64,10 @@ public class KafkaBinderConfigurationPropertiesTest {
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(
|
||||
kafkaProducerProperties);
|
||||
Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory",
|
||||
ExtendedProducerProperties.class);
|
||||
String.class, ExtendedProducerProperties.class);
|
||||
getProducerFactoryMethod.setAccessible(true);
|
||||
DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod
|
||||
.invoke(this.kafkaMessageChannelBinder, producerProperties);
|
||||
.invoke(this.kafkaMessageChannelBinder, "bar", producerProperties);
|
||||
Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs",
|
||||
Map.class);
|
||||
ReflectionUtils.makeAccessible(producerFactoryConfigField);
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.ArrayList;
|
||||
@@ -21,22 +22,27 @@ import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import org.springframework.boot.actuate.health.Health;
|
||||
import org.springframework.boot.actuate.health.Status;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.BDDMockito.given;
|
||||
|
||||
/**
|
||||
* @author Barry Commins
|
||||
* @author Gary Russell
|
||||
* @author Laur Aliste
|
||||
*/
|
||||
public class KafkaBinderHealthIndicatorTest {
|
||||
|
||||
@@ -53,21 +59,22 @@ public class KafkaBinderHealthIndicatorTest {
|
||||
@Mock
|
||||
private KafkaMessageChannelBinder binder;
|
||||
|
||||
private Map<String, KafkaMessageChannelBinder.TopicInformation> topicsInUse = new HashMap<>();
|
||||
private final Map<String, KafkaMessageChannelBinder.TopicInformation> topicsInUse = new HashMap<>();
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
MockitoAnnotations.initMocks(this);
|
||||
given(consumerFactory.createConsumer()).willReturn(consumer);
|
||||
given(binder.getTopicsInUse()).willReturn(topicsInUse);
|
||||
indicator = new KafkaBinderHealthIndicator(binder, consumerFactory);
|
||||
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
|
||||
org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
|
||||
this.indicator = new KafkaBinderHealthIndicator(binder, consumerFactory);
|
||||
this.indicator.setTimeout(10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void kafkaBinderIsUp() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.UP);
|
||||
}
|
||||
@@ -76,11 +83,57 @@ public class KafkaBinderHealthIndicatorTest {
|
||||
public void kafkaBinderIsDown() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
|
||||
}
|
||||
|
||||
@Test(timeout = 5000)
|
||||
public void kafkaBinderDoesNotAnswer() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willAnswer(new Answer<Object>() {
|
||||
|
||||
@Override
|
||||
public Object answer(InvocationOnMock invocation) throws Throwable {
|
||||
final int fiveMinutes = 1000 * 60 * 5;
|
||||
Thread.sleep(fiveMinutes);
|
||||
return partitions;
|
||||
}
|
||||
|
||||
});
|
||||
this.indicator.setTimeout(1);
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void createsConsumerOnceWhenInvokedMultipleTimes() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
|
||||
indicator.health();
|
||||
Health health = indicator.health();
|
||||
|
||||
assertThat(health.getStatus()).isEqualTo(Status.UP);
|
||||
org.mockito.Mockito.verify(this.consumerFactory).createConsumer();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void consumerCreationFailsFirstTime() {
|
||||
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willThrow(KafkaException.class)
|
||||
.willReturn(consumer);
|
||||
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
|
||||
|
||||
health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.UP);
|
||||
|
||||
org.mockito.Mockito.verify(this.consumerFactory, Mockito.times(2)).createConsumer();
|
||||
}
|
||||
|
||||
private List<PartitionInfo> partitions(Node leader) {
|
||||
List<PartitionInfo> partitions = new ArrayList<>();
|
||||
partitions.add(new PartitionInfo(TEST_TOPIC, 0, leader, null, null));
|
||||
|
||||
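The kafkaBinderDoesNotAnswer() test above only passes if the indicator bounds the broker query with the configured timeout. A minimal sketch of how such a guard can work (this is an illustration, not the binder's actual implementation): run the check on a separate thread and report DOWN when it exceeds the limit.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.springframework.boot.actuate.health.Health;

class TimeoutGuardedHealthCheck {

	private final ExecutorService executor = Executors.newSingleThreadExecutor();

	private int timeout = 60; // seconds, mirrors setTimeout(..) in the test

	void setTimeout(int timeout) {
		this.timeout = timeout;
	}

	Health health(Callable<Health> brokerCheck) {
		Future<Health> future = executor.submit(brokerCheck);
		try {
			return future.get(this.timeout, TimeUnit.SECONDS);
		}
		catch (TimeoutException e) {
			// a hung partitionsFor(..) call lands here instead of blocking forever
			future.cancel(true);
			return Health.down().withDetail("timeout", this.timeout + "s exceeded").build();
		}
		catch (Exception e) {
			return Health.down(e).build();
		}
	}
}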
@@ -1,61 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import javax.security.auth.login.AppConfigurationEntry;

import com.sun.security.auth.login.ConfigFile;

import org.apache.kafka.common.security.JaasUtils;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.core.io.ClassPathResource;

import static org.assertj.core.api.Assertions.assertThat;

/**
* @author Marius Bogoevici
*/
public class KafkaBinderJaasInitializerListenerTest {

@Test
public void testConfigurationParsedCorrectlyWithKafkaClient() throws Exception {
ConfigFile configFile = new ConfigFile(new ClassPathResource("jaas-sample-kafka-only.conf").getURI());
final AppConfigurationEntry[] kafkaConfigurationArray = configFile.getAppConfigurationEntry(JaasUtils.LOGIN_CONTEXT_CLIENT);

final ConfigurableApplicationContext context =
SpringApplication.run(SimpleApplication.class,
"--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true",
"--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true",
"--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab",
"--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM");
javax.security.auth.login.Configuration configuration = javax.security.auth.login.Configuration.getConfiguration();

final AppConfigurationEntry[] kafkaConfiguration = configuration.getAppConfigurationEntry(JaasUtils.LOGIN_CONTEXT_CLIENT);
assertThat(kafkaConfiguration).hasSize(1);
assertThat(kafkaConfiguration[0].getOptions()).isEqualTo(kafkaConfigurationArray[0].getOptions());
context.close();
}

@SpringBootApplication
public static class SimpleApplication {

}
}
@@ -1,5 +1,5 @@
/*
* Copyright 2017 the original author or authors.
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,14 +13,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.Node;
@@ -31,17 +33,11 @@ import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import org.springframework.boot.actuate.metrics.Metric;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder.TopicInformation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

import static java.util.Collections.singletonMap;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.BDDMockito.given;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyCollectionOf;
import static org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics.METRIC_PREFIX;

/**
* @author Henryk Konsek
@@ -61,6 +57,8 @@ public class KafkaBinderMetricsTest {
@Mock
private KafkaMessageChannelBinder binder;

private MeterRegistry meterRegistry = new SimpleMeterRegistry();

private Map<String, TopicInformation> topicsInUse = new HashMap<>();

@Mock
@@ -69,24 +67,23 @@ public class KafkaBinderMetricsTest {
@Before
public void setup() {
MockitoAnnotations.initMocks(this);
given(consumerFactory.createConsumer()).willReturn(consumer);
given(binder.getTopicsInUse()).willReturn(topicsInUse);
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
metrics = new KafkaBinderMetrics(binder, kafkaBinderConfigurationProperties, consumerFactory);
given(consumer.endOffsets(anyCollectionOf(TopicPartition.class)))
.willReturn(singletonMap(new TopicPartition(TEST_TOPIC, 0), 1000L));
org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class)))
.willReturn(java.util.Collections.singletonMap(new TopicPartition(TEST_TOPIC, 0), 1000L));
}

@Test
public void shouldIndicateLag() {
given(consumer.committed(any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
Collection<Metric<?>> collectedMetrics = metrics.metrics();
assertThat(collectedMetrics).hasSize(1);
assertThat(collectedMetrics.iterator().next().getName())
.isEqualTo(String.format("%s.%s.%s.lag", METRIC_PREFIX, "group", TEST_TOPIC));
assertThat(collectedMetrics.iterator().next().getValue()).isEqualTo(500L);
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).hasSize(1);
MeterRegistry.Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
assertThat(group.gauge().get().value()).isEqualTo(500.0);
}

@Test
@@ -94,36 +91,34 @@ public class KafkaBinderMetricsTest {
Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(new TopicPartition(TEST_TOPIC, 0), 1000L);
endOffsets.put(new TopicPartition(TEST_TOPIC, 1), 1000L);
given(consumer.endOffsets(anyCollectionOf(TopicPartition.class))).willReturn(endOffsets);
given(consumer.committed(any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class))).willReturn(endOffsets);
org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
List<PartitionInfo> partitions = partitions(new Node(0, null, 0), new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
Collection<Metric<?>> collectedMetrics = metrics.metrics();
assertThat(collectedMetrics).hasSize(1);
assertThat(collectedMetrics.iterator().next().getName())
.isEqualTo(String.format("%s.%s.%s.lag", METRIC_PREFIX, "group", TEST_TOPIC));
assertThat(collectedMetrics.iterator().next().getValue()).isEqualTo(1000L);
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).hasSize(1);
MeterRegistry.Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
assertThat(group.gauge().get().value()).isEqualTo(1000.0);
}

@Test
public void shouldIndicateFullLagForNotCommittedGroups() {
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
Collection<Metric<?>> collectedMetrics = metrics.metrics();
assertThat(collectedMetrics).hasSize(1);
assertThat(collectedMetrics.iterator().next().getName())
.isEqualTo(String.format("%s.%s.%s.lag", METRIC_PREFIX, "group", TEST_TOPIC));
assertThat(collectedMetrics.iterator().next().getValue()).isEqualTo(1000L);
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).hasSize(1);
MeterRegistry.Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
assertThat(group.gauge().get().value()).isEqualTo(1000.0);
}

@Test
public void shouldNotCalculateLagForProducerTopics() {
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation(null, partitions));
Collection<Metric<?>> collectedMetrics = metrics.metrics();
assertThat(collectedMetrics).isEmpty();
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).isEmpty();
}

private List<PartitionInfo> partitions(Node... nodes) {
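The expectations in these tests (lag 500 with end offset 1000 and committed 500; lag 1000 over two such partitions; full end offset for an uncommitted group) all fit one rule: per-partition lag is the end offset minus the committed offset, with a missing commit counting the whole partition. A sketch of that computation under that assumption — LagCalculator is illustrative, not the binder's internal class (the real code batches the endOffsets lookups):

import java.util.List;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

final class LagCalculator {

	static long totalLag(Consumer<?, ?> consumer, String topic) {
		long lag = 0;
		List<PartitionInfo> partitions = consumer.partitionsFor(topic);
		for (PartitionInfo info : partitions) {
			TopicPartition tp = new TopicPartition(topic, info.partition());
			long end = consumer.endOffsets(java.util.Collections.singleton(tp)).get(tp);
			OffsetAndMetadata committed = consumer.committed(tp);
			// no committed offset yet: the whole partition counts as lag
			lag += (committed != null) ? end - committed.offset() : end;
		}
		return lag;
	}
}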
File diff suppressed because it is too large
@@ -18,7 +18,7 @@ package org.springframework.cloud.stream.binder.kafka;

import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.KafkaAdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
@@ -37,12 +37,12 @@ import org.springframework.kafka.support.ProducerListener;
* @author Gary Russell
* @author Soby Chacko
*/
public class Kafka10TestBinder extends AbstractKafkaTestBinder {
public class KafkaTestBinder extends AbstractKafkaTestBinder {

@SuppressWarnings({ "rawtypes", "unchecked" })
public Kafka10TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
try {
AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
AdminUtilsOperation adminUtilsOperation = new KafkaAdminUtilsOperation();
KafkaTopicProvisioner provisioningProvider =
new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
provisioningProvider.afterPropertiesSet();
@@ -63,10 +63,10 @@ public class Kafka10TestBinder extends AbstractKafkaTestBinder {

};

binder.setCodec(AbstractKafkaTestBinder.getCodec());
ProducerListener producerListener = new LoggingProducerListener();
binder.setProducerListener(producerListener);
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(Config.class);
setApplicationContext(context);
binder.setApplicationContext(context);
binder.afterPropertiesSet();
this.setBinder(binder);
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.bootstrap;

import org.junit.ClassRule;

@@ -4,6 +4,7 @@
<pattern>%d{ISO8601} %5p %t %c{2}:%L - %m%n</pattern>
</encoder>
</appender>
<logger name="org.apache.kafka" level="DEBUG"/>
<logger name="org.springframework.integration.kafka" level="INFO"/>
<logger name="org.springframework.kafka" level="INFO"/>
<logger name="org.springframework.cloud.stream" level="INFO" />

@@ -1,8 +0,0 @@
{"namespace": "org.springframework.cloud.stream.binder.kafka",
"type": "record",
"name": "User1",
"fields": [
{"name": "name", "type": "string"},
{"name": "favoriteColor", "type": "string"}
]
}
0	spring-cloud-stream-binder-kstream/.jdk8 (new file)
@@ -10,7 +10,7 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.3.0.M2</version>
<version>2.0.0.M3</version>
</parent>

<dependencies>
@@ -23,10 +23,6 @@
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-codec</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
@@ -52,13 +48,26 @@
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<classifier>test</classifier>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>jline</groupId>
<artifactId>jline</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>

@@ -23,18 +23,13 @@ import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KeyValueMapper;

import org.springframework.cloud.stream.binder.AbstractBinder;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.DefaultBinding;
import org.springframework.cloud.stream.binder.EmbeddedHeaderUtils;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.MessageValues;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
@@ -43,8 +38,6 @@ import org.springframework.cloud.stream.binder.kstream.config.KStreamConsumerPro
import org.springframework.cloud.stream.binder.kstream.config.KStreamExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kstream.config.KStreamProducerProperties;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.util.MimeType;
import org.springframework.util.StringUtils;

/**
@@ -54,17 +47,17 @@ public class KStreamBinder extends
AbstractBinder<KStream<Object, Object>, ExtendedConsumerProperties<KStreamConsumerProperties>, ExtendedProducerProperties<KStreamProducerProperties>>
implements ExtendedPropertiesBinder<KStream<Object, Object>, KStreamConsumerProperties, KStreamProducerProperties> {

private String[] headers;

private final KafkaTopicProvisioner kafkaTopicProvisioner;

private final KStreamExtendedBindingProperties kStreamExtendedBindingProperties;

private final StreamsConfig streamsConfig;

private final KafkaBinderConfigurationProperties binderConfigurationProperties;

public KStreamBinder(KafkaBinderConfigurationProperties binderConfigurationProperties, KafkaTopicProvisioner kafkaTopicProvisioner,
KStreamExtendedBindingProperties kStreamExtendedBindingProperties, StreamsConfig streamsConfig) {
this.headers = EmbeddedHeaderUtils.headersToEmbed(binderConfigurationProperties.getHeaders());
KStreamExtendedBindingProperties kStreamExtendedBindingProperties, StreamsConfig streamsConfig) {
this.binderConfigurationProperties = binderConfigurationProperties;
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
this.kStreamExtendedBindingProperties = kStreamExtendedBindingProperties;
this.streamsConfig = streamsConfig;
@@ -72,7 +65,7 @@ public class KStreamBinder extends

@Override
protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
KStream<Object, Object> inputTarget, ExtendedConsumerProperties<KStreamConsumerProperties> properties) {
KStream<Object, Object> inputTarget, ExtendedConsumerProperties<KStreamConsumerProperties> properties) {

ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<KafkaConsumerProperties>(
new KafkaConsumerProperties());
@@ -83,74 +76,50 @@ public class KStreamBinder extends
@Override
@SuppressWarnings("unchecked")
protected Binding<KStream<Object, Object>> doBindProducer(String name, KStream<Object, Object> outboundBindTarget,
ExtendedProducerProperties<KStreamProducerProperties> properties) {
ExtendedProducerProperties<KStreamProducerProperties> properties) {
ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties = new ExtendedProducerProperties<KafkaProducerProperties>(
new KafkaProducerProperties());
this.kafkaTopicProvisioner.provisionProducerDestination(name , extendedProducerProperties);
if (HeaderMode.embeddedHeaders.equals(properties.getHeaderMode())) {
outboundBindTarget = outboundBindTarget.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
@Override
public KeyValue<Object, Object> apply(Object k, Object v) {
if (v instanceof Message) {
try {
return new KeyValue<>(k, (Object)KStreamBinder.this.serializeAndEmbedHeadersIfApplicable((Message<?>) v));
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
else {
throw new IllegalArgumentException("Wrong type of message " + v);
}
}
});
this.kafkaTopicProvisioner.provisionProducerDestination(name, extendedProducerProperties);
if (!properties.isUseNativeEncoding()) {
outboundBindTarget = outboundBindTarget
.map((k, v) -> KeyValue.pair(k, (Object) KStreamBinder.this.serializePayloadIfNecessary((Message<?>) v)));
}
else {
if (!properties.isUseNativeEncoding()) {
outboundBindTarget = outboundBindTarget
.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
@Override
public KeyValue<Object, Object> apply(Object k, Object v) {
return KeyValue.pair(k, (Object)KStreamBinder.this.serializePayloadIfNecessary((Message<?>) v));
}
});
}
else {
outboundBindTarget = outboundBindTarget
.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
@Override
public KeyValue<Object, Object> apply(Object k, Object v) {
return KeyValue.pair(k, ((Message<?>) v).getPayload());
}
});
}
outboundBindTarget = outboundBindTarget
.map((k, v) -> KeyValue.pair(k, ((Message<Object>) v).getPayload()));
}
if (!properties.isUseNativeEncoding() || StringUtils.hasText(properties.getExtension().getKeySerde()) || StringUtils.hasText(properties.getExtension().getValueSerde())) {
Serde<?> keySerde = Serdes.ByteArray();
Serde<?> valueSerde = Serdes.ByteArray();
try {
Serde<?> keySerde;
Serde<?> valueSerde;

if (StringUtils.hasText(properties.getExtension().getKeySerde())) {
keySerde = Utils.newInstance(properties.getExtension().getKeySerde(), Serde.class);
if (keySerde instanceof Configurable) {
((Configurable) keySerde).configure(streamsConfig.originals());
}
}
}
catch (ClassNotFoundException e) {
throw new IllegalStateException("Serde class not found: ", e);
}
try {
else {
keySerde = this.binderConfigurationProperties.getConfiguration().containsKey("key.serde") ?
Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("key.serde"), Serde.class) : Serdes.ByteArray();
}

if (StringUtils.hasText(properties.getExtension().getValueSerde())) {
valueSerde = Utils.newInstance(properties.getExtension().getValueSerde(), Serde.class);
if (valueSerde instanceof Configurable) {
((Configurable) valueSerde).configure(streamsConfig.originals());
}
}
else {
valueSerde = this.binderConfigurationProperties.getConfiguration().containsKey("value.serde") ?
Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("value.serde"), Serde.class) : Serdes.ByteArray();
}
outboundBindTarget.to((Serde<Object>) keySerde, (Serde<Object>) valueSerde, name);
}
catch (ClassNotFoundException e) {
throw new IllegalStateException("Serde class not found: ", e);
}
outboundBindTarget.to((Serde<Object>) keySerde, (Serde<Object>) valueSerde, name);

}
else {
outboundBindTarget.to(name);
@@ -158,24 +127,6 @@ public class KStreamBinder extends
return new DefaultBinding<>(name, null, outboundBindTarget, null);
}

private byte[] serializeAndEmbedHeadersIfApplicable(Message<?> message) throws Exception {
MessageValues transformed = serializePayloadIfNecessary(message);
byte[] payload;

Object contentType = transformed.get(MessageHeaders.CONTENT_TYPE);
// transform content type headers to String, so that they can be properly embedded
// in JSON
if (contentType instanceof MimeType) {
transformed.put(MessageHeaders.CONTENT_TYPE, contentType.toString());
}
Object originalContentType = transformed.get(BinderHeaders.BINDER_ORIGINAL_CONTENT_TYPE);
if (originalContentType instanceof MimeType) {
transformed.put(BinderHeaders.BINDER_ORIGINAL_CONTENT_TYPE, originalContentType.toString());
}
payload = EmbeddedHeaderUtils.embedHeaders(transformed, headers);
return payload;
}

@Override
public KStreamConsumerProperties getExtendedConsumerProperties(String channelName) {
return this.kStreamExtendedBindingProperties.getExtendedConsumerProperties(channelName);
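The reworked doBindProducer above resolves outbound serdes in a fixed order: the binding-level keySerde/valueSerde property, then a binder-level "key.serde"/"value.serde" configuration entry, then a byte-array default. A compact sketch of just that fallback chain, assuming the ordering shown in the diff is the whole contract (resolveSerde is a hypothetical helper, not part of the binder's API):

import java.util.Map;

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Utils;
import org.springframework.util.StringUtils;

final class SerdeResolution {

	static Serde<?> resolveSerde(String bindingLevelClass, Map<String, String> binderConfig, String configKey)
			throws ClassNotFoundException {
		if (StringUtils.hasText(bindingLevelClass)) {
			// e.g. spring.cloud.stream.kstream.bindings.output.producer.valueSerde=...
			return Utils.newInstance(bindingLevelClass, Serde.class);
		}
		if (binderConfig.containsKey(configKey)) {
			// e.g. a "value.serde" entry in the binder configuration map
			return Utils.newInstance(binderConfig.get(configKey), Serde.class);
		}
		// nothing configured: fall back to raw bytes
		return Serdes.ByteArray();
	}
}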
@@ -16,9 +16,6 @@

package org.springframework.cloud.stream.binder.kstream;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.kafka.streams.KeyValue;
@@ -27,19 +24,13 @@ import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.kstream.KeyValueMapper;

import org.springframework.aop.framework.ProxyFactory;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.EmbeddedHeaderUtils;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.MessageSerializationUtils;
import org.springframework.cloud.stream.binder.MessageValues;
import org.springframework.cloud.stream.binder.StringConvertingContentTypeResolver;
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.support.MutableMessageHeaders;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
@@ -55,47 +46,33 @@ public class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KSt

private final BindingServiceProperties bindingServiceProperties;

private volatile Codec codec;

private final StringConvertingContentTypeResolver contentTypeResolver = new StringConvertingContentTypeResolver();

private volatile Map<String, Class<?>> payloadTypeCache = new ConcurrentHashMap<>();

private CompositeMessageConverterFactory compositeMessageConverterFactory;

public KStreamBoundElementFactory(KStreamBuilder streamBuilder, BindingServiceProperties bindingServiceProperties,
Codec codec, CompositeMessageConverterFactory compositeMessageConverterFactory) {
CompositeMessageConverterFactory compositeMessageConverterFactory) {
super(KStream.class);
this.bindingServiceProperties = bindingServiceProperties;
this.kStreamBuilder = streamBuilder;
this.codec = codec;
this.compositeMessageConverterFactory = compositeMessageConverterFactory;
}

@Override
public KStream createInput(String name) {
KStream<Object, Object> stream = kStreamBuilder.stream(bindingServiceProperties.getBindingDestination(name));
ConsumerProperties properties = bindingServiceProperties.getConsumerProperties(name);
if (HeaderMode.embeddedHeaders.equals(properties.getHeaderMode())) {

stream = stream.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
@Override
public KeyValue<Object, Object> apply(Object key, Object value) {
if (!(value instanceof byte[])) {
return new KeyValue<>(key, value);
}
try {
MessageValues messageValues = EmbeddedHeaderUtils
.extractHeaders(MessageBuilder.withPayload((byte[]) value).build(), true);
messageValues = deserializePayloadIfNecessary(messageValues);
return new KeyValue<Object, Object>(null, messageValues.toMessage());
}
catch (Exception e) {
throw new IllegalArgumentException(e);
}
}
});
}
stream = stream.map((key, value) -> {
KeyValue<Object, Object> keyValue;
BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
String contentType = bindingProperties.getContentType();
if (!StringUtils.isEmpty(contentType)) {
Message<Object> message = MessageBuilder.withPayload(value)
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
keyValue = new KeyValue<>(key, message);
}
else {
keyValue = new KeyValue<>(key, value);
}
return keyValue;
});
return stream;
}

@@ -106,43 +83,48 @@ public class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KSt
String contentType = bindingProperties.getContentType();
MessageConverter messageConverter = StringUtils.hasText(contentType) ? compositeMessageConverterFactory
.getMessageConverterForType(MimeType.valueOf(contentType)) : null;
KStreamWrapperHandler handler = new KStreamWrapperHandler(messageConverter);
KStreamWrapperHandler handler = new KStreamWrapperHandler(messageConverter, bindingServiceProperties, name);
ProxyFactory proxyFactory = new ProxyFactory(KStreamWrapper.class, KStream.class);
proxyFactory.addAdvice(handler);
return (KStream) proxyFactory.getProxy();
}

private MessageValues deserializePayloadIfNecessary(MessageValues messageValues) {
return MessageSerializationUtils.deserializePayload(messageValues, this.contentTypeResolver, this.codec);
}

interface KStreamWrapper {
public interface KStreamWrapper {

void wrap(KStream<Object, Object> delegate);
}

static class KStreamWrapperHandler implements KStreamWrapper, MethodInterceptor {
private static class KStreamWrapperHandler implements KStreamWrapper, MethodInterceptor {

private KStream<Object, Object> delegate;

private final MessageConverter messageConverter;
private final BindingServiceProperties bindingServiceProperties;
private String name;

public KStreamWrapperHandler(MessageConverter messageConverter) {
KStreamWrapperHandler(MessageConverter messageConverter,
BindingServiceProperties bindingServiceProperties,
String name) {
this.messageConverter = messageConverter;
this.bindingServiceProperties = bindingServiceProperties;
this.name = name;
}

public void wrap(KStream<Object, Object> delegate) {
Assert.notNull(delegate, "delegate cannot be null");
Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
if (messageConverter != null) {
KeyValueMapper<Object, Object, KeyValue<Object, Object>> keyValueMapper = new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
@Override
public KeyValue<Object, Object> apply(Object k, Object v) {
Message<?> message = (Message<?>) v;
return new KeyValue<Object, Object>(k,
messageConverter.toMessage(message.getPayload(),
new MutableMessageHeaders(((Message<?>) v).getHeaders())));
KeyValueMapper<Object, Object, KeyValue<Object, Object>> keyValueMapper = (k, v) -> {
Message<?> message = (Message<?>) v;
BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
String contentType = bindingProperties.getContentType();
MutableMessageHeaders messageHeaders = new MutableMessageHeaders(((Message<?>) v).getHeaders());
if (!StringUtils.isEmpty(contentType)) {
messageHeaders.put(MessageHeaders.CONTENT_TYPE, contentType);
}
return new KeyValue<>(k,
messageConverter.toMessage(message.getPayload(),
messageHeaders));
};
delegate = delegate.map(keyValueMapper);
}
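A note on the ProxyFactory construction above: createOutput hands user code a proxy that implements both KStreamWrapper (so the binder can later inject the real stream via wrap(..)) and KStream (so StreamListener methods can return into it). A minimal self-contained demonstration of that same two-interface proxy trick, with toy Wrapper/Greeter types standing in for KStreamWrapper/KStream:

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.springframework.aop.framework.ProxyFactory;

public class ProxyTrickDemo {

	interface Wrapper { void wrap(Greeter delegate); }

	interface Greeter { String greet(); }

	static class Handler implements Wrapper, MethodInterceptor {

		private Greeter delegate;

		@Override
		public void wrap(Greeter delegate) {
			this.delegate = delegate;
		}

		@Override
		public Object invoke(MethodInvocation invocation) throws Throwable {
			// Wrapper methods are handled by this interceptor itself;
			// everything else is forwarded to the wrapped delegate.
			if (invocation.getMethod().getDeclaringClass().equals(Wrapper.class)) {
				return invocation.getMethod().invoke(this, invocation.getArguments());
			}
			return invocation.getMethod().invoke(this.delegate, invocation.getArguments());
		}
	}

	public static void main(String[] args) {
		ProxyFactory proxyFactory = new ProxyFactory(Wrapper.class, Greeter.class);
		proxyFactory.addAdvice(new Handler());
		Object proxy = proxyFactory.getProxy();
		((Wrapper) proxy).wrap(() -> "hello");
		System.out.println(((Greeter) proxy).greet()); // prints "hello"
	}
}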
@@ -52,23 +52,27 @@ public class KStreamListenerParameterAdapter implements StreamListenerParameterA
final Class<?> valueClass = (resolvableType.getGeneric(1).getRawClass() != null)
? (resolvableType.getGeneric(1).getRawClass()) : Object.class;

return bindingTarget.map(new KeyValueMapper() {
@Override
public Object apply(Object o, Object o2) {
if (valueClass.isAssignableFrom(o2.getClass())) {
return new KeyValue<>(o, o2);
}
else if (o2 instanceof Message) {
return new KeyValue<>(o, messageConverter.fromMessage((Message) o2, valueClass));
}
else if(o2 instanceof String || o2 instanceof byte[]) {
Message<Object> message = MessageBuilder.withPayload(o2).build();
return new KeyValue<>(o, messageConverter.fromMessage(message, valueClass));
return bindingTarget.map((KeyValueMapper) (o, o2) -> {
KeyValue<Object, Object> keyValue;
if (valueClass.isAssignableFrom(o2.getClass())) {
keyValue = new KeyValue<>(o, o2);
}
else if (o2 instanceof Message) {
if (valueClass.isAssignableFrom(((Message) o2).getPayload().getClass())) {
keyValue = new KeyValue<>(o, ((Message) o2).getPayload());
}
else {
return new KeyValue<>(o, o2);
keyValue = new KeyValue<>(o, messageConverter.fromMessage((Message) o2, valueClass));
}
}
else if (o2 instanceof String || o2 instanceof byte[]) {
Message<Object> message = MessageBuilder.withPayload(o2).build();
keyValue = new KeyValue<>(o, messageConverter.fromMessage(message, valueClass));
}
else {
keyValue = new KeyValue<>(o, o2);
}
return keyValue;
});
}

@@ -21,7 +21,6 @@ import java.io.IOException;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KeyValueMapper;

import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
import org.springframework.messaging.Message;
@@ -40,16 +39,15 @@ public class KStreamStreamListenerResultAdapter implements StreamListenerResultA
@Override
@SuppressWarnings("unchecked")
public Closeable adapt(KStream streamListenerResult, KStreamBoundElementFactory.KStreamWrapper boundElement) {
boundElement.wrap(streamListenerResult.map(new KeyValueMapper() {
@Override
public Object apply(Object k, Object v) {
if (v instanceof Message<?>) {
return new KeyValue<>(k, v);
}
else {
return new KeyValue<>(k, MessageBuilder.withPayload(v).build());
}
boundElement.wrap(streamListenerResult.map((k, v) -> {
KeyValue<Object, Object> keyValue;
if (v instanceof Message<?>) {
keyValue = new KeyValue<>(k, v);
}
else {
keyValue = new KeyValue<>(k, MessageBuilder.withPayload(v).build());
}
return keyValue;
}));
return new NoOpCloseable();
}
@@ -0,0 +1,40 @@
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kstream.config;

import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
* @author Soby Chacko
*/
@Configuration
@EnableConfigurationProperties(KStreamApplicationSupportProperties.class)
public class KStreamApplicationSupportAutoConfiguration {

@Bean
@ConditionalOnProperty("spring.cloud.stream.kstream.timeWindow.length")
public TimeWindows configuredTimeWindow(KStreamApplicationSupportProperties processorProperties) {
return processorProperties.getTimeWindow().getAdvanceBy() > 0
? TimeWindows.of(processorProperties.getTimeWindow().getLength()).advanceBy(processorProperties.getTimeWindow().getAdvanceBy())
: TimeWindows.of(processorProperties.getTimeWindow().getLength());
}
}
@@ -0,0 +1,64 @@
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kstream.config;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
* {@link ConfigurationProperties} for end-user Kafka Streams applications. This class provides
* convenient access to commonly used Kafka Streams properties from the user application. For example, windowing
* operations are a common use case in stream processing, and with this class window-specific properties can be
* supplied at runtime and consumed in the application.
*
* @author Soby Chacko
*/
@ConfigurationProperties("spring.cloud.stream.kstream")
public class KStreamApplicationSupportProperties {

private TimeWindow timeWindow;

public TimeWindow getTimeWindow() {
return timeWindow;
}

public void setTimeWindow(TimeWindow timeWindow) {
this.timeWindow = timeWindow;
}

public static class TimeWindow {

private int length;

private int advanceBy;

public int getLength() {
return length;
}

public void setLength(int length) {
this.length = length;
}

public int getAdvanceBy() {
return advanceBy;
}

public void setAdvanceBy(int advanceBy) {
this.advanceBy = advanceBy;
}
}
}
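How an application might use this support, assuming the two properties below are set; the field injection and class name are illustrative, not part of the new files:

// spring.cloud.stream.kstream.timeWindow.length=5000
// spring.cloud.stream.kstream.timeWindow.advanceBy=1000

import org.apache.kafka.streams.kstream.TimeWindows;
import org.springframework.beans.factory.annotation.Autowired;

public class WindowedProcessor {

	// Provided by KStreamApplicationSupportAutoConfiguration when
	// spring.cloud.stream.kstream.timeWindow.length is present; for the values
	// above, a 5-second hopping window advancing by 1 second.
	@Autowired
	private TimeWindows configuredTimeWindow;

	// ... use configuredTimeWindow in a windowed aggregation, e.g.
	// stream.groupByKey(keySerde, valueSerde).count(configuredTimeWindow, "store-name")
}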
@@ -18,27 +18,22 @@ package org.springframework.cloud.stream.binder.kstream.config;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.streams.StreamsConfig;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.KafkaAdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kstream.KStreamBinder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.type.AnnotatedTypeMetadata;

/**
* @author Marius Bogoevici
* @author Gary Russell
*/
@Configuration
@EnableConfigurationProperties(KStreamExtendedBindingProperties.class)
@@ -63,34 +58,10 @@ public class KStreamBinderConfiguration {
}

@Bean(name = "adminUtilsOperation")
@Conditional(Kafka09Present.class)
@ConditionalOnClass(name = "kafka.admin.AdminUtils")
public AdminUtilsOperation kafka09AdminUtilsOperation() {
logger.info("AdminUtils selected: Kafka 0.9 AdminUtils");
return new Kafka09AdminUtilsOperation();
}

@Bean(name = "adminUtilsOperation")
@Conditional(Kafka10Present.class)
@ConditionalOnClass(name = "kafka.admin.AdminUtils")
public AdminUtilsOperation kafka10AdminUtilsOperation() {
logger.info("AdminUtils selected: Kafka 0.10 AdminUtils");
return new Kafka10AdminUtilsOperation();
return new KafkaAdminUtilsOperation();
}

static class Kafka10Present implements Condition {

@Override
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
return AppInfoParser.getVersion().startsWith("0.10");
}
}

static class Kafka09Present implements Condition {

@Override
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
return AppInfoParser.getVersion().startsWith("0.9");
}
}
}

@@ -33,7 +33,6 @@ import org.springframework.cloud.stream.binder.kstream.KStreamStreamListenerResu
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.integration.codec.Codec;
import org.springframework.kafka.annotation.KafkaStreamsDefaultConfiguration;
import org.springframework.kafka.core.KStreamBuilderFactoryBean;
import org.springframework.util.ObjectUtils;
@@ -70,10 +69,9 @@ public class KStreamBinderSupportAutoConfiguration {
public StreamsConfig streamsConfig(KafkaBinderConfigurationProperties binderConfigurationProperties) {
Properties props = new Properties();
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, binderConfigurationProperties.getKafkaConnectionString());
props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "default");
props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, binderConfigurationProperties.getZkConnectionString());
if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConfiguration())) {
props.putAll(binderConfigurationProperties.getConfiguration());
}
@@ -94,9 +92,9 @@ public class KStreamBinderSupportAutoConfiguration {

@Bean
public KStreamBoundElementFactory kStreamBindableTargetFactory(KStreamBuilder kStreamBuilder,
BindingServiceProperties bindingServiceProperties, Codec codec,
BindingServiceProperties bindingServiceProperties,
CompositeMessageConverterFactory compositeMessageConverterFactory) {
return new KStreamBoundElementFactory(kStreamBuilder, bindingServiceProperties, codec,
return new KStreamBoundElementFactory(kStreamBuilder, bindingServiceProperties,
compositeMessageConverterFactory);
}
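The KEY_SERDE_CLASS_CONFIG to DEFAULT_KEY_SERDE_CLASS_CONFIG swap in the streamsConfig bean tracks a config-key rename in newer kafka-streams releases ("key.serde" became "default.key.serde"). A minimal sketch of the resulting bootstrap properties, with the connection string replaced by a placeholder since the real bean reads it from the binder configuration:

import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

final class StreamsConfigSketch {

	static StreamsConfig build() {
		Properties props = new Properties();
		props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
		// "default."-prefixed keys replace the older key.serde / value.serde configs
		props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
		props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
		props.put(StreamsConfig.APPLICATION_ID_CONFIG, "default");
		return new StreamsConfig(props);
	}
}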
@@ -1,4 +1,5 @@
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
org.springframework.cloud.stream.binder.kstream.config.KStreamBinderSupportAutoConfiguration
org.springframework.cloud.stream.binder.kstream.config.KStreamBinderSupportAutoConfiguration,\
org.springframework.cloud.stream.binder.kstream.config.KStreamApplicationSupportAutoConfiguration
@@ -18,16 +18,13 @@ package org.springframework.cloud.stream.binder.kstream;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.LongDeserializer;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
import org.apache.kafka.streams.kstream.Predicate;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.apache.kafka.streams.kstream.Windowed;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
@@ -52,20 +49,21 @@ import static org.assertj.core.api.Assertions.assertThat;
 /**
  *
  * @author Soby Chacko
+ * @author Gary Russell
  */
 public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {

 	@ClassRule
 	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

-	private static Consumer<Integer, Long> consumer;
+	private static Consumer<Integer, String> consumer;

 	@BeforeClass
 	public static void setUp() throws Exception {
 		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
-		consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
+		//consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class.getName());
 		consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
-		DefaultKafkaConsumerFactory<Integer, Long> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
+		DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
 		consumer = cf.createConsumer();
 		embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
 	}
@@ -88,12 +86,16 @@ public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {
 				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
+				"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
 				"--spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
-				"--spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde",
+				"--spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$ByteArraySerde",
 				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
 				"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
 				"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
-		receiveAndValidateFoo(context);
-		context.close();
+		try {
+			receiveAndValidateFoo(context);
+		}
+		finally {
+			context.close();
+		}
 	}

 	private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception{
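Note: the try/finally introduced here (and in the other tests below) guarantees the embedded application context closes even when an assertion throws. Since ConfigurableApplicationContext is Closeable, an equivalent formulation, not what these commits use, is try-with-resources; ProductCountApplication is a stand-in for the test's @EnableBinding class:

    // pass the same "--spring.cloud.stream..." args as in the test above
    try (ConfigurableApplicationContext ctx = SpringApplication.run(ProductCountApplication.class)) {
        receiveAndValidateFoo(ctx);
    } // close() runs automatically, even if the assertions fail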
@@ -102,10 +104,12 @@ public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {
 		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
 		template.setDefaultTopic("foos");
 		template.sendDefault("{\"id\":\"123\"}");
-		ConsumerRecord<Integer, Long> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
+		ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");

 		assertThat(cr.key().equals(123));
-		assertThat(cr.value().equals(1L));
+		ObjectMapper om = new ObjectMapper();
+		Long aLong = om.readValue(cr.value(), Long.class);
+		assertThat(aLong.equals(1L));
 	}

 	@EnableBinding(KStreamProcessor.class)
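Note: the consumer's value type changes from Long to String because the output now goes through useNativeEncoding with a ByteArraySerde, so the windowed count arrives as the bytes of its JSON rendering rather than a native long. Reading it back is then a Jackson round-trip:

    ObjectMapper om = new ObjectMapper();
    Long count = om.readValue("1", Long.class); // "1" stands in for cr.value()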
@@ -116,30 +120,12 @@ public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {
 		@SendTo("output")
 		public KStream<Integer, Long> process(KStream<Object, Product> input) {
 			return input
-					.filter(new Predicate<Object, Product>() {
-
-						@Override
-						public boolean test(Object key, Product product) {
-							return product.getId() == 123;
-						}
-					})
-					.map(new KeyValueMapper<Object, Product, KeyValue<Product, Product>>() {
-
-						@Override
-						public KeyValue<Product, Product> apply(Object key, Product value) {
-							return new KeyValue<>(value, value);
-						}
-					})
+					.filter((key, product) -> product.getId() == 123)
+					.map((key, value) -> new KeyValue<>(value, value))
 					.groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
 					.count(TimeWindows.of(5000), "id-count-store")
 					.toStream()
-					.map(new KeyValueMapper<Windowed<Product>, Long, KeyValue<Integer, Long>>() {
-
-						@Override
-						public KeyValue<Integer, Long> apply(Windowed<Product> key, Long value) {
-							return new KeyValue<>(key.key().id, value);
-						}
-					});
+					.map((key, value) -> new KeyValue<>(key.key().id, value));
 		}
 	}

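Note: the anonymous-class bodies removed here and the lambdas replacing them are behaviorally identical; Predicate and KeyValueMapper are single-method interfaces, so (with Java 8 now required) the first form collapses into the second:

    Predicate<Object, Product> verbose = new Predicate<Object, Product>() {
        @Override
        public boolean test(Object key, Product product) {
            return product.getId() == 123;
        }
    };
    Predicate<Object, Product> concise = (key, product) -> product.getId() == 123;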
@@ -18,7 +18,6 @@ package org.springframework.cloud.stream.binder.kstream;

 import java.util.Arrays;
 import java.util.Date;
-import java.util.List;
 import java.util.Map;

 import org.apache.kafka.clients.consumer.Consumer;
@@ -27,10 +26,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.KStream;
-import org.apache.kafka.streams.kstream.KeyValueMapper;
 import org.apache.kafka.streams.kstream.TimeWindows;
-import org.apache.kafka.streams.kstream.ValueMapper;
-import org.apache.kafka.streams.kstream.Windowed;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -39,15 +35,14 @@ import org.junit.Test;

 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.SpringApplication;
 import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.cloud.stream.annotation.EnableBinding;
 import org.springframework.cloud.stream.annotation.StreamListener;
 import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
+import org.springframework.cloud.stream.binder.kstream.config.KStreamApplicationSupportProperties;
 import org.springframework.context.ConfigurableApplicationContext;
 import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
 import org.springframework.kafka.core.DefaultKafkaProducerFactory;
-import org.springframework.kafka.core.KStreamBuilderFactoryBean;
 import org.springframework.kafka.core.KafkaTemplate;
 import org.springframework.kafka.test.rule.KafkaEmbedded;
 import org.springframework.kafka.test.utils.KafkaTestUtils;
@@ -59,6 +54,7 @@ import static org.assertj.core.api.Assertions.assertThat;
  *
  * @author Marius Bogoevici
  * @author Soby Chacko
+ * @author Gary Russell
  */
 public class KStreamBinderWordCountIntegrationTests {

@@ -95,11 +91,18 @@ public class KStreamBinderWordCountIntegrationTests {
 				"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
 				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
+				"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
+				"--spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$ByteArraySerde",
 				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
+				"--spring.cloud.stream.kstream.timeWindow.length=5000",
+				"--spring.cloud.stream.kstream.timeWindow.advanceBy=0",
 				"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
 				"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
-		receiveAndValidate(context);
-		context.close();
+		try {
+			receiveAndValidate(context);
+		}
+		finally {
+			context.close();
+		}
 	}

 	private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception{
@@ -114,90 +117,25 @@ public class KStreamBinderWordCountIntegrationTests {

 	@EnableBinding(KStreamProcessor.class)
 	@EnableAutoConfiguration
-	@EnableConfigurationProperties(WordCountProcessorProperties.class)
+	@EnableConfigurationProperties(KStreamApplicationSupportProperties.class)
 	public static class WordCountProcessorApplication {

 		@Autowired
-		private WordCountProcessorProperties processorProperties;
-
-		@Autowired
-		private KStreamBuilderFactoryBean kafkaStreams;
+		private TimeWindows timeWindows;

 		@StreamListener("input")
 		@SendTo("output")
 		public KStream<?, WordCount> process(KStream<Object, String> input) {

 			return input
-					.flatMapValues(new ValueMapper<String, Iterable<String>>() {
-
-						@Override
-						public List<String> apply(String value) {
-							return Arrays.asList(value.toLowerCase().split("\\W+"));
-						}
-					})
-					.map(new KeyValueMapper<Object, String, KeyValue<String, String>>() {
-
-						@Override
-						public KeyValue<String, String> apply(Object key, String value) {
-							return new KeyValue<>(value, value);
-						}
-					})
+					.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
+					.map((key, value) -> new KeyValue<>(value, value))
 					.groupByKey(Serdes.String(), Serdes.String())
-					.count(configuredTimeWindow(), processorProperties.getStoreName())
+					.count(timeWindows, "foo-WordCounts")
 					.toStream()
-					.map(new KeyValueMapper<Windowed<String>, Long, KeyValue<Object, WordCount>>() {
-
-						@Override
-						public KeyValue<Object, WordCount> apply(Windowed<String> key, Long value) {
-							return new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end())));
-						}
-					});
+					.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
 		}
-
-		/**
-		 * Constructs a {@link TimeWindows} property.
-		 *
-		 * @return
-		 */
-		private TimeWindows configuredTimeWindow() {
-			return processorProperties.getAdvanceBy() > 0
-					? TimeWindows.of(processorProperties.getWindowLength()).advanceBy(processorProperties.getAdvanceBy())
-					: TimeWindows.of(processorProperties.getWindowLength());
-		}
 	}

-	@ConfigurationProperties(prefix = "kstream.word.count")
-	static class WordCountProcessorProperties {
-
-		private int windowLength = 5000;
-
-		private int advanceBy = 0;
-
-		private String storeName = "WordCounts";
-
-		int getWindowLength() {
-			return windowLength;
-		}
-
-		public void setWindowLength(int windowLength) {
-			this.windowLength = windowLength;
-		}
-
-		int getAdvanceBy() {
-			return advanceBy;
-		}
-
-		public void setAdvanceBy(int advanceBy) {
-			this.advanceBy = advanceBy;
-		}
-
-		String getStoreName() {
-			return storeName;
-		}
-
-		public void setStoreName(String storeName) {
-			this.storeName = storeName;
-		}
-	}
-
 	static class WordCount {
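Note: the per-test window plumbing (configuredTimeWindow() and WordCountProcessorProperties) is superseded by an injected TimeWindows bean driven by the spring.cloud.stream.kstream.timeWindow.* properties seen in the test args above. The two shapes the deleted helper produced, for reference:

    TimeWindows tumbling = TimeWindows.of(5000L);                  // 5s windows, no overlap
    TimeWindows hopping  = TimeWindows.of(5000L).advanceBy(1000L); // 5s windows starting every 1s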
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.springframework.cloud.stream.binder.kstream;

 import java.util.Map;
@@ -24,8 +25,6 @@ import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.streams.KafkaStreams;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.KStream;
-import org.apache.kafka.streams.kstream.KeyValueMapper;
-import org.apache.kafka.streams.kstream.Predicate;
 import org.apache.kafka.streams.state.QueryableStoreTypes;
 import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
 import org.junit.AfterClass;
@@ -54,6 +53,7 @@ import static org.assertj.core.api.Assertions.assertThat;

 /**
  * @author Soby Chacko
+ * @author Gary Russell
  */
 public class KStreamInteractiveQueryIntegrationTests {

@@ -86,13 +86,18 @@ public class KStreamInteractiveQueryIntegrationTests {
 				"--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
 				"--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
 				"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
+				"--spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$ByteArraySerde",
 				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
+				"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
 				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
 				"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
 				"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
-		receiveAndValidateFoo(context);
-		context.close();
+		try {
+			receiveAndValidateFoo(context);
+		}
+		finally {
+			context.close();
+		}
 	}

 	private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception{
@@ -120,30 +125,12 @@ public class KStreamInteractiveQueryIntegrationTests {
 		public KStream<?, String> process(KStream<Object, Product> input) {

 			return input
-					.filter(new Predicate<Object, Product>() {
-
-						@Override
-						public boolean test(Object key, Product product) {
-							return product.getId() == 123;
-						}
-					})
-					.map(new KeyValueMapper<Object, Product, KeyValue<Integer, Product>>() {
-
-						@Override
-						public KeyValue<Integer, Product> apply(Object key, Product value) {
-							return new KeyValue<>(value.id, value);
-						}
-					})
+					.filter((key, product) -> product.getId() == 123)
+					.map((key, value) -> new KeyValue<>(value.id, value))
 					.groupByKey(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class))
 					.count("prod-id-count-store")
 					.toStream()
-					.map(new KeyValueMapper<Integer, Long, KeyValue<Object, String>>() {
-
-						@Override
-						public KeyValue<Object, String> apply(Integer key, Long value) {
-							return new KeyValue<>(null, "Count for product with ID 123: " + value);
-						}
-					});
+					.map((key, value) -> new KeyValue<>(null, "Count for product with ID 123: " + value));
 		}

 		@Bean
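Note: the "prod-id-count-store" materialized by the count above is what makes this an interactive-query test: it can be read from the live topology. A hedged sketch of that query side, assuming the KafkaStreams instance comes from the KStreamBuilderFactoryBean held by Foo via its getKafkaStreams() accessor:

    KafkaStreams streams = kStreamBuilderFactoryBean.getKafkaStreams();
    ReadOnlyKeyValueStore<Integer, Long> store =
            streams.store("prod-id-count-store", QueryableStoreTypes.keyValueStore());
    Long countForProduct123 = store.get(123);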
@@ -151,7 +138,6 @@ public class KStreamInteractiveQueryIntegrationTests {
 			return new Foo(kStreamBuilderFactoryBean);
 		}

-
 		static class Foo {

 			KStreamBuilderFactoryBean kStreamBuilderFactoryBean;

@@ -23,10 +23,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.KStream;
-import org.apache.kafka.streams.kstream.KeyValueMapper;
-import org.apache.kafka.streams.kstream.Predicate;
 import org.apache.kafka.streams.kstream.TimeWindows;
-import org.apache.kafka.streams.kstream.Windowed;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -51,6 +48,7 @@ import static org.assertj.core.api.Assertions.assertThat;
 /**
  * @author Marius Bogoevici
  * @author Soby Chacko
+ * @author Gary Russell
  */
 public class KstreamBinderPojoInputStringOutputIntegrationTests {

@@ -85,11 +83,17 @@ public class KstreamBinderPojoInputStringOutputIntegrationTests {
 				"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
 				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
+				"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
+				"--spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
+				"--spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$ByteArraySerde",
 				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
 				"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
 				"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
-		receiveAndValidateFoo(context);
-		context.close();
+		try {
+			receiveAndValidateFoo(context);
+		}
+		finally {
+			context.close();
+		}
 	}

 	private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
@@ -108,33 +112,15 @@ public class KstreamBinderPojoInputStringOutputIntegrationTests {

 		@StreamListener("input")
 		@SendTo("output")
-		public KStream<?, String> process(KStream<Object, Product> input) {
+		public KStream<Integer, String> process(KStream<Object, Product> input) {

 			return input
-					.filter(new Predicate<Object, Product>() {
-
-						@Override
-						public boolean test(Object key, Product product) {
-							return product.getId() == 123;
-						}
-					})
-					.map(new KeyValueMapper<Object, Product, KeyValue<Product, Product>>() {
-
-						@Override
-						public KeyValue<Product, Product> apply(Object key, Product value) {
-							return new KeyValue<>(value, value);
-						}
-					})
+					.filter((key, product) -> product.getId() == 123)
+					.map((key, value) -> new KeyValue<>(value, value))
 					.groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
 					.count(TimeWindows.of(5000), "id-count-store")
 					.toStream()
-					.map(new KeyValueMapper<Windowed<Product>, Long, KeyValue<Object, String>>() {
-
-						@Override
-						public KeyValue<Object, String> apply(Windowed<Product> key, Long value) {
-							return new KeyValue<>(null, "Count for product with ID 123: " + value);
-						}
-					});
+					.map((key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value));
 		}
 	}

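Note: the signature tightens to KStream<Integer, String> because the final map now emits the windowed product id as the record key; with useNativeEncoding=true the binder defers to the configured serdes, so that Integer key must line up with the Serdes$IntegerSerde passed in the test args above. In sketch form, where counts stands in for the stream after toStream():

    KStream<Integer, String> out = counts.map(
            (key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value));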
@@ -7,7 +7,7 @@



-lines=$(find . -name 'pom.xml' | xargs egrep "SNAPSHOT" | grep -v regex | wc -l)
+lines=$(find . -name 'pom.xml' | xargs egrep "SNAPSHOT|M[0-9]|RC[0-9]" | grep -v regex | wc -l)
 if [ $lines -eq 0 ]; then
 	echo "No snapshots found"
 else
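Note: the widened egrep pattern now also flags milestone (M1..M9) and release-candidate (RC1..) versions in any pom.xml, not just SNAPSHOTs. A quick check of the same alternation in Java, with illustrative version strings:

    import java.util.regex.Pattern;

    public class VersionCheck {
        public static void main(String[] args) {
            Pattern banned = Pattern.compile("SNAPSHOT|M[0-9]|RC[0-9]");
            for (String v : new String[] {
                    "1.0.0.BUILD-SNAPSHOT", "1.0.0.M1", "1.0.0.RC1", "1.0.0.RELEASE" }) {
                // the first three are flagged as pre-release; RELEASE passes
                System.out.println(v + " -> " + banned.matcher(v).find());
            }
        }
    }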