Compare commits
574 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fc4358ba10 | ||
|
|
f3d2287b70 | ||
|
|
220ae98bcc | ||
|
|
bd3eebd897 | ||
|
|
ed8683dcc2 | ||
|
|
60b6604988 | ||
|
|
a3e76282b4 | ||
|
|
c9687189b7 | ||
|
|
5fcdf28776 | ||
|
|
d334359cd4 | ||
|
|
aff0dc00ef | ||
|
|
7840decc86 | ||
|
|
d37cbc79d7 | ||
|
|
7a03eeed02 | ||
|
|
3a88839a5f | ||
|
|
c9a07729dd | ||
|
|
07f10f6eb5 | ||
|
|
6fdc663349 | ||
|
|
f3a954fad7 | ||
|
|
7be0f1be23 | ||
|
|
0b687ad0ab | ||
|
|
c0bece64bd | ||
|
|
2efd29fb27 | ||
|
|
82a3306cb9 | ||
|
|
af0485e241 | ||
|
|
ea8912b011 | ||
|
|
d76d916970 | ||
|
|
ac0e462ed2 | ||
|
|
bd1b49222c | ||
|
|
9fd16416d6 | ||
|
|
a4c38b3453 | ||
|
|
a8c948a6b2 | ||
|
|
d57091d791 | ||
|
|
2b7f6ecb96 | ||
|
|
53d32c2332 | ||
|
|
b7ebc185e7 | ||
|
|
56e25383f8 | ||
|
|
b4c7c36229 | ||
|
|
6eed115cc9 | ||
|
|
8e6d07cc7b | ||
|
|
adde49aab3 | ||
|
|
e500138486 | ||
|
|
970bac41bb | ||
|
|
afe39bf78a | ||
|
|
c1ad3006e9 | ||
|
|
ba2c3a05c9 | ||
|
|
739b499966 | ||
|
|
1b26f5d629 | ||
|
|
99c323e314 | ||
|
|
f4d3715317 | ||
|
|
d7907bbdcc | ||
|
|
9b5e735f74 | ||
|
|
201668542b | ||
|
|
a5f01f9d6f | ||
|
|
912c47e3ac | ||
|
|
001882de4e | ||
|
|
54ac274ea3 | ||
|
|
80b707e5e9 | ||
|
|
13474bdafb | ||
|
|
d0b4bdf438 | ||
|
|
0bedc606ce | ||
|
|
b5cb32767b | ||
|
|
1a45ff797a | ||
|
|
f7537e795e | ||
|
|
7cae3aa54f | ||
|
|
5454d54faf | ||
|
|
4d68282937 | ||
|
|
9cf4cdc7c1 | ||
|
|
69a3c36db9 | ||
|
|
b8be381828 | ||
|
|
162993dd46 | ||
|
|
ee3096658a | ||
|
|
6e46054d7b | ||
|
|
7bc90c10a2 | ||
|
|
5adeea2acb | ||
|
|
65664a2d4c | ||
|
|
f5e12a29c1 | ||
|
|
416580e607 | ||
|
|
a81093734e | ||
|
|
5f4e3eebc5 | ||
|
|
cb42e80dac | ||
|
|
4fb5037fd7 | ||
|
|
d8b8c8d9fd | ||
|
|
9e4a1075d4 | ||
|
|
79497ad264 | ||
|
|
7e48afa005 | ||
|
|
87ac491230 | ||
|
|
a4ad9e2c0b | ||
|
|
4e6881830a | ||
|
|
829ce1cf7e | ||
|
|
c5289589c1 | ||
|
|
dd607627ed | ||
|
|
0ea4315af8 | ||
|
|
f25dbff2b7 | ||
|
|
f8b290844b | ||
|
|
a7e025794c | ||
|
|
634a73c9ff | ||
|
|
bc2f692964 | ||
|
|
7eefe6c567 | ||
|
|
c5d108ef89 | ||
|
|
47e7ca07a8 | ||
|
|
7cc001ac4c | ||
|
|
a7299df63f | ||
|
|
33aa926940 | ||
|
|
a1fb7f0a2d | ||
|
|
e2eca34e4b | ||
|
|
bc02da2900 | ||
|
|
1f52ec5020 | ||
|
|
394b8a6685 | ||
|
|
32939e30fe | ||
|
|
cadb5422cc | ||
|
|
99880ba216 | ||
|
|
9e9e0f7ea3 | ||
|
|
e1648083e6 | ||
|
|
87491118c3 | ||
|
|
cf7acb23e8 | ||
|
|
1fbb6f250e | ||
|
|
ae4abe4f33 | ||
|
|
5d312228db | ||
|
|
69c5b67126 | ||
|
|
83cfdfc532 | ||
|
|
c6154eecfc | ||
|
|
f8a4488a0e | ||
|
|
ffde5d35db | ||
|
|
5921d464ed | ||
|
|
023d3df7f7 | ||
|
|
846534fe84 | ||
|
|
2eedc8a218 | ||
|
|
49d263eafc | ||
|
|
94f398a3e4 | ||
|
|
7cba801bef | ||
|
|
db14154398 | ||
|
|
386a361a66 | ||
|
|
27cb6f4b5f | ||
|
|
58ac92ec71 | ||
|
|
d1c62bbc26 | ||
|
|
afe13cc045 | ||
|
|
42c9af019e | ||
|
|
675c2e4940 | ||
|
|
a1b31e67c4 | ||
|
|
5a7cc9f257 | ||
|
|
fc184ba422 | ||
|
|
526627854a | ||
|
|
6be541c070 | ||
|
|
1afb22d65f | ||
|
|
097fb89d9e | ||
|
|
0ad0a31b4e | ||
|
|
19e97dd9c4 | ||
|
|
676764b923 | ||
|
|
d8a678c77e | ||
|
|
23ce9e3d6e | ||
|
|
73921db3ec | ||
|
|
f1e3a0bdd6 | ||
|
|
97e3b61d14 | ||
|
|
50f4470fcf | ||
|
|
45f1927c6f | ||
|
|
0a0d3a1057 | ||
|
|
5cdd8a09f9 | ||
|
|
514530db22 | ||
|
|
50ec8f0919 | ||
|
|
829d1c651a | ||
|
|
200a5efb64 | ||
|
|
7f3a7f856f | ||
|
|
1358bdfeec | ||
|
|
f8dac888e4 | ||
|
|
50380dae69 | ||
|
|
23b9d5b4c6 | ||
|
|
7bebe9b78f | ||
|
|
c44c17008c | ||
|
|
33fa713a9f | ||
|
|
769ed56049 | ||
|
|
859952b32a | ||
|
|
1e9aa60c4e | ||
|
|
4161f875ed | ||
|
|
f2e543c816 | ||
|
|
ac4d9298c9 | ||
|
|
34c4efb35c | ||
|
|
5b3b92cdb9 | ||
|
|
a6ac6e3221 | ||
|
|
6280489be8 | ||
|
|
80df80806b | ||
|
|
ec52fbe2eb | ||
|
|
5e128aafdc | ||
|
|
f8d0b8bde2 | ||
|
|
4159217023 | ||
|
|
087b09d193 | ||
|
|
83c42a55e9 | ||
|
|
1edc2c714a | ||
|
|
dc7c8d657a | ||
|
|
a549810899 | ||
|
|
aff8b8487c | ||
|
|
95bc54b991 | ||
|
|
38bea7e7da | ||
|
|
201feca206 | ||
|
|
21a8f89f22 | ||
|
|
7fc9145a21 | ||
|
|
473ca53723 | ||
|
|
788dc28d7e | ||
|
|
dbc00ffa92 | ||
|
|
240ae8282e | ||
|
|
7adb10bcd2 | ||
|
|
8be9fe6abc | ||
|
|
04e0bf628c | ||
|
|
79323588ba | ||
|
|
395683fdaf | ||
|
|
2a25f7c1f3 | ||
|
|
5fa88e11ec | ||
|
|
ee3279072d | ||
|
|
fa0d6fe60e | ||
|
|
6578a996cb | ||
|
|
af5778d157 | ||
|
|
a190289bff | ||
|
|
7091a641d6 | ||
|
|
c408f91162 | ||
|
|
1cfb63f4bf | ||
|
|
1bf1393c6e | ||
|
|
85663ebea7 | ||
|
|
c17752b02d | ||
|
|
52c0b35add | ||
|
|
36f00e5867 | ||
|
|
e7487b9ada | ||
|
|
064f5b6611 | ||
|
|
ff859c5859 | ||
|
|
445eabc59a | ||
|
|
cc5d1b1aa6 | ||
|
|
db896532e6 | ||
|
|
07a740a5b5 | ||
|
|
65f8cc5660 | ||
|
|
90a47a675f | ||
|
|
9d212024f8 | ||
|
|
d594bab4cf | ||
|
|
e46bd1f844 | ||
|
|
5a8aad502f | ||
|
|
b84a0fdfc6 | ||
|
|
e3fcddeab6 | ||
|
|
1b6f9f5806 | ||
|
|
f397f7c313 | ||
|
|
7e0b923c05 | ||
|
|
0dc5196cb2 | ||
|
|
1cc50c1a40 | ||
|
|
0b3d508fe2 | ||
|
|
2acce00c74 | ||
|
|
ce0376ad86 | ||
|
|
25ac3b75e3 | ||
|
|
6091a1de51 | ||
|
|
5cfce42d2e | ||
|
|
349fc85b4b | ||
|
|
a4762ad28b | ||
|
|
bdd4b3e28b | ||
|
|
3ff99da014 | ||
|
|
c7c05fe3c2 | ||
|
|
4b50f7c2f5 | ||
|
|
88c5aa0969 | ||
|
|
d5a72a29d0 | ||
|
|
8c3cb8230b | ||
|
|
70c1ce8970 | ||
|
|
1d8a4e67d2 | ||
|
|
bbfc776395 | ||
|
|
4e9ed30948 | ||
|
|
34fd7a6a7a | ||
|
|
b23b42d874 | ||
|
|
cf59cfcf12 | ||
|
|
02a4fcb144 | ||
|
|
6ac9c0ed23 | ||
|
|
78ff4f1a70 | ||
|
|
637ec2e55d | ||
|
|
6effd58406 | ||
|
|
7b8f0dcab7 | ||
|
|
88912b8d6b | ||
|
|
34b0945d43 | ||
|
|
278ba795d0 | ||
|
|
bf30ecdea1 | ||
|
|
464ce685bb | ||
|
|
54fa9a638d | ||
|
|
fefd9a3bd6 | ||
|
|
8e26d5e170 | ||
|
|
ac6bdc976e | ||
|
|
65386f6967 | ||
|
|
81c453086a | ||
|
|
0ddd9f8f64 | ||
|
|
d0fe596a9e | ||
|
|
062bbc1cc3 | ||
|
|
bc1936eb28 | ||
|
|
e2f1092173 | ||
|
|
06e5739fbd | ||
|
|
ad8e67fdc5 | ||
|
|
a3fb4cc3b3 | ||
|
|
7f09baf72d | ||
|
|
28a02cda4f | ||
|
|
f96a9f884c | ||
|
|
a9020368e5 | ||
|
|
ca9296dbd2 | ||
|
|
e8d202404b | ||
|
|
5794fb983c | ||
|
|
1ce1d7918f | ||
|
|
1264434871 | ||
|
|
e4fed0a52d | ||
|
|
05e2918bc0 | ||
|
|
6dadf0c104 | ||
|
|
f4dcf5100c | ||
|
|
b8eb41cb87 | ||
|
|
82cfd6d176 | ||
|
|
6866eef8b0 | ||
|
|
b833a9f371 | ||
|
|
0283359d4a | ||
|
|
cc2f8f6137 | ||
|
|
855334aaa3 | ||
|
|
9d708f836a | ||
|
|
ecc8715b0c | ||
|
|
98431ed8a0 | ||
|
|
c7fa1ce275 | ||
|
|
cc8c645c5a | ||
|
|
21fe9c75c5 | ||
|
|
65dd706a6a | ||
|
|
a02308a5a3 | ||
|
|
ac75f8fecf | ||
|
|
bf6a227f32 | ||
|
|
01daa4c0dd | ||
|
|
021943ec41 | ||
|
|
daf4b47d1c | ||
|
|
2b1be3754d | ||
|
|
db5a303431 | ||
|
|
e549090787 | ||
|
|
7ff64098a3 | ||
|
|
cea40bc969 | ||
|
|
717022e274 | ||
|
|
d0f1c8d703 | ||
|
|
7a532b2bbd | ||
|
|
3773fa2c05 | ||
|
|
5734ce689b | ||
|
|
608086ff4d | ||
|
|
16713b3b4f | ||
|
|
2432a7309b | ||
|
|
6fdb0001d6 | ||
|
|
f2ab4a07c6 | ||
|
|
584115580b | ||
|
|
752b509374 | ||
|
|
96062b23f2 | ||
|
|
da6145f382 | ||
|
|
39a5b25b9c | ||
|
|
4e250b34cf | ||
|
|
10de84f4fe | ||
|
|
309f588325 | ||
|
|
ca8e086d4c | ||
|
|
cce392aa94 | ||
|
|
e53b0f0de9 | ||
|
|
413cc4d2b5 | ||
|
|
94f76b903f | ||
|
|
8d5f794461 | ||
|
|
00c13c0035 | ||
|
|
e1e46226d5 | ||
|
|
46cbeb1804 | ||
|
|
3e4af104b8 | ||
|
|
16bb3e2f62 | ||
|
|
8145ab19fb | ||
|
|
930d33aeba | ||
|
|
fe2a398b8b | ||
|
|
24e1fc9722 | ||
|
|
245a43c1d8 | ||
|
|
4d1fed63ee | ||
|
|
786bd3ec62 | ||
|
|
183f21c880 | ||
|
|
18737b8fea | ||
|
|
840745e593 | ||
|
|
2df0377acb | ||
|
|
164948ad33 | ||
|
|
a472845cb4 | ||
|
|
12db5fc20e | ||
|
|
46f1b41832 | ||
|
|
64ca773a4f | ||
|
|
a92149f121 | ||
|
|
2721fe4d05 | ||
|
|
8907693ffb | ||
|
|
ca0bfc028f | ||
|
|
688f05cbc9 | ||
|
|
acb4180ea3 | ||
|
|
77e4087871 | ||
|
|
0d339e77b8 | ||
|
|
799eb5be28 | ||
|
|
2c7615db1f | ||
|
|
b2b1438ad7 | ||
|
|
87399fe904 | ||
|
|
f03e3e17c6 | ||
|
|
48aae76ec9 | ||
|
|
6976bcd3a8 | ||
|
|
912ab14309 | ||
|
|
b4fcb791c3 | ||
|
|
ca3f20ff26 | ||
|
|
97ecf48867 | ||
|
|
4d0b62f839 | ||
|
|
592b9a02d8 | ||
|
|
4ca58bea06 | ||
|
|
108ccbc572 | ||
|
|
303679b9f9 | ||
|
|
35434c680b | ||
|
|
e382604b04 | ||
|
|
bfe9529a51 | ||
|
|
d8df388d9f | ||
|
|
569344afa6 | ||
|
|
c1e58d187a | ||
|
|
12afaf1144 | ||
|
|
bbb455946e | ||
|
|
5dac51df62 | ||
|
|
c1db3d950c | ||
|
|
4d59670096 | ||
|
|
d213b4ff2c | ||
|
|
66198e345a | ||
|
|
df8d151878 | ||
|
|
d96dc8361b | ||
|
|
9c88b4d808 | ||
|
|
94be206651 | ||
|
|
4ab6432f23 | ||
|
|
24b52809ed | ||
|
|
7450d0731d | ||
|
|
08b41f7396 | ||
|
|
e725a172ba | ||
|
|
b7a3511375 | ||
|
|
d6c06286cd | ||
|
|
321331103b | ||
|
|
f549ae069c | ||
|
|
2cc60a744d | ||
|
|
2ddc837c1a | ||
|
|
816a4ec232 | ||
|
|
1a19eeabec | ||
|
|
d386ff7923 | ||
|
|
9644f2dbbf | ||
|
|
498998f1a4 | ||
|
|
5558f7f7fc | ||
|
|
dec9ff696f | ||
|
|
5014ab8fe1 | ||
|
|
3d3f02b6cc | ||
|
|
7790d4b196 | ||
|
|
602aed8a4d | ||
|
|
c1f4cf9dc6 | ||
|
|
c990140452 | ||
|
|
383add504a | ||
|
|
5f9395a5ec | ||
|
|
70eb25d413 | ||
|
|
6bc74c6e5c | ||
|
|
33603c62f0 | ||
|
|
efd46835a1 | ||
|
|
3a267bc751 | ||
|
|
62e98df0c7 | ||
|
|
9e156911b4 | ||
|
|
cd28454818 | ||
|
|
172d469faa | ||
|
|
74cb25a56a | ||
|
|
7dd4b66f58 | ||
|
|
908fb77a88 | ||
|
|
5660c7cf76 | ||
|
|
f8c1fb45a6 | ||
|
|
73d3d79651 | ||
|
|
792705d304 | ||
|
|
0a48999e3a | ||
|
|
b7b93c1352 | ||
|
|
718cb44de7 | ||
|
|
de78bfa00b | ||
|
|
27bb33f9e3 | ||
|
|
ecd8cc587c | ||
|
|
f506da758f | ||
|
|
12a528fd88 | ||
|
|
4b138c4a2f | ||
|
|
0af784aaa9 | ||
|
|
0deb8754bf | ||
|
|
95a4681d27 | ||
|
|
b4a2950acd | ||
|
|
c5c81f8148 | ||
|
|
d80e66d9b8 | ||
|
|
86c9704ef4 | ||
|
|
63a4acda1b | ||
|
|
31a6f5d7e3 | ||
|
|
4d02c38f70 | ||
|
|
c22c08e259 | ||
|
|
02e1fec3b4 | ||
|
|
fb91b2aff9 | ||
|
|
a881b9a54a | ||
|
|
fc92a6b0af | ||
|
|
d65e8ff59d | ||
|
|
9948674f30 | ||
|
|
cfc1e0e212 | ||
|
|
4bd206f37f | ||
|
|
9d1d90e608 | ||
|
|
0b8a760b5a | ||
|
|
d63f9e5fa6 | ||
|
|
c51d7e0613 | ||
|
|
7093551a86 | ||
|
|
e9f24b00c8 | ||
|
|
ed1589f957 | ||
|
|
c5f72250b5 | ||
|
|
0dc871aff0 | ||
|
|
67bcd0749a | ||
|
|
6001136ec0 | ||
|
|
e93904c238 | ||
|
|
43c8f02bff | ||
|
|
64ea989b08 | ||
|
|
525adef3a6 | ||
|
|
1866feb75f | ||
|
|
bf1c366d9b | ||
|
|
81f4a861c5 | ||
|
|
662aa71159 | ||
|
|
8683604004 | ||
|
|
7232839cdf | ||
|
|
47443f8b70 | ||
|
|
b589f32933 | ||
|
|
20af89bc00 | ||
|
|
0b50a6ce2f | ||
|
|
3a396c6d37 | ||
|
|
3e39514003 | ||
|
|
2144d3e26f | ||
|
|
2d4e9a113b | ||
|
|
84249727ae | ||
|
|
2fc158003f | ||
|
|
9c20bb4cdc | ||
|
|
8dbed6b877 | ||
|
|
46ee483023 | ||
|
|
3037b90fd2 | ||
|
|
cb2a074448 | ||
|
|
d73ef9f243 | ||
|
|
ef2b2db961 | ||
|
|
f6c058f9b8 | ||
|
|
17c11fbb66 | ||
|
|
8ab289a715 | ||
|
|
4c15cbc1d6 | ||
|
|
1b80cfcf4c | ||
|
|
0d5d092f84 | ||
|
|
99d739be21 | ||
|
|
40c1a8b8f1 | ||
|
|
c43e45c7ad | ||
|
|
700c7a1af3 | ||
|
|
9ec0cdd245 | ||
|
|
0227c9ecc9 | ||
|
|
a5cec9a25c | ||
|
|
9443c621b0 | ||
|
|
fe7bdd245d | ||
|
|
1d5fcca522 | ||
|
|
e75967351f | ||
|
|
d44f2348e6 | ||
|
|
3b4bff959f | ||
|
|
acb5b47a4f | ||
|
|
ea810bb9e1 | ||
|
|
e2fc45c4ce | ||
|
|
74a044b0c0 | ||
|
|
70f385553a | ||
|
|
f5739a9c7f | ||
|
|
39f5490488 | ||
|
|
d445a2bff9 | ||
|
|
5446485cbf | ||
|
|
cc61d916b8 | ||
|
|
41d9136812 | ||
|
|
71a6a6cf28 | ||
|
|
36361d19bf | ||
|
|
e869516e3d | ||
|
|
ea288c62eb | ||
|
|
afa337f718 | ||
|
|
e4b08e888e | ||
|
|
de4b5a9443 | ||
|
|
a090614709 | ||
|
|
a3f7ca9756 | ||
|
|
82b07a0120 | ||
|
|
0d0cf8dcb7 | ||
|
|
cbfe03be2f | ||
|
|
1f0c6cabc6 | ||
|
|
0c61cedc85 | ||
|
|
44210f1b72 | ||
|
|
7007f9494a | ||
|
|
da268bb6dd | ||
|
|
e03baefbaf | ||
|
|
01396b6573 | ||
|
|
3f009a8267 | ||
|
|
31bb86b002 | ||
|
|
cc2dfd1d08 | ||
|
|
fc768ba695 | ||
|
|
38d6deb4d5 | ||
|
|
2a0b9015de |
1
.gitignore
vendored
@@ -23,3 +23,4 @@ _site/
|
||||
dump.rdb
|
||||
.apt_generated
|
||||
artifacts
|
||||
.sts4-cache
|
||||
|
||||
117
.mvn/wrapper/MavenWrapperDownloader.java
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
/*
|
||||
* Copyright 2007-present the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import java.net.*;
|
||||
import java.io.*;
|
||||
import java.nio.channels.*;
|
||||
import java.util.Properties;
|
||||
|
||||
public class MavenWrapperDownloader {
|
||||
|
||||
private static final String WRAPPER_VERSION = "0.5.6";
|
||||
/**
|
||||
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
|
||||
*/
|
||||
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
|
||||
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
|
||||
|
||||
/**
|
||||
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
|
||||
* use instead of the default one.
|
||||
*/
|
||||
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
|
||||
".mvn/wrapper/maven-wrapper.properties";
|
||||
|
||||
/**
|
||||
* Path where the maven-wrapper.jar will be saved to.
|
||||
*/
|
||||
private static final String MAVEN_WRAPPER_JAR_PATH =
|
||||
".mvn/wrapper/maven-wrapper.jar";
|
||||
|
||||
/**
|
||||
* Name of the property which should be used to override the default download url for the wrapper.
|
||||
*/
|
||||
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
|
||||
|
||||
public static void main(String args[]) {
|
||||
System.out.println("- Downloader started");
|
||||
File baseDirectory = new File(args[0]);
|
||||
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
|
||||
|
||||
// If the maven-wrapper.properties exists, read it and check if it contains a custom
|
||||
// wrapperUrl parameter.
|
||||
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
|
||||
String url = DEFAULT_DOWNLOAD_URL;
|
||||
if(mavenWrapperPropertyFile.exists()) {
|
||||
FileInputStream mavenWrapperPropertyFileInputStream = null;
|
||||
try {
|
||||
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
|
||||
Properties mavenWrapperProperties = new Properties();
|
||||
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
|
||||
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
|
||||
} catch (IOException e) {
|
||||
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
|
||||
} finally {
|
||||
try {
|
||||
if(mavenWrapperPropertyFileInputStream != null) {
|
||||
mavenWrapperPropertyFileInputStream.close();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
// Ignore ...
|
||||
}
|
||||
}
|
||||
}
|
||||
System.out.println("- Downloading from: " + url);
|
||||
|
||||
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
|
||||
if(!outputFile.getParentFile().exists()) {
|
||||
if(!outputFile.getParentFile().mkdirs()) {
|
||||
System.out.println(
|
||||
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
|
||||
}
|
||||
}
|
||||
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
|
||||
try {
|
||||
downloadFileFromURL(url, outputFile);
|
||||
System.out.println("Done");
|
||||
System.exit(0);
|
||||
} catch (Throwable e) {
|
||||
System.out.println("- Error downloading");
|
||||
e.printStackTrace();
|
||||
System.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
|
||||
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
|
||||
String username = System.getenv("MVNW_USERNAME");
|
||||
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
|
||||
Authenticator.setDefault(new Authenticator() {
|
||||
@Override
|
||||
protected PasswordAuthentication getPasswordAuthentication() {
|
||||
return new PasswordAuthentication(username, password);
|
||||
}
|
||||
});
|
||||
}
|
||||
URL website = new URL(urlString);
|
||||
ReadableByteChannel rbc;
|
||||
rbc = Channels.newChannel(website.openStream());
|
||||
FileOutputStream fos = new FileOutputStream(destination);
|
||||
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
|
||||
fos.close();
|
||||
rbc.close();
|
||||
}
|
||||
|
||||
}
|
||||
BIN
.mvn/wrapper/maven-wrapper.jar
vendored
3
.mvn/wrapper/maven-wrapper.properties
vendored
@@ -1 +1,2 @@
|
||||
distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.3.3/apache-maven-3.3.3-bin.zip
|
||||
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
|
||||
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
<repository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>http://repo.spring.io/libs-snapshot-local</url>
|
||||
<url>https://repo.spring.io/libs-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
@@ -29,7 +29,7 @@
|
||||
<repository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>http://repo.spring.io/libs-milestone-local</url>
|
||||
<url>https://repo.spring.io/libs-milestone-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
@@ -37,7 +37,7 @@
|
||||
<repository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>http://repo.spring.io/release</url>
|
||||
<url>https://repo.spring.io/release</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
@@ -47,7 +47,7 @@
|
||||
<pluginRepository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>http://repo.spring.io/libs-snapshot-local</url>
|
||||
<url>https://repo.spring.io/libs-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
@@ -55,7 +55,7 @@
|
||||
<pluginRepository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>http://repo.spring.io/libs-milestone-local</url>
|
||||
<url>https://repo.spring.io/libs-milestone-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
|
||||
4
LICENSE
@@ -1,6 +1,6 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
@@ -192,7 +192,7 @@
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
183
README.adoc
@@ -1 +1,182 @@
|
||||
Spring Cloud Stream Binder for Apache Kafka
|
||||
////
|
||||
DO NOT EDIT THIS FILE. IT WAS GENERATED.
|
||||
Manual changes to this file will be lost when it is generated again.
|
||||
Edit the files in the src/main/asciidoc/ directory instead.
|
||||
////
|
||||
|
||||
|
||||
:jdkversion: 1.8
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
|
||||
image::https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka.svg?style=svg["CircleCI", link="https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka/branch/{github-tag}/graph/badge.svg["codecov", link="https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.svg[Gitter, link="https://gitter.im/spring-cloud/spring-cloud-stream-binder-kafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"]
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
== Apache Kafka Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
== Apache Kafka Streams Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka Streams binder, you need to add `spring-cloud-stream-binder-kafka-streams` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
[[building]]
|
||||
== Building
|
||||
|
||||
:jdkversion: 1.7
|
||||
|
||||
=== Basic Compile and Test
|
||||
|
||||
To build the source you will need to install JDK {jdkversion}.
|
||||
|
||||
The build uses the Maven wrapper so you don't have to install a specific
|
||||
version of Maven. To enable the tests, you should have Kafka server 0.9 or above running
|
||||
before building. See below for more information on running the servers.
|
||||
|
||||
The main build command is
|
||||
|
||||
----
|
||||
$ ./mvnw clean install
|
||||
----
|
||||
|
||||
You can also add '-DskipTests' if you like, to avoid running the tests.
|
||||
|
||||
NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
|
||||
in place of `./mvnw` in the examples below. If you do that you also
|
||||
might need to add `-P spring` if your local Maven settings do not
|
||||
contain repository declarations for spring pre-release artifacts.
|
||||
|
||||
NOTE: Be aware that you might need to increase the amount of memory
|
||||
available to Maven by setting a `MAVEN_OPTS` environment variable with
|
||||
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
|
||||
the `.mvn` configuration, so if you find you have to do it to make a
|
||||
build succeed, please raise a ticket to get the settings added to
|
||||
source control.
|
||||
|
||||
|
||||
The projects that require middleware generally include a
|
||||
`docker-compose.yml`, so consider using
|
||||
https://compose.docker.io/[Docker Compose] to run the middeware servers
|
||||
in Docker containers.
|
||||
|
||||
=== Documentation
|
||||
|
||||
There is a "full" profile that will generate documentation.
|
||||
|
||||
=== Working with the code
|
||||
If you don't have an IDE preference we would recommend that you use
|
||||
https://www.springsource.com/developer/sts[Spring Tools Suite] or
|
||||
https://eclipse.org[Eclipse] when working with the code. We use the
|
||||
https://eclipse.org/m2e/[m2eclipe] eclipse plugin for maven support. Other IDEs and tools
|
||||
should also work without issue.
|
||||
|
||||
==== Importing into eclipse with m2eclipse
|
||||
We recommend the https://eclipse.org/m2e/[m2eclipe] eclipse plugin when working with
|
||||
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
|
||||
marketplace".
|
||||
|
||||
Unfortunately m2e does not yet support Maven 3.3, so once the projects
|
||||
are imported into Eclipse you will also need to tell m2eclipse to use
|
||||
the `.settings.xml` file for the projects. If you do not do this you
|
||||
may see many different errors related to the POMs in the
|
||||
projects. Open your Eclipse preferences, expand the Maven
|
||||
preferences, and select User Settings. In the User Settings field
|
||||
click Browse and navigate to the Spring Cloud project you imported
|
||||
selecting the `.settings.xml` file in that project. Click Apply and
|
||||
then OK to save the preference changes.
|
||||
|
||||
NOTE: Alternatively you can copy the repository settings from https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml[`.settings.xml`] into your own `~/.m2/settings.xml`.
|
||||
|
||||
==== Importing into eclipse without m2eclipse
|
||||
If you prefer not to use m2eclipse you can generate eclipse project metadata using the
|
||||
following command:
|
||||
|
||||
[indent=0]
|
||||
----
|
||||
$ ./mvnw eclipse:eclipse
|
||||
----
|
||||
|
||||
The generated eclipse projects can be imported by selecting `import existing projects`
|
||||
from the `file` menu.
|
||||
[[contributing]
|
||||
== Contributing
|
||||
|
||||
Spring Cloud is released under the non-restrictive Apache 2.0 license,
|
||||
and follows a very standard Github development process, using Github
|
||||
tracker for issues and merging pull requests into master. If you want
|
||||
to contribute even something trivial please do not hesitate, but
|
||||
follow the guidelines below.
|
||||
|
||||
=== Sign the Contributor License Agreement
|
||||
Before we accept a non-trivial patch or pull request we will need you to sign the
|
||||
https://support.springsource.com/spring_committer_signup[contributor's agreement].
|
||||
Signing the contributor's agreement does not grant anyone commit rights to the main
|
||||
repository, but it does mean that we can accept your contributions, and you will get an
|
||||
author credit if we do. Active contributors might be asked to join the core team, and
|
||||
given the ability to merge pull requests.
|
||||
|
||||
=== Code Conventions and Housekeeping
|
||||
None of these is essential for a pull request, but they will all help. They can also be
|
||||
added after the original pull request but before a merge.
|
||||
|
||||
* Use the Spring Framework code format conventions. If you use Eclipse
|
||||
you can import formatter settings using the
|
||||
`eclipse-code-formatter.xml` file from the
|
||||
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
|
||||
Cloud Build] project. If using IntelliJ, you can use the
|
||||
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
|
||||
Plugin] to import the same file.
|
||||
* Make sure all new `.java` files to have a simple Javadoc class comment with at least an
|
||||
`@author` tag identifying you, and preferably at least a paragraph on what the class is
|
||||
for.
|
||||
* Add the ASF license header comment to all new `.java` files (copy from existing files
|
||||
in the project)
|
||||
* Add yourself as an `@author` to the .java files that you modify substantially (more
|
||||
than cosmetic changes).
|
||||
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
|
||||
* A few unit tests would help a lot as well -- someone has to do it.
|
||||
* If no-one else is using your branch, please rebase it against the current master (or
|
||||
other target branch in the main project).
|
||||
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
|
||||
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
|
||||
message (where XXXX is the issue number).
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
65
docs/pom.xml
Normal file
@@ -0,0 +1,65 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.2.1</version>
|
||||
</parent>
|
||||
<packaging>jar</packaging>
|
||||
<name>spring-cloud-stream-binder-kafka-docs</name>
|
||||
<description>Spring Cloud Stream Kafka Binder Docs</description>
|
||||
<properties>
|
||||
<docs.main>spring-cloud-stream-binder-kafka</docs.main>
|
||||
<main.basedir>${basedir}/..</main.basedir>
|
||||
<maven.plugin.plugin.version>3.4</maven.plugin.plugin.version>
|
||||
<configprops.inclusionPattern>.*stream.*</configprops.inclusionPattern>
|
||||
<upload-docs-zip.phase>deploy</upload-docs-zip.phase>
|
||||
</properties>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>${project.groupId}</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<sourceDirectory>src/main/asciidoc</sourceDirectory>
|
||||
</build>
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>docs</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>pl.project13.maven</groupId>
|
||||
<artifactId>git-commit-id-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.asciidoctor</groupId>
|
||||
<artifactId>asciidoctor-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-deploy-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
</project>
|
||||
57
docs/src/main/asciidoc/README.adoc
Normal file
@@ -0,0 +1,57 @@
|
||||
:jdkversion: 1.8
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
|
||||
image::https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka.svg?style=svg["CircleCI", link="https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka/branch/{github-tag}/graph/badge.svg["codecov", link="https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.svg[Gitter, link="https://gitter.im/spring-cloud/spring-cloud-stream-binder-kafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"]
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
== Apache Kafka Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
== Apache Kafka Streams Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka Streams binder, you need to add `spring-cloud-stream-binder-kafka-streams` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
62
docs/src/main/asciidoc/_configprops.adoc
Normal file
@@ -0,0 +1,62 @@
|
||||
|===
|
||||
|Name | Default | Description
|
||||
|
||||
|spring.cloud.stream.binders | | Additional per-binder properties (see {@link BinderProperties}) if more than one binder of the same type is used (i.e., connect to multiple instances of RabbitMq). Here you can specify multiple binder configurations, each with different environment settings. For example: spring.cloud.stream.binders.rabbit1.environment. . . , spring.cloud.stream.binders.rabbit2.environment. . .
|
||||
|spring.cloud.stream.binding-retry-interval | `30` | Retry interval (in seconds) used to schedule binding attempts. Default: 30 sec.
|
||||
|spring.cloud.stream.bindings | | Additional binding properties (see {@link BinderProperties}) per binding name (e.g., 'input'). For example, this sets the content-type for the 'input' binding of a Sink application: 'spring.cloud.stream.bindings.input.contentType=text/plain'
|
||||
|spring.cloud.stream.default-binder | | The name of the binder to use by all bindings in the event multiple binders are available (e.g., 'rabbit').
|
||||
|spring.cloud.stream.dynamic-destination-cache-size | `10` | The maximum size of Least Recently Used (LRU) cache of dynamic destinations. Once this size is reached, new destinations will trigger the removal of old destinations. Default: 10
|
||||
|spring.cloud.stream.dynamic-destinations | `[]` | A list of destinations that can be bound dynamically. If set, only listed destinations can be bound.
|
||||
|spring.cloud.stream.function.batch-mode | `false` |
|
||||
|spring.cloud.stream.function.bindings | |
|
||||
|spring.cloud.stream.instance-count | `1` | The number of deployed instances of an application. Default: 1. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-count" where 'foo' is the name of the binding.
|
||||
|spring.cloud.stream.instance-index | `0` | The instance id of the application: a number from 0 to instanceCount-1. Used for partitioning and with Kafka. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-index" where 'foo' is the name of the binding.
|
||||
|spring.cloud.stream.instance-index-list | | A list of instance id's from 0 to instanceCount-1. Used for partitioning and with Kafka. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-index-list" where 'foo' is the name of the binding. This setting will override the one set in 'spring.cloud.stream.instance-index'
|
||||
|spring.cloud.stream.integration.message-handler-not-propagated-headers | | Message header names that will NOT be copied from the inbound message.
|
||||
|spring.cloud.stream.kafka.binder.authorization-exception-retry-interval | | Time between retries after AuthorizationException is caught in the ListenerContainer; default is null which disables retries. For more info see: {@link org.springframework.kafka.listener.ConsumerProperties#setAuthorizationExceptionRetryInterval(java.time.Duration)}
|
||||
|spring.cloud.stream.kafka.binder.auto-add-partitions | `false` |
|
||||
|spring.cloud.stream.kafka.binder.auto-alter-topics | `false` |
|
||||
|spring.cloud.stream.kafka.binder.auto-create-topics | `true` |
|
||||
|spring.cloud.stream.kafka.binder.brokers | `[localhost]` |
|
||||
|spring.cloud.stream.kafka.binder.certificate-store-directory | | When a certificate store location is given as classpath URL (classpath:), then the binder moves the resource from the classpath location inside the JAR to a location on the filesystem. If this value is set, then this location is used, otherwise, the certificate file is copied to the directory returned by java.io.tmpdir.
|
||||
|spring.cloud.stream.kafka.binder.configuration | | Arbitrary kafka properties that apply to both producers and consumers.
|
||||
|spring.cloud.stream.kafka.binder.consider-down-when-any-partition-has-no-leader | `false` |
|
||||
|spring.cloud.stream.kafka.binder.consumer-properties | | Arbitrary kafka consumer properties.
|
||||
|spring.cloud.stream.kafka.binder.header-mapper-bean-name | | The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
|spring.cloud.stream.kafka.binder.headers | `[]` |
|
||||
|spring.cloud.stream.kafka.binder.health-timeout | `60` | Time to wait to get partition information in seconds; default 60.
|
||||
|spring.cloud.stream.kafka.binder.jaas | |
|
||||
|spring.cloud.stream.kafka.binder.min-partition-count | `1` |
|
||||
|spring.cloud.stream.kafka.binder.producer-properties | | Arbitrary kafka producer properties.
|
||||
|spring.cloud.stream.kafka.binder.replication-factor | `-1` |
|
||||
|spring.cloud.stream.kafka.binder.required-acks | `1` |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.batch-timeout | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.buffer-size | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.compression-type | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.configuration | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.error-channel-enabled | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.header-mode | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.header-patterns | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.message-key-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-count | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-key-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-key-extractor-name | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-selector-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-selector-name | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.required-groups | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.sync | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.topic | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.use-native-encoding | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix | |
|
||||
|spring.cloud.stream.kafka.bindings | |
|
||||
|spring.cloud.stream.metrics.export-properties | | List of properties that are going to be appended to each message. This gets populated by onApplicationEvent, once the context refreshes, to avoid the overhead of doing it on a per-message basis.
|
||||
|spring.cloud.stream.metrics.key | | The name of the metric being emitted. Should be a unique value per application. Defaults to: ${spring.application.name:${vcap.application.name:${spring.config.name:application}}}.
|
||||
|spring.cloud.stream.metrics.meter-filter | | Pattern to control the 'meters' one wants to capture. By default all 'meters' will be captured. For example, 'spring.integration.*' will only capture metric information for meters whose name starts with 'spring.integration'.
|
||||
|spring.cloud.stream.metrics.properties | | Application properties that should be added to the metrics payload. For example: `spring.application**`.
|
||||
|spring.cloud.stream.metrics.schedule-interval | `60s` | Interval expressed as Duration for scheduling metrics snapshots publishing. Defaults to 60 seconds
|
||||
|spring.cloud.stream.override-cloud-connectors | `false` | This property is only applicable when the cloud profile is active and Spring Cloud Connectors are provided with the application. If the property is false (the default), the binder detects a suitable bound service (for example, a RabbitMQ service bound in Cloud Foundry for the RabbitMQ binder) and uses it for creating connections (usually through Spring Cloud Connectors). When set to true, this property instructs binders to completely ignore the bound services and rely on Spring Boot properties (for example, relying on the spring.rabbitmq.* properties provided in the environment for the RabbitMQ binder). The typical usage of this property is to be nested in a customized environment when connecting to multiple systems.
|
||||
|spring.cloud.stream.pollable-source | `none` | A semi-colon delimited list of binding names of pollable sources. Binding names follow the same naming convention as functions. For example, name '...pollable-source=foobar' will be accessible as 'foobar-in-0' binding
|
||||
|spring.cloud.stream.sendto.destination | `none` | The name of the header used to determine the name of the output destination
|
||||
|spring.cloud.stream.source | | A colon delimited string representing the names of the sources based on which source bindings will be created. This is primarily to support cases where source binding may be required without providing a corresponding Supplier. (e.g., for cases where the actual source of data is outside of scope of spring-cloud-stream - HTTP -> Stream)
|
||||
|
||||
|===
|
||||
@@ -34,7 +34,7 @@ source control.
|
||||
|
||||
The projects that require middleware generally include a
|
||||
`docker-compose.yml`, so consider using
|
||||
http://compose.docker.io/[Docker Compose] to run the middleware servers
|
||||
https://compose.docker.io/[Docker Compose] to run the middleware servers
|
||||
in Docker containers.
|
||||
|
||||
=== Documentation
|
||||
@@ -43,13 +43,13 @@ There is a "full" profile that will generate documentation.
|
||||
|
||||
=== Working with the code
|
||||
If you don't have an IDE preference we would recommend that you use
|
||||
http://www.springsource.com/developer/sts[Spring Tools Suite] or
|
||||
http://eclipse.org[Eclipse] when working with the code. We use the
|
||||
http://eclipse.org/m2e/[m2eclipe] eclipse plugin for maven support. Other IDEs and tools
|
||||
https://www.springsource.com/developer/sts[Spring Tools Suite] or
|
||||
https://eclipse.org[Eclipse] when working with the code. We use the
|
||||
https://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
|
||||
should also work without issue.
|
||||
|
||||
==== Importing into eclipse with m2eclipse
|
||||
We recommend the http://eclipse.org/m2e/[m2eclipe] eclipse plugin when working with
|
||||
We recommend the https://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
|
||||
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
|
||||
marketplace".
|
||||
|
||||
@@ -24,7 +24,7 @@ added after the original pull request but before a merge.
|
||||
`eclipse-code-formatter.xml` file from the
|
||||
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
|
||||
Cloud Build] project. If using IntelliJ, you can use the
|
||||
http://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
|
||||
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
|
||||
Plugin] to import the same file.
|
||||
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
|
||||
`@author` tag identifying you, and preferably at least a paragraph on what the class is
|
||||
@@ -37,6 +37,6 @@ added after the original pull request but before a merge.
|
||||
* A few unit tests would help a lot as well -- someone has to do it.
|
||||
* If no-one else is using your branch, please rebase it against the current master (or
|
||||
other target branch in the main project).
|
||||
* When writing a commit message please follow http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
|
||||
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
|
||||
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
|
||||
message (where XXXX is the issue number).
|
||||
@@ -1,12 +1,65 @@
|
||||
[[kafka-dlq-processing]]
|
||||
== Dead-Letter Topic Processing
|
||||
=== Dead-Letter Topic Processing
|
||||
|
||||
Because you cannot anticipate how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
|
||||
[[dlq-partition-selection]]
|
||||
==== Dead-Letter Topic Partition Selection
|
||||
|
||||
By default, records are published to the Dead-Letter topic using the same partition as the original record.
|
||||
This means the Dead-Letter topic must have at least as many partitions as the original topic.
|
||||
|
||||
To change this behavior, add a `DlqPartitionFunction` implementation as a `@Bean` to the application context.
|
||||
Only one such bean can be present.
|
||||
The function is provided with the consumer group, the failed `ConsumerRecord` and the exception.
|
||||
For example, if you always want to route to partition 0, you might use:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public DlqPartitionFunction partitionFunction() {
|
||||
return (group, record, ex) -> 0;
|
||||
}
|
||||
----
|
||||
====
|
||||
NOTE: If you set a consumer binding's `dlqPartitions` property to 1 (and the binder's `minPartitionCount` is equal to `1`), there is no need to supply a `DlqPartitionFunction`; the framework will always use partition 0.
|
||||
If you set a consumer binding's `dlqPartitions` property to a value greater than `1` (or the binder's `minPartitionCount` is greater than `1`), you **must** provide a `DlqPartitionFunction` bean, even if the partition count is the same as the original topic's.
|
||||
|
||||
It is also possible to define a custom name for the DLQ topic.
|
||||
In order to do so, create an implementation of `DlqDestinationResolver` as a `@Bean` to the application context.
|
||||
When the binder detects such a bean, that takes precedence, otherwise it will use the `dlqName` property.
|
||||
If neither of these are found, it will default to `error.<destination>.<group>`.
|
||||
Here is an example of `DlqDestinationResolver` as a `@Bean`.
|
||||
|
||||
====
|
||||
[source]
|
||||
----
|
||||
@Bean
|
||||
public DlqDestinationResolver dlqDestinationResolver() {
|
||||
return (rec, ex) -> {
|
||||
if (rec.topic().equals("word1")) {
|
||||
return "topic1-dlq";
|
||||
}
|
||||
else {
|
||||
return "topic2-dlq";
|
||||
}
|
||||
};
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
One important thing to keep in mind when providing an implementation for `DlqDestinationResolver` is that the provisioner in the binder will not auto create topics for the application.
|
||||
This is because there is no way for the binder to infer the names of all the DLQ topics the implementation might send to.
|
||||
Therefore, if you provide DLQ names using this strategy, it is the application's responsibility to ensure that those topics are created beforehand.
|
||||
|
||||
[[dlq-handling]]
|
||||
==== Handling Records in a Dead-Letter Topic
|
||||
|
||||
Because the framework cannot anticipate how users would want to dispose of dead-lettered messages, it does not provide any standard mechanism to handle them.
|
||||
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
|
||||
However, if the problem is a permanent issue, that could cause an infinite loop.
|
||||
The sample Spring Boot application within this topic is an example of how to route those messages back to the original topic, but it moves them to a "`parking lot`" topic after three attempts.
|
||||
The application is another spring-cloud-stream application that reads from the dead-letter topic.
|
||||
It terminates when no messages are received for 5 seconds.
|
||||
It exits when no messages are received for 5 seconds.
|
||||
|
||||
The examples assume the original destination is `so8400out` and the consumer group is `so8400`.
|
||||
|
||||
@@ -25,10 +78,8 @@ spring.cloud.stream.bindings.input.group=so8400replay
|
||||
spring.cloud.stream.bindings.input.destination=error.so8400out.so8400
|
||||
|
||||
spring.cloud.stream.bindings.output.destination=so8400out
|
||||
spring.cloud.stream.bindings.output.producer.partitioned=true
|
||||
|
||||
spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
|
||||
spring.cloud.stream.bindings.parkingLot.producer.partitioned=true
|
||||
|
||||
spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest
|
||||
|
||||
@@ -90,7 +141,7 @@ public class ReRouteDlqKApplication implements CommandLineRunner {
|
||||
int count = this.processed.get();
|
||||
Thread.sleep(5000);
|
||||
if (count == this.processed.get()) {
|
||||
System.out.println("Idle, terminating");
|
||||
System.out.println("Idle, exiting");
|
||||
return;
|
||||
}
|
||||
}
|
||||
330
docs/src/main/asciidoc/ghpages.sh
Executable file
@@ -0,0 +1,330 @@
|
||||
#!/bin/bash -x
|
||||
|
||||
set -e
|
||||
|
||||
# Set default props like MAVEN_PATH, ROOT_FOLDER etc.
|
||||
function set_default_props() {
|
||||
# The script should be run from the root folder
|
||||
ROOT_FOLDER=`pwd`
|
||||
echo "Current folder is ${ROOT_FOLDER}"
|
||||
|
||||
if [[ ! -e "${ROOT_FOLDER}/.git" ]]; then
|
||||
echo "You're not in the root folder of the project!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Prop that will let commit the changes
|
||||
COMMIT_CHANGES="no"
|
||||
MAVEN_PATH=${MAVEN_PATH:-}
|
||||
echo "Path to Maven is [${MAVEN_PATH}]"
|
||||
REPO_NAME=${PWD##*/}
|
||||
echo "Repo name is [${REPO_NAME}]"
|
||||
SPRING_CLOUD_STATIC_REPO=${SPRING_CLOUD_STATIC_REPO:-git@github.com:spring-cloud/spring-cloud-static.git}
|
||||
echo "Spring Cloud Static repo is [${SPRING_CLOUD_STATIC_REPO}]"
|
||||
}
|
||||
|
||||
# Check if gh-pages exists and docs have been built
|
||||
function check_if_anything_to_sync() {
|
||||
git remote set-url --push origin `git config remote.origin.url | sed -e 's/^git:/https:/'`
|
||||
|
||||
if ! (git remote set-branches --add origin gh-pages && git fetch -q); then
|
||||
echo "No gh-pages, so not syncing"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if ! [ -d docs/target/generated-docs ] && ! [ "${BUILD}" == "yes" ]; then
|
||||
echo "No gh-pages sources in docs/target/generated-docs, so not syncing"
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
function retrieve_current_branch() {
|
||||
# Code getting the name of the current branch. For master we want to publish as we did until now
|
||||
# https://stackoverflow.com/questions/1593051/how-to-programmatically-determine-the-current-checked-out-git-branch
|
||||
# If there is a branch already passed will reuse it - otherwise will try to find it
|
||||
CURRENT_BRANCH=${BRANCH}
|
||||
if [[ -z "${CURRENT_BRANCH}" ]] ; then
|
||||
CURRENT_BRANCH=$(git symbolic-ref -q HEAD)
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH##refs/heads/}
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH:-HEAD}
|
||||
fi
|
||||
echo "Current branch is [${CURRENT_BRANCH}]"
|
||||
git checkout ${CURRENT_BRANCH} || echo "Failed to check the branch... continuing with the script"
|
||||
}
|
||||
|
||||
# Switches to the provided value of the release version. We always prefix it with `v`
|
||||
function switch_to_tag() {
|
||||
git checkout v${VERSION}
|
||||
}
|
||||
|
||||
# Build the docs if switch is on
|
||||
function build_docs_if_applicable() {
|
||||
if [[ "${BUILD}" == "yes" ]] ; then
|
||||
./mvnw clean install -P docs -pl docs -DskipTests
|
||||
fi
|
||||
}
|
||||
|
||||
# Get the name of the `docs.main` property
|
||||
# Get allowed branches - assumes that a `docs` module is available under `docs` profile
|
||||
function retrieve_doc_properties() {
|
||||
MAIN_ADOC_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args='${docs.main}' \
|
||||
--non-recursive \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
|
||||
echo "Extracted 'main.adoc' from Maven build [${MAIN_ADOC_VALUE}]"
|
||||
|
||||
|
||||
ALLOW_PROPERTY=${ALLOW_PROPERTY:-"docs.allowed.branches"}
|
||||
ALLOWED_BRANCHES_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args="\${${ALLOW_PROPERTY}}" \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec \
|
||||
-P docs \
|
||||
-pl docs)
|
||||
echo "Extracted '${ALLOW_PROPERTY}' from Maven build [${ALLOWED_BRANCHES_VALUE}]"
|
||||
}
|
||||
|
||||
# Stash any outstanding changes
|
||||
function stash_changes() {
|
||||
git diff-index --quiet HEAD && dirty=$? || (echo "Failed to check if the current repo is dirty. Assuming that it is." && dirty="1")
|
||||
if [ "$dirty" != "0" ]; then git stash; fi
|
||||
}
|
||||
|
||||
# Switch to gh-pages branch to sync it with current branch
|
||||
function add_docs_from_target() {
|
||||
local DESTINATION_REPO_FOLDER
|
||||
if [[ -z "${DESTINATION}" && -z "${CLONE}" ]] ; then
|
||||
DESTINATION_REPO_FOLDER=${ROOT_FOLDER}
|
||||
elif [[ "${CLONE}" == "yes" ]]; then
|
||||
mkdir -p ${ROOT_FOLDER}/target
|
||||
local clonedStatic=${ROOT_FOLDER}/target/spring-cloud-static
|
||||
if [[ ! -e "${clonedStatic}/.git" ]]; then
|
||||
echo "Cloning Spring Cloud Static to target"
|
||||
git clone ${SPRING_CLOUD_STATIC_REPO} ${clonedStatic} && git checkout gh-pages
|
||||
else
|
||||
echo "Spring Cloud Static already cloned - will pull changes"
|
||||
cd ${clonedStatic} && git checkout gh-pages && git pull origin gh-pages
|
||||
fi
|
||||
DESTINATION_REPO_FOLDER=${clonedStatic}/${REPO_NAME}
|
||||
mkdir -p ${DESTINATION_REPO_FOLDER}
|
||||
else
|
||||
if [[ ! -e "${DESTINATION}/.git" ]]; then
|
||||
echo "[${DESTINATION}] is not a git repository"
|
||||
exit 1
|
||||
fi
|
||||
DESTINATION_REPO_FOLDER=${DESTINATION}/${REPO_NAME}
|
||||
mkdir -p ${DESTINATION_REPO_FOLDER}
|
||||
echo "Destination was provided [${DESTINATION}]"
|
||||
fi
|
||||
cd ${DESTINATION_REPO_FOLDER}
|
||||
git checkout gh-pages
|
||||
git pull origin gh-pages
|
||||
|
||||
# Add git branches
|
||||
###################################################################
|
||||
if [[ -z "${VERSION}" ]] ; then
|
||||
copy_docs_for_current_version
|
||||
else
|
||||
copy_docs_for_provided_version
|
||||
fi
|
||||
commit_changes_if_applicable
|
||||
}
|
||||
|
||||
|
||||
# Copies the docs by using the retrieved properties from Maven build
|
||||
function copy_docs_for_current_version() {
|
||||
if [[ "${CURRENT_BRANCH}" == "master" ]] ; then
|
||||
echo -e "Current branch is master - will copy the current docs only to the root folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
# Not ignored...
|
||||
cp -rf $f ${ROOT_FOLDER}/
|
||||
git add -A ${ROOT_FOLDER}/$file
|
||||
fi
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Current branch is [${CURRENT_BRANCH}]"
|
||||
# https://stackoverflow.com/questions/29300806/a-bash-script-to-check-if-a-string-is-present-in-a-comma-separated-list-of-strin
|
||||
if [[ ",${ALLOWED_BRANCHES_VALUE}," = *",${CURRENT_BRANCH},"* ]] ; then
|
||||
mkdir -p ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is allowed! Will copy the current docs to the [${CURRENT_BRANCH}] folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
# Not ignored...
|
||||
# We want users to access 2.0.0.BUILD-SNAPSHOT/ instead of 1.0.0.RELEASE/spring-cloud.sleuth.html
|
||||
if [[ "${file}" == "${MAIN_ADOC_VALUE}.html" ]] ; then
|
||||
# We don't want to copy the spring-cloud-sleuth.html
|
||||
# we want it to be converted to index.html
|
||||
cp -rf $f ${ROOT_FOLDER}/${CURRENT_BRANCH}/index.html
|
||||
git add -A ${ROOT_FOLDER}/${CURRENT_BRANCH}/index.html
|
||||
else
|
||||
cp -rf $f ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
git add -A ${ROOT_FOLDER}/${CURRENT_BRANCH}/$file
|
||||
fi
|
||||
fi
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is not on the allow list! Check out the Maven [${ALLOW_PROPERTY}] property in
|
||||
[docs] module available under [docs] profile. Won't commit any changes to gh-pages for this branch."
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Copies the docs by using the explicitly provided version
|
||||
function copy_docs_for_provided_version() {
|
||||
local FOLDER=${DESTINATION_REPO_FOLDER}/${VERSION}
|
||||
mkdir -p ${FOLDER}
|
||||
echo -e "Current tag is [v${VERSION}] Will copy the current docs to the [${FOLDER}] folder"
|
||||
for f in ${ROOT_FOLDER}/docs/target/generated-docs/*; do
|
||||
file=${f#${ROOT_FOLDER}/docs/target/generated-docs/*}
|
||||
copy_docs_for_branch ${file} ${FOLDER}
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
CURRENT_BRANCH="v${VERSION}"
|
||||
}
|
||||
|
||||
# Copies the docs from target to the provided destination
|
||||
# Params:
|
||||
# $1 - file from target
|
||||
# $2 - destination to which copy the files
|
||||
function copy_docs_for_branch() {
|
||||
local file=$1
|
||||
local destination=$2
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^${file}$; then
|
||||
# Not ignored...
|
||||
# We want users to access 2.0.0.BUILD-SNAPSHOT/ instead of 1.0.0.RELEASE/spring-cloud.sleuth.html
|
||||
if [[ ("${file}" == "${MAIN_ADOC_VALUE}.html") || ("${file}" == "${REPO_NAME}.html") ]] ; then
|
||||
# We don't want to copy the spring-cloud-sleuth.html
|
||||
# we want it to be converted to index.html
|
||||
cp -rf $f ${destination}/index.html
|
||||
git add -A ${destination}/index.html
|
||||
else
|
||||
cp -rf $f ${destination}
|
||||
git add -A ${destination}/$file
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function commit_changes_if_applicable() {
|
||||
if [[ "${COMMIT_CHANGES}" == "yes" ]] ; then
|
||||
COMMIT_SUCCESSFUL="no"
|
||||
git commit -a -m "Sync docs from ${CURRENT_BRANCH} to gh-pages" && COMMIT_SUCCESSFUL="yes" || echo "Failed to commit changes"
|
||||
|
||||
# Uncomment the following push if you want to auto push to
|
||||
# the gh-pages branch whenever you commit to master locally.
|
||||
# This is a little extreme. Use with care!
|
||||
###################################################################
|
||||
if [[ "${COMMIT_SUCCESSFUL}" == "yes" ]] ; then
|
||||
git push origin gh-pages
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Switch back to the previous branch and exit block
|
||||
function checkout_previous_branch() {
|
||||
# If -version was provided we need to come back to root project
|
||||
cd ${ROOT_FOLDER}
|
||||
git checkout ${CURRENT_BRANCH} || echo "Failed to check the branch... continuing with the script"
|
||||
if [ "$dirty" != "0" ]; then git stash pop; fi
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Assert if properties have been properly passed
|
||||
function assert_properties() {
|
||||
echo "VERSION [${VERSION}], DESTINATION [${DESTINATION}], CLONE [${CLONE}]"
|
||||
if [[ "${VERSION}" != "" && (-z "${DESTINATION}" && -z "${CLONE}") ]] ; then echo "Version was set but destination / clone was not!"; exit 1;fi
|
||||
if [[ ("${DESTINATION}" != "" && "${CLONE}" != "") && -z "${VERSION}" ]] ; then echo "Destination / clone was set but version was not!"; exit 1;fi
|
||||
if [[ "${DESTINATION}" != "" && "${CLONE}" == "yes" ]] ; then echo "Destination and clone was set. Pick one!"; exit 1;fi
|
||||
}
|
||||
|
||||
# Prints the usage
|
||||
function print_usage() {
|
||||
cat <<EOF
|
||||
The idea of this script is to update gh-pages branch with the generated docs. Without any options
|
||||
the script will work in the following manner:
|
||||
|
||||
- if there's no gh-pages / target for docs module then the script ends
|
||||
- for master branch the generated docs are copied to the root of gh-pages branch
|
||||
- for any other branch (if that branch is allowed) a subfolder with branch name is created
|
||||
and docs are copied there
|
||||
- if the version switch is passed (-v) then a tag with (v) prefix will be retrieved and a folder
|
||||
with that version number will be created in the gh-pages branch. WARNING! No allow verification will take place
|
||||
- if the destination switch is passed (-d) then the script will check if the provided dir is a git repo and then will
|
||||
switch to gh-pages of that repo and copy the generated docs to `docs/<project-name>/<version>`
|
||||
- if the destination switch is passed (-d) then the script will check if the provided dir is a git repo and then will
|
||||
switch to gh-pages of that repo and copy the generated docs to `docs/<project-name>/<version>`
|
||||
|
||||
USAGE:
|
||||
|
||||
You can use the following options:
|
||||
|
||||
-v|--version - the script will apply the whole procedure for a particular library version
|
||||
-d|--destination - the root of destination folder where the docs should be copied. You have to use the full path.
|
||||
E.g. point to spring-cloud-static folder. Can't be used with (-c)
|
||||
-b|--build - will run the standard build process after checking out the branch
|
||||
-c|--clone - will automatically clone the spring-cloud-static repo instead of providing the destination.
|
||||
Obviously can't be used with (-d)
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
# ==========================================
|
||||
# ____ ____ _____ _____ _____ _______
|
||||
# / ____|/ ____| __ \|_ _| __ \__ __|
|
||||
# | (___ | | | |__) | | | | |__) | | |
|
||||
# \___ \| | | _ / | | | ___/ | |
|
||||
# ____) | |____| | \ \ _| |_| | | |
|
||||
# |_____/ \_____|_| \_\_____|_| |_|
|
||||
#
|
||||
# ==========================================
|
||||
|
||||
while [[ $# > 0 ]]
|
||||
do
|
||||
key="$1"
|
||||
case ${key} in
|
||||
-v|--version)
|
||||
VERSION="$2"
|
||||
shift # past argument
|
||||
;;
|
||||
-d|--destination)
|
||||
DESTINATION="$2"
|
||||
shift # past argument
|
||||
;;
|
||||
-b|--build)
|
||||
BUILD="yes"
|
||||
;;
|
||||
-c|--clone)
|
||||
CLONE="yes"
|
||||
;;
|
||||
-h|--help)
|
||||
print_usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Invalid option: [$1]"
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift # past argument or value
|
||||
done
|
||||
|
||||
assert_properties
|
||||
set_default_props
|
||||
check_if_anything_to_sync
|
||||
if [[ -z "${VERSION}" ]] ; then
|
||||
retrieve_current_branch
|
||||
else
|
||||
switch_to_tag
|
||||
fi
|
||||
build_docs_if_applicable
|
||||
retrieve_doc_properties
|
||||
stash_changes
|
||||
add_docs_from_target
|
||||
checkout_previous_branch
|
||||
|
Before Width: | Height: | Size: 9.2 KiB After Width: | Height: | Size: 9.2 KiB |
|
Before Width: | Height: | Size: 119 KiB After Width: | Height: | Size: 119 KiB |
|
After Width: | Height: | Size: 233 KiB |
2245
docs/src/main/asciidoc/kafka-streams.adoc
Normal file
973
docs/src/main/asciidoc/overview.adoc
Normal file
@@ -0,0 +1,973 @@
|
||||
[partintro]
|
||||
--
|
||||
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
|
||||
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
|
||||
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
|
||||
--
|
||||
|
||||
== Apache Kafka Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
=== Overview
|
||||
|
||||
The following image shows a simplified diagram of how the Apache Kafka binder operates:
|
||||
|
||||
.Kafka Binder
|
||||
image::{github-raw}/docs/src/main/asciidoc/images/kafka-binder.png[width=300,scaledwidth="50%"]
|
||||
|
||||
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
|
||||
The consumer group maps directly to the same Apache Kafka concept.
|
||||
Partitioning also maps directly to Apache Kafka partitions as well.
|
||||
|
||||
The binder currently uses the Apache Kafka `kafka-clients` version `2.3.1`.
|
||||
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
|
||||
For example, with versions earlier than 0.11.x.x, native headers are not supported.
|
||||
Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
|
||||
=== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Apache Kafka binder.
|
||||
|
||||
For common configuration options and properties pertaining to the binder, see the https://cloud.spring.io/spring-cloud-static/spring-cloud-stream/current/reference/html/spring-cloud-stream.html#binding-properties[binding properties] in core documentation.
|
||||
|
||||
==== Kafka Binder Properties
|
||||
|
||||
spring.cloud.stream.kafka.binder.brokers::
|
||||
A list of brokers to which the Kafka binder connects.
|
||||
+
|
||||
Default: `localhost`.
|
||||
spring.cloud.stream.kafka.binder.defaultBrokerPort::
|
||||
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
|
||||
This sets the default port when no port is configured in the broker list.
|
||||
+
|
||||
Default: `9092`.
|
||||
spring.cloud.stream.kafka.binder.configuration::
|
||||
Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
|
||||
Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
|
||||
Unknown Kafka producer or consumer properties provided through this configuration are filtered out and not allowed to propagate.
|
||||
Properties here supersede any properties set in boot.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.consumerProperties::
|
||||
Key/Value map of arbitrary Kafka client consumer properties.
|
||||
In addition to supporting known Kafka consumer properties, unknown consumer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.headers::
|
||||
The list of custom headers that are transported by the binder.
|
||||
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
|
||||
+
|
||||
Default: empty.
|
||||
spring.cloud.stream.kafka.binder.healthTimeout::
|
||||
The time to wait to get partition information, in seconds.
|
||||
Health reports as down if this timer expires.
|
||||
+
|
||||
Default: 10.
|
||||
spring.cloud.stream.kafka.binder.requiredAcks::
|
||||
The number of required acks on the broker.
|
||||
See the Kafka documentation for the producer `acks` property.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.minPartitionCount::
|
||||
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
|
||||
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
|
||||
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.producerProperties::
|
||||
Key/Value map of arbitrary Kafka client producer properties.
|
||||
In addition to supporting known Kafka producer properties, unknown producer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.replicationFactor::
|
||||
The replication factor of auto-created topics if `autoCreateTopics` is active.
|
||||
Can be overridden on each binding.
|
||||
+
|
||||
NOTE: If you are using Kafka broker versions prior to 2.4, then this value should be set to at least `1`.
|
||||
Starting with version 3.0.8, the binder uses `-1` as the default value, which indicates that the broker 'default.replication.factor' property will be used to determine the number of replicas.
|
||||
Check with your Kafka broker admins to see if there is a policy in place that requires a minimum replication factor, if that's the case then, typically, the `default.replication.factor` will match that value and `-1` should be used, unless you need a replication factor greater than the minimum.
|
||||
+
|
||||
Default: `-1`.
|
||||
spring.cloud.stream.kafka.binder.autoCreateTopics::
|
||||
If set to `true`, the binder creates new topics automatically.
|
||||
If set to `false`, the binder relies on the topics being already configured.
|
||||
In the latter case, if the topics do not exist, the binder fails to start.
|
||||
+
|
||||
NOTE: This setting is independent of the `auto.create.topics.enable` setting of the broker and does not influence it.
|
||||
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
|
||||
+
|
||||
Default: `true`.
|
||||
spring.cloud.stream.kafka.binder.autoAddPartitions::
|
||||
If set to `true`, the binder creates new partitions if required.
|
||||
If set to `false`, the binder relies on the partition size of the topic being already configured.
|
||||
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
|
||||
+
|
||||
Default: `false`.
|
||||
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
|
||||
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
|
||||
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
|
||||
+
|
||||
Default `null` (no transactions)
|
||||
spring.cloud.stream.kafka.binder.transaction.producer.*::
|
||||
Global producer properties for producers in a transactional binder.
|
||||
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
|
||||
+
|
||||
Default: See individual producer properties.
|
||||
|
||||
spring.cloud.stream.kafka.binder.headerMapperBeanName::
|
||||
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `BinderHeaderMapper` bean that uses JSON deserialization for the headers.
|
||||
If this custom `BinderHeaderMapper` bean is not made available to the binder using this property, then the binder will look for a header mapper bean with the name `kafkaBinderHeaderMapper` that is of type `BinderHeaderMapper` before falling back to a default `BinderHeaderMapper` created by the binder.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
spring.cloud.stream.kafka.binder.considerDownWhenAnyPartitionHasNoLeader::
|
||||
Flag to set the binder health as `down` when any partition on the topic, regardless of the consumer that is receiving data from it, is found without a leader.
|
||||
+
|
||||
Default: `false`.
|
||||
|
||||
spring.cloud.stream.kafka.binder.certificateStoreDirectory::
|
||||
When the truststore or keystore certificate location is given as a classpath URL (`classpath:...`), the binder copies the resource from the classpath location inside the JAR file to a location on the filesystem.
|
||||
This is true for both broker level certificates (`ssl.truststore.location` and `ssl.keystore.location`) and certificates intended for schema registry (`schema.registry.ssl.truststore.location` and `schema.registry.ssl.keystore.location`).
|
||||
Keep in mind that the truststore and keystore classpath locations must be provided under `spring.cloud.stream.kafka.binder.configuration...`.
|
||||
For example, `spring.cloud.stream.kafka.binder.configuration.ssl.truststore.location`, `spring.cloud.stream.kafka.binder.configuration.schema.registry.ssl.truststore.location`, etc.
|
||||
The file will be moved to the location specified as the value for this property which must be an existing directory on the filesystem that is writable by the process running the application.
|
||||
If this value is not set and the certificate file is a classpath resource, then it will be moved to System's temp directory as returned by `System.getProperty("java.io.tmpdir")`.
|
||||
This is also true, if this value is present, but the directory cannot be found on the filesystem or is not writable.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[kafka-consumer-properties]]
|
||||
==== Kafka Consumer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.consumer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
autoRebalanceEnabled::
|
||||
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
|
||||
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
|
||||
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
|
||||
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
|
||||
+
|
||||
Default: `true`.
|
||||
ackEachRecord::
|
||||
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
|
||||
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
|
||||
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
|
||||
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
|
||||
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
|
||||
This property is deprecated as of 3.1 in favor of using `ackMode`.
|
||||
If the `ackMode` is not set and batch mode is not enabled, `RECORD` ackMode will be used.
|
||||
+
|
||||
Default: `false`.
|
||||
|
||||
autoCommitOffset::
|
||||
|
||||
Starting with version 3.1, this property is deprecated.
|
||||
See `ackMode` for more details on alternatives.
|
||||
Whether to autocommit offsets when a message has been processed.
|
||||
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header is present in the inbound message.
|
||||
Applications may use this header for acknowledging messages.
|
||||
See the examples section for details.
|
||||
When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
|
||||
Also see `ackEachRecord`.
|
||||
+
|
||||
Default: `true`.
|
||||
ackMode::
|
||||
Specify the container ack mode.
|
||||
This is based on the AckMode enumeration defined in Spring Kafka.
|
||||
If `ackEachRecord` property is set to `true` and consumer is not in batch mode, then this will use the ack mode of `RECORD`, otherwise, use the provided ack mode using this property.
|
||||
|
||||
autoCommitOnError::
|
||||
In pollable consumers, if set to `true`, it always auto commits on error.
|
||||
If not set (the default) or false, it will not auto commit in pollable consumers.
|
||||
Note that this property is only applicable for pollable consumers.
|
||||
+
|
||||
Default: not set.
|
||||
resetOffsets::
|
||||
Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
Must be false if a `KafkaBindingRebalanceListener` is provided; see <<rebalance-listener>>.
|
||||
See <<reset-offsets>> for more information about this property.
|
||||
+
|
||||
Default: `false`.
|
||||
startOffset::
|
||||
The starting offset for new groups.
|
||||
Allowed values: `earliest` and `latest`.
|
||||
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
|
||||
See <<reset-offsets>> for more information about this property.
|
||||
+
|
||||
Default: null (equivalent to `earliest`).
|
||||
enableDlq::
|
||||
When set to true, it enables DLQ behavior for the consumer.
|
||||
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configurable by setting the `dlqName` property or by defining a `@Bean` of type `DlqDestinationResolver`.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> processing for more information.
|
||||
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
|
||||
By default, a failed record is sent to the same partition number in the DLQ topic as the original record.
|
||||
See <<dlq-partition-selection>> for how to change that behavior.
|
||||
**Not allowed when `destinationIsPattern` is `true`.**
|
||||
+
|
||||
Default: `false`.
|
||||
dlqPartitions::
|
||||
When `enableDlq` is true, and this property is not set, a dead letter topic with the same number of partitions as the primary topic(s) is created.
|
||||
Usually, dead-letter records are sent to the same partition in the dead-letter topic as the original record.
|
||||
This behavior can be changed; see <<dlq-partition-selection>>.
|
||||
If this property is set to `1` and there is no `DlqPartitionFunction` bean, all dead-letter records will be written to partition `0`.
|
||||
If this property is greater than `1`, you **MUST** provide a `DlqPartitionFunction` bean.
|
||||
Note that the actual partition count is affected by the binder's `minPartitionCount` property.
|
||||
+
|
||||
Default: `none`
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka consumer properties.
|
||||
In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
For example some properties needed by the application such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
|
||||
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
|
||||
+
|
||||
Default: Empty map.
|
||||
dlqName::
|
||||
The name of the DLQ topic to receive the error messages.
|
||||
+
|
||||
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available through kafka producer properties can be set through this property.
|
||||
When native decoding is enabled on the consumer (i.e., useNativeDecoding: true), the application must provide corresponding key/value serializers for DLQ.
|
||||
This must be provided in the form of `dlqProducerProperties.configuration.key.serializer` and `dlqProducerProperties.configuration.value.serializer`.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
Indicates which standard headers are populated by the inbound channel adapter.
|
||||
Allowed values: `none`, `id`, `timestamp`, or `both`.
|
||||
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
|
||||
+
|
||||
Default: `none`
|
||||
converterBeanName::
|
||||
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
|
||||
+
|
||||
Default: `null`
|
||||
idleEventInterval::
|
||||
The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
|
||||
See <<pause-resume>> for a usage example.
|
||||
+
|
||||
Default: `30000`
|
||||
destinationIsPattern::
|
||||
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
|
||||
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
|
||||
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
|
||||
This can be configured using the `configuration` property above.
|
||||
+
|
||||
Default: `false`
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
Default: none.
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of -1 is used).
|
||||
pollTimeout::
|
||||
Timeout used for polling in pollable consumers.
|
||||
+
|
||||
Default: 5 seconds.
|
||||
transactionManager::
|
||||
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
|
||||
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
|
||||
To achieve exactly once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
|
||||
+
|
||||
Default: none.
|
||||
txCommitRecovered::
|
||||
When using a transactional binder, the offset of a recovered record (e.g. when retries are exhausted and the record is sent to a dead letter topic) will be committed via a new transaction, by default.
|
||||
Setting this property to `false` suppresses committing the offset of recovered record.
|
||||
+
|
||||
Default: true.
|
||||
commonErrorHandlerBeanName::
|
||||
`CommonErrorHandler` bean name to use per consumer binding.
|
||||
When present, this user provided `CommonErrorHandler` takes precedence over any other error handlers defined by the binder.
|
||||
This is a handy way to express error handlers, if the application does not want to use a `ListenerContainerCustomizer` and then check the destination/group combination to set an error handler.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[reset-offsets]]
|
||||
==== Resetting Offsets
|
||||
|
||||
When an application starts, the initial position in each assigned partition depends on two properties `startOffset` and `resetOffsets`.
|
||||
If `resetOffsets` is `false`, normal Kafka consumer https://kafka.apache.org/documentation/#consumerconfigs_auto.offset.reset[`auto.offset.reset`] semantics apply.
|
||||
i.e. If there is no committed offset for a partition for the binding's consumer group, the position is `earliest` or `latest`.
|
||||
By default, bindings with an explicit `group` use `earliest`, and anonymous bindings (with no `group`) use `latest`.
|
||||
These defaults can be overridden by setting the `startOffset` binding property.
|
||||
There will be no committed offset(s) the first time the binding is started with a particular `group`.
|
||||
The other condition where no committed offset exists is if the offset has been expired.
|
||||
With modern brokers (since 2.1), and default broker properties, the offsets are expired 7 days after the last member leaves the group.
|
||||
See the https://kafka.apache.org/documentation/#brokerconfigs_offsets.retention.minutes[`offsets.retention.minutes`] broker property for more information.
|
||||
|
||||
When `resetOffsets` is `true`, the binder applies similar semantics to those that apply when there is no committed offset on the broker, as if this binding has never consumed from the topic; i.e. any current committed offset is ignored.
|
||||
|
||||
Following are two use cases when this might be used.
|
||||
|
||||
1. Consuming from a compacted topic containing key/value pairs.
|
||||
Set `resetOffsets` to `true` and `startOffset` to `earliest`; the binding will perform a `seekToBeginning` on all newly assigned partitions.
|
||||
|
||||
2. Consuming from a topic containing events, where you are only interested in events that occur while this binding is running.
|
||||
Set `resetOffsets` to `true` and `startOffset` to `latest`; the binding will perform a `seekToEnd` on all newly assigned partitions.
|
||||
|
||||
IMPORTANT: If a rebalance occurs after the initial assignment, the seeks will only be performed on any newly assigned partitions that were not assigned during the initial assignment.
|
||||
|
||||
For more control over topic offsets, see <<rebalance-listener>>; when a listener is provided, `resetOffsets` should not be set to `true`, otherwise, that will cause an error.
|
||||
|
||||
==== Consuming Batches
|
||||
|
||||
Starting with version 3.0, when `spring.cloud.stream.binding.<name>.consumer.batch-mode` is set to `true`, all of the records received by polling the Kafka `Consumer` will be presented as a `List<?>` to the listener method.
|
||||
Otherwise, the method will be called with one record at a time.
|
||||
The size of the batch is controlled by Kafka consumer properties `max.poll.records`, `fetch.min.bytes`, `fetch.max.wait.ms`; refer to the Kafka documentation for more information.
|
||||
|
||||
Bear in mind that batch mode is not supported with `@StreamListener` - it only works with the newer functional programming model.
|
||||
|
||||
IMPORTANT: Retry within the binder is not supported when using batch mode, so `maxAttempts` will be overridden to 1.
|
||||
You can configure a `SeekToCurrentBatchErrorHandler` (using a `ListenerContainerCustomizer`) to achieve similar functionality to retry in the binder.
|
||||
You can also use a manual `AckMode` and call `Acknowledgment.nack(index, sleep)` to commit the offsets for a partial batch and have the remaining records redelivered.
|
||||
Refer to the https://docs.spring.io/spring-kafka/docs/2.3.0.BUILD-SNAPSHOT/reference/html/#committing-offsets[Spring for Apache Kafka documentation] for more information about these techniques.
|
||||
|
||||
[[kafka-producer-properties]]
|
||||
==== Kafka Producer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.producer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
bufferSize::
|
||||
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
+
|
||||
Default: `16384`.
|
||||
sync::
|
||||
Whether the producer is synchronous.
|
||||
+
|
||||
Default: `false`.
|
||||
sendTimeoutExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to evaluate the time to wait for ack when synchronous publish is enabled -- for example, `headers['mySendTimeout']`.
|
||||
The value of the timeout is in milliseconds.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
batchTimeout::
|
||||
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
|
||||
+
|
||||
Default: `0`.
|
||||
messageKeyExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
In the case of a regular processor (`Function<String, String>` or `Function<Message<?>, Message<?>`), if the produced key needs to be same as the incoming key from the topic, this property can be set as below.
|
||||
`spring.cloud.stream.kafka.bindings.<output-binding-name>.producer.messageKeyExpression: headers['kafka_receivedMessageKey']`
|
||||
There is an important caveat to keep in mind for reactive functions.
|
||||
In that case, it is up to the application to manually copy the headers from the incoming messages to outbound messages.
|
||||
You can set the header, e.g. `myKey` and use `headers['myKey']` as suggested above or, for convenience, simply set the `KafkaHeaders.MESSAGE_KEY` header, and you do not need to set this property at all.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
|
||||
Patterns can begin or end with the wildcard character (asterisk).
|
||||
Patterns can be negated by prefixing with `!`.
|
||||
Matching stops after the first match (positive or negative).
|
||||
For example `!ask,as*` will pass `ash` but not `ask`.
|
||||
`id` and `timestamp` are never mapped.
|
||||
+
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
|
||||
+
|
||||
Default: Empty map.
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of -1 is used).
|
||||
useTopicHeader::
|
||||
Set to `true` to override the default binding destination (topic name) with the value of the `KafkaHeaders.TOPIC` message header in the outbound message.
|
||||
If the header is not present, the default binding destination is used.
|
||||
+
|
||||
Default: `false`.
|
||||
recordMetadataChannel::
|
||||
The bean name of a `MessageChannel` to which successful send results should be sent; the bean must exist in the application context.
|
||||
The message sent to the channel is the sent message (after conversion, if any) with an additional header `KafkaHeaders.RECORD_METADATA`.
|
||||
The header contains a `RecordMetadata` object provided by the Kafka client; it includes the partition and offset where the record was written in the topic.
|
||||
+
|
||||
`RecordMetadata meta = sendResultMsg.getHeaders().get(KafkaHeaders.RECORD_METADATA, RecordMetadata.class)`
|
||||
+
|
||||
Failed sends go the producer error channel (if configured); see <<kafka-error-channels>>.
|
||||
+
|
||||
Default: null.
|
||||
|
||||
NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
|
||||
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
|
||||
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.
|
||||
|
||||
compression::
|
||||
Set the `compression.type` producer property.
|
||||
Supported values are `none`, `gzip`, `snappy`, `lz4` and `zstd`.
|
||||
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
|
||||
+
|
||||
Default: `none`.
|
||||
transactionManager::
|
||||
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
|
||||
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
|
||||
To achieve exactly once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
closeTimeout::
|
||||
Timeout in number of seconds to wait for when closing the producer.
|
||||
+
|
||||
Default: `30`
|
||||
|
||||
allowNonTransactional::
|
||||
Normally, all output bindings associated with a transactional binder will publish in a new transaction, if one is not already in process.
|
||||
This property allows you to override that behavior.
|
||||
If set to true, records published to this output binding will not be run in a transaction, unless one is already in process.
|
||||
+
|
||||
Default: `false`
|
||||
|
||||
==== Usage examples
|
||||
|
||||
In this section, we show the use of the preceding properties for specific scenarios.
|
||||
|
||||
===== Example: Setting `ackMode` to `MANUAL` and Relying on Manual Acknowledgement
|
||||
|
||||
This example illustrates how one may manually acknowledge offsets in a consumer application.
|
||||
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.ackMode` be set to `MANUAL`.
|
||||
Use the corresponding input channel name for your example.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class ManuallyAcknowledgingConsumer {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void process(Message<?> message) {
|
||||
Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
===== Example: Security Configuration
|
||||
|
||||
Apache Kafka 0.9 supports secure connections between client and brokers.
|
||||
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
|
||||
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
|
||||
|
||||
For example, to set `security.protocol` to `SASL_SSL`, set the following property:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
|
||||
----
|
||||
|
||||
All the other security properties can be set in a similar manner.
|
||||
|
||||
When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
|
||||
|
||||
Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.
|
||||
|
||||
====== Using JAAS Configuration Files
|
||||
|
||||
The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
|
||||
--spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
|
||||
----
|
||||
|
||||
====== Using Spring Boot Properties
|
||||
|
||||
As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.
|
||||
|
||||
The following properties can be used to configure the login context of the Kafka client:
|
||||
|
||||
spring.cloud.stream.kafka.binder.jaas.loginModule::
|
||||
The login module name. Not necessary to be set in normal cases.
|
||||
+
|
||||
Default: `com.sun.security.auth.module.Krb5LoginModule`.
|
||||
spring.cloud.stream.kafka.binder.jaas.controlFlag::
|
||||
The control flag of the login module.
|
||||
+
|
||||
Default: `required`.
|
||||
spring.cloud.stream.kafka.binder.jaas.options::
|
||||
Map with a key/value pair containing the login module options.
|
||||
+
|
||||
Default: Empty map.
|
||||
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.autoCreateTopics=false \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
|
||||
----
|
||||
|
||||
The preceding example represents the equivalent of the following JAAS file:
|
||||
|
||||
[source]
|
||||
----
|
||||
KafkaClient {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=true
|
||||
keyTab="/etc/security/keytabs/kafka_client.keytab"
|
||||
principal="kafka-client-1@EXAMPLE.COM";
|
||||
};
|
||||
----
|
||||
|
||||
If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.
|
||||
|
||||
NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
|
||||
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.
|
||||
|
||||
NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` with Kerberos.
|
||||
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
|
||||
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
|
||||
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
|
||||
|
||||
====== Multi-binder configuration and JAAS
|
||||
|
||||
When connecting to multiple clusters in which each one requires separate JAAS configuration, then set the JAAS configuration using the property `sasl.jaas.config`.
|
||||
When this property is present in the application, it takes precedence over the other strategies mentioned above.
|
||||
See this https://cwiki.apache.org/confluence/display/KAFKA/KIP-85%3A+Dynamic+JAAS+configuration+for+Kafka+clients[KIP-85] for more details.
|
||||
|
||||
For example, if you have two clusters in your application with separate JAAS configuration, then the following is a template that you can use:
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
binders:
|
||||
kafka1:
|
||||
type: kafka
|
||||
environment:
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
kafka:
|
||||
binder:
|
||||
brokers: localhost:9092
|
||||
configuration.sasl.jaas.config: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"admin-secret\";"
|
||||
kafka2:
|
||||
type: kafka
|
||||
environment:
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
kafka:
|
||||
binder:
|
||||
brokers: localhost:9093
|
||||
configuration.sasl.jaas.config: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"user1\" password=\"user1-secret\";"
|
||||
kafka.binder:
|
||||
configuration:
|
||||
security.protocol: SASL_PLAINTEXT
|
||||
sasl.mechanism: PLAIN
|
||||
```
|
||||
|
||||
Note that both the Kafka clusters, and the `sasl.jaas.config` values for each of them are different in the above configuration.
|
||||
|
||||
See this https://github.com/spring-cloud/spring-cloud-stream-samples/tree/main/multi-binder-samples/kafka-multi-binder-jaas[sample application] for more details on how to set up and run such an application.
|
||||
|
||||
[[pause-resume]]
|
||||
===== Example: Pausing and Resuming the Consumer
|
||||
|
||||
If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
|
||||
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
|
||||
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
|
||||
The frequency at which events are published is controlled by the `idleEventInterval` property.
|
||||
Since the consumer is not thread-safe, you must call these methods on the calling thread.
|
||||
|
||||
The following simple application shows how to pause and resume:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class Application {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(Application.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
|
||||
System.out.println(in);
|
||||
consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
|
||||
return event -> {
|
||||
System.out.println(event);
|
||||
if (event.getConsumer().paused().size() > 0) {
|
||||
event.getConsumer().resume(event.getConsumer().paused());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
[[kafka-transactional-binder]]
|
||||
=== Transactional Binder
|
||||
|
||||
Enable transactions by setting `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` to a non-empty value, e.g. `tx-`.
|
||||
When used in a processor application, the consumer starts the transaction; any records sent on the consumer thread participate in the same transaction.
|
||||
When the listener exits normally, the listener container will send the offset to the transaction and commit it.
|
||||
A common producer factory is used for all producer bindings configured using `spring.cloud.stream.kafka.binder.transaction.producer.*` properties; individual binding Kafka producer properties are ignored.
|
||||
|
||||
IMPORTANT: Normal binder retries (and dead lettering) are not supported with transactions because the retries will run in the original transaction, which may be rolled back and any published records will be rolled back too.
|
||||
When retries are enabled (the common property `maxAttempts` is greater than zero) the retry properties are used to configure a `DefaultAfterRollbackProcessor` to enable retries at the container level.
|
||||
Similarly, instead of publishing dead-letter records within the transaction, this functionality is moved to the listener container, again via the `DefaultAfterRollbackProcessor` which runs after the main transaction has rolled back.
|
||||
|
||||
If you wish to use transactions in a source application, or from some arbitrary thread for producer-only transaction (e.g. `@Scheduled` method), you must get a reference to the transactional producer factory and define a `KafkaTransactionManager` bean using it.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
public PlatformTransactionManager transactionManager(BinderFactory binders,
|
||||
@Value("${unique.tx.id.per.instance}") String txId) {
|
||||
|
||||
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
|
||||
MessageChannel.class)).getTransactionalProducerFactory();
|
||||
KafkaTransactionManager tm = new KafkaTransactionManager<>(pf);
|
||||
tm.setTransactionIdPrefix(txId);
|
||||
return tm;
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
Notice that we get a reference to the binder using the `BinderFactory`; use `null` in the first argument when there is only one binder configured.
|
||||
If more than one binder is configured, use the binder name to get the reference.
|
||||
Once we have a reference to the binder, we can obtain a reference to the `ProducerFactory` and create a transaction manager.
|
||||
|
||||
Then you would use normal Spring transaction support, e.g. `TransactionTemplate` or `@Transactional`, for example:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public static class Sender {
|
||||
|
||||
@Transactional
|
||||
public void doInTransaction(MessageChannel output, List<String> stuffToSend) {
|
||||
stuffToSend.forEach(stuff -> output.send(new GenericMessage<>(stuff)));
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
If you wish to synchronize producer-only transactions with those from some other transaction manager, use a `ChainedTransactionManager`.
|
||||
|
||||
IMPORTANT: If you deploy multiple instances of your application, each instance needs a unique `transactionIdPrefix`.
|
||||
|
||||
[[kafka-error-channels]]
|
||||
=== Error Channels
|
||||
|
||||
Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
|
||||
See https://cloud.spring.io/spring-cloud-static/spring-cloud-stream/current/reference/html/spring-cloud-stream.html#spring-cloud-stream-overview-error-handling[this section on error handling] for more information.
|
||||
|
||||
The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
|
||||
|
||||
* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
|
||||
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`
|
||||
|
||||
There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
|
||||
You can consume these exceptions with your own Spring Integration flow.
|
||||
|
||||
[[kafka-metrics]]
|
||||
=== Kafka Metrics
|
||||
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not been yet consumed from a given binder's topic by a given consumer group.
|
||||
The metrics provided are based on the Micrometer library.
|
||||
The binder creates the `KafkaBinderMetrics` bean if Micrometer is on the classpath and no other such beans provided by the application.
|
||||
The metric contains the consumer group information, topic and the actual lag in committed offset from the latest offset on the topic.
|
||||
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
|
||||
|
||||
You can exclude `KafkaBinderMetrics` from creating the necessary infrastructure like consumers and then reporting the metrics by providing the following component in the application.
|
||||
|
||||
```
|
||||
@Component
|
||||
class NoOpBindingMeters {
|
||||
NoOpBindingMeters(MeterRegistry registry) {
|
||||
registry.config().meterFilter(
|
||||
MeterFilter.denyNameStartsWith(KafkaBinderMetrics.OFFSET_LAG_METRIC_NAME));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
More details on how to suppress meters selectively can be found https://micrometer.io/docs/concepts#_meter_filters[here].
|
||||
|
||||
[[kafka-tombstones]]
|
||||
=== Tombstone Records (null record values)
|
||||
|
||||
When using compacted topics, a record with a `null` value (also called a tombstone record) represents the deletion of a key.
|
||||
To receive such messages in a `@StreamListener` method, the parameter must be marked as not required to receive a `null` value argument.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
|
||||
@Payload(required = false) Customer customer) {
|
||||
// customer is null if a tombstone record
|
||||
...
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
[[rebalance-listener]]
|
||||
=== Using a KafkaBindingRebalanceListener
|
||||
|
||||
Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
|
||||
Starting with version 2.1, if you provide a single `KafkaBindingRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
public interface KafkaBindingRebalanceListener {
|
||||
|
||||
/**
|
||||
* Invoked by the container before any pending offsets are committed.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
*/
|
||||
default void onPartitionsRevokedBeforeCommit(String bindingName, Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> partitions) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked by the container after any pending offsets are committed.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
*/
|
||||
default void onPartitionsRevokedAfterCommit(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked when partitions are initially assigned or after a rebalance.
|
||||
* Applications might only want to perform seek operations on an initial assignment.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
* @param initial true if this is the initial assignment.
|
||||
*/
|
||||
default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions,
|
||||
boolean initial) {
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.
|
||||
|
||||
[[retry-and-dlq-processing]]
|
||||
=== Retry and Dead Letter Processing
|
||||
|
||||
By default, when you configure retry (e.g. `maxAttempts`) and `enableDlq` in a consumer binding, these functions are performed within the binder, with no participation by the listener container or Kafka consumer.
|
||||
|
||||
There are situations where it is preferable to move this functionality to the listener container, such as:
|
||||
|
||||
* The aggregate of retries and delays will exceed the consumer's `max.poll.interval.ms` property, potentially causing a partition rebalance.
|
||||
* You wish to publish the dead letter to a different Kafka cluster.
|
||||
* You wish to add retry listeners to the error handler.
|
||||
* ...
|
||||
|
||||
To configure moving this functionality from the binder to the container, define a `@Bean` of type `ListenerContainerWithDlqAndRetryCustomizer`.
|
||||
This interface has the following methods:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
/**
|
||||
* Configure the container.
|
||||
* @param container the container.
|
||||
* @param destinationName the destination name.
|
||||
* @param group the group.
|
||||
* @param dlqDestinationResolver a destination resolver for the dead letter topic (if
|
||||
* enableDlq).
|
||||
* @param backOff the backOff using retry properties (if configured).
|
||||
* @see #retryAndDlqInBinding(String, String)
|
||||
*/
|
||||
void configure(AbstractMessageListenerContainer<?, ?> container, String destinationName, String group,
|
||||
@Nullable BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> dlqDestinationResolver,
|
||||
@Nullable BackOff backOff);
|
||||
|
||||
/**
|
||||
* Return false to move retries and DLQ from the binding to a customized error handler
|
||||
* using the retry metadata and/or a {@code DeadLetterPublishingRecoverer} when
|
||||
* configured via
|
||||
* {@link #configure(AbstractMessageListenerContainer, String, String, BiFunction, BackOff)}.
|
||||
* @param destinationName the destination name.
|
||||
* @param group the group.
|
||||
* @return true to disable retries in the binding
|
||||
*/
|
||||
default boolean retryAndDlqInBinding(String destinationName, String group) {
|
||||
return true;
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
The destination resolver and `BackOff` are created from the binding properties (if configured).
|
||||
You can then use these to create a custom error handler and dead letter publisher; for example:
|
||||
|
||||
====
|
||||
[source, java]
|
||||
----
|
||||
@Bean
|
||||
ListenerContainerWithDlqAndRetryCustomizer cust(KafkaTemplate<?, ?> template) {
|
||||
return new ListenerContainerWithDlqAndRetryCustomizer() {
|
||||
|
||||
@Override
|
||||
public void configure(AbstractMessageListenerContainer<?, ?> container, String destinationName,
|
||||
String group,
|
||||
@Nullable BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> dlqDestinationResolver,
|
||||
@Nullable BackOff backOff) {
|
||||
|
||||
if (destinationName.equals("topicWithLongTotalRetryConfig")) {
|
||||
ConsumerRecordRecoverer dlpr = new DeadLetterPublishingRecoverer(template,
|
||||
dlqDestinationResolver);
|
||||
container.setCommonErrorHandler(new DefaultErrorHandler(dlpr, backOff));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean retryAndDlqInBinding(String destinationName, String group) {
|
||||
return !destinationName.contains("topicWithLongTotalRetryConfig");
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
----
|
||||
====
|
||||
|
||||
Now, only a single retry delay needs to be greater than the consumer's `max.poll.interval.ms` property.
|
||||
|
||||
[[consumer-producer-config-customizer]]
|
||||
=== Customizing Consumer and Producer configuration
|
||||
|
||||
If you want advanced customization of consumer and producer configuration that is used for creating `ConsumerFactory` and `ProducerFactory` in Kafka,
|
||||
you can implement the following customizers.
|
||||
|
||||
* ConsumerConfigCustomizer
|
||||
* ProducerConfigCustomizer
|
||||
|
||||
Both of these interfaces provide a way to configure the config map used for consumer and producer properties.
|
||||
For example, if you want to gain access to a bean that is defined at the application level, you can inject that in the implementation of the `configure` method.
|
||||
When the binder discovers that these customizers are available as beans, it will invoke the `configure` method right before creating the consumer and producer factories.
|
||||
|
||||
Both of these interfaces also provide access to both the binding and destination names so that they can be accessed while customizing producer and consumer properties.
|
||||
|
||||
[[admin-client-config-customization]]
|
||||
=== Customizing AdminClient Configuration
|
||||
|
||||
As with consumer and producer config customization above, applications can also customize the configuration for admin clients by providing an `AdminClientConfigCustomizer`.
|
||||
AdminClientConfigCustomizer's configure method provides access to the admin client properties, using which you can define further customization.
|
||||
Binder's Kafka topic provisioner gives the highest precedence for the properties given through this customizer.
|
||||
Here is an example of providing this customizer bean.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public AdminClientConfigCustomizer adminClientConfigCustomizer() {
|
||||
return props -> {
|
||||
props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
|
||||
};
|
||||
}
|
||||
```
|
||||
@@ -1,4 +1,4 @@
|
||||
== Partitioning with the Kafka Binder
|
||||
=== Partitioning with the Kafka Binder
|
||||
|
||||
Apache Kafka supports topic partitioning natively.
|
||||
|
||||
@@ -49,7 +49,6 @@ spring:
|
||||
output:
|
||||
destination: partitioned.topic
|
||||
producer:
|
||||
partitioned: true
|
||||
partition-key-expression: headers['partitionKey']
|
||||
partition-count: 12
|
||||
----
|
||||
55
docs/src/main/asciidoc/spring-cloud-stream-binder-kafka.adoc
Normal file
@@ -0,0 +1,55 @@
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
:toc: left
|
||||
:toclevels: 8
|
||||
:nofooter:
|
||||
:sectlinks: true
|
||||
|
||||
|
||||
[[spring-cloud-stream-binder-kafka-reference]]
|
||||
= Spring Cloud Stream Kafka Binder Reference Guide
|
||||
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell, Arnaud Jardiné, Soby Chacko
|
||||
:doctype: book
|
||||
:toc:
|
||||
:toclevels: 4
|
||||
:source-highlighter: prettify
|
||||
:numbered:
|
||||
:icons: font
|
||||
:hide-uri-scheme:
|
||||
:spring-cloud-stream-binder-kafka-repo: snapshot
|
||||
:github-tag: master
|
||||
:spring-cloud-stream-binder-kafka-docs-version: current
|
||||
:spring-cloud-stream-binder-kafka-docs: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
|
||||
:spring-cloud-stream-binder-kafka-docs-current: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
:github-raw: https://raw.github.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
:github-wiki: https://github.com/{github-repo}/wiki
|
||||
:github-master-code: https://github.com/{github-repo}/tree/master
|
||||
:sc-ext: java
|
||||
// ======================================================================================
|
||||
|
||||
|
||||
*{project-version}*
|
||||
|
||||
|
||||
= Reference Guide
|
||||
include::overview.adoc[]
|
||||
|
||||
include::dlq.adoc[]
|
||||
|
||||
include::partitions.adoc[]
|
||||
|
||||
include::kafka-streams.adoc[]
|
||||
|
||||
include::tips.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
865
docs/src/main/asciidoc/tips.adoc
Normal file
@@ -0,0 +1,865 @@
|
||||
== Tips, Tricks and Recipes
|
||||
|
||||
=== Simple DLQ with Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
As a developer, I want to write a consumer application that processes records from a Kafka topic.
|
||||
However, if some error occurs in processing, I don't want the application to stop completely.
|
||||
Instead, I want to send the record in error to a DLT (Dead-Letter-Topic) and then continue processing new records.
|
||||
|
||||
==== Solution
|
||||
|
||||
The solution for this problem is to use the DLQ feature in Spring Cloud Stream.
|
||||
For the purposes of this discussion, let us assume that the following is our processor function.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<byte[]> processData() {
|
||||
return s -> {
|
||||
throw new RuntimeException();
|
||||
};
|
||||
}
```
|
||||
|
||||
This is a very trivial function that throws an exception for all the records that it processes, but you can take this function and extend it to any other similar situations.
|
||||
|
||||
In order to send the records in error to a DLT, we need to provide the following configuration.
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
group: my-group
|
||||
destination: input-topic
|
||||
kafka:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
consumer:
|
||||
enableDlq: true
|
||||
dlqName: input-topic-dlq
|
||||
```
|
||||
|
||||
In order to activate DLQ, the application must provide a group name.
|
||||
Anonymous consumers cannot use the DLQ facilities.
|
||||
We also need to enable DLQ by setting the `enableDlq` property on the Kafka consumer binding to `true`.
|
||||
Finally, we can optionally provide the DLT name by providing the `dlqName` on Kafka consumer binding, which otherwise defaults to `error.input-topic.my-group` in this case.
|
||||
|
||||
Note that in the example consumer provided above, the type of the payload is `byte[]`.
|
||||
By default, the DLQ producer in Kafka binder expects the payload of type `byte[]`.
|
||||
If that is not the case, then we need to provide the configuration for proper serializer.
|
||||
For example, let us re-write the consumer function as below:
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<String> processData() {
|
||||
return s -> {
|
||||
throw new RuntimeException();
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Now, we need to tell Spring Cloud Stream, how we want to serialize the data when writing to the DLT.
|
||||
Here is the modified configuration for this scenario:
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
group: my-group
|
||||
destination: input-topic
|
||||
kafka:
|
||||
bindings:
|
||||
processData-in-0:
|
||||
consumer:
|
||||
enableDlq: true
|
||||
dlqName: input-topic-dlq
|
||||
dlqProducerProperties:
|
||||
configuration:
|
||||
value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
|
||||
```
|
||||
|
||||
=== DLQ with Advanced Retry Options
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
This is similar to the recipe above, but as a developer I would like to configure the way retries are handled.
|
||||
|
||||
==== Solution
|
||||
|
||||
If you followed the above recipe, then you get the default retry options built into the Kafka binder when the processing encounters an error.
|
||||
|
||||
By default, the binder retries for a maximum of 3 attempts with a one second initial delay, 2.0 multiplier with each back off with a max delay of 10 seconds.
|
||||
You can change all these configurations as below:
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.maxAttempts
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.backOffInitialInterval
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.backOffMultiplier
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.backOffMaxInterval
|
||||
```
|
||||
|
||||
If you want, you can also provide a list of retryable exceptions by providing a map of boolean values.
|
||||
For example,
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.retryableExceptions.java.lang.IllegalStateException=true
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.retryableExceptions.java.lang.IllegalArgumentException=false
|
||||
```
|
||||
|
||||
By default, any exceptions not listed in the map above will be retried.
|
||||
If that is not desired, then you can disable that by providing,
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.defaultRetryable=false
|
||||
```
|
||||
|
||||
You can also provide your own `RetryTemplate` and mark it as `@StreamRetryTemplate` which will be scanned and used by the binder.
|
||||
This is useful when you want more sophisticated retry strategies and policies.
|
||||
|
||||
If you have multiple `@StreamRetryTemplate` beans, then you can specify which one your binding wants by using the property,
|
||||
|
||||
```
|
||||
spring.cloud.stream.bindings.processData-in-0.consumer.retry-template-name=<your-retry-template-bean-name>
|
||||
```
|
||||
|
||||
=== Handling Deserialization errors with DLQ
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I have a processor that encounters a deserialization exception in Kafka consumer.
|
||||
I would expect that the Spring Cloud Stream DLQ mechanism will catch that scenario, but it does not.
|
||||
How can I handle this?
|
||||
|
||||
==== Solution
|
||||
|
||||
The normal DLQ mechanism offered by Spring Cloud Stream will not help when Kafka consumer throws an irrecoverable deserialization exception.
|
||||
This is because, this exception happens even before the consumer's `poll()` method returns.
|
||||
Spring for Apache Kafka project offers some great ways to help the binder with this situation.
|
||||
Let us explore those.
|
||||
|
||||
Assuming this is our function:
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<String> functionName() {
|
||||
return s -> {
|
||||
System.out.println(s);
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
It is a trivial function that takes a `String` parameter.
|
||||
|
||||
We want to bypass the message converters provided by Spring Cloud Stream and want to use native deserializers instead.
|
||||
In the case of `String` types, it does not make much sense, but for more complex types like AVRO etc. you have to rely on external deserializers and therefore want to delegate the conversion to Kafka.
|
||||
|
||||
Now when the consumer receives the data, let us assume that there is a bad record that causes a deserialization error, maybe someone passed an `Integer` instead of a `String` for example.
|
||||
In that case, if you don't do something in the application, the exception will be propagated through the chain and your application will exit eventually.
|
||||
|
||||
In order to handle this, you can add a `ListenerContainerCustomizer` `@Bean` that configures a `SeekToCurrentErrorHandler`.
|
||||
This `SeekToCurrentErrorHandler` is configured with a `DeadLetterPublishingRecoverer`.
|
||||
We also need to configure an `ErrorHandlingDeserializer` for the consumer.
|
||||
That sounds like a lot of complex things, but in reality, it boils down to these 3 beans in this case.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public ListenerContainerCustomizer<AbstractMessageListenerContainer<byte[], byte[]>> customizer(SeekToCurrentErrorHandler errorHandler) {
|
||||
return (container, dest, group) -> {
|
||||
container.setErrorHandler(errorHandler);
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
@Bean
|
||||
public SeekToCurrentErrorHandler errorHandler(DeadLetterPublishingRecoverer deadLetterPublishingRecoverer) {
|
||||
return new SeekToCurrentErrorHandler(deadLetterPublishingRecoverer);
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
@Bean
|
||||
public DeadLetterPublishingRecoverer publisher(KafkaOperations bytesTemplate) {
|
||||
return new DeadLetterPublishingRecoverer(bytesTemplate);
|
||||
}
|
||||
```
|
||||
|
||||
Let us analyze each of them.
|
||||
The first one is the `ListenerContainerCustomizer` bean that takes a `SeekToCurrentErrorHandler`.
|
||||
The container is now customized with that particular error handler.
|
||||
You can learn more about container customization https://docs.spring.io/spring-cloud-stream/docs/current/reference/html/spring-cloud-stream.html#_advanced_consumer_configuration[here].
|
||||
|
||||
The second bean is the `SeekToCurrentErrorHandler` that is configured with a publishing to a `DLT`.
|
||||
See https://docs.spring.io/spring-kafka/docs/current/reference/html/#seek-to-current[here] for more details on `SeekToCurrentErrorHandler`.
|
||||
|
||||
The third bean is the `DeadLetterPublishingRecoverer` that is ultimately responsible for sending to the `DLT`.
|
||||
By default, the `DLT` topic is named as the ORIGINAL_TOPIC_NAME.DLT.
|
||||
You can change that though.
|
||||
See the https://docs.spring.io/spring-kafka/docs/current/reference/html/#dead-letters[docs] for more details.
|
||||
|
||||
|
||||
We also need to configure an https://docs.spring.io/spring-kafka/docs/current/reference/html/#error-handling-deserializer[ErrorHandlingDeserializer] through application config.
|
||||
|
||||
The `ErrorHandlingDeserializer` delegates to the actual deserializer.
|
||||
In case of errors, it sets key/value of the record to be null and includes the raw bytes of the message.
|
||||
It then sets the exception in a header and passes this record to the listener, which then calls the registered error handler.
|
||||
|
||||
Following is the configuration required:
|
||||
|
||||
```
|
||||
spring.cloud.stream:
|
||||
function:
|
||||
definition: functionName
|
||||
bindings:
|
||||
functionName-in-0:
|
||||
group: group-name
|
||||
destination: input-topic
|
||||
consumer:
|
||||
use-native-decoding: true
|
||||
kafka:
|
||||
bindings:
|
||||
functionName-in-0:
|
||||
consumer:
|
||||
enableDlq: true
|
||||
dlqName: dlq-topic
|
||||
dlqProducerProperties:
|
||||
configuration:
|
||||
value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
configuration:
|
||||
value.deserializer: org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
|
||||
spring.deserializer.value.delegate.class: org.apache.kafka.common.serialization.StringDeserializer
|
||||
```
|
||||
|
||||
We are providing the `ErrorHandlingDeserializer` through the `configuration` property on the binding.
|
||||
We are also indicating that the actual deserializer to delegate is the `StringDeserializer`.
|
||||
|
||||
Keep in mind that none of the dlq properties above are relevant for the discussions in this recipe.
|
||||
They are purely meant for addressing any application level errors only.
|
||||
|
||||
=== Basic offset management in Kafka binder
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I want to write a Spring Cloud Stream Kafka consumer application and I am not sure about how it manages Kafka consumer offsets.
|
||||
Can you explain?
|
||||
|
||||
==== Solution
|
||||
|
||||
We encourage you to read the https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current/reference/html/spring-cloud-stream-binder-kafka.html#reset-offsets[docs] section on this to get a thorough understanding of it.
|
||||
|
||||
Here it is in a gist:
|
||||
|
||||
Kafka supports two types of offsets to start with by default - `earliest` and `latest`.
|
||||
Their semantics are self-explanatory from their names.
|
||||
|
||||
Assuming you are running the consumer for the first time.
|
||||
If you miss the group.id in your Spring Cloud Stream application, then it becomes an anonymous consumer.
|
||||
Whenever, you have an anonymous consumer, in that case, Spring Cloud Stream application by default will start from the `latest` available offset in the topic partition.
|
||||
On the other hand, if you explicitly specify a group.id, then by default, the Spring Cloud Stream application will start from the `earliest` available offset in the topic partition.
|
||||
|
||||
In both cases above (consumers with explicit groups and anonymous groups), the starting offset can be switched around by using the property `spring.cloud.stream.kafka.bindings.<binding-name>.consumer.startOffset` and setting it to either `earliest` or `latest`.
|
||||
|
||||
Now, assume that you already ran the consumer before and now starting it again.
|
||||
In this case, the starting offset semantics in the above case do not apply as the consumer finds an already committed offset for the consumer group (In the case of an anonymous consumer, although the application does not provide a group.id, the binder will auto generate one for you).
|
||||
It simply picks up from the last committed offset onward.
|
||||
This is true, even when you have a `startOffset` value provided.
|
||||
|
||||
However, you can override the default behavior where the consumer starts from the last committed offset by using the `resetOffsets` property.
|
||||
In order to do that, set the property `spring.cloud.stream.kafka.bindings.<binding-name>.consumer.resetOffsets` to `true` (which is `false` by default).
|
||||
Then make sure you provide the `startOffset` value (either `earliest` or `latest`).
|
||||
When you do that and then start the consumer application, each time you start, it starts as if this is starting for the first time and ignore any committed offsets for the partition.
|
||||
|
||||
=== Seeking to arbitrary offsets in Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Using Kafka binder, I know that it can set the offset to either `earliest` or `latest`, but I have a requirement to seek the offset to something in the middle, an arbitrary offset.
|
||||
Is there a way to achieve this using Spring Cloud Stream Kafka binder?
|
||||
|
||||
==== Solution
|
||||
|
||||
Previously we saw how Kafka binder allows you to tackle basic offset management.
|
||||
By default, the binder does not allow you to rewind to an arbitrary offset, at least through the mechanism we saw in that recipe.
|
||||
However, there are some low-level strategies that the binder provides to achieve this use case.
|
||||
Let's explore them.
|
||||
|
||||
First of all, when you want to reset to an arbitrary offset other than `earliest` or `latest`, make sure to leave the `resetOffsets` configuration to its defaults, which is `false`.
|
||||
Then you have to provide a custom bean of type `KafkaBindingRebalanceListener`, which will be injected into all consumer bindings.
|
||||
It is an interface that comes with a few default methods, but here is the method that we are interested in:
|
||||
|
||||
```
|
||||
/**
|
||||
* Invoked when partitions are initially assigned or after a rebalance. Applications
|
||||
* might only want to perform seek operations on an initial assignment. While the
|
||||
* 'initial' argument is true for each thread (when concurrency is greater than 1),
|
||||
* implementations should keep track of exactly which partitions have been sought.
|
||||
* There is a race in that a rebalance could occur during startup and so a topic/
|
||||
* partition that has been sought on one thread may be re-assigned to another
|
||||
* thread and you may not wish to re-seek it at that time.
|
||||
* @param bindingName the name of the binding.
|
||||
* @param consumer the consumer.
|
||||
* @param partitions the partitions.
|
||||
* @param initial true if this is the initial assignment on the current thread.
|
||||
*/
|
||||
default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> partitions, boolean initial) {
|
||||
// do nothing
|
||||
}
|
||||
```
|
||||
|
||||
Let us look at the details.
|
||||
|
||||
In essence, this method will be invoked each time during the initial assignment for a topic partition or after a rebalance.
|
||||
For better illustration, let us assume that our topic is `foo` and it has 4 partitions.
|
||||
Initially, we are only starting a single consumer in the group and this consumer will consume from all partitions.
|
||||
When the consumer starts for the first time, all 4 partitions are getting initially assigned.
|
||||
However, we do not want to start the partitions to consume at the defaults (`earliest` since we define a group), rather for each partition, we want them to consume after seeking to arbitrary offsets.
|
||||
Imagine that you have a business case to consume from certain offsets as below.
|
||||
|
||||
```
|
||||
Partition start offset
|
||||
|
||||
0 1000
|
||||
1 2000
|
||||
2 2000
|
||||
3 1000
|
||||
```
|
||||
|
||||
This could be achieved by implementing the above method as below.
|
||||
|
||||
```
|
||||
|
||||
@Override
|
||||
public void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer, Collection<TopicPartition> partitions, boolean initial) {
|
||||
|
||||
Map<TopicPartition, Long> topicPartitionOffset = new HashMap<>();
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 0), 1000L);
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 1), 2000L);
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 2), 2000L);
|
||||
topicPartitionOffset.put(new TopicPartition("foo", 3), 1000L);
|
||||
|
||||
if (initial) {
|
||||
partitions.forEach(tp -> {
|
||||
if (topicPartitionOffset.containsKey(tp)) {
|
||||
final Long offset = topicPartitionOffset.get(tp);
|
||||
try {
|
||||
consumer.seek(tp, offset);
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Handle exceptions carefully.
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This is just a rudimentary implementation.
|
||||
Real world use cases are much more complex than this and you need to adjust accordingly, but this certainly gives you a basic sketch.
|
||||
When consumer `seek` fails, it may throw some runtime exceptions and you need to decide what to do in those cases.
|
||||
|
||||
==== What if we start a second consumer with the same group id?
|
||||
|
||||
When we add a second consumer, a rebalance will occur and some partitions will be moved around.
|
||||
Let's say that the new consumer gets partitions `2` and `3`.
|
||||
When this new Spring Cloud Stream consumer calls this `onPartitionsAssigned` method, it will see that this is the initial assignment for partitions `2` and `3` on this consumer.
|
||||
Therefore, it will do the seek operation because of the conditional check on the `initial` argument.
|
||||
In the case of the first consumer, it now only has partitions `0` and `1`.
|
||||
However, for this consumer it was simply a rebalance event and not considered as an initial assignment.
|
||||
Thus, it will not re-seek to the given offsets because of the conditional check on the `initial` argument.
|
||||
|
||||
=== How do I manually acknowledge using Kafka binder?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Using Kafka binder, I want to manually acknowledge messages in my consumer.
|
||||
How do I do that?
|
||||
|
||||
==== Solution
|
||||
|
||||
By default, Kafka binder delegates to the default commit settings in Spring for Apache Kafka project.
|
||||
The default `ackMode` in Spring Kafka is `batch`.
|
||||
See https://docs.spring.io/spring-kafka/docs/current/reference/html/#committing-offsets[here] for more details on that.
|
||||
|
||||
There are situations in which you want to disable this default commit behavior and rely on manual commits.
|
||||
Following steps allow you to do that.
|
||||
|
||||
Set the property `spring.cloud.stream.kafka.bindings.<binding-name>.consumer.ackMode` to either `MANUAL` or `MANUAL_IMMEDIATE`.
|
||||
When it is set like that, then there will be a header called `kafka_acknowledgment` (from `KafkaHeaders.ACKNOWLEDGMENT`) present in the message received by the consumer method.
|
||||
|
||||
For example, imagine this as your consumer method.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<Message<String>> myConsumer() {
|
||||
return msg -> {
|
||||
Acknowledgment acknowledgment = msg.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Then you set the property `spring.cloud.stream.bindings.myConsumer-in-0.consumer.ackMode` to `MANUAL` or `MANUAL_IMMEDIATE`.
|
||||
|
||||
=== How do I override the default binding names in Spring Cloud Stream?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Spring Cloud Stream creates default bindings based on the function definition and signature, but how do I override these to more domain friendly names?
|
||||
|
||||
==== Solution
|
||||
|
||||
Assume that following is your function signature.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Function<String, String> uppercase(){
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
By default, Spring Cloud Stream will create the bindings as below.
|
||||
|
||||
1. uppercase-in-0
|
||||
2. uppercase-out-0
|
||||
|
||||
You can override these bindings to something by using the following properties.
|
||||
|
||||
```
|
||||
spring.cloud.stream.function.bindings.uppercase-in-0=my-transformer-in
|
||||
spring.cloud.stream.function.bindings.uppercase-out-0=my-transformer-out
|
||||
```
|
||||
|
||||
After this, all binding properties must be made on the new names, `my-transformer-in` and `my-transformer-out`.
|
||||
|
||||
Here is another example with Kafka Streams and multiple inputs.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public BiFunction<KStream<String, Order>, KTable<String, Account>, KStream<String, EnrichedOrder>> processOrder() {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
By default, Spring Cloud Stream will create three different binding names for this function.
|
||||
|
||||
1. processOrder-in-0
|
||||
2. processOrder-in-1
|
||||
3. processOrder-out-0
|
||||
|
||||
You have to use these binding names each time you want to set some configuration on these bindings.
|
||||
You don't like that, and you want to use more domain-friendly and readable binding names, for example, something like.
|
||||
|
||||
1. orders
|
||||
2. accounts
|
||||
3. enrichedOrders
|
||||
|
||||
You can easily do that by simply setting these three properties
|
||||
|
||||
1. spring.cloud.stream.function.bindings.processOrder-in-0=orders
|
||||
2. spring.cloud.stream.function.bindings.processOrder-in-1=accounts
|
||||
3. spring.cloud.stream.function.bindings.processOrder-out-0=enrichedOrders
|
||||
|
||||
Once you do that, it overrides the default binding names and any properties that you want to set on them must be on these new binding names.
|
||||
|
||||
=== How do I send a message key as part of my record?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I need to send a key along with the payload of the record, is there a way to do that in Spring Cloud Stream?
|
||||
|
||||
==== Solution
|
||||
|
||||
It is often necessary that you want to send associative data structure like a map as the record with a key and value.
|
||||
Spring Cloud Stream allows you to do that in a straightforward manner.
|
||||
Following is a basic blueprint for doing this, but you may want to adapt it to your particular use case.
|
||||
|
||||
Here is sample producer method (aka `Supplier`).
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supplier() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader(KafkaHeaders.MESSAGE_KEY, "my-foo").build();
|
||||
}
|
||||
```
|
||||
|
||||
This is a trivial function that sends a message with a `String` payload, but also with a key.
|
||||
Note that we set the key as a message header using `KafkaHeaders.MESSAGE_KEY`.
|
||||
|
||||
If you want to change the key from the default `kafka_messageKey`, then in the configuration, we need to specify this property:
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.supplier-out-0.producer.messageKeyExpression=headers['my-special-key']
|
||||
```
|
||||
|
||||
Please note that we use the binding name `supplier-out-0` since that is our function name, please update accordingly.
|
||||
|
||||
Then, we use this new key when we produce the message.
|
||||
|
||||
=== How do I use native serializer and deserializer instead of message conversion done by Spring Cloud Stream?
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
Instead of using the message converters in Spring Cloud Stream, I want to use native Serializer and Deserializer in Kafka.
|
||||
By default, Spring Cloud Stream takes care of this conversion using its internal built-in message converters.
|
||||
How can I bypass this and delegate the responsibility to Kafka?
|
||||
|
||||
==== Solution
|
||||
|
||||
This is really easy to do.
|
||||
|
||||
All you have to do is to provide the following property to enable native serialization.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.producer.useNativeEncoding: true
|
||||
```
|
||||
|
||||
Then, you need to also set the serializers.
|
||||
There are a couple of ways to do this.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.key.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
```
|
||||
|
||||
or using the binder configuration.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.binder.configuration.key.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
spring.cloud.stream.kafka.binder.configuration.value.serializer: org.apache.kafka.common.serialization.StringSerializer
|
||||
```
|
||||
|
||||
When using the binder way, it is applied against all the bindings whereas setting them at the bindings are per binding.
|
||||
|
||||
On the deserializing side, you just need to provide the deserializers as configuration.
|
||||
|
||||
For example,
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.configuration.key.deserializer: org.apache.kafka.common.serialization.StringDeserializer
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.configuration.value.deserializer: org.apache.kafka.common.serialization.StringDeserializer
|
||||
```
|
||||
|
||||
You can also set them at the binder level.
|
||||
|
||||
There is an optional property that you can set to force native decoding.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.<binding-name>.consumer.useNativeDecoding: true
|
||||
```
|
||||
|
||||
However, in the case of Kafka binder, this is unnecessary, as by the time it reaches the binder, Kafka already deserializes them using the configured deserializers.
|
||||
|
||||
=== Explain how offset resetting work in Kafka Streams binder
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
By default, Kafka Streams binder always starts from the earliest offset for a new consumer.
|
||||
Sometimes, it is beneficial or required by the application to start from the latest offset.
|
||||
Kafka Streams binder allows you to do that.
|
||||
|
||||
==== Solution
|
||||
|
||||
Before we look at the solution, let us look at the following scenario.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public BiConsumer<KStream<Object, Object>, KTable<Object, Object>> myBiConsumer() {
|
||||
(s, t) -> s.join(t, ...)
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
We have a `BiConsumer` bean that requires two input bindings.
|
||||
In this case, the first binding is for a `KStream` and the second one is for a `KTable`.
|
||||
When running this application for the first time, by default, both bindings start from the `earliest` offset.
|
||||
What about I want to start from the `latest` offset due to some requirements?
|
||||
You can do this by enabling the following properties.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.streams.bindings.myBiConsumer-in-0.consumer.startOffset: latest
|
||||
spring.cloud.stream.kafka.streams.bindings.myBiConsumer-in-1.consumer.startOffset: latest
|
||||
```
|
||||
|
||||
If you want only one binding to start from the `latest` offset and the other to consume from the default `earliest`, then leave the latter binding out from the configuration.
|
||||
|
||||
Keep in mind that, once there are committed offsets, these settings are *not* honored and the committed offsets take precedence.
|
||||
|
||||
=== Keeping track of successful sending of records (producing) in Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I have a Kafka producer application and I want to keep track of all my successful sends.
|
||||
|
||||
==== Solution
|
||||
|
||||
Let us assume that we have this following supplier in the application.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supplier() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader(KafkaHeaders.MESSAGE_KEY, "my-foo").build();
|
||||
}
|
||||
```
|
||||
|
||||
Then, we need to define a new `MessageChannel` bean to capture all the successful send information.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public MessageChannel fooRecordChannel() {
|
||||
return new DirectChannel();
|
||||
}
|
||||
```
|
||||
|
||||
Next, define this property in the application configuration to provide the bean name for the `recordMetadataChannel`.
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.supplier-out-0.producer.recordMetadataChannel: fooRecordChannel
|
||||
```
|
||||
|
||||
At this point, successfully sent record information will be sent to the `fooRecordChannel`.
|
||||
|
||||
You can write an `IntegrationFlow` as below to see the information.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public IntegrationFlow integrationFlow() {
|
||||
return f -> f.channel("fooRecordChannel")
|
||||
.handle((payload, messageHeaders) -> payload);
|
||||
}
|
||||
```
|
||||
|
||||
In the `handle` method, the payload is what got sent to Kafka and the message headers contain a special key called `kafka_recordMetadata`.
|
||||
Its value is a `RecordMetadata` that contains information about topic partition, current offset etc.
|
||||
|
||||
=== Adding custom header mapper in Kafka
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
I have a Kafka producer application that sets some headers, but they are missing in the consumer application. Why is that?
|
||||
|
||||
==== Solution
|
||||
|
||||
Under normal circumstances, this should be fine.
|
||||
|
||||
Imagine, you have the following producer.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supply() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader("foo", "bar").build();
|
||||
}
|
||||
```
|
||||
|
||||
On the consumer side, you should still see the header "foo", and the following should not give you any issues.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Consumer<Message<String>> consume() {
|
||||
return s -> {
|
||||
final String foo = (String)s.getHeaders().get("foo");
|
||||
System.out.println(foo);
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
If you provide a https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/3.1.3/reference/html/spring-cloud-stream-binder-kafka.html#_kafka_binder_properties[custom header mapper] in the application, then this won't work.
|
||||
Let's say you have an empty `KafkaHeaderMapper` in the application.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public KafkaHeaderMapper kafkaBinderHeaderMapper() {
|
||||
return new KafkaHeaderMapper() {
|
||||
@Override
|
||||
public void fromHeaders(MessageHeaders headers, Headers target) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> target) {
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
If that is your implementation, then you will miss the `foo` header on the consumer.
|
||||
Chances are that, you may have some logic inside those `KafkaHeaderMapper` methods.
|
||||
You need the following to populate the `foo` header.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public KafkaHeaderMapper kafkaBinderHeaderMapper() {
|
||||
return new KafkaHeaderMapper() {
|
||||
@Override
|
||||
public void fromHeaders(MessageHeaders headers, Headers target) {
|
||||
final String foo = (String) headers.get("foo");
|
||||
target.add("foo", foo.getBytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> target) {
|
||||
final Header foo = source.lastHeader("foo");
|
||||
target.put("foo", new String(foo.value()));
|
||||
}
|
||||
    };
}
|
||||
```
|
||||
|
||||
That will properly populate the `foo` header from the producer to consumer.
|
||||
|
||||
==== Special note on the id header
|
||||
|
||||
In Spring Cloud Stream, the `id` header is a special header, but some applications may want to have special custom id headers - something like `custom-id` or `ID` or `Id`.
|
||||
The first one (`custom-id`) will propagate without any custom header mapper from producer to consumer.
|
||||
However, if you produce with a variant of the framework reserved `id` header - such as `ID`, `Id`, `iD` etc. then you will run into issues with the internals of the framework.
|
||||
See this https://stackoverflow.com/questions/68412600/change-the-behaviour-in-spring-cloud-stream-make-header-matcher-case-sensitive[StackOverflow thread] for more context on this use case.
|
||||
In that case, you must use a custom `KafkaHeaderMapper` to map the case-sensitive id header.
|
||||
For example, let's say you have the following producer.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public Supplier<Message<String>> supply() {
|
||||
return () -> MessageBuilder.withPayload("foo").setHeader("Id", "my-id").build();
|
||||
}
|
||||
```
|
||||
|
||||
The header `Id` above will be gone from the consuming side as it clashes with the framework `id` header.
|
||||
You can provide a custom `KafkaHeaderMapper` to solve this issue.
|
||||
|
||||
```
|
||||
@Bean
|
||||
public KafkaHeaderMapper kafkaBinderHeaderMapper1() {
|
||||
return new KafkaHeaderMapper() {
|
||||
@Override
|
||||
public void fromHeaders(MessageHeaders headers, Headers target) {
|
||||
final String myId = (String) headers.get("Id");
|
||||
target.add("Id", myId.getBytes());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> target) {
|
||||
final Header Id = source.lastHeader("Id");
|
||||
target.put("Id", new String(Id.value()));
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
By doing this, both `id` and `Id` headers will be available from the producer to the consumer side.
|
||||
|
||||
=== Producing to multiple topics in transaction
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
How do I produce transactional messages to multiple Kafka topics?
|
||||
|
||||
For more context, see this https://stackoverflow.com/questions/68928091/dlq-bounded-retry-and-eos-when-producing-to-multiple-topics-using-spring-cloud[StackOverflow question].
|
||||
|
||||
==== Solution
|
||||
|
||||
Use transactional support in Kafka binder for transactions and then provide an `AfterRollbackProcessor`.
|
||||
In order to produce to multiple topics, use `StreamBridge` API.
|
||||
|
||||
Below are the code snippets for this:
|
||||
|
||||
```
|
||||
@Autowired
|
||||
StreamBridge bridge;
|
||||
|
||||
@Bean
|
||||
Consumer<String> input() {
|
||||
return str -> {
|
||||
System.out.println(str);
|
||||
this.bridge.send("left", str.toUpperCase());
|
||||
this.bridge.send("right", str.toLowerCase());
|
||||
if (str.equals("Fail")) {
|
||||
throw new RuntimeException("test");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> customizer(BinderFactory binders) {
|
||||
return (container, dest, group) -> {
|
||||
ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
|
||||
MessageChannel.class)).getTransactionalProducerFactory();
|
||||
KafkaTemplate<byte[], byte[]> template = new KafkaTemplate<>(pf);
|
||||
DefaultAfterRollbackProcessor rollbackProcessor = rollbackProcessor(template);
|
||||
container.setAfterRollbackProcessor(rollbackProcessor);
|
||||
};
|
||||
}
|
||||
|
||||
DefaultAfterRollbackProcessor rollbackProcessor(KafkaTemplate<byte[], byte[]> template) {
|
||||
return new DefaultAfterRollbackProcessor<>(
|
||||
new DeadLetterPublishingRecoverer(template), new FixedBackOff(2000L, 2L), template, true);
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
==== Required Configuration
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix: tx-
|
||||
spring.cloud.stream.kafka.binder.required-acks=all
|
||||
spring.cloud.stream.bindings.input-in-0.group=foo
|
||||
spring.cloud.stream.bindings.input-in-0.destination=input
|
||||
spring.cloud.stream.bindings.left.destination=left
|
||||
spring.cloud.stream.bindings.right.destination=right
|
||||
|
||||
spring.cloud.stream.kafka.bindings.input-in-0.consumer.maxAttempts=1
|
||||
```
|
||||
|
||||
in order to test, you can use the following:
|
||||
|
||||
```
|
||||
@Bean
|
||||
public ApplicationRunner runner(KafkaTemplate<byte[], byte[]> template) {
|
||||
return args -> {
|
||||
System.in.read();
|
||||
template.send("input", "Fail".getBytes());
|
||||
template.send("input", "Good".getBytes());
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
Some important notes:
|
||||
|
||||
Please ensure that you don't have any DLQ settings on the application configuration as we manually configure DLT (By default it will be published to a topic named `input.DLT` based on the initial consumer function).
|
||||
Also, reset the `maxAttempts` on consumer binding to `1` in order to avoid retries by the binder.
|
||||
It will be max tried a total of 3 in the example above (initial try + the 2 attempts in the `FixedBackoff`).
|
||||
|
||||
See the https://stackoverflow.com/questions/68928091/dlq-bounded-retry-and-eos-when-producing-to-multiple-topics-using-spring-cloud[StackOverflow thread] for more details on how to test this code.
|
||||
If you are using Spring Cloud Stream to test it by adding more consumer functions, make sure to set the `isolation-level` on the consumer binding to `read-committed`.
|
||||
|
||||
This https://stackoverflow.com/questions/68941306/spring-cloud-stream-database-transaction-does-not-roll-back[StackOverflow thread] is also related to this discussion.
|
||||
|
||||
=== Pitfalls to avoid when running multiple pollable consumers
|
||||
|
||||
==== Problem Statement
|
||||
|
||||
How can I run multiple instances of the pollable consumers and generate unique `client.id` for each instance?
|
||||
|
||||
==== Solution
|
||||
|
||||
Assuming that I have the following definition:
|
||||
|
||||
```
|
||||
spring.cloud.stream.pollable-source: foo
|
||||
spring.cloud.stream.bindings.foo-in-0.group: my-group
|
||||
```
|
||||
|
||||
When running the application, the Kafka consumer generates a client.id (something like `consumer-my-group-1`).
|
||||
For each instance of the application that is running, this `client.id` will be the same, causing unexpected issues.
|
||||
|
||||
In order to fix this, you can add the following property on each instance of the application:
|
||||
|
||||
```
|
||||
spring.cloud.stream.kafka.bindings.foo-in-0.consumer.configuration.client.id=${client.id}
|
||||
```
|
||||
|
||||
See this https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1139[GitHub issue] for more details.
|
||||
|
||||
37
docs/src/main/ruby/generate_readme.sh
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
base_dir = File.join(File.dirname(__FILE__),'../../..')
|
||||
src_dir = File.join(base_dir, "/src/main/asciidoc")
|
||||
require 'asciidoctor'
|
||||
require 'optparse'
|
||||
|
||||
options = {}
|
||||
file = "#{src_dir}/README.adoc"
|
||||
|
||||
OptionParser.new do |o|
|
||||
o.on('-o OUTPUT_FILE', 'Output file (default is stdout)') { |file| options[:to_file] = file unless file=='-' }
|
||||
o.on('-h', '--help') { puts o; exit }
|
||||
o.parse!
|
||||
end
|
||||
|
||||
file = ARGV[0] if ARGV.length>0
|
||||
|
||||
# Copied from https://github.com/asciidoctor/asciidoctor-extensions-lab/blob/master/scripts/asciidoc-coalescer.rb
|
||||
doc = Asciidoctor.load_file file, safe: :unsafe, header_only: true, attributes: options[:attributes]
|
||||
header_attr_names = (doc.instance_variable_get :@attributes_modified).to_a
|
||||
header_attr_names.each {|k| doc.attributes[%(#{k}!)] = '' unless doc.attr? k }
|
||||
attrs = doc.attributes
|
||||
attrs['allow-uri-read'] = true
|
||||
puts attrs
|
||||
|
||||
out = "// Do not edit this file (e.g. go instead to src/main/asciidoc)\n\n"
|
||||
doc = Asciidoctor.load_file file, safe: :unsafe, parse: false, attributes: attrs
|
||||
out << doc.reader.read
|
||||
|
||||
unless options[:to_file]
|
||||
puts out
|
||||
else
|
||||
File.open(options[:to_file],'w+') do |file|
|
||||
file.write(out)
|
||||
end
|
||||
end
|
||||
189
mvnw
vendored
@@ -19,7 +19,7 @@
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Maven2 Start Up Batch script
|
||||
# Maven Start Up Batch script
|
||||
#
|
||||
# Required ENV vars:
|
||||
# ------------------
|
||||
@@ -54,38 +54,16 @@ case "`uname`" in
|
||||
CYGWIN*) cygwin=true ;;
|
||||
MINGW*) mingw=true;;
|
||||
Darwin*) darwin=true
|
||||
#
|
||||
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
|
||||
# for the new JDKs provided by Oracle.
|
||||
#
|
||||
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
|
||||
#
|
||||
# Oracle JDKs
|
||||
#
|
||||
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
|
||||
#
|
||||
# Apple JDKs
|
||||
#
|
||||
export JAVA_HOME=`/usr/libexec/java_home`
|
||||
fi
|
||||
;;
|
||||
# Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
|
||||
# See https://developer.apple.com/library/mac/qa/qa1170/_index.html
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
if [ -x "/usr/libexec/java_home" ]; then
|
||||
export JAVA_HOME="`/usr/libexec/java_home`"
|
||||
else
|
||||
export JAVA_HOME="/Library/Java/Home"
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -z "$JAVA_HOME" ] ; then
|
||||
@@ -130,13 +108,12 @@ if $cygwin ; then
|
||||
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
|
||||
fi
|
||||
|
||||
# For Migwn, ensure paths are in UNIX format before anything is touched
|
||||
# For Mingw, ensure paths are in UNIX format before anything is touched
|
||||
if $mingw ; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME="`(cd "$M2_HOME"; pwd)`"
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
|
||||
# TODO classpath?
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
@@ -184,27 +161,28 @@ fi
|
||||
|
||||
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=`cygpath --path --windows "$M2_HOME"`
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
|
||||
fi
|
||||
|
||||
# traverses directory structure from process work directory to filesystem root
|
||||
# first directory with .mvn subdirectory is considered project base directory
|
||||
find_maven_basedir() {
|
||||
local basedir=$(pwd)
|
||||
local wdir=$(pwd)
|
||||
|
||||
if [ -z "$1" ]
|
||||
then
|
||||
echo "Path not specified to find_maven_basedir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
basedir="$1"
|
||||
wdir="$1"
|
||||
while [ "$wdir" != '/' ] ; do
|
||||
if [ -d "$wdir"/.mvn ] ; then
|
||||
basedir=$wdir
|
||||
break
|
||||
fi
|
||||
wdir=$(cd "$wdir/.."; pwd)
|
||||
# workaround for JBEAP-8937 (on Solaris 10/Sparc)
|
||||
if [ -d "${wdir}" ]; then
|
||||
wdir=`cd "$wdir/.."; pwd`
|
||||
fi
|
||||
# end of workaround
|
||||
done
|
||||
echo "${basedir}"
|
||||
}
|
||||
@@ -216,9 +194,108 @@ concat_lines() {
|
||||
fi
|
||||
}
|
||||
|
||||
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
|
||||
BASE_DIR=`find_maven_basedir "$(pwd)"`
|
||||
if [ -z "$BASE_DIR" ]; then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
##########################################################################################
|
||||
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
# This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
##########################################################################################
|
||||
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found .mvn/wrapper/maven-wrapper.jar"
|
||||
fi
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
|
||||
fi
|
||||
if [ -n "$MVNW_REPOURL" ]; then
|
||||
jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
else
|
||||
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
fi
|
||||
while IFS="=" read key value; do
|
||||
case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
|
||||
esac
|
||||
done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Downloading from: $jarUrl"
|
||||
fi
|
||||
wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
|
||||
if $cygwin; then
|
||||
wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
|
||||
fi
|
||||
|
||||
if command -v wget > /dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found wget ... using wget"
|
||||
fi
|
||||
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||
wget "$jarUrl" -O "$wrapperJarPath"
|
||||
else
|
||||
wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
|
||||
fi
|
||||
elif command -v curl > /dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found curl ... using curl"
|
||||
fi
|
||||
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||
curl -o "$wrapperJarPath" "$jarUrl" -f
|
||||
else
|
||||
curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
|
||||
fi
|
||||
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Falling back to using Java to download"
|
||||
fi
|
||||
javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
|
||||
# For Cygwin, switch paths to Windows format before running javac
|
||||
if $cygwin; then
|
||||
javaClass=`cygpath --path --windows "$javaClass"`
|
||||
fi
|
||||
if [ -e "$javaClass" ]; then
|
||||
if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo " - Compiling MavenWrapperDownloader.java ..."
|
||||
fi
|
||||
# Compiling the Java class
|
||||
("$JAVA_HOME/bin/javac" "$javaClass")
|
||||
fi
|
||||
if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
# Running the downloader
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo " - Running MavenWrapperDownloader.java ..."
|
||||
fi
|
||||
("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
##########################################################################################
|
||||
# End of extension
|
||||
##########################################################################################
|
||||
|
||||
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo $MAVEN_PROJECTBASEDIR
|
||||
fi
|
||||
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=`cygpath --path --windows "$M2_HOME"`
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
|
||||
[ -n "$MAVEN_PROJECTBASEDIR" ] &&
|
||||
MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
|
||||
fi
|
||||
|
||||
# Provide a "standardized" way to retrieve the CLI args that will
|
||||
# work with both Windows and non-Windows executions.
|
||||
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
|
||||
@@ -226,20 +303,8 @@ export MAVEN_CMD_LINE_ARGS
|
||||
|
||||
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
echo "Running version check"
|
||||
VERSION=$( sed '\!<parent!,\!</parent!d' `dirname $0`/pom.xml | grep '<version' | head -1 | sed -e 's/.*<version>//' -e 's!</version>.*$!!' )
|
||||
echo "The found version is [${VERSION}]"
|
||||
|
||||
if echo $VERSION | egrep -q 'M|RC'; then
|
||||
echo Activating \"milestone\" profile for version=\"$VERSION\"
|
||||
echo $MAVEN_ARGS | grep -q milestone || MAVEN_ARGS="$MAVEN_ARGS -Pmilestone"
|
||||
else
|
||||
echo Deactivating \"milestone\" profile for version=\"$VERSION\"
|
||||
echo $MAVEN_ARGS | grep -q milestone && MAVEN_ARGS=$(echo $MAVEN_ARGS | sed -e 's/-Pmilestone//')
|
||||
fi
|
||||
|
||||
exec "$JAVACMD" \
|
||||
$MAVEN_OPTS \
|
||||
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
|
||||
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
|
||||
${WRAPPER_LAUNCHER} ${MAVEN_ARGS} "$@"
|
||||
${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
|
||||
|
||||
53
mvnw.cmd
vendored
@@ -18,7 +18,7 @@
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Maven2 Start Up Batch script
|
||||
@REM Maven Start Up Batch script
|
||||
@REM
|
||||
@REM Required ENV vars:
|
||||
@REM JAVA_HOME - location of a JDK home dir
|
||||
@@ -26,7 +26,7 @@
|
||||
@REM Optional ENV vars
|
||||
@REM M2_HOME - location of maven2's installed home dir
|
||||
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
|
||||
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
|
||||
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
|
||||
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
@REM e.g. to debug Maven itself, use
|
||||
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
@@ -35,7 +35,9 @@
|
||||
|
||||
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
|
||||
@echo off
|
||||
@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on'
|
||||
@REM set title of command window
|
||||
title %0
|
||||
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
|
||||
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
|
||||
|
||||
@REM set %HOME% to equivalent of $HOME
|
||||
@@ -80,8 +82,6 @@ goto error
|
||||
|
||||
:init
|
||||
|
||||
set MAVEN_CMD_LINE_ARGS=%*
|
||||
|
||||
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
|
||||
@REM Fallback to current working directory if not found.
|
||||
|
||||
@@ -117,11 +117,48 @@ for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do s
|
||||
:endReadAdditionalConfig
|
||||
|
||||
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
|
||||
|
||||
set WRAPPER_JAR="".\.mvn\wrapper\maven-wrapper.jar""
|
||||
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
|
||||
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CMD_LINE_ARGS%
|
||||
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
|
||||
FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
|
||||
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
|
||||
)
|
||||
|
||||
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
if exist %WRAPPER_JAR% (
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Found %WRAPPER_JAR%
|
||||
)
|
||||
) else (
|
||||
if not "%MVNW_REPOURL%" == "" (
|
||||
SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
)
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Couldn't find %WRAPPER_JAR%, downloading it ...
|
||||
echo Downloading from: %DOWNLOAD_URL%
|
||||
)
|
||||
|
||||
powershell -Command "&{"^
|
||||
"$webclient = new-object System.Net.WebClient;"^
|
||||
"if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
|
||||
"$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
|
||||
"}"^
|
||||
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
|
||||
"}"
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Finished downloading %WRAPPER_JAR%
|
||||
)
|
||||
)
|
||||
@REM End of extension
|
||||
|
||||
@REM Provide a "standardized" way to retrieve the CLI args that will
|
||||
@REM work with both Windows and non-Windows executions.
|
||||
set MAVEN_CMD_LINE_ARGS=%*
|
||||
|
||||
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
|
||||
if ERRORLEVEL 1 goto error
|
||||
goto end
|
||||
|
||||
|
||||
223
pom.xml
@@ -1,29 +1,41 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>2.1.0.M1</version>
|
||||
<version>3.2.1</version>
|
||||
<packaging>pom</packaging>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-build</artifactId>
|
||||
<version>2.0.2.RELEASE</version>
|
||||
<version>3.1.0</version>
|
||||
<relativePath />
|
||||
</parent>
|
||||
<scm>
|
||||
<url>https://github.com/spring-cloud/spring-cloud-stream-binder-kafka</url>
|
||||
<connection>scm:git:git://github.com/spring-cloud/spring-cloud-stream-binder-kafka.git
|
||||
</connection>
|
||||
<developerConnection>
|
||||
scm:git:ssh://git@github.com/spring-cloud/spring-cloud-stream-binder-kafka.git
|
||||
</developerConnection>
|
||||
<tag>HEAD</tag>
|
||||
</scm>
|
||||
<properties>
|
||||
<java.version>1.8</java.version>
|
||||
<spring-kafka.version>2.1.5.RELEASE</spring-kafka.version>
|
||||
<spring-integration-kafka.version>3.0.3.RELEASE</spring-integration-kafka.version>
|
||||
<kafka.version>1.0.1</kafka.version>
|
||||
<spring-cloud-stream.version>2.1.0.M1</spring-cloud-stream.version>
|
||||
<spring-kafka.version>2.8.0</spring-kafka.version>
|
||||
<spring-integration-kafka.version>5.5.5</spring-integration-kafka.version>
|
||||
<kafka.version>3.0.0</kafka.version>
|
||||
<spring-cloud-stream.version>3.2.1</spring-cloud-stream.version>
|
||||
<maven-checkstyle-plugin.failsOnError>true</maven-checkstyle-plugin.failsOnError>
|
||||
<maven-checkstyle-plugin.failsOnViolation>true</maven-checkstyle-plugin.failsOnViolation>
|
||||
<maven-checkstyle-plugin.includeTestSourceDirectory>true</maven-checkstyle-plugin.includeTestSourceDirectory>
|
||||
</properties>
|
||||
<modules>
|
||||
<module>spring-cloud-stream-binder-kafka</module>
|
||||
<module>spring-cloud-starter-stream-kafka</module>
|
||||
<module>spring-cloud-stream-binder-kafka-docs</module>
|
||||
<module>spring-cloud-stream-binder-kafka-core</module>
|
||||
<module>spring-cloud-stream-binder-kafka-streams</module>
|
||||
</modules>
|
||||
<module>docs</module>
|
||||
</modules>
|
||||
|
||||
<dependencyManagement>
|
||||
<dependencies>
|
||||
@@ -47,6 +59,7 @@
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
@@ -82,7 +95,13 @@
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<artifactId>kafka_2.13</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.13</artifactId>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
<version>${kafka.version}</version>
|
||||
@@ -101,9 +120,24 @@
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>${kafka.version}</version>
|
||||
<classifier>test</classifier>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</dependencyManagement>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.junit.vintage</groupId>
|
||||
<artifactId>junit-vintage-engine</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<pluginManagement>
|
||||
<plugins>
|
||||
@@ -131,84 +165,115 @@
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-tools</artifactId>
|
||||
<version>${spring-cloud-stream.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<version>${maven-compiler-plugin.version}</version>
|
||||
<configuration>
|
||||
<configLocation>checkstyle.xml</configLocation>
|
||||
<headerLocation>checkstyle-header.txt</headerLocation>
|
||||
<includeTestSourceDirectory>true</includeTestSourceDirectory>
|
||||
<source>${java.version}</source>
|
||||
<target>${java.version}</target>
|
||||
<compilerArgument>-parameters</compilerArgument>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>spring</id>
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>http://repo.spring.io/libs-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
<releases>
|
||||
<enabled>false</enabled>
|
||||
</releases>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>http://repo.spring.io/libs-milestone-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>http://repo.spring.io/release</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</repository>
|
||||
</repositories>
|
||||
<pluginRepositories>
|
||||
<pluginRepository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>http://repo.spring.io/libs-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
<releases>
|
||||
<enabled>false</enabled>
|
||||
</releases>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>http://repo.spring.io/libs-milestone-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>http://repo.spring.io/libs-release-local</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
</pluginRepositories>
|
||||
|
||||
|
||||
</profile>
|
||||
<profile>
|
||||
<id>coverage</id>
|
||||
<activation>
|
||||
<property>
|
||||
<name>env.TRAVIS</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
</activation>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.jacoco</groupId>
|
||||
<artifactId>jacoco-maven-plugin</artifactId>
|
||||
<version>0.7.9</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>agent</id>
|
||||
<goals>
|
||||
<goal>prepare-agent</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>report</id>
|
||||
<phase>test</phase>
|
||||
<goals>
|
||||
<goal>report</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>https://repo.spring.io/libs-snapshot-local</url>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring milestones</name>
|
||||
<url>https://repo.spring.io/libs-milestone-local</url>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>rsocket-snapshots</id>
|
||||
<name>RSocket Snapshots</name>
|
||||
<url>https://oss.jfrog.org/oss-snapshot-local</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
</repository>
|
||||
<repository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>https://repo.spring.io/release</url>
|
||||
</repository>
|
||||
</repositories>
|
||||
<pluginRepositories>
|
||||
<pluginRepository>
|
||||
<id>spring-snapshots</id>
|
||||
<name>Spring Snapshots</name>
|
||||
<url>https://repo.spring.io/snapshot</url>
|
||||
<snapshots>
|
||||
<enabled>true</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-milestones</id>
|
||||
<name>Spring Milestones</name>
|
||||
<url>https://repo.spring.io/milestone</url>
|
||||
<snapshots>
|
||||
<enabled>false</enabled>
|
||||
</snapshots>
|
||||
</pluginRepository>
|
||||
<pluginRepository>
|
||||
<id>spring-releases</id>
|
||||
<name>Spring Releases</name>
|
||||
<url>https://repo.spring.io/release</url>
|
||||
</pluginRepository>
|
||||
</pluginRepositories>
|
||||
<reporting>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-checkstyle-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</reporting>
|
||||
</project>
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>2.1.0.M1</version>
|
||||
<version>3.2.1</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
<description>Spring Cloud Starter Stream Kafka</description>
|
||||
<url>http://projects.spring.io/spring-cloud</url>
|
||||
<url>https://projects.spring.io/spring-cloud</url>
|
||||
<organization>
|
||||
<name>Pivotal Software, Inc.</name>
|
||||
<url>http://www.spring.io</url>
|
||||
<url>https://www.spring.io</url>
|
||||
</organization>
|
||||
<properties>
|
||||
<main.basedir>${basedir}/../..</main.basedir>
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
provides: spring-cloud-starter-stream-kafka
|
||||
@@ -1,18 +1,18 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>2.1.0.M1</version>
|
||||
<version>3.2.1</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
|
||||
<description>Spring Cloud Stream Kafka Binder Core</description>
|
||||
<url>http://projects.spring.io/spring-cloud</url>
|
||||
<url>https://projects.spring.io/spring-cloud</url>
|
||||
<organization>
|
||||
<name>Pivotal Software, Inc.</name>
|
||||
<url>http://www.spring.io</url>
|
||||
<url>https://www.spring.io</url>
|
||||
</organization>
|
||||
|
||||
<dependencies>
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -21,6 +21,7 @@ import java.util.Map;
|
||||
|
||||
import javax.security.auth.login.AppConfigurationEntry;
|
||||
|
||||
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
@@ -28,17 +29,18 @@ import org.springframework.util.Assert;
|
||||
* for the Kafka or Zookeeper client.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class JaasLoginModuleConfiguration {
|
||||
|
||||
private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";
|
||||
|
||||
private AppConfigurationEntry.LoginModuleControlFlag controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
|
||||
private KafkaJaasLoginModuleInitializer.ControlFlag controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.REQUIRED;
|
||||
|
||||
private Map<String,String> options = new HashMap<>();
|
||||
private Map<String, String> options = new HashMap<>();
|
||||
|
||||
public String getLoginModule() {
|
||||
return loginModule;
|
||||
return this.loginModule;
|
||||
}
|
||||
|
||||
public void setLoginModule(String loginModule) {
|
||||
@@ -46,38 +48,22 @@ public class JaasLoginModuleConfiguration {
|
||||
this.loginModule = loginModule;
|
||||
}
|
||||
|
||||
public String getControlFlag() {
|
||||
return controlFlag.toString();
|
||||
}
|
||||
|
||||
public AppConfigurationEntry.LoginModuleControlFlag getControlFlagValue() {
|
||||
return controlFlag;
|
||||
public KafkaJaasLoginModuleInitializer.ControlFlag getControlFlag() {
|
||||
return this.controlFlag;
|
||||
}
|
||||
|
||||
public void setControlFlag(String controlFlag) {
|
||||
Assert.notNull(controlFlag, "cannot be null");
|
||||
if (AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL.equals(controlFlag)) {
|
||||
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL;
|
||||
}
|
||||
else if (AppConfigurationEntry.LoginModuleControlFlag.REQUIRED.equals(controlFlag)) {
|
||||
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
|
||||
}
|
||||
else if (AppConfigurationEntry.LoginModuleControlFlag.REQUISITE.equals(controlFlag)) {
|
||||
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE;
|
||||
}
|
||||
else if (AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT.equals(controlFlag)) {
|
||||
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT;
|
||||
}
|
||||
else {
|
||||
throw new IllegalArgumentException(controlFlag + " is not a supported control flag");
|
||||
}
|
||||
this.controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag
|
||||
.valueOf(controlFlag.toUpperCase());
|
||||
}
|
||||
|
||||
public Map<String, String> getOptions() {
|
||||
return options;
|
||||
return this.options;
|
||||
}
|
||||
|
||||
public void setOptions(Map<String, String> options) {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2015-2018 the original author or authors.
|
||||
* Copyright 2015-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -16,40 +16,64 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.time.Duration;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.validation.constraints.AssertTrue;
|
||||
import javax.validation.constraints.Min;
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
|
||||
import org.springframework.cloud.stream.binder.HeaderMode;
|
||||
import org.springframework.cloud.stream.binder.ProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties.CompressionType;
|
||||
import org.springframework.core.io.DefaultResourceLoader;
|
||||
import org.springframework.core.io.Resource;
|
||||
import org.springframework.expression.Expression;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Configuration properties for the Kafka binder. The properties in this class are
|
||||
* prefixed with <b>spring.cloud.stream.kafka.binder</b>.
|
||||
*
|
||||
* @author David Turanski
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Rafal Zukowski
|
||||
* @author Aldo Sinanaj
|
||||
* @author Lukasz Kaminski
|
||||
* @author Chukwubuikem Ume-Ugwa
|
||||
*/
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
|
||||
public class KafkaBinderConfigurationProperties {
|
||||
|
||||
private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final Transaction transaction = new Transaction();
|
||||
|
||||
private final KafkaProperties kafkaProperties;
|
||||
|
||||
private String[] zkNodes = new String[] { "localhost" };
|
||||
|
||||
/**
|
||||
* Arbitrary kafka properties that apply to both producers and consumers.
|
||||
*/
|
||||
@@ -65,48 +89,26 @@ public class KafkaBinderConfigurationProperties {
|
||||
*/
|
||||
private Map<String, String> producerProperties = new HashMap<>();
|
||||
|
||||
private String defaultZkPort = "2181";
|
||||
|
||||
private String[] brokers = new String[] { "localhost" };
|
||||
|
||||
private String defaultBrokerPort = "9092";
|
||||
|
||||
private String[] headers = new String[] {};
|
||||
|
||||
private int offsetUpdateTimeWindow = 10000;
|
||||
|
||||
private int offsetUpdateCount;
|
||||
|
||||
private int offsetUpdateShutdownTimeout = 2000;
|
||||
|
||||
private int maxWait = 100;
|
||||
|
||||
private boolean autoCreateTopics = true;
|
||||
|
||||
private boolean autoAlterTopics;
|
||||
|
||||
private boolean autoAddPartitions;
|
||||
|
||||
private int socketBufferSize = 2097152;
|
||||
|
||||
/**
|
||||
* ZK session timeout in milliseconds.
|
||||
*/
|
||||
private int zkSessionTimeout = 10000;
|
||||
|
||||
/**
|
||||
* ZK Connection timeout in milliseconds.
|
||||
*/
|
||||
private int zkConnectionTimeout = 10000;
|
||||
private boolean considerDownWhenAnyPartitionHasNoLeader;
|
||||
|
||||
private String requiredAcks = "1";
|
||||
|
||||
private short replicationFactor = 1;
|
||||
|
||||
private int fetchSize = 1024 * 1024;
|
||||
private short replicationFactor = -1;
|
||||
|
||||
private int minPartitionCount = 1;
|
||||
|
||||
private int queueSize = 8192;
|
||||
|
||||
/**
|
||||
* Time to wait to get partition information in seconds; default 60.
|
||||
*/
|
||||
@@ -115,33 +117,114 @@ public class KafkaBinderConfigurationProperties {
|
||||
private JaasLoginModuleConfiguration jaas;
|
||||
|
||||
/**
|
||||
* The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
* The bean name of a custom header mapper to use instead of a
|
||||
* {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
*/
|
||||
private String headerMapperBeanName;
|
||||
|
||||
/**
|
||||
* Time between retries after AuthorizationException is caught in
|
||||
* the ListenerContainer; defalt is null which disables retries.
|
||||
* For more info see: {@link org.springframework.kafka.listener.ConsumerProperties#setAuthorizationExceptionRetryInterval(java.time.Duration)}
|
||||
*/
|
||||
private Duration authorizationExceptionRetryInterval;
|
||||
|
||||
/**
|
||||
* When a certificate store location is given as classpath URL (classpath:), then the binder
|
||||
* moves the resource from the classpath location inside the JAR to a location on
|
||||
* the filesystem. If this value is set, then this location is used, otherwise, the
|
||||
* certificate file is copied to the directory returned by java.io.tmpdir.
|
||||
*/
|
||||
private String certificateStoreDirectory;
|
||||
|
||||
public KafkaBinderConfigurationProperties(KafkaProperties kafkaProperties) {
|
||||
Assert.notNull(kafkaProperties, "'kafkaProperties' cannot be null");
|
||||
this.kafkaProperties = kafkaProperties;
|
||||
}
|
||||
|
||||
public KafkaProperties getKafkaProperties() {
|
||||
return this.kafkaProperties;
|
||||
}
|
||||
|
||||
public Transaction getTransaction() {
|
||||
return this.transaction;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the connection String
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
@Deprecated
|
||||
public String getZkConnectionString() {
|
||||
return toConnectionString(this.zkNodes, this.defaultZkPort);
|
||||
public String getKafkaConnectionString() {
|
||||
// We need to do a check on certificate file locations to see if they are given as classpath resources.
|
||||
// If that is the case, then we will move them to a file system location and use those as the certificate locations.
|
||||
// This is due to a limitation in Kafka itself in which it doesn't allow reading certificate resources from the classpath.
|
||||
// See this: https://issues.apache.org/jira/browse/KAFKA-7685
|
||||
// and this: https://cwiki.apache.org/confluence/display/KAFKA/KIP-398%3A+Support+reading+trust+store+from+classpath
|
||||
moveCertsToFileSystemIfNecessary();
|
||||
|
||||
return toConnectionString(this.brokers, this.defaultBrokerPort);
|
||||
}
|
||||
|
||||
public String getKafkaConnectionString() {
|
||||
return toConnectionString(this.brokers, this.defaultBrokerPort);
|
||||
private void moveCertsToFileSystemIfNecessary() {
|
||||
try {
|
||||
moveBrokerCertsIfApplicable();
|
||||
moveSchemaRegistryCertsIfApplicable();
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new IllegalStateException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private void moveBrokerCertsIfApplicable() throws IOException {
|
||||
final String trustStoreLocation = this.configuration.get("ssl.truststore.location");
|
||||
if (trustStoreLocation != null && trustStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(trustStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("ssl.truststore.location", fileSystemLocation);
|
||||
}
|
||||
final String keyStoreLocation = this.configuration.get("ssl.keystore.location");
|
||||
if (keyStoreLocation != null && keyStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(keyStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("ssl.keystore.location", fileSystemLocation);
|
||||
}
|
||||
}
|
||||
|
||||
private void moveSchemaRegistryCertsIfApplicable() throws IOException {
|
||||
String trustStoreLocation = this.configuration.get("schema.registry.ssl.truststore.location");
|
||||
if (trustStoreLocation != null && trustStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(trustStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("schema.registry.ssl.truststore.location", fileSystemLocation);
|
||||
}
|
||||
final String keyStoreLocation = this.configuration.get("schema.registry.ssl.keystore.location");
|
||||
if (keyStoreLocation != null && keyStoreLocation.startsWith("classpath:")) {
|
||||
final String fileSystemLocation = moveCertToFileSystem(keyStoreLocation, this.certificateStoreDirectory);
|
||||
// Overriding the value with absolute filesystem path.
|
||||
this.configuration.put("schema.registry.ssl.keystore.location", fileSystemLocation);
|
||||
}
|
||||
}
|
||||
|
||||
private String moveCertToFileSystem(String classpathLocation, String fileSystemLocation) throws IOException {
|
||||
File targetFile;
|
||||
final String tempDir = System.getProperty("java.io.tmpdir");
|
||||
Resource resource = new DefaultResourceLoader().getResource(classpathLocation);
|
||||
if (StringUtils.hasText(fileSystemLocation)) {
|
||||
final Path path = Paths.get(fileSystemLocation);
|
||||
if (!Files.exists(path) || !Files.isDirectory(path) || !Files.isWritable(path)) {
|
||||
logger.warn("The filesystem location to move the cert files (" + fileSystemLocation + ") " +
|
||||
"is not found or a directory that is writable. The system temp folder (java.io.tmpdir) will be used instead.");
|
||||
targetFile = new File(Paths.get(tempDir, resource.getFilename()).toString());
|
||||
}
|
||||
else {
|
||||
// the given location is verified to be a writable directory.
|
||||
targetFile = new File(Paths.get(fileSystemLocation, resource.getFilename()).toString());
|
||||
}
|
||||
}
|
||||
else {
|
||||
targetFile = new File(Paths.get(tempDir, resource.getFilename()).toString());
|
||||
}
|
||||
|
||||
try (InputStream inputStream = resource.getInputStream()) {
|
||||
Files.copy(inputStream, targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
|
||||
}
|
||||
return targetFile.getAbsolutePath();
|
||||
}
|
||||
|
||||
public String getDefaultKafkaConnectionString() {
|
||||
@@ -152,72 +235,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
return this.headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the window.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getOffsetUpdateTimeWindow() {
|
||||
return this.offsetUpdateTimeWindow;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the count.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getOffsetUpdateCount() {
|
||||
return this.offsetUpdateCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the timeout.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getOffsetUpdateShutdownTimeout() {
|
||||
return this.offsetUpdateShutdownTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper nodes.
|
||||
* @return the nodes.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public String[] getZkNodes() {
|
||||
return this.zkNodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper nodes.
|
||||
* @param zkNodes the nodes.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setZkNodes(String... zkNodes) {
|
||||
this.zkNodes = zkNodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper port.
|
||||
* @param defaultZkPort the port.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setDefaultZkPort(String defaultZkPort) {
|
||||
this.defaultZkPort = defaultZkPort;
|
||||
}
|
||||
|
||||
public String[] getBrokers() {
|
||||
return this.brokers;
|
||||
}
|
||||
@@ -235,86 +252,11 @@ public class KafkaBinderConfigurationProperties {
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param offsetUpdateTimeWindow the window.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
|
||||
this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param offsetUpdateCount the count.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setOffsetUpdateCount(int offsetUpdateCount) {
|
||||
this.offsetUpdateCount = offsetUpdateCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param offsetUpdateShutdownTimeout the timeout.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
|
||||
this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper session timeout.
|
||||
* @return the timeout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public int getZkSessionTimeout() {
|
||||
return this.zkSessionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper session timeout.
|
||||
* @param zkSessionTimeout the timout
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setZkSessionTimeout(int zkSessionTimeout) {
|
||||
this.zkSessionTimeout = zkSessionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper connection timeout.
|
||||
* @return the timout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public int getZkConnectionTimeout() {
|
||||
return this.zkConnectionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Zookeeper connection timeout.
|
||||
* @param zkConnectionTimeout the timeout.
|
||||
* @deprecated connection to zookeeper is no longer necessary
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
|
||||
public void setZkConnectionTimeout(int zkConnectionTimeout) {
|
||||
this.zkConnectionTimeout = zkConnectionTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts an array of host values to a comma-separated String.
|
||||
*
|
||||
* It will append the default port value, if not already specified.
|
||||
* Converts an array of host values to a comma-separated String. It will append the
|
||||
* default port value, if not already specified.
|
||||
* @param hosts host string
|
||||
* @param defaultPort port
|
||||
* @return formatted connection string
|
||||
*/
|
||||
private String toConnectionString(String[] hosts, String defaultPort) {
|
||||
String[] fullyFormattedHosts = new String[hosts.length];
|
||||
@@ -329,36 +271,10 @@ public class KafkaBinderConfigurationProperties {
|
||||
return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the wait.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getMaxWait() {
|
||||
return this.maxWait;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer user.
|
||||
* @param maxWait the wait.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setMaxWait(int maxWait) {
|
||||
this.maxWait = maxWait;
|
||||
}
|
||||
|
||||
public String getRequiredAcks() {
|
||||
return this.requiredAcks;
|
||||
}
|
||||
|
||||
public void setRequiredAcks(int requiredAcks) {
|
||||
this.requiredAcks = String.valueOf(requiredAcks);
|
||||
}
|
||||
|
||||
public void setRequiredAcks(String requiredAcks) {
|
||||
this.requiredAcks = requiredAcks;
|
||||
}
|
||||
@@ -371,28 +287,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.replicationFactor = replicationFactor;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the size.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getFetchSize() {
|
||||
return this.fetchSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param fetchSize the size.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setFetchSize(int fetchSize) {
|
||||
this.fetchSize = fetchSize;
|
||||
}
|
||||
|
||||
public int getMinPartitionCount() {
|
||||
return this.minPartitionCount;
|
||||
}
|
||||
@@ -409,28 +303,6 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.healthTimeout = healthTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the queue size.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public int getQueueSize() {
|
||||
return this.queueSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used.
|
||||
* @param queueSize the queue size.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
|
||||
public void setQueueSize(int queueSize) {
|
||||
this.queueSize = queueSize;
|
||||
}
|
||||
|
||||
public boolean isAutoCreateTopics() {
|
||||
return this.autoCreateTopics;
|
||||
}
|
||||
@@ -439,6 +311,14 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.autoCreateTopics = autoCreateTopics;
|
||||
}
|
||||
|
||||
public boolean isAutoAlterTopics() {
|
||||
return autoAlterTopics;
|
||||
}
|
||||
|
||||
public void setAutoAlterTopics(boolean autoAlterTopics) {
|
||||
this.autoAlterTopics = autoAlterTopics;
|
||||
}
|
||||
|
||||
public boolean isAutoAddPartitions() {
|
||||
return this.autoAddPartitions;
|
||||
}
|
||||
@@ -447,32 +327,8 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.autoAddPartitions = autoAddPartitions;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used; set properties such as this via {@link #getConfiguration()
|
||||
* configuration}.
|
||||
* @return the size.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
|
||||
public int getSocketBufferSize() {
|
||||
return this.socketBufferSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* No longer used; set properties such as this via {@link #getConfiguration()
|
||||
* configuration}.
|
||||
* @param socketBufferSize the size.
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
@DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
|
||||
public void setSocketBufferSize(int socketBufferSize) {
|
||||
this.socketBufferSize = socketBufferSize;
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return configuration;
|
||||
return this.configuration;
|
||||
}
|
||||
|
||||
public void setConfiguration(Map<String, String> configuration) {
|
||||
@@ -507,29 +363,19 @@ public class KafkaBinderConfigurationProperties {
|
||||
Map<String, Object> consumerConfiguration = new HashMap<>();
|
||||
consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
|
||||
// Copy configured binder properties that apply to consumers
|
||||
for (Map.Entry<String, String> configurationEntry : this.configuration.entrySet()) {
|
||||
for (Map.Entry<String, String> configurationEntry : this.configuration
|
||||
.entrySet()) {
|
||||
if (ConsumerConfig.configNames().contains(configurationEntry.getKey())) {
|
||||
consumerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
|
||||
consumerConfiguration.put(configurationEntry.getKey(),
|
||||
configurationEntry.getValue());
|
||||
}
|
||||
}
|
||||
consumerConfiguration.putAll(this.consumerProperties);
|
||||
filterStreamManagedConfiguration(consumerConfiguration);
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
if (ObjectUtils.isEmpty(consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
consumerConfiguration.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
|
||||
}
|
||||
else {
|
||||
Object boostrapServersConfig = consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (boostrapServersConfig instanceof List) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<String> bootStrapServers = (List<String>) consumerConfiguration
|
||||
.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
|
||||
consumerConfiguration.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Collections.unmodifiableMap(consumerConfiguration);
|
||||
return getConfigurationWithBootstrapServer(consumerConfiguration,
|
||||
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -542,29 +388,47 @@ public class KafkaBinderConfigurationProperties {
|
||||
Map<String, Object> producerConfiguration = new HashMap<>();
|
||||
producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
|
||||
// Copy configured binder properties that apply to producers
|
||||
for (Map.Entry<String, String> configurationEntry : configuration.entrySet()) {
|
||||
for (Map.Entry<String, String> configurationEntry : this.configuration
|
||||
.entrySet()) {
|
||||
if (ProducerConfig.configNames().contains(configurationEntry.getKey())) {
|
||||
producerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
|
||||
producerConfiguration.put(configurationEntry.getKey(),
|
||||
configurationEntry.getValue());
|
||||
}
|
||||
}
|
||||
producerConfiguration.putAll(this.producerProperties);
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
if (ObjectUtils.isEmpty(producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
producerConfiguration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
|
||||
return getConfigurationWithBootstrapServer(producerConfiguration,
|
||||
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
}
|
||||
|
||||
private void filterStreamManagedConfiguration(Map<String, Object> configuration) {
|
||||
if (configuration.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)
|
||||
&& configuration.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG).equals(true)) {
|
||||
logger.warn(constructIgnoredConfigMessage(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) +
|
||||
ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG + "=true is not supported by the Kafka binder");
|
||||
configuration.remove(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
|
||||
}
|
||||
else {
|
||||
Object boostrapServersConfig = producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (boostrapServersConfig instanceof List) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<String> bootStrapServers = (List<String>) producerConfiguration
|
||||
.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
|
||||
producerConfiguration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
|
||||
}
|
||||
}
|
||||
if (configuration.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
|
||||
logger.warn(constructIgnoredConfigMessage(ConsumerConfig.GROUP_ID_CONFIG) +
|
||||
"Use spring.cloud.stream.default.group or spring.cloud.stream.binding.<name>.group to specify " +
|
||||
"the group instead of " + ConsumerConfig.GROUP_ID_CONFIG);
|
||||
configuration.remove(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
return Collections.unmodifiableMap(producerConfiguration);
|
||||
}
|
||||
|
||||
private String constructIgnoredConfigMessage(String config) {
|
||||
return String.format("Ignoring provided value(s) for '%s'. ", config);
|
||||
}
|
||||
|
||||
private Map<String, Object> getConfigurationWithBootstrapServer(
|
||||
Map<String, Object> configuration, String bootstrapServersConfig) {
|
||||
final String kafkaConnectionString = getKafkaConnectionString();
|
||||
if (ObjectUtils.isEmpty(configuration.get(bootstrapServersConfig)) ||
|
||||
!kafkaConnectionString.equals("localhost:9092")) {
|
||||
configuration.put(bootstrapServersConfig, kafkaConnectionString);
|
||||
}
|
||||
return Collections.unmodifiableMap(configuration);
|
||||
}
|
||||
|
||||
public JaasLoginModuleConfiguration getJaas() {
|
||||
@@ -583,9 +447,36 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.headerMapperBeanName = headerMapperBeanName;
|
||||
}
|
||||
|
||||
public Duration getAuthorizationExceptionRetryInterval() {
|
||||
return authorizationExceptionRetryInterval;
|
||||
}
|
||||
|
||||
public void setAuthorizationExceptionRetryInterval(Duration authorizationExceptionRetryInterval) {
|
||||
this.authorizationExceptionRetryInterval = authorizationExceptionRetryInterval;
|
||||
}
|
||||
|
||||
public boolean isConsiderDownWhenAnyPartitionHasNoLeader() {
|
||||
return this.considerDownWhenAnyPartitionHasNoLeader;
|
||||
}
|
||||
|
||||
public void setConsiderDownWhenAnyPartitionHasNoLeader(boolean considerDownWhenAnyPartitionHasNoLeader) {
|
||||
this.considerDownWhenAnyPartitionHasNoLeader = considerDownWhenAnyPartitionHasNoLeader;
|
||||
}
|
||||
|
||||
public String getCertificateStoreDirectory() {
|
||||
return this.certificateStoreDirectory;
|
||||
}
|
||||
|
||||
public void setCertificateStoreDirectory(String certificateStoreDirectory) {
|
||||
this.certificateStoreDirectory = certificateStoreDirectory;
|
||||
}
|
||||
|
||||
/**
|
||||
* Domain class that models transaction capabilities in Kafka.
|
||||
*/
|
||||
public static class Transaction {
|
||||
|
||||
private final KafkaProducerProperties producer = new KafkaProducerProperties();
|
||||
private final CombinedProducerProperties producer = new CombinedProducerProperties();
|
||||
|
||||
private String transactionIdPrefix;
|
||||
|
||||
@@ -597,10 +488,182 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.transactionIdPrefix = transactionIdPrefix;
|
||||
}
|
||||
|
||||
public KafkaProducerProperties getProducer() {
|
||||
public CombinedProducerProperties getProducer() {
|
||||
return this.producer;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* An combination of {@link ProducerProperties} and {@link KafkaProducerProperties} so
|
||||
* that common and kafka-specific properties can be set for the transactional
|
||||
* producer.
|
||||
*
|
||||
* @since 2.1
|
||||
*/
|
||||
public static class CombinedProducerProperties {
|
||||
|
||||
private final ProducerProperties producerProperties = new ProducerProperties();
|
||||
|
||||
private final KafkaProducerProperties kafkaProducerProperties = new KafkaProducerProperties();
|
||||
|
||||
public Expression getPartitionKeyExpression() {
|
||||
return this.producerProperties.getPartitionKeyExpression();
|
||||
}
|
||||
|
||||
public void setPartitionKeyExpression(Expression partitionKeyExpression) {
|
||||
this.producerProperties.setPartitionKeyExpression(partitionKeyExpression);
|
||||
}
|
||||
|
||||
public boolean isPartitioned() {
|
||||
return this.producerProperties.isPartitioned();
|
||||
}
|
||||
|
||||
public Expression getPartitionSelectorExpression() {
|
||||
return this.producerProperties.getPartitionSelectorExpression();
|
||||
}
|
||||
|
||||
public void setPartitionSelectorExpression(
|
||||
Expression partitionSelectorExpression) {
|
||||
this.producerProperties
|
||||
.setPartitionSelectorExpression(partitionSelectorExpression);
|
||||
}
|
||||
|
||||
public @Min(value = 1, message = "Partition count should be greater than zero.") int getPartitionCount() {
|
||||
return this.producerProperties.getPartitionCount();
|
||||
}
|
||||
|
||||
public void setPartitionCount(int partitionCount) {
|
||||
this.producerProperties.setPartitionCount(partitionCount);
|
||||
}
|
||||
|
||||
public String[] getRequiredGroups() {
|
||||
return this.producerProperties.getRequiredGroups();
|
||||
}
|
||||
|
||||
public void setRequiredGroups(String... requiredGroups) {
|
||||
this.producerProperties.setRequiredGroups(requiredGroups);
|
||||
}
|
||||
|
||||
public @AssertTrue(message = "Partition key expression and partition key extractor class properties "
|
||||
+ "are mutually exclusive.") boolean isValidPartitionKeyProperty() {
|
||||
return this.producerProperties.isValidPartitionKeyProperty();
|
||||
}
|
||||
|
||||
public @AssertTrue(message = "Partition selector class and partition selector expression "
|
||||
+ "properties are mutually exclusive.") boolean isValidPartitionSelectorProperty() {
|
||||
return this.producerProperties.isValidPartitionSelectorProperty();
|
||||
}
|
||||
|
||||
public HeaderMode getHeaderMode() {
|
||||
return this.producerProperties.getHeaderMode();
|
||||
}
|
||||
|
||||
public void setHeaderMode(HeaderMode headerMode) {
|
||||
this.producerProperties.setHeaderMode(headerMode);
|
||||
}
|
||||
|
||||
public boolean isUseNativeEncoding() {
|
||||
return this.producerProperties.isUseNativeEncoding();
|
||||
}
|
||||
|
||||
public void setUseNativeEncoding(boolean useNativeEncoding) {
|
||||
this.producerProperties.setUseNativeEncoding(useNativeEncoding);
|
||||
}
|
||||
|
||||
public boolean isErrorChannelEnabled() {
|
||||
return this.producerProperties.isErrorChannelEnabled();
|
||||
}
|
||||
|
||||
public void setErrorChannelEnabled(boolean errorChannelEnabled) {
|
||||
this.producerProperties.setErrorChannelEnabled(errorChannelEnabled);
|
||||
}
|
||||
|
||||
public String getPartitionKeyExtractorName() {
|
||||
return this.producerProperties.getPartitionKeyExtractorName();
|
||||
}
|
||||
|
||||
public void setPartitionKeyExtractorName(String partitionKeyExtractorName) {
|
||||
this.producerProperties
|
||||
.setPartitionKeyExtractorName(partitionKeyExtractorName);
|
||||
}
|
||||
|
||||
public String getPartitionSelectorName() {
|
||||
return this.producerProperties.getPartitionSelectorName();
|
||||
}
|
||||
|
||||
public void setPartitionSelectorName(String partitionSelectorName) {
|
||||
this.producerProperties.setPartitionSelectorName(partitionSelectorName);
|
||||
}
|
||||
|
||||
public int getBufferSize() {
|
||||
return this.kafkaProducerProperties.getBufferSize();
|
||||
}
|
||||
|
||||
public void setBufferSize(int bufferSize) {
|
||||
this.kafkaProducerProperties.setBufferSize(bufferSize);
|
||||
}
|
||||
|
||||
public @NotNull CompressionType getCompressionType() {
|
||||
return this.kafkaProducerProperties.getCompressionType();
|
||||
}
|
||||
|
||||
public void setCompressionType(CompressionType compressionType) {
|
||||
this.kafkaProducerProperties.setCompressionType(compressionType);
|
||||
}
|
||||
|
||||
public boolean isSync() {
|
||||
return this.kafkaProducerProperties.isSync();
|
||||
}
|
||||
|
||||
public void setSync(boolean sync) {
|
||||
this.kafkaProducerProperties.setSync(sync);
|
||||
}
|
||||
|
||||
public int getBatchTimeout() {
|
||||
return this.kafkaProducerProperties.getBatchTimeout();
|
||||
}
|
||||
|
||||
public void setBatchTimeout(int batchTimeout) {
|
||||
this.kafkaProducerProperties.setBatchTimeout(batchTimeout);
|
||||
}
|
||||
|
||||
public Expression getMessageKeyExpression() {
|
||||
return this.kafkaProducerProperties.getMessageKeyExpression();
|
||||
}
|
||||
|
||||
public void setMessageKeyExpression(Expression messageKeyExpression) {
|
||||
this.kafkaProducerProperties.setMessageKeyExpression(messageKeyExpression);
|
||||
}
|
||||
|
||||
public String[] getHeaderPatterns() {
|
||||
return this.kafkaProducerProperties.getHeaderPatterns();
|
||||
}
|
||||
|
||||
public void setHeaderPatterns(String[] headerPatterns) {
|
||||
this.kafkaProducerProperties.setHeaderPatterns(headerPatterns);
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.kafkaProducerProperties.getConfiguration();
|
||||
}
|
||||
|
||||
public void setConfiguration(Map<String, String> configuration) {
|
||||
this.kafkaProducerProperties.setConfiguration(configuration);
|
||||
}
|
||||
|
||||
public KafkaTopicProperties getTopic() {
|
||||
return this.kafkaProducerProperties.getTopic();
|
||||
}
|
||||
|
||||
public void setTopic(KafkaTopicProperties topic) {
|
||||
this.kafkaProducerProperties.setTopic(topic);
|
||||
}
|
||||
|
||||
public KafkaProducerProperties getExtension() {
|
||||
return this.kafkaProducerProperties;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -16,28 +16,48 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class KafkaBindingProperties {
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
|
||||
/**
|
||||
* Container object for Kafka specific extended producer and consumer binding properties.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Oleg Zhurakousky
|
||||
*/
|
||||
public class KafkaBindingProperties implements BinderSpecificPropertiesProvider {
|
||||
|
||||
/**
|
||||
* Consumer specific binding properties. @see {@link KafkaConsumerProperties}.
|
||||
*/
|
||||
private KafkaConsumerProperties consumer = new KafkaConsumerProperties();
|
||||
|
||||
/**
|
||||
* Producer specific binding properties. @see {@link KafkaProducerProperties}.
|
||||
*/
|
||||
private KafkaProducerProperties producer = new KafkaProducerProperties();
|
||||
|
||||
/**
|
||||
* @return {@link KafkaConsumerProperties}
|
||||
* Consumer specific binding properties. @see {@link KafkaConsumerProperties}.
|
||||
*/
|
||||
public KafkaConsumerProperties getConsumer() {
|
||||
return consumer;
|
||||
return this.consumer;
|
||||
}
|
||||
|
||||
public void setConsumer(KafkaConsumerProperties consumer) {
|
||||
this.consumer = consumer;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return {@link KafkaProducerProperties}
|
||||
* Producer specific binding properties. @see {@link KafkaProducerProperties}.
|
||||
*/
|
||||
public KafkaProducerProperties getProducer() {
|
||||
return producer;
|
||||
return this.producer;
|
||||
}
|
||||
|
||||
public void setProducer(KafkaProducerProperties producer) {
|
||||
this.producer = producer;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
* Copyright 2016-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -19,11 +19,16 @@ package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.kafka.listener.ContainerProperties;
|
||||
|
||||
/**
|
||||
* Extended consumer properties for Kafka binder.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Aldo Sinanaj
|
||||
*
|
||||
* <p>
|
||||
* Thanks to Laszlo Szabo for providing the initial patch for generic property support.
|
||||
@@ -31,8 +36,18 @@ import java.util.Map;
|
||||
*/
|
||||
public class KafkaConsumerProperties {
|
||||
|
||||
/**
|
||||
* Enumeration for starting consumer offset.
|
||||
*/
|
||||
public enum StartOffset {
|
||||
|
||||
/**
|
||||
* Starting from earliest offset.
|
||||
*/
|
||||
earliest(-2L),
|
||||
/**
|
||||
* Starting from latest offset.
|
||||
*/
|
||||
latest(-1L);
|
||||
|
||||
private final long referencePoint;
|
||||
@@ -44,65 +59,226 @@ public class KafkaConsumerProperties {
|
||||
public long getReferencePoint() {
|
||||
return this.referencePoint;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Standard headers for the message.
|
||||
*/
|
||||
public enum StandardHeaders {
|
||||
|
||||
/**
|
||||
* No headers.
|
||||
*/
|
||||
none,
|
||||
/**
|
||||
* Message header representing ID.
|
||||
*/
|
||||
id,
|
||||
/**
|
||||
* Message header representing timestamp.
|
||||
*/
|
||||
timestamp,
|
||||
/**
|
||||
* Indicating both ID and timestamp headers.
|
||||
*/
|
||||
both
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* When true the offset is committed after each record, otherwise the offsets for the complete set of records
|
||||
* received from the poll() are committed after all records have been processed.
|
||||
*/
|
||||
@Deprecated
|
||||
private boolean ackEachRecord;
|
||||
|
||||
/**
|
||||
* When true, topic partitions is automatically rebalanced between the members of a consumer group.
|
||||
* When false, each consumer is assigned a fixed set of partitions based on spring.cloud.stream.instanceCount and spring.cloud.stream.instanceIndex.
|
||||
*/
|
||||
private boolean autoRebalanceEnabled = true;
|
||||
|
||||
/**
|
||||
* Whether to autocommit offsets when a message has been processed.
|
||||
* If set to false, a header with the key kafka_acknowledgment of the type org.springframework.kafka.support.Acknowledgment header
|
||||
* is present in the inbound message. Applications may use this header for acknowledging messages.
|
||||
*/
|
||||
@Deprecated
|
||||
private boolean autoCommitOffset = true;
|
||||
|
||||
/**
|
||||
* Controlling the container acknowledgement mode. This is the preferred way to control the ack mode on the
|
||||
* container instead of the deprecated autoCommitOffset property.
|
||||
*/
|
||||
private ContainerProperties.AckMode ackMode;
|
||||
|
||||
/**
|
||||
* Flag to enable auto commit on error in polled consumers.
|
||||
*/
|
||||
private Boolean autoCommitOnError;
|
||||
|
||||
/**
|
||||
* The starting offset for new groups. Allowed values: earliest and latest.
|
||||
*/
|
||||
private StartOffset startOffset;
|
||||
|
||||
/**
|
||||
* Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
* Must be false if a KafkaRebalanceListener is provided.
|
||||
*/
|
||||
private boolean resetOffsets;
|
||||
|
||||
/**
|
||||
* When set to true, it enables DLQ behavior for the consumer.
|
||||
* By default, messages that result in errors are forwarded to a topic named error.name-of-destination.name-of-group.
|
||||
* The DLQ topic name can be configurable by setting the dlqName property.
|
||||
*/
|
||||
private boolean enableDlq;
|
||||
|
||||
/**
|
||||
* The name of the DLQ topic to receive the error messages.
|
||||
*/
|
||||
private String dlqName;
|
||||
|
||||
/**
|
||||
* Number of partitions to use on the DLQ.
|
||||
*/
|
||||
private Integer dlqPartitions;
|
||||
|
||||
/**
|
||||
* Using this, DLQ-specific producer properties can be set.
|
||||
* All the properties available through kafka producer properties can be set through this property.
|
||||
*/
|
||||
private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();
|
||||
|
||||
/**
|
||||
* @deprecated No longer used by the binder.
|
||||
*/
|
||||
@Deprecated
|
||||
private int recoveryInterval = 5000;
|
||||
|
||||
/**
|
||||
* List of trusted packages to provide the header mapper.
|
||||
*/
|
||||
private String[] trustedPackages;
|
||||
|
||||
/**
|
||||
* Indicates which standard headers are populated by the inbound channel adapter.
|
||||
* Allowed values: none, id, timestamp, or both.
|
||||
*/
|
||||
private StandardHeaders standardHeaders = StandardHeaders.none;
|
||||
|
||||
/**
|
||||
* The name of a bean that implements RecordMessageConverter.
|
||||
*/
|
||||
private String converterBeanName;
|
||||
|
||||
/**
|
||||
* The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
*/
|
||||
private long idleEventInterval = 30_000;
|
||||
|
||||
/**
|
||||
* When true, the destination is treated as a regular expression Pattern used to match topic names by the broker.
|
||||
*/
|
||||
private boolean destinationIsPattern;
|
||||
|
||||
/**
|
||||
* Map with a key/value pair containing generic Kafka consumer properties.
|
||||
* In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
*/
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
|
||||
private KafkaAdminProperties admin = new KafkaAdminProperties();
|
||||
/**
|
||||
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
|
||||
*/
|
||||
private KafkaTopicProperties topic = new KafkaTopicProperties();
|
||||
|
||||
/**
|
||||
* Timeout used for polling in pollable consumers.
|
||||
*/
|
||||
private long pollTimeout = org.springframework.kafka.listener.ConsumerProperties.DEFAULT_POLL_TIMEOUT;
|
||||
|
||||
/**
|
||||
* Transaction manager bean name - overrides the binder's transaction configuration.
|
||||
*/
|
||||
private String transactionManager;
|
||||
|
||||
/**
|
||||
* Set to false to NOT commit the offset of a successfully recovered recovered in the after rollback processor.
|
||||
*/
|
||||
private boolean txCommitRecovered = true;
|
||||
|
||||
/**
|
||||
* CommonErrorHandler bean name per consumer binding.
|
||||
* @since 3.2
|
||||
*/
|
||||
private String commonErrorHandlerBeanName;
|
||||
|
||||
/**
|
||||
* @return if each record needs to be acknowledged.
|
||||
*
|
||||
* When true the offset is committed after each record, otherwise the offsets for the complete set of records
|
||||
* received from the poll() are committed after all records have been processed.
|
||||
*
|
||||
* @deprecated since 3.1 in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean isAckEachRecord() {
|
||||
return this.ackEachRecord;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param ackEachRecord
|
||||
*
|
||||
* @deprecated in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public void setAckEachRecord(boolean ackEachRecord) {
|
||||
this.ackEachRecord = ackEachRecord;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is autocommit offset enabled
|
||||
*
|
||||
* Whether to autocommit offsets when a message has been processed.
|
||||
* If set to false, a header with the key kafka_acknowledgment of the type org.springframework.kafka.support.Acknowledgment header
|
||||
* is present in the inbound message. Applications may use this header for acknowledging messages.
|
||||
*
|
||||
* @deprecated since 3.1 in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean isAutoCommitOffset() {
|
||||
return this.autoCommitOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param autoCommitOffset
|
||||
*
|
||||
* @deprecated in favor of using {@link #ackMode}
|
||||
*/
|
||||
@Deprecated
|
||||
public void setAutoCommitOffset(boolean autoCommitOffset) {
|
||||
this.autoCommitOffset = autoCommitOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Container's ack mode.
|
||||
*/
|
||||
public ContainerProperties.AckMode getAckMode() {
|
||||
return this.ackMode;
|
||||
}
|
||||
|
||||
public void setAckMode(ContainerProperties.AckMode ackMode) {
|
||||
this.ackMode = ackMode;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return start offset
|
||||
*
|
||||
* The starting offset for new groups. Allowed values: earliest and latest.
|
||||
*/
|
||||
public StartOffset getStartOffset() {
|
||||
return this.startOffset;
|
||||
}
|
||||
@@ -111,6 +287,12 @@ public class KafkaConsumerProperties {
|
||||
this.startOffset = startOffset;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return if resetting offset is enabled
|
||||
*
|
||||
* Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
* Must be false if a KafkaRebalanceListener is provided.
|
||||
*/
|
||||
public boolean isResetOffsets() {
|
||||
return this.resetOffsets;
|
||||
}
|
||||
@@ -119,6 +301,13 @@ public class KafkaConsumerProperties {
|
||||
this.resetOffsets = resetOffsets;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is DLQ enabled.
|
||||
*
|
||||
* When set to true, it enables DLQ behavior for the consumer.
|
||||
* By default, messages that result in errors are forwarded to a topic named error.name-of-destination.name-of-group.
|
||||
* The DLQ topic name can be configurable by setting the dlqName property.
|
||||
*/
|
||||
public boolean isEnableDlq() {
|
||||
return this.enableDlq;
|
||||
}
|
||||
@@ -127,10 +316,20 @@ public class KafkaConsumerProperties {
|
||||
this.enableDlq = enableDlq;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is autocommit on error in polled consumers.
|
||||
*
|
||||
* This property accessor is only used in polled consumers.
|
||||
*/
|
||||
public Boolean getAutoCommitOnError() {
|
||||
return this.autoCommitOnError;
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param autoCommitOnError commit on error in polled consumers.
|
||||
*
|
||||
*/
|
||||
public void setAutoCommitOnError(Boolean autoCommitOnError) {
|
||||
this.autoCommitOnError = autoCommitOnError;
|
||||
}
|
||||
@@ -138,7 +337,7 @@ public class KafkaConsumerProperties {
|
||||
/**
|
||||
* No longer used.
|
||||
* @return the interval.
|
||||
* @deprecated
|
||||
* @deprecated No longer used by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
public int getRecoveryInterval() {
|
||||
@@ -148,13 +347,19 @@ public class KafkaConsumerProperties {
|
||||
/**
|
||||
* No longer used.
|
||||
* @param recoveryInterval the interval.
|
||||
* @deprecated
|
||||
* @deprecated No longer needed by the binder
|
||||
*/
|
||||
@Deprecated
|
||||
public void setRecoveryInterval(int recoveryInterval) {
|
||||
this.recoveryInterval = recoveryInterval;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is auto rebalance enabled
|
||||
*
|
||||
* When true, topic partitions is automatically rebalanced between the members of a consumer group.
|
||||
* When false, each consumer is assigned a fixed set of partitions based on spring.cloud.stream.instanceCount and spring.cloud.stream.instanceIndex.
|
||||
*/
|
||||
public boolean isAutoRebalanceEnabled() {
|
||||
return this.autoRebalanceEnabled;
|
||||
}
|
||||
@@ -163,6 +368,12 @@ public class KafkaConsumerProperties {
|
||||
this.autoRebalanceEnabled = autoRebalanceEnabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return a map of configuration
|
||||
*
|
||||
* Map with a key/value pair containing generic Kafka consumer properties.
|
||||
* In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
*/
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
@@ -171,29 +382,65 @@ public class KafkaConsumerProperties {
|
||||
this.configuration = configuration;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return dlq name
|
||||
*
|
||||
* The name of the DLQ topic to receive the error messages.
|
||||
*/
|
||||
public String getDlqName() {
|
||||
return dlqName;
|
||||
return this.dlqName;
|
||||
}
|
||||
|
||||
public void setDlqName(String dlqName) {
|
||||
this.dlqName = dlqName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return number of partitions on the DLQ topic
|
||||
*
|
||||
* Number of partitions to use on the DLQ.
|
||||
*/
|
||||
public Integer getDlqPartitions() {
|
||||
return this.dlqPartitions;
|
||||
}
|
||||
|
||||
public void setDlqPartitions(Integer dlqPartitions) {
|
||||
this.dlqPartitions = dlqPartitions;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return trusted packages
|
||||
*
|
||||
* List of trusted packages to provide the header mapper.
|
||||
*/
|
||||
public String[] getTrustedPackages() {
|
||||
return trustedPackages;
|
||||
return this.trustedPackages;
|
||||
}
|
||||
|
||||
public void setTrustedPackages(String[] trustedPackages) {
|
||||
this.trustedPackages = trustedPackages;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return dlq producer properties
|
||||
*
|
||||
* Using this, DLQ-specific producer properties can be set.
|
||||
* All the properties available through kafka producer properties can be set through this property.
|
||||
*/
|
||||
public KafkaProducerProperties getDlqProducerProperties() {
|
||||
return dlqProducerProperties;
|
||||
return this.dlqProducerProperties;
|
||||
}
|
||||
|
||||
public void setDlqProducerProperties(KafkaProducerProperties dlqProducerProperties) {
|
||||
this.dlqProducerProperties = dlqProducerProperties;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return standard headers
|
||||
*
|
||||
* Indicates which standard headers are populated by the inbound channel adapter.
|
||||
* Allowed values: none, id, timestamp, or both.
|
||||
*/
|
||||
public StandardHeaders getStandardHeaders() {
|
||||
return this.standardHeaders;
|
||||
}
|
||||
@@ -202,6 +449,11 @@ public class KafkaConsumerProperties {
|
||||
this.standardHeaders = standardHeaders;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return converter bean name
|
||||
*
|
||||
* The name of a bean that implements RecordMessageConverter.
|
||||
*/
|
||||
public String getConverterBeanName() {
|
||||
return this.converterBeanName;
|
||||
}
|
||||
@@ -210,6 +462,11 @@ public class KafkaConsumerProperties {
|
||||
this.converterBeanName = converterBeanName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return idle event interval
|
||||
*
|
||||
* The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
*/
|
||||
public long getIdleEventInterval() {
|
||||
return this.idleEventInterval;
|
||||
}
|
||||
@@ -218,6 +475,11 @@ public class KafkaConsumerProperties {
|
||||
this.idleEventInterval = idleEventInterval;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return is destination given through a pattern
|
||||
*
|
||||
* When true, the destination is treated as a regular expression Pattern used to match topic names by the broker.
|
||||
*/
|
||||
public boolean isDestinationIsPattern() {
|
||||
return this.destinationIsPattern;
|
||||
}
|
||||
@@ -226,12 +488,58 @@ public class KafkaConsumerProperties {
|
||||
this.destinationIsPattern = destinationIsPattern;
|
||||
}
|
||||
|
||||
public KafkaAdminProperties getAdmin() {
|
||||
return this.admin;
|
||||
/**
|
||||
* @return topic properties
|
||||
*
|
||||
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
|
||||
*/
|
||||
public KafkaTopicProperties getTopic() {
|
||||
return this.topic;
|
||||
}
|
||||
|
||||
public void setAdmin(KafkaAdminProperties admin) {
|
||||
this.admin = admin;
|
||||
public void setTopic(KafkaTopicProperties topic) {
|
||||
this.topic = topic;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return timeout in pollable consumers
|
||||
*
|
||||
* Timeout used for polling in pollable consumers.
|
||||
*/
|
||||
public long getPollTimeout() {
|
||||
return this.pollTimeout;
|
||||
}
|
||||
|
||||
public void setPollTimeout(long pollTimeout) {
|
||||
this.pollTimeout = pollTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the transaction manager bean name.
|
||||
*
|
||||
* Transaction manager bean name (must be {@code KafkaAwareTransactionManager}.
|
||||
*/
|
||||
public String getTransactionManager() {
|
||||
return this.transactionManager;
|
||||
}
|
||||
|
||||
public void setTransactionManager(String transactionManager) {
|
||||
this.transactionManager = transactionManager;
|
||||
}
|
||||
|
||||
public boolean isTxCommitRecovered() {
|
||||
return this.txCommitRecovered;
|
||||
}
|
||||
|
||||
public void setTxCommitRecovered(boolean txCommitRecovered) {
|
||||
this.txCommitRecovered = txCommitRecovered;
|
||||
}
|
||||
|
||||
public String getCommonErrorHandlerBeanName() {
|
||||
return commonErrorHandlerBeanName;
|
||||
}
|
||||
|
||||
public void setCommonErrorHandlerBeanName(String commonErrorHandlerBeanName) {
|
||||
this.commonErrorHandlerBeanName = commonErrorHandlerBeanName;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -16,70 +16,40 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.AbstractExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
|
||||
/**
|
||||
* Kafka specific extended binding properties class that extends from
|
||||
* {@link AbstractExtendedBindingProperties}.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Gary Russell
|
||||
* @author Soby Chacko
|
||||
* @author Oleg Zhurakousky
|
||||
*/
|
||||
@ConfigurationProperties("spring.cloud.stream.kafka")
|
||||
public class KafkaExtendedBindingProperties
|
||||
implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
|
||||
public class KafkaExtendedBindingProperties extends
|
||||
AbstractExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties, KafkaBindingProperties> {
|
||||
|
||||
private Map<String, KafkaBindingProperties> bindings = new HashMap<>();
|
||||
private static final String DEFAULTS_PREFIX = "spring.cloud.stream.kafka.default";
|
||||
|
||||
@Override
|
||||
public String getDefaultsPrefix() {
|
||||
return DEFAULTS_PREFIX;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, KafkaBindingProperties> getBindings() {
|
||||
return this.bindings;
|
||||
}
|
||||
|
||||
public void setBindings(Map<String, KafkaBindingProperties> bindings) {
|
||||
this.bindings = bindings;
|
||||
return this.doGetBindings();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
if (bindings.containsKey(channelName)) {
|
||||
if (bindings.get(channelName).getConsumer() != null) {
|
||||
return bindings.get(channelName).getConsumer();
|
||||
}
|
||||
else {
|
||||
KafkaConsumerProperties properties = new KafkaConsumerProperties();
|
||||
this.bindings.get(channelName).setConsumer(properties);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
else {
|
||||
KafkaConsumerProperties properties = new KafkaConsumerProperties();
|
||||
KafkaBindingProperties rbp = new KafkaBindingProperties();
|
||||
rbp.setConsumer(properties);
|
||||
bindings.put(channelName, rbp);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized KafkaProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
if (bindings.containsKey(channelName)) {
|
||||
if (bindings.get(channelName).getProducer() != null) {
|
||||
return bindings.get(channelName).getProducer();
|
||||
}
|
||||
else {
|
||||
KafkaProducerProperties properties = new KafkaProducerProperties();
|
||||
this.bindings.get(channelName).setProducer(properties);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
else {
|
||||
KafkaProducerProperties properties = new KafkaProducerProperties();
|
||||
KafkaBindingProperties rbp = new KafkaBindingProperties();
|
||||
rbp.setProducer(properties);
|
||||
bindings.put(channelName, rbp);
|
||||
return properties;
|
||||
}
|
||||
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
|
||||
return KafkaBindingProperties.class;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2016-2017 the original author or authors.
|
||||
* Copyright 2016-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -24,28 +24,97 @@ import javax.validation.constraints.NotNull;
|
||||
import org.springframework.expression.Expression;
|
||||
|
||||
/**
|
||||
* Extended producer properties for Kafka binder.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Henryk Konsek
|
||||
* @author Gary Russell
|
||||
* @author Aldo Sinanaj
|
||||
*/
|
||||
public class KafkaProducerProperties {
|
||||
|
||||
/**
|
||||
* Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
*/
|
||||
private int bufferSize = 16384;
|
||||
|
||||
/**
|
||||
* Set the compression.type producer property. Supported values are none, gzip, snappy and lz4.
|
||||
* See {@link CompressionType} for more details.
|
||||
*/
|
||||
private CompressionType compressionType = CompressionType.none;
|
||||
|
||||
/**
|
||||
* Whether the producer is synchronous.
|
||||
*/
|
||||
private boolean sync;
|
||||
|
||||
/**
|
||||
* A SpEL expression evaluated against the outgoing message used to evaluate the time to wait
|
||||
* for ack when synchronous publish is enabled.
|
||||
*/
|
||||
private Expression sendTimeoutExpression;
|
||||
|
||||
/**
|
||||
* How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
*/
|
||||
private int batchTimeout;
|
||||
|
||||
/**
|
||||
* A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
|
||||
*/
|
||||
private Expression messageKeyExpression;
|
||||
|
||||
/**
|
||||
* A comma-delimited list of simple patterns to match Spring messaging headers
|
||||
* to be mapped to the Kafka Headers in the ProducerRecord.
|
||||
*/
|
||||
private String[] headerPatterns;
|
||||
|
||||
/**
|
||||
* Map with a key/value pair containing generic Kafka producer properties.
|
||||
*/
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
|
||||
private KafkaAdminProperties admin = new KafkaAdminProperties();
|
||||
/**
|
||||
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
|
||||
*/
|
||||
private KafkaTopicProperties topic = new KafkaTopicProperties();
|
||||
|
||||
/**
|
||||
* Set to true to override the default binding destination (topic name) with the value of the
|
||||
* KafkaHeaders.TOPIC message header in the outbound message. If the header is not present,
|
||||
* the default binding destination is used.
|
||||
*/
|
||||
private boolean useTopicHeader;
|
||||
|
||||
/**
|
||||
* The bean name of a MessageChannel to which successful send results should be sent;
|
||||
* the bean must exist in the application context.
|
||||
*/
|
||||
private String recordMetadataChannel;
|
||||
|
||||
/**
|
||||
* Transaction manager bean name - overrides the binder's transaction configuration.
|
||||
*/
|
||||
private String transactionManager;
|
||||
|
||||
/*
|
||||
* Timeout value in seconds for the duration to wait when closing the producer.
|
||||
* If not set this defaults to 30 seconds.
|
||||
*/
|
||||
private int closeTimeout;
|
||||
|
||||
/**
|
||||
* Set to true to disable transactions.
|
||||
*/
|
||||
private boolean allowNonTransactional;
|
||||
|
||||
/**
|
||||
* @return buffer size
|
||||
*
|
||||
* Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
*/
|
||||
public int getBufferSize() {
|
||||
return this.bufferSize;
|
||||
}
|
||||
@@ -54,6 +123,12 @@ public class KafkaProducerProperties {
|
||||
this.bufferSize = bufferSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return compression type {@link CompressionType}
|
||||
*
|
||||
* Set the compression.type producer property. Supported values are none, gzip, snappy, lz4 and zstd.
|
||||
* See {@link CompressionType} for more details.
|
||||
*/
|
||||
@NotNull
|
||||
public CompressionType getCompressionType() {
|
||||
return this.compressionType;
|
||||
@@ -63,6 +138,11 @@ public class KafkaProducerProperties {
|
||||
this.compressionType = compressionType;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return if synchronous sending is enabled
|
||||
*
|
||||
* Whether the producer is synchronous.
|
||||
*/
|
||||
public boolean isSync() {
|
||||
return this.sync;
|
||||
}
|
||||
@@ -71,6 +151,25 @@ public class KafkaProducerProperties {
|
||||
this.sync = sync;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return timeout expression for send
|
||||
*
|
||||
* A SpEL expression evaluated against the outgoing message used to evaluate the time to wait
|
||||
* for ack when synchronous publish is enabled.
|
||||
*/
|
||||
public Expression getSendTimeoutExpression() {
|
||||
return this.sendTimeoutExpression;
|
||||
}
|
||||
|
||||
public void setSendTimeoutExpression(Expression sendTimeoutExpression) {
|
||||
this.sendTimeoutExpression = sendTimeoutExpression;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return batch timeout
|
||||
*
|
||||
* How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
*/
|
||||
public int getBatchTimeout() {
|
||||
return this.batchTimeout;
|
||||
}
|
||||
@@ -79,14 +178,25 @@ public class KafkaProducerProperties {
|
||||
this.batchTimeout = batchTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return message key expression
|
||||
*
|
||||
* A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
|
||||
*/
|
||||
public Expression getMessageKeyExpression() {
|
||||
return messageKeyExpression;
|
||||
return this.messageKeyExpression;
|
||||
}
|
||||
|
||||
public void setMessageKeyExpression(Expression messageKeyExpression) {
|
||||
this.messageKeyExpression = messageKeyExpression;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return header patterns
|
||||
*
|
||||
* A comma-delimited list of simple patterns to match Spring messaging headers
|
||||
* to be mapped to the Kafka Headers in the ProducerRecord.
|
||||
*/
|
||||
public String[] getHeaderPatterns() {
|
||||
return this.headerPatterns;
|
||||
}
|
||||
@@ -95,6 +205,11 @@ public class KafkaProducerProperties {
|
||||
this.headerPatterns = headerPatterns;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return map of configuration
|
||||
*
|
||||
* Map with a key/value pair containing generic Kafka producer properties.
|
||||
*/
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
@@ -103,18 +218,110 @@ public class KafkaProducerProperties {
|
||||
this.configuration = configuration;
|
||||
}
|
||||
|
||||
public KafkaAdminProperties getAdmin() {
|
||||
return this.admin;
|
||||
/**
|
||||
* @return topic properties
|
||||
*
|
||||
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
|
||||
*/
|
||||
public KafkaTopicProperties getTopic() {
|
||||
return this.topic;
|
||||
}
|
||||
|
||||
public void setAdmin(KafkaAdminProperties admin) {
|
||||
this.admin = admin;
|
||||
public void setTopic(KafkaTopicProperties topic) {
|
||||
this.topic = topic;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return if using topic header
|
||||
*
|
||||
* Set to true to override the default binding destination (topic name) with the value of the
|
||||
* KafkaHeaders.TOPIC message header in the outbound message. If the header is not present,
|
||||
* the default binding destination is used.
|
||||
*/
|
||||
public boolean isUseTopicHeader() {
|
||||
return this.useTopicHeader;
|
||||
}
|
||||
|
||||
public void setUseTopicHeader(boolean useTopicHeader) {
|
||||
this.useTopicHeader = useTopicHeader;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return record metadata channel
|
||||
*
|
||||
* The bean name of a MessageChannel to which successful send results should be sent;
|
||||
* the bean must exist in the application context.
|
||||
*/
|
||||
public String getRecordMetadataChannel() {
|
||||
return this.recordMetadataChannel;
|
||||
}
|
||||
|
||||
public void setRecordMetadataChannel(String recordMetadataChannel) {
|
||||
this.recordMetadataChannel = recordMetadataChannel;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the transaction manager bean name.
|
||||
*
|
||||
* Transaction manager bean name (must be {@code KafkaAwareTransactionManager}.
|
||||
*/
|
||||
public String getTransactionManager() {
|
||||
return this.transactionManager;
|
||||
}
|
||||
|
||||
public void setTransactionManager(String transactionManager) {
|
||||
this.transactionManager = transactionManager;
|
||||
}
|
||||
|
||||
/*
|
||||
* @return timeout in seconds for closing the producer
|
||||
*/
|
||||
public int getCloseTimeout() {
|
||||
return this.closeTimeout;
|
||||
}
|
||||
|
||||
public void setCloseTimeout(int closeTimeout) {
|
||||
this.closeTimeout = closeTimeout;
|
||||
}
|
||||
|
||||
public boolean isAllowNonTransactional() {
|
||||
return this.allowNonTransactional;
|
||||
}
|
||||
|
||||
public void setAllowNonTransactional(boolean allowNonTransactional) {
|
||||
this.allowNonTransactional = allowNonTransactional;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enumeration for compression types.
|
||||
*/
|
||||
public enum CompressionType {
|
||||
|
||||
/**
|
||||
* No compression.
|
||||
*/
|
||||
none,
|
||||
|
||||
/**
|
||||
* gzip based compression.
|
||||
*/
|
||||
gzip,
|
||||
snappy
|
||||
|
||||
/**
|
||||
* snappy based compression.
|
||||
*/
|
||||
snappy,
|
||||
|
||||
/**
|
||||
* lz4 compression.
|
||||
*/
|
||||
lz4,
|
||||
|
||||
/**
|
||||
* zstd compression.
|
||||
*/
|
||||
zstd,
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -23,20 +23,20 @@ import java.util.Map;
|
||||
/**
|
||||
* Properties for configuring topics.
|
||||
*
|
||||
* @author Gary Russell
|
||||
* @since 2.0
|
||||
* @author Aldo Sinanaj
|
||||
* @since 2.2
|
||||
*
|
||||
*/
|
||||
public class KafkaAdminProperties {
|
||||
public class KafkaTopicProperties {
|
||||
|
||||
private Short replicationFactor;
|
||||
|
||||
private Map<Integer, List<Integer>> replicasAssignments = new HashMap<>();
|
||||
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
private Map<String, String> properties = new HashMap<>();
|
||||
|
||||
public Short getReplicationFactor() {
|
||||
return this.replicationFactor;
|
||||
return replicationFactor;
|
||||
}
|
||||
|
||||
public void setReplicationFactor(Short replicationFactor) {
|
||||
@@ -44,19 +44,19 @@ public class KafkaAdminProperties {
|
||||
}
|
||||
|
||||
public Map<Integer, List<Integer>> getReplicasAssignments() {
|
||||
return this.replicasAssignments;
|
||||
return replicasAssignments;
|
||||
}
|
||||
|
||||
public void setReplicasAssignments(Map<Integer, List<Integer>> replicasAssignments) {
|
||||
this.replicasAssignments = replicasAssignments;
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
public Map<String, String> getProperties() {
|
||||
return properties;
|
||||
}
|
||||
|
||||
public void setConfiguration(Map<String, String> configuration) {
|
||||
this.configuration = configuration;
|
||||
public void setProperties(Map<String, String> properties) {
|
||||
this.properties = properties;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright 2021-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.provisioning;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Customizer for configuring AdminClient.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.1.2
|
||||
*/
|
||||
@FunctionalInterface
|
||||
public interface AdminClientConfigCustomizer {
|
||||
|
||||
void configure(Map<String, Object> adminClientProperties);
|
||||
}
|
||||
@@ -5,7 +5,7 @@
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -18,20 +18,27 @@ package org.springframework.cloud.stream.binder.kafka.provisioning;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.AdminClientConfig;
|
||||
import org.apache.kafka.clients.admin.AlterConfigOp;
|
||||
import org.apache.kafka.clients.admin.AlterConfigsResult;
|
||||
import org.apache.kafka.clients.admin.Config;
|
||||
import org.apache.kafka.clients.admin.ConfigEntry;
|
||||
import org.apache.kafka.clients.admin.CreatePartitionsResult;
|
||||
import org.apache.kafka.clients.admin.CreateTopicsResult;
|
||||
import org.apache.kafka.clients.admin.DescribeConfigsResult;
|
||||
import org.apache.kafka.clients.admin.DescribeTopicsResult;
|
||||
import org.apache.kafka.clients.admin.ListTopicsResult;
|
||||
import org.apache.kafka.clients.admin.NewPartitions;
|
||||
@@ -39,16 +46,19 @@ import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.common.KafkaFuture;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.config.ConfigResource;
|
||||
import org.apache.kafka.common.errors.TopicExistsException;
|
||||
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
|
||||
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.BinderException;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaAdminProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaTopicProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
|
||||
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
|
||||
import org.springframework.cloud.stream.provisioning.ProducerDestination;
|
||||
@@ -59,25 +69,30 @@ import org.springframework.retry.backoff.ExponentialBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Kafka implementation for {@link ProvisioningProvider}
|
||||
* Kafka implementation for {@link ProvisioningProvider}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Simon Flandergan
|
||||
* @author Oleg Zhurakousky
|
||||
* @author Aldo Sinanaj
|
||||
*/
|
||||
public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
|
||||
ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {
|
||||
public class KafkaTopicProvisioner implements
|
||||
// @checkstyle:off
|
||||
ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>>,
|
||||
// @checkstyle:on
|
||||
InitializingBean {
|
||||
|
||||
private static final Log logger = LogFactory.getLog(KafkaTopicProvisioner.class);
|
||||
|
||||
private static final int DEFAULT_OPERATION_TIMEOUT = 30;
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;
|
||||
@@ -86,15 +101,31 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
|
||||
private RetryOperations metadataRetryOperations;
|
||||
|
||||
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
/**
|
||||
* Create an instance.
|
||||
* @param kafkaBinderConfigurationProperties the binder configuration properties.
|
||||
* @param kafkaProperties the boot Kafka properties used to build the
|
||||
* @param adminClientConfigCustomizer to customize {@link AdminClient}.
|
||||
* {@link AdminClient}.
|
||||
*/
|
||||
public KafkaTopicProvisioner(
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties,
|
||||
AdminClientConfigCustomizer adminClientConfigCustomizer) {
|
||||
Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
|
||||
this.adminClientProperties = kafkaProperties.buildAdminProperties();
|
||||
this.configurationProperties = kafkaBinderConfigurationProperties;
|
||||
normalalizeBootPropsWithBinder(adminClientProperties, kafkaProperties, kafkaBinderConfigurationProperties);
|
||||
this.adminClientProperties = kafkaProperties.buildAdminProperties();
|
||||
normalalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties,
|
||||
kafkaBinderConfigurationProperties);
|
||||
// If the application provides an AdminConfig customizer
|
||||
// and overrides properties, that takes precedence.
|
||||
if (adminClientConfigCustomizer != null) {
|
||||
adminClientConfigCustomizer.configure(this.adminClientProperties);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Mutator for metadata retry operations.
|
||||
* @param metadataRetryOperations the retry configuration
|
||||
*/
|
||||
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
|
||||
@@ -102,7 +133,7 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterPropertiesSet() throws Exception {
|
||||
public void afterPropertiesSet() {
|
||||
if (this.metadataRetryOperations == null) {
|
||||
RetryTemplate retryTemplate = new RetryTemplate();
|
||||
|
||||
@@ -123,25 +154,35 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
public ProducerDestination provisionProducerDestination(final String name,
|
||||
ExtendedProducerProperties<KafkaProducerProperties> properties) {
|
||||
|
||||
if (this.logger.isInfoEnabled()) {
|
||||
this.logger.info("Using kafka topic for outbound: " + name);
|
||||
if (logger.isInfoEnabled()) {
|
||||
logger.info("Using kafka topic for outbound: " + name);
|
||||
}
|
||||
KafkaTopicUtils.validateTopicName(name);
|
||||
try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
|
||||
createTopic(adminClient, name, properties.getPartitionCount(), false, properties.getExtension().getAdmin());
|
||||
try (AdminClient adminClient = createAdminClient()) {
|
||||
createTopic(adminClient, name, properties.getPartitionCount(), false,
|
||||
properties.getExtension().getTopic());
|
||||
int partitions = 0;
|
||||
Map<String, TopicDescription> topicDescriptions = new HashMap<>();
|
||||
if (this.configurationProperties.isAutoCreateTopics()) {
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
|
||||
|
||||
Map<String, TopicDescription> topicDescriptions = null;
|
||||
try {
|
||||
topicDescriptions = all.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new ProvisioningException("Problems encountered with partitions finding", e);
|
||||
}
|
||||
TopicDescription topicDescription = topicDescriptions.get(name);
|
||||
this.metadataRetryOperations.execute(context -> {
|
||||
try {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Attempting to retrieve the description for the topic: " + name);
|
||||
}
|
||||
DescribeTopicsResult describeTopicsResult = adminClient
|
||||
.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
|
||||
.all();
|
||||
topicDescriptions.putAll(all.get(this.operationTimeout, TimeUnit.SECONDS));
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new ProvisioningException("Problems encountered with partitions finding for: " + name, ex);
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
TopicDescription topicDescription = topicDescriptions.get(name);
|
||||
if (topicDescription != null) {
|
||||
partitions = topicDescription.partitions().size();
|
||||
}
|
||||
return new KafkaProducerDestination(name, partitions);
|
||||
@@ -149,7 +190,8 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConsumerDestination provisionConsumerDestination(final String name, final String group,
|
||||
public ConsumerDestination provisionConsumerDestination(final String name,
|
||||
final String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
if (!properties.isMultiplex()) {
|
||||
return doProvisionConsumerDestination(name, group, properties);
|
||||
@@ -163,14 +205,15 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
}
|
||||
}
|
||||
|
||||
private ConsumerDestination doProvisionConsumerDestination(final String name, final String group,
|
||||
private ConsumerDestination doProvisionConsumerDestination(final String name,
|
||||
final String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
|
||||
if (properties.getExtension().isDestinationIsPattern()) {
|
||||
Assert.isTrue(!properties.getExtension().isEnableDlq(),
|
||||
"enableDLQ is not allowed when listening to topic patterns");
|
||||
if (this.logger.isDebugEnabled()) {
|
||||
this.logger.debug("Listening to a topic pattern - " + name
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Listening to a topic pattern - " + name
|
||||
+ " - no provisioning performed");
|
||||
}
|
||||
return new KafkaConsumerDestination(name);
|
||||
@@ -185,22 +228,28 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
|
||||
ConsumerDestination consumerDestination = new KafkaConsumerDestination(name);
|
||||
try (AdminClient adminClient = createAdminClient()) {
|
||||
createTopic(adminClient, name, partitionCount, properties.getExtension().isAutoRebalanceEnabled(),
|
||||
properties.getExtension().getAdmin());
|
||||
createTopic(adminClient, name, partitionCount,
|
||||
properties.getExtension().isAutoRebalanceEnabled(),
|
||||
properties.getExtension().getTopic());
|
||||
if (this.configurationProperties.isAutoCreateTopics()) {
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
|
||||
DescribeTopicsResult describeTopicsResult = adminClient
|
||||
.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
|
||||
.all();
|
||||
try {
|
||||
Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
|
||||
Map<String, TopicDescription> topicDescriptions = all
|
||||
.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(name);
|
||||
int partitions = topicDescription.partitions().size();
|
||||
consumerDestination = createDlqIfNeedBe(adminClient, name, group, properties, anonymous, partitions);
|
||||
consumerDestination = createDlqIfNeedBe(adminClient, name, group,
|
||||
properties, anonymous, partitions);
|
||||
if (consumerDestination == null) {
|
||||
consumerDestination = new KafkaConsumerDestination(name, partitions);
|
||||
consumerDestination = new KafkaConsumerDestination(name,
|
||||
partitions);
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new ProvisioningException("provisioning exception", e);
|
||||
catch (Exception ex) {
|
||||
throw new ProvisioningException("Provisioning exception encountered for " + name, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -212,22 +261,24 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
}
|
||||
|
||||
/**
|
||||
* In general, binder properties supersede boot kafka properties.
|
||||
* The one exception is the bootstrap servers. In that case, we should only override
|
||||
* the boot properties if (there is a binder property AND it is a non-default value)
|
||||
* OR (if there is no boot property); this is needed because the binder property
|
||||
* never returns a null value.
|
||||
* In general, binder properties supersede boot kafka properties. The one exception is
|
||||
* the bootstrap servers. In that case, we should only override the boot properties if
|
||||
* (there is a binder property AND it is a non-default value) OR (if there is no boot
|
||||
* property); this is needed because the binder property never returns a null value.
|
||||
* @param adminProps the admin properties to normalize.
|
||||
* @param bootProps the boot kafka properties.
|
||||
* @param binderProps the binder kafka properties.
|
||||
*/
|
||||
private void normalalizeBootPropsWithBinder(Map<String, Object> adminProps, KafkaProperties bootProps,
|
||||
KafkaBinderConfigurationProperties binderProps) {
|
||||
public static void normalalizeBootPropsWithBinder(Map<String, Object> adminProps,
|
||||
KafkaProperties bootProps, KafkaBinderConfigurationProperties binderProps) {
|
||||
// First deal with the outlier
|
||||
String kafkaConnectionString = binderProps.getKafkaConnectionString();
|
||||
if (ObjectUtils.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
|
||||
|| !kafkaConnectionString.equals(binderProps.getDefaultKafkaConnectionString())) {
|
||||
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
|
||||
if (ObjectUtils
|
||||
.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
|
||||
|| !kafkaConnectionString
|
||||
.equals(binderProps.getDefaultKafkaConnectionString())) {
|
||||
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
kafkaConnectionString);
|
||||
}
|
||||
// Now override any boot values with binder values
|
||||
Map<String, String> binderProperties = binderProps.getConfiguration();
|
||||
@@ -239,29 +290,37 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
}
|
||||
if (adminConfigNames.contains(key)) {
|
||||
Object replaced = adminProps.put(key, value);
|
||||
if (replaced != null && this.logger.isDebugEnabled()) {
|
||||
logger.debug("Overrode boot property: [" + key + "], from: [" + replaced + "] to: [" + value + "]");
|
||||
if (replaced != null && KafkaTopicProvisioner.logger.isDebugEnabled()) {
|
||||
KafkaTopicProvisioner.logger.debug("Overrode boot property: [" + key + "], from: ["
|
||||
+ replaced + "] to: [" + value + "]");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name, String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties,
|
||||
boolean anonymous, int partitions) {
|
||||
private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name,
|
||||
String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties,
|
||||
boolean anonymous, int partitions) {
|
||||
|
||||
if (properties.getExtension().isEnableDlq() && !anonymous) {
|
||||
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
|
||||
properties.getExtension().getDlqName() : "error." + name + "." + group;
|
||||
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName())
|
||||
? properties.getExtension().getDlqName()
|
||||
: "error." + name + "." + group;
|
||||
int dlqPartitions = properties.getExtension().getDlqPartitions() == null
|
||||
? partitions
|
||||
: properties.getExtension().getDlqPartitions();
|
||||
try {
|
||||
createTopicAndPartitions(adminClient, dlqTopic, partitions,
|
||||
properties.getExtension().isAutoRebalanceEnabled(), properties.getExtension().getAdmin());
|
||||
final KafkaProducerProperties dlqProducerProperties = properties.getExtension().getDlqProducerProperties();
|
||||
createTopicAndPartitions(adminClient, dlqTopic, dlqPartitions,
|
||||
properties.getExtension().isAutoRebalanceEnabled(),
|
||||
dlqProducerProperties.getTopic());
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
throw new ProvisioningException("Provisioning exception encountered for " + name, throwable);
|
||||
}
|
||||
}
|
||||
return new KafkaConsumerDestination(name, partitions, dlqTopic);
|
||||
@@ -269,115 +328,149 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
return null;
|
||||
}
|
||||
|
||||
private void createTopic(AdminClient adminClient, String name, int partitionCount, boolean tolerateLowerPartitionsOnBroker,
|
||||
KafkaAdminProperties properties) {
|
||||
private void createTopic(AdminClient adminClient, String name, int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker, KafkaTopicProperties properties) {
|
||||
try {
|
||||
createTopicIfNecessary(adminClient, name, partitionCount, tolerateLowerPartitionsOnBroker, properties);
|
||||
createTopicIfNecessary(adminClient, name, partitionCount,
|
||||
tolerateLowerPartitionsOnBroker, properties);
|
||||
}
|
||||
// TODO: Remove catching Throwable. See this thread:
|
||||
// TODO:
|
||||
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
// TODO:
|
||||
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
|
||||
throw new ProvisioningException("Provisioning exception encountered for " + name, throwable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void createTopicIfNecessary(AdminClient adminClient, final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker, KafkaAdminProperties properties) throws Throwable {
|
||||
private void createTopicIfNecessary(AdminClient adminClient, final String topicName,
|
||||
final int partitionCount, boolean tolerateLowerPartitionsOnBroker,
|
||||
KafkaTopicProperties properties) throws Throwable {
|
||||
|
||||
if (this.configurationProperties.isAutoCreateTopics()) {
|
||||
createTopicAndPartitions(adminClient, topicName, partitionCount, tolerateLowerPartitionsOnBroker,
|
||||
properties);
|
||||
createTopicAndPartitions(adminClient, topicName, partitionCount,
|
||||
tolerateLowerPartitionsOnBroker, properties);
|
||||
}
|
||||
else {
|
||||
this.logger.info("Auto creation of topics is disabled.");
|
||||
logger.info("Auto creation of topics is disabled.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Kafka topic if needed, or try to increase its partition count to the
|
||||
* desired number.
|
||||
* @param adminClient
|
||||
* @param adminProperties
|
||||
* @param adminClient kafka admin client
|
||||
* @param topicName topic name
|
||||
* @param partitionCount partition count
|
||||
* @param tolerateLowerPartitionsOnBroker whether lower partitions count on broker is
|
||||
* tolerated ot not
|
||||
* @param topicProperties kafka topic properties
|
||||
* @throws Throwable from topic creation
|
||||
*/
|
||||
private void createTopicAndPartitions(AdminClient adminClient, final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker, KafkaAdminProperties adminProperties) throws Throwable {
|
||||
private void createTopicAndPartitions(AdminClient adminClient, final String topicName,
|
||||
final int partitionCount, boolean tolerateLowerPartitionsOnBroker,
|
||||
KafkaTopicProperties topicProperties) throws Throwable {
|
||||
|
||||
ListTopicsResult listTopicsResult = adminClient.listTopics();
|
||||
KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();
|
||||
|
||||
Set<String> names = namesFutures.get(operationTimeout, TimeUnit.SECONDS);
|
||||
Set<String> names = namesFutures.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
if (names.contains(topicName)) {
|
||||
//check if topic.properties are different from Topic Configuration in Kafka
|
||||
if (this.configurationProperties.isAutoAlterTopics()) {
|
||||
alterTopicConfigsIfNecessary(adminClient, topicName, topicProperties);
|
||||
}
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is true
|
||||
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
|
||||
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
|
||||
int effectivePartitionCount = this.configurationProperties
|
||||
.isAutoAddPartitions()
|
||||
? Math.max(
|
||||
this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount)
|
||||
: partitionCount;
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(topicName));
|
||||
KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult.all();
|
||||
Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get(operationTimeout, TimeUnit.SECONDS);
|
||||
DescribeTopicsResult describeTopicsResult = adminClient
|
||||
.describeTopics(Collections.singletonList(topicName));
|
||||
KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult
|
||||
.all();
|
||||
Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture
|
||||
.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(topicName);
|
||||
int partitionSize = topicDescription.partitions().size();
|
||||
if (partitionSize < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
CreatePartitionsResult partitions = adminClient.createPartitions(
|
||||
Collections.singletonMap(topicName, NewPartitions.increaseTo(effectivePartitionCount)));
|
||||
partitions.all().get(operationTimeout, TimeUnit.SECONDS);
|
||||
CreatePartitionsResult partitions = adminClient
|
||||
.createPartitions(Collections.singletonMap(topicName,
|
||||
NewPartitions.increaseTo(effectivePartitionCount)));
|
||||
partitions.all().get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
else if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
|
||||
logger.warn("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ")
|
||||
+ "been found instead." + "There will be "
|
||||
+ (effectivePartitionCount - partitionSize)
|
||||
+ " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling " +
|
||||
"`autoAddPartitions`");
|
||||
throw new ProvisioningException(
|
||||
"The number of expected partitions was: " + partitionCount
|
||||
+ ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ")
|
||||
+ "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling "
|
||||
+ "`autoAddPartitions`");
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
// always consider minPartitionCount for topic creation
|
||||
final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount);
|
||||
this.metadataRetryOperations.execute(context -> {
|
||||
final int effectivePartitionCount = Math.max(
|
||||
this.configurationProperties.getMinPartitionCount(), partitionCount);
|
||||
this.metadataRetryOperations.execute((context) -> {
|
||||
|
||||
NewTopic newTopic;
|
||||
Map<Integer, List<Integer>> replicasAssignments = adminProperties.getReplicasAssignments();
|
||||
if (replicasAssignments != null && replicasAssignments.size() > 0) {
|
||||
newTopic = new NewTopic(topicName, adminProperties.getReplicasAssignments());
|
||||
Map<Integer, List<Integer>> replicasAssignments = topicProperties
|
||||
.getReplicasAssignments();
|
||||
if (replicasAssignments != null && replicasAssignments.size() > 0) {
|
||||
newTopic = new NewTopic(topicName,
|
||||
topicProperties.getReplicasAssignments());
|
||||
}
|
||||
else {
|
||||
newTopic = new NewTopic(topicName, effectivePartitionCount,
|
||||
adminProperties.getReplicationFactor() != null
|
||||
? adminProperties.getReplicationFactor()
|
||||
: configurationProperties.getReplicationFactor());
|
||||
topicProperties.getReplicationFactor() != null
|
||||
? topicProperties.getReplicationFactor()
|
||||
: this.configurationProperties
|
||||
.getReplicationFactor());
|
||||
}
|
||||
if (adminProperties.getConfiguration().size() > 0) {
|
||||
newTopic.configs(adminProperties.getConfiguration());
|
||||
if (topicProperties.getProperties().size() > 0) {
|
||||
newTopic.configs(topicProperties.getProperties());
|
||||
}
|
||||
CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(newTopic));
|
||||
CreateTopicsResult createTopicsResult = adminClient
|
||||
.createTopics(Collections.singletonList(newTopic));
|
||||
try {
|
||||
createTopicsResult.all().get(operationTimeout, TimeUnit.SECONDS);
|
||||
createTopicsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception e) {
|
||||
if (e instanceof ExecutionException) {
|
||||
String exceptionMessage = e.getMessage();
|
||||
if (exceptionMessage.contains("org.apache.kafka.common.errors.TopicExistsException")) {
|
||||
catch (Exception ex) {
|
||||
if (ex instanceof ExecutionException) {
|
||||
if (ex.getCause() instanceof TopicExistsException) {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
|
||||
logger.warn("Attempt to create topic: " + topicName
|
||||
+ ". Topic already exists.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
logger.error("Failed to create topics", e.getCause());
|
||||
throw e.getCause();
|
||||
logger.error("Failed to create topics", ex.getCause());
|
||||
throw ex.getCause();
|
||||
}
|
||||
}
|
||||
else {
|
||||
logger.error("Failed to create topics", e.getCause());
|
||||
throw e.getCause();
|
||||
logger.error("Failed to create topics", ex.getCause());
|
||||
throw ex.getCause();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
@@ -385,33 +478,117 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
}
|
||||
}
|
||||
|
||||
public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
|
||||
final boolean tolerateLowerPartitionsOnBroker,
|
||||
final Callable<Collection<PartitionInfo>> callable) {
|
||||
try {
|
||||
return this.metadataRetryOperations
|
||||
.execute(context -> {
|
||||
Collection<PartitionInfo> partitions = callable.call();
|
||||
// do a sanity check on the partition set
|
||||
int partitionSize = partitions.size();
|
||||
if (partitionSize < partitionCount) {
|
||||
if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
|
||||
}
|
||||
}
|
||||
return partitions;
|
||||
});
|
||||
private void alterTopicConfigsIfNecessary(AdminClient adminClient,
|
||||
String topicName,
|
||||
KafkaTopicProperties topicProperties)
|
||||
throws InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
|
||||
ConfigResource topicConfigResource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
|
||||
DescribeConfigsResult describeConfigsResult = adminClient
|
||||
.describeConfigs(Collections.singletonList(topicConfigResource));
|
||||
KafkaFuture<Map<ConfigResource, Config>> topicConfigurationFuture = describeConfigsResult.all();
|
||||
Map<ConfigResource, Config> topicConfigMap = topicConfigurationFuture
|
||||
.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
Config config = topicConfigMap.get(topicConfigResource);
|
||||
final List<AlterConfigOp> updatedConfigEntries = topicProperties.getProperties().entrySet().stream()
|
||||
.filter(propertiesEntry -> {
|
||||
// Property is new and should be added
|
||||
if (config.get(propertiesEntry.getKey()) == null) {
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
// Property changed and should be updated
|
||||
return !config.get(propertiesEntry.getKey()).value().equals(propertiesEntry.getValue());
|
||||
}
|
||||
|
||||
})
|
||||
.map(propertyEntry -> new ConfigEntry(propertyEntry.getKey(), propertyEntry.getValue()))
|
||||
.map(configEntry -> new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))
|
||||
.collect(Collectors.toList());
|
||||
if (!updatedConfigEntries.isEmpty()) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Attempting to alter configs " + updatedConfigEntries + " for the topic:" + topicName);
|
||||
}
|
||||
Map<ConfigResource, Collection<AlterConfigOp>> alterConfigForTopics = new HashMap<>();
|
||||
alterConfigForTopics.put(topicConfigResource, updatedConfigEntries);
|
||||
AlterConfigsResult alterConfigsResult = adminClient.incrementalAlterConfigs(alterConfigForTopics);
|
||||
alterConfigsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception e) {
|
||||
this.logger.error("Cannot initialize Binder", e);
|
||||
throw new BinderException("Cannot initialize binder:", e);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that the topic has the expected number of partitions and return the partition information.
|
||||
* @param partitionCount the expected count.
|
||||
* @param tolerateLowerPartitionsOnBroker if false, throw an exception if there are not enough partitions.
|
||||
* @param callable a Callable that will provide the partition information.
|
||||
* @param topicName the topic./
|
||||
* @return the partition information.
|
||||
*/
|
||||
public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
|
||||
final boolean tolerateLowerPartitionsOnBroker,
|
||||
final Callable<Collection<PartitionInfo>> callable, final String topicName) {
|
||||
try {
|
||||
return this.metadataRetryOperations.execute((context) -> {
|
||||
Collection<PartitionInfo> partitions = Collections.emptyList();
|
||||
|
||||
try {
|
||||
// This call may return null or throw an exception.
|
||||
partitions = callable.call();
|
||||
}
|
||||
catch (Exception ex) {
|
||||
// The above call can potentially throw exceptions such as timeout. If
|
||||
// we can determine
|
||||
// that the exception was due to an unknown topic on the broker, just
|
||||
// simply rethrow that.
|
||||
if (ex instanceof UnknownTopicOrPartitionException) {
|
||||
throw ex;
|
||||
}
|
||||
logger.error("Failed to obtain partition information", ex);
|
||||
}
|
||||
// In some cases, the above partition query may not throw an UnknownTopic..Exception for various reasons.
|
||||
// For that, we are forcing another query to ensure that the topic is present on the server.
|
||||
if (CollectionUtils.isEmpty(partitions)) {
|
||||
try (AdminClient adminClient = AdminClient
|
||||
.create(this.adminClientProperties)) {
|
||||
final DescribeTopicsResult describeTopicsResult = adminClient
|
||||
.describeTopics(Collections.singletonList(topicName));
|
||||
|
||||
describeTopicsResult.all().get();
|
||||
}
|
||||
catch (ExecutionException ex) {
|
||||
if (ex.getCause() instanceof UnknownTopicOrPartitionException) {
|
||||
throw (UnknownTopicOrPartitionException) ex.getCause();
|
||||
}
|
||||
else {
|
||||
logger.warn("No partitions have been retrieved for the topic "
|
||||
+ "(" + topicName
|
||||
+ "). This will affect the health check.");
|
||||
}
|
||||
}
|
||||
}
|
||||
// do a sanity check on the partition set
|
||||
int partitionSize = CollectionUtils.isEmpty(partitions) ? 0 : partitions.size();
|
||||
if (partitionSize < partitionCount) {
|
||||
if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ")
|
||||
+ "been found instead." + "There will be "
|
||||
+ (partitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(
|
||||
"The number of expected partitions was: " + partitionCount
|
||||
+ ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ")
|
||||
+ "been found instead");
|
||||
}
|
||||
}
|
||||
return partitions;
|
||||
});
|
||||
}
|
||||
catch (Exception ex) {
|
||||
logger.error("Cannot initialize Binder", ex);
|
||||
throw new BinderException("Cannot initialize binder:", ex);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -428,21 +605,20 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return producerDestinationName;
|
||||
return this.producerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getNameForPartition(int partition) {
|
||||
return producerDestinationName;
|
||||
return this.producerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaProducerDestination{" +
|
||||
"producerDestinationName='" + producerDestinationName + '\'' +
|
||||
", partitions=" + partitions +
|
||||
'}';
|
||||
return "KafkaProducerDestination{" + "producerDestinationName='"
|
||||
+ producerDestinationName + '\'' + ", partitions=" + partitions + '}';
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static final class KafkaConsumerDestination implements ConsumerDestination {
|
||||
@@ -461,7 +637,8 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
this(consumerDestinationName, partitions, null);
|
||||
}
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName, Integer partitions, String dlqName) {
|
||||
KafkaConsumerDestination(String consumerDestinationName, Integer partitions,
|
||||
String dlqName) {
|
||||
this.consumerDestinationName = consumerDestinationName;
|
||||
this.partitions = partitions;
|
||||
this.dlqName = dlqName;
|
||||
@@ -474,11 +651,11 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaConsumerDestination{" +
|
||||
"consumerDestinationName='" + consumerDestinationName + '\'' +
|
||||
", partitions=" + partitions +
|
||||
", dlqName='" + dlqName + '\'' +
|
||||
'}';
|
||||
return "KafkaConsumerDestination{" + "consumerDestinationName='"
|
||||
+ consumerDestinationName + '\'' + ", partitions=" + partitions
|
||||
+ ", dlqName='" + dlqName + '\'' + '}';
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright 2020-2020 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.utils;
|
||||
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
|
||||
/**
|
||||
* A {@link BiFunction} extension for defining DLQ destination resolvers.
|
||||
*
|
||||
* The BiFunction takes the {@link ConsumerRecord} and the exception as inputs
|
||||
* and returns a topic name as the DLQ.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.9
|
||||
*/
|
||||
@FunctionalInterface
|
||||
public interface DlqDestinationResolver extends BiFunction<ConsumerRecord<?, ?>, Exception, String> {
|
||||
|
||||
}
|
||||
@@ -0,0 +1,76 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.utils;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
|
||||
import org.springframework.lang.Nullable;
|
||||
|
||||
/**
|
||||
* A TriFunction that takes a consumer group, consumer record, and throwable and returns
|
||||
* which partition to publish to the dead letter topic. Returning {@code null} means Kafka
|
||||
* will choose the partition.
|
||||
*
|
||||
* @author Gary Russell
|
||||
* @since 3.0
|
||||
*
|
||||
*/
|
||||
@FunctionalInterface
|
||||
public interface DlqPartitionFunction {
|
||||
|
||||
/**
|
||||
* Returns the same partition as the original recor.
|
||||
*/
|
||||
DlqPartitionFunction ORIGINAL_PARTITION = (group, rec, ex) -> rec.partition();
|
||||
|
||||
/**
|
||||
* Returns 0.
|
||||
*/
|
||||
DlqPartitionFunction PARTITION_ZERO = (group, rec, ex) -> 0;
|
||||
|
||||
/**
|
||||
* Apply the function.
|
||||
* @param group the consumer group.
|
||||
* @param record the consumer record.
|
||||
* @param throwable the exception.
|
||||
* @return the DLQ partition, or null.
|
||||
*/
|
||||
@Nullable
|
||||
Integer apply(String group, ConsumerRecord<?, ?> record, Throwable throwable);
|
||||
|
||||
/**
|
||||
* Determine the fallback function to use based on the dlq partition count if no
|
||||
* {@link DlqPartitionFunction} bean is provided.
|
||||
* @param dlqPartitions the partition count.
|
||||
* @param logger the logger.
|
||||
* @return the fallback.
|
||||
*/
|
||||
static DlqPartitionFunction determineFallbackFunction(@Nullable Integer dlqPartitions, Log logger) {
|
||||
if (dlqPartitions == null) {
|
||||
return ORIGINAL_PARTITION;
|
||||
}
|
||||
else if (dlqPartitions > 1) {
|
||||
logger.error("'dlqPartitions' is > 1 but a custom DlqPartitionFunction bean is not provided");
|
||||
return ORIGINAL_PARTITION;
|
||||
}
|
||||
else {
|
||||
return PARTITION_ZERO;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
* Copyright 2016-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -19,6 +19,8 @@ package org.springframework.cloud.stream.binder.kafka.utils;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
|
||||
/**
|
||||
* Utility methods releated to Kafka topics.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public final class KafkaTopicUtils {
|
||||
@@ -28,22 +30,25 @@ public final class KafkaTopicUtils {
|
||||
}
|
||||
|
||||
/**
|
||||
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
|
||||
* Validate topic name. Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
|
||||
* @param topicName name of the topic
|
||||
*/
|
||||
public static void validateTopicName(String topicName) {
|
||||
try {
|
||||
byte[] utf8 = topicName.getBytes("UTF-8");
|
||||
for (byte b : utf8) {
|
||||
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|
||||
|| (b == '-') || (b == '_'))) {
|
||||
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z')
|
||||
|| (b >= '0') && (b <= '9') || (b == '.') || (b == '-')
|
||||
|| (b == '_'))) {
|
||||
throw new IllegalArgumentException(
|
||||
"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '" + topicName
|
||||
+ "'");
|
||||
"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '"
|
||||
+ topicName + "'");
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (UnsupportedEncodingException e) {
|
||||
throw new AssertionError(e); // Can't happen
|
||||
catch (UnsupportedEncodingException ex) {
|
||||
throw new AssertionError(ex); // Can't happen
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -0,0 +1,163 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.assertj.core.util.Files;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
public class KafkaBinderConfigurationPropertiesTest {
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaProperties() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
kafkaProperties.getConsumer().setGroupId("group1");
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration =
|
||||
kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaProperties() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
kafkaProperties.getConsumer().setEnableAutoCommit(true);
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration =
|
||||
kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConfiguration() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConfiguration(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConfiguration() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConfiguration(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConsumerProperties() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConsumerProperties(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConsumerProps() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
kafkaBinderConfigurationProperties
|
||||
.setConsumerProperties(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));
|
||||
|
||||
Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();
|
||||
|
||||
assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCertificateFilesAreConvertedToAbsolutePathsFromClassPathResources() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
final Map<String, String> configuration = kafkaBinderConfigurationProperties.getConfiguration();
|
||||
configuration.put("ssl.truststore.location", "classpath:testclient.truststore");
|
||||
configuration.put("ssl.keystore.location", "classpath:testclient.keystore");
|
||||
|
||||
kafkaBinderConfigurationProperties.getKafkaConnectionString();
|
||||
|
||||
assertThat(configuration.get("ssl.truststore.location"))
|
||||
.isEqualTo(Paths.get(System.getProperty("java.io.tmpdir"), "testclient.truststore").toString());
|
||||
assertThat(configuration.get("ssl.keystore.location"))
|
||||
.isEqualTo(Paths.get(System.getProperty("java.io.tmpdir"), "testclient.keystore").toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCertificateFilesAreConvertedToGivenAbsolutePathsFromClassPathResources() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
final Map<String, String> configuration = kafkaBinderConfigurationProperties.getConfiguration();
|
||||
configuration.put("ssl.truststore.location", "classpath:testclient.truststore");
|
||||
configuration.put("ssl.keystore.location", "classpath:testclient.keystore");
|
||||
kafkaBinderConfigurationProperties.setCertificateStoreDirectory("target");
|
||||
|
||||
kafkaBinderConfigurationProperties.getKafkaConnectionString();
|
||||
|
||||
assertThat(configuration.get("ssl.truststore.location")).isEqualTo(
|
||||
Paths.get(Files.currentFolder().toString(), "target", "testclient.truststore").toString());
|
||||
assertThat(configuration.get("ssl.keystore.location")).isEqualTo(
|
||||
Paths.get(Files.currentFolder().toString(), "target", "testclient.keystore").toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCertificateFilesAreMovedForSchemaRegistryConfiguration() {
|
||||
KafkaProperties kafkaProperties = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
|
||||
new KafkaBinderConfigurationProperties(kafkaProperties);
|
||||
final Map<String, String> configuration = kafkaBinderConfigurationProperties.getConfiguration();
|
||||
configuration.put("schema.registry.ssl.truststore.location", "classpath:testclient.truststore");
|
||||
configuration.put("schema.registry.ssl.keystore.location", "classpath:testclient.keystore");
|
||||
kafkaBinderConfigurationProperties.setCertificateStoreDirectory("target");
|
||||
|
||||
kafkaBinderConfigurationProperties.getKafkaConnectionString();
|
||||
|
||||
assertThat(configuration.get("schema.registry.ssl.truststore.location")).isEqualTo(
|
||||
Paths.get(Files.currentFolder().toString(), "target", "testclient.truststore").toString());
|
||||
assertThat(configuration.get("schema.registry.ssl.keystore.location")).isEqualTo(
|
||||
Paths.get(Files.currentFolder().toString(), "target", "testclient.keystore").toString());
|
||||
}
|
||||
}
|
||||
@@ -1,11 +1,11 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
* Copyright 2018-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
@@ -35,7 +35,6 @@ import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.fail;
|
||||
|
||||
|
||||
/**
|
||||
* @author Gary Russell
|
||||
* @since 2.0
|
||||
@@ -43,22 +42,34 @@ import static org.assertj.core.api.Assertions.fail;
|
||||
*/
|
||||
public class KafkaTopicProvisionerTests {
|
||||
|
||||
AdminClientConfigCustomizer adminClientConfigCustomizer = adminClientProperties -> adminClientProperties.put("foo", "bar");
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Test
|
||||
public void bootPropertiesOverriddenExceptServers() throws Exception {
|
||||
KafkaProperties bootConfig = new KafkaProperties();
|
||||
bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
|
||||
bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
|
||||
"PLAINTEXT");
|
||||
bootConfig.setBootstrapServers(Collections.singletonList("localhost:1234"));
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
|
||||
binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
|
||||
bootConfig);
|
||||
binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG,
|
||||
"SSL");
|
||||
ClassPathResource ts = new ClassPathResource("test.truststore.ks");
|
||||
binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
|
||||
binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
|
||||
ts.getFile().getAbsolutePath());
|
||||
binderConfig.setBrokers("localhost:9092");
|
||||
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
|
||||
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
|
||||
bootConfig, adminClientConfigCustomizer);
|
||||
AdminClient adminClient = provisioner.createAdminClient();
|
||||
assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
|
||||
Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
|
||||
assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
|
||||
assertThat(KafkaTestUtils.getPropertyValue(adminClient,
|
||||
"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
|
||||
Map configs = KafkaTestUtils.getPropertyValue(adminClient,
|
||||
"client.selector.channelBuilder.configs", Map.class);
|
||||
assertThat(
|
||||
((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
|
||||
.isEqualTo("localhost:1234");
|
||||
assertThat(configs.get("foo")).isEqualTo("bar");
|
||||
adminClient.close();
|
||||
}
|
||||
|
||||
@@ -66,33 +77,44 @@ public class KafkaTopicProvisionerTests {
|
||||
@Test
|
||||
public void bootPropertiesOverriddenIncludingServers() throws Exception {
|
||||
KafkaProperties bootConfig = new KafkaProperties();
|
||||
bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
|
||||
bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
|
||||
"PLAINTEXT");
|
||||
bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
|
||||
binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
|
||||
bootConfig);
|
||||
binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG,
|
||||
"SSL");
|
||||
ClassPathResource ts = new ClassPathResource("test.truststore.ks");
|
||||
binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
|
||||
binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
|
||||
ts.getFile().getAbsolutePath());
|
||||
binderConfig.setBrokers("localhost:1234");
|
||||
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
|
||||
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
|
||||
bootConfig, adminClientConfigCustomizer);
|
||||
AdminClient adminClient = provisioner.createAdminClient();
|
||||
assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
|
||||
Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
|
||||
assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
|
||||
assertThat(KafkaTestUtils.getPropertyValue(adminClient,
|
||||
"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
|
||||
Map configs = KafkaTestUtils.getPropertyValue(adminClient,
|
||||
"client.selector.channelBuilder.configs", Map.class);
|
||||
assertThat(
|
||||
((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
|
||||
.isEqualTo("localhost:1234");
|
||||
adminClient.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void brokersInvalid() throws Exception {
|
||||
KafkaProperties bootConfig = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
|
||||
binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:1234");
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
|
||||
bootConfig);
|
||||
binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
|
||||
"localhost:1234");
|
||||
try {
|
||||
new KafkaTopicProvisioner(binderConfig, bootConfig);
|
||||
new KafkaTopicProvisioner(binderConfig, bootConfig, adminClientConfigCustomizer);
|
||||
fail("Expected illegal state");
|
||||
}
|
||||
catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage())
|
||||
.isEqualTo("Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
|
||||
assertThat(e.getMessage()).isEqualTo(
|
||||
"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,337 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>2.1.0.M1</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
|
||||
<name>spring-cloud-stream-binder-kafka-docs</name>
|
||||
<description>Spring Cloud Stream Kafka Binder Docs</description>
|
||||
<properties>
|
||||
<main.basedir>${basedir}/..</main.basedir>
|
||||
</properties>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>full</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>xml-maven-plugin</artifactId>
|
||||
<version>1.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>transform</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
<configuration>
|
||||
<transformationSets>
|
||||
<transformationSet>
|
||||
<dir>${project.build.directory}/external-resources</dir>
|
||||
<stylesheet>src/main/xslt/dependencyVersions.xsl</stylesheet>
|
||||
<fileMappers>
|
||||
<fileMapper implementation="org.codehaus.plexus.components.io.filemappers.FileExtensionMapper">
|
||||
<targetExtension>.adoc</targetExtension>
|
||||
</fileMapper>
|
||||
</fileMappers>
|
||||
<outputDir>${project.build.directory}/generated-resources</outputDir>
|
||||
</transformationSet>
|
||||
</transformationSets>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-javadoc-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-javadocs</id>
|
||||
<goals>
|
||||
<goal>jar</goal>
|
||||
</goals>
|
||||
<phase>prepare-package</phase>
|
||||
<configuration>
|
||||
<includeDependencySources>true</includeDependencySources>
|
||||
<dependencySourceIncludes>
|
||||
<dependencySourceInclude>${project.groupId}:*</dependencySourceInclude>
|
||||
</dependencySourceIncludes>
|
||||
<attach>false</attach>
|
||||
<quiet>true</quiet>
|
||||
<stylesheetfile>${basedir}/src/main/javadoc/spring-javadoc.css</stylesheetfile>
|
||||
<links>
|
||||
<link>http://docs.spring.io/spring-framework/docs/${spring.version}/javadoc-api/</link>
|
||||
<link>http://docs.spring.io/spring-shell/docs/current/api/</link>
|
||||
</links>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.asciidoctor</groupId>
|
||||
<artifactId>asciidoctor-maven-plugin</artifactId>
|
||||
<version>1.5.0</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>generate-docbook</id>
|
||||
<phase>generate-resources</phase>
|
||||
<goals>
|
||||
<goal>process-asciidoc</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<sourceDocumentName>index.adoc</sourceDocumentName>
|
||||
<backend>docbook5</backend>
|
||||
<doctype>book</doctype>
|
||||
<attributes>
|
||||
<docinfo>true</docinfo>
|
||||
<spring-cloud-stream-binder-kafka-version>${project.version}</spring-cloud-stream-binder-kafka-version>
|
||||
<spring-cloud-stream-binder-kafka-docs-version>${project.version}</spring-cloud-stream-binder-kafka-docs-version>
|
||||
<github-tag>${github-tag}</github-tag>
|
||||
</attributes>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>com.agilejava.docbkx</groupId>
|
||||
<artifactId>docbkx-maven-plugin</artifactId>
|
||||
<version>2.0.15</version>
|
||||
<configuration>
|
||||
<sourceDirectory>${basedir}/target/generated-docs</sourceDirectory>
|
||||
<includes>index.xml</includes>
|
||||
<xincludeSupported>true</xincludeSupported>
|
||||
<chunkedOutput>false</chunkedOutput>
|
||||
<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
|
||||
<useExtensions>1</useExtensions>
|
||||
<highlightSource>1</highlightSource>
|
||||
<highlightXslthlConfig>${basedir}/src/main/docbook/xsl/xslthl-config.xml</highlightXslthlConfig>
|
||||
</configuration>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>net.sf.xslthl</groupId>
|
||||
<artifactId>xslthl</artifactId>
|
||||
<version>2.1.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>net.sf.docbook</groupId>
|
||||
<artifactId>docbook-xml</artifactId>
|
||||
<version>5.0-all</version>
|
||||
<classifier>resources</classifier>
|
||||
<type>zip</type>
|
||||
<scope>runtime</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>html-single</id>
|
||||
<goals>
|
||||
<goal>generate-html</goal>
|
||||
</goals>
|
||||
<phase>generate-resources</phase>
|
||||
<configuration>
|
||||
<htmlCustomization>${basedir}/src/main/docbook/xsl/html-singlepage.xsl</htmlCustomization>
|
||||
<targetDirectory>${basedir}/target/docbook/htmlsingle</targetDirectory>
|
||||
<postProcess>
|
||||
<copy todir="${basedir}/target/contents/reference/htmlsingle">
|
||||
<fileset dir="${basedir}/target/docbook/htmlsingle">
|
||||
<include name="**/*.html" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<copy todir="${basedir}/target/contents/reference/htmlsingle">
|
||||
<fileset dir="${basedir}/src/main/docbook">
|
||||
<include name="**/*.css" />
|
||||
<include name="**/*.png" />
|
||||
<include name="**/*.gif" />
|
||||
<include name="**/*.jpg" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<copy todir="${basedir}/target/contents/reference/htmlsingle">
|
||||
<fileset dir="${basedir}/src/main/asciidoc">
|
||||
<include name="images/*.css" />
|
||||
<include name="images/*.png" />
|
||||
<include name="images/*.gif" />
|
||||
<include name="images/*.jpg" />
|
||||
</fileset>
|
||||
</copy>
|
||||
</postProcess>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>html</id>
|
||||
<goals>
|
||||
<goal>generate-html</goal>
|
||||
</goals>
|
||||
<phase>generate-resources</phase>
|
||||
<configuration>
|
||||
<htmlCustomization>${basedir}/src/main/docbook/xsl/html-multipage.xsl</htmlCustomization>
|
||||
<targetDirectory>${basedir}/target/docbook/html</targetDirectory>
|
||||
<chunkedOutput>true</chunkedOutput>
|
||||
<postProcess>
|
||||
<copy todir="${basedir}/target/contents/reference/html">
|
||||
<fileset dir="${basedir}/target/docbook/html">
|
||||
<include name="**/*.html" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<copy todir="${basedir}/target/contents/reference/html">
|
||||
<fileset dir="${basedir}/src/main/docbook">
|
||||
<include name="**/*.css" />
|
||||
<include name="**/*.png" />
|
||||
<include name="**/*.gif" />
|
||||
<include name="**/*.jpg" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<copy todir="${basedir}/target/contents/reference/html">
|
||||
<fileset dir="${basedir}/src/main/asciidoc">
|
||||
<include name="images/*.css" />
|
||||
<include name="images/*.png" />
|
||||
<include name="images/*.gif" />
|
||||
<include name="images/*.jpg" />
|
||||
</fileset>
|
||||
</copy>
|
||||
</postProcess>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>pdf</id>
|
||||
<goals>
|
||||
<goal>generate-pdf</goal>
|
||||
</goals>
|
||||
<phase>generate-resources</phase>
|
||||
<configuration>
|
||||
<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
|
||||
<targetDirectory>${basedir}/target/docbook/pdf</targetDirectory>
|
||||
<postProcess>
|
||||
<copy todir="${basedir}/target/contents/reference">
|
||||
<fileset dir="${basedir}/target/docbook">
|
||||
<include name="**/*.pdf" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<move file="${basedir}/target/contents/reference/pdf/index.pdf" tofile="${basedir}/target/contents/reference/pdf/spring-cloud-stream-reference.pdf" />
|
||||
</postProcess>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>epub</id>
|
||||
<goals>
|
||||
<goal>generate-epub3</goal>
|
||||
</goals>
|
||||
<phase>generate-resources</phase>
|
||||
<configuration>
|
||||
<epubCustomization>${basedir}/src/main/docbook/xsl/epub.xsl</epubCustomization>
|
||||
<targetDirectory>${basedir}/target/docbook/epub</targetDirectory>
|
||||
<postProcess>
|
||||
<copy todir="${basedir}/target/contents/reference/epub">
|
||||
<fileset dir="${basedir}/target/docbook">
|
||||
<include name="**/*.epub" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<move file="${basedir}/target/contents/reference/epub/index.epub" tofile="${basedir}/target/contents/reference/epub/spring-cloud-stream-reference.epub" />
|
||||
</postProcess>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>ant-contrib</groupId>
|
||||
<artifactId>ant-contrib</artifactId>
|
||||
<version>1.0b3</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>ant</groupId>
|
||||
<artifactId>ant</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.ant</groupId>
|
||||
<artifactId>ant-nodeps</artifactId>
|
||||
<version>1.8.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.tigris.antelope</groupId>
|
||||
<artifactId>antelopetasks</artifactId>
|
||||
<version>3.2.10</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>package-and-attach-docs-zip</id>
|
||||
<phase>package</phase>
|
||||
<goals>
|
||||
<goal>run</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<target>
|
||||
<zip destfile="${project.build.directory}/${project.artifactId}-${project.version}.zip">
|
||||
<zipfileset src="${project.build.directory}/${project.artifactId}-${project.version}-javadoc.jar" prefix="api" />
|
||||
<fileset dir="${project.build.directory}/contents" />
|
||||
</zip>
|
||||
</target>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>setup-maven-properties</id>
|
||||
<phase>validate</phase>
|
||||
<goals>
|
||||
<goal>run</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<exportAntProperties>true</exportAntProperties>
|
||||
<target>
|
||||
<taskdef resource="net/sf/antcontrib/antcontrib.properties" />
|
||||
<taskdef name="stringutil" classname="ise.antelope.tasks.StringUtilTask" />
|
||||
<var name="version-type" value="${project.version}" />
|
||||
<propertyregex property="version-type" override="true" input="${version-type}" regexp=".*\.(.*)" replace="\1" />
|
||||
<propertyregex property="version-type" override="true" input="${version-type}" regexp="(M)\d+" replace="MILESTONE" />
|
||||
<propertyregex property="version-type" override="true" input="${version-type}" regexp="(RC)\d+" replace="MILESTONE" />
|
||||
<propertyregex property="version-type" override="true" input="${version-type}" regexp="BUILD-(.*)" replace="SNAPSHOT" />
|
||||
<stringutil string="${version-type}" property="spring-boot-repo">
|
||||
<lowercase />
|
||||
</stringutil>
|
||||
<var name="github-tag" value="v${project.version}" />
|
||||
<propertyregex property="github-tag" override="true" input="${github-tag}" regexp=".*SNAPSHOT" replace="master" />
|
||||
</target>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>build-helper-maven-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>attach-zip</id>
|
||||
<goals>
|
||||
<goal>attach-artifact</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<artifacts>
|
||||
<artifact>
|
||||
<file>${project.build.directory}/${project.artifactId}-${project.version}.zip</file>
|
||||
<type>zip;zip.type=docs;zip.deployed=false</type>
|
||||
</artifact>
|
||||
</artifacts>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
</project>
|
||||
@@ -1,37 +0,0 @@
|
||||
[[spring-cloud-stream-binder-kafka-reference]]
|
||||
= Spring Cloud Stream Kafka Binder Reference Guide
|
||||
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell
|
||||
:doctype: book
|
||||
:toc:
|
||||
:toclevels: 4
|
||||
:source-highlighter: prettify
|
||||
:numbered:
|
||||
:icons: font
|
||||
:hide-uri-scheme:
|
||||
:spring-cloud-stream-binder-kafka-repo: snapshot
|
||||
:github-tag: master
|
||||
:spring-cloud-stream-binder-kafka-docs-version: current
|
||||
:spring-cloud-stream-binder-kafka-docs: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
|
||||
:spring-cloud-stream-binder-kafka-docs-current: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
:github-raw: http://raw.github.com/{github-repo}/{github-tag}
|
||||
:github-code: http://github.com/{github-repo}/tree/{github-tag}
|
||||
:github-wiki: http://github.com/{github-repo}/wiki
|
||||
:github-master-code: http://github.com/{github-repo}/tree/master
|
||||
:sc-ext: java
|
||||
// ======================================================================================
|
||||
|
||||
= Reference Guide
|
||||
include::overview.adoc[]
|
||||
|
||||
include::dlq.adoc[]
|
||||
|
||||
include::partitions.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
@@ -1,654 +0,0 @@
|
||||
== Usage
|
||||
|
||||
For using the Kafka Streams binder, you just need to add it to your Spring Cloud Stream application, using the following
|
||||
Maven coordinates:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
== Kafka Streams Binder Overview
|
||||
|
||||
Spring Cloud Stream's Apache Kafka support also includes a binder implementation designed explicitly for Apache Kafka
|
||||
Streams binding. With this native integration, a Spring Cloud Stream "processor" application can directly use the
|
||||
https://kafka.apache.org/documentation/streams/developer-guide[Apache Kafka Streams] APIs in the core business logic.
|
||||
|
||||
Kafka Streams binder implementation builds on the foundation provided by the http://docs.spring.io/spring-kafka/reference/html/_reference.html#kafka-streams[Kafka Streams in Spring Kafka]
|
||||
project.
|
||||
|
||||
As part of this native integration, the high-level https://docs.confluent.io/current/streams/developer-guide/dsl-api.html[Streams DSL]
|
||||
provided by the Kafka Streams API is available for use in the business logic, too.
|
||||
|
||||
An early version of the https://docs.confluent.io/current/streams/developer-guide/processor-api.html[Processor API]
|
||||
support is available as well.
|
||||
|
||||
As noted early-on, Kafka Streams support in Spring Cloud Stream strictly only available for use in the Processor model.
|
||||
A model in which the messages read from an inbound topic, business processing can be applied, and the transformed messages
|
||||
can be written to an outbound topic. It can also be used in Processor applications with a no-outbound destination.
|
||||
|
||||
=== Streams DSL
|
||||
|
||||
This application consumes data from a Kafka topic (e.g., `words`), computes word count for each unique word in a 5 seconds
|
||||
time window, and the computed results are sent to a downstream topic (e.g., `counts`) for further processing.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
public class WordCountProcessorApplication {
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
public KStream<?, WordCount> process(KStream<?, String> input) {
|
||||
return input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.windowedBy(TimeWindows.of(5000))
|
||||
.count(Materialized.as("WordCounts-multi"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(WordCountProcessorApplication.class, args);
|
||||
}
|
||||
----
|
||||
|
||||
Once built as a uber-jar (e.g., `wordcount-processor.jar`), you can run the above example like the following.
|
||||
|
||||
[source]
|
||||
----
|
||||
java -jar wordcount-processor.jar --spring.cloud.stream.bindings.input.destination=words --spring.cloud.stream.bindings.output.destination=counts
|
||||
----
|
||||
|
||||
This application will consume messages from the Kafka topic `words` and the computed results are published to an output
|
||||
topic `counts`.
|
||||
|
||||
Spring Cloud Stream will ensure that the messages from both the incoming and outgoing topics are automatically bound as
|
||||
KStream objects. As a developer, you can exclusively focus on the business aspects of the code, i.e. writing the logic
|
||||
required in the processor. Setting up the Streams DSL specific configuration required by the Kafka Streams infrastructure
|
||||
is automatically handled by the framework.
|
||||
|
||||
== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Kafka Streams binder.
|
||||
|
||||
For common configuration options and properties pertaining to binder, refer to the <<binding-properties,core documentation>>.
|
||||
|
||||
=== Kafka Streams Properties
|
||||
|
||||
The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.streams.binder.`
|
||||
literal.
|
||||
|
||||
configuration::
|
||||
Map with a key/value pair containing properties pertaining to Apache Kafka Streams API.
|
||||
This property must be prefixed with `spring.cloud.stream.kafka.streams.binder.`.
|
||||
Following are some examples of using this property.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000
|
||||
----
|
||||
|
||||
For more information about all the properties that may go into streams configuration, see StreamsConfig JavaDocs in
|
||||
Apache Kafka Streams docs.
|
||||
|
||||
brokers::
|
||||
Broker URL
|
||||
+
|
||||
Default: `localhost`
|
||||
zkNodes::
|
||||
Zookeeper URL
|
||||
+
|
||||
Default: `localhost`
|
||||
serdeError::
|
||||
Deserialization error handler type.
|
||||
Possible values are - `logAndContinue`, `logAndFail` or `sendToDlq`
|
||||
+
|
||||
Default: `logAndFail`
|
||||
applicationId::
|
||||
Application ID for all the stream configurations in the current application context.
|
||||
You can override the application id for an individual `StreamListener` method using the `group` property on the binding.
|
||||
You have to ensure that you are using the same group name for all input bindings in the case of multiple inputs on the same methods.
|
||||
+
|
||||
Default: `default`
|
||||
|
||||
The following properties are _only_ available for Kafka Streams producers and must be prefixed with `spring.cloud.stream.kafka.streams.bindings.<binding name>.producer.`
|
||||
literal.
|
||||
|
||||
keySerde::
|
||||
key serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
valueSerde::
|
||||
value serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
useNativeEncoding::
|
||||
flag to enable native encoding
|
||||
+
|
||||
Default: `false`.
|
||||
|
||||
The following properties are _only_ available for Kafka Streams consumers and must be prefixed with `spring.cloud.stream.kafka.streams.bindings.<binding name>.consumer.`
|
||||
literal.
|
||||
|
||||
keySerde::
|
||||
key serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
valueSerde::
|
||||
value serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
materializedAs::
|
||||
state store to materialize when using incoming KTable types
|
||||
+
|
||||
Default: `none`.
|
||||
useNativeDecoding::
|
||||
flag to enable native decoding
|
||||
+
|
||||
Default: `false`.
|
||||
dlqName::
|
||||
DLQ topic name.
|
||||
+
|
||||
Default: `none`.
|
||||
|
||||
=== TimeWindow properties:
|
||||
|
||||
Windowing is an important concept in stream processing applications. Following properties are available to configure
|
||||
time-window computations.
|
||||
|
||||
spring.cloud.stream.kafka.streams.timeWindow.length::
|
||||
When this property is given, you can autowire a `TimeWindows` bean into the application.
|
||||
The value is expressed in milliseconds.
|
||||
+
|
||||
Default: `none`.
|
||||
spring.cloud.stream.kafka.streams.timeWindow.advanceBy::
|
||||
Value is given in milliseconds.
|
||||
+
|
||||
Default: `none`.
|
||||
|
||||
== Multiple Input Bindings
|
||||
|
||||
For use cases that requires multiple incoming KStream objects or a combination of KStream and KTable objects, the Kafka
|
||||
Streams binder provides multiple bindings support.
|
||||
|
||||
Let's see it in action.
|
||||
|
||||
=== Multiple Input Bindings as a Sink
|
||||
|
||||
[source]
|
||||
----
|
||||
@EnableBinding(KStreamKTableBinding.class)
|
||||
.....
|
||||
.....
|
||||
@StreamListener
|
||||
public void process(@Input("inputStream") KStream<String, PlayEvent> playEvents,
|
||||
@Input("inputTable") KTable<Long, Song> songTable) {
|
||||
....
|
||||
....
|
||||
}
|
||||
|
||||
interface KStreamKTableBinding {
|
||||
|
||||
@Input("inputStream")
|
||||
KStream<?, ?> inputStream();
|
||||
|
||||
@Input("inputTable")
|
||||
KTable<?, ?> inputTable();
|
||||
}
|
||||
|
||||
----
|
||||
|
||||
In the above example, the application is written as a sink, i.e. there are no output bindings and the application has to
|
||||
decide concerning downstream processing. When you write applications in this style, you might want to send the information
|
||||
downstream or store them in a state store (See below for Queryable State Stores).
|
||||
|
||||
In the case of incoming KTable, if you want to materialize the computations to a state store, you have to express it
|
||||
through the following property.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.inputTable.consumer.materializedAs: all-songs
|
||||
----
|
||||
|
||||
=== Multiple Input Bindings as a Processor
|
||||
|
||||
[source]
|
||||
----
|
||||
@EnableBinding(KStreamKTableBinding.class)
|
||||
....
|
||||
....
|
||||
|
||||
@StreamListener
|
||||
@SendTo("output")
|
||||
public KStream<String, Long> process(@Input("input") KStream<String, Long> userClicksStream,
|
||||
@Input("inputTable") KTable<String, String> userRegionsTable) {
|
||||
....
|
||||
....
|
||||
}
|
||||
|
||||
interface KStreamKTableBinding extends KafkaStreamsProcessor {
|
||||
|
||||
@Input("inputX")
|
||||
KTable<?, ?> inputTable();
|
||||
}
|
||||
|
||||
----
|
||||
|
||||
== Multiple Output Bindings (aka Branching)
|
||||
|
||||
Kafka Streams allow outbound data to be split into multiple topics based on some predicates. The Kafka Streams binder provides
|
||||
support for this feature without compromising the programming model exposed through `StreamListener` in the end user application.
|
||||
|
||||
You can write the application in the usual way as demonstrated above in the word count example. However, when using the
|
||||
branching feature, you are required to do a few things. First, you need to make sure that your return type is `KStream[]`
|
||||
instead of a regular `KStream`. Second, you need to use the `SendTo` annotation containing the output bindings in the order
|
||||
(see example below). For each of these output bindings, you need to configure destination, content-type etc., complying with
|
||||
the standard Spring Cloud Stream expectations.
|
||||
|
||||
Here is an example:
|
||||
|
||||
[source]
|
||||
----
|
||||
@EnableBinding(KStreamProcessorWithBranches.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class WordCountProcessorApplication {
|
||||
|
||||
@Autowired
|
||||
private TimeWindows timeWindows;
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo({"output1","output2","output3})
|
||||
public KStream<?, WordCount>[] process(KStream<Object, String> input) {
|
||||
|
||||
Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
|
||||
Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
|
||||
Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");
|
||||
|
||||
return input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.windowedBy(timeWindows)
|
||||
.count(Materialized.as("WordCounts-1"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))))
|
||||
.branch(isEnglish, isFrench, isSpanish);
|
||||
}
|
||||
|
||||
interface KStreamProcessorWithBranches {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> input();
|
||||
|
||||
@Output("output1")
|
||||
KStream<?, ?> output1();
|
||||
|
||||
@Output("output2")
|
||||
KStream<?, ?> output2();
|
||||
|
||||
@Output("output3")
|
||||
KStream<?, ?> output3();
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
Properties:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output1.contentType: application/json
|
||||
spring.cloud.stream.bindings.output2.contentType: application/json
|
||||
spring.cloud.stream.bindings.output3.contentType: application/json
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms: 1000
|
||||
spring.cloud.stream.kafka.streams.binder.configuration:
|
||||
default.key.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
default.value.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.bindings.output1:
|
||||
destination: foo
|
||||
producer:
|
||||
headerMode: raw
|
||||
spring.cloud.stream.bindings.output2:
|
||||
destination: bar
|
||||
producer:
|
||||
headerMode: raw
|
||||
spring.cloud.stream.bindings.output3:
|
||||
destination: fox
|
||||
producer:
|
||||
headerMode: raw
|
||||
spring.cloud.stream.bindings.input:
|
||||
destination: words
|
||||
consumer:
|
||||
headerMode: raw
|
||||
----
|
||||
|
||||
== Message Conversion
|
||||
|
||||
Similar to message-channel based binder applications, the Kafka Streams binder adapts to the out-of-the-box content-type
|
||||
conversions without any compromise.
|
||||
|
||||
It is typical for Kafka Streams operations to know the type of SerDe’s used to transform the key and value correctly.
|
||||
Therefore, it may be more natural to rely on the SerDe facilities provided by the Apache Kafka Streams library itself at
|
||||
the inbound and outbound conversions rather than using the content-type conversions offered by the framework.
|
||||
On the other hand, you might be already familiar with the content-type conversion patterns provided by the framework, and
|
||||
you might want to continue using them for inbound and outbound conversions.
|
||||
|
||||
Both the options are supported in the Kafka Streams binder implementation.
|
||||
|
||||
==== Outbound serialization
|
||||
|
||||
If native encoding is disabled (which is the default), then the framework will convert the message using the contentType
|
||||
set by the user (otherwise, the default `application/json` will be applied). It will ignore any SerDe set on the outbound
|
||||
in this case for outbound serialization.
|
||||
|
||||
Here is the property to set the contentType on the outbound.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output.contentType: application/json
|
||||
----
|
||||
|
||||
Here is the property to enable native encoding.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output.nativeEncoding: true
|
||||
----
|
||||
|
||||
If native encoding is enabled on the output binding (user has to enable it as above explicitly), then the framework will
|
||||
skip any form of automatic message conversion on the outbound. In that case, it will switch to the Serde set by the user.
|
||||
The `valueSerde` property set on the actual output binding will be used. Here is an example.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
----
|
||||
If this property is not set, then it will use the "default" SerDe: `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.
|
||||
|
||||
It is worth mentioning that the Kafka Streams binder does not serialize the keys on outbound - it simply relies on Kafka itself.
|
||||
Therefore, you either have to specify the `keySerde` property on the binding or it will default to the application-wide common
|
||||
`keySerde`.
|
||||
|
||||
Binding level key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde
|
||||
----
|
||||
|
||||
Common Key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
|
||||
----
|
||||
|
||||
If branching is used, then you need to use multiple output bindings. For example,
|
||||
|
||||
[source]
|
||||
----
|
||||
interface KStreamProcessorWithBranches {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> input();
|
||||
|
||||
@Output("output1")
|
||||
KStream<?, ?> output1();
|
||||
|
||||
@Output("output2")
|
||||
KStream<?, ?> output2();
|
||||
|
||||
@Output("output3")
|
||||
KStream<?, ?> output3();
|
||||
}
|
||||
----
|
||||
|
||||
If `nativeEncoding` is set, then you can set different SerDe's on individual output bindings as below.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.output1.producer.valueSerde=IntegerSerde
|
||||
spring.cloud.stream.kafka.streams.bindings.output2.producer.valueSerde=StringSerde
|
||||
spring.cloud.stream.kafka.streams.bindings.output3.producer.valueSerde=JsonSerde
|
||||
----
|
||||
|
||||
Then if you have `SendTo` like this, @SendTo({"output1", "output2", "output3"}), the `KStream[]` from the branches are
|
||||
applied with proper SerDe objects as defined above. If you are not enabling `nativeEncoding`, you can then set different
|
||||
contentType values on the output bindings as below. In that case, the framework will use the appropriate message converter
|
||||
to convert the messages before sending to Kafka.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output1.contentType: application/json
|
||||
spring.cloud.stream.bindings.output2.contentType: application/java-serialized-object
|
||||
spring.cloud.stream.bindings.output3.contentType: application/octet-stream
|
||||
----
|
||||
|
||||
==== Inbound Deserialization
|
||||
|
||||
Similar rules apply to data deserialization on the inbound.
|
||||
|
||||
If native decoding is disabled (which is the default), then the framework will convert the message using the contentType
|
||||
set by the user (otherwise, the default `application/json` will be applied). It will ignore any SerDe set on the inbound
|
||||
in this case for inbound deserialization.
|
||||
|
||||
Here is the property to set the contentType on the inbound.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.input.contentType: application/json
|
||||
----
|
||||
|
||||
Here is the property to enable native decoding.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.input.nativeDecoding: true
|
||||
----
|
||||
|
||||
If native decoding is enabled on the input binding (user has to enable it as above explicitly), then the framework will
|
||||
skip doing any message conversion on the inbound. In that case, it will switch to the SerDe set by the user. The `valueSerde`
|
||||
property set on the actual input binding will be used. Here is an example.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
----
|
||||
|
||||
If this property is not set, it will use the default SerDe: `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.
|
||||
|
||||
It is worth mentioning that the Kafka Streams binder does not deserialize the keys on inbound - it simply relies on Kafka itself.
|
||||
Therefore, you either have to specify the `keySerde` property on the binding or it will default to the application-wide common
|
||||
`keySerde`.
|
||||
|
||||
Binding level key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde
|
||||
----
|
||||
|
||||
Common Key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
|
||||
----
|
||||
|
||||
As in the case of KStream branching on the outbound, the benefit of setting value SerDe per binding is that if you have
|
||||
multiple input bindings (multiple KStreams object) and they all require separate value SerDe's, then you can configure
|
||||
them individually. If you use the common configuration approach, then this feature won't be applicable.
|
||||
|
||||
== Error Handling
|
||||
|
||||
Apache Kafka Streams provide the capability for natively handling exceptions from deserialization errors.
|
||||
For details on this support, please see https://cwiki.apache.org/confluence/display/KAFKA/KIP-161%3A+streams+deserialization+exception+handlers[this]
|
||||
Out of the box, Apache Kafka Streams provide two kinds of deserialization exception handlers - `logAndContinue` and `logAndFail`.
|
||||
As the name indicates, the former will log the error and continue processing the next records and the latter will log the
|
||||
error and fail. `LogAndFail` is the default deserialization exception handler.
|
||||
|
||||
=== Handling Deserialization Exceptions
|
||||
|
||||
Kafka Streams binder supports a selection of exception handlers through the following properties.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.serdeError: logAndContinue
|
||||
----
|
||||
|
||||
In addition to the above two deserialization exception handlers, the binder also provides a third one for sending the erroneous
|
||||
records (poison pills) to a DLQ topic. Here is how you enable this DLQ exception handler.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.serdeError: sendToDlq
|
||||
----
|
||||
When the above property is set, all the deserialization error records are automatically sent to the DLQ topic.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.input.consumer.dlqName: foo-dlq
|
||||
----
|
||||
|
||||
If this is set, then the error records are sent to the topic `foo-dlq`. If this is not set, then it will create a DLQ
|
||||
topic with the name `error.<input-topic-name>.<group-name>`.
|
||||
|
||||
A couple of things to keep in mind when using the exception handling feature in Kafka Streams binder.
|
||||
|
||||
* The property `spring.cloud.stream.kafka.streams.binder.serdeError` is applicable for the entire application. This implies
|
||||
that if there are multiple `StreamListener` methods in the same application, this property is applied to all of them.
|
||||
* The exception handling for deserialization works consistently with native deserialization and framework provided message
|
||||
conversion.
|
||||
|
||||
=== Handling Non-Deserialization Exceptions
|
||||
|
||||
For general error handling in Kafka Streams binder, it is up to the end user applications to handle application level errors.
|
||||
As a side effect of providing a DLQ for deserialization exception handlers, Kafka Streams binder provides a way to get
|
||||
access to the DLQ sending bean directly from your application.
|
||||
Once you get access to that bean, you can programmatically send any exception records from your application to the DLQ.
|
||||
|
||||
Robust error handling remains hard when using the high-level DSL; Kafka Streams doesn't natively support error
|
||||
handling yet.
|
||||
|
||||
However, when you use the low-level Processor API in your application, there are options to control this behavior. See
|
||||
below.
|
||||
|
||||
[source]
|
||||
----
|
||||
@Autowired
|
||||
private SendToDlqAndContinue dlqHandler;
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
public KStream<?, WordCount> process(KStream<Object, String> input) {
|
||||
|
||||
input.process(() -> new Processor() {
|
||||
ProcessorContext context;
|
||||
|
||||
@Override
|
||||
public void init(ProcessorContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process(Object o, Object o2) {
|
||||
|
||||
try {
|
||||
.....
|
||||
.....
|
||||
}
|
||||
catch(Exception e) {
|
||||
//explicitly provide the kafka topic corresponding to the input binding as the first argument.
|
||||
//DLQ handler will correctly map to the dlq topic from the actual incoming destination.
|
||||
dlqHandler.sendToDlq("topic-name", (byte[]) o, (byte[]) o2, context.partition());
|
||||
}
|
||||
}
|
||||
|
||||
.....
|
||||
.....
|
||||
});
|
||||
}
|
||||
----
|
||||
|
||||
== State Store
|
||||
|
||||
State store is created automatically by Kafka Streams when the DSL is used.
|
||||
When processor API is used, you need to register a state store manually. In order to do so, you can use `KafkaStreamsStateStore` annotation.
|
||||
You can specify the name and type of the store, flags to control log and disabling cache, etc.
|
||||
Once the store is created by the binder during the bootstrapping phase, you can access this state store through the processor API.
|
||||
Below are some primitives for doing this.
|
||||
|
||||
Creating a state store:
|
||||
[source]
|
||||
----
|
||||
@KafkaStreamsStateStore(name="mystate", type= KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs=300000)
|
||||
public void process(KStream<Object, Product> input) {
|
||||
...
|
||||
}
|
||||
----
|
||||
|
||||
Accessing the state store:
|
||||
[source]
|
||||
----
|
||||
Processor<Object, Product>() {
|
||||
|
||||
WindowStore<Object, String> state;
|
||||
|
||||
@Override
|
||||
public void init(ProcessorContext processorContext) {
|
||||
state = (WindowStore)processorContext.getStateStore("mystate");
|
||||
}
|
||||
...
|
||||
}
|
||||
----
|
||||
|
||||
== Interactive Queries
|
||||
|
||||
As part of the public Kafka Streams binder API, we expose a class called `InteractiveQueryService`.
|
||||
You can access this as a Spring bean in your application. An easy way to get access to this bean from your application is to "autowire" the bean.
|
||||
|
||||
[source]
|
||||
----
|
||||
@Autowired
|
||||
private InteractiveQueryService interactiveQueryService;
|
||||
----
|
||||
|
||||
Once you gain access to this bean, you can query for the particular state-store that you are interested in. See below.
|
||||
|
||||
[source]
|
||||
----
|
||||
ReadOnlyKeyValueStore<Object, Object> keyValueStore =
|
||||
interactiveQueryService.getQueryableStore("my-store", QueryableStoreTypes.keyValueStore());
|
||||
----
|
||||
|
||||
If there are multiple instances of the kafka streams application running, then before you can query them interactively, you need to identify which application instance hosts the key.
|
||||
`InteractiveQueryService` API provides methods for identifying the host information.
|
||||
|
||||
In order for this to work, you must configure the property `application.server` as below:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.application.server: <server>:<port>
|
||||
----
|
||||
|
||||
Here are some code snippets:
|
||||
|
||||
[source]
|
||||
----
|
||||
org.apache.kafka.streams.state.HostInfo hostInfo = interactiveQueryService.getHostInfo("store-name",
|
||||
key, keySerializer);
|
||||
|
||||
if (interactiveQueryService.getCurrentHostInfo().equals(hostInfo)) {
|
||||
|
||||
//query from the store that is locally available
|
||||
}
|
||||
else {
|
||||
//query from the remote host
|
||||
}
|
||||
----
|
||||
@@ -1,493 +0,0 @@
|
||||
[partintro]
|
||||
--
|
||||
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
|
||||
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
|
||||
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
|
||||
--
|
||||
|
||||
== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
== Apache Kafka Binder Overview
|
||||
|
||||
The following image shows a simplified diagram of how the Apache Kafka binder operates:
|
||||
|
||||
.Kafka Binder
|
||||
image::images/kafka-binder.png[width=300,scaledwidth="50%"]
|
||||
|
||||
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
|
||||
The consumer group maps directly to the same Apache Kafka concept.
|
||||
Partitioning also maps directly to Apache Kafka partitions as well.
|
||||
|
||||
The binder currently uses the Apache Kafka `kafka-clients` 1.0.0 jar and is designed to be used with a broker of at least that version.
|
||||
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
|
||||
For example, with versions earlier than 0.11.x.x, native headers are not supported.
|
||||
Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
|
||||
== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Apache Kafka binder.
|
||||
|
||||
For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
|
||||
|
||||
=== Kafka Binder Properties
|
||||
|
||||
spring.cloud.stream.kafka.binder.brokers::
|
||||
A list of brokers to which the Kafka binder connects.
|
||||
+
|
||||
Default: `localhost`.
|
||||
spring.cloud.stream.kafka.binder.defaultBrokerPort::
|
||||
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
|
||||
This sets the default port when no port is configured in the broker list.
|
||||
+
|
||||
Default: `9092`.
|
||||
spring.cloud.stream.kafka.binder.configuration::
|
||||
Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
|
||||
Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
|
||||
Properties here supersede any properties set in boot.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.consumerProperties::
|
||||
Key/Value map of arbitrary Kafka client consumer properties.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.headers::
|
||||
The list of custom headers that are transported by the binder.
|
||||
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
|
||||
+
|
||||
Default: empty.
|
||||
spring.cloud.stream.kafka.binder.healthTimeout::
|
||||
The time to wait to get partition information, in seconds.
|
||||
Health reports as down if this timer expires.
|
||||
+
|
||||
Default: 10.
|
||||
spring.cloud.stream.kafka.binder.requiredAcks::
|
||||
The number of required acks on the broker.
|
||||
See the Kafka documentation for the producer `acks` property.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.minPartitionCount::
|
||||
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
|
||||
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
|
||||
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.producerProperties::
|
||||
Key/Value map of arbitrary Kafka client producer properties.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.replicationFactor::
|
||||
The replication factor of auto-created topics if `autoCreateTopics` is active.
|
||||
Can be overridden on each binding.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.autoCreateTopics::
|
||||
If set to `true`, the binder creates new topics automatically.
|
||||
If set to `false`, the binder relies on the topics being already configured.
|
||||
In the latter case, if the topics do not exist, the binder fails to start.
|
||||
+
|
||||
NOTE: This setting is independent of the `auto.topic.create.enable` setting of the broker and does not influence it.
|
||||
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
|
||||
+
|
||||
Default: `true`.
|
||||
spring.cloud.stream.kafka.binder.autoAddPartitions::
|
||||
If set to `true`, the binder creates new partitions if required.
|
||||
If set to `false`, the binder relies on the partition size of the topic being already configured.
|
||||
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
|
||||
+
|
||||
Default: `false`.
|
||||
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
|
||||
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
|
||||
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
|
||||
+
|
||||
Default `null` (no transactions)
|
||||
spring.cloud.stream.kafka.binder.transaction.producer.*::
|
||||
Global producer properties for producers in a transactional binder.
|
||||
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
|
||||
+
|
||||
Default: See individual producer properties.
|
||||
|
||||
spring.cloud.stream.kafka.binder.headerMapperBeanName::
|
||||
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `DefaultKafkaHeaderMapper` that uses JSON deserialization for the headers.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
[[kafka-consumer-properties]]
|
||||
=== Kafka Consumer Properties
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
|
||||
|
||||
admin.configuration::
|
||||
A `Map` of Kafka topic properties used when provisioning topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0`
|
||||
+
|
||||
Default: none.
|
||||
|
||||
admin.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
admin.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
|
||||
autoRebalanceEnabled::
|
||||
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
|
||||
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
|
||||
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
|
||||
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
|
||||
+
|
||||
Default: `true`.
|
||||
ackEachRecord::
|
||||
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
|
||||
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
|
||||
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
|
||||
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
|
||||
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
|
||||
+
|
||||
Default: `false`.
|
||||
autoCommitOffset::
|
||||
Whether to autocommit offsets when a message has been processed.
|
||||
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` is present in the inbound message.
|
||||
Applications may use this header for acknowledging messages.
|
||||
See the examples section for details.
|
||||
When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
|
||||
Also see `ackEachRecord`.
|
||||
+
|
||||
Default: `true`.
|
||||
autoCommitOnError::
|
||||
Effective only if `autoCommitOffset` is set to `true`.
|
||||
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
|
||||
If set to `true`, it always auto-commits (if auto-commit is enabled).
|
||||
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
|
||||
+
|
||||
Default: not set.
|
||||
resetOffsets::
|
||||
Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
+
|
||||
Default: `false`.
|
||||
startOffset::
|
||||
The starting offset for new groups.
|
||||
Allowed values: `earliest` and `latest`.
|
||||
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
|
||||
Also see `resetOffsets` (earlier in this list).
|
||||
+
|
||||
Default: null (equivalent to `earliest`).
|
||||
enableDlq::
|
||||
When set to true, it enables DLQ behavior for the consumer.
|
||||
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configurable by setting the `dlqName` property.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> processing for more information.
|
||||
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
|
||||
**Not allowed when `destinationIsPattern` is `true`.**
|
||||
+
|
||||
Default: `false`.
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka consumer properties.
|
||||
+
|
||||
Default: Empty map.
|
||||
dlqName::
|
||||
The name of the DLQ topic to receive the error messages.
|
||||
+
|
||||
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available through kafka producer properties can be set through this property.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
Indicates which standard headers are populated by the inbound channel adapter.
|
||||
Allowed values: `none`, `id`, `timestamp`, or `both`.
|
||||
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
|
||||
+
|
||||
Default: `none`
|
||||
converterBeanName::
|
||||
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
|
||||
+
|
||||
Default: `null`
|
||||
idleEventInterval::
|
||||
The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
|
||||
See <<pause-resume>> for a usage example.
|
||||
+
|
||||
Default: `30000`
|
||||
destinationIsPattern::
|
||||
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
|
||||
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
|
||||
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
|
||||
This can be configured using the `configuration` property above.
|
||||
+
|
||||
Default: `false`
|
||||
|
||||
[[kafka-producer-properties]]
|
||||
=== Kafka Producer Properties
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
|
||||
|
||||
admin.configuration::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0`
|
||||
+
|
||||
Default: none.
|
||||
|
||||
admin.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See `NewTopic` javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
admin.replication-factor::
|
||||
The replication factor to use when provisioning new topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
|
||||
bufferSize::
|
||||
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
+
|
||||
Default: `16384`.
|
||||
sync::
|
||||
Whether the producer is synchronous.
|
||||
+
|
||||
Default: `false`.
|
||||
batchTimeout::
|
||||
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
|
||||
+
|
||||
Default: `0`.
|
||||
messageKeyExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
|
||||
The payload cannot be used because, by the time this expression is evaluated, the payload is already in the form of a `byte[]`.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
|
||||
Patterns can begin or end with the wildcard character (asterisk).
|
||||
Patterns can be negated by prefixing with `!`.
|
||||
Matching stops after the first match (positive or negative).
|
||||
For example `!ask,as*` will pass `ash` but not `ask`.
|
||||
`id` and `timestamp` are never mapped.
|
||||
+
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
+
|
||||
Default: Empty map.
|
||||
|
||||
NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
|
||||
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
|
||||
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.
|
||||
|
||||
=== Usage examples
|
||||
|
||||
In this section, we show the use of the preceding properties for specific scenarios.
|
||||
|
||||
==== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking
|
||||
|
||||
This example illustrates how one may manually acknowledge offsets in a consumer application.
|
||||
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
|
||||
Use the corresponding input channel name for your example.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class ManuallyAcknowledgingConsumer {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void process(Message<?> message) {
|
||||
Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
==== Example: Security Configuration
|
||||
|
||||
Apache Kafka 0.9 supports secure connections between client and brokers.
|
||||
To take advantage of this feature, follow the guidelines in the http://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 http://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
|
||||
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
|
||||
|
||||
For example, to set `security.protocol` to `SASL_SSL`, set the following property:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
|
||||
----
|
||||
|
||||
All the other security properties can be set in a similar manner.
|
||||
|
||||
When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
|
||||
|
||||
Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.
|
||||
|
||||
===== Using JAAS Configuration Files
|
||||
|
||||
The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
|
||||
--spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
|
||||
----
|
||||
|
||||
===== Using Spring Boot Properties
|
||||
|
||||
As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.
|
||||
|
||||
The following properties can be used to configure the login context of the Kafka client:
|
||||
|
||||
spring.cloud.stream.kafka.binder.jaas.loginModule::
|
||||
The login module name. Not necessary to be set in normal cases.
|
||||
+
|
||||
Default: `com.sun.security.auth.module.Krb5LoginModule`.
|
||||
spring.cloud.stream.kafka.binder.jaas.controlFlag::
|
||||
The control flag of the login module.
|
||||
+
|
||||
Default: `required`.
|
||||
spring.cloud.stream.kafka.binder.jaas.options::
|
||||
Map with a key/value pair containing the login module options.
|
||||
+
|
||||
Default: Empty map.
|
||||
|
||||
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
|
||||
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
|
||||
--spring.cloud.stream.kafka.binder.autoCreateTopics=false \
|
||||
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
|
||||
--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
|
||||
----
|
||||
|
||||
The preceding example represents the equivalent of the following JAAS file:
|
||||
|
||||
[source]
|
||||
----
|
||||
KafkaClient {
|
||||
com.sun.security.auth.module.Krb5LoginModule required
|
||||
useKeyTab=true
|
||||
storeKey=true
|
||||
keyTab="/etc/security/keytabs/kafka_client.keytab"
|
||||
principal="kafka-client-1@EXAMPLE.COM";
|
||||
};
|
||||
----
|
||||
|
||||
If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be set.
|
||||
|
||||
NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
|
||||
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.
|
||||
|
||||
NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` with Kerberos.
|
||||
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
|
||||
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
|
||||
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
|
||||
|
||||
[[pause-resume]]
|
||||
==== Example: Pausing and Resuming the Consumer
|
||||
|
||||
If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
|
||||
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
|
||||
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
|
||||
The frequency at which events are published is controlled by the `idleEventInterval` property.
|
||||
Since the consumer is not thread-safe, you must call these methods on the calling thread.
|
||||
|
||||
The following simple application shows how to pause and resume:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class Application {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(Application.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
|
||||
System.out.println(in);
|
||||
consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
|
||||
return event -> {
|
||||
System.out.println(event);
|
||||
if (event.getConsumer().paused().size() > 0) {
|
||||
event.getConsumer().resume(event.getConsumer().paused());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
[[kafka-error-channels]]
|
||||
== Error Channels
|
||||
|
||||
Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
|
||||
See <<spring-cloud-stream-overview-error-handling>> for more information.
|
||||
|
||||
The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
|
||||
|
||||
* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
|
||||
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`
|
||||
|
||||
There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
|
||||
You can consume these exceptions with your own Spring Integration flow.
|
||||
|
||||
[[kafka-metrics]]
|
||||
== Kafka Metrics
|
||||
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag`: This metric indicates how many messages have not been yet consumed from a given binder's topic by a given consumer group.
|
||||
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, the consumer group named `myGroup` has `1000` messages waiting to be consumed from the topic called `myTopic`.
|
||||
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
|
||||
@@ -1,35 +0,0 @@
|
||||
/*
|
||||
code highlight CSS resembling the Eclipse IDE default color scheme
|
||||
@author Costin Leau
|
||||
*/
|
||||
|
||||
.hl-keyword {
|
||||
color: #7F0055;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.hl-comment {
|
||||
color: #3F5F5F;
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.hl-multiline-comment {
|
||||
color: #3F5FBF;
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.hl-tag {
|
||||
color: #3F7F7F;
|
||||
}
|
||||
|
||||
.hl-attribute {
|
||||
color: #7F007F;
|
||||
}
|
||||
|
||||
.hl-value {
|
||||
color: #2A00FF;
|
||||
}
|
||||
|
||||
.hl-string {
|
||||
color: #2A00FF;
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
@IMPORT url("manual.css");
|
||||
|
||||
body.firstpage {
|
||||
background: url("../images/background.png") no-repeat center top;
|
||||
}
|
||||
|
||||
div.part h1 {
|
||||
border-top: none;
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
@IMPORT url("manual.css");
|
||||
|
||||
body {
|
||||
background: url("../images/background.png") no-repeat center top;
|
||||
}
|
||||
|
||||
@@ -1,344 +0,0 @@
|
||||
@IMPORT url("highlight.css");
|
||||
|
||||
html {
|
||||
padding: 0pt;
|
||||
margin: 0pt;
|
||||
}
|
||||
|
||||
body {
|
||||
color: #333333;
|
||||
margin: 15px 30px;
|
||||
font-family: Helvetica, Arial, Freesans, Clean, Sans-serif;
|
||||
line-height: 1.6;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
}
|
||||
|
||||
code {
|
||||
font-size: 16px;
|
||||
font-family: Consolas, "Liberation Mono", Courier, monospace;
|
||||
}
|
||||
|
||||
:not(a)>code {
|
||||
color: #6D180B;
|
||||
}
|
||||
|
||||
:not(pre)>code {
|
||||
background-color: #F2F2F2;
|
||||
border: 1px solid #CCCCCC;
|
||||
border-radius: 4px;
|
||||
padding: 1px 3px 0;
|
||||
text-shadow: none;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
body>*:first-child {
|
||||
margin-top: 0 !important;
|
||||
}
|
||||
|
||||
div {
|
||||
margin: 0pt;
|
||||
}
|
||||
|
||||
hr {
|
||||
border: 1px solid #CCCCCC;
|
||||
background: #CCCCCC;
|
||||
}
|
||||
|
||||
h1,h2,h3,h4,h5,h6 {
|
||||
color: #000000;
|
||||
cursor: text;
|
||||
font-weight: bold;
|
||||
margin: 30px 0 10px;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
h1,h2,h3 {
|
||||
margin: 40px 0 10px;
|
||||
}
|
||||
|
||||
h1 {
|
||||
margin: 70px 0 30px;
|
||||
padding-top: 20px;
|
||||
}
|
||||
|
||||
div.part h1 {
|
||||
border-top: 1px dotted #CCCCCC;
|
||||
}
|
||||
|
||||
h1,h1 code {
|
||||
font-size: 32px;
|
||||
}
|
||||
|
||||
h2,h2 code {
|
||||
font-size: 24px;
|
||||
}
|
||||
|
||||
h3,h3 code {
|
||||
font-size: 20px;
|
||||
}
|
||||
|
||||
h4,h1 code,h5,h5 code,h6,h6 code {
|
||||
font-size: 18px;
|
||||
}
|
||||
|
||||
div.book,div.chapter,div.appendix,div.part,div.preface {
|
||||
min-width: 300px;
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
p.releaseinfo {
|
||||
font-weight: bold;
|
||||
margin-bottom: 40px;
|
||||
margin-top: 40px;
|
||||
}
|
||||
|
||||
div.authorgroup {
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
p.copyright {
|
||||
line-height: 1;
|
||||
margin-bottom: -5px;
|
||||
}
|
||||
|
||||
.legalnotice p {
|
||||
font-style: italic;
|
||||
font-size: 14px;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
div.titlepage+p,div.titlepage+p {
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
pre {
|
||||
line-height: 1.0;
|
||||
color: black;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #4183C4;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 15px 0;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
ul,ol {
|
||||
padding-left: 30px;
|
||||
}
|
||||
|
||||
li p {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
div.table {
|
||||
margin: 1em;
|
||||
padding: 0.5em;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
div.table table,div.informaltable table {
|
||||
display: table;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.table td {
|
||||
padding-left: 7px;
|
||||
padding-right: 7px;
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
line-height: 1.4;
|
||||
padding: 0 20px;
|
||||
background-color: #F8F8F8;
|
||||
border: 1px solid #CCCCCC;
|
||||
border-radius: 3px 3px 3px 3px;
|
||||
}
|
||||
|
||||
.sidebar p.title {
|
||||
color: #6D180B;
|
||||
}
|
||||
|
||||
pre.programlisting,pre.screen {
|
||||
font-size: 15px;
|
||||
padding: 6px 10px;
|
||||
background-color: #F8F8F8;
|
||||
border: 1px solid #CCCCCC;
|
||||
border-radius: 3px 3px 3px 3px;
|
||||
clear: both;
|
||||
overflow: auto;
|
||||
line-height: 1.4;
|
||||
font-family: Consolas, "Liberation Mono", Courier, monospace;
|
||||
}
|
||||
|
||||
table {
|
||||
border-collapse: collapse;
|
||||
border-spacing: 0;
|
||||
border: 1px solid #DDDDDD !important;
|
||||
border-radius: 4px !important;
|
||||
border-collapse: separate !important;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
table thead {
|
||||
background: #F5F5F5;
|
||||
}
|
||||
|
||||
table tr {
|
||||
border: none;
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
table th {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
table th,table td {
|
||||
border: none !important;
|
||||
padding: 6px 13px;
|
||||
}
|
||||
|
||||
table tr:nth-child(2n) {
|
||||
background-color: #F8F8F8;
|
||||
}
|
||||
|
||||
td p {
|
||||
margin: 0 0 15px 0;
|
||||
}
|
||||
|
||||
div.table-contents td p {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
div.important *,div.note *,div.tip *,div.warning *,div.navheader *,div.navfooter *,div.calloutlist *
|
||||
{
|
||||
border: none !important;
|
||||
background: none !important;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
div.important p,div.note p,div.tip p,div.warning p {
|
||||
color: #6F6F6F;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
div.important code,div.note code,div.tip code,div.warning code {
|
||||
background-color: #F2F2F2 !important;
|
||||
border: 1px solid #CCCCCC !important;
|
||||
border-radius: 4px !important;
|
||||
padding: 1px 3px 0 !important;
|
||||
text-shadow: none !important;
|
||||
white-space: nowrap !important;
|
||||
}
|
||||
|
||||
.note th,.tip th,.warning th {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.note tr:first-child td,.tip tr:first-child td,.warning tr:first-child td
|
||||
{
|
||||
border-right: 1px solid #CCCCCC !important;
|
||||
padding-top: 10px;
|
||||
}
|
||||
|
||||
div.calloutlist p,div.calloutlist td {
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
div.calloutlist>table>tbody>tr>td:first-child {
|
||||
padding-left: 10px;
|
||||
width: 30px !important;
|
||||
}
|
||||
|
||||
div.important,div.note,div.tip,div.warning {
|
||||
margin-left: 0px !important;
|
||||
margin-right: 20px !important;
|
||||
margin-top: 20px;
|
||||
margin-bottom: 20px;
|
||||
padding-top: 10px;
|
||||
padding-bottom: 10px;
|
||||
}
|
||||
|
||||
div.toc {
|
||||
line-height: 1.2;
|
||||
}
|
||||
|
||||
dl,dt {
|
||||
margin-top: 1px;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
div.toc>dl>dt {
|
||||
font-size: 32px;
|
||||
font-weight: bold;
|
||||
margin: 30px 0 10px 0;
|
||||
display: block;
|
||||
}
|
||||
|
||||
div.toc>dl>dd>dl>dt {
|
||||
font-size: 24px;
|
||||
font-weight: bold;
|
||||
margin: 20px 0 10px 0;
|
||||
display: block;
|
||||
}
|
||||
|
||||
div.toc>dl>dd>dl>dd>dl>dt {
|
||||
font-weight: bold;
|
||||
font-size: 20px;
|
||||
margin: 10px 0 0 0;
|
||||
}
|
||||
|
||||
tbody.footnotes * {
|
||||
border: none !important;
|
||||
}
|
||||
|
||||
div.footnote p {
|
||||
margin: 0;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
div.footnote p sup {
|
||||
margin-right: 6px;
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
div.navheader {
|
||||
border-bottom: 1px solid #CCCCCC;
|
||||
}
|
||||
|
||||
div.navfooter {
|
||||
border-top: 1px solid #CCCCCC;
|
||||
}
|
||||
|
||||
.title {
|
||||
margin-left: -1em;
|
||||
padding-left: 1em;
|
||||
}
|
||||
|
||||
.title>a {
|
||||
position: absolute;
|
||||
visibility: hidden;
|
||||
display: block;
|
||||
font-size: 0.85em;
|
||||
margin-top: 0.05em;
|
||||
margin-left: -1em;
|
||||
vertical-align: text-top;
|
||||
color: black;
|
||||
}
|
||||
|
||||
.title>a:before {
|
||||
content: "\00A7";
|
||||
}
|
||||
|
||||
.title:hover>a,.title>a:hover,.title:hover>a:hover {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
.title:focus>a,.title>a:focus,.title:focus>a:focus {
|
||||
outline: 0;
|
||||
}
|
||||
|
Before Width: | Height: | Size: 11 KiB |
|
Before Width: | Height: | Size: 2.0 KiB |
|
Before Width: | Height: | Size: 2.0 KiB |
|
Before Width: | Height: | Size: 45 KiB |
|
Before Width: | Height: | Size: 2.2 KiB |
|
Before Width: | Height: | Size: 931 B |
|
Before Width: | Height: | Size: 2.1 KiB |
@@ -1,45 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:xslthl="http://xslthl.sf.net"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
exclude-result-prefixes="xslthl d"
|
||||
version='1.0'>
|
||||
|
||||
<!-- Extensions -->
|
||||
<xsl:param name="use.extensions">1</xsl:param>
|
||||
<xsl:param name="tablecolumns.extension">0</xsl:param>
|
||||
<xsl:param name="callout.extensions">1</xsl:param>
|
||||
|
||||
<!-- Graphics -->
|
||||
<xsl:param name="admon.graphics" select="1"/>
|
||||
<xsl:param name="admon.graphics.path">images/</xsl:param>
|
||||
<xsl:param name="admon.graphics.extension">.png</xsl:param>
|
||||
|
||||
<!-- Table of Contents -->
|
||||
<xsl:param name="generate.toc">book toc,title</xsl:param>
|
||||
<xsl:param name="toc.section.depth">3</xsl:param>
|
||||
|
||||
<!-- Hide revhistory -->
|
||||
<xsl:template match="d:revhistory" mode="titlepage.mode"/>
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,31 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:xslthl="http://xslthl.sf.net"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
exclude-result-prefixes="xslthl d"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="urn:docbkx:stylesheet"/>
|
||||
<xsl:import href="common.xsl"/>
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,73 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="urn:docbkx:stylesheet"/>
|
||||
<xsl:import href="html.xsl"/>
|
||||
|
||||
<xsl:param name="html.stylesheet">css/manual-multipage.css</xsl:param>
|
||||
|
||||
<xsl:param name="chunk.section.depth">'5'</xsl:param>
|
||||
<xsl:param name="use.id.as.filename">'1'</xsl:param>
|
||||
|
||||
<!-- Replace chunk-element-content from chunk-common to add firstpage class to body -->
|
||||
<xsl:template name="chunk-element-content">
|
||||
<xsl:param name="prev"/>
|
||||
<xsl:param name="next"/>
|
||||
<xsl:param name="nav.context"/>
|
||||
<xsl:param name="content">
|
||||
<xsl:apply-imports/>
|
||||
</xsl:param>
|
||||
|
||||
<xsl:call-template name="user.preroot"/>
|
||||
|
||||
<html>
|
||||
<xsl:call-template name="html.head">
|
||||
<xsl:with-param name="prev" select="$prev"/>
|
||||
<xsl:with-param name="next" select="$next"/>
|
||||
</xsl:call-template>
|
||||
<body>
|
||||
<xsl:if test="count($prev) = 0">
|
||||
<xsl:attribute name="class">firstpage</xsl:attribute>
|
||||
</xsl:if>
|
||||
<xsl:call-template name="body.attributes"/>
|
||||
<xsl:call-template name="user.header.navigation"/>
|
||||
<xsl:call-template name="header.navigation">
|
||||
<xsl:with-param name="prev" select="$prev"/>
|
||||
<xsl:with-param name="next" select="$next"/>
|
||||
<xsl:with-param name="nav.context" select="$nav.context"/>
|
||||
</xsl:call-template>
|
||||
<xsl:call-template name="user.header.content"/>
|
||||
<xsl:copy-of select="$content"/>
|
||||
<xsl:call-template name="user.footer.content"/>
|
||||
<xsl:call-template name="footer.navigation">
|
||||
<xsl:with-param name="prev" select="$prev"/>
|
||||
<xsl:with-param name="next" select="$next"/>
|
||||
<xsl:with-param name="nav.context" select="$nav.context"/>
|
||||
</xsl:call-template>
|
||||
<xsl:call-template name="user.footer.navigation"/>
|
||||
</body>
|
||||
</html>
|
||||
<xsl:value-of select="$chunk.append"/>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
@@ -1,30 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="urn:docbkx:stylesheet"/>
|
||||
<xsl:import href="html.xsl"/>
|
||||
|
||||
<xsl:param name="html.stylesheet">css/manual-singlepage.css</xsl:param>
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,141 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:xslthl="http://xslthl.sf.net"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
exclude-result-prefixes="xslthl"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
|
||||
<xsl:import href="common.xsl"/>
|
||||
|
||||
<!-- Only use scaling in FO -->
|
||||
<xsl:param name="ignore.image.scaling">1</xsl:param>
|
||||
|
||||
<!-- Use code syntax highlighting -->
|
||||
<xsl:param name="highlight.source">1</xsl:param>
|
||||
|
||||
<!-- Activate Graphics -->
|
||||
<xsl:param name="callout.graphics" select="1" />
|
||||
<xsl:param name="callout.defaultcolumn">120</xsl:param>
|
||||
<xsl:param name="callout.graphics.path">images/callouts/</xsl:param>
|
||||
<xsl:param name="callout.graphics.extension">.png</xsl:param>
|
||||
|
||||
<xsl:param name="table.borders.with.css" select="1"/>
|
||||
<xsl:param name="html.stylesheet.type">text/css</xsl:param>
|
||||
|
||||
<xsl:param name="admonition.title.properties">text-align: left</xsl:param>
|
||||
|
||||
<!-- Leave image paths as relative when navigating XInclude -->
|
||||
<xsl:param name="keep.relative.image.uris" select="1"/>
|
||||
|
||||
<!-- Label Chapters and Sections (numbering) -->
|
||||
<xsl:param name="chapter.autolabel" select="1"/>
|
||||
<xsl:param name="section.autolabel" select="1"/>
|
||||
<xsl:param name="section.autolabel.max.depth" select="2"/>
|
||||
<xsl:param name="section.label.includes.component.label" select="1"/>
|
||||
<xsl:param name="table.footnote.number.format" select="'1'"/>
|
||||
|
||||
<!-- Remove "Chapter" from the Chapter titles... -->
|
||||
<xsl:param name="local.l10n.xml" select="document('')"/>
|
||||
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
|
||||
<l:l10n language="en">
|
||||
<l:context name="title-numbered">
|
||||
<l:template name="chapter" text="%n. %t"/>
|
||||
<l:template name="section" text="%n %t"/>
|
||||
</l:context>
|
||||
</l:l10n>
|
||||
</l:i18n>
|
||||
|
||||
<!-- Syntax Highlighting -->
|
||||
<xsl:template match='xslthl:keyword' mode="xslthl">
|
||||
<span class="hl-keyword"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:comment' mode="xslthl">
|
||||
<span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:oneline-comment' mode="xslthl">
|
||||
<span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:multiline-comment' mode="xslthl">
|
||||
<span class="hl-multiline-comment"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:tag' mode="xslthl">
|
||||
<span class="hl-tag"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:attribute' mode="xslthl">
|
||||
<span class="hl-attribute"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:value' mode="xslthl">
|
||||
<span class="hl-value"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:string' mode="xslthl">
|
||||
<span class="hl-string"><xsl:apply-templates mode="xslthl"/></span>
|
||||
</xsl:template>
|
||||
|
||||
<!-- Custom Title Page -->
|
||||
<xsl:template match="d:author" mode="titlepage.mode">
|
||||
<xsl:if test="name(preceding-sibling::*[1]) = 'author'">
|
||||
<xsl:text>, </xsl:text>
|
||||
</xsl:if>
|
||||
<span class="{name(.)}">
|
||||
<xsl:call-template name="person.name"/>
|
||||
<xsl:apply-templates mode="titlepage.mode" select="./contrib"/>
|
||||
</span>
|
||||
</xsl:template>
|
||||
<xsl:template match="d:authorgroup" mode="titlepage.mode">
|
||||
<div class="{name(.)}">
|
||||
<h2>Authors</h2>
|
||||
<xsl:apply-templates mode="titlepage.mode"/>
|
||||
</div>
|
||||
</xsl:template>
|
||||
|
||||
<!-- Title Links -->
|
||||
<xsl:template name="anchor">
|
||||
<xsl:param name="node" select="."/>
|
||||
<xsl:param name="conditional" select="1"/>
|
||||
<xsl:variable name="id">
|
||||
<xsl:call-template name="object.id">
|
||||
<xsl:with-param name="object" select="$node"/>
|
||||
</xsl:call-template>
|
||||
</xsl:variable>
|
||||
<xsl:if test="$conditional = 0 or $node/@id or $node/@xml:id">
|
||||
<xsl:element name="a">
|
||||
<xsl:attribute name="name">
|
||||
<xsl:value-of select="$id"/>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="href">
|
||||
<xsl:text>#</xsl:text>
|
||||
<xsl:value-of select="$id"/>
|
||||
</xsl:attribute>
|
||||
</xsl:element>
|
||||
</xsl:if>
|
||||
</xsl:template>
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,582 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns:fo="http://www.w3.org/1999/XSL/Format"
|
||||
xmlns:xslthl="http://xslthl.sf.net"
|
||||
xmlns:xlink='http://www.w3.org/1999/xlink'
|
||||
xmlns:exsl="http://exslt.org/common"
|
||||
exclude-result-prefixes="exsl xslthl d xlink"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="urn:docbkx:stylesheet"/>
|
||||
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
|
||||
<xsl:import href="common.xsl"/>
|
||||
|
||||
<!-- Extensions -->
|
||||
<xsl:param name="fop1.extensions" select="1"/>
|
||||
|
||||
<xsl:param name="paper.type" select="'A4'"/>
|
||||
<xsl:param name="page.margin.top" select="'1cm'"/>
|
||||
<xsl:param name="region.before.extent" select="'1cm'"/>
|
||||
<xsl:param name="body.margin.top" select="'1.5cm'"/>
|
||||
|
||||
<xsl:param name="body.margin.bottom" select="'1.5cm'"/>
|
||||
<xsl:param name="region.after.extent" select="'1cm'"/>
|
||||
<xsl:param name="page.margin.bottom" select="'1cm'"/>
|
||||
<xsl:param name="title.margin.left" select="'0cm'"/>
|
||||
|
||||
<!-- allow break across pages -->
|
||||
<xsl:attribute-set name="formal.object.properties">
|
||||
<xsl:attribute name="keep-together.within-column">auto</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- use color links and sensible rendering -->
|
||||
<xsl:attribute-set name="xref.properties">
|
||||
<xsl:attribute name="text-decoration">underline</xsl:attribute>
|
||||
<xsl:attribute name="color">#204060</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
<xsl:param name="ulink.show" select="0"></xsl:param>
|
||||
<xsl:param name="ulink.footnotes" select="0"></xsl:param>
|
||||
|
||||
<!-- TITLE PAGE -->
|
||||
|
||||
<xsl:template name="book.titlepage.recto">
|
||||
<fo:block>
|
||||
<fo:table table-layout="fixed" width="175mm">
|
||||
<fo:table-column column-width="175mm"/>
|
||||
<fo:table-body>
|
||||
<fo:table-row>
|
||||
<fo:table-cell text-align="center">
|
||||
<fo:block>
|
||||
<fo:external-graphic src="images/logo.png" width="240px"
|
||||
height="auto" content-width="scale-to-fit"
|
||||
content-height="scale-to-fit"
|
||||
content-type="content-type:image/png" text-align="center"
|
||||
/>
|
||||
</fo:block>
|
||||
<fo:block font-family="Helvetica" font-size="20pt" font-weight="bold" padding="10mm">
|
||||
<xsl:value-of select="d:info/d:title"/>
|
||||
</fo:block>
|
||||
<fo:block font-family="Helvetica" font-size="14pt" padding-before="2mm">
|
||||
<xsl:value-of select="d:info/d:subtitle"/>
|
||||
</fo:block>
|
||||
<fo:block font-family="Helvetica" font-size="14pt" padding="2mm">
|
||||
<xsl:value-of select="d:info/d:releaseinfo"/>
|
||||
</fo:block>
|
||||
</fo:table-cell>
|
||||
</fo:table-row>
|
||||
<fo:table-row>
|
||||
<fo:table-cell text-align="center">
|
||||
<fo:block font-family="Helvetica" font-size="14pt" padding="5mm">
|
||||
<xsl:value-of select="d:info/d:pubdate"/>
|
||||
</fo:block>
|
||||
</fo:table-cell>
|
||||
</fo:table-row>
|
||||
<fo:table-row>
|
||||
<fo:table-cell text-align="center">
|
||||
<fo:block font-family="Helvetica" font-size="10pt" padding="10mm">
|
||||
<xsl:for-each select="d:info/d:authorgroup/d:author">
|
||||
<xsl:if test="position() > 1">
|
||||
<xsl:text>, </xsl:text>
|
||||
</xsl:if>
|
||||
<xsl:value-of select="."/>
|
||||
</xsl:for-each>
|
||||
</fo:block>
|
||||
|
||||
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm">
|
||||
<xsl:value-of select="d:info/d:pubdate"/>
|
||||
</fo:block>
|
||||
|
||||
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm" padding-before="25em">
|
||||
<xsl:text>Copyright © </xsl:text><xsl:value-of select="d:info/d:copyright"/>
|
||||
</fo:block>
|
||||
|
||||
<fo:block font-family="Helvetica" font-size="8pt" padding="1mm">
|
||||
<xsl:value-of select="d:info/d:legalnotice"/>
|
||||
</fo:block>
|
||||
</fo:table-cell>
|
||||
</fo:table-row>
|
||||
</fo:table-body>
|
||||
</fo:table>
|
||||
</fo:block>
|
||||
</xsl:template>
|
||||
|
||||
<!-- Prevent blank pages in output -->
|
||||
<xsl:template name="book.titlepage.before.verso">
|
||||
</xsl:template>
|
||||
<xsl:template name="book.titlepage.verso">
|
||||
</xsl:template>
|
||||
<xsl:template name="book.titlepage.separator">
|
||||
</xsl:template>
|
||||
|
||||
<!-- HEADER -->
|
||||
|
||||
<!-- More space in the center header for long text -->
|
||||
<xsl:attribute-set name="header.content.properties">
|
||||
<xsl:attribute name="font-family">
|
||||
<xsl:value-of select="$body.font.family"/>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="margin-left">-5em</xsl:attribute>
|
||||
<xsl:attribute name="margin-right">-5em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">8pt</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:template name="header.content">
|
||||
<xsl:param name="pageclass" select="''"/>
|
||||
<xsl:param name="sequence" select="''"/>
|
||||
<xsl:param name="position" select="''"/>
|
||||
<xsl:param name="gentext-key" select="''"/>
|
||||
|
||||
<xsl:variable name="Version">
|
||||
<xsl:choose>
|
||||
<xsl:when test="//d:title">
|
||||
<xsl:value-of select="//d:title"/><xsl:text> </xsl:text>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:text>please define title in your docbook file!</xsl:text>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:choose>
|
||||
<xsl:when test="$sequence='blank'">
|
||||
<xsl:choose>
|
||||
<xsl:when test="$position='center'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$pageclass='titlepage'">
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$position='center'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:template>
|
||||
|
||||
<!-- FOOTER-->
|
||||
<xsl:attribute-set name="footer.content.properties">
|
||||
<xsl:attribute name="font-family">
|
||||
<xsl:value-of select="$body.font.family"/>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="font-size">8pt</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:template name="footer.content">
|
||||
<xsl:param name="pageclass" select="''"/>
|
||||
<xsl:param name="sequence" select="''"/>
|
||||
<xsl:param name="position" select="''"/>
|
||||
<xsl:param name="gentext-key" select="''"/>
|
||||
|
||||
<xsl:variable name="Version">
|
||||
<xsl:choose>
|
||||
<xsl:when test="//d:releaseinfo">
|
||||
<xsl:value-of select="//d:releaseinfo"/>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:variable name="Title">
|
||||
<xsl:choose>
|
||||
<xsl:when test="//d:productname">
|
||||
<xsl:value-of select="//d:productname"/><xsl:text> </xsl:text>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:text>please define title in your docbook file!</xsl:text>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:choose>
|
||||
<xsl:when test="$sequence='blank'">
|
||||
<xsl:choose>
|
||||
<xsl:when test="$double.sided != 0 and $position = 'left'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided = 0 and $position = 'center'">
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
<fo:page-number/>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$pageclass='titlepage'">
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='left'">
|
||||
<fo:page-number/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='right'">
|
||||
<fo:page-number/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided = 0 and $position='right'">
|
||||
<fo:page-number/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='left'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='right'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided = 0 and $position='left'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$position='center'">
|
||||
<xsl:value-of select="$Title"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match="processing-instruction('hard-pagebreak')">
|
||||
<fo:block break-before='page'/>
|
||||
</xsl:template>
|
||||
|
||||
|
||||
<!-- PAPER & PAGE SIZE -->
|
||||
|
||||
<!-- Paper type, no headers on blank pages, no double sided printing -->
|
||||
<xsl:param name="double.sided">0</xsl:param>
|
||||
<xsl:param name="headers.on.blank.pages">0</xsl:param>
|
||||
<xsl:param name="footers.on.blank.pages">0</xsl:param>
|
||||
|
||||
<!-- FONTS & STYLES -->
|
||||
|
||||
<xsl:param name="hyphenate">false</xsl:param>
|
||||
|
||||
<!-- Default Font size -->
|
||||
<xsl:param name="body.font.family">Helvetica</xsl:param>
|
||||
<xsl:param name="body.font.master">10</xsl:param>
|
||||
<xsl:param name="body.font.small">8</xsl:param>
|
||||
<xsl:param name="title.font.family">Helvetica</xsl:param>
|
||||
|
||||
<!-- Line height in body text -->
|
||||
<xsl:param name="line-height">1.4</xsl:param>
|
||||
|
||||
<!-- Chapter title size -->
|
||||
<xsl:attribute-set name="chapter.titlepage.recto.style">
|
||||
<xsl:attribute name="text-align">left</xsl:attribute>
|
||||
<xsl:attribute name="font-weight">bold</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.8"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Why is the font-size for chapters hardcoded in the XSL FO templates?
|
||||
Let's remove it, so this sucker can use our attribute-set only... -->
|
||||
<xsl:template match="d:title" mode="chapter.titlepage.recto.auto.mode">
|
||||
<fo:block xmlns:fo="http://www.w3.org/1999/XSL/Format"
|
||||
xsl:use-attribute-sets="chapter.titlepage.recto.style">
|
||||
<xsl:call-template name="component.title">
|
||||
<xsl:with-param name="node" select="ancestor-or-self::d:chapter[1]"/>
|
||||
</xsl:call-template>
|
||||
</fo:block>
|
||||
</xsl:template>
|
||||
|
||||
<!-- Sections 1, 2 and 3 titles have a small bump factor and padding -->
|
||||
<xsl:attribute-set name="section.title.level1.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.6em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.6em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.6em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.5"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="section.title.level2.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.25"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="section.title.level3.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.0"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="section.title.level4.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.3em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.3em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.3em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 0.9"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
|
||||
<!-- TABLES -->
|
||||
|
||||
<!-- Some padding inside tables -->
|
||||
<xsl:attribute-set name="table.cell.padding">
|
||||
<xsl:attribute name="padding-left">4pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-right">4pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-top">4pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-bottom">4pt</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Only hairlines as frame and cell borders in tables -->
|
||||
<xsl:param name="table.frame.border.thickness">0.1pt</xsl:param>
|
||||
<xsl:param name="table.cell.border.thickness">0.1pt</xsl:param>
|
||||
|
||||
<!-- LABELS -->
|
||||
|
||||
<!-- Label Chapters and Sections (numbering) -->
|
||||
<xsl:param name="chapter.autolabel" select="1"/>
|
||||
<xsl:param name="section.autolabel" select="1"/>
|
||||
<xsl:param name="section.autolabel.max.depth" select="1"/>
|
||||
|
||||
<xsl:param name="section.label.includes.component.label" select="1"/>
|
||||
<xsl:param name="table.footnote.number.format" select="'1'"/>
|
||||
|
||||
<!-- PROGRAMLISTINGS -->
|
||||
|
||||
<!-- Verbatim text formatting (programlistings) -->
|
||||
<xsl:attribute-set name="monospace.verbatim.properties">
|
||||
<xsl:attribute name="font-size">7pt</xsl:attribute>
|
||||
<xsl:attribute name="wrap-option">wrap</xsl:attribute>
|
||||
<xsl:attribute name="keep-together.within-column">1</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="verbatim.properties">
|
||||
<xsl:attribute name="space-before.minimum">1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.optimum">1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
|
||||
<xsl:attribute name="border-color">#444444</xsl:attribute>
|
||||
<xsl:attribute name="border-style">solid</xsl:attribute>
|
||||
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-top">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="padding-right">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="padding-bottom">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="margin-left">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="margin-right">0.5em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Shade (background) programlistings -->
|
||||
<xsl:param name="shade.verbatim">1</xsl:param>
|
||||
<xsl:attribute-set name="shade.verbatim.style">
|
||||
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="list.block.spacing">
|
||||
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="example.properties">
|
||||
<xsl:attribute name="space-before.minimum">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.optimum">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="sidebar.properties">
|
||||
<xsl:attribute name="border-color">#444444</xsl:attribute>
|
||||
<xsl:attribute name="border-style">solid</xsl:attribute>
|
||||
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
|
||||
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
|
||||
<!-- TITLE INFORMATION FOR FIGURES, EXAMPLES ETC. -->
|
||||
|
||||
<xsl:attribute-set name="formal.title.properties" use-attribute-sets="normal.para.spacing">
|
||||
<xsl:attribute name="font-weight">normal</xsl:attribute>
|
||||
<xsl:attribute name="font-style">italic</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="hyphenate">false</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- CALLOUTS -->
|
||||
|
||||
<!-- don't use images for callouts -->
|
||||
<xsl:param name="callout.graphics">0</xsl:param>
|
||||
<xsl:param name="callout.unicode">1</xsl:param>
|
||||
|
||||
<!-- Place callout marks at this column in annotated areas -->
|
||||
<xsl:param name="callout.defaultcolumn">90</xsl:param>
|
||||
|
||||
<!-- MISC -->
|
||||
|
||||
<!-- Placement of titles -->
|
||||
<xsl:param name="formal.title.placement">
|
||||
figure after
|
||||
example after
|
||||
equation before
|
||||
table before
|
||||
procedure before
|
||||
</xsl:param>
|
||||
|
||||
<!-- Format Variable Lists as Blocks (prevents horizontal overflow) -->
|
||||
<xsl:param name="variablelist.as.blocks">1</xsl:param>
|
||||
<xsl:param name="body.start.indent">0pt</xsl:param>
|
||||
|
||||
<!-- Remove "Chapter" from the Chapter titles... -->
|
||||
<xsl:param name="local.l10n.xml" select="document('')"/>
|
||||
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
|
||||
<l:l10n language="en">
|
||||
<l:context name="title-numbered">
|
||||
<l:template name="chapter" text="%n. %t"/>
|
||||
<l:template name="section" text="%n %t"/>
|
||||
</l:context>
|
||||
<l:context name="title">
|
||||
<l:template name="example" text="Example %n %t"/>
|
||||
</l:context>
|
||||
</l:l10n>
|
||||
</l:i18n>
|
||||
|
||||
<!-- admon -->
|
||||
<xsl:param name="admon.graphics" select="0"/>
|
||||
|
||||
<xsl:attribute-set name="nongraphical.admonition.properties">
|
||||
<xsl:attribute name="margin-left">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="margin-right">2em</xsl:attribute>
|
||||
<xsl:attribute name="border-left-width">.75pt</xsl:attribute>
|
||||
<xsl:attribute name="border-left-style">solid</xsl:attribute>
|
||||
<xsl:attribute name="border-left-color">#5c5c4f</xsl:attribute>
|
||||
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.optimum">1.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">1.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">1.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">1.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">1.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">1.5em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="admonition.title.properties">
|
||||
<xsl:attribute name="font-size">10pt</xsl:attribute>
|
||||
<xsl:attribute name="font-weight">bold</xsl:attribute>
|
||||
<xsl:attribute name="hyphenate">false</xsl:attribute>
|
||||
<xsl:attribute name="keep-with-next.within-column">always</xsl:attribute>
|
||||
<xsl:attribute name="margin-left">0</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="admonition.properties">
|
||||
<xsl:attribute name="space-before.optimum">0em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Asciidoc -->
|
||||
<xsl:template match="processing-instruction('asciidoc-br')">
|
||||
<fo:block/>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match="processing-instruction('asciidoc-hr')">
|
||||
<fo:block space-after="1em">
|
||||
<fo:leader leader-pattern="rule" rule-thickness="0.5pt" rule-style="solid" leader-length.minimum="100%"/>
|
||||
</fo:block>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match="processing-instruction('asciidoc-pagebreak')">
|
||||
<fo:block break-after='page'/>
|
||||
</xsl:template>
|
||||
|
||||
<!-- SYNTAX HIGHLIGHT -->
|
||||
|
||||
<xsl:template match='xslthl:keyword' mode="xslthl">
|
||||
<fo:inline font-weight="bold" color="#7F0055"><xsl:apply-templates mode="xslthl"/></fo:inline>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:string' mode="xslthl">
|
||||
<fo:inline font-weight="bold" font-style="italic" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:comment' mode="xslthl">
|
||||
<fo:inline font-style="italic" color="#3F5FBF"><xsl:apply-templates mode="xslthl"/></fo:inline>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:tag' mode="xslthl">
|
||||
<fo:inline font-weight="bold" color="#3F7F7F"><xsl:apply-templates mode="xslthl"/></fo:inline>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:attribute' mode="xslthl">
|
||||
<fo:inline font-weight="bold" color="#7F007F"><xsl:apply-templates mode="xslthl"/></fo:inline>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match='xslthl:value' mode="xslthl">
|
||||
<fo:inline font-weight="bold" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
|
||||
</xsl:template>
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,23 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<xslthl-config>
|
||||
<highlighter id="java" file="./xslthl/java-hl.xml" />
|
||||
<highlighter id="groovy" file="./xslthl/java-hl.xml" />
|
||||
<highlighter id="html" file="./xslthl/html-hl.xml" />
|
||||
<highlighter id="ini" file="./xslthl/ini-hl.xml" />
|
||||
<highlighter id="php" file="./xslthl/php-hl.xml" />
|
||||
<highlighter id="c" file="./xslthl/c-hl.xml" />
|
||||
<highlighter id="cpp" file="./xslthl/cpp-hl.xml" />
|
||||
<highlighter id="csharp" file="./xslthl/csharp-hl.xml" />
|
||||
<highlighter id="python" file="./xslthl/python-hl.xml" />
|
||||
<highlighter id="ruby" file="./xslthl/ruby-hl.xml" />
|
||||
<highlighter id="perl" file="./xslthl/perl-hl.xml" />
|
||||
<highlighter id="javascript" file="./xslthl/javascript-hl.xml" />
|
||||
<highlighter id="bash" file="./xslthl/bourne-hl.xml" />
|
||||
<highlighter id="css" file="./xslthl/css-hl.xml" />
|
||||
<highlighter id="sql" file="./xslthl/sql2003-hl.xml" />
|
||||
<highlighter id="asciidoc" file="./xslthl/asciidoc-hl.xml" />
|
||||
<highlighter id="properties" file="./xslthl/properties-hl.xml" />
|
||||
<highlighter id="json" file="./xslthl/json-hl.xml" />
|
||||
<highlighter id="yaml" file="./xslthl/yaml-hl.xml" />
|
||||
<namespace prefix="xslthl" uri="http://xslthl.sf.net" />
|
||||
</xslthl-config>
|
||||
@@ -1,41 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for AsciiDoc files
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>////</start>
|
||||
<end>////</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<start>//</start>
|
||||
<solitary/>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(={1,6} .+)$</pattern>
|
||||
<style>heading</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(\.[^\.\s].+)$</pattern>
|
||||
<style>title</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(:!?\w.*?:)</pattern>
|
||||
<style>attribute</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(-|\*{1,5}|\d*\.{1,5})(?= .+$)</pattern>
|
||||
<style>bullet</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(\[.+\])$</pattern>
|
||||
<style>attribute</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,95 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for SH
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2010 Mathieu Malaterre
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="heredoc">
|
||||
<start><<</start>
|
||||
<quote>'</quote>
|
||||
<quote>"</quote>
|
||||
<flag>-</flag>
|
||||
<noWhiteSpace />
|
||||
<looseTerminator />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
<spanNewLines />
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<!-- reserved words -->
|
||||
<keyword>if</keyword>
|
||||
<keyword>then</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>elif</keyword>
|
||||
<keyword>fi</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>esac</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>while</keyword>
|
||||
<keyword>until</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>done</keyword>
|
||||
<!-- built-ins -->
|
||||
<keyword>exec</keyword>
|
||||
<keyword>shift</keyword>
|
||||
<keyword>exit</keyword>
|
||||
<keyword>times</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>export</keyword>
|
||||
<keyword>trap</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>readonly</keyword>
|
||||
<keyword>wait</keyword>
|
||||
<keyword>eval</keyword>
|
||||
<keyword>return</keyword>
|
||||
<!-- other commands -->
|
||||
<keyword>cd</keyword>
|
||||
<keyword>echo</keyword>
|
||||
<keyword>hash</keyword>
|
||||
<keyword>pwd</keyword>
|
||||
<keyword>read</keyword>
|
||||
<keyword>set</keyword>
|
||||
<keyword>test</keyword>
|
||||
<keyword>type</keyword>
|
||||
<keyword>ulimit</keyword>
|
||||
<keyword>umask</keyword>
|
||||
<keyword>unset</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,117 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
Syntax highlighting definition for C
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/**</start>
|
||||
<end>*/</end>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<start><![CDATA[/// ]]></start>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">//</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<!-- use the online-comment highlighter to detect directives -->
|
||||
<start>#</start>
|
||||
<lineBreakEscape>\</lineBreakEscape>
|
||||
<style>directive</style>
|
||||
<solitary />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<suffix>ul</suffix>
|
||||
<suffix>lu</suffix>
|
||||
<suffix>u</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<exponent>e</exponent>
|
||||
<suffix>ul</suffix>
|
||||
<suffix>lu</suffix>
|
||||
<suffix>u</suffix>
|
||||
<suffix>f</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>auto</keyword>
|
||||
<keyword>_Bool</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>char</keyword>
|
||||
<keyword>_Complex</keyword>
|
||||
<keyword>const</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>double</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>enum</keyword>
|
||||
<keyword>extern</keyword>
|
||||
<keyword>float</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>_Imaginary</keyword>
|
||||
<keyword>inline</keyword>
|
||||
<keyword>int</keyword>
|
||||
<keyword>long</keyword>
|
||||
<keyword>register</keyword>
|
||||
<keyword>restrict</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>short</keyword>
|
||||
<keyword>signed</keyword>
|
||||
<keyword>sizeof</keyword>
|
||||
<keyword>static</keyword>
|
||||
<keyword>struct</keyword>
|
||||
<keyword>switch</keyword>
|
||||
<keyword>typedef</keyword>
|
||||
<keyword>union</keyword>
|
||||
<keyword>unsigned</keyword>
|
||||
<keyword>void</keyword>
|
||||
<keyword>volatile</keyword>
|
||||
<keyword>while</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,151 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for C++
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/**</start>
|
||||
<end>*/</end>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<start><![CDATA[/// ]]></start>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">//</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<!-- use the online-comment highlighter to detect directives -->
|
||||
<start>#</start>
|
||||
<lineBreakEscape>\</lineBreakEscape>
|
||||
<style>directive</style>
|
||||
<solitary/>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<suffix>ul</suffix>
|
||||
<suffix>lu</suffix>
|
||||
<suffix>u</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<exponent>e</exponent>
|
||||
<suffix>ul</suffix>
|
||||
<suffix>lu</suffix>
|
||||
<suffix>u</suffix>
|
||||
<suffix>f</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<!-- C keywords -->
|
||||
<keyword>auto</keyword>
|
||||
<keyword>_Bool</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>char</keyword>
|
||||
<keyword>_Complex</keyword>
|
||||
<keyword>const</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>double</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>enum</keyword>
|
||||
<keyword>extern</keyword>
|
||||
<keyword>float</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>_Imaginary</keyword>
|
||||
<keyword>inline</keyword>
|
||||
<keyword>int</keyword>
|
||||
<keyword>long</keyword>
|
||||
<keyword>register</keyword>
|
||||
<keyword>restrict</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>short</keyword>
|
||||
<keyword>signed</keyword>
|
||||
<keyword>sizeof</keyword>
|
||||
<keyword>static</keyword>
|
||||
<keyword>struct</keyword>
|
||||
<keyword>switch</keyword>
|
||||
<keyword>typedef</keyword>
|
||||
<keyword>union</keyword>
|
||||
<keyword>unsigned</keyword>
|
||||
<keyword>void</keyword>
|
||||
<keyword>volatile</keyword>
|
||||
<keyword>while</keyword>
|
||||
<!-- C++ keywords -->
|
||||
<keyword>asm</keyword>
|
||||
<keyword>dynamic_cast</keyword>
|
||||
<keyword>namespace</keyword>
|
||||
<keyword>reinterpret_cast</keyword>
|
||||
<keyword>try</keyword>
|
||||
<keyword>bool</keyword>
|
||||
<keyword>explicit</keyword>
|
||||
<keyword>new</keyword>
|
||||
<keyword>static_cast</keyword>
|
||||
<keyword>typeid</keyword>
|
||||
<keyword>catch</keyword>
|
||||
<keyword>false</keyword>
|
||||
<keyword>operator</keyword>
|
||||
<keyword>template</keyword>
|
||||
<keyword>typename</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>friend</keyword>
|
||||
<keyword>private</keyword>
|
||||
<keyword>this</keyword>
|
||||
<keyword>using</keyword>
|
||||
<keyword>const_cast</keyword>
|
||||
<keyword>inline</keyword>
|
||||
<keyword>public</keyword>
|
||||
<keyword>throw</keyword>
|
||||
<keyword>virtual</keyword>
|
||||
<keyword>delete</keyword>
|
||||
<keyword>mutable</keyword>
|
||||
<keyword>protected</keyword>
|
||||
<keyword>true</keyword>
|
||||
<keyword>wchar_t</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,194 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for C#
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/**</start>
|
||||
<end>*/</end>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<start>///</start>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">//</highlighter>
|
||||
<highlighter type="annotation">
|
||||
<!-- annotations are called (custom) "attributes" in .NET -->
|
||||
<start>[</start>
|
||||
<end>]</end>
|
||||
<valueStart>(</valueStart>
|
||||
<valueEnd>)</valueEnd>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<!-- C# supports a couple of directives -->
|
||||
<start>#</start>
|
||||
<lineBreakEscape>\</lineBreakEscape>
|
||||
<style>directive</style>
|
||||
<solitary/>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<!-- strings starting with an "@" can span multiple lines -->
|
||||
<string>@"</string>
|
||||
<endString>"</endString>
|
||||
<escape>\</escape>
|
||||
<spanNewLines />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<suffix>ul</suffix>
|
||||
<suffix>lu</suffix>
|
||||
<suffix>u</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<exponent>e</exponent>
|
||||
<suffix>ul</suffix>
|
||||
<suffix>lu</suffix>
|
||||
<suffix>u</suffix>
|
||||
<suffix>f</suffix>
|
||||
<suffix>d</suffix>
|
||||
<suffix>m</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>abstract</keyword>
|
||||
<keyword>as</keyword>
|
||||
<keyword>base</keyword>
|
||||
<keyword>bool</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>byte</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>catch</keyword>
|
||||
<keyword>char</keyword>
|
||||
<keyword>checked</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>const</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>decimal</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>delegate</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>double</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>enum</keyword>
|
||||
<keyword>event</keyword>
|
||||
<keyword>explicit</keyword>
|
||||
<keyword>extern</keyword>
|
||||
<keyword>false</keyword>
|
||||
<keyword>finally</keyword>
|
||||
<keyword>fixed</keyword>
|
||||
<keyword>float</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>foreach</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>implicit</keyword>
|
||||
<keyword>in</keyword>
|
||||
<keyword>int</keyword>
|
||||
<keyword>interface</keyword>
|
||||
<keyword>internal</keyword>
|
||||
<keyword>is</keyword>
|
||||
<keyword>lock</keyword>
|
||||
<keyword>long</keyword>
|
||||
<keyword>namespace</keyword>
|
||||
<keyword>new</keyword>
|
||||
<keyword>null</keyword>
|
||||
<keyword>object</keyword>
|
||||
<keyword>operator</keyword>
|
||||
<keyword>out</keyword>
|
||||
<keyword>override</keyword>
|
||||
<keyword>params</keyword>
|
||||
<keyword>private</keyword>
|
||||
<keyword>protected</keyword>
|
||||
<keyword>public</keyword>
|
||||
<keyword>readonly</keyword>
|
||||
<keyword>ref</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>sbyte</keyword>
|
||||
<keyword>sealed</keyword>
|
||||
<keyword>short</keyword>
|
||||
<keyword>sizeof</keyword>
|
||||
<keyword>stackalloc</keyword>
|
||||
<keyword>static</keyword>
|
||||
<keyword>string</keyword>
|
||||
<keyword>struct</keyword>
|
||||
<keyword>switch</keyword>
|
||||
<keyword>this</keyword>
|
||||
<keyword>throw</keyword>
|
||||
<keyword>true</keyword>
|
||||
<keyword>try</keyword>
|
||||
<keyword>typeof</keyword>
|
||||
<keyword>uint</keyword>
|
||||
<keyword>ulong</keyword>
|
||||
<keyword>unchecked</keyword>
|
||||
<keyword>unsafe</keyword>
|
||||
<keyword>ushort</keyword>
|
||||
<keyword>using</keyword>
|
||||
<keyword>virtual</keyword>
|
||||
<keyword>void</keyword>
|
||||
<keyword>volatile</keyword>
|
||||
<keyword>while</keyword>
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<!-- special words, not really keywords -->
|
||||
<keyword>add</keyword>
|
||||
<keyword>alias</keyword>
|
||||
<keyword>from</keyword>
|
||||
<keyword>get</keyword>
|
||||
<keyword>global</keyword>
|
||||
<keyword>group</keyword>
|
||||
<keyword>into</keyword>
|
||||
<keyword>join</keyword>
|
||||
<keyword>orderby</keyword>
|
||||
<keyword>partial</keyword>
|
||||
<keyword>remove</keyword>
|
||||
<keyword>select</keyword>
|
||||
<keyword>set</keyword>
|
||||
<keyword>value</keyword>
|
||||
<keyword>where</keyword>
|
||||
<keyword>yield</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,176 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for CSS files
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2011-2012 Martin Hujer, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Martin Hujer <mhujer at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
Reference: http://www.w3.org/TR/CSS21/propidx.html
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
<spanNewLines/>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
<spanNewLines/>
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
</highlighter>
|
||||
<highlighter type="word">
|
||||
<word>@charset</word>
|
||||
<word>@import</word>
|
||||
<word>@media</word>
|
||||
<word>@page</word>
|
||||
<style>directive</style>
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<partChars>-</partChars>
|
||||
<keyword>azimuth</keyword>
|
||||
<keyword>background-attachment</keyword>
|
||||
<keyword>background-color</keyword>
|
||||
<keyword>background-image</keyword>
|
||||
<keyword>background-position</keyword>
|
||||
<keyword>background-repeat</keyword>
|
||||
<keyword>background</keyword>
|
||||
<keyword>border-collapse</keyword>
|
||||
<keyword>border-color</keyword>
|
||||
<keyword>border-spacing</keyword>
|
||||
<keyword>border-style</keyword>
|
||||
<keyword>border-top</keyword>
|
||||
<keyword>border-right</keyword>
|
||||
<keyword>border-bottom</keyword>
|
||||
<keyword>border-left</keyword>
|
||||
<keyword>border-top-color</keyword>
|
||||
<keyword>border-right-color</keyword>
|
||||
<keyword>border-bottom-color</keyword>
|
||||
<keyword>border-left-color</keyword>
|
||||
<keyword>border-top-style</keyword>
|
||||
<keyword>border-right-style</keyword>
|
||||
<keyword>border-bottom-style</keyword>
|
||||
<keyword>border-left-style</keyword>
|
||||
<keyword>border-top-width</keyword>
|
||||
<keyword>border-right-width</keyword>
|
||||
<keyword>border-bottom-width</keyword>
|
||||
<keyword>border-left-width</keyword>
|
||||
<keyword>border-width</keyword>
|
||||
<keyword>border</keyword>
|
||||
<keyword>bottom</keyword>
|
||||
<keyword>caption-side</keyword>
|
||||
<keyword>clear</keyword>
|
||||
<keyword>clip</keyword>
|
||||
<keyword>color</keyword>
|
||||
<keyword>content</keyword>
|
||||
<keyword>counter-increment</keyword>
|
||||
<keyword>counter-reset</keyword>
|
||||
<keyword>cue-after</keyword>
|
||||
<keyword>cue-before</keyword>
|
||||
<keyword>cue</keyword>
|
||||
<keyword>cursor</keyword>
|
||||
<keyword>direction</keyword>
|
||||
<keyword>display</keyword>
|
||||
<keyword>elevation</keyword>
|
||||
<keyword>empty-cells</keyword>
|
||||
<keyword>float</keyword>
|
||||
<keyword>font-family</keyword>
|
||||
<keyword>font-size</keyword>
|
||||
<keyword>font-style</keyword>
|
||||
<keyword>font-variant</keyword>
|
||||
<keyword>font-weight</keyword>
|
||||
<keyword>font</keyword>
|
||||
<keyword>height</keyword>
|
||||
<keyword>left</keyword>
|
||||
<keyword>letter-spacing</keyword>
|
||||
<keyword>line-height</keyword>
|
||||
<keyword>list-style-image</keyword>
|
||||
<keyword>list-style-position</keyword>
|
||||
<keyword>list-style-type</keyword>
|
||||
<keyword>list-style</keyword>
|
||||
<keyword>margin-right</keyword>
|
||||
<keyword>margin-left</keyword>
|
||||
<keyword>margin-top</keyword>
|
||||
<keyword>margin-bottom</keyword>
|
||||
<keyword>margin</keyword>
|
||||
<keyword>max-height</keyword>
|
||||
<keyword>max-width</keyword>
|
||||
<keyword>min-height</keyword>
|
||||
<keyword>min-width</keyword>
|
||||
<keyword>orphans</keyword>
|
||||
<keyword>outline-color</keyword>
|
||||
<keyword>outline-style</keyword>
|
||||
<keyword>outline-width</keyword>
|
||||
<keyword>outline</keyword>
|
||||
<keyword>overflow</keyword>
|
||||
<keyword>padding-top</keyword>
|
||||
<keyword>padding-right</keyword>
|
||||
<keyword>padding-bottom</keyword>
|
||||
<keyword>padding-left</keyword>
|
||||
<keyword>padding</keyword>
|
||||
<keyword>page-break-after</keyword>
|
||||
<keyword>page-break-before</keyword>
|
||||
<keyword>page-break-inside</keyword>
|
||||
<keyword>pause-after</keyword>
|
||||
<keyword>pause-before</keyword>
|
||||
<keyword>pause</keyword>
|
||||
<keyword>pitch-range</keyword>
|
||||
<keyword>pitch</keyword>
|
||||
<keyword>play-during</keyword>
|
||||
<keyword>position</keyword>
|
||||
<keyword>quotes</keyword>
|
||||
<keyword>richness</keyword>
|
||||
<keyword>right</keyword>
|
||||
<keyword>speak-header</keyword>
|
||||
<keyword>speak-numeral</keyword>
|
||||
<keyword>speak-punctuation</keyword>
|
||||
<keyword>speak</keyword>
|
||||
<keyword>speech-rate</keyword>
|
||||
<keyword>stress</keyword>
|
||||
<keyword>table-layout</keyword>
|
||||
<keyword>text-align</keyword>
|
||||
<keyword>text-decoration</keyword>
|
||||
<keyword>text-indent</keyword>
|
||||
<keyword>text-transform</keyword>
|
||||
<keyword>top</keyword>
|
||||
<keyword>unicode-bidi</keyword>
|
||||
<keyword>vertical-align</keyword>
|
||||
<keyword>visibility</keyword>
|
||||
<keyword>voice-family</keyword>
|
||||
<keyword>volume</keyword>
|
||||
<keyword>white-space</keyword>
|
||||
<keyword>widows</keyword>
|
||||
<keyword>width</keyword>
|
||||
<keyword>word-spacing</keyword>
|
||||
<keyword>z-index</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,122 +0,0 @@
|
||||
<?xml version='1.0'?>
|
||||
<!--
|
||||
|
||||
Bakalarska prace: Zvyraznovani syntaxe v XSLT
|
||||
Michal Molhanec 2005
|
||||
|
||||
myxml-hl.xml - konfigurace zvyraznovace XML, ktera zvlast zvyrazni
|
||||
HTML elementy a XSL elementy
|
||||
|
||||
This file has been customized for the Asciidoctor project (http://asciidoctor.org).
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="xml">
|
||||
<elementSet>
|
||||
<style>htmltag</style>
|
||||
<element>a</element>
|
||||
<element>abbr</element>
|
||||
<element>address</element>
|
||||
<element>area</element>
|
||||
<element>article</element>
|
||||
<element>aside</element>
|
||||
<element>audio</element>
|
||||
<element>b</element>
|
||||
<element>base</element>
|
||||
<element>bdi</element>
|
||||
<element>blockquote</element>
|
||||
<element>body</element>
|
||||
<element>br</element>
|
||||
<element>button</element>
|
||||
<element>caption</element>
|
||||
<element>canvas</element>
|
||||
<element>cite</element>
|
||||
<element>code</element>
|
||||
<element>command</element>
|
||||
<element>col</element>
|
||||
<element>colgroup</element>
|
||||
<element>dd</element>
|
||||
<element>del</element>
|
||||
<element>dialog</element>
|
||||
<element>div</element>
|
||||
<element>dl</element>
|
||||
<element>dt</element>
|
||||
<element>em</element>
|
||||
<element>embed</element>
|
||||
<element>fieldset</element>
|
||||
<element>figcaption</element>
|
||||
<element>figure</element>
|
||||
<element>font</element>
|
||||
<element>form</element>
|
||||
<element>footer</element>
|
||||
<element>h1</element>
|
||||
<element>h2</element>
|
||||
<element>h3</element>
|
||||
<element>h4</element>
|
||||
<element>h5</element>
|
||||
<element>h6</element>
|
||||
<element>head</element>
|
||||
<element>header</element>
|
||||
<element>hr</element>
|
||||
<element>html</element>
|
||||
<element>i</element>
|
||||
<element>iframe</element>
|
||||
<element>img</element>
|
||||
<element>input</element>
|
||||
<element>ins</element>
|
||||
<element>kbd</element>
|
||||
<element>label</element>
|
||||
<element>legend</element>
|
||||
<element>li</element>
|
||||
<element>link</element>
|
||||
<element>map</element>
|
||||
<element>mark</element>
|
||||
<element>menu</element>
|
||||
<element>menu</element>
|
||||
<element>meta</element>
|
||||
<element>nav</element>
|
||||
<element>noscript</element>
|
||||
<element>object</element>
|
||||
<element>ol</element>
|
||||
<element>optgroup</element>
|
||||
<element>option</element>
|
||||
<element>p</element>
|
||||
<element>param</element>
|
||||
<element>pre</element>
|
||||
<element>q</element>
|
||||
<element>samp</element>
|
||||
<element>script</element>
|
||||
<element>section</element>
|
||||
<element>select</element>
|
||||
<element>small</element>
|
||||
<element>source</element>
|
||||
<element>span</element>
|
||||
<element>strong</element>
|
||||
<element>style</element>
|
||||
<element>sub</element>
|
||||
<element>summary</element>
|
||||
<element>sup</element>
|
||||
<element>table</element>
|
||||
<element>tbody</element>
|
||||
<element>td</element>
|
||||
<element>textarea</element>
|
||||
<element>tfoot</element>
|
||||
<element>th</element>
|
||||
<element>thead</element>
|
||||
<element>time</element>
|
||||
<element>title</element>
|
||||
<element>tr</element>
|
||||
<element>track</element>
|
||||
<element>u</element>
|
||||
<element>ul</element>
|
||||
<element>var</element>
|
||||
<element>video</element>
|
||||
<element>wbr</element>
|
||||
<element>xmp</element>
|
||||
<ignoreCase/>
|
||||
</elementSet>
|
||||
<elementPrefix>
|
||||
<style>namespace</style>
|
||||
<prefix>xsl:</prefix>
|
||||
</elementPrefix>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,45 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for ini files
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">;</highlighter>
|
||||
<highlighter type="regex">
|
||||
<!-- ini sections -->
|
||||
<pattern>^(\[.+\]\s*)$</pattern>
|
||||
<style>keyword</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<!-- the keys in an ini section -->
|
||||
<pattern>^(.+)(?==)</pattern>
|
||||
<style>attribute</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,117 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for Java
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/**</start>
|
||||
<end>*/</end>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">//</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="annotation">
|
||||
<start>@</start>
|
||||
<valueStart>(</valueStart>
|
||||
<valueEnd>)</valueEnd>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<exponent>e</exponent>
|
||||
<suffix>f</suffix>
|
||||
<suffix>d</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>abstract</keyword>
|
||||
<keyword>boolean</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>byte</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>catch</keyword>
|
||||
<keyword>char</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>const</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>double</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>extends</keyword>
|
||||
<keyword>final</keyword>
|
||||
<keyword>finally</keyword>
|
||||
<keyword>float</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>implements</keyword>
|
||||
<keyword>import</keyword>
|
||||
<keyword>instanceof</keyword>
|
||||
<keyword>int</keyword>
|
||||
<keyword>interface</keyword>
|
||||
<keyword>long</keyword>
|
||||
<keyword>native</keyword>
|
||||
<keyword>new</keyword>
|
||||
<keyword>package</keyword>
|
||||
<keyword>private</keyword>
|
||||
<keyword>protected</keyword>
|
||||
<keyword>public</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>short</keyword>
|
||||
<keyword>static</keyword>
|
||||
<keyword>strictfp</keyword>
|
||||
<keyword>super</keyword>
|
||||
<keyword>switch</keyword>
|
||||
<keyword>synchronized</keyword>
|
||||
<keyword>this</keyword>
|
||||
<keyword>throw</keyword>
|
||||
<keyword>throws</keyword>
|
||||
<keyword>transient</keyword>
|
||||
<keyword>try</keyword>
|
||||
<keyword>void</keyword>
|
||||
<keyword>volatile</keyword>
|
||||
<keyword>while</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,147 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for JavaScript
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">//</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<exponent>e</exponent>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>break</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>catch</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>delete</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>finally</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>function</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>in</keyword>
|
||||
<keyword>instanceof</keyword>
|
||||
<keyword>new</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>switch</keyword>
|
||||
<keyword>this</keyword>
|
||||
<keyword>throw</keyword>
|
||||
<keyword>try</keyword>
|
||||
<keyword>typeof</keyword>
|
||||
<keyword>var</keyword>
|
||||
<keyword>void</keyword>
|
||||
<keyword>while</keyword>
|
||||
<keyword>with</keyword>
|
||||
<!-- future keywords -->
|
||||
<keyword>abstract</keyword>
|
||||
<keyword>boolean</keyword>
|
||||
<keyword>byte</keyword>
|
||||
<keyword>char</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>const</keyword>
|
||||
<keyword>debugger</keyword>
|
||||
<keyword>double</keyword>
|
||||
<keyword>enum</keyword>
|
||||
<keyword>export</keyword>
|
||||
<keyword>extends</keyword>
|
||||
<keyword>final</keyword>
|
||||
<keyword>float</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>implements</keyword>
|
||||
<keyword>import</keyword>
|
||||
<keyword>int</keyword>
|
||||
<keyword>interface</keyword>
|
||||
<keyword>long</keyword>
|
||||
<keyword>native</keyword>
|
||||
<keyword>package</keyword>
|
||||
<keyword>private</keyword>
|
||||
<keyword>protected</keyword>
|
||||
<keyword>public</keyword>
|
||||
<keyword>short</keyword>
|
||||
<keyword>static</keyword>
|
||||
<keyword>super</keyword>
|
||||
<keyword>synchronized</keyword>
|
||||
<keyword>throws</keyword>
|
||||
<keyword>transient</keyword>
|
||||
<keyword>volatile</keyword>
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>prototype</keyword>
|
||||
<!-- Global Objects -->
|
||||
<keyword>Array</keyword>
|
||||
<keyword>Boolean</keyword>
|
||||
<keyword>Date</keyword>
|
||||
<keyword>Error</keyword>
|
||||
<keyword>EvalError</keyword>
|
||||
<keyword>Function</keyword>
|
||||
<keyword>Math</keyword>
|
||||
<keyword>Number</keyword>
|
||||
<keyword>Object</keyword>
|
||||
<keyword>RangeError</keyword>
|
||||
<keyword>ReferenceError</keyword>
|
||||
<keyword>RegExp</keyword>
|
||||
<keyword>String</keyword>
|
||||
<keyword>SyntaxError</keyword>
|
||||
<keyword>TypeError</keyword>
|
||||
<keyword>URIError</keyword>
|
||||
<!-- Global functions -->
|
||||
<keyword>decodeURI</keyword>
|
||||
<keyword>decodeURIComponent</keyword>
|
||||
<keyword>encodeURI</keyword>
|
||||
<keyword>encodeURIComponent</keyword>
|
||||
<keyword>eval</keyword>
|
||||
<keyword>isFinite</keyword>
|
||||
<keyword>isNaN</keyword>
|
||||
<keyword>parseFloat</keyword>
|
||||
<keyword>parseInt</keyword>
|
||||
<!-- Global properties -->
|
||||
<keyword>Infinity</keyword>
|
||||
<keyword>NaN</keyword>
|
||||
<keyword>undefined</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,37 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="annotation">
|
||||
<start>@</start>
|
||||
<valueStart>(</valueStart>
|
||||
<valueEnd>)</valueEnd>
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<exponent>e</exponent>
|
||||
<suffix>f</suffix>
|
||||
<suffix>d</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>true</keyword>
|
||||
<keyword>false</keyword>
|
||||
</highlighter>
|
||||
<highlighter type="word">
|
||||
<word>{</word>
|
||||
<word>}</word>
|
||||
<word>,</word>
|
||||
<word>[</word>
|
||||
<word>]</word>
|
||||
<style>keyword</style>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,120 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for Perl
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="heredoc">
|
||||
<start><<</start>
|
||||
<quote>'</quote>
|
||||
<quote>"</quote>
|
||||
<noWhiteSpace/>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
<spanNewLines/>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>if</keyword>
|
||||
<keyword>unless</keyword>
|
||||
<keyword>while</keyword>
|
||||
<keyword>until</keyword>
|
||||
<keyword>foreach</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>elsif</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>when</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>given</keyword>
|
||||
<!-- Keywords related to the control flow of your perl program -->
|
||||
<keyword>caller</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>die</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>dump</keyword>
|
||||
<keyword>eval</keyword>
|
||||
<keyword>exit</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>last</keyword>
|
||||
<keyword>next</keyword>
|
||||
<keyword>redo</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>sub</keyword>
|
||||
<keyword>wantarray</keyword>
|
||||
<!-- Keywords related to scoping -->
|
||||
<keyword>caller</keyword>
|
||||
<keyword>import</keyword>
|
||||
<keyword>local</keyword>
|
||||
<keyword>my</keyword>
|
||||
<keyword>package</keyword>
|
||||
<keyword>use</keyword>
|
||||
<!-- Keywords related to perl modules -->
|
||||
<keyword>do</keyword>
|
||||
<keyword>import</keyword>
|
||||
<keyword>no</keyword>
|
||||
<keyword>package</keyword>
|
||||
<keyword>require</keyword>
|
||||
<keyword>use</keyword>
|
||||
<!-- Keywords related to classes and object-orientedness -->
|
||||
<keyword>bless</keyword>
|
||||
<keyword>dbmclose</keyword>
|
||||
<keyword>dbmopen</keyword>
|
||||
<keyword>package</keyword>
|
||||
<keyword>ref</keyword>
|
||||
<keyword>tie</keyword>
|
||||
<keyword>tied</keyword>
|
||||
<keyword>untie</keyword>
|
||||
<keyword>use</keyword>
|
||||
<!-- operators -->
|
||||
<keyword>and</keyword>
|
||||
<keyword>or</keyword>
|
||||
<keyword>not</keyword>
|
||||
<keyword>eq</keyword>
|
||||
<keyword>ne</keyword>
|
||||
<keyword>lt</keyword>
|
||||
<keyword>gt</keyword>
|
||||
<keyword>le</keyword>
|
||||
<keyword>ge</keyword>
|
||||
<keyword>cmp</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,154 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for PHP
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/**</start>
|
||||
<end>*/</end>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">
|
||||
<start><![CDATA[/// ]]></start>
|
||||
<style>doccomment</style>
|
||||
</highlighter>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">//</highlighter>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
<spanNewLines />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
<spanNewLines />
|
||||
</highlighter>
|
||||
<highlighter type="heredoc">
|
||||
<start><<<</start>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<exponent>e</exponent>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>and</keyword>
|
||||
<keyword>or</keyword>
|
||||
<keyword>xor</keyword>
|
||||
<keyword>__FILE__</keyword>
|
||||
<keyword>exception</keyword>
|
||||
<keyword>__LINE__</keyword>
|
||||
<keyword>array</keyword>
|
||||
<keyword>as</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>const</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>declare</keyword>
|
||||
<keyword>default</keyword>
|
||||
<keyword>die</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>echo</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>elseif</keyword>
|
||||
<keyword>empty</keyword>
|
||||
<keyword>enddeclare</keyword>
|
||||
<keyword>endfor</keyword>
|
||||
<keyword>endforeach</keyword>
|
||||
<keyword>endif</keyword>
|
||||
<keyword>endswitch</keyword>
|
||||
<keyword>endwhile</keyword>
|
||||
<keyword>eval</keyword>
|
||||
<keyword>exit</keyword>
|
||||
<keyword>extends</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>foreach</keyword>
|
||||
<keyword>function</keyword>
|
||||
<keyword>global</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>include</keyword>
|
||||
<keyword>include_once</keyword>
|
||||
<keyword>isset</keyword>
|
||||
<keyword>list</keyword>
|
||||
<keyword>new</keyword>
|
||||
<keyword>print</keyword>
|
||||
<keyword>require</keyword>
|
||||
<keyword>require_once</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>static</keyword>
|
||||
<keyword>switch</keyword>
|
||||
<keyword>unset</keyword>
|
||||
<keyword>use</keyword>
|
||||
<keyword>var</keyword>
|
||||
<keyword>while</keyword>
|
||||
<keyword>__FUNCTION__</keyword>
|
||||
<keyword>__CLASS__</keyword>
|
||||
<keyword>__METHOD__</keyword>
|
||||
<keyword>final</keyword>
|
||||
<keyword>php_user_filter</keyword>
|
||||
<keyword>interface</keyword>
|
||||
<keyword>implements</keyword>
|
||||
<keyword>extends</keyword>
|
||||
<keyword>public</keyword>
|
||||
<keyword>private</keyword>
|
||||
<keyword>protected</keyword>
|
||||
<keyword>abstract</keyword>
|
||||
<keyword>clone</keyword>
|
||||
<keyword>try</keyword>
|
||||
<keyword>catch</keyword>
|
||||
<keyword>throw</keyword>
|
||||
<keyword>cfunction</keyword>
|
||||
<keyword>old_function</keyword>
|
||||
<keyword>true</keyword>
|
||||
<keyword>false</keyword>
|
||||
<!-- PHP 5.3 -->
|
||||
<keyword>namespace</keyword>
|
||||
<keyword>__NAMESPACE__</keyword>
|
||||
<keyword>goto</keyword>
|
||||
<keyword>__DIR__</keyword>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="word">
|
||||
<!-- highlight the php open and close tags as directives -->
|
||||
<word>?></word>
|
||||
<word><?php</word>
|
||||
<word><?=</word>
|
||||
<style>directive</style>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,38 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for Java
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(.+?)(?==|:)</pattern>
|
||||
<style>attribute</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,100 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for Python
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="annotation">
|
||||
<!-- these are actually called decorators -->
|
||||
<start>@</start>
|
||||
<valueStart>(</valueStart>
|
||||
<valueEnd>)</valueEnd>
|
||||
</highlighter>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"""</string>
|
||||
<spanNewLines />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'''</string>
|
||||
<spanNewLines />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<exponent>e</exponent>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>and</keyword>
|
||||
<keyword>del</keyword>
|
||||
<keyword>from</keyword>
|
||||
<keyword>not</keyword>
|
||||
<keyword>while</keyword>
|
||||
<keyword>as</keyword>
|
||||
<keyword>elif</keyword>
|
||||
<keyword>global</keyword>
|
||||
<keyword>or</keyword>
|
||||
<keyword>with</keyword>
|
||||
<keyword>assert</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>pass</keyword>
|
||||
<keyword>yield</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>except</keyword>
|
||||
<keyword>import</keyword>
|
||||
<keyword>print</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>exec</keyword>
|
||||
<keyword>in</keyword>
|
||||
<keyword>raise</keyword>
|
||||
<keyword>continue</keyword>
|
||||
<keyword>finally</keyword>
|
||||
<keyword>is</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>def</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>lambda</keyword>
|
||||
<keyword>try</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,109 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for Ruby
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Michal Molhanec <mol1111 at users.sourceforge.net>
|
||||
Jirka Kosek <kosek at users.sourceforge.net>
|
||||
Michiel Hendriks <elmuerte at users.sourceforge.net>
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="heredoc">
|
||||
<start><<</start>
|
||||
<noWhiteSpace/>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>%Q{</string>
|
||||
<endString>}</endString>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>%/</string>
|
||||
<endString>/</endString>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>%q{</string>
|
||||
<endString>}</endString>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="hexnumber">
|
||||
<prefix>0x</prefix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<exponent>e</exponent>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>alias</keyword>
|
||||
<keyword>and</keyword>
|
||||
<keyword>BEGIN</keyword>
|
||||
<keyword>begin</keyword>
|
||||
<keyword>break</keyword>
|
||||
<keyword>case</keyword>
|
||||
<keyword>class</keyword>
|
||||
<keyword>def</keyword>
|
||||
<keyword>defined</keyword>
|
||||
<keyword>do</keyword>
|
||||
<keyword>else</keyword>
|
||||
<keyword>elsif</keyword>
|
||||
<keyword>END</keyword>
|
||||
<keyword>end</keyword>
|
||||
<keyword>ensure</keyword>
|
||||
<keyword>false</keyword>
|
||||
<keyword>for</keyword>
|
||||
<keyword>if</keyword>
|
||||
<keyword>in</keyword>
|
||||
<keyword>module</keyword>
|
||||
<keyword>next</keyword>
|
||||
<keyword>nil</keyword>
|
||||
<keyword>not</keyword>
|
||||
<keyword>or</keyword>
|
||||
<keyword>redo</keyword>
|
||||
<keyword>rescue</keyword>
|
||||
<keyword>retry</keyword>
|
||||
<keyword>return</keyword>
|
||||
<keyword>self</keyword>
|
||||
<keyword>super</keyword>
|
||||
<keyword>then</keyword>
|
||||
<keyword>true</keyword>
|
||||
<keyword>undef</keyword>
|
||||
<keyword>unless</keyword>
|
||||
<keyword>until</keyword>
|
||||
<keyword>when</keyword>
|
||||
<keyword>while</keyword>
|
||||
<keyword>yield</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,565 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!--
|
||||
|
||||
Syntax highlighting definition for SQL:1999
|
||||
|
||||
xslthl - XSLT Syntax Highlighting
|
||||
http://sourceforge.net/projects/xslthl/
|
||||
Copyright (C) 2012 Michiel Hendriks, Martin Hujer, k42b3
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
-->
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">--</highlighter>
|
||||
<highlighter type="multiline-comment">
|
||||
<start>/*</start>
|
||||
<end>*/</end>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<doubleEscapes />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>U'</string>
|
||||
<endString>'</endString>
|
||||
<doubleEscapes />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>B'</string>
|
||||
<endString>'</endString>
|
||||
<doubleEscapes />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>N'</string>
|
||||
<endString>'</endString>
|
||||
<doubleEscapes />
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>X'</string>
|
||||
<endString>'</endString>
|
||||
<doubleEscapes />
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<pointStarts />
|
||||
<exponent>e</exponent>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<ignoreCase />
|
||||
<!-- reserved -->
|
||||
<keyword>A</keyword>
|
||||
<keyword>ABS</keyword>
|
||||
<keyword>ABSOLUTE</keyword>
|
||||
<keyword>ACTION</keyword>
|
||||
<keyword>ADA</keyword>
|
||||
<keyword>ADMIN</keyword>
|
||||
<keyword>AFTER</keyword>
|
||||
<keyword>ALWAYS</keyword>
|
||||
<keyword>ASC</keyword>
|
||||
<keyword>ASSERTION</keyword>
|
||||
<keyword>ASSIGNMENT</keyword>
|
||||
<keyword>ATTRIBUTE</keyword>
|
||||
<keyword>ATTRIBUTES</keyword>
|
||||
<keyword>AVG</keyword>
|
||||
<keyword>BEFORE</keyword>
|
||||
<keyword>BERNOULLI</keyword>
|
||||
<keyword>BREADTH</keyword>
|
||||
<keyword>C</keyword>
|
||||
<keyword>CARDINALITY</keyword>
|
||||
<keyword>CASCADE</keyword>
|
||||
<keyword>CATALOG_NAME</keyword>
|
||||
<keyword>CATALOG</keyword>
|
||||
<keyword>CEIL</keyword>
|
||||
<keyword>CEILING</keyword>
|
||||
<keyword>CHAIN</keyword>
|
||||
<keyword>CHAR_LENGTH</keyword>
|
||||
<keyword>CHARACTER_LENGTH</keyword>
|
||||
<keyword>CHARACTER_SET_CATALOG</keyword>
|
||||
<keyword>CHARACTER_SET_NAME</keyword>
|
||||
<keyword>CHARACTER_SET_SCHEMA</keyword>
|
||||
<keyword>CHARACTERISTICS</keyword>
|
||||
<keyword>CHARACTERS</keyword>
|
||||
<keyword>CHECKED</keyword>
|
||||
<keyword>CLASS_ORIGIN</keyword>
|
||||
<keyword>COALESCE</keyword>
|
||||
<keyword>COBOL</keyword>
|
||||
<keyword>CODE_UNITS</keyword>
|
||||
<keyword>COLLATION_CATALOG</keyword>
|
||||
<keyword>COLLATION_NAME</keyword>
|
||||
<keyword>COLLATION_SCHEMA</keyword>
|
||||
<keyword>COLLATION</keyword>
|
||||
<keyword>COLLECT</keyword>
|
||||
<keyword>COLUMN_NAME</keyword>
|
||||
<keyword>COMMAND_FUNCTION_CODE</keyword>
|
||||
<keyword>COMMAND_FUNCTION</keyword>
|
||||
<keyword>COMMITTED</keyword>
|
||||
<keyword>CONDITION_NUMBER</keyword>
|
||||
<keyword>CONDITION</keyword>
|
||||
<keyword>CONNECTION_NAME</keyword>
|
||||
<keyword>CONSTRAINT_CATALOG</keyword>
|
||||
<keyword>CONSTRAINT_NAME</keyword>
|
||||
<keyword>CONSTRAINT_SCHEMA</keyword>
|
||||
<keyword>CONSTRAINTS</keyword>
|
||||
<keyword>CONSTRUCTORS</keyword>
|
||||
<keyword>CONTAINS</keyword>
|
||||
<keyword>CONVERT</keyword>
|
||||
<keyword>CORR</keyword>
|
||||
<keyword>COUNT</keyword>
|
||||
<keyword>COVAR_POP</keyword>
|
||||
<keyword>COVAR_SAMP</keyword>
|
||||
<keyword>CUME_DIST</keyword>
|
||||
<keyword>CURRENT_COLLATION</keyword>
|
||||
<keyword>CURSOR_NAME</keyword>
|
||||
<keyword>DATA</keyword>
|
||||
<keyword>DATETIME_INTERVAL_CODE</keyword>
|
||||
<keyword>DATETIME_INTERVAL_PRECISION</keyword>
|
||||
<keyword>DEFAULTS</keyword>
|
||||
<keyword>DEFERRABLE</keyword>
|
||||
<keyword>DEFERRED</keyword>
|
||||
<keyword>DEFINED</keyword>
|
||||
<keyword>DEFINER</keyword>
|
||||
<keyword>DEGREE</keyword>
|
||||
<keyword>DENSE_RANK</keyword>
|
||||
<keyword>DEPTH</keyword>
|
||||
<keyword>DERIVED</keyword>
|
||||
<keyword>DESC</keyword>
|
||||
<keyword>DESCRIPTOR</keyword>
|
||||
<keyword>DIAGNOSTICS</keyword>
|
||||
<keyword>DISPATCH</keyword>
|
||||
<keyword>DOMAIN</keyword>
|
||||
<keyword>DYNAMIC_FUNCTION_CODE</keyword>
|
||||
<keyword>DYNAMIC_FUNCTION</keyword>
|
||||
<keyword>EQUALS</keyword>
|
||||
<keyword>EVERY</keyword>
|
||||
<keyword>EXCEPTION</keyword>
|
||||
<keyword>EXCLUDE</keyword>
|
||||
<keyword>EXCLUDING</keyword>
|
||||
<keyword>EXP</keyword>
|
||||
<keyword>EXTRACT</keyword>
|
||||
<keyword>FINAL</keyword>
|
||||
<keyword>FIRST</keyword>
|
||||
<keyword>FLOOR</keyword>
|
||||
<keyword>FOLLOWING</keyword>
|
||||
<keyword>FORTRAN</keyword>
|
||||
<keyword>FOUND</keyword>
|
||||
<keyword>FUSION</keyword>
|
||||
<keyword>G</keyword>
|
||||
<keyword>GENERAL</keyword>
|
||||
<keyword>GO</keyword>
|
||||
<keyword>GOTO</keyword>
|
||||
<keyword>GRANTED</keyword>
|
||||
<keyword>HIERARCHY</keyword>
|
||||
<keyword>IMPLEMENTATION</keyword>
|
||||
<keyword>INCLUDING</keyword>
|
||||
<keyword>INCREMENT</keyword>
|
||||
<keyword>INITIALLY</keyword>
|
||||
<keyword>INSTANCE</keyword>
|
||||
<keyword>INSTANTIABLE</keyword>
|
||||
<keyword>INTERSECTION</keyword>
|
||||
<keyword>INVOKER</keyword>
|
||||
<keyword>ISOLATION</keyword>
|
||||
<keyword>K</keyword>
|
||||
<keyword>KEY_MEMBER</keyword>
|
||||
<keyword>KEY_TYPE</keyword>
|
||||
<keyword>KEY</keyword>
|
||||
<keyword>LAST</keyword>
|
||||
<keyword>LENGTH</keyword>
|
||||
<keyword>LEVEL</keyword>
|
||||
<keyword>LN</keyword>
|
||||
<keyword>LOCATOR</keyword>
|
||||
<keyword>LOWER</keyword>
|
||||
<keyword>M</keyword>
|
||||
<keyword>MAP</keyword>
|
||||
<keyword>MATCHED</keyword>
|
||||
<keyword>MAX</keyword>
|
||||
<keyword>MAXVALUE</keyword>
|
||||
<keyword>MESSAGE_LENGTH</keyword>
|
||||
<keyword>MESSAGE_OCTET_LENGTH</keyword>
|
||||
<keyword>MESSAGE_TEXT</keyword>
|
||||
<keyword>MIN</keyword>
|
||||
<keyword>MINVALUE</keyword>
|
||||
<keyword>MOD</keyword>
|
||||
<keyword>MORE</keyword>
|
||||
<keyword>MUMPS</keyword>
|
||||
<keyword>NAME</keyword>
|
||||
<keyword>NAMES</keyword>
|
||||
<keyword>NESTING</keyword>
|
||||
<keyword>NEXT</keyword>
|
||||
<keyword>NORMALIZE</keyword>
|
||||
<keyword>NORMALIZED</keyword>
|
||||
<keyword>NULLABLE</keyword>
|
||||
<keyword>NULLIF</keyword>
|
||||
<keyword>NULLS</keyword>
|
||||
<keyword>NUMBER</keyword>
|
||||
<keyword>OBJECT</keyword>
|
||||
<keyword>OCTET_LENGTH</keyword>
|
||||
<keyword>OCTETS</keyword>
|
||||
<keyword>OPTION</keyword>
|
||||
<keyword>OPTIONS</keyword>
|
||||
<keyword>ORDERING</keyword>
|
||||
<keyword>ORDINALITY</keyword>
|
||||
<keyword>OTHERS</keyword>
|
||||
<keyword>OVERLAY</keyword>
|
||||
<keyword>OVERRIDING</keyword>
|
||||
<keyword>PAD</keyword>
|
||||
<keyword>PARAMETER_MODE</keyword>
|
||||
<keyword>PARAMETER_NAME</keyword>
|
||||
<keyword>PARAMETER_ORDINAL_POSITION</keyword>
|
||||
<keyword>PARAMETER_SPECIFIC_CATALOG</keyword>
|
||||
<keyword>PARAMETER_SPECIFIC_NAME</keyword>
|
||||
<keyword>PARAMETER_SPECIFIC_SCHEMA</keyword>
|
||||
<keyword>PARTIAL</keyword>
|
||||
<keyword>PASCAL</keyword>
|
||||
<keyword>PATH</keyword>
|
||||
<keyword>PERCENT_RANK</keyword>
|
||||
<keyword>PERCENTILE_CONT</keyword>
|
||||
<keyword>PERCENTILE_DISC</keyword>
|
||||
<keyword>PLACING</keyword>
|
||||
<keyword>PLI</keyword>
|
||||
<keyword>POSITION</keyword>
|
||||
<keyword>POWER</keyword>
|
||||
<keyword>PRECEDING</keyword>
|
||||
<keyword>PRESERVE</keyword>
|
||||
<keyword>PRIOR</keyword>
|
||||
<keyword>PRIVILEGES</keyword>
|
||||
<keyword>PUBLIC</keyword>
|
||||
<keyword>RANK</keyword>
|
||||
<keyword>READ</keyword>
|
||||
<keyword>RELATIVE</keyword>
|
||||
<keyword>REPEATABLE</keyword>
|
||||
<keyword>RESTART</keyword>
|
||||
<keyword>RETURNED_CARDINALITY</keyword>
|
||||
<keyword>RETURNED_LENGTH</keyword>
|
||||
<keyword>RETURNED_OCTET_LENGTH</keyword>
|
||||
<keyword>RETURNED_SQLSTATE</keyword>
|
||||
<keyword>ROLE</keyword>
|
||||
<keyword>ROUTINE_CATALOG</keyword>
|
||||
<keyword>ROUTINE_NAME</keyword>
|
||||
<keyword>ROUTINE_SCHEMA</keyword>
|
||||
<keyword>ROUTINE</keyword>
|
||||
<keyword>ROW_COUNT</keyword>
|
||||
<keyword>ROW_NUMBER</keyword>
|
||||
<keyword>SCALE</keyword>
|
||||
<keyword>SCHEMA_NAME</keyword>
|
||||
<keyword>SCHEMA</keyword>
|
||||
<keyword>SCOPE_CATALOG</keyword>
|
||||
<keyword>SCOPE_NAME</keyword>
|
||||
<keyword>SCOPE_SCHEMA</keyword>
|
||||
<keyword>SECTION</keyword>
|
||||
<keyword>SECURITY</keyword>
|
||||
<keyword>SELF</keyword>
|
||||
<keyword>SEQUENCE</keyword>
|
||||
<keyword>SERIALIZABLE</keyword>
|
||||
<keyword>SERVER_NAME</keyword>
|
||||
<keyword>SESSION</keyword>
|
||||
<keyword>SETS</keyword>
|
||||
<keyword>SIMPLE</keyword>
|
||||
<keyword>SIZE</keyword>
|
||||
<keyword>SOURCE</keyword>
|
||||
<keyword>SPACE</keyword>
|
||||
<keyword>SPECIFIC_NAME</keyword>
|
||||
<keyword>SQRT</keyword>
|
||||
<keyword>STATE</keyword>
|
||||
<keyword>STATEMENT</keyword>
|
||||
<keyword>STDDEV_POP</keyword>
|
||||
<keyword>STDDEV_SAMP</keyword>
|
||||
<keyword>STRUCTURE</keyword>
|
||||
<keyword>STYLE</keyword>
|
||||
<keyword>SUBCLASS_ORIGIN</keyword>
|
||||
<keyword>SUBSTRING</keyword>
|
||||
<keyword>SUM</keyword>
|
||||
<keyword>TABLE_NAME</keyword>
|
||||
<keyword>TABLESAMPLE</keyword>
|
||||
<keyword>TEMPORARY</keyword>
|
||||
<keyword>TIES</keyword>
|
||||
<keyword>TOP_LEVEL_COUNT</keyword>
|
||||
<keyword>TRANSACTION_ACTIVE</keyword>
|
||||
<keyword>TRANSACTION</keyword>
|
||||
<keyword>TRANSACTIONS_COMMITTED</keyword>
|
||||
<keyword>TRANSACTIONS_ROLLED_BACK</keyword>
|
||||
<keyword>TRANSFORM</keyword>
|
||||
<keyword>TRANSFORMS</keyword>
|
||||
<keyword>TRANSLATE</keyword>
|
||||
<keyword>TRIGGER_CATALOG</keyword>
|
||||
<keyword>TRIGGER_NAME</keyword>
|
||||
<keyword>TRIGGER_SCHEMA</keyword>
|
||||
<keyword>TRIM</keyword>
|
||||
<keyword>TYPE</keyword>
|
||||
<keyword>UNBOUNDED</keyword>
|
||||
<keyword>UNCOMMITTED</keyword>
|
||||
<keyword>UNDER</keyword>
|
||||
<keyword>UNNAMED</keyword>
|
||||
<keyword>USAGE</keyword>
|
||||
<keyword>USER_DEFINED_TYPE_CATALOG</keyword>
|
||||
<keyword>USER_DEFINED_TYPE_CODE</keyword>
|
||||
<keyword>USER_DEFINED_TYPE_NAME</keyword>
|
||||
<keyword>USER_DEFINED_TYPE_SCHEMA</keyword>
|
||||
<keyword>VIEW</keyword>
|
||||
<keyword>WORK</keyword>
|
||||
<keyword>WRITE</keyword>
|
||||
<keyword>ZONE</keyword>
|
||||
<!-- non reserved -->
|
||||
<keyword>ADD</keyword>
|
||||
<keyword>ALL</keyword>
|
||||
<keyword>ALLOCATE</keyword>
|
||||
<keyword>ALTER</keyword>
|
||||
<keyword>AND</keyword>
|
||||
<keyword>ANY</keyword>
|
||||
<keyword>ARE</keyword>
|
||||
<keyword>ARRAY</keyword>
|
||||
<keyword>AS</keyword>
|
||||
<keyword>ASENSITIVE</keyword>
|
||||
<keyword>ASYMMETRIC</keyword>
|
||||
<keyword>AT</keyword>
|
||||
<keyword>ATOMIC</keyword>
|
||||
<keyword>AUTHORIZATION</keyword>
|
||||
<keyword>BEGIN</keyword>
|
||||
<keyword>BETWEEN</keyword>
|
||||
<keyword>BIGINT</keyword>
|
||||
<keyword>BINARY</keyword>
|
||||
<keyword>BLOB</keyword>
|
||||
<keyword>BOOLEAN</keyword>
|
||||
<keyword>BOTH</keyword>
|
||||
<keyword>BY</keyword>
|
||||
<keyword>CALL</keyword>
|
||||
<keyword>CALLED</keyword>
|
||||
<keyword>CASCADED</keyword>
|
||||
<keyword>CASE</keyword>
|
||||
<keyword>CAST</keyword>
|
||||
<keyword>CHAR</keyword>
|
||||
<keyword>CHARACTER</keyword>
|
||||
<keyword>CHECK</keyword>
|
||||
<keyword>CLOB</keyword>
|
||||
<keyword>CLOSE</keyword>
|
||||
<keyword>COLLATE</keyword>
|
||||
<keyword>COLUMN</keyword>
|
||||
<keyword>COMMIT</keyword>
|
||||
<keyword>CONNECT</keyword>
|
||||
<keyword>CONSTRAINT</keyword>
|
||||
<keyword>CONTINUE</keyword>
|
||||
<keyword>CORRESPONDING</keyword>
|
||||
<keyword>CREATE</keyword>
|
||||
<keyword>CROSS</keyword>
|
||||
<keyword>CUBE</keyword>
|
||||
<keyword>CURRENT_DATE</keyword>
|
||||
<keyword>CURRENT_DEFAULT_TRANSFORM_GROUP</keyword>
|
||||
<keyword>CURRENT_PATH</keyword>
|
||||
<keyword>CURRENT_ROLE</keyword>
|
||||
<keyword>CURRENT_TIME</keyword>
|
||||
<keyword>CURRENT_TIMESTAMP</keyword>
|
||||
<keyword>CURRENT_TRANSFORM_GROUP_FOR_TYPE</keyword>
|
||||
<keyword>CURRENT_USER</keyword>
|
||||
<keyword>CURRENT</keyword>
|
||||
<keyword>CURSOR</keyword>
|
||||
<keyword>CYCLE</keyword>
|
||||
<keyword>DATE</keyword>
|
||||
<keyword>DAY</keyword>
|
||||
<keyword>DEALLOCATE</keyword>
|
||||
<keyword>DEC</keyword>
|
||||
<keyword>DECIMAL</keyword>
|
||||
<keyword>DECLARE</keyword>
|
||||
<keyword>DEFAULT</keyword>
|
||||
<keyword>DELETE</keyword>
|
||||
<keyword>DEREF</keyword>
|
||||
<keyword>DESCRIBE</keyword>
|
||||
<keyword>DETERMINISTIC</keyword>
|
||||
<keyword>DISCONNECT</keyword>
|
||||
<keyword>DISTINCT</keyword>
|
||||
<keyword>DOUBLE</keyword>
|
||||
<keyword>DROP</keyword>
|
||||
<keyword>DYNAMIC</keyword>
|
||||
<keyword>EACH</keyword>
|
||||
<keyword>ELEMENT</keyword>
|
||||
<keyword>ELSE</keyword>
|
||||
<keyword>END</keyword>
|
||||
<keyword>END-EXEC</keyword>
|
||||
<keyword>ESCAPE</keyword>
|
||||
<keyword>EXCEPT</keyword>
|
||||
<keyword>EXEC</keyword>
|
||||
<keyword>EXECUTE</keyword>
|
||||
<keyword>EXISTS</keyword>
|
||||
<keyword>EXTERNAL</keyword>
|
||||
<keyword>FALSE</keyword>
|
||||
<keyword>FETCH</keyword>
|
||||
<keyword>FILTER</keyword>
|
||||
<keyword>FLOAT</keyword>
|
||||
<keyword>FOR</keyword>
|
||||
<keyword>FOREIGN</keyword>
|
||||
<keyword>FREE</keyword>
|
||||
<keyword>FROM</keyword>
|
||||
<keyword>FULL</keyword>
|
||||
<keyword>FUNCTION</keyword>
|
||||
<keyword>GET</keyword>
|
||||
<keyword>GLOBAL</keyword>
|
||||
<keyword>GRANT</keyword>
|
||||
<keyword>GROUP</keyword>
|
||||
<keyword>GROUPING</keyword>
|
||||
<keyword>HAVING</keyword>
|
||||
<keyword>HOLD</keyword>
|
||||
<keyword>HOUR</keyword>
|
||||
<keyword>IDENTITY</keyword>
|
||||
<keyword>IMMEDIATE</keyword>
|
||||
<keyword>IN</keyword>
|
||||
<keyword>INDICATOR</keyword>
|
||||
<keyword>INNER</keyword>
|
||||
<keyword>INOUT</keyword>
|
||||
<keyword>INPUT</keyword>
|
||||
<keyword>INSENSITIVE</keyword>
|
||||
<keyword>INSERT</keyword>
|
||||
<keyword>INT</keyword>
|
||||
<keyword>INTEGER</keyword>
|
||||
<keyword>INTERSECT</keyword>
|
||||
<keyword>INTERVAL</keyword>
|
||||
<keyword>INTO</keyword>
|
||||
<keyword>IS</keyword>
|
||||
<keyword>ISOLATION</keyword>
|
||||
<keyword>JOIN</keyword>
|
||||
<keyword>LANGUAGE</keyword>
|
||||
<keyword>LARGE</keyword>
|
||||
<keyword>LATERAL</keyword>
|
||||
<keyword>LEADING</keyword>
|
||||
<keyword>LEFT</keyword>
|
||||
<keyword>LIKE</keyword>
|
||||
<keyword>LOCAL</keyword>
|
||||
<keyword>LOCALTIME</keyword>
|
||||
<keyword>LOCALTIMESTAMP</keyword>
|
||||
<keyword>MATCH</keyword>
|
||||
<keyword>MEMBER</keyword>
|
||||
<keyword>MERGE</keyword>
|
||||
<keyword>METHOD</keyword>
|
||||
<keyword>MINUTE</keyword>
|
||||
<keyword>MODIFIES</keyword>
|
||||
<keyword>MODULE</keyword>
|
||||
<keyword>MONTH</keyword>
|
||||
<keyword>MULTISET</keyword>
|
||||
<keyword>NATIONAL</keyword>
|
||||
<keyword>NATURAL</keyword>
|
||||
<keyword>NCHAR</keyword>
|
||||
<keyword>NCLOB</keyword>
|
||||
<keyword>NEW</keyword>
|
||||
<keyword>NO</keyword>
|
||||
<keyword>NONE</keyword>
|
||||
<keyword>NOT</keyword>
|
||||
<keyword>NULL</keyword>
|
||||
<keyword>NUMERIC</keyword>
|
||||
<keyword>OF</keyword>
|
||||
<keyword>OLD</keyword>
|
||||
<keyword>ON</keyword>
|
||||
<keyword>ONLY</keyword>
|
||||
<keyword>OPEN</keyword>
|
||||
<keyword>OR</keyword>
|
||||
<keyword>ORDER</keyword>
|
||||
<keyword>OUT</keyword>
|
||||
<keyword>OUTER</keyword>
|
||||
<keyword>OUTPUT</keyword>
|
||||
<keyword>OVER</keyword>
|
||||
<keyword>OVERLAPS</keyword>
|
||||
<keyword>PARAMETER</keyword>
|
||||
<keyword>PARTITION</keyword>
|
||||
<keyword>PRECISION</keyword>
|
||||
<keyword>PREPARE</keyword>
|
||||
<keyword>PRIMARY</keyword>
|
||||
<keyword>PROCEDURE</keyword>
|
||||
<keyword>RANGE</keyword>
|
||||
<keyword>READS</keyword>
|
||||
<keyword>REAL</keyword>
|
||||
<keyword>RECURSIVE</keyword>
|
||||
<keyword>REF</keyword>
|
||||
<keyword>REFERENCES</keyword>
|
||||
<keyword>REFERENCING</keyword>
|
||||
<keyword>REGR_AVGX</keyword>
|
||||
<keyword>REGR_AVGY</keyword>
|
||||
<keyword>REGR_COUNT</keyword>
|
||||
<keyword>REGR_INTERCEPT</keyword>
|
||||
<keyword>REGR_R2</keyword>
|
||||
<keyword>REGR_SLOPE</keyword>
|
||||
<keyword>REGR_SXX</keyword>
|
||||
<keyword>REGR_SXY</keyword>
|
||||
<keyword>REGR_SYY</keyword>
|
||||
<keyword>RELEASE</keyword>
|
||||
<keyword>RESULT</keyword>
|
||||
<keyword>RETURN</keyword>
|
||||
<keyword>RETURNS</keyword>
|
||||
<keyword>REVOKE</keyword>
|
||||
<keyword>RIGHT</keyword>
|
||||
<keyword>ROLLBACK</keyword>
|
||||
<keyword>ROLLUP</keyword>
|
||||
<keyword>ROW</keyword>
|
||||
<keyword>ROWS</keyword>
|
||||
<keyword>SAVEPOINT</keyword>
|
||||
<keyword>SCROLL</keyword>
|
||||
<keyword>SEARCH</keyword>
|
||||
<keyword>SECOND</keyword>
|
||||
<keyword>SELECT</keyword>
|
||||
<keyword>SENSITIVE</keyword>
|
||||
<keyword>SESSION_USER</keyword>
|
||||
<keyword>SET</keyword>
|
||||
<keyword>SIMILAR</keyword>
|
||||
<keyword>SMALLINT</keyword>
|
||||
<keyword>SOME</keyword>
|
||||
<keyword>SPECIFIC</keyword>
|
||||
<keyword>SPECIFICTYPE</keyword>
|
||||
<keyword>SQL</keyword>
|
||||
<keyword>SQLEXCEPTION</keyword>
|
||||
<keyword>SQLSTATE</keyword>
|
||||
<keyword>SQLWARNING</keyword>
|
||||
<keyword>START</keyword>
|
||||
<keyword>STATIC</keyword>
|
||||
<keyword>SUBMULTISET</keyword>
|
||||
<keyword>SYMMETRIC</keyword>
|
||||
<keyword>SYSTEM_USER</keyword>
|
||||
<keyword>SYSTEM</keyword>
|
||||
<keyword>TABLE</keyword>
|
||||
<keyword>THEN</keyword>
|
||||
<keyword>TIME</keyword>
|
||||
<keyword>TIMESTAMP</keyword>
|
||||
<keyword>TIMEZONE_HOUR</keyword>
|
||||
<keyword>TIMEZONE_MINUTE</keyword>
|
||||
<keyword>TO</keyword>
|
||||
<keyword>TRAILING</keyword>
|
||||
<keyword>TRANSLATION</keyword>
|
||||
<keyword>TREAT</keyword>
|
||||
<keyword>TRIGGER</keyword>
|
||||
<keyword>TRUE</keyword>
|
||||
<keyword>UESCAPE</keyword>
|
||||
<keyword>UNION</keyword>
|
||||
<keyword>UNIQUE</keyword>
|
||||
<keyword>UNKNOWN</keyword>
|
||||
<keyword>UNNEST</keyword>
|
||||
<keyword>UPDATE</keyword>
|
||||
<keyword>UPPER</keyword>
|
||||
<keyword>USER</keyword>
|
||||
<keyword>USING</keyword>
|
||||
<keyword>VALUE</keyword>
|
||||
<keyword>VALUES</keyword>
|
||||
<keyword>VAR_POP</keyword>
|
||||
<keyword>VAR_SAMP</keyword>
|
||||
<keyword>VARCHAR</keyword>
|
||||
<keyword>VARYING</keyword>
|
||||
<keyword>WHEN</keyword>
|
||||
<keyword>WHENEVER</keyword>
|
||||
<keyword>WHERE</keyword>
|
||||
<keyword>WIDTH_BUCKET</keyword>
|
||||
<keyword>WINDOW</keyword>
|
||||
<keyword>WITH</keyword>
|
||||
<keyword>WITHIN</keyword>
|
||||
<keyword>WITHOUT</keyword>
|
||||
<keyword>YEAR</keyword>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,47 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<highlighters>
|
||||
<highlighter type="oneline-comment">#</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>"</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="string">
|
||||
<string>'</string>
|
||||
<escape>\</escape>
|
||||
</highlighter>
|
||||
<highlighter type="annotation">
|
||||
<start>@</start>
|
||||
<valueStart>(</valueStart>
|
||||
<valueEnd>)</valueEnd>
|
||||
</highlighter>
|
||||
<highlighter type="number">
|
||||
<point>.</point>
|
||||
<exponent>e</exponent>
|
||||
<suffix>f</suffix>
|
||||
<suffix>d</suffix>
|
||||
<suffix>l</suffix>
|
||||
<ignoreCase />
|
||||
</highlighter>
|
||||
<highlighter type="keywords">
|
||||
<keyword>true</keyword>
|
||||
<keyword>false</keyword>
|
||||
</highlighter>
|
||||
<highlighter type="word">
|
||||
<word>{</word>
|
||||
<word>}</word>
|
||||
<word>,</word>
|
||||
<word>[</word>
|
||||
<word>]</word>
|
||||
<style>keyword</style>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(---)$</pattern>
|
||||
<style>comment</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
<highlighter type="regex">
|
||||
<pattern>^(.+?)(?==|:)</pattern>
|
||||
<style>attribute</style>
|
||||
<flags>MULTILINE</flags>
|
||||
</highlighter>
|
||||
</highlighters>
|
||||
@@ -1,599 +0,0 @@
|
||||
/* Javadoc style sheet */
|
||||
/*
|
||||
Overall document style
|
||||
*/
|
||||
|
||||
@import url('resources/fonts/dejavu.css');
|
||||
|
||||
body {
|
||||
background-color:#ffffff;
|
||||
color:#353833;
|
||||
font-family:'DejaVu Sans', Arial, Helvetica, sans-serif;
|
||||
font-size:14px;
|
||||
margin:0;
|
||||
}
|
||||
a:link, a:visited {
|
||||
text-decoration:none;
|
||||
color:#4A6782;
|
||||
}
|
||||
a:hover, a:focus {
|
||||
text-decoration:none;
|
||||
color:#bb7a2a;
|
||||
}
|
||||
a:active {
|
||||
text-decoration:none;
|
||||
color:#4A6782;
|
||||
}
|
||||
a[name] {
|
||||
color:#353833;
|
||||
}
|
||||
a[name]:hover {
|
||||
text-decoration:none;
|
||||
color:#353833;
|
||||
}
|
||||
pre {
|
||||
font-family:'DejaVu Sans Mono', monospace;
|
||||
font-size:14px;
|
||||
}
|
||||
h1 {
|
||||
font-size:20px;
|
||||
}
|
||||
h2 {
|
||||
font-size:18px;
|
||||
}
|
||||
h3 {
|
||||
font-size:16px;
|
||||
font-style:italic;
|
||||
}
|
||||
h4 {
|
||||
font-size:13px;
|
||||
}
|
||||
h5 {
|
||||
font-size:12px;
|
||||
}
|
||||
h6 {
|
||||
font-size:11px;
|
||||
}
|
||||
ul {
|
||||
list-style-type:disc;
|
||||
}
|
||||
code, tt {
|
||||
font-family:'DejaVu Sans Mono', monospace;
|
||||
font-size:14px;
|
||||
padding-top:4px;
|
||||
margin-top:8px;
|
||||
line-height:1.4em;
|
||||
}
|
||||
dt code {
|
||||
font-family:'DejaVu Sans Mono', monospace;
|
||||
font-size:14px;
|
||||
padding-top:4px;
|
||||
}
|
||||
table tr td dt code {
|
||||
font-family:'DejaVu Sans Mono', monospace;
|
||||
font-size:14px;
|
||||
vertical-align:top;
|
||||
padding-top:4px;
|
||||
}
|
||||
sup {
|
||||
font-size:8px;
|
||||
}
|
||||
/*
|
||||
Document title and Copyright styles
|
||||
*/
|
||||
.clear {
|
||||
clear:both;
|
||||
height:0px;
|
||||
overflow:hidden;
|
||||
}
|
||||
.aboutLanguage {
|
||||
float:right;
|
||||
padding:0px 21px;
|
||||
font-size:11px;
|
||||
z-index:200;
|
||||
margin-top:-9px;
|
||||
}
|
||||
.legalCopy {
|
||||
margin-left:.5em;
|
||||
}
|
||||
.bar a, .bar a:link, .bar a:visited, .bar a:active {
|
||||
color:#FFFFFF;
|
||||
text-decoration:none;
|
||||
}
|
||||
.bar a:hover, .bar a:focus {
|
||||
color:#bb7a2a;
|
||||
}
|
||||
.tab {
|
||||
background-color:#0066FF;
|
||||
color:#ffffff;
|
||||
padding:8px;
|
||||
width:5em;
|
||||
font-weight:bold;
|
||||
}
|
||||
/*
|
||||
Navigation bar styles
|
||||
*/
|
||||
.bar {
|
||||
background-color:#4D7A97;
|
||||
color:#FFFFFF;
|
||||
padding:.8em .5em .4em .8em;
|
||||
height:auto;/*height:1.8em;*/
|
||||
font-size:11px;
|
||||
margin:0;
|
||||
}
|
||||
.topNav {
|
||||
background-color:#4D7A97;
|
||||
color:#FFFFFF;
|
||||
float:left;
|
||||
padding:0;
|
||||
width:100%;
|
||||
clear:right;
|
||||
height:2.8em;
|
||||
padding-top:10px;
|
||||
overflow:hidden;
|
||||
font-size:12px;
|
||||
}
|
||||
.bottomNav {
|
||||
margin-top:10px;
|
||||
background-color:#4D7A97;
|
||||
color:#FFFFFF;
|
||||
float:left;
|
||||
padding:0;
|
||||
width:100%;
|
||||
clear:right;
|
||||
height:2.8em;
|
||||
padding-top:10px;
|
||||
overflow:hidden;
|
||||
font-size:12px;
|
||||
}
|
||||
.subNav {
|
||||
background-color:#dee3e9;
|
||||
float:left;
|
||||
width:100%;
|
||||
overflow:hidden;
|
||||
font-size:12px;
|
||||
}
|
||||
.subNav div {
|
||||
clear:left;
|
||||
float:left;
|
||||
padding:0 0 5px 6px;
|
||||
text-transform:uppercase;
|
||||
}
|
||||
ul.navList, ul.subNavList {
|
||||
float:left;
|
||||
margin:0 25px 0 0;
|
||||
padding:0;
|
||||
}
|
||||
ul.navList li{
|
||||
list-style:none;
|
||||
float:left;
|
||||
padding: 5px 6px;
|
||||
text-transform:uppercase;
|
||||
}
|
||||
ul.subNavList li{
|
||||
list-style:none;
|
||||
float:left;
|
||||
}
|
||||
.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited {
|
||||
color:#FFFFFF;
|
||||
text-decoration:none;
|
||||
text-transform:uppercase;
|
||||
}
|
||||
.topNav a:hover, .bottomNav a:hover {
|
||||
text-decoration:none;
|
||||
color:#bb7a2a;
|
||||
text-transform:uppercase;
|
||||
}
|
||||
.navBarCell1Rev {
|
||||
background-color:#F8981D;
|
||||
color:#253441;
|
||||
margin: auto 5px;
|
||||
}
|
||||
.skipNav {
|
||||
position:absolute;
|
||||
top:auto;
|
||||
left:-9999px;
|
||||
overflow:hidden;
|
||||
}
|
||||
/*
|
||||
Page header and footer styles
|
||||
*/
|
||||
.header, .footer {
|
||||
clear:both;
|
||||
margin:0 20px;
|
||||
padding:5px 0 0 0;
|
||||
}
|
||||
.indexHeader {
|
||||
margin:10px;
|
||||
position:relative;
|
||||
}
|
||||
.indexHeader span{
|
||||
margin-right:15px;
|
||||
}
|
||||
.indexHeader h1 {
|
||||
font-size:13px;
|
||||
}
|
||||
.title {
|
||||
color:#2c4557;
|
||||
margin:10px 0;
|
||||
}
|
||||
.subTitle {
|
||||
margin:5px 0 0 0;
|
||||
}
|
||||
.header ul {
|
||||
margin:0 0 15px 0;
|
||||
padding:0;
|
||||
}
|
||||
.footer ul {
|
||||
margin:20px 0 5px 0;
|
||||
}
|
||||
.header ul li, .footer ul li {
|
||||
list-style:none;
|
||||
font-size:13px;
|
||||
}
|
||||
/*
|
||||
Heading styles
|
||||
*/
|
||||
div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 {
|
||||
background-color:#dee3e9;
|
||||
border:1px solid #d0d9e0;
|
||||
margin:0 0 6px -8px;
|
||||
padding:7px 5px;
|
||||
}
|
||||
ul.blockList ul.blockList ul.blockList li.blockList h3 {
|
||||
background-color:#dee3e9;
|
||||
border:1px solid #d0d9e0;
|
||||
margin:0 0 6px -8px;
|
||||
padding:7px 5px;
|
||||
}
|
||||
ul.blockList ul.blockList li.blockList h3 {
|
||||
padding:0;
|
||||
margin:15px 0;
|
||||
}
|
||||
ul.blockList li.blockList h2 {
|
||||
padding:0px 0 20px 0;
|
||||
}
|
||||
/*
|
||||
Page layout container styles
|
||||
*/
|
||||
.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer {
|
||||
clear:both;
|
||||
padding:10px 20px;
|
||||
position:relative;
|
||||
}
|
||||
.indexContainer {
|
||||
margin:10px;
|
||||
position:relative;
|
||||
font-size:12px;
|
||||
}
|
||||
.indexContainer h2 {
|
||||
font-size:13px;
|
||||
padding:0 0 3px 0;
|
||||
}
|
||||
.indexContainer ul {
|
||||
margin:0;
|
||||
padding:0;
|
||||
}
|
||||
.indexContainer ul li {
|
||||
list-style:none;
|
||||
padding-top:2px;
|
||||
}
|
||||
.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt {
|
||||
font-size:12px;
|
||||
font-weight:bold;
|
||||
margin:10px 0 0 0;
|
||||
color:#4E4E4E;
|
||||
}
|
||||
.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd {
|
||||
margin:5px 0 10px 0px;
|
||||
font-size:14px;
|
||||
font-family:'DejaVu Sans Mono',monospace;
|
||||
}
|
||||
.serializedFormContainer dl.nameValue dt {
|
||||
margin-left:1px;
|
||||
font-size:1.1em;
|
||||
display:inline;
|
||||
font-weight:bold;
|
||||
}
|
||||
.serializedFormContainer dl.nameValue dd {
|
||||
margin:0 0 0 1px;
|
||||
font-size:1.1em;
|
||||
display:inline;
|
||||
}
|
||||
/*
|
||||
List styles
|
||||
*/
|
||||
ul.horizontal li {
|
||||
display:inline;
|
||||
font-size:0.9em;
|
||||
}
|
||||
ul.inheritance {
|
||||
margin:0;
|
||||
padding:0;
|
||||
}
|
||||
ul.inheritance li {
|
||||
display:inline;
|
||||
list-style:none;
|
||||
}
|
||||
ul.inheritance li ul.inheritance {
|
||||
margin-left:15px;
|
||||
padding-left:15px;
|
||||
padding-top:1px;
|
||||
}
|
||||
ul.blockList, ul.blockListLast {
|
||||
margin:10px 0 10px 0;
|
||||
padding:0;
|
||||
}
|
||||
ul.blockList li.blockList, ul.blockListLast li.blockList {
|
||||
list-style:none;
|
||||
margin-bottom:15px;
|
||||
line-height:1.4;
|
||||
}
|
||||
ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList {
|
||||
padding:0px 20px 5px 10px;
|
||||
border:1px solid #ededed;
|
||||
background-color:#f8f8f8;
|
||||
}
|
||||
ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList {
|
||||
padding:0 0 5px 8px;
|
||||
background-color:#ffffff;
|
||||
border:none;
|
||||
}
|
||||
ul.blockList ul.blockList ul.blockList ul.blockList li.blockList {
|
||||
margin-left:0;
|
||||
padding-left:0;
|
||||
padding-bottom:15px;
|
||||
border:none;
|
||||
}
|
||||
ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast {
|
||||
list-style:none;
|
||||
border-bottom:none;
|
||||
padding-bottom:0;
|
||||
}
|
||||
table tr td dl, table tr td dl dt, table tr td dl dd {
|
||||
margin-top:0;
|
||||
margin-bottom:1px;
|
||||
}
|
||||
/*
|
||||
Table styles
|
||||
*/
|
||||
.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary {
|
||||
width:100%;
|
||||
border-left:1px solid #EEE;
|
||||
border-right:1px solid #EEE;
|
||||
border-bottom:1px solid #EEE;
|
||||
}
|
||||
.overviewSummary, .memberSummary {
|
||||
padding:0px;
|
||||
}
|
||||
.overviewSummary caption, .memberSummary caption, .typeSummary caption,
|
||||
.useSummary caption, .constantsSummary caption, .deprecatedSummary caption {
|
||||
position:relative;
|
||||
text-align:left;
|
||||
background-repeat:no-repeat;
|
||||
color:#253441;
|
||||
font-weight:bold;
|
||||
clear:none;
|
||||
overflow:hidden;
|
||||
padding:0px;
|
||||
padding-top:10px;
|
||||
padding-left:1px;
|
||||
margin:0px;
|
||||
white-space:pre;
|
||||
}
|
||||
.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link,
|
||||
.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link,
|
||||
.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover,
|
||||
.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover,
|
||||
.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active,
|
||||
.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active,
|
||||
.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited,
|
||||
.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited {
|
||||
color:#FFFFFF;
|
||||
}
|
||||
.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span,
|
||||
.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span {
|
||||
white-space:nowrap;
|
||||
padding-top:5px;
|
||||
padding-left:12px;
|
||||
padding-right:12px;
|
||||
padding-bottom:7px;
|
||||
display:inline-block;
|
||||
float:left;
|
||||
background-color:#F8981D;
|
||||
border: none;
|
||||
height:16px;
|
||||
}
|
||||
.memberSummary caption span.activeTableTab span {
|
||||
white-space:nowrap;
|
||||
padding-top:5px;
|
||||
padding-left:12px;
|
||||
padding-right:12px;
|
||||
margin-right:3px;
|
||||
display:inline-block;
|
||||
float:left;
|
||||
background-color:#F8981D;
|
||||
height:16px;
|
||||
}
|
||||
.memberSummary caption span.tableTab span {
|
||||
white-space:nowrap;
|
||||
padding-top:5px;
|
||||
padding-left:12px;
|
||||
padding-right:12px;
|
||||
margin-right:3px;
|
||||
display:inline-block;
|
||||
float:left;
|
||||
background-color:#4D7A97;
|
||||
height:16px;
|
||||
}
|
||||
.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab {
|
||||
padding-top:0px;
|
||||
padding-left:0px;
|
||||
padding-right:0px;
|
||||
background-image:none;
|
||||
float:none;
|
||||
display:inline;
|
||||
}
|
||||
.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd,
|
||||
.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd {
|
||||
display:none;
|
||||
width:5px;
|
||||
position:relative;
|
||||
float:left;
|
||||
background-color:#F8981D;
|
||||
}
|
||||
.memberSummary .activeTableTab .tabEnd {
|
||||
display:none;
|
||||
width:5px;
|
||||
margin-right:3px;
|
||||
position:relative;
|
||||
float:left;
|
||||
background-color:#F8981D;
|
||||
}
|
||||
.memberSummary .tableTab .tabEnd {
|
||||
display:none;
|
||||
width:5px;
|
||||
margin-right:3px;
|
||||
position:relative;
|
||||
background-color:#4D7A97;
|
||||
float:left;
|
||||
|
||||
}
|
||||
.overviewSummary td, .memberSummary td, .typeSummary td,
|
||||
.useSummary td, .constantsSummary td, .deprecatedSummary td {
|
||||
text-align:left;
|
||||
padding:0px 0px 12px 10px;
|
||||
width:100%;
|
||||
}
|
||||
th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th,
|
||||
td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{
|
||||
vertical-align:top;
|
||||
padding-right:0px;
|
||||
padding-top:8px;
|
||||
padding-bottom:3px;
|
||||
}
|
||||
th.colFirst, th.colLast, th.colOne, .constantsSummary th {
|
||||
background:#dee3e9;
|
||||
text-align:left;
|
||||
padding:8px 3px 3px 7px;
|
||||
}
|
||||
td.colFirst, th.colFirst {
|
||||
white-space:nowrap;
|
||||
font-size:13px;
|
||||
}
|
||||
td.colLast, th.colLast {
|
||||
font-size:13px;
|
||||
}
|
||||
td.colOne, th.colOne {
|
||||
font-size:13px;
|
||||
}
|
||||
.overviewSummary td.colFirst, .overviewSummary th.colFirst,
|
||||
.overviewSummary td.colOne, .overviewSummary th.colOne,
|
||||
.memberSummary td.colFirst, .memberSummary th.colFirst,
|
||||
.memberSummary td.colOne, .memberSummary th.colOne,
|
||||
.typeSummary td.colFirst{
|
||||
width:25%;
|
||||
vertical-align:top;
|
||||
}
|
||||
td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover {
|
||||
font-weight:bold;
|
||||
}
|
||||
.tableSubHeadingColor {
|
||||
background-color:#EEEEFF;
|
||||
}
|
||||
.altColor {
|
||||
background-color:#FFFFFF;
|
||||
}
|
||||
.rowColor {
|
||||
background-color:#EEEEEF;
|
||||
}
|
||||
/*
|
||||
Content styles
|
||||
*/
|
||||
.description pre {
|
||||
margin-top:0;
|
||||
}
|
||||
.deprecatedContent {
|
||||
margin:0;
|
||||
padding:10px 0;
|
||||
}
|
||||
.docSummary {
|
||||
padding:0;
|
||||
}
|
||||
|
||||
ul.blockList ul.blockList ul.blockList li.blockList h3 {
|
||||
font-style:normal;
|
||||
}
|
||||
|
||||
div.block {
|
||||
font-size:14px;
|
||||
font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif;
|
||||
}
|
||||
|
||||
td.colLast div {
|
||||
padding-top:0px;
|
||||
}
|
||||
|
||||
|
||||
td.colLast a {
|
||||
padding-bottom:3px;
|
||||
}
|
||||
/*
|
||||
Formatting effect styles
|
||||
*/
|
||||
.sourceLineNo {
|
||||
color:green;
|
||||
padding:0 30px 0 0;
|
||||
}
|
||||
h1.hidden {
|
||||
visibility:hidden;
|
||||
overflow:hidden;
|
||||
font-size:10px;
|
||||
}
|
||||
.block {
|
||||
display:block;
|
||||
margin:3px 10px 2px 0px;
|
||||
color:#474747;
|
||||
}
|
||||
.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink,
|
||||
.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel,
|
||||
.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink {
|
||||
font-weight:bold;
|
||||
}
|
||||
.deprecationComment, .emphasizedPhrase, .interfaceName {
|
||||
font-style:italic;
|
||||
}
|
||||
|
||||
div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase,
|
||||
div.block div.block span.interfaceName {
|
||||
font-style:normal;
|
||||
}
|
||||
|
||||
div.contentContainer ul.blockList li.blockList h2{
|
||||
padding-bottom:0px;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
Spring
|
||||
*/
|
||||
|
||||
pre.code {
|
||||
background-color: #F8F8F8;
|
||||
border: 1px solid #CCCCCC;
|
||||
border-radius: 3px 3px 3px 3px;
|
||||
overflow: auto;
|
||||
padding: 10px;
|
||||
margin: 4px 20px 2px 0px;
|
||||
}
|
||||
|
||||
pre.code code, pre.code code * {
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
pre.code code, pre.code code * {
|
||||
padding: 0 !important;
|
||||
margin: 0 !important;
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:mvn="http://maven.apache.org/POM/4.0.0"
|
||||
version="1.0">
|
||||
|
||||
<xsl:output method="text" encoding="UTF-8" indent="no"/>
|
||||
|
||||
<xsl:template match="/">
|
||||
<xsl:text>|===
</xsl:text>
|
||||
<xsl:text>| Group ID | Artifact ID | Version
</xsl:text>
|
||||
<xsl:for-each select="//mvn:dependency">
|
||||
<xsl:sort select="mvn:groupId"/>
|
||||
<xsl:sort select="mvn:artifactId"/>
|
||||
<xsl:text>
</xsl:text>
|
||||
<xsl:text>| `</xsl:text>
|
||||
<xsl:copy-of select="mvn:groupId"/>
|
||||
<xsl:text>`
</xsl:text>
|
||||
<xsl:text>| `</xsl:text>
|
||||
<xsl:copy-of select="mvn:artifactId"/>
|
||||
<xsl:text>`
</xsl:text>
|
||||
<xsl:text>| </xsl:text>
|
||||
<xsl:copy-of select="mvn:version"/>
|
||||
<xsl:text>
</xsl:text>
|
||||
</xsl:for-each>
|
||||
<xsl:text>|===</xsl:text>
|
||||
</xsl:template>
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,5 +1,5 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
@@ -10,14 +10,23 @@
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>2.1.0.M1</version>
|
||||
<version>3.2.1</version>
|
||||
</parent>
|
||||
|
||||
<properties>
|
||||
<avro.version>1.8.2</avro.version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-actuator</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-configuration-processor</artifactId>
|
||||
@@ -45,22 +54,54 @@
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka-test</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<classifier>test</classifier>
|
||||
</dependency>
|
||||
<!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
|
||||
<dependency>
|
||||
<groupId>log4j</groupId>
|
||||
<artifactId>log4j</artifactId>
|
||||
<version>1.2.17</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-autoconfigure-processor</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.13</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.13</artifactId>
|
||||
<classifier>test</classifier>
|
||||
</dependency>
|
||||
<!-- Following dependency is only provided for testing and won't be packaged with the binder apps-->
|
||||
<dependency>
|
||||
<groupId>org.apache.avro</groupId>
|
||||
<artifactId>avro</artifactId>
|
||||
<version>${avro.version}</version>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.avro</groupId>
|
||||
<artifactId>avro-maven-plugin</artifactId>
|
||||
<version>${avro.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>generate-test-sources</phase>
|
||||
<goals>
|
||||
<goal>schema</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputDirectory>${project.basedir}/target/generated-test-sources</outputDirectory>
|
||||
<testOutputDirectory>${project.basedir}/target/generated-test-sources</testOutputDirectory>
|
||||
<testSourceDirectory>${project.basedir}/src/test/resources/avro</testSourceDirectory>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
|
||||
@@ -0,0 +1,616 @@
|
||||
/*
|
||||
* Copyright 2019-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.header.Header;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.Topology;
|
||||
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
|
||||
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
|
||||
import org.apache.kafka.streams.kstream.Consumed;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.processor.Processor;
|
||||
import org.apache.kafka.streams.processor.ProcessorContext;
|
||||
import org.apache.kafka.streams.processor.TimestampExtractor;
|
||||
import org.apache.kafka.streams.state.KeyValueStore;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.boot.context.properties.bind.BindContext;
|
||||
import org.springframework.boot.context.properties.bind.BindHandler;
|
||||
import org.springframework.boot.context.properties.bind.Bindable;
|
||||
import org.springframework.boot.context.properties.bind.PropertySourcesPlaceholdersResolver;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertySources;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.core.env.ConfigurableEnvironment;
|
||||
import org.springframework.core.env.MutablePropertySources;
|
||||
import org.springframework.integration.support.utils.IntegrationUtils;
|
||||
import org.springframework.kafka.config.KafkaStreamsConfiguration;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBeanConfigurer;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.streams.RecoveringDeserializationExceptionHandler;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public abstract class AbstractKafkaStreamsBinderProcessor implements ApplicationContextAware {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(AbstractKafkaStreamsBinderProcessor.class);
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
|
||||
private final CleanupConfig cleanupConfig;
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
protected ConfigurableApplicationContext applicationContext;
|
||||
|
||||
public AbstractKafkaStreamsBinderProcessor(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver, CleanupConfig cleanupConfig) {
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
this.cleanupConfig = cleanupConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void setApplicationContext(ApplicationContext applicationContext)
|
||||
throws BeansException {
|
||||
this.applicationContext = (ConfigurableApplicationContext) applicationContext;
|
||||
}
|
||||
|
||||
protected Topology.AutoOffsetReset getAutoOffsetReset(String inboundName, KafkaStreamsConsumerProperties extendedConsumerProperties) {
|
||||
final KafkaConsumerProperties.StartOffset startOffset = extendedConsumerProperties
|
||||
.getStartOffset();
|
||||
Topology.AutoOffsetReset autoOffsetReset = null;
|
||||
if (startOffset != null) {
|
||||
switch (startOffset) {
|
||||
case earliest:
|
||||
autoOffsetReset = Topology.AutoOffsetReset.EARLIEST;
|
||||
break;
|
||||
case latest:
|
||||
autoOffsetReset = Topology.AutoOffsetReset.LATEST;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (extendedConsumerProperties.isResetOffsets()) {
|
||||
AbstractKafkaStreamsBinderProcessor.LOG.warn("Detected resetOffsets configured on binding "
|
||||
+ inboundName + ". "
|
||||
+ "Setting resetOffsets in Kafka Streams binder does not have any effect.");
|
||||
}
|
||||
return autoOffsetReset;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
protected void handleKTableGlobalKTableInputs(Object[] arguments, int index, String input, Class<?> parameterType, Object targetBean,
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean, StreamsBuilder streamsBuilder,
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties,
|
||||
Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
|
||||
if (firstBuild) {
|
||||
addStateStoreBeans(streamsBuilder);
|
||||
}
|
||||
if (parameterType.isAssignableFrom(KTable.class)) {
|
||||
String materializedAs = extendedConsumerProperties.getMaterializedAs();
|
||||
String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
|
||||
KTable<?, ?> table = getKTable(extendedConsumerProperties, streamsBuilder, keySerde, valueSerde, materializedAs,
|
||||
bindingDestination, autoOffsetReset);
|
||||
KTableBoundElementFactory.KTableWrapper kTableWrapper =
|
||||
(KTableBoundElementFactory.KTableWrapper) targetBean;
|
||||
//wrap the proxy created during the initial target type binding with real object (KTable)
|
||||
kTableWrapper.wrap((KTable<Object, Object>) table);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(input, streamsBuilderFactoryBean);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addConsumerPropertiesPerSbfb(streamsBuilderFactoryBean,
|
||||
bindingServiceProperties.getConsumerProperties(input));
|
||||
arguments[index] = table;
|
||||
}
|
||||
else if (parameterType.isAssignableFrom(GlobalKTable.class)) {
|
||||
String materializedAs = extendedConsumerProperties.getMaterializedAs();
|
||||
String bindingDestination = this.bindingServiceProperties.getBindingDestination(input);
|
||||
GlobalKTable<?, ?> table = getGlobalKTable(extendedConsumerProperties, streamsBuilder, keySerde, valueSerde, materializedAs,
|
||||
bindingDestination, autoOffsetReset);
|
||||
GlobalKTableBoundElementFactory.GlobalKTableWrapper globalKTableWrapper =
|
||||
(GlobalKTableBoundElementFactory.GlobalKTableWrapper) targetBean;
|
||||
//wrap the proxy created during the initial target type binding with real object (KTable)
|
||||
globalKTableWrapper.wrap((GlobalKTable<Object, Object>) table);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactoryPerBinding(input, streamsBuilderFactoryBean);
|
||||
this.kafkaStreamsBindingInformationCatalogue.addConsumerPropertiesPerSbfb(streamsBuilderFactoryBean,
|
||||
bindingServiceProperties.getConsumerProperties(input));
|
||||
arguments[index] = table;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({ "unchecked" })
|
||||
protected StreamsBuilderFactoryBean buildStreamsBuilderAndRetrieveConfig(String beanNamePostPrefix,
|
||||
ApplicationContext applicationContext, String inboundName,
|
||||
KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties,
|
||||
StreamsBuilderFactoryBeanConfigurer customizer,
|
||||
ConfigurableEnvironment environment, BindingProperties bindingProperties) {
|
||||
ConfigurableListableBeanFactory beanFactory = this.applicationContext
|
||||
.getBeanFactory();
|
||||
|
||||
Map<String, Object> streamConfigGlobalProperties = applicationContext
|
||||
.getBean("streamConfigGlobalProperties", Map.class);
|
||||
|
||||
// Use a copy because the global configuration will be shared by multiple processors.
|
||||
Map<String, Object> streamConfiguration = new HashMap<>(streamConfigGlobalProperties);
|
||||
|
||||
if (kafkaStreamsBinderConfigurationProperties != null) {
|
||||
final Map<String, KafkaStreamsBinderConfigurationProperties.Functions> functionConfigMap = kafkaStreamsBinderConfigurationProperties.getFunctions();
|
||||
if (!CollectionUtils.isEmpty(functionConfigMap)) {
|
||||
final KafkaStreamsBinderConfigurationProperties.Functions functionConfig = functionConfigMap.get(beanNamePostPrefix);
|
||||
if (functionConfig != null) {
|
||||
final Map<String, String> functionSpecificConfig = functionConfig.getConfiguration();
|
||||
if (!CollectionUtils.isEmpty(functionSpecificConfig)) {
|
||||
streamConfiguration.putAll(functionSpecificConfig);
|
||||
}
|
||||
|
||||
String applicationId = functionConfig.getApplicationId();
|
||||
if (!StringUtils.isEmpty(applicationId)) {
|
||||
streamConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final MutablePropertySources propertySources = environment.getPropertySources();
|
||||
|
||||
if (!StringUtils.isEmpty(bindingProperties.getBinder())) {
|
||||
final KafkaStreamsBinderConfigurationProperties multiBinderKafkaStreamsBinderConfigurationProperties =
|
||||
applicationContext.getBean(bindingProperties.getBinder() + "-KafkaStreamsBinderConfigurationProperties", KafkaStreamsBinderConfigurationProperties.class);
|
||||
String connectionString = multiBinderKafkaStreamsBinderConfigurationProperties.getKafkaConnectionString();
|
||||
if (StringUtils.isEmpty(connectionString)) {
|
||||
connectionString = (String) propertySources.get(bindingProperties.getBinder() + "-kafkaStreamsBinderEnv").getProperty("spring.cloud.stream.kafka.binder.brokers");
|
||||
}
|
||||
|
||||
streamConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, connectionString);
|
||||
|
||||
String binderProvidedApplicationId = multiBinderKafkaStreamsBinderConfigurationProperties.getApplicationId();
|
||||
if (StringUtils.hasText(binderProvidedApplicationId)) {
|
||||
streamConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
binderProvidedApplicationId);
|
||||
}
|
||||
|
||||
if (multiBinderKafkaStreamsBinderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.logAndContinue) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndContinueExceptionHandler.class);
|
||||
}
|
||||
else if (multiBinderKafkaStreamsBinderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.logAndFail) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndFailExceptionHandler.class);
|
||||
}
|
||||
else if (multiBinderKafkaStreamsBinderConfigurationProperties
|
||||
.getDeserializationExceptionHandler() == DeserializationExceptionHandler.sendToDlq) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
RecoveringDeserializationExceptionHandler.class);
|
||||
SendToDlqAndContinue sendToDlqAndContinue = applicationContext.getBean(SendToDlqAndContinue.class);
|
||||
streamConfiguration.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, sendToDlqAndContinue);
|
||||
}
|
||||
|
||||
if (!ObjectUtils.isEmpty(multiBinderKafkaStreamsBinderConfigurationProperties.getConfiguration())) {
|
||||
streamConfiguration.putAll(multiBinderKafkaStreamsBinderConfigurationProperties.getConfiguration());
|
||||
}
|
||||
if (!streamConfiguration.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG)) {
|
||||
streamConfiguration.put(StreamsConfig.REPLICATION_FACTOR_CONFIG,
|
||||
(int) multiBinderKafkaStreamsBinderConfigurationProperties.getReplicationFactor());
|
||||
}
|
||||
}
|
||||
|
||||
//this is only used primarily for StreamListener based processors. Although in theory, functions can use it,
|
||||
//it is ideal for functions to use the approach used in the above if statement by using a property like
|
||||
//spring.cloud.stream.kafka.streams.binder.functions.process.configuration.num.threads (assuming that process is the function name).
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties = this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(inboundName);
|
||||
Map<String, String> bindingConfig = extendedConsumerProperties.getConfiguration();
|
||||
Assert.state(!bindingConfig.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG),
|
||||
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG + " cannot be overridden at the binding level; "
|
||||
+ "use multiple binders instead");
|
||||
// We will only add the per binding configuration to the current streamConfiguration and not the global one.
|
||||
streamConfiguration
|
||||
.putAll(bindingConfig);
|
||||
|
||||
String bindingLevelApplicationId = extendedConsumerProperties.getApplicationId();
|
||||
// override application.id if set at the individual binding level.
|
||||
// We provide this for backward compatibility with StreamListener based processors.
|
||||
// For function based processors see the approach used above
|
||||
// (i.e. use a property like spring.cloud.stream.kafka.streams.binder.functions.process.applicationId).
|
||||
if (StringUtils.hasText(bindingLevelApplicationId)) {
|
||||
streamConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
bindingLevelApplicationId);
|
||||
}
|
||||
|
||||
//If the application id is not set by any mechanism, then generate it.
|
||||
streamConfiguration.computeIfAbsent(StreamsConfig.APPLICATION_ID_CONFIG,
|
||||
k -> {
|
||||
String generatedApplicationID = beanNamePostPrefix + "-applicationId";
|
||||
LOG.info("Binder Generated Kafka Streams Application ID: " + generatedApplicationID);
|
||||
LOG.info("Use the binder generated application ID only for development and testing. ");
|
||||
LOG.info("For production deployments, please consider explicitly setting an application ID using a configuration property.");
|
||||
LOG.info("The generated applicationID is static and will be preserved over application restarts.");
|
||||
return generatedApplicationID;
|
||||
});
|
||||
|
||||
handleConcurrency(applicationContext, inboundName, streamConfiguration);
|
||||
|
||||
// Override deserialization exception handlers per binding
|
||||
final DeserializationExceptionHandler deserializationExceptionHandler =
|
||||
extendedConsumerProperties.getDeserializationExceptionHandler();
|
||||
if (deserializationExceptionHandler == DeserializationExceptionHandler.logAndFail) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndFailExceptionHandler.class);
|
||||
}
|
||||
else if (deserializationExceptionHandler == DeserializationExceptionHandler.logAndContinue) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
LogAndContinueExceptionHandler.class);
|
||||
}
|
||||
else if (deserializationExceptionHandler == DeserializationExceptionHandler.sendToDlq) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
RecoveringDeserializationExceptionHandler.class);
|
||||
streamConfiguration.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER,
|
||||
applicationContext.getBean(SendToDlqAndContinue.class));
|
||||
}
|
||||
else if (deserializationExceptionHandler == DeserializationExceptionHandler.skipAndContinue) {
|
||||
streamConfiguration.put(
|
||||
StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
|
||||
SkipAndContinueExceptionHandler.class);
|
||||
}
|
||||
|
||||
KafkaStreamsConfiguration kafkaStreamsConfiguration = new KafkaStreamsConfiguration(streamConfiguration);
|
||||
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.cleanupConfig == null
|
||||
? new StreamsBuilderFactoryBean(kafkaStreamsConfiguration)
|
||||
: new StreamsBuilderFactoryBean(kafkaStreamsConfiguration,
|
||||
this.cleanupConfig);
|
||||
|
||||
streamsBuilderFactoryBean.setAutoStartup(false);
|
||||
BeanDefinition streamsBuilderBeanDefinition = BeanDefinitionBuilder
|
||||
.genericBeanDefinition(
|
||||
(Class<StreamsBuilderFactoryBean>) streamsBuilderFactoryBean.getClass(),
|
||||
() -> streamsBuilderFactoryBean)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition(
|
||||
"stream-builder-" + beanNamePostPrefix, streamsBuilderBeanDefinition);
|
||||
|
||||
extendedConsumerProperties.setApplicationId((String) streamConfiguration.get(StreamsConfig.APPLICATION_ID_CONFIG));
|
||||
|
||||
final StreamsBuilderFactoryBean streamsBuilderFactoryBeanFromContext = applicationContext.getBean(
|
||||
"&stream-builder-" + beanNamePostPrefix, StreamsBuilderFactoryBean.class);
|
||||
//At this point, the StreamsBuilderFactoryBean is created. If the users call, getObject()
|
||||
//in the customizer, that should grant access to the StreamsBuilder.
|
||||
if (customizer != null) {
|
||||
customizer.configure(streamsBuilderFactoryBean);
|
||||
}
|
||||
return streamsBuilderFactoryBeanFromContext;
|
||||
}
|
||||
|
||||
private void handleConcurrency(ApplicationContext applicationContext, String inboundName,
|
||||
Map<String, Object> streamConfiguration) {
|
||||
// This rebinding is necessary to capture the concurrency explicitly set by the application.
|
||||
// This is added to fix this issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/899
|
||||
org.springframework.boot.context.properties.bind.Binder explicitConcurrencyResolver =
|
||||
new org.springframework.boot.context.properties.bind.Binder(ConfigurationPropertySources.get(applicationContext.getEnvironment()),
|
||||
new PropertySourcesPlaceholdersResolver(applicationContext.getEnvironment()),
|
||||
IntegrationUtils.getConversionService(this.applicationContext.getBeanFactory()), null);
|
||||
|
||||
boolean[] concurrencyExplicitlyProvided = new boolean[] {false};
|
||||
BindHandler handler = new BindHandler() {
|
||||
|
||||
@Override
|
||||
public Object onSuccess(ConfigurationPropertyName name, Bindable<?> target,
|
||||
BindContext context, Object result) {
|
||||
if (!concurrencyExplicitlyProvided[0]) {
|
||||
|
||||
concurrencyExplicitlyProvided[0] = name.getLastElement(ConfigurationPropertyName.Form.UNIFORM)
|
||||
.equals("concurrency") &&
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.bindings." + inboundName + ".consumer").isAncestorOf(name);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
};
|
||||
//Re-bind spring.cloud.stream properties to check if the application explicitly provided concurrency.
|
||||
try {
|
||||
explicitConcurrencyResolver.bind("spring.cloud.stream",
|
||||
Bindable.ofInstance(new BindingServiceProperties()), handler);
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Ignore this exception
|
||||
}
|
||||
|
||||
int concurrency = this.bindingServiceProperties.getConsumerProperties(inboundName)
|
||||
.getConcurrency();
|
||||
// override concurrency if set at the individual binding level.
|
||||
// Concurrency will be mapped to num.stream.threads.
|
||||
// This conditional also takes into account explicit concurrency settings left at the default value of 1
|
||||
// by the application to address concurrency behavior in applications with multiple processors.
|
||||
// See this GH issue: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/844
|
||||
if (concurrency >= 1 && concurrencyExplicitlyProvided[0]) {
|
||||
streamConfiguration.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG,
|
||||
concurrency);
|
||||
}
|
||||
}
|
||||
|
||||
protected Serde<?> getValueSerde(String inboundName, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, ResolvableType resolvableType) {
|
||||
if (bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding()) {
|
||||
BindingProperties bindingProperties = this.bindingServiceProperties
|
||||
.getBindingProperties(inboundName);
|
||||
return this.keyValueSerdeResolver.getInboundValueSerde(
|
||||
bindingProperties.getConsumer(), kafkaStreamsConsumerProperties, resolvableType);
|
||||
}
|
||||
else {
|
||||
return Serdes.ByteArray();
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings({"rawtypes", "unchecked"})
|
||||
protected KStream<?, ?> getKStream(String inboundName, BindingProperties bindingProperties, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
|
||||
if (firstBuild) {
|
||||
addStateStoreBeans(streamsBuilder);
|
||||
}
|
||||
|
||||
final boolean nativeDecoding = this.bindingServiceProperties
|
||||
.getConsumerProperties(inboundName).isUseNativeDecoding();
|
||||
if (nativeDecoding) {
|
||||
LOG.info("Native decoding is enabled for " + inboundName
|
||||
+ ". Inbound deserialization done at the broker.");
|
||||
}
|
||||
else {
|
||||
LOG.info("Native decoding is disabled for " + inboundName
|
||||
+ ". Inbound message conversion done by Spring Cloud Stream.");
|
||||
}
|
||||
|
||||
KStream<?, ?> stream;
|
||||
final Serde<?> valueSerdeToUse = StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes()) ?
|
||||
new Serdes.BytesSerde() : valueSerde;
|
||||
final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerdeToUse, autoOffsetReset);
|
||||
|
||||
if (this.kafkaStreamsExtendedBindingProperties
|
||||
.getExtendedConsumerProperties(inboundName).isDestinationIsPattern()) {
|
||||
final Pattern pattern = Pattern.compile(this.bindingServiceProperties.getBindingDestination(inboundName));
|
||||
|
||||
stream = streamsBuilder.stream(pattern, consumed);
|
||||
}
|
||||
else {
|
||||
String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
|
||||
this.bindingServiceProperties.getBindingDestination(inboundName));
|
||||
stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
|
||||
consumed);
|
||||
}
|
||||
//Check to see if event type based routing is enabled.
|
||||
//See this issue for more context: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1003
|
||||
if (StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes())) {
|
||||
AtomicBoolean matched = new AtomicBoolean();
|
||||
// Processor to retrieve the header value.
|
||||
stream.process(() -> eventTypeProcessor(kafkaStreamsConsumerProperties, matched));
|
||||
// Branching based on event type match.
|
||||
final KStream<?, ?>[] branch = stream.branch((key, value) -> matched.getAndSet(false));
|
||||
// Deserialize if we have a branch from above.
|
||||
final KStream<?, Object> deserializedKStream = branch[0].mapValues(value -> valueSerde.deserializer().deserialize(null, ((Bytes) value).get()));
|
||||
return getkStream(bindingProperties, deserializedKStream, nativeDecoding);
|
||||
}
|
||||
return getkStream(bindingProperties, stream, nativeDecoding);
|
||||
}
|
||||
|
||||
private KStream<?, ?> getkStream(BindingProperties bindingProperties, KStream<?, ?> stream, boolean nativeDecoding) {
|
||||
if (!nativeDecoding) {
|
||||
stream = stream.mapValues((value) -> {
|
||||
Object returnValue;
|
||||
String contentType = bindingProperties.getContentType();
|
||||
if (value != null && !StringUtils.isEmpty(contentType)) {
|
||||
returnValue = MessageBuilder.withPayload(value)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
|
||||
}
|
||||
else {
|
||||
returnValue = value;
|
||||
}
|
||||
return returnValue;
|
||||
});
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
private void addStateStoreBeans(StreamsBuilder streamsBuilder) {
|
||||
try {
|
||||
final Map<String, StoreBuilder> storeBuilders = applicationContext.getBeansOfType(StoreBuilder.class);
|
||||
if (!CollectionUtils.isEmpty(storeBuilders)) {
|
||||
storeBuilders.values().forEach(storeBuilder -> {
|
||||
streamsBuilder.addStateStore(storeBuilder);
|
||||
if (LOG.isInfoEnabled()) {
|
||||
LOG.info("state store " + storeBuilder.name() + " added to topology");
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
// Pass through.
|
||||
}
|
||||
}
|
||||
|
||||
private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder, String destination, String storeName,
|
||||
Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
|
||||
|
||||
final Consumed<K, V> consumed = getConsumed(kafkaStreamsConsumerProperties, k, v, autoOffsetReset);
|
||||
return streamsBuilder.table(this.bindingServiceProperties.getBindingDestination(destination),
|
||||
consumed, getMaterialized(storeName, k, v));
|
||||
}
|
||||
|
||||
private <K, V> Materialized<K, V, KeyValueStore<Bytes, byte[]>> getMaterialized(
|
||||
String storeName, Serde<K> k, Serde<V> v) {
|
||||
return Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
|
||||
.withKeySerde(k).withValueSerde(v);
|
||||
}
|
||||
|
||||
private <K, V> GlobalKTable<K, V> materializedAsGlobalKTable(
|
||||
StreamsBuilder streamsBuilder, String destination, String storeName,
|
||||
Serde<K> k, Serde<V> v, Topology.AutoOffsetReset autoOffsetReset, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
|
||||
final Consumed<K, V> consumed = getConsumed(kafkaStreamsConsumerProperties, k, v, autoOffsetReset);
|
||||
return streamsBuilder.globalTable(
|
||||
this.bindingServiceProperties.getBindingDestination(destination),
|
||||
consumed,
|
||||
getMaterialized(storeName, k, v));
|
||||
}
|
||||
|
||||
private GlobalKTable<?, ?> getGlobalKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
StreamsBuilder streamsBuilder,
|
||||
Serde<?> keySerde, Serde<?> valueSerde, String materializedAs,
|
||||
String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
|
||||
final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
|
||||
return materializedAs != null
|
||||
? materializedAsGlobalKTable(streamsBuilder, bindingDestination,
|
||||
materializedAs, keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
|
||||
: streamsBuilder.globalTable(bindingDestination,
|
||||
consumed);
|
||||
}
|
||||
|
||||
/**
 * Creates the {@link KTable} for a binding.
 * <p>
 * When event-type routing is configured on the binding, records are first consumed
 * as raw {@link Bytes} so that headers can be inspected before deserialization;
 * only records whose event-type header matches one of the configured types are
 * deserialized with the real value serde and folded back into a table.
 * @param kafkaStreamsConsumerProperties extended consumer properties for the binding
 * @param streamsBuilder builder used to create the table
 * @param keySerde key serde for the binding
 * @param valueSerde value serde for the binding
 * @param materializedAs optional state store name; may be {@code null}
 * @param bindingDestination resolved destination topic
 * @param autoOffsetReset offset reset policy to apply
 * @return the (possibly event-type filtered) {@link KTable}
 */
private KTable<?, ?> getKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
		StreamsBuilder streamsBuilder, Serde<?> keySerde,
		Serde<?> valueSerde, String materializedAs, String bindingDestination,
		Topology.AutoOffsetReset autoOffsetReset) {

	// With event types configured, defer value deserialization by consuming raw bytes.
	final Serde<?> valueSerdeToUse = StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes()) ?
			new Serdes.BytesSerde() : valueSerde;

	final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerdeToUse, autoOffsetReset);

	final KTable<?, ?> kTable = materializedAs != null
			? materializedAs(streamsBuilder, bindingDestination, materializedAs,
			keySerde, valueSerdeToUse, autoOffsetReset, kafkaStreamsConsumerProperties)
			: streamsBuilder.table(bindingDestination,
			consumed);
	if (StringUtils.hasText(kafkaStreamsConsumerProperties.getEventTypes())) {
		// Shared flag set by the processor below and consumed (and reset) by the
		// branch predicate for the same record — relies on per-record ordering.
		AtomicBoolean matched = new AtomicBoolean();
		final KStream<?, ?> stream = kTable.toStream();

		// Processor to retrieve the header value.
		stream.process(() -> eventTypeProcessor(kafkaStreamsConsumerProperties, matched));
		// Branching based on event type match.
		final KStream<?, ?>[] branch = stream.branch((key, value) -> matched.getAndSet(false));
		// Deserialize if we have a branch from above.
		final KStream<?, Object> deserializedKStream = branch[0].mapValues(value -> valueSerde.deserializer().deserialize(null, ((Bytes) value).get()));

		return deserializedKStream.toTable();
	}
	return kTable;
}
|
||||
|
||||
private <K, V> Consumed<K, V> getConsumed(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
|
||||
Serde<K> keySerde, Serde<V> valueSerde, Topology.AutoOffsetReset autoOffsetReset) {
|
||||
TimestampExtractor timestampExtractor = null;
|
||||
if (!StringUtils.isEmpty(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName())) {
|
||||
timestampExtractor = applicationContext.getBean(kafkaStreamsConsumerProperties.getTimestampExtractorBeanName(),
|
||||
TimestampExtractor.class);
|
||||
}
|
||||
final Consumed<K, V> consumed = Consumed.with(keySerde, valueSerde)
|
||||
.withOffsetResetPolicy(autoOffsetReset);
|
||||
if (timestampExtractor != null) {
|
||||
consumed.withTimestampExtractor(timestampExtractor);
|
||||
}
|
||||
if (StringUtils.hasText(kafkaStreamsConsumerProperties.getConsumedAs())) {
|
||||
consumed.withName(kafkaStreamsConsumerProperties.getConsumedAs());
|
||||
}
|
||||
return consumed;
|
||||
}
|
||||
|
||||
private <K, V> Processor<K, V> eventTypeProcessor(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, AtomicBoolean matched) {
|
||||
return new Processor() {
|
||||
|
||||
ProcessorContext context;
|
||||
|
||||
@Override
|
||||
public void init(ProcessorContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process(Object key, Object value) {
|
||||
final Headers headers = this.context.headers();
|
||||
final Iterable<Header> eventTypeHeader = headers.headers(kafkaStreamsConsumerProperties.getEventTypeHeaderKey());
|
||||
if (eventTypeHeader != null && eventTypeHeader.iterator().hasNext()) {
|
||||
String eventTypeFromHeader = new String(eventTypeHeader.iterator().next().value());
|
||||
final String[] eventTypesFromBinding = StringUtils.commaDelimitedListToStringArray(kafkaStreamsConsumerProperties.getEventTypes());
|
||||
for (String eventTypeFromBinding : eventTypesFromBinding) {
|
||||
if (eventTypeFromHeader.equals(eventTypeFromBinding)) {
|
||||
matched.set(true);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
/**
|
||||
* Enumeration for various {@link org.apache.kafka.streams.errors.DeserializationExceptionHandler} types.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public enum DeserializationExceptionHandler {

	// NOTE(review): constant names are deliberately lowerCamelCase — presumably they
	// are bound directly from configuration property values; confirm before renaming.

	/**
	 * Deserialization error handler with log and continue.
	 * See {@link org.apache.kafka.streams.errors.LogAndContinueExceptionHandler}
	 */
	logAndContinue,
	/**
	 * Deserialization error handler with log and fail.
	 * See {@link org.apache.kafka.streams.errors.LogAndFailExceptionHandler}
	 */
	logAndFail,
	/**
	 * Deserialization error handler with DLQ send.
	 * See {@link org.springframework.kafka.streams.RecoveringDeserializationExceptionHandler}
	 */
	sendToDlq,
	/**
	 * Deserialization error handler that silently skips the error and continue.
	 */
	skipAndContinue;

}
|
||||
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
* Copyright 2019-2019 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationPropertiesBindHandlerAdvisor;
|
||||
import org.springframework.boot.context.properties.bind.AbstractBindHandler;
|
||||
import org.springframework.boot.context.properties.bind.BindContext;
|
||||
import org.springframework.boot.context.properties.bind.BindHandler;
|
||||
import org.springframework.boot.context.properties.bind.BindResult;
|
||||
import org.springframework.boot.context.properties.bind.Bindable;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
|
||||
/**
|
||||
* {@link ConfigurationPropertiesBindHandlerAdvisor} to detect nativeEncoding/Decoding settings
|
||||
* provided by the application explicitly.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 3.0.0
|
||||
*/
|
||||
public class EncodingDecodingBindAdviceHandler implements ConfigurationPropertiesBindHandlerAdvisor {
|
||||
|
||||
private boolean encodingSettingProvided;
|
||||
private boolean decodingSettingProvided;
|
||||
|
||||
public boolean isDecodingSettingProvided() {
|
||||
return decodingSettingProvided;
|
||||
}
|
||||
|
||||
public boolean isEncodingSettingProvided() {
|
||||
return this.encodingSettingProvided;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BindHandler apply(BindHandler bindHandler) {
|
||||
BindHandler handler = new AbstractBindHandler(bindHandler) {
|
||||
@Override
|
||||
public <T> Bindable<T> onStart(ConfigurationPropertyName name,
|
||||
Bindable<T> target, BindContext context) {
|
||||
final String configName = name.toString();
|
||||
if (configName.contains("use") && configName.contains("native") &&
|
||||
(configName.contains("encoding") || configName.contains("decoding"))) {
|
||||
BindResult<T> result = context.getBinder().bind(name, target);
|
||||
if (result.isBound()) {
|
||||
if (configName.contains("encoding")) {
|
||||
EncodingDecodingBindAdviceHandler.this.encodingSettingProvided = true;
|
||||
}
|
||||
else {
|
||||
EncodingDecodingBindAdviceHandler.this.decodingSettingProvided = true;
|
||||
}
|
||||
return target.withExistingValue(result.get());
|
||||
}
|
||||
}
|
||||
return bindHandler.onStart(name, target, context);
|
||||
}
|
||||
};
|
||||
return handler;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
|
||||
import org.springframework.cloud.stream.config.BindingHandlerAdvise.MappingsProvider;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* {@link EnableAutoConfiguration Auto-configuration} for extended binding metadata for Kafka Streams.
|
||||
*
|
||||
* @author Chris Bono
|
||||
* @since 3.2
|
||||
*/
|
||||
@Configuration(proxyBeanMethods = false)
|
||||
public class ExtendedBindingHandlerMappingsProviderAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
public MappingsProvider kafkaStreamsExtendedPropertiesDefaultMappingsProvider() {
|
||||
return () -> {
|
||||
Map<ConfigurationPropertyName, ConfigurationPropertyName> mappings = new HashMap<>();
|
||||
mappings.put(
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams"),
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams.default"));
|
||||
mappings.put(
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams.bindings"),
|
||||
ConfigurationPropertyName.of("spring.cloud.stream.kafka.streams.default"));
|
||||
return mappings;
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,172 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.kstream.GlobalKTable;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.kafka.config.StreamsBuilderFactoryBean;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* An {@link AbstractBinder} implementation for {@link GlobalKTable}.
|
||||
* <p>
|
||||
* Provides only consumer binding for the bound {@link GlobalKTable}. Output bindings are
|
||||
* not allowed on this binder.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.1.0
|
||||
*/
|
||||
public class GlobalKTableBinder extends
		// @checkstyle:off
		AbstractBinder<GlobalKTable<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
		implements
		ExtendedPropertiesBinder<GlobalKTable<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {

	// @checkstyle:on
	private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

	private final KafkaTopicProvisioner kafkaTopicProvisioner;

	private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;

	// @checkstyle:off
	// Replaced via the setter during binder configuration; defaults to empty properties.
	private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
	private final KafkaStreamsRegistry kafkaStreamsRegistry;

	// @checkstyle:on

	public GlobalKTableBinder(
			KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
			KafkaTopicProvisioner kafkaTopicProvisioner,
			KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue, KafkaStreamsRegistry kafkaStreamsRegistry) {
		this.binderConfigurationProperties = binderConfigurationProperties;
		this.kafkaTopicProvisioner = kafkaTopicProvisioner;
		this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
		this.kafkaStreamsRegistry = kafkaStreamsRegistry;
	}

	@Override
	@SuppressWarnings("unchecked")
	protected Binding<GlobalKTable<Object, Object>> doBindConsumer(String name,
			String group, GlobalKTable<Object, Object> inputTarget,
			ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
		// Fall back to the application id when no explicit consumer group is given.
		if (!StringUtils.hasText(group)) {
			group = properties.getExtension().getApplicationId();
		}
		final RetryTemplate retryTemplate = buildRetryTemplate(properties);

		// The StreamsBuilderFactoryBean for this binding was catalogued earlier, keyed
		// by the binding name derived from the bound target.
		final String bindingName = this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget);
		final StreamsBuilderFactoryBean streamsBuilderFactoryBean = this.kafkaStreamsBindingInformationCatalogue
				.getStreamsBuilderFactoryBeanPerBinding().get(bindingName);

		KafkaStreamsBinderUtils.prepareConsumerBinding(name, group,
				getApplicationContext(), this.kafkaTopicProvisioner,
				this.binderConfigurationProperties, properties, retryTemplate, getBeanFactory(),
				this.kafkaStreamsBindingInformationCatalogue.bindingNamePerTarget(inputTarget),
				this.kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);

		// The binding delegates lifecycle control to the StreamsBuilderFactoryBean and
		// keeps the KafkaStreams registry and the stopped-streams cache in sync.
		return new DefaultBinding<GlobalKTable<Object, Object>>(bindingName, group, inputTarget, streamsBuilderFactoryBean) {

			@Override
			public boolean isInput() {
				// GlobalKTable bindings are consumer-only.
				return true;
			}

			@Override
			public synchronized void start() {
				if (!streamsBuilderFactoryBean.isRunning()) {
					super.start();
					GlobalKTableBinder.this.kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean);
					//If we cached the previous KafkaStreams object (from a binding stop on the actuator), remove it.
					//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
					final String applicationId = (String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG);
					if (kafkaStreamsBindingInformationCatalogue.getStoppedKafkaStreams().containsKey(applicationId)) {
						kafkaStreamsBindingInformationCatalogue.removePreviousKafkaStreamsForApplicationId(applicationId);
					}
				}
			}

			@Override
			public synchronized void stop() {
				if (streamsBuilderFactoryBean.isRunning()) {
					// Capture the KafkaStreams instance before super.stop() tears it down.
					final KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
					super.stop();
					GlobalKTableBinder.this.kafkaStreamsRegistry.unregisterKafkaStreams(kafkaStreams);
					KafkaStreamsBinderUtils.closeDlqProducerFactories(kafkaStreamsBindingInformationCatalogue, streamsBuilderFactoryBean);
					//Caching the stopped KafkaStreams for health indicator purposes on the underlying processor.
					//See this issue for more details: https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/issues/1165
					GlobalKTableBinder.this.kafkaStreamsBindingInformationCatalogue.addPreviousKafkaStreamsForApplicationId(
							(String) streamsBuilderFactoryBean.getStreamsConfiguration().get(StreamsConfig.APPLICATION_ID_CONFIG), kafkaStreams);
				}
			}
		};
	}

	@Override
	protected Binding<GlobalKTable<Object, Object>> doBindProducer(String name,
			GlobalKTable<Object, Object> outboundBindTarget,
			ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
		// Output bindings are not supported on this binder by design.
		throw new UnsupportedOperationException(
				"No producer level binding is allowed for GlobalKTable");
	}

	@Override
	public KafkaStreamsConsumerProperties getExtendedConsumerProperties(
			String channelName) {
		return this.kafkaStreamsExtendedBindingProperties
				.getExtendedConsumerProperties(channelName);
	}

	@Override
	public KafkaStreamsProducerProperties getExtendedProducerProperties(
			String channelName) {
		throw new UnsupportedOperationException(
				"No producer binding is allowed and therefore no properties");
	}

	@Override
	public String getDefaultsPrefix() {
		return this.kafkaStreamsExtendedBindingProperties.getDefaultsPrefix();
	}

	@Override
	public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
		return this.kafkaStreamsExtendedBindingProperties
				.getExtendedPropertiesEntryClass();
	}

	public void setKafkaStreamsExtendedBindingProperties(
			KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
		this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
	}

}
|
||||
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright 2018-2021 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.AdminClientConfigCustomizer;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
|
||||
* Configuration for GlobalKTable binder.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @since 2.1.0
|
||||
*/
|
||||
@Configuration
|
||||
@Import({ KafkaAutoConfiguration.class,
|
||||
MultiBinderPropertiesConfiguration.class,
|
||||
KafkaStreamsBinderHealthIndicatorConfiguration.class,
|
||||
KafkaStreamsJaasConfiguration.class})
|
||||
public class GlobalKTableBinderConfiguration {
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties, ObjectProvider<AdminClientConfigCustomizer> adminClientConfigCustomizer) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties, adminClientConfigCustomizer.getIfUnique());
|
||||
}
|
||||
|
||||
@Bean
|
||||
public GlobalKTableBinder GlobalKTableBinder(
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
@Qualifier("streamConfigGlobalProperties") Map<String, Object> streamConfigGlobalProperties,
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
|
||||
GlobalKTableBinder globalKTableBinder = new GlobalKTableBinder(binderConfigurationProperties,
|
||||
kafkaTopicProvisioner, kafkaStreamsBindingInformationCatalogue, kafkaStreamsRegistry);
|
||||
globalKTableBinder.setKafkaStreamsExtendedBindingProperties(
|
||||
kafkaStreamsExtendedBindingProperties);
|
||||
return globalKTableBinder;
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public static BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
|
||||
// It is safe to call getBean("outerContext") here, because this bean is
|
||||
// registered as first
|
||||
// and as independent from the parent context.
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory
|
||||
.getBean("outerContext");
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsExtendedBindingProperties.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsBindingInformationCatalogue.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsBindingInformationCatalogue.class));
|
||||
beanFactory.registerSingleton(
|
||||
KafkaStreamsRegistry.class.getSimpleName(),
|
||||
outerContext.getBean(KafkaStreamsRegistry.class));
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||