From 64d024264302723f0d7ce85239333944d1648241 Mon Sep 17 00:00:00 2001 From: Marc-Eric Martel Date: Mon, 6 Nov 2023 13:30:36 -0500 Subject: [PATCH] Manquait un ) --- .../bin/Debug/net7.0/gregorsamsa | Bin 151064 -> 151064 bytes .../bin/Debug/net7.0/gregorsamsa.dll | Bin 9216 -> 9216 bytes .../bin/Debug/net7.0/gregorsamsa.pdb | Bin 11556 -> 11564 bytes .../obj/Debug/net7.0/apphost | Bin 151064 -> 151064 bytes ....GeneratedMSBuildEditorConfig.editorconfig | 2 +- .../obj/Debug/net7.0/gregorsamsa.assets.cache | Bin 6261 -> 6261 bytes ...gregorsamsa.csproj.CoreCompileInputs.cache | 2 +- .../gregorsamsa.csproj.FileListAbsolute.txt | 42 + .../obj/Debug/net7.0/gregorsamsa.dll | Bin 9216 -> 9216 bytes .../net7.0/gregorsamsa.genruntimeconfig.cache | 2 +- .../obj/Debug/net7.0/gregorsamsa.pdb | Bin 11556 -> 11564 bytes .../obj/gregorsamsa.csproj.nuget.dgspec.json | 12 +- .../obj/project.assets.json | 8 +- .../obj/project.nuget.cache | 4 +- dotnet/josefk_producer/Program.cs | 2 +- .../josefk_producer/bin/Debug/net7.0/josefk | Bin 151064 -> 151064 bytes .../bin/Debug/net7.0/josefk.dll | Bin 6656 -> 6656 bytes .../bin/Debug/net7.0/josefk.pdb | Bin 10876 -> 10888 bytes .../josefk_producer/obj/Debug/net7.0/apphost | Bin 151064 -> 151064 bytes ....GeneratedMSBuildEditorConfig.editorconfig | 2 +- .../obj/Debug/net7.0/josefk.assets.cache | Bin 6261 -> 6261 bytes .../josefk.csproj.CoreCompileInputs.cache | 2 +- .../net7.0/josefk.csproj.FileListAbsolute.txt | 42 + .../obj/Debug/net7.0/josefk.dll | Bin 6656 -> 6656 bytes .../net7.0/josefk.genruntimeconfig.cache | 2 +- .../obj/Debug/net7.0/josefk.pdb | Bin 10876 -> 10888 bytes .../obj/josefk.csproj.nuget.dgspec.json | 12 +- .../josefk_producer/obj/project.assets.json | 8 +- .../josefk_producer/obj/project.nuget.cache | 4 +- logs/controller.log | 145 +- logs/kafkaServer-gc.log | 250 +-- logs/log-cleaner.log | 4 +- logs/server.log | 1956 +++++------------ logs/state-change.log | 403 ++-- logs/zookeeper-gc.log | 68 +- 35 files changed, 1027 insertions(+), 1945 deletions(-) diff --git a/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa b/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa index b10a4d0d89a83fee29d3e714af1f623bd6592f77..fefc1d5dbe1345c6907183742063805905881c30 100755 GIT binary patch delta 1540 zcmbO+hjYdp&J8-uB7Dz!wn%aPz6FkC_=?PhNL(0%O!>`&%2>7}rhy z@F0-!^<<}q4vdnUn;+`2F}~Qm>G?}0jccJ`dwxCo|G)V+V=1>suj#g!|Nl2LzUK7k zHGLBP|G(jFk6zoN&4*uoW@CIfdHY9e#-)=#e+*)rIN9x!CS&^Mf=`#3nAn0hyMAqD zWPCdL^fx`mWt)F}>tbZQxw-X6J|ko8=C40bF*3SrKJa@Qv$V4-$hV;vUhv8>GI+G! 
zE=g;!W++MY=r!Fnx$wUUeH99^Gx z>lsU@M{6+ZFg9)P&|u7GWGvnOLzA(QiSg9*Djh}zM$PTBbr`p>Fh)-gFl01k+_AmQ zkkN^eQDpi_BSr_twCP;Nj3$i5)9sBJRT=HJry4VEW@dc3UDBK}k&*Gz^lA%6dB#oC zXIn6OFdm-%#DXzZP>mmCdF_Q4vu1%9ru(KxSu(0Hx=*jSWOQYmzWt~rqY@)y>h>2_ zj46zabEo^-Fv>I5PS3SrjAYz2{kRR|bjC;1<82uW86~G*v}Md}F(KHGOk1qcNk*^yk5hs~F{`PYPjFVf?v$dkEu8Zbt9z3lkWR zGBJ8iPfuoi#+WqSFNJY4qxyEHR7QD5M)vJWX^h*M7;C2A%3%D%xMlkFOhy~V+tWp| z7$-6+OkbA87|z6YZ~Ff%#$z12Z~p@&?S0eFW;5C`Szn&co5Q%1v2^?K97c6^#_QAH z7c(AWyg7YS38OFL`)Q?&o*Xup!8)wA2bMC%urY>BUsui8$oOr$K@DR(qw4P?VA<^l z|NZyqy!oPU?f?HR2TBw@x`jP@ZEGQt*SBw}Wt3*(Dc=uL#G~TTYpcKgc0J=#M#h!f z8ygw>7>#QW{{v;UWp4lfdvuph@aVNoItCV*0ukAP5NQI5^x7Vu?$pdUozY|Zt!BnZ z^W(e0s+&Mbxx4g$N3X5>A+SgRLe&odokd^dd>BU1rm?R0h~CObyE>26F+CQO_j(`%TR zvKZ%1f6l}d&geSbo|(ysiOGF>Gc!{l1nJ?O^marzhq@{V2qls#m1D#_+xqx8dd8?ZJ%)qHfbqxlUJj;U#wF7qaxnQbhD$c?q|f+cdoL$b z88>6&b{0`4Uu|n+BV#>NLkmMQ1qbK++}z9(D+OXp;q5&ROf9S|99vkcCKv8?*&gA> HbbuWI7VcXf delta 1540 zcmbO+hjYdp&J8-uBHaR+Zii)(-Bw?dSRJcm)Dkl3(ap(?OcI+Tn0Kf%YE2f@7H3r0 ztf{RivN_P6mwR%%*9CoZJp(;MLx$AkBx92_3)9qOOQTf7B;#ZYLzC2$H1m{13u9wr z^Hf7i%T)6;GqcTM-XBU0U)25o|8inJ1H%N5ZhMbj)8p;`|G#WR@RLFOLInT9tOkQ_$0%P!I`&%2>7?)4} z@F0+J_hhGs4va#Zn;+`2G49yB>G?}0jq{;kdwxCo|G)V+V=1>suj%@j|Nl2LzUK7k zHN6}D|G(jFk6zpC&4*uoW@EfHdHY9e#(9%Je+**mn(X#TlQCg)!KceiO#g#6yMAqD zWV|=|^fx`mvdzD~bultt+T8jhpOLX_^VgrJ7#VFhANW0uS=z=Gn`89Z8V zm!vgVGn6EH^qOv-T=?IFapUG?{|~b;KHZ+h!nlElv2VJxAfqy4)^>YA##ScAC)1A# zGb%A2oc=#`fYW_ zdd9rz(He|8jMdvaG#K+48S}RP&}3|6Vw^R-N{3N_QEB^Z9mXvzjM~!!3>i%sH*Rk; zWOQO=GtVSmW(Ql4%6!`8C@ACY(HwrsKm$^xBZ0` zV+teVwCTP!jPi_S({pVYBNbccw5FoMxp5!Z5gu|cW<|`V>D-F^xR(J z%xKNXD7Srs3*%dm5%b&_-!n3nZtwG8WMpPMJ$-^VV>)B$^dH`g4;ZIxzv;us!o+xW z`#nEK4`#-u?dm~{-HeQjr*95sG-ec?{ydm*6{Gm{Ng<3XjGwk|4`F=C&FHdyVFKe( zCPt^}>B)@G7^A2Ar7&)0RM^gx$|%ps$go{0jd42@W9jr;8H|4z*G|8l$!NoPWx7Zf z<3vWu>C3Vh!fWX*Htq%GQQt#P{SC{sQTpySa!p~ zfB!u?Z@%bT`~QE-ff7ZJZefpJ+cJpch3#8v8Ks$cLQaDe@u+z8+G=dSUC+3bk#W)X z#zw|IM&q)>|3F!7ncM&W9^IuAJbG=TkAX#|Kty&RM4CV%y|xFYJ2f*-XLOu?tC=y< z{J?Io>LyT9?k+vx(QE5)2rNeOA>vZ8(MmI*A?NP0a zx0!Vg?gV>@ALON8(Fb6P7epBzc#-Y!|G#VNw>n+d&aXbbwlk+Eb~0)*UfkZ@$=DOm zSh-!|C1axq=^l`yD>4DFx_;YUc!)93X7Xp$pRUTn6wGwnb$S&GlRM+2>4#XD zf*9?mi?TBLGrf12p2o`5#F#z(B`cEyqtp-$7(Y($CT&K=>9L$l`ivj9_i{3o zaWhtJXAx!c)wVV?GS)LOGBGk&aB$Ag&CM*aQXsYj-rnQD)WXWLW{pPfkSJA zh8vT8rIcm<25;Yexh%z}W9#3E6Bm^IV`N}p&@WCdN-Rmvm^?`;*a)WJ1XKaYuqnbu z3ab{WaV^`W%c5=xauNhPg4hZZ6~!2hC+kUTvHn+OP;i(msI+}EpUf{-rVz``f=cfh ICmOH=09nFK^Z)<= diff --git a/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa.pdb b/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa.pdb index 7ede018c3fb7ea71ec622cd4d48dc14f69fb0c96..4c25f4aba5ae693d670f3bd34bfd1432d6c71fc6 100644 GIT binary patch delta 6169 zcmZ1ywI*spfx;S11_ouPoctsP28I=GpS+k}t##~S_~dl?*w+*C?^QlboKPa`!obMj z%fQ4imw}OCB?A+~PXn%Nrkou542+!ovMQF7k1+@Ea6@@{MHTgv6HvFilKKG%+zuO*Az%F*UL@O-?j2Gc-3g*!-Ba zhEd{DnZ#L!1_p*((OqTLHH}`=uCr`YF1Teoc>}xHo6<5pJ9^iQH9AH*kvTlUrGN4>1J?{DLqr0=-qRhYxN|L|9>YxVONy6dtB;0Nbli>g( z!$EX2FfyEF@?kl_eZ_Ivex7)q zUoQVA=W{Acyx8e+nZbdPq1aC9%aiLM$@bUL7D|rGotBncbxbl|8a7#;TWqo=mym>h(%-WT z2N)O*-c|n-+IQA>Q835Ose;~s+ibzaZp%jVKM&ilA8?A)uoh6AJk z>h{zXs}_Qk_#QmDQbKvJ@2-d!ukKA-Rtr)x`6HLO`0sdd3KHSwdUSlPr$nb5;tfj6W z@A*w}np^Vn^VYMI7jR40FHWn!4zgPDj>g{2;bqQ~dP914-2Am08b!|edByoTsn94o z()YI;WR?92j-n991o!pkxi5EJ_r45QTm(~@$0ENIq*C0molIpPFYZu zE{P?Hdd~T|xr+IDP_qp@RktxLU}8vLDHZwoa=q{JAloZO?vYO>NAsvS?OYRa4dlu_ z=hd7JC*Aa(K2zlXub(w0$i_jEK~l!XXCRBMIuCtQnZV-M_s7&p)P3XW$p?9qB{oc) z_YY)@!QrsaAN*JNG&M5a(q_u2n0$~&crrgPkHqs+N7sTBH0K>G>FKX^m~|w2o5s7p zkzfUGyy6npXJ0lmJYZn>Hi@A(&GVM;gozV=_Fua^6|A6xS4^U~HhT`pgspu>N#Bl{ 
zdoF0Lxw-A&&rYy{le~Nq!pxI4g3`;?oTi$KY)$SnH@|+hXu85ykOIx%lEmcfkfOw7 zSU6fd{(ThWorQNECFVp|`AyUKoAz%FX9h_1WIH}Ni5>iZ!Kv$-o^S2Vn;ovpi&qtY z&GSD6QlJu&pPvKEUe2+TW`i7dKU|&HDyG1+J|T(fVm)wiVSSlv z&oleq&OI}w0;M=~ATIyS$5$^@YVi`}7NOedxw9LX{g(^RTKyzbK@J)TknsPM7t;aC zhQE$KW_&v@!)23frqGKE!tzi>E=7sunR)5ZI2V&Rbra+!-ayvEyRipc=AX4Sd3dKs z5>0VHQD$ytiE?IHD$Fx{Coe4o1!Bvs<$bAkU)|^b^ep(jWcTsOtNCRmCUl*C2QroK zr0a64!_PcgUTtsm`tac361Oy~99zWd-Qs8atSWxJ7 z#HD{l@a(pzJNc6v1f(S-wbp`DT)Nu+@)RpS|6c$4J=Y(+?gZ;TAs{Hh?K%sryW^um zLrT|7$F=?sPOvOu@16WnKv}~2MDl5n(Hnhz^MAUnaow5XsN=<=oi+KRfEcvssJXEC z5GYcN);?$RSsCrJ`DgL}DJju;ASIK_1O+Ajh3`BEGUKUQ(%sLwksezL@2*UJ`R58q zfnQW+fmSL%j(9r?%~4-Fene`t{8{EYtsQkM}}t7Ip% z$Z753T~}7@w9c5!ETrkZ^KQr!kb~kH3?*XnIs98xtVD_eQ+6T@0oe%ULDEc%&4H&N z^NckaMRm&8d(Gb=&ab)V=cLKyLaGucqZkW0?X@(Pz;T;|lar1oxk%Mydh zy21()yh|SSfFuo*pQh}2Q0l)U{Db=*uRY%l2GDDY@U2?}0 zx3y+Xo-eH8HY4oGYmnrzEpO86IK%uV=B-tlmZWX~^^&K*UO;96Jf+(AeQN^Q^DekM zGt>S^HbjqB2Oy05C!sNA)U)edTuPi9^rG{~zm zkA4Rw_zi9Q^JY%`>NENOY7c=K%8Zi>MdZB8ThE^Y)o!b-^(;9ae)gKWBh#_3r7RTL zn1IBh#GIVe9H@EDo^cmI=FRiW4W4?|$7$EXmYB#(x_2hu5|OEYX4Tvek~Cde8&|0M z%6U_O*`yZdPIstzzNxwSMU_y4_IRzl2g>gztjA03tJB=pX}A=mtX|p$RqK}uX$&wh zW`A3{0pwT3!h~CeP3?ZO*NZ7V`{)6y5kLw(N=gd!QgUQKtrE>Wo4P@*lB)kvtJI&m zIxX3gv}l|5s>I0?MWyRM2<_hu3e;)yJSWX!m2lapH|+&yuAwQCr699>GK))6^HO0s z*){uPBS_b&Z{`v$hl6})wFh6EWAq{hNtbV8CM<+r_6Xhp1;UXnU203b+#KgJIcb}{ zoxdr#vUsw#n5+a}vS=sBKW0~6PX00Tu=4^pp`D@=v?C`ch^e?;^h=%s(#-f{*N+}a z50B+N9q)cTV%dOXRRE$GymE#MoS$C2a$0_KeT3)iqxJ3*3i+9nPl?G$tod5o0y4@` z@r0q#uN>bcXIxaorxxl?{wF3Tp&mN-Imk$SzDR@r=i#E;1iQsU5xOX99uAh_t% z)2xwMvf_;E#xT?ADoii$g7iIs(x*QJdMXGrx--Y6|R`Ioea#QfV6!Ij%V zg*xBmwSWD${nXhlCeU(lvYL#X#Dl4!>p{BTD*rf8qN3@)I5I#>ciuFq$qQu^CfCUD zN+ivf1{eCpES-_FUs^l1W^Di48}n9n@&HOqIVso2MSu~!|i z*eT5`$;^ed=t{5ZEMiFDVaU-Cd-$Pzsq_3vk?I!@&t{t}BC8zLzkUQ3P-Stjq zJLkQ^uAts0jp^)UA6Z!m=70CW?V4*}&em+$zRz>_r<%I&W#Q8%H_EC?oW6SVE5iat z296CITD*;3JFi^2{I2RO$FGwcW#uQ|l@*lu@jwk+FTZ{w>6ohV%ePfY>%+#ce8yk} zs&bMNRvHt)wea<6HFtJ1^m+G96^lA*Jmc2nFgXQ@Q+!Hm7!;Tp@{GC9rljxn+!GM{ zcE0F?pvir5GJfrg{!RnA`r!7`TCX%2pDC=0q52*oo@oB}L(MO%FRQNr^1M+ubdDDQZhL}UQl9I*ZIYu z^y1$Zv0G%TlG{9$Zyf70&-8#4D0>yd^TS2u>SZ8x4EOhatDS7_))$+!PsHoUO^~|D z*W?8yj!pUT3gm?wX-7jZt+aBU?;hlGQ$*c+@_%`C2^;=J-$7Z0F}!TbB~Cl97AE(X z`|kJ(PqtG~k>CxpI0BMXOET!?>bv096OfTa*KL2&!d;iLt z{lyZ>UtZS>QrX}3YD)2Ba` zdNXmm=RDW&aP45f$?Fv4CeKh3ck8uvSOb!O?|JIB_}XOe9s~FIy$d;0ps6M}HMz7X zvjkSS=h_-C1E~&J<#zn}A#InLbt&)k-QO@y)>W38%&*KQk;imyH%P&yhbw+4?Y!zf z`&fat(CoP1lXaCfB>FW^z5>O+PWpl=ACs0lEIux{!(c&O+T>hiWr-V-x4?raE@z!O zdj5B~ZdAS{op>wNa`FmgIf$#yM%U@-ZBipXR&6+sE52}R)kq~yBYjZP^?-6yTqvRQDcaXCnV zJhbj?zB%aw$Pmi}$L6Jx_4JyDEHo5DK%+588G%`j?-Hn~xR@s^ zp($6^b-Kvc@Vsa-yUFURsuJhI#n*sR>&6h(ldqrUxh;KcHmPp?wC9skR2AIW`r^S& z?IXOFYPkpU{U@+nr|=+8zB}dhUsdse)J*PE6Ox#8aAzY(!As>+ z8;&L#`FH-7GJ3r-zXq(}oSJ}y-}lh@AO&m9PNYUxnR>4}-m=m$tj2EgH#IqlVlLlV zpn$azE=k&XtirQ1!D*KNwD9=JI_gTkfp2Vof^`10>gW*ARrOt5d8cgQZH{>uff16K z3vYk^U3&B|NR#p19YPtF6286fpYwPxyq_?6wz`zWk28G_K$4pKJp2CbyzJQWb%F6Y zfwIe!&#Eib8_z$o9ORbrmWKN9eYsA%-YMB`$e2boP?tEGO^;vC%LmTbUOj_yL8 z`V-(3KJ6uE^-Ik-=gG?+hF%bRuI(HSi4CX3Vpy|dgHZoWkR7jU*9jb8b$6KDR5eRe p?wQ8qty-7F`WTl_Wn8|2arrLB<;NJ8Ut?VUoN@WT&2zO|83BwbZF2ws delta 5989 zcmZ1zwIpgnfx;3^1_ouPoctsP28O@E+jn0sOY!O0`gh{Q1!e!vG?%TLIH5$=j)9TE zje&__8UrK4LIx&=j|_|qe;JqoYKNipv>KKFb`y%?074XBSpX)?^9b;fC`Y4oz-ju|*PSl$(5k z#Z%YNOwUr=#5lA|)|#vKwoy zmZ5fPa+0x0nuTd#%MEg90N%w*1v4e=9Sc=R|#wZa(_u z`s7A-vB?+M*(CPeQTzwe&F3L|cxC?y&%M4^)@w|T37Y(lT~VUgqiZHevhe1v$OF+? 
zZmU<9$u$N0$4yq}kk35g#PEz^0Ruzp{ACAEetqh+-DhbYL+Ik$xy8x(MLC&CdMPp^kl;%(zYlT>>#V#^%fy9_n@)eS|5Eh7ck+D>QHfo_0XspG zE5u~FnKM?o^c?T!=eIg`bTSX8if7_pm2Dt*7PkL57_8v#*!BIusbn3|U%{2dC8@c3 z&iOexsmUdo`FX`qcg*Je-Oa$j$k52K@$TA7{~a1UimyF+HE+e_N={`76KnQo3=WJ8 zY2j7RJuhf`Ej*ah*;_TKa`F~V6$zcsJQqP$&Ht5Hs;lwWr&sIlqtg#Xu1x;GsVdPe z|9TThvazSp@kz}`-^SRVZ>|bmUOjmum)K-yE+L7H&DR$(9AIFOo)P-TjwRiDeTstP ztdf z$+nHd?E%Q*DIacqms}<8(#T4S)bzGnB zeKN)OoZGH%r)E}}U$mM0lUGHe|9IGXkZa`<_!Sn~IJ&LA`19*A<09tCrhJAH%h;a0 z1sS`P{SN1gGdEoKmW6)c`d)r>avh(%#N>x^3qg_z>uT@+n&IUzyZfMQ=!)8|$y@nU z>t{50w}OmiUh?k#+s@A(+wcASw=!924lKAb6VvnZi%T+-i}gZ^5|cCY(xH(cyDIPz zNMl@q`;Grb5B(QBSl)d~+^-R;5fa3WJio4k@>$!}BX_hOuJNA7|MtlD2& zNAmNCZ;coG0&<|pA^pd<4H^75i^$sQEc-sWhF{Tl;+%?YpfKrc%GfU%`PhB!`mCgY zYiHbHCRFAn=4K}A1(oKNWah$>R76qAW{`pE2612Rvf6mhC{EVgdZTX1ORJ+sTI(PLqENDEn|lPhSm^Oq#K-L}KYi-`z^G zesVl3*q~nY^amyXf}+&o;!JoJ_z}FK3*-qi`(18Ny@DL)^qqOQ>deOY$ytKx67xBx zeFqt6!nwC@(G4YszJJQa^#bL6|#H}F7qH8O+axfeDw}rL&?{yB(M%V_4 zI_;@%!Jc*Hx<5N+i@X0?OTMX0bw_thZWI!otRy5NA^pv71IP)f@t@tloTzfzmVoRMXfnHG(%9xP>k*%cx#)Gt$&Es)65aZ7ogm3Fg$HZjEL-Ti z)s^wi_t&QHLHZ|O77~=WtE7Agq(JxGjux?~evft=RfWDaCVxN*)WQ=>k~30^po!6O z_r0Ycw*>VhS%2&L>$dDc==%*ByQL6lH_lm_N!g(o9~vTo`G7^SL~mh zFRUa{R5}Bkb5z&8dKXbL!)0>kiYu{eCcT-wQdq?;q1m$qNtm;*9zLGV0*f z7kxQ-D}3kr&zdT<=Y0S)*nKkd3ZZ_Pv(|GfD8(=Ry)($=ioW+IhDY~bnEmycTrDCe zF-5BHHmGiL4yt_gB+|{l{qU@y4P~#yCvOyyu1_pr+zHY-&4P21@SG!#6Q^(a^13}M z5ZR8v($u2L#3Xn*t$a2LT;^~pCX4ZA9QR%tn6gJov9t`T$2T=Mzo-)GvI8-X7J+i} z*(8Cq>~&&}3-;wZ&6(Bm392^0FSP`!GDkA+Cdju57yKr6%ir={C0O$_zfIo?suHBo zqokw&s(OW$@_SJGLBPn`F=^EszYf{tPl>y(Y??eT~r# zRs@!2CPSTjV2b~7P&!-|^H}65PnG}lrGZg<{}dktD~G5KN-Zo+EiQqj-1?;2=pMhl95`t?Ru z$1hc0sy~@gLR#Wl)#O7UxA*Q<)~mQ+?$>Cd>uJ4*W6op~2~~+1=C0uOPty zNWrGvL5;PG7W%YZHw!s>bC%HLdlE8kQ<~Pz1KAs#nUi;NqMOG|f8qL+$(JEPoU7+o znwwe#sskbATHM1I`#?t7c*ffu3r=@^(7^Oh%1TWx@y$<3g(b@_5sq~ryB8cVm{cX6>DutMU~Ws&!kd$qORCjZ z@9+ROilb$=eZ6&aw%?8mhkVwvlpKbd7L=L>ZQ4R3zmq@h2FPg^`}o&X^@#f|dA}>| z?2G7)nCe|~Ghx}4X~v;Np!okHC%Vse=~aih!56QD9O*O-uAJ;6r6JLyS9AyDz>cH` zM}@lOoj2Qxtys#{eQ$D)l%@oO>&Hfrg>etcpS%}Sc3)ayZTvk-V(R3xQpys-N`
EeuX+;UigZd{K6qpzyGMT(SU9oVV8X9GN{7T*L$&S*> z5@F#x?t|={`FPnA)~R8REB`*zt+f07WO9wPy2O>+f3JazowTeV!~L9^%O)X-yf4>d zLMMNe7M^@gnpgTb9@BtNi5EG774pdBurouttXE z^}lC9#{50UNxA z0V9Ktd;!z(dFy=_#82(9mA!Boq-64QSwV@<3crqmLj2IVw~LN8tn%2jFSp$AUv$r8 z0XYi^M{!ATO=ui4?Zc5@*Ztd=o^P8W;N3SlK~6!U(q8R6g90-{%f*|I59Rl}bY?DN zduD9rJb8wkjD*O&^A|x*YgpQ`K_pYlZQm{VIWsQYuA6*DPEI1wTJZ)bulT)|`Tmdj zx_7tQeva63j+Du^^0Jc)G}$D!D6QxP=~h|c+jnmH8UIxVrXj16-=CjsE3YTWfHf2Q*@ksZcSwEuSPAf?gjGP5CFXEX_Cxf1rOu} zCAM$B4ldcn_;;M%@p*~Qx`{=nW-F`fffOi3XS> z&b=^Wl73F8W6xHtmm8~N{(*HbQWBRqr)L9h-7VObcjVbhSMM&aEBn}H>{&SZqLP?I zZO$!lCfMj&y3NE-%As>#;?%7NZtR}?Ur9ltBhM(9V8`X<_TZ-ce3#zS&o8liZ?UnJ_cL6jH~EFK zjKqeb{5K#YOO%BY^r!rBYR_KXR@h|tezJ^;s>J(AMqvA#kCacp`;5zDzWF7aKj~HL zCP%7BOPmON4(_&kIGWk(-1hO`wlM0*oG%eBlY3OuB>o)PG!2x{nXA&bY4bXGO}+Nc z=<@kxcPF1xQE-=7`y1S;S7GVj$nfWl%f#Zx+S}&{`ar!Al3G!s=T!{vIaC_0Y6P_) zg=-XxMK0a)UbdA(xsuHbbTaSa1cWtJ_G4YnRJfvUbw0I zsxF0OhO56-CiAO_LsRc_$G_mPtU1+lv~Kn#kE!xgy1!XnUJp_-d8V461k0~e;Bl>6 zM!S`DkGOeGeA3Ud`OJooUJzGNb4$r8*wLOWc>{vXgIfZwwUoXMdVE` zP?z<}P)_&(GMI7Nr$gs`Cwk4X*Q!fDSqTrOqQt!7M3m8}sJ%NUH-FdU3=P@(=^dQl+$NND zPhPOi(s5>Hn75(q<>@d}P_k6nn#qtNbL(Rs{|P+49S0NVdq_0Tf+|K7n!!8vtpGWk z>5I=^w&evri?+*E>*~ISg#$<-q}L-09z^teoei!rpKr80a5N{`c`MuOiVt0~B9j$0 zdlX}!(Bz{H?#o9Hhmu+VE;wob+s_P)l+;hORi&vVv-hYy@ztYtE&{^_vLr@_c; zfGhLg2nEg;a@n_p1<5O`Jkq(MD~ZR;NJeC&`cxuZ(BX~ z@0t6fzRIn!~da|yTxPaPz6FkC_=?PhNL(0%O!>`&%2>7}rhy z@F0-!^<<}q4vdnUn;+`2F}~Qm>G?}0jccJ`dwxCo|G)V+V=1>suj#g!|Nl2LzUK7k zHGLBP|G(jFk6zoN&4*uoW@CIfdHY9e#-)=#e+*)rIN9x!CS&^Mf=`#3nAn0hyMAqD zWPCdL^fx`mWt)F}>tbZQxw-X6J|ko8=C40bF*3SrKJa@Qv$V4-$hV;vUhv8>GI+G! zE=g;!W++MY=r!Fnx$wUUeH99^Gx z>lsU@M{6+ZFg9)P&|u7GWGvnOLzA(QiSg9*Djh}zM$PTBbr`p>Fh)-gFl01k+_AmQ zkkN^eQDpi_BSr_twCP;Nj3$i5)9sBJRT=HJry4VEW@dc3UDBK}k&*Gz^lA%6dB#oC zXIn6OFdm-%#DXzZP>mmCdF_Q4vu1%9ru(KxSu(0Hx=*jSWOQYmzWt~rqY@)y>h>2_ zj46zabEo^-Fv>I5PS3SrjAYz2{kRR|bjC;1<82uW86~G*v}Md}F(KHGOk1qcNk*^yk5hs~F{`PYPjFVf?v$dkEu8Zbt9z3lkWR zGBJ8iPfuoi#+WqSFNJY4qxyEHR7QD5M)vJWX^h*M7;C2A%3%D%xMlkFOhy~V+tWp| z7$-6+OkbA87|z6YZ~Ff%#$z12Z~p@&?S0eFW;5C`Szn&co5Q%1v2^?K97c6^#_QAH z7c(AWyg7YS38OFL`)Q?&o*Xup!8)wA2bMC%urY>BUsui8$oOr$K@DR(qw4P?VA<^l z|NZyqy!oPU?f?HR2TBw@x`jP@ZEGQt*SBw}Wt3*(Dc=uL#G~TTYpcKgc0J=#M#h!f z8ygw>7>#QW{{v;UWp4lfdvuph@aVNoItCV*0ukAP5NQI5^x7Vu?$pdUozY|Zt!BnZ z^W(e0s+&Mbxx4g$N3X5>A+SgRLe&odokd^dd>BU1rm?R0h~CObyE>26F+CQO_j(`%TR zvKZ%1f6l}d&geSbo|(ysiOGF>Gc!{l1nJ?O^marzhq@{V2qls#m1D#_+xqx8dd8?ZJ%)qHfbqxlUJj;U#wF7qaxnQbhD$c?q|f+cdoL$b z88>6&b{0`4Uu|n+BV#>NLkmMQ1qbK++}z9(D+OXp;q5&ROf9S|99vkcCKv8?*&gA> HbbuWI7VcXf delta 1540 zcmbO+hjYdp&J8-uBHaR+Zii)(-Bw?dSRJcm)Dkl3(ap(?OcI+Tn0Kf%YE2f@7H3r0 ztf{RivN_P6mwR%%*9CoZJp(;MLx$AkBx92_3)9qOOQTf7B;#ZYLzC2$H1m{13u9wr z^Hf7i%T)6;GqcTM-XBU0U)25o|8inJ1H%N5ZhMbj)8p;`|G#WR@RLFOLInT9tOkQ_$0%P!I`&%2>7?)4} z@F0+J_hhGs4va#Zn;+`2G49yB>G?}0jq{;kdwxCo|G)V+V=1>suj%@j|Nl2LzUK7k zHN6}D|G(jFk6zpC&4*uoW@EfHdHY9e#(9%Je+**mn(X#TlQCg)!KceiO#g#6yMAqD zWV|=|^fx`mvdzD~bultt+T8jhpOLX_^VgrJ7#VFhANW0uS=z=Gn`89Z8V zm!vgVGn6EH^qOv-T=?IFapUG?{|~b;KHZ+h!nlElv2VJxAfqy4)^>YA##ScAC)1A# zGb%A2oc=#`fYW_ zdd9rz(He|8jMdvaG#K+48S}RP&}3|6Vw^R-N{3N_QEB^Z9mXvzjM~!!3>i%sH*Rk; zWOQO=GtVSmW(Ql4%6!`8C@ACY(HwrsKm$^xBZ0` zV+teVwCTP!jPi_S({pVYBNbccw5FoMxp5!Z5gu|cW<|`V>D-F^xR(J z%xKNXD7Srs3*%dm5%b&_-!n3nZtwG8WMpPMJ$-^VV>)B$^dH`g4;ZIxzv;us!o+xW z`#nEK4`#-u?dm~{-HeQjr*95sG-ec?{ydm*6{Gm{Ng<3XjGwk|4`F=C&FHdyVFKe( zCPt^}>B)@G7^A2Ar7&)0RM^gx$|%ps$go{0jd42@W9jr;8H|4z*G|8l$!NoPWx7Zf z<3vWu>C3Vh!fWX*Htq%GQQt#P{SC{sQTpySa!p~ zfB!u?Z@%bT`~QE-ff7ZJZefpJ+cJpch3#8v8Ks$cLQaDe@u+z8+G=dSUC+3bk#W)X z#zw|IM&q)>|3F!7ncM&W9^IuAJbG=TkAX#|Kty&RM4CV%y|xFYJ2f*-XLOu?tC=y< 
z{J?Io>LyT9?k+vx(QE5)2rNeOA>vZ8(MmI*A?NP0a zx0!Vg?gV>@ALON8(Fb6P7epBzc#-Y!|G#VNw>n+d&aXbbwlk+Eb~0)*UfkZ@$=DOm zSh-!|C1axq=^l`yD>4DFx_;YUc!)93X7Xp$pRUTn6wGwnb$S&GlRM+2>4#XD zf*9?mi?TBLGrf12p2o`5#F#z(B`cEyqtp-$7(Y($CT&K=>9L$l`ivj9_i{3o zaWhtJXAx!c)wVV?GS)LOGBGk&aB$Ag&CM*aQXsYj-rnQD)WXWLW{pPfv-ppIvt?!u~&33=84W7mz>b3u3%rgI5!38qxJ{vWB FB>>L{5~BbB delta 49 zcmexr@YO&gz}wxChk=3Ni+fhA`G1AXm)#v}j50o@OfG-h+ztQDaE diff --git a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.CoreCompileInputs.cache b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.CoreCompileInputs.cache index 130baa4..322787e 100644 --- a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.CoreCompileInputs.cache +++ b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.CoreCompileInputs.cache @@ -1 +1 @@ -ae803ef1c48286dbe8e8ba58e1f13618e8c4a416 +2e6ed227385247b3097da8142402f636653d97a5 diff --git a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.FileListAbsolute.txt b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.FileListAbsolute.txt index 3d582e1..f6b5b49 100644 --- a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.FileListAbsolute.txt +++ b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.FileListAbsolute.txt @@ -124,3 +124,45 @@ /scratch/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.pdb /scratch/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.genruntimeconfig.cache /scratch/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/ref/gregorsamsa.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa.deps.json +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa.runtimeconfig.json +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/gregorsamsa.pdb +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/Confluent.Kafka.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/linux-arm64/native/librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/linux-x64/native/alpine-librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/linux-x64/native/centos6-librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/linux-x64/native/centos7-librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/linux-x64/native/librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/osx-arm64/native/librdkafka.dylib +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/osx-x64/native/librdkafka.dylib +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/libcrypto-3-x64.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/libcurl.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/librdkafka.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/librdkafkacpp.dll 
+/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/libssl-3-x64.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/msvcp140.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/vcruntime140.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/zlib1.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x64/native/zstd.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/libcrypto-3.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/libcurl.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/librdkafka.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/librdkafkacpp.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/libssl-3.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/msvcp140.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/vcruntime140.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/zlib1.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/bin/Debug/net7.0/runtimes/win-x86/native/zstd.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.AssemblyReference.cache +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.GeneratedMSBuildEditorConfig.editorconfig +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.AssemblyInfoInputs.cache +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.AssemblyInfo.cs +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.CoreCompileInputs.cache +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.csproj.CopyComplete +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/refint/gregorsamsa.dll +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.pdb +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.genruntimeconfig.cache +/scratch/repos/Exemple_Kafka/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/ref/gregorsamsa.dll diff --git a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.dll b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.dll index e1911b238244c3032df1dad89d73d830f1633021..00a972ee39dea7b2e086291cd94db5e958112561 100644 GIT binary patch delta 223 zcmZqhXz-ZO!LsbL$+d|+GK^6ZSBCSkS~4*BGcYhPI504zuufDIo~+0i&zLZ|k+Gi9 zax){-Yhi&fuk1wjHA_8OH|}Nj2y-#q{6|!Qg;)N)$_EBU-vA~C28N=^no{zt_beC~ z?oIZUQdU~w_Q{Ls)mq0MhEGnHk9|GC$iTp$Uz}W&SdyHfUzA#qUp%=-D%c3B?BW@y zGWqu^1kSJA zh8vT8rIcm<25;Yexh%z}W9#3E6Bm^IV`N}p&@WCdN-Rmvm^?`;*a)WJ1XKaYuqnbu z3ab{WaV^`W%c5=xauNhPg4hZZ6~!2hC+kUTvHn+OP;i(msI+}EpUf{-rVz``f=cfh ICmOH=09nFK^Z)<= diff --git a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.genruntimeconfig.cache b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.genruntimeconfig.cache index 
dae7078..28a4714 100644 --- a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.genruntimeconfig.cache +++ b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.genruntimeconfig.cache @@ -1 +1 @@ -50bc464a59aeb65ac858853faf7456b2e8429d5d +2d8fa65874f17431d718045273de21fcc8a07db0 diff --git a/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.pdb b/dotnet/gregorsamsa_consumer/obj/Debug/net7.0/gregorsamsa.pdb index 7ede018c3fb7ea71ec622cd4d48dc14f69fb0c96..4c25f4aba5ae693d670f3bd34bfd1432d6c71fc6 100644 GIT binary patch delta 6169 zcmZ1ywI*spfx;S11_ouPoctsP28I=GpS+k}t##~S_~dl?*w+*C?^QlboKPa`!obMj z%fQ4imw}OCB?A+~PXn%Nrkou542+!ovMQF7k1+@Ea6@@{MHTgv6HvFilKKG%+zuO*Az%F*UL@O-?j2Gc-3g*!-Ba zhEd{DnZ#L!1_p*((OqTLHH}`=uCr`YF1Teoc>}xHo6<5pJ9^iQH9AH*kvTlUrGN4>1J?{DLqr0=-qRhYxN|L|9>YxVONy6dtB;0Nbli>g( z!$EX2FfyEF@?kl_eZ_Ivex7)q zUoQVA=W{Acyx8e+nZbdPq1aC9%aiLM$@bUL7D|rGotBncbxbl|8a7#;TWqo=mym>h(%-WT z2N)O*-c|n-+IQA>Q835Ose;~s+ibzaZp%jVKM&ilA8?A)uoh6AJk z>h{zXs}_Qk_#QmDQbKvJ@2-d!ukKA-Rtr)x`6HLO`0sdd3KHSwdUSlPr$nb5;tfj6W z@A*w}np^Vn^VYMI7jR40FHWn!4zgPDj>g{2;bqQ~dP914-2Am08b!|edByoTsn94o z()YI;WR?92j-n991o!pkxi5EJ_r45QTm(~@$0ENIq*C0molIpPFYZu zE{P?Hdd~T|xr+IDP_qp@RktxLU}8vLDHZwoa=q{JAloZO?vYO>NAsvS?OYRa4dlu_ z=hd7JC*Aa(K2zlXub(w0$i_jEK~l!XXCRBMIuCtQnZV-M_s7&p)P3XW$p?9qB{oc) z_YY)@!QrsaAN*JNG&M5a(q_u2n0$~&crrgPkHqs+N7sTBH0K>G>FKX^m~|w2o5s7p zkzfUGyy6npXJ0lmJYZn>Hi@A(&GVM;gozV=_Fua^6|A6xS4^U~HhT`pgspu>N#Bl{ zdoF0Lxw-A&&rYy{le~Nq!pxI4g3`;?oTi$KY)$SnH@|+hXu85ykOIx%lEmcfkfOw7 zSU6fd{(ThWorQNECFVp|`AyUKoAz%FX9h_1WIH}Ni5>iZ!Kv$-o^S2Vn;ovpi&qtY z&GSD6QlJu&pPvKEUe2+TW`i7dKU|&HDyG1+J|T(fVm)wiVSSlv z&oleq&OI}w0;M=~ATIyS$5$^@YVi`}7NOedxw9LX{g(^RTKyzbK@J)TknsPM7t;aC zhQE$KW_&v@!)23frqGKE!tzi>E=7sunR)5ZI2V&Rbra+!-ayvEyRipc=AX4Sd3dKs z5>0VHQD$ytiE?IHD$Fx{Coe4o1!Bvs<$bAkU)|^b^ep(jWcTsOtNCRmCUl*C2QroK zr0a64!_PcgUTtsm`tac361Oy~99zWd-Qs8atSWxJ7 z#HD{l@a(pzJNc6v1f(S-wbp`DT)Nu+@)RpS|6c$4J=Y(+?gZ;TAs{Hh?K%sryW^um zLrT|7$F=?sPOvOu@16WnKv}~2MDl5n(Hnhz^MAUnaow5XsN=<=oi+KRfEcvssJXEC z5GYcN);?$RSsCrJ`DgL}DJju;ASIK_1O+Ajh3`BEGUKUQ(%sLwksezL@2*UJ`R58q zfnQW+fmSL%j(9r?%~4-Fene`t{8{EYtsQkM}}t7Ip% z$Z753T~}7@w9c5!ETrkZ^KQr!kb~kH3?*XnIs98xtVD_eQ+6T@0oe%ULDEc%&4H&N z^NckaMRm&8d(Gb=&ab)V=cLKyLaGucqZkW0?X@(Pz;T;|lar1oxk%Mydh zy21()yh|SSfFuo*pQh}2Q0l)U{Db=*uRY%l2GDDY@U2?}0 zx3y+Xo-eH8HY4oGYmnrzEpO86IK%uV=B-tlmZWX~^^&K*UO;96Jf+(AeQN^Q^DekM zGt>S^HbjqB2Oy05C!sNA)U)edTuPi9^rG{~zm zkA4Rw_zi9Q^JY%`>NENOY7c=K%8Zi>MdZB8ThE^Y)o!b-^(;9ae)gKWBh#_3r7RTL zn1IBh#GIVe9H@EDo^cmI=FRiW4W4?|$7$EXmYB#(x_2hu5|OEYX4Tvek~Cde8&|0M z%6U_O*`yZdPIstzzNxwSMU_y4_IRzl2g>gztjA03tJB=pX}A=mtX|p$RqK}uX$&wh zW`A3{0pwT3!h~CeP3?ZO*NZ7V`{)6y5kLw(N=gd!QgUQKtrE>Wo4P@*lB)kvtJI&m zIxX3gv}l|5s>I0?MWyRM2<_hu3e;)yJSWX!m2lapH|+&yuAwQCr699>GK))6^HO0s z*){uPBS_b&Z{`v$hl6})wFh6EWAq{hNtbV8CM<+r_6Xhp1;UXnU203b+#KgJIcb}{ zoxdr#vUsw#n5+a}vS=sBKW0~6PX00Tu=4^pp`D@=v?C`ch^e?;^h=%s(#-f{*N+}a z50B+N9q)cTV%dOXRRE$GymE#MoS$C2a$0_KeT3)iqxJ3*3i+9nPl?G$tod5o0y4@` z@r0q#uN>bcXIxaorxxl?{wF3Tp&mN-Imk$SzDR@r=i#E;1iQsU5xOX99uAh_t% z)2xwMvf_;E#xT?ADoii$g7iIs(x*QJdMXGrx--Y6|R`Ioea#QfV6!Ij%V zg*xBmwSWD${nXhlCeU(lvYL#X#Dl4!>p{BTD*rf8qN3@)I5I#>ciuFq$qQu^CfCUD zN+ivf1{eCpES-_FUs^l1W^Di48}n9n@&HOqIVso2MSu~!|i z*eT5`$;^ed=t{5ZEMiFDVaU-Cd-$Pzsq_3vk?I!@&t{t}BC8zLzkUQ3P-Stjq zJLkQ^uAts0jp^)UA6Z!m=70CW?V4*}&em+$zRz>_r<%I&W#Q8%H_EC?oW6SVE5iat z296CITD*;3JFi^2{I2RO$FGwcW#uQ|l@*lu@jwk+FTZ{w>6ohV%ePfY>%+#ce8yk} zs&bMNRvHt)wea<6HFtJ1^m+G96^lA*Jmc2nFgXQ@Q+!Hm7!;Tp@{GC9rljxn+!GM{ zcE0F?pvir5GJfrg{!RnA`r!7`TCX%2pDC=0q52*oo@oB}L(MO%FRQNr^1M+ubdDDQZhL}UQl9I*ZIYu 
z^y1$Zv0G%TlG{9$Zyf70&-8#4D0>yd^TS2u>SZ8x4EOhatDS7_))$+!PsHoUO^~|D z*W?8yj!pUT3gm?wX-7jZt+aBU?;hlGQ$*c+@_%`C2^;=J-$7Z0F}!TbB~Cl97AE(X z`|kJ(PqtG~k>CxpI0BMXOET!?>bv096OfTa*KL2&!d;iLt z{lyZ>UtZS>QrX}3YD)2Ba` zdNXmm=RDW&aP45f$?Fv4CeKh3ck8uvSOb!O?|JIB_}XOe9s~FIy$d;0ps6M}HMz7X zvjkSS=h_-C1E~&J<#zn}A#InLbt&)k-QO@y)>W38%&*KQk;imyH%P&yhbw+4?Y!zf z`&fat(CoP1lXaCfB>FW^z5>O+PWpl=ACs0lEIux{!(c&O+T>hiWr-V-x4?raE@z!O zdj5B~ZdAS{op>wNa`FmgIf$#yM%U@-ZBipXR&6+sE52}R)kq~yBYjZP^?-6yTqvRQDcaXCnV zJhbj?zB%aw$Pmi}$L6Jx_4JyDEHo5DK%+588G%`j?-Hn~xR@s^ zp($6^b-Kvc@Vsa-yUFURsuJhI#n*sR>&6h(ldqrUxh;KcHmPp?wC9skR2AIW`r^S& z?IXOFYPkpU{U@+nr|=+8zB}dhUsdse)J*PE6Ox#8aAzY(!As>+ z8;&L#`FH-7GJ3r-zXq(}oSJ}y-}lh@AO&m9PNYUxnR>4}-m=m$tj2EgH#IqlVlLlV zpn$azE=k&XtirQ1!D*KNwD9=JI_gTkfp2Vof^`10>gW*ARrOt5d8cgQZH{>uff16K z3vYk^U3&B|NR#p19YPtF6286fpYwPxyq_?6wz`zWk28G_K$4pKJp2CbyzJQWb%F6Y zfwIe!&#Eib8_z$o9ORbrmWKN9eYsA%-YMB`$e2boP?tEGO^;vC%LmTbUOj_yL8 z`V-(3KJ6uE^-Ik-=gG?+hF%bRuI(HSi4CX3Vpy|dgHZoWkR7jU*9jb8b$6KDR5eRe p?wQ8qty-7F`WTl_Wn8|2arrLB<;NJ8Ut?VUoN@WT&2zO|83BwbZF2ws delta 5989 zcmZ1zwIpgnfx;3^1_ouPoctsP28O@E+jn0sOY!O0`gh{Q1!e!vG?%TLIH5$=j)9TE zje&__8UrK4LIx&=j|_|qe;JqoYKNipv>KKFb`y%?074XBSpX)?^9b;fC`Y4oz-ju|*PSl$(5k z#Z%YNOwUr=#5lA|)|#vKwoy zmZ5fPa+0x0nuTd#%MEg90N%w*1v4e=9Sc=R|#wZa(_u z`s7A-vB?+M*(CPeQTzwe&F3L|cxC?y&%M4^)@w|T37Y(lT~VUgqiZHevhe1v$OF+? zZmU<9$u$N0$4yq}kk35g#PEz^0Ruzp{ACAEetqh+-DhbYL+Ik$xy8x(MLC&CdMPp^kl;%(zYlT>>#V#^%fy9_n@)eS|5Eh7ck+D>QHfo_0XspG zE5u~FnKM?o^c?T!=eIg`bTSX8if7_pm2Dt*7PkL57_8v#*!BIusbn3|U%{2dC8@c3 z&iOexsmUdo`FX`qcg*Je-Oa$j$k52K@$TA7{~a1UimyF+HE+e_N={`76KnQo3=WJ8 zY2j7RJuhf`Ej*ah*;_TKa`F~V6$zcsJQqP$&Ht5Hs;lwWr&sIlqtg#Xu1x;GsVdPe z|9TThvazSp@kz}`-^SRVZ>|bmUOjmum)K-yE+L7H&DR$(9AIFOo)P-TjwRiDeTstP ztdf z$+nHd?E%Q*DIacqms}<8(#T4S)bzGnB zeKN)OoZGH%r)E}}U$mM0lUGHe|9IGXkZa`<_!Sn~IJ&LA`19*A<09tCrhJAH%h;a0 z1sS`P{SN1gGdEoKmW6)c`d)r>avh(%#N>x^3qg_z>uT@+n&IUzyZfMQ=!)8|$y@nU z>t{50w}OmiUh?k#+s@A(+wcASw=!924lKAb6VvnZi%T+-i}gZ^5|cCY(xH(cyDIPz zNMl@q`;Grb5B(QBSl)d~+^-R;5fa3WJio4k@>$!}BX_hOuJNA7|MtlD2& zNAmNCZ;coG0&<|pA^pd<4H^75i^$sQEc-sWhF{Tl;+%?YpfKrc%GfU%`PhB!`mCgY zYiHbHCRFAn=4K}A1(oKNWah$>R76qAW{`pE2612Rvf6mhC{EVgdZTX1ORJ+sTI(PLqENDEn|lPhSm^Oq#K-L}KYi-`z^G zesVl3*q~nY^amyXf}+&o;!JoJ_z}FK3*-qi`(18Ny@DL)^qqOQ>deOY$ytKx67xBx zeFqt6!nwC@(G4YszJJQa^#bL6|#H}F7qH8O+axfeDw}rL&?{yB(M%V_4 zI_;@%!Jc*Hx<5N+i@X0?OTMX0bw_thZWI!otRy5NA^pv71IP)f@t@tloTzfzmVoRMXfnHG(%9xP>k*%cx#)Gt$&Es)65aZ7ogm3Fg$HZjEL-Ti z)s^wi_t&QHLHZ|O77~=WtE7Agq(JxGjux?~evft=RfWDaCVxN*)WQ=>k~30^po!6O z_r0Ycw*>VhS%2&L>$dDc==%*ByQL6lH_lm_N!g(o9~vTo`G7^SL~mh zFRUa{R5}Bkb5z&8dKXbL!)0>kiYu{eCcT-wQdq?;q1m$qNtm;*9zLGV0*f z7kxQ-D}3kr&zdT<=Y0S)*nKkd3ZZ_Pv(|GfD8(=Ry)($=ioW+IhDY~bnEmycTrDCe zF-5BHHmGiL4yt_gB+|{l{qU@y4P~#yCvOyyu1_pr+zHY-&4P21@SG!#6Q^(a^13}M z5ZR8v($u2L#3Xn*t$a2LT;^~pCX4ZA9QR%tn6gJov9t`T$2T=Mzo-)GvI8-X7J+i} z*(8Cq>~&&}3-;wZ&6(Bm392^0FSP`!GDkA+Cdju57yKr6%ir={C0O$_zfIo?suHBo zqokw&s(OW$@_SJGLBPn`F=^EszYf{tPl>y(Y??eT~r# zRs@!2CPSTjV2b~7P&!-|^H}65PnG}lrGZg<{}dktD~G5KN-Zo+EiQqj-1?;2=pMhl95`t?Ru z$1hc0sy~@gLR#Wl)#O7UxA*Q<)~mQ+?$>Cd>uJ4*W6op~2~~+1=C0uOPty zNWrGvL5;PG7W%YZHw!s>bC%HLdlE8kQ<~Pz1KAs#nUi;NqMOG|f8qL+$(JEPoU7+o znwwe#sskbATHM1I`#?t7c*ffu3r=@^(7^Oh%1TWx@y$<3g(b@_5sq~ryB8cVm{cX6>DutMU~Ws&!kd$qORCjZ z@9+ROilb$=eZ6&aw%?8mhkVwvlpKbd7L=L>ZQ4R3zmq@h2FPg^`}o&X^@#f|dA}>| z?2G7)nCe|~Ghx}4X~v;Np!okHC%Vse=~aih!56QD9O*O-uAJ;6r6JLyS9AyDz>cH` zM}@lOoj2Qxtys#{eQ$D)l%@oO>&Hfrg>etcpS%}Sc3)ayZTvk-V(R3xQpys-N`
EeuX+;UigZd{K6qpzyGMT(SU9oVV8X9GN{7T*L$&S*> z5@F#x?t|={`FPnA)~R8REB`*zt+f07WO9wPy2O>+f3JazowTeV!~L9^%O)X-yf4>d zLMMNe7M^@gnpgTb9@BtNi5EG774pdBurouttXE z^}lC9#{50UNxA z0V9Ktd;!z(dFy=_#82(9mA!Boq-64QSwV@<3crqmLj2IVw~LN8tn%2jFSp$AUv$r8 z0XYi^M{!ATO=ui4?Zc5@*Ztd=o^P8W;N3SlK~6!U(q8R6g90-{%f*|I59Rl}bY?DN zduD9rJb8wkjD*O&^A|x*YgpQ`K_pYlZQm{VIWsQYuA6*DPEI1wTJZ)bulT)|`Tmdj zx_7tQeva63j+Du^^0Jc)G}$D!D6QxP=~h|c+jnmH8UIxVrXj16-=CjsE3YTWfHf2Q*@ksZcSwEuSPAf?gjGP5CFXEX_Cxf1rOu} zCAM$B4ldcn_;;M%@p*~Qx`{=nW-F`fffOi3XS> z&b=^Wl73F8W6xHtmm8~N{(*HbQWBRqr)L9h-7VObcjVbhSMM&aEBn}H>{&SZqLP?I zZO$!lCfMj&y3NE-%As>#;?%7NZtR}?Ur9ltBhM(9V8`X<_TZ-ce3#zS&o8liZ?UnJ_cL6jH~EFK zjKqeb{5K#YOO%BY^r!rBYR_KXR@h|tezJ^;s>J(AMqvA#kCacp`;5zDzWF7aKj~HL zCP%7BOPmON4(_&kIGWk(-1hO`wlM0*oG%eBlY3OuB>o)PG!2x{nXA&bY4bXGO}+Nc z=<@kxcPF1xQE-=7`y1S;S7GVj$nfWl%f#Zx+S}&{`ar!Al3G!s=T!{vIaC_0Y6P_) zg=-XxMK0a)UbdA(xsuHbbTaSa1cWtJ_G4YnRJfvUbw0I zsxF0OhO56-CiAO_LsRc_$G_mPtU1+lv~Kn#kE!xgy1!XnUJp_-d8V461k0~e;Bl>6 zM!S`DkGOeGeA3Ud`OJooUJzGNb4$r8*wLOWc>{vXgIfZwwUoXMdVE` zP?z<}P)_&(GMI7Nr$gs`Cwk4X*Q!fDSqTrOqQt!7M3m8}sJ%NUH-FdU3=P@(=^dQl+$NND zPhPOi(s5>Hn75(q<>@d}P_k6nn#qtNbL(Rs{|P+49S0NVdq_0Tf+|K7n!!8vtpGWk z>5I=^w&evri?+*E>*~ISg#$<-q}L-09z^teoei!rpKr80a5N{`c`MuOiVt0~B9j$0 zdlX}!(Bz{H?#o9Hhmu+VE;wob+s_P)l+;hORi&vVv-hYy@ztYtE&{^_vLr@_c; zfGhLg2nEg;a@n_p1<5O`Jkq(MD~ZR;NJeC&`cxuZ(BX~ z@0t6fzRIn!~da|yTxP(config).Build(); - Console.Write("Enter message (type 'quit' to quit: "); + Console.Write("Enter message (type 'quit' to quit): "); mess = Console.ReadLine(); var topic = "test-topic"; var message = new Message { Value = mess }; diff --git a/dotnet/josefk_producer/bin/Debug/net7.0/josefk b/dotnet/josefk_producer/bin/Debug/net7.0/josefk index 195002cd9b8fc9a3da40bb4b0af564ce56029d69..0913ffe4c684f5c988592c6accd351a9feca9312 100755 GIT binary patch delta 1540 zcmbO+hjYdp&J8-uB7Dz!wn%aPz6FkC_=?PhNL(0%O!>`&%2>7}rhy z@F0-!^<<}q4vdnUn;+`2F}~Qm>G?}0jccJ`dwxCo|G)V+V=1>suj#g!|Nl2LzUK7k zHGLBP|G(jFk6zoN&4*uoW@CIfdHY9e#-)=#e+*)rIN9x!CS&^Mf=`#3nAn0hyMAqD zWPCdL^fx`mWt)F}>tbZQxw-X6J|ko8=C40bF*3SrKJa@Qv$V4-$hV;vUhv8>GI+G! 
zE=g;!W++MY=r!Fnx$wUUeH99^Gx z>lsU@M{6+ZFg9)P&|u7GWGvnOLzA(QiSg9*Djh}zM$PTBbr`p>Fh)-gFl01k+_AmQ zkkN^eQDpi_BSr_twCP;Nj3$i5)9sBJRT=HJry4VEW@dc3UDBK}k&*Gz^lA%6dB#oC zXIn6OFdm-%#DXzZP>mmCdF_Q4vu1%9ru(KxSu(0Hx=*jSWOQYmzWt~rqY@)y>h>2_ zj46zabEo^-Fv>I5PS3SrjAYz2{kRR|bjC;1<82uW86~G*v}Md}F(KHGOk1qcNk*^yk5hs~F{`PYPjFVf?v$dkEu8Zbt9z3lkWR zGBJ8iPfuoi#+WqSFNJY4qxyEHR7QD5M)vJWX^h*M7;C2A%3%D%xMlkFOhy~V+tWp| z7$-6+OkbA87|z6YZ~Ff%#$z12Z~p@&?S0eFW;5C`Szn&co5Q%1v2^?K97c6^#_QAH z7c(AWyg7YS38OFL`)Q?&o*Xup!8)wA2bMC%urY>BUsui8$oOr$K@DR(qw4P?VA<^l z|NZyqy!oPU?f?HR2TBw@x`jP@ZEGQt*SBw}Wt3*(Dc=uL#G~TTYpcKgc0J=#M#h!f z8ygw>7>#QW{{v;UWp4lfdvuph@aVNoItCV*0ukAP5NQI5^x7Vu?$pdUozY|Zt!BnZ z^W(e0s+&Mbxx4g$N3X5>A+SgRLe&odokd^dd>BU1rm?R0h~CObyE>26F+CQO_j(`%TR zvKZ%1f6l}d&geSbo|(ysiOGF>Gc!{l1nJ?O^marzhq@{V2qls#m1D#_+xqx8dd8?ZJ%)qHfbqxlUJj;U#wF7qaxnQbhD$c?q|f+cdoL$b z88>6&b{0`4Uu|n+BV#>NLkmMQ1qbK++}z9(D+OXp;q5&ROf9S|99vkcCKv8?*&gA> HbbuWI7VcXf delta 1540 zcmbO+hjYdp&J8-uBHaR+Zii)(-Bw?dSRJcm)Dkl3(ap(?OcI+Tn0Kf%YE2f@7H3r0 ztf{RivN_P6mwR%%*9CoZJp(;MLx$AkBx92_3)9qOOQTf7B;#ZYLzC2$H1m{13u9wr z^Hf7i%T)6;GqcTM-XBU0U)25o|8inJ1H%N5ZhMbj)8p;`|G#WR@RLFOLInT9tOkQ_$0%P!I`&%2>7?)4} z@F0+J_hhGs4va#Zn;+`2G49yB>G?}0jq{;kdwxCo|G)V+V=1>suj%@j|Nl2LzUK7k zHN6}D|G(jFk6zpC&4*uoW@EfHdHY9e#(9%Je+**mn(X#TlQCg)!KceiO#g#6yMAqD zWV|=|^fx`mvdzD~bultt+T8jhpOLX_^VgrJ7#VFhANW0uS=z=Gn`89Z8V zm!vgVGn6EH^qOv-T=?IFapUG?{|~b;KHZ+h!nlElv2VJxAfqy4)^>YA##ScAC)1A# zGb%A2oc=#`fYW_ zdd9rz(He|8jMdvaG#K+48S}RP&}3|6Vw^R-N{3N_QEB^Z9mXvzjM~!!3>i%sH*Rk; zWOQO=GtVSmW(Ql4%6!`8C@ACY(HwrsKm$^xBZ0` zV+teVwCTP!jPi_S({pVYBNbccw5FoMxp5!Z5gu|cW<|`V>D-F^xR(J z%xKNXD7Srs3*%dm5%b&_-!n3nZtwG8WMpPMJ$-^VV>)B$^dH`g4;ZIxzv;us!o+xW z`#nEK4`#-u?dm~{-HeQjr*95sG-ec?{ydm*6{Gm{Ng<3XjGwk|4`F=C&FHdyVFKe( zCPt^}>B)@G7^A2Ar7&)0RM^gx$|%ps$go{0jd42@W9jr;8H|4z*G|8l$!NoPWx7Zf z<3vWu>C3Vh!fWX*Htq%GQQt#P{SC{sQTpySa!p~ zfB!u?Z@%bT`~QE-ff7ZJZefpJ+cJpch3#8v8Ks$cLQaDe@u+z8+G=dSUC+3bk#W)X z#zw|IM&q)>|3F!7ncM&W9^IuAJbG=TkAX#|Kty&RM4CV%y|xFYJ2f*-XLOu?tC=y< z{J?Io>LyT9?k+vx(QE5)2rNeOA>vZ8(MmI*A?NP0a zx0!Vg?gV>@ALON8(Fb6P7epBzc#-Y!|G#VNw>n+d&aXbbwlk+Eb~0)*UfkZ@$=DOm zSh-!|C1axq=^l`yD>4DFx_;YUc!)93X7Xp$pRUTn6wGwnb$S&GlRM+2>4#XD zf*9?mi?TBLGrf12p2o`5#F#z(B`cEyqtp-$7(Y($CT&K=>9L$l`ivj9_i{3o zaWhtJXAx!c)wVV?GS)LOGBGk&aB$Ag&CM*aQXsYj-rnQD)WXWLW{pPf);d5A}3mU|`VL%*YbJ;pOSdkjGHMkjhZRpumvJkjhZZ zP|T3Xkj{|GpunKPP{L5jPylADGZZqEGGsE8FsL&qFqAOlGbq4CtQZs+7$+C~^(0cv8U#s5p{L?mlJ%}YB(i6m1n5ZbmXg!%pKx?y}z!t{Md_uoi QnPTiV3rf6aoM^xf04kMbH~;_u delta 353 zcmZoLX)u}4!IBuh@$keR8AiQ{E5rG2>M}6+GcYhPI505SuufDI=1E{+;9+23_@l$X z5HeYjNoR5-qdKF>n2?WhE0}zgU?J>^2KZyl0$fzzzT!eqYr9 diff --git a/dotnet/josefk_producer/bin/Debug/net7.0/josefk.pdb b/dotnet/josefk_producer/bin/Debug/net7.0/josefk.pdb index 33be704cc89caeb3bd28b6012f1cd0e6302732f6..2a1171d21b8c09c21cc485c4d4c677c4a4be00c6 100644 GIT binary patch delta 6127 zcmewp(h)kLK*2+UfkD|RCqIdSfng!b<|n1mXMGzdFC)GcYpDXJBG@&%ns=hk=Q~gprZKk&$V#BBQV<0|O(2fjR>cU}P|v?9Lb}$)cgo zz|4@usLsH`Fq!!P11p29#sLO42HnY98K#5xz=ptq~e`9&2sr5YADA8EYq*CR-XA8YUSgnHU=-rz9Js8CWEyr6wC0nj0Hgnj07< zf%t~WlixDeY8h)Ar8xenwpxJ8d;hqCmNXJw;w(c01H-N8uCnTyMz3ktS+*$`+_IgV$R;+~j*VO5AiL@$1_efjGd6~6 z(w@jV%u4TPm}GlYVR9myjD-0s>E9sT3=T4-XKE9@dv0^Bp5*cW@8qd$iV}B^OT7n4 z9(bgFtGmw7b^jGTUY! 
zC?_*XFC{03fq}8;y4_8X?I}X)PdD6O>d=0u^VsJ%nmKu?CB-F)c`1oSDNvQ)zjpj& zIKarT=ED860?RJAZ{uSUPu98lBDk`+BsEvhv7o>?Kd&UUq6Dnooq_S`wg*Q*&inuW zUGFoeh2EQ$xWwHzu62ZJa4JnpOD!q}t7c%#HM-XfQoOcs`oUuyZY$l_w14q4BvIyK&kC4lMl-Y?kkSV_VdK^ z{Brp}S%E`Y;>Avn%M1>T4CP)M*dr3&dQXac8)LPIyKZtchl)h;XUTaWW4lxBSQ%|) z{O2+*yt_l?t-|E#9I6s^EKjb3B->v{TPQg$cUoF*)iKFo5RbTC-sK(?6~=BH#BaX^Ye=Hb5fyk zbENNYH^?gc6&yt&jtTDT&2wMwy6$}$uDA%MGLJ=mCrG8ZW622_DPPZJn$~GNlby1l zDqRvw67`((b8{!Z=Hiqv@KoK#uz-mneWg_7=galJ%Y$sM7`aD2nasnj;`=3Nx?}O8=ryfwCX(cO=SX$W8WWBCsFr}rzaP2D@$yc zIPV|G7=y!MpFjAo@M&sfx~0vOQ89Tfx4gvjQ%Bc=B%AXNmh|-3I?Or}y-nlY-^j^N zxV0p#&%SJCc)-B$Z4yIon&&Ow2@@y$?7wz->SQ$@O^M>#>^UHvTlwJpO$YEWF4hC*cI(Srd!E_Zx2uVTGWsl`i> zokF$Kb7wa&`!5%swfae>f*dp~A;I`5FQx;OjeZ?}%=mU*hRY_|OraMSgyo@%T#6FQ zGxO4+DMC!*)J>3Ecmr7r@5UZ*nSa*S+`y3haVS@3(w?&FhV_+%v}be(<&GLG-0>vF5Z&pcXQZEy7Y@ZjL&iF{I?oJ~`~ z@oQ|WBR$=JqT5u*B#r0W>s~|ca?3BuO)M$a3rQ@3I_}pw%OxPw?itRjwvKA@oTqxF z$z+*Q;$%a9(aD_r0umXIpKb;z@V0dknRcf^}E&3rcXi&I0T1_^8m3(lyg@t^b1)EQ{ECCkqOQP2R!JD`9;i z`83G*jlRD5Ki$^2?o4si@nX@=0x6J#1a{4Z#fLyKVzl--o6pK3?*Xn zIs98xtVD_eQ+6T@am&m}h4LV-Zm~J=6l9*UCZniM`FgMUJH+`l*ZiC`Swm1&LaFP} zACRQmx0rg{;(Y%-)9xufpD6Nfa;l)3#E-p8z{QQgy1jpwOmcTv&GJ}chnLUl$%_S* zCDa41{sy_^yezM9>BVJEZA)tJmbWZ1n0!xAL4tS5qaKi?Ve-?IJr7F#cZ7d%-{ZCC z`(!a8C5iN^{XHN%!;keWE?Q>jva(C=c;dF!tjPgFDsD5vp1cN09^3LJy^b@?Z(`nB zrD;j(22d|~`s)Q`7NizK!^*zzTNB8hcfs8$_gZ2dn+1!Vewyt(FnO1dTKydl+sPoK zo(BHjxGrs{`>HyP%3a%7?Vv{aWabq@jj75!`W=+aH?-}~n>q2T&*cBBJp^VbGa{>W zttco;EiTT?&x2WB-g^ENsFqq~t!K&c@Uz#{9hr`OEoGrlT{>V30uqZ7b8=F1pf)*s z#$5o}G|w|PciAd&x zm3tNE=RuwFL1_POP$W#7=Q(K>tAxu&y=gBva}7<=wD@Eem!#&U!t%Ro_Qyt$u2bL4 zC0Y&#`Oa!L3BEYT=tT^YHs8cdSirsP5xfBkn6Ir+!T!_Eubgm#Kf(2kreD5~Oi(Jy%lNGIcuT|asxJv^58biDiVh-Cwk zJpqU^^2!-5aPE8Y%4zw{^%0)4kJh_ODCB2Ot`?P%So5{E1!R~&VHtB8u3fBF{C*Kv7l6e1Z(_K)##QR#nV4sqX z!(N^(F^%Wc@$AWtVsiC8xs5MDe!C&P zB)XAhlf#~+Ii>$*NH0WkesF$rc4`SMS!xzYT>zQ3bz0>^VZ9CBv(A}q`&`VOhNK}p zHOVOuzDPK~%Ds73}91QIHe=s}wv-Z;*4EU@UWER;=Pwn4Z$y z)S}E}SQw}j1Y8E$AToF7SN-Ke9#i|~?wfk&{%x@8$#29ZCER1{Pl2M+eL{bA@zX%3 z{=e}~B`u<#C(BAmO0eyzcnz}bjl=(xdGnSz?>O?~t%`JA++;rqH3^4ft>C~|Exh2v zky)}{%T_%g*O1XgN6fx|E#6gQ=nGK|0?m|2R;hqUpalGC)gr-ZZJnywbW7N%N(_l|(U1 zXXNac){dO~pRui@oZA zxw|y4Br_M*5H7u{vxp&qhapEp?BR##lb? z+d1zQb_Ml5X-sD)|CN@NVE%U>+!ng_R3SDK8^6jsGheGd^&v=Hz^%|WX#tFHj%4_2IGRwcK2D&IKPXP)T+sa8g+O)e@|F9WG#xWDgP?PPPezSyLFB3?&sg49iJ zmlKpYHs!}FkbiEZ9Syy-(#ml0h$5-vzfW-yj?FwBO$*Ys-4`k=bTNWI#UWv~8Z5&b=;$9q`oFFf&S+mPmK zWJ8xUy1T77{A%JPL33G%vSLsr3#wzc?LYq+WEzKe%0s#G_ud;02pBO=^!*M}1`S(_ zMY%gb8TGPv*_I;nVu$4~n6@9Ut;|)z2_h^{xQ9b@A&-JeeeA%Z}t~U zD1#LoP~el;>HF(HNP(rtbgu6wWF02XSXzBVwyz4TfLT#c{B&yCZIFWW=})EJOx*4{ z&ow+;JJ@e>yu93GPepOJUR#GXAo=&6r*4a{P4@0FaF5@+kTV6EZh}*jON%l~UVQ>l$Dbe4c9~h1@=o9V4ddi{ic*sgD6&c9F`e5DQgG?viXTclue#4ZR-i33 zJMQ=7dx{zo{hB9Vfg)WeeZiEENy{A;9~azVu%Iq&vYe8##0|+?-~khtvrZj7|2tea zD&LY$yp?J>IZ{bZ;%R90Mv$>3OQ)LhxH>xFh-IawT3|(CJgB;JMP&i?S<~zrRR^`x(87&r* z3zZez&u9Mo36flD%`UuMD#Ul6`*W?Q*F0pQp%?-hc|l73%yN8}KyAv!JXr}%xw5X) zMZSjTMT^-@zM-rtaV}hZ4Je6j3{gG#`dOaa(#K|#>ef$tK3P;nL4vI>9^6p?oh&rhDRnr)unR=KLTkLK@NKDkCkNn-uqTyW8n zKI`!x$-H=vy>B+Z{q*f*$>eP+YQ9nB)0TpCN@@ofY_s-pp57&RtAFiv*eF6sMp0^F z3aI%Ml31J#tA1Dg@#zHxnOe%mjv29!9TxiTl-GY%#Wz_~RYPLZ!JUmD$(PEfHXKbf z^6&gDW%PPwe$C`eRW%8}@1gTSl55RQq()bndapX(veGfE#%}UmRq@IDRM{npxqN4V z!qY~$Bx&cd3eV02r&<2f!s933RaKG*d}I3)r2D5;M~8r}s_){;J7o)RbIhA8t|lw- zcj?i?AW7r9JA^VUC476|Kj-mYct2sXkD8Rkk28G_K$4pKJp2CbyzJQWb%F6YfwIe! 
z>(mtLjprX(4zjbnrJ+82U#`=xcS^P!GNzP4<1j2Sr!+OBv;aQx!h3EuIA@)FK6U1t zzKuTXCC+|YR&d<|sy;jwHjH;LK^r_=;xjkmO_+P1+ww_1FVD$uKaQl(qoicAtU7=F zHsj;q{Pp2U#J-O<%}(2nike52ocaY*nwgi9Uk(d@>3J8x!*V;{xtxCYpw?~X;Sjz{ zT+wf!iX(Dit?JgoE8sDP+sQ}WWQ_Lvt`>dvKij{?4M`!m7n33j9(KHc(HlJUbz4>- z?eDg3mrjYpu1rh#swSUMmy?*cT=W$K0~5n$CBOLK^&Ebi3Yi!R)-Dd7{7+rp+tj3} z4HUlqZ!R?4Y2W7BQ<<{x&NCAyB#Rz*5sgyezuMD5DX)0y_aA@KFF4I%%}ia< zBGoWCLqk?#_J%19AoH3ub_q5z^Eu99dXvJ}?|N?XTn%}5)7v_`KrP@Kd(Ha~nh5(g zzKj&P{%J96Pz2=th=9bB3}^=5cG!Os$gBpb?~YurmikSLb8wGavO#__v!jVz4x;sp6s+y%K_e^7Q znx+Dy+2nT3ZH_IB%extuFJoN3fpPgB#^q-im)~Vv{)L~BhtZj_VHaa#5@Ta4lQbJ6 dBSYhNMgax}21XGZMg{{WWkCi;rb-3|1_0_{Tz>!n delta 6131 zcmeAO{Sz{wK*2_XfkD|RCqIdSfq`enE6oLao4q?Xx4pMJYMpx^jqlpT2?g?&42%qp z3``6?42%rZ8JHNJGcYoIW?*8_W@KbAXJneJ$S5qzz`)3$rp|x_7#XxCyEBGL{!&+G zU}lJ5RA*pe=wv>?z{()3ae#r1L3#34#wnBinG7`9^%)pB#pU!AmKL723{PC$H-G>6 z`RkjRT$Ep&pH`w5mRVeynB$aKoSCfWoL`ix zmy(miz`%IxuxI$vM17EMKKkYQiG6nz|A9>9^N>Bfvj2qV zUf(P0H73UdO`gxDC{gUuH4`LRcym|ef#@u^)vL?onu7h~Cf{I_&phJ9@Qh&r14HZl zWd~1wed@H`XK5Zo=;GVC#mV_aIhjdN+uu5#T?evV-etZ-Q7NnECZ8zP&huP5^HNKS zOA_-^5{puxDtAPC&S5yf$naryq(erJwPU}=nx+@e#Y-k9uuDkrrI_CbIfZprUZ-W^ zLdQ*~KiPjN`rkWw0=uZhuHb;3AjuVCGTqD>t6X}H_w(~x9XmRCA&2nf3+#dtiF;MH zf!tcy{^MY{!yh*QY2rPM&7^da@j+j)bwm?keP@{Yf7YGdo(eKvoo%W8i(B)2c5L99({kH7BDk`+BsEvhIX|yBKL?f?lcf~C zfvl=@v-h~f_QG>jIG3Eqe2*%)VtBe;Xk+^sq*5?7=Z(ufbFYP$gf45&a(V<+>5^EI zsOOxYo2!_g2TeiWO@BOLSir>a!`;;CBTK$dPgjb)ieT!x$zt3pPL_$13qY>C*MDc@ zjP6|yQ(JGdKjOdq8re8VQfT9FdjPU{%7eS=Hx;i;mQ5n0unFm zCtL;@<$HU}If+I`msZXNTiho8m<3jFiknAbX=U7Akb=M^;u}}=(jkYrud$7+x6|#%qsJXHelU*c?2c;kB6-X`9Lm# zUtyt*quc6>KffL`E@B2N_{k$Dv5f7>TafWf+3#?^ICH~wZ&~OEuJ7ewFRFy(=jRkd zgLLvkxrHEw3F~U_|C-_DFuVJpZ0L&GF0e_Jyh0K)8oXOURxvMmcmHkYXOHdoe*RmT ztTYF#U>h%=t?a76M<4}p1@1Tg8$I-2@L+lODRI9>Xt+ayypiYEbx`hWyL#k~*26X4 z^Z4H$`F?z|%Vb7AQSq(uVqZXR5ILm(__iT~|7HuDPviG3kb8fzDs7zKd&zN+Ki@Zrd)C(`pXHO3C^8Ry2hyp}D!s4ip|j6i z1P^(aVp>qI3G;?^;yTeW2tM?xeqN4FY-(M+vAnOtzW06(m`7ZRJ)DW+VT$ zur~j_&f(bz+aNKfJ@qZvv#wnCXUA-D_g`ztH4t z&+cDNR5|VPj=XC7wpn8Ge*tNUgxj;0fNT+HGP`8b*yb_o5ub^<=ygf3Zg)WeiEjP4 zPLP5!g$HZjEL-Ti)s^wi_t&QHC+7>QN!(RZJ_M4~eYc}UEUMq5-9}ZRZ;it4N!D4F3hxpT#p*fo>hOpX##aZ70SYyr7o(VU+r zep{q^uf4Wmg2#nVup-aXUoRlDAQhI0oImXP2(pK>{?3Q*Q`WexFg*Oi-*&n1bni9o}7a!A3ce5^KU;qD`-R6D{*9F0uqZ7b8=F1pynkOFzy7I zH_d``lJJ}(juWSE`SQ9wD-c;rU}*D$J40y9o+|gbRKX zyX9~Bt`e;Inct>wHCawXOk#zV@_SGVLcqw{F=^EszYf{tPl>y(Y?|yNA}#T7-%D^R zTcxF8+b?>;bN7r}qUnL_k|x)S$kne(e*@0Sr}@sEK2yNsHEW4UjX@!|5R(1AiJ7qA zn7Cp^BPclT?>@gWEH2oyeO6ZO%I^1qNUHr3b5ny-i}Q0zOAvXjT5i@ykmD_WPdKn| z!*tJ%>Cc}DYy4OcJXuCmxn5B#b3Mq^vWJfgxjW~0Pt%LIFnQloSS?4EQ_JMOx( zz)8=<>Zl!9PkpW)$cn(y%w(t)2d4NR2PMX3F^@%_@>KaxUm6&-_fPRbuyTm%pwz;2=pMhl95`t?Ru$1hc0s*hw@cxsY!PG)KzEcINgntTZ4qu#yBdKDMU{TfYlJ+1d} z%$fXDOx0_KxhuGB6t&u3Fi_CjdHaIHd-!IpkwrBxpeVlr=J-?@t1}?yy_vG@nVO=$ z|I%&!0S4MU-zWEri%rfF=au*?`t%`4!KU3ojkSvw`m|j)3psjomeAyWaT&KMP3z`? 
z921uV`>d8T@C1A~sxNMb9CxBBvGCOigVq=h#pM@Xo7RPXQrx1ghCwtc;IbGF}(3x|Bxvy>c$niiCrmXn%{ zl&m}X({6yAX0eZdO;wM$-;(#c($2n!-Z zLXLErf|N`ak`$Kc(JQ(Ga!^OogQG&-^3I!W#a1lk>b^JGR#H=f!S!P!$bz^B)-ASbI{L0{g8u~mv;BYnAMT&L zNJ?FTQ|9|YkWSTmi#{edZu6Mm?%Zdx_sxyT52fTJto_!5TWiXBE4g;n)w%2w(3a)( z`YJy;Mp|LAxiqhY<@LX3LAw7QdD-U2sp-0+HM4D<)P*mTW2E(5mjyAe1W8(m>}}JU z#_qM2=S5&w$5~j*6t(Trad6v9h6Emlg!hNNQ+L1dShiq0b9oTk-^u5t6(p`tjRnV< zN3GCu`4)Tsh7HxMKV_qnC$r1QN@z*mZUtE*Wgg4h#XG@!$LD`S*^_@XOg<a1@sWSBl0V(>@&eb=|*>>G`%90^WU-1!WZ^D(%(IGbk`Kv|POT_)vbo zOK0XXwr9p>&XYZ5Wh6xIoxccjTEo(g4I-IZZu@S@&zW)IcHQJ!SviS7YsDL&Y~c4= z=KDY9>)zdJ`#ECEIZ`ISla-x(L6%Koi_(f-kZzR~zJ2GGpYdO1U>dS2`ThCH?_~8P zp4O(k1-V%Lq)g(rZ;FnS&#j59{ne-i(yi=O3~w>@yxt8~w@*2*{m_SE$Ht{M7eu8= zGK17jW|0z<*uMQbxR@8?-*I}!=OsStCKjEVt*ovGQlJ!(3#&z5TBqIu1|CCel{n4s(3?#WQ_S=k8`wINpICpcJy^*Qx?GM-KgHY;D9c=pEZ$s6QlC%4NBN<_>#IRWJVIZb;$<%#k-Pr7r< zf0Bez-Q*4O>Jm#OPWON$IqbIUOC(yktX1iM-Rb7bIeCM;++II^>Hrr+uL?7?xt7Ta*&eAZ3_Gn$6k0o z0$FnS&8DfpBg(wjMa`G^lH2UXmiZtBWiQp&N~_KB-ZXd7Ou-#WMUy`$ z=t(gA?*`|)xff&l79Aq9Fm(79sfOJws$-K z?S8JiH{~X8P?S;&&&*57FNcNuVkK8_e?FltbfT@IvHRXERrR`bC+^7_O3HpKL@%EQ znbUh|+8Q74o`&*hFo554{qnrcj-O-{1UtO78_f6 zKf_gelV>W)NNgy|e*-eIL|G_7f65=H_UzSdg-wRBh!+dlr=7DgSJ^CiM%vaPb3#GfOZrhzg5b5;5_ zZC(ejsn^~aT|U3;?&NA^1$T+Hzrh`U6_);u41eCZOe}t^y?u_L57ZkWsTC!9NSUP4 zXjLPqkttlGSS)hsmiN-Vnj0HLHdar5q^v5@TDJ8#D4ni8eeTim{|kNR$1=6H)km04 zmRC`5KgaAc1LV0k@&7_E9r*1%@h`vJ-AgiuU>0Q*r6#5z+CdC?t7m}B>bS&^nK{+d zc|yGAm0y!;?oDn}QIb$Bd-D}!R&Lq03a5v-s~m0!Z<>ij7n7qoB~K6^Opq1()Js*=JzW^9u; zs>(@pB*uVClJy1|e{Nbla9+lsTx}s_wPx~0Rq@IHRJkRjb(O%4yc06A{)P`UoZC8E z%=nrj@+K>)$$DidC;R{z%{cATq4T~Iz2?|!)uo@Tga=enVqS3~xSock)8v2t-#|Lv zX-rA5&gJmipcc=d_>#+Oa)+9NM2v;#3{dP}X>NPtzD?47!S=R@C@$aX$p_RV9B&wS zf(QRf*;F?@5?t)RZqA!#>vc<{ppk+|x4u(mgG<=X*#r&w?sO zl%v5r_N@Rpoau|tUbf{0K8v=?RqN`$hJ^!2A*5>*3?Ah4dz}rgL!WQ7Ja9B8*?BA5 z?1~RvvLZ<42BjvZq!xu2WtPBuhZm-_-ezE6VoaPz6FkC_=?PhNL(0%O!>`&%2>7}rhy z@F0-!^<<}q4vdnUn;+`2F}~Qm>G?}0jccJ`dwxCo|G)V+V=1>suj#g!|Nl2LzUK7k zHGLBP|G(jFk6zoN&4*uoW@CIfdHY9e#-)=#e+*)rIN9x!CS&^Mf=`#3nAn0hyMAqD zWPCdL^fx`mWt)F}>tbZQxw-X6J|ko8=C40bF*3SrKJa@Qv$V4-$hV;vUhv8>GI+G! 
zE=g;!W++MY=r!Fnx$wUUeH99^Gx z>lsU@M{6+ZFg9)P&|u7GWGvnOLzA(QiSg9*Djh}zM$PTBbr`p>Fh)-gFl01k+_AmQ zkkN^eQDpi_BSr_twCP;Nj3$i5)9sBJRT=HJry4VEW@dc3UDBK}k&*Gz^lA%6dB#oC zXIn6OFdm-%#DXzZP>mmCdF_Q4vu1%9ru(KxSu(0Hx=*jSWOQYmzWt~rqY@)y>h>2_ zj46zabEo^-Fv>I5PS3SrjAYz2{kRR|bjC;1<82uW86~G*v}Md}F(KHGOk1qcNk*^yk5hs~F{`PYPjFVf?v$dkEu8Zbt9z3lkWR zGBJ8iPfuoi#+WqSFNJY4qxyEHR7QD5M)vJWX^h*M7;C2A%3%D%xMlkFOhy~V+tWp| z7$-6+OkbA87|z6YZ~Ff%#$z12Z~p@&?S0eFW;5C`Szn&co5Q%1v2^?K97c6^#_QAH z7c(AWyg7YS38OFL`)Q?&o*Xup!8)wA2bMC%urY>BUsui8$oOr$K@DR(qw4P?VA<^l z|NZyqy!oPU?f?HR2TBw@x`jP@ZEGQt*SBw}Wt3*(Dc=uL#G~TTYpcKgc0J=#M#h!f z8ygw>7>#QW{{v;UWp4lfdvuph@aVNoItCV*0ukAP5NQI5^x7Vu?$pdUozY|Zt!BnZ z^W(e0s+&Mbxx4g$N3X5>A+SgRLe&odokd^dd>BU1rm?R0h~CObyE>26F+CQO_j(`%TR zvKZ%1f6l}d&geSbo|(ysiOGF>Gc!{l1nJ?O^marzhq@{V2qls#m1D#_+xqx8dd8?ZJ%)qHfbqxlUJj;U#wF7qaxnQbhD$c?q|f+cdoL$b z88>6&b{0`4Uu|n+BV#>NLkmMQ1qbK++}z9(D+OXp;q5&ROf9S|99vkcCKv8?*&gA> HbbuWI7VcXf delta 1540 zcmbO+hjYdp&J8-uBHaR+Zii)(-Bw?dSRJcm)Dkl3(ap(?OcI+Tn0Kf%YE2f@7H3r0 ztf{RivN_P6mwR%%*9CoZJp(;MLx$AkBx92_3)9qOOQTf7B;#ZYLzC2$H1m{13u9wr z^Hf7i%T)6;GqcTM-XBU0U)25o|8inJ1H%N5ZhMbj)8p;`|G#WR@RLFOLInT9tOkQ_$0%P!I`&%2>7?)4} z@F0+J_hhGs4va#Zn;+`2G49yB>G?}0jq{;kdwxCo|G)V+V=1>suj%@j|Nl2LzUK7k zHN6}D|G(jFk6zpC&4*uoW@EfHdHY9e#(9%Je+**mn(X#TlQCg)!KceiO#g#6yMAqD zWV|=|^fx`mvdzD~bultt+T8jhpOLX_^VgrJ7#VFhANW0uS=z=Gn`89Z8V zm!vgVGn6EH^qOv-T=?IFapUG?{|~b;KHZ+h!nlElv2VJxAfqy4)^>YA##ScAC)1A# zGb%A2oc=#`fYW_ zdd9rz(He|8jMdvaG#K+48S}RP&}3|6Vw^R-N{3N_QEB^Z9mXvzjM~!!3>i%sH*Rk; zWOQO=GtVSmW(Ql4%6!`8C@ACY(HwrsKm$^xBZ0` zV+teVwCTP!jPi_S({pVYBNbccw5FoMxp5!Z5gu|cW<|`V>D-F^xR(J z%xKNXD7Srs3*%dm5%b&_-!n3nZtwG8WMpPMJ$-^VV>)B$^dH`g4;ZIxzv;us!o+xW z`#nEK4`#-u?dm~{-HeQjr*95sG-ec?{ydm*6{Gm{Ng<3XjGwk|4`F=C&FHdyVFKe( zCPt^}>B)@G7^A2Ar7&)0RM^gx$|%ps$go{0jd42@W9jr;8H|4z*G|8l$!NoPWx7Zf z<3vWu>C3Vh!fWX*Htq%GQQt#P{SC{sQTpySa!p~ zfB!u?Z@%bT`~QE-ff7ZJZefpJ+cJpch3#8v8Ks$cLQaDe@u+z8+G=dSUC+3bk#W)X z#zw|IM&q)>|3F!7ncM&W9^IuAJbG=TkAX#|Kty&RM4CV%y|xFYJ2f*-XLOu?tC=y< z{J?Io>LyT9?k+vx(QE5)2rNeOA>vZ8(MmI*A?NP0a zx0!Vg?gV>@ALON8(Fb6P7epBzc#-Y!|G#VNw>n+d&aXbbwlk+Eb~0)*UfkZ@$=DOm zSh-!|C1axq=^l`yD>4DFx_;YUc!)93X7Xp$pRUTn6wGwnb$S&GlRM+2>4#XD zf*9?mi?TBLGrf12p2o`5#F#z(B`cEyqtp-$7(Y($CT&K=>9L$l`ivj9_i{3o zaWhtJXAx!c)wVV?GS)LOGBGk&aB$Ag&CM*aQXsYj-rnQD)WXWLW{pPf5hyuZ}ZwW;YlGsc{ghK GN&o=PqY~Kw diff --git a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.CoreCompileInputs.cache b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.CoreCompileInputs.cache index b7dcd22..b601c21 100644 --- a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.CoreCompileInputs.cache +++ b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.CoreCompileInputs.cache @@ -1 +1 @@ -05e0be4c2d12ae0f17c5d6f12962bce0cc2e9e72 +ec703f0a9443496abc9a2f5f990e1ce3ee3711e0 diff --git a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.FileListAbsolute.txt b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.FileListAbsolute.txt index 149573d..9f7df77 100644 --- a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.FileListAbsolute.txt +++ b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.FileListAbsolute.txt @@ -124,3 +124,45 @@ /scratch/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.pdb /scratch/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.genruntimeconfig.cache /scratch/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/ref/josefk.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/josefk +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/josefk.deps.json 
+/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/josefk.runtimeconfig.json +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/josefk.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/josefk.pdb +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/Confluent.Kafka.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/linux-arm64/native/librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/linux-x64/native/alpine-librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/linux-x64/native/centos6-librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/linux-x64/native/centos7-librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/linux-x64/native/librdkafka.so +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/osx-arm64/native/librdkafka.dylib +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/osx-x64/native/librdkafka.dylib +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/libcrypto-3-x64.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/libcurl.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/librdkafka.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/librdkafkacpp.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/libssl-3-x64.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/msvcp140.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/vcruntime140.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/zlib1.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x64/native/zstd.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/libcrypto-3.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/libcurl.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/librdkafka.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/librdkafkacpp.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/libssl-3.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/msvcp140.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/vcruntime140.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/zlib1.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/bin/Debug/net7.0/runtimes/win-x86/native/zstd.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.AssemblyReference.cache +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.GeneratedMSBuildEditorConfig.editorconfig +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.AssemblyInfoInputs.cache +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.AssemblyInfo.cs 
+/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.CoreCompileInputs.cache +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.csproj.CopyComplete +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/refint/josefk.dll +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.pdb +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/josefk.genruntimeconfig.cache +/scratch/repos/Exemple_Kafka/dotnet/josefk_producer/obj/Debug/net7.0/ref/josefk.dll diff --git a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.dll b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.dll index 03ea41628293f8fabf4870610e321c74460998a3..09a48d0affbd98a6e6c33cbf926bdd82b7b8dfd7 100644 GIT binary patch delta 409 zcmZoLX)u}4!P4-h);d5A}3mU|`VL%*YbJ;pOSdkjGHMkjhZRpumvJkjhZZ zP|T3Xkj{|GpunKPP{L5jPylADGZZqEGGsE8FsL&qFqAOlGbq4CtQZs+7$+C~^(0cv8U#s5p{L?mlJ%}YB(i6m1n5ZbmXg!%pKx?y}z!t{Md_uoi QnPTiV3rf6aoM^xf04kMbH~;_u delta 353 zcmZoLX)u}4!IBuh@$keR8AiQ{E5rG2>M}6+GcYhPI505SuufDI=1E{+;9+23_@l$X z5HeYjNoR5-qdKF>n2?WhE0}zgU?J>^2KZyl0$fzzzT!eqYr9 diff --git a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.genruntimeconfig.cache b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.genruntimeconfig.cache index d21faaa..9ec69d1 100644 --- a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.genruntimeconfig.cache +++ b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.genruntimeconfig.cache @@ -1 +1 @@ -39a2766786638ba4709cdd2bfddf26636796fba7 +b8aed1e142221deddf26177798ca77680631a442 diff --git a/dotnet/josefk_producer/obj/Debug/net7.0/josefk.pdb b/dotnet/josefk_producer/obj/Debug/net7.0/josefk.pdb index 33be704cc89caeb3bd28b6012f1cd0e6302732f6..2a1171d21b8c09c21cc485c4d4c677c4a4be00c6 100644 GIT binary patch delta 6127 zcmewp(h)kLK*2+UfkD|RCqIdSfng!b<|n1mXMGzdFC)GcYpDXJBG@&%ns=hk=Q~gprZKk&$V#BBQV<0|O(2fjR>cU}P|v?9Lb}$)cgo zz|4@usLsH`Fq!!P11p29#sLO42HnY98K#5xz=ptq~e`9&2sr5YADA8EYq*CR-XA8YUSgnHU=-rz9Js8CWEyr6wC0nj0Hgnj07< zf%t~WlixDeY8h)Ar8xenwpxJ8d;hqCmNXJw;w(c01H-N8uCnTyMz3ktS+*$`+_IgV$R;+~j*VO5AiL@$1_efjGd6~6 z(w@jV%u4TPm}GlYVR9myjD-0s>E9sT3=T4-XKE9@dv0^Bp5*cW@8qd$iV}B^OT7n4 z9(bgFtGmw7b^jGTUY! 
zC?_*XFC{03fq}8;y4_8X?I}X)PdD6O>d=0u^VsJ%nmKu?CB-F)c`1oSDNvQ)zjpj& zIKarT=ED860?RJAZ{uSUPu98lBDk`+BsEvhv7o>?Kd&UUq6Dnooq_S`wg*Q*&inuW zUGFoeh2EQ$xWwHzu62ZJa4JnpOD!q}t7c%#HM-XfQoOcs`oUuyZY$l_w14q4BvIyK&kC4lMl-Y?kkSV_VdK^ z{Brp}S%E`Y;>Avn%M1>T4CP)M*dr3&dQXac8)LPIyKZtchl)h;XUTaWW4lxBSQ%|) z{O2+*yt_l?t-|E#9I6s^EKjb3B->v{TPQg$cUoF*)iKFo5RbTC-sK(?6~=BH#BaX^Ye=Hb5fyk zbENNYH^?gc6&yt&jtTDT&2wMwy6$}$uDA%MGLJ=mCrG8ZW622_DPPZJn$~GNlby1l zDqRvw67`((b8{!Z=Hiqv@KoK#uz-mneWg_7=galJ%Y$sM7`aD2nasnj;`=3Nx?}O8=ryfwCX(cO=SX$W8WWBCsFr}rzaP2D@$yc zIPV|G7=y!MpFjAo@M&sfx~0vOQ89Tfx4gvjQ%Bc=B%AXNmh|-3I?Or}y-nlY-^j^N zxV0p#&%SJCc)-B$Z4yIon&&Ow2@@y$?7wz->SQ$@O^M>#>^UHvTlwJpO$YEWF4hC*cI(Srd!E_Zx2uVTGWsl`i> zokF$Kb7wa&`!5%swfae>f*dp~A;I`5FQx;OjeZ?}%=mU*hRY_|OraMSgyo@%T#6FQ zGxO4+DMC!*)J>3Ecmr7r@5UZ*nSa*S+`y3haVS@3(w?&FhV_+%v}be(<&GLG-0>vF5Z&pcXQZEy7Y@ZjL&iF{I?oJ~`~ z@oQ|WBR$=JqT5u*B#r0W>s~|ca?3BuO)M$a3rQ@3I_}pw%OxPw?itRjwvKA@oTqxF z$z+*Q;$%a9(aD_r0umXIpKb;z@V0dknRcf^}E&3rcXi&I0T1_^8m3(lyg@t^b1)EQ{ECCkqOQP2R!JD`9;i z`83G*jlRD5Ki$^2?o4si@nX@=0x6J#1a{4Z#fLyKVzl--o6pK3?*Xn zIs98xtVD_eQ+6T@am&m}h4LV-Zm~J=6l9*UCZniM`FgMUJH+`l*ZiC`Swm1&LaFP} zACRQmx0rg{;(Y%-)9xufpD6Nfa;l)3#E-p8z{QQgy1jpwOmcTv&GJ}chnLUl$%_S* zCDa41{sy_^yezM9>BVJEZA)tJmbWZ1n0!xAL4tS5qaKi?Ve-?IJr7F#cZ7d%-{ZCC z`(!a8C5iN^{XHN%!;keWE?Q>jva(C=c;dF!tjPgFDsD5vp1cN09^3LJy^b@?Z(`nB zrD;j(22d|~`s)Q`7NizK!^*zzTNB8hcfs8$_gZ2dn+1!Vewyt(FnO1dTKydl+sPoK zo(BHjxGrs{`>HyP%3a%7?Vv{aWabq@jj75!`W=+aH?-}~n>q2T&*cBBJp^VbGa{>W zttco;EiTT?&x2WB-g^ENsFqq~t!K&c@Uz#{9hr`OEoGrlT{>V30uqZ7b8=F1pf)*s z#$5o}G|w|PciAd&x zm3tNE=RuwFL1_POP$W#7=Q(K>tAxu&y=gBva}7<=wD@Eem!#&U!t%Ro_Qyt$u2bL4 zC0Y&#`Oa!L3BEYT=tT^YHs8cdSirsP5xfBkn6Ir+!T!_Eubgm#Kf(2kreD5~Oi(Jy%lNGIcuT|asxJv^58biDiVh-Cwk zJpqU^^2!-5aPE8Y%4zw{^%0)4kJh_ODCB2Ot`?P%So5{E1!R~&VHtB8u3fBF{C*Kv7l6e1Z(_K)##QR#nV4sqX z!(N^(F^%Wc@$AWtVsiC8xs5MDe!C&P zB)XAhlf#~+Ii>$*NH0WkesF$rc4`SMS!xzYT>zQ3bz0>^VZ9CBv(A}q`&`VOhNK}p zHOVOuzDPK~%Ds73}91QIHe=s}wv-Z;*4EU@UWER;=Pwn4Z$y z)S}E}SQw}j1Y8E$AToF7SN-Ke9#i|~?wfk&{%x@8$#29ZCER1{Pl2M+eL{bA@zX%3 z{=e}~B`u<#C(BAmO0eyzcnz}bjl=(xdGnSz?>O?~t%`JA++;rqH3^4ft>C~|Exh2v zky)}{%T_%g*O1XgN6fx|E#6gQ=nGK|0?m|2R;hqUpalGC)gr-ZZJnywbW7N%N(_l|(U1 zXXNac){dO~pRui@oZA zxw|y4Br_M*5H7u{vxp&qhapEp?BR##lb? z+d1zQb_Ml5X-sD)|CN@NVE%U>+!ng_R3SDK8^6jsGheGd^&v=Hz^%|WX#tFHj%4_2IGRwcK2D&IKPXP)T+sa8g+O)e@|F9WG#xWDgP?PPPezSyLFB3?&sg49iJ zmlKpYHs!}FkbiEZ9Syy-(#ml0h$5-vzfW-yj?FwBO$*Ys-4`k=bTNWI#UWv~8Z5&b=;$9q`oFFf&S+mPmK zWJ8xUy1T77{A%JPL33G%vSLsr3#wzc?LYq+WEzKe%0s#G_ud;02pBO=^!*M}1`S(_ zMY%gb8TGPv*_I;nVu$4~n6@9Ut;|)z2_h^{xQ9b@A&-JeeeA%Z}t~U zD1#LoP~el;>HF(HNP(rtbgu6wWF02XSXzBVwyz4TfLT#c{B&yCZIFWW=})EJOx*4{ z&ow+;JJ@e>yu93GPepOJUR#GXAo=&6r*4a{P4@0FaF5@+kTV6EZh}*jON%l~UVQ>l$Dbe4c9~h1@=o9V4ddi{ic*sgD6&c9F`e5DQgG?viXTclue#4ZR-i33 zJMQ=7dx{zo{hB9Vfg)WeeZiEENy{A;9~azVu%Iq&vYe8##0|+?-~khtvrZj7|2tea zD&LY$yp?J>IZ{bZ;%R90Mv$>3OQ)LhxH>xFh-IawT3|(CJgB;JMP&i?S<~zrRR^`x(87&r* z3zZez&u9Mo36flD%`UuMD#Ul6`*W?Q*F0pQp%?-hc|l73%yN8}KyAv!JXr}%xw5X) zMZSjTMT^-@zM-rtaV}hZ4Je6j3{gG#`dOaa(#K|#>ef$tK3P;nL4vI>9^6p?oh&rhDRnr)unR=KLTkLK@NKDkCkNn-uqTyW8n zKI`!x$-H=vy>B+Z{q*f*$>eP+YQ9nB)0TpCN@@ofY_s-pp57&RtAFiv*eF6sMp0^F z3aI%Ml31J#tA1Dg@#zHxnOe%mjv29!9TxiTl-GY%#Wz_~RYPLZ!JUmD$(PEfHXKbf z^6&gDW%PPwe$C`eRW%8}@1gTSl55RQq()bndapX(veGfE#%}UmRq@IDRM{npxqN4V z!qY~$Bx&cd3eV02r&<2f!s933RaKG*d}I3)r2D5;M~8r}s_){;J7o)RbIhA8t|lw- zcj?i?AW7r9JA^VUC476|Kj-mYct2sXkD8Rkk28G_K$4pKJp2CbyzJQWb%F6YfwIe! 
z>(mtLjprX(4zjbnrJ+82U#`=xcS^P!GNzP4<1j2Sr!+OBv;aQx!h3EuIA@)FK6U1t zzKuTXCC+|YR&d<|sy;jwHjH;LK^r_=;xjkmO_+P1+ww_1FVD$uKaQl(qoicAtU7=F zHsj;q{Pp2U#J-O<%}(2nike52ocaY*nwgi9Uk(d@>3J8x!*V;{xtxCYpw?~X;Sjz{ zT+wf!iX(Dit?JgoE8sDP+sQ}WWQ_Lvt`>dvKij{?4M`!m7n33j9(KHc(HlJUbz4>- z?eDg3mrjYpu1rh#swSUMmy?*cT=W$K0~5n$CBOLK^&Ebi3Yi!R)-Dd7{7+rp+tj3} z4HUlqZ!R?4Y2W7BQ<<{x&NCAyB#Rz*5sgyezuMD5DX)0y_aA@KFF4I%%}ia< zBGoWCLqk?#_J%19AoH3ub_q5z^Eu99dXvJ}?|N?XTn%}5)7v_`KrP@Kd(Ha~nh5(g zzKj&P{%J96Pz2=th=9bB3}^=5cG!Os$gBpb?~YurmikSLb8wGavO#__v!jVz4x;sp6s+y%K_e^7Q znx+Dy+2nT3ZH_IB%extuFJoN3fpPgB#^q-im)~Vv{)L~BhtZj_VHaa#5@Ta4lQbJ6 dBSYhNMgax}21XGZMg{{WWkCi;rb-3|1_0_{Tz>!n delta 6131 zcmeAO{Sz{wK*2_XfkD|RCqIdSfq`enE6oLao4q?Xx4pMJYMpx^jqlpT2?g?&42%qp z3``6?42%rZ8JHNJGcYoIW?*8_W@KbAXJneJ$S5qzz`)3$rp|x_7#XxCyEBGL{!&+G zU}lJ5RA*pe=wv>?z{()3ae#r1L3#34#wnBinG7`9^%)pB#pU!AmKL723{PC$H-G>6 z`RkjRT$Ep&pH`w5mRVeynB$aKoSCfWoL`ix zmy(miz`%IxuxI$vM17EMKKkYQiG6nz|A9>9^N>Bfvj2qV zUf(P0H73UdO`gxDC{gUuH4`LRcym|ef#@u^)vL?onu7h~Cf{I_&phJ9@Qh&r14HZl zWd~1wed@H`XK5Zo=;GVC#mV_aIhjdN+uu5#T?evV-etZ-Q7NnECZ8zP&huP5^HNKS zOA_-^5{puxDtAPC&S5yf$naryq(erJwPU}=nx+@e#Y-k9uuDkrrI_CbIfZprUZ-W^ zLdQ*~KiPjN`rkWw0=uZhuHb;3AjuVCGTqD>t6X}H_w(~x9XmRCA&2nf3+#dtiF;MH zf!tcy{^MY{!yh*QY2rPM&7^da@j+j)bwm?keP@{Yf7YGdo(eKvoo%W8i(B)2c5L99({kH7BDk`+BsEvhIX|yBKL?f?lcf~C zfvl=@v-h~f_QG>jIG3Eqe2*%)VtBe;Xk+^sq*5?7=Z(ufbFYP$gf45&a(V<+>5^EI zsOOxYo2!_g2TeiWO@BOLSir>a!`;;CBTK$dPgjb)ieT!x$zt3pPL_$13qY>C*MDc@ zjP6|yQ(JGdKjOdq8re8VQfT9FdjPU{%7eS=Hx;i;mQ5n0unFm zCtL;@<$HU}If+I`msZXNTiho8m<3jFiknAbX=U7Akb=M^;u}}=(jkYrud$7+x6|#%qsJXHelU*c?2c;kB6-X`9Lm# zUtyt*quc6>KffL`E@B2N_{k$Dv5f7>TafWf+3#?^ICH~wZ&~OEuJ7ewFRFy(=jRkd zgLLvkxrHEw3F~U_|C-_DFuVJpZ0L&GF0e_Jyh0K)8oXOURxvMmcmHkYXOHdoe*RmT ztTYF#U>h%=t?a76M<4}p1@1Tg8$I-2@L+lODRI9>Xt+ayypiYEbx`hWyL#k~*26X4 z^Z4H$`F?z|%Vb7AQSq(uVqZXR5ILm(__iT~|7HuDPviG3kb8fzDs7zKd&zN+Ki@Zrd)C(`pXHO3C^8Ry2hyp}D!s4ip|j6i z1P^(aVp>qI3G;?^;yTeW2tM?xeqN4FY-(M+vAnOtzW06(m`7ZRJ)DW+VT$ zur~j_&f(bz+aNKfJ@qZvv#wnCXUA-D_g`ztH4t z&+cDNR5|VPj=XC7wpn8Ge*tNUgxj;0fNT+HGP`8b*yb_o5ub^<=ygf3Zg)WeiEjP4 zPLP5!g$HZjEL-Ti)s^wi_t&QHC+7>QN!(RZJ_M4~eYc}UEUMq5-9}ZRZ;it4N!D4F3hxpT#p*fo>hOpX##aZ70SYyr7o(VU+r zep{q^uf4Wmg2#nVup-aXUoRlDAQhI0oImXP2(pK>{?3Q*Q`WexFg*Oi-*&n1bni9o}7a!A3ce5^KU;qD`-R6D{*9F0uqZ7b8=F1pynkOFzy7I zH_d``lJJ}(juWSE`SQ9wD-c;rU}*D$J40y9o+|gbRKX zyX9~Bt`e;Inct>wHCawXOk#zV@_SGVLcqw{F=^EszYf{tPl>y(Y?|yNA}#T7-%D^R zTcxF8+b?>;bN7r}qUnL_k|x)S$kne(e*@0Sr}@sEK2yNsHEW4UjX@!|5R(1AiJ7qA zn7Cp^BPclT?>@gWEH2oyeO6ZO%I^1qNUHr3b5ny-i}Q0zOAvXjT5i@ykmD_WPdKn| z!*tJ%>Cc}DYy4OcJXuCmxn5B#b3Mq^vWJfgxjW~0Pt%LIFnQloSS?4EQ_JMOx( zz)8=<>Zl!9PkpW)$cn(y%w(t)2d4NR2PMX3F^@%_@>KaxUm6&-_fPRbuyTm%pwz;2=pMhl95`t?Ru$1hc0s*hw@cxsY!PG)KzEcINgntTZ4qu#yBdKDMU{TfYlJ+1d} z%$fXDOx0_KxhuGB6t&u3Fi_CjdHaIHd-!IpkwrBxpeVlr=J-?@t1}?yy_vG@nVO=$ z|I%&!0S4MU-zWEri%rfF=au*?`t%`4!KU3ojkSvw`m|j)3psjomeAyWaT&KMP3z`? 
z921uV`>d8T@C1A~sxNMb9CxBBvGCOigVq=h#pM@Xo7RPXQrx1ghCwtc;IbGF}(3x|Bxvy>c$niiCrmXn%{ zl&m}X({6yAX0eZdO;wM$-;(#c($2n!-Z zLXLErf|N`ak`$Kc(JQ(Ga!^OogQG&-^3I!W#a1lk>b^JGR#H=f!S!P!$bz^B)-ASbI{L0{g8u~mv;BYnAMT&L zNJ?FTQ|9|YkWSTmi#{edZu6Mm?%Zdx_sxyT52fTJto_!5TWiXBE4g;n)w%2w(3a)( z`YJy;Mp|LAxiqhY<@LX3LAw7QdD-U2sp-0+HM4D<)P*mTW2E(5mjyAe1W8(m>}}JU z#_qM2=S5&w$5~j*6t(Trad6v9h6Emlg!hNNQ+L1dShiq0b9oTk-^u5t6(p`tjRnV< zN3GCu`4)Tsh7HxMKV_qnC$r1QN@z*mZUtE*Wgg4h#XG@!$LD`S*^_@XOg<a1@sWSBl0V(>@&eb=|*>>G`%90^WU-1!WZ^D(%(IGbk`Kv|POT_)vbo zOK0XXwr9p>&XYZ5Wh6xIoxccjTEo(g4I-IZZu@S@&zW)IcHQJ!SviS7YsDL&Y~c4= z=KDY9>)zdJ`#ECEIZ`ISla-x(L6%Koi_(f-kZzR~zJ2GGpYdO1U>dS2`ThCH?_~8P zp4O(k1-V%Lq)g(rZ;FnS&#j59{ne-i(yi=O3~w>@yxt8~w@*2*{m_SE$Ht{M7eu8= zGK17jW|0z<*uMQbxR@8?-*I}!=OsStCKjEVt*ovGQlJ!(3#&z5TBqIu1|CCel{n4s(3?#WQ_S=k8`wINpICpcJy^*Qx?GM-KgHY;D9c=pEZ$s6QlC%4NBN<_>#IRWJVIZb;$<%#k-Pr7r< zf0Bez-Q*4O>Jm#OPWON$IqbIUOC(yktX1iM-Rb7bIeCM;++II^>Hrr+uL?7?xt7Ta*&eAZ3_Gn$6k0o z0$FnS&8DfpBg(wjMa`G^lH2UXmiZtBWiQp&N~_KB-ZXd7Ou-#WMUy`$ z=t(gA?*`|)xff&l79Aq9Fm(79sfOJws$-K z?S8JiH{~X8P?S;&&&*57FNcNuVkK8_e?FltbfT@IvHRXERrR`bC+^7_O3HpKL@%EQ znbUh|+8Q74o`&*hFo554{qnrcj-O-{1UtO78_f6 zKf_gelV>W)NNgy|e*-eIL|G_7f65=H_UzSdg-wRBh!+dlr=7DgSJ^CiM%vaPb3#GfOZrhzg5b5;5_ zZC(ejsn^~aT|U3;?&NA^1$T+Hzrh`U6_);u41eCZOe}t^y?u_L57ZkWsTC!9NSUP4 zXjLPqkttlGSS)hsmiN-Vnj0HLHdar5q^v5@TDJ8#D4ni8eeTim{|kNR$1=6H)km04 zmRC`5KgaAc1LV0k@&7_E9r*1%@h`vJ-AgiuU>0Q*r6#5z+CdC?t7m}B>bS&^nK{+d zc|yGAm0y!;?oDn}QIb$Bd-D}!R&Lq03a5v-s~m0!Z<>ij7n7qoB~K6^Opq1()Js*=JzW^9u; zs>(@pB*uVClJy1|e{Nbla9+lsTx}s_wPx~0Rq@IHRJkRjb(O%4yc06A{)P`UoZC8E z%=nrj@+K>)$$DidC;R{z%{cATq4T~Iz2?|!)uo@Tga=enVqS3~xSock)8v2t-#|Lv zX-rA5&gJmipcc=d_>#+Oa)+9NM2v;#3{dP}X>NPtzD?47!S=R@C@$aX$p_RV9B&wS zf(QRf*;F?@5?t)RZqA!#>vc<{ppk+|x4u(mgG<=X*#r&w?sO zl%v5r_N@Rpoau|tUbf{0K8v=?RqN`$hJ^!2A*5>*3?Ah4dz}rgL!WQ7Ja9B8*?BA5 z?1~RvvLZ<42BjvZq!xu2WtPBuhZm-_-ezE6Vo 25) (kafka.controller.KafkaController) -[2023-11-03 19:38:21,195] DEBUG [Controller id=0] Register BrokerModifications handler for Set(0) (kafka.controller.KafkaController) -[2023-11-03 19:38:21,202] DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager) -[2023-11-03 19:38:21,206] INFO [RequestSendThread controllerId=0] Starting (kafka.controller.RequestSendThread) -[2023-11-03 19:38:21,208] INFO [Controller id=0] Currently active brokers in the cluster: Set(0) (kafka.controller.KafkaController) -[2023-11-03 19:38:21,209] INFO [Controller id=0] Currently shutting brokers in the cluster: HashSet() (kafka.controller.KafkaController) -[2023-11-03 19:38:21,209] INFO [Controller id=0] Current list of topics in the cluster: HashSet() (kafka.controller.KafkaController) -[2023-11-03 19:38:21,209] INFO [Controller id=0] Fetching topic deletions in progress (kafka.controller.KafkaController) -[2023-11-03 19:38:21,212] INFO [Controller id=0] List of topics to be deleted: (kafka.controller.KafkaController) -[2023-11-03 19:38:21,212] INFO [Controller id=0] List of topics ineligible for deletion: (kafka.controller.KafkaController) -[2023-11-03 19:38:21,212] INFO [Controller id=0] Initializing topic deletion manager (kafka.controller.KafkaController) -[2023-11-03 19:38:21,213] INFO [Topic Deletion Manager 0] Initializing manager with initial deletions: Set(), initial ineligible deletions: HashSet() (kafka.controller.TopicDeletionManager) -[2023-11-03 19:38:21,214] INFO [Controller id=0] Sending update metadata request (kafka.controller.KafkaController) -[2023-11-03 19:38:21,224] INFO [ReplicaStateMachine 
controllerId=0] Initializing replica state (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:38:21,224] INFO [ReplicaStateMachine controllerId=0] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:38:21,230] INFO [ReplicaStateMachine controllerId=0] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:38:21,230] DEBUG [ReplicaStateMachine controllerId=0] Started replica state machine with initial state -> HashMap() (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:38:21,231] INFO [PartitionStateMachine controllerId=0] Initializing partition state (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:38:21,231] INFO [PartitionStateMachine controllerId=0] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:38:21,233] DEBUG [PartitionStateMachine controllerId=0] Started partition state machine with initial state -> HashMap() (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:38:21,233] INFO [Controller id=0] Ready to serve as the new controller with epoch 1 (kafka.controller.KafkaController) -[2023-11-03 19:38:21,245] INFO [Controller id=0] Partitions undergoing preferred replica election: (kafka.controller.KafkaController) -[2023-11-03 19:38:21,245] INFO [Controller id=0] Partitions that completed preferred replica election: (kafka.controller.KafkaController) -[2023-11-03 19:38:21,246] INFO [Controller id=0] Skipping preferred replica election for partitions due to topic deletion: (kafka.controller.KafkaController) -[2023-11-03 19:38:21,246] WARN [RequestSendThread controllerId=0] Controller 0's connection to broker ThinkPadP53:9092 (id: 0 rack: null) was unsuccessful (kafka.controller.RequestSendThread) -java.io.IOException: Connection to ThinkPadP53:9092 (id: 0 rack: null) failed. +[2023-11-06 13:26:31,961] INFO [ControllerEventThread controllerId=0] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) +[2023-11-06 13:26:31,976] INFO [Controller id=0] 0 successfully elected as the controller. 
Epoch incremented to 1 and epoch zk version is now 1 (kafka.controller.KafkaController) +[2023-11-06 13:26:31,979] INFO [Controller id=0] Creating FeatureZNode at path: /feature with contents: FeatureZNode(2,Enabled,Map()) (kafka.controller.KafkaController) +[2023-11-06 13:26:31,997] INFO [Controller id=0] Registering handlers (kafka.controller.KafkaController) +[2023-11-06 13:26:32,000] INFO [Controller id=0] Deleting log dir event notifications (kafka.controller.KafkaController) +[2023-11-06 13:26:32,002] INFO [Controller id=0] Deleting isr change notifications (kafka.controller.KafkaController) +[2023-11-06 13:26:32,004] INFO [Controller id=0] Initializing controller context (kafka.controller.KafkaController) +[2023-11-06 13:26:32,013] INFO [Controller id=0] Initialized broker epochs cache: HashMap(0 -> 25) (kafka.controller.KafkaController) +[2023-11-06 13:26:32,016] DEBUG [Controller id=0] Register BrokerModifications handler for Set(0) (kafka.controller.KafkaController) +[2023-11-06 13:26:32,020] DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager) +[2023-11-06 13:26:32,023] INFO [RequestSendThread controllerId=0] Starting (kafka.controller.RequestSendThread) +[2023-11-06 13:26:32,024] INFO [Controller id=0] Currently active brokers in the cluster: Set(0) (kafka.controller.KafkaController) +[2023-11-06 13:26:32,024] INFO [Controller id=0] Currently shutting brokers in the cluster: HashSet() (kafka.controller.KafkaController) +[2023-11-06 13:26:32,024] INFO [Controller id=0] Current list of topics in the cluster: HashSet() (kafka.controller.KafkaController) +[2023-11-06 13:26:32,024] INFO [Controller id=0] Fetching topic deletions in progress (kafka.controller.KafkaController) +[2023-11-06 13:26:32,027] INFO [Controller id=0] List of topics to be deleted: (kafka.controller.KafkaController) +[2023-11-06 13:26:32,027] INFO [Controller id=0] List of topics ineligible for deletion: (kafka.controller.KafkaController) +[2023-11-06 13:26:32,028] INFO [Controller id=0] Initializing topic deletion manager (kafka.controller.KafkaController) +[2023-11-06 13:26:32,028] INFO [Topic Deletion Manager 0] Initializing manager with initial deletions: Set(), initial ineligible deletions: HashSet() (kafka.controller.TopicDeletionManager) +[2023-11-06 13:26:32,029] INFO [Controller id=0] Sending update metadata request (kafka.controller.KafkaController) +[2023-11-06 13:26:32,037] INFO [ReplicaStateMachine controllerId=0] Initializing replica state (kafka.controller.ZkReplicaStateMachine) +[2023-11-06 13:26:32,037] INFO [ReplicaStateMachine controllerId=0] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) +[2023-11-06 13:26:32,046] INFO [ReplicaStateMachine controllerId=0] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) +[2023-11-06 13:26:32,049] DEBUG [ReplicaStateMachine controllerId=0] Started replica state machine with initial state -> HashMap() (kafka.controller.ZkReplicaStateMachine) +[2023-11-06 13:26:32,049] INFO [PartitionStateMachine controllerId=0] Initializing partition state (kafka.controller.ZkPartitionStateMachine) +[2023-11-06 13:26:32,049] INFO [PartitionStateMachine controllerId=0] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) +[2023-11-06 13:26:32,051] DEBUG [PartitionStateMachine controllerId=0] Started partition state machine with initial state -> HashMap() (kafka.controller.ZkPartitionStateMachine) 
+[2023-11-06 13:26:32,051] INFO [Controller id=0] Ready to serve as the new controller with epoch 1 (kafka.controller.KafkaController) +[2023-11-06 13:26:32,054] WARN [RequestSendThread controllerId=0] Controller 0's connection to broker localhost:9092 (id: 0 rack: null) was unsuccessful (kafka.controller.RequestSendThread) +java.io.IOException: Connection to localhost:9092 (id: 0 rack: null) failed. at org.apache.kafka.clients.NetworkClientUtils.awaitReady(NetworkClientUtils.java:70) at kafka.controller.RequestSendThread.brokerReady(ControllerChannelManager.scala:298) at kafka.controller.RequestSendThread.doWork(ControllerChannelManager.scala:251) at org.apache.kafka.server.util.ShutdownableThread.run(ShutdownableThread.java:130) -[2023-11-03 19:38:21,246] INFO [Controller id=0] Resuming preferred replica election for partitions: (kafka.controller.KafkaController) -[2023-11-03 19:38:21,248] INFO [Controller id=0] Starting replica leader election (PREFERRED) for partitions triggered by ZkTriggered (kafka.controller.KafkaController) -[2023-11-03 19:38:21,256] INFO [Controller id=0] Starting the controller scheduler (kafka.controller.KafkaController) -[2023-11-03 19:38:21,349] INFO [RequestSendThread controllerId=0] Controller 0 connected to ThinkPadP53:9092 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread) -[2023-11-03 19:38:26,257] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) -[2023-11-03 19:38:26,258] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) -[2023-11-03 19:38:41,910] INFO [Controller id=0] New topics: [Set(__consumer_offsets)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(__consumer_offsets,Some(vfIIZeo7TYKNeTmKGzMlyg),HashMap(__consumer_offsets-22 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-30 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-25 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-35 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-37 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-38 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-13 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-8 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-21 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-4 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-27 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-7 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-9 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-46 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-41 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-33 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-23 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-49 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-47 -> 
ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-16 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-28 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-31 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-36 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-42 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-3 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-18 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-15 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-24 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-17 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-48 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-19 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-11 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-2 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-43 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-6 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-14 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-20 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-44 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-39 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-12 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-45 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-1 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-5 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-26 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-29 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-34 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-10 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-32 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-40 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) -[2023-11-03 19:38:41,910] INFO [Controller id=0] New partition creation callback for 
__consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-37,__consumer_offsets-38,__consumer_offsets-13,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.controller.KafkaController) -[2023-11-03 19:38:56,520] INFO [Controller id=0] New topics: [Set(test-topic)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(test-topic,Some(Pb9zfnlKRkmTGaMQyAABkw),Map(test-topic-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) -[2023-11-03 19:38:56,520] INFO [Controller id=0] New partition creation callback for test-topic-0 (kafka.controller.KafkaController) -[2023-11-03 19:40:48,862] INFO [Controller id=0] Shutting down broker 0 (kafka.controller.KafkaController) -[2023-11-03 19:40:48,862] DEBUG [Controller id=0] All shutting down brokers: 0 (kafka.controller.KafkaController) -[2023-11-03 19:40:48,862] DEBUG [Controller id=0] Live brokers: (kafka.controller.KafkaController) -[2023-11-03 19:40:48,864] TRACE [Controller id=0] All leaders = __consumer_offsets-13 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-46 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-9 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-42 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-21 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-17 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-30 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-26 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-5 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-38 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-1 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-34 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-16 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-45 -> 
(Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-12 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-41 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-24 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-20 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-49 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-0 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-29 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-25 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-8 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-37 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-4 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-33 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-15 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-48 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-11 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-44 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-23 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-19 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-32 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),test-topic-0 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-28 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-7 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-40 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-3 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-36 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-47 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-14 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-43 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-10 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-22 -> 
(Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-18 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-31 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-27 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-39 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-6 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-35 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1),__consumer_offsets-2 -> (Leader:0,ISR:0,LeaderRecoveryState:RECOVERED,LeaderEpoch:0,ZkVersion:0,ControllerEpoch:1) (kafka.controller.KafkaController) -[2023-11-03 19:40:48,920] INFO [ControllerEventThread controllerId=0] Shutting down (kafka.controller.ControllerEventManager$ControllerEventThread) -[2023-11-03 19:40:48,921] INFO [ControllerEventThread controllerId=0] Stopped (kafka.controller.ControllerEventManager$ControllerEventThread) -[2023-11-03 19:40:48,921] INFO [ControllerEventThread controllerId=0] Shutdown completed (kafka.controller.ControllerEventManager$ControllerEventThread) -[2023-11-03 19:40:48,921] DEBUG [Controller id=0] Resigning (kafka.controller.KafkaController) -[2023-11-03 19:40:48,921] DEBUG [Controller id=0] Unregister BrokerModifications handler for Set(0) (kafka.controller.KafkaController) -[2023-11-03 19:40:48,922] INFO [PartitionStateMachine controllerId=0] Stopped partition state machine (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:40:48,923] INFO [ReplicaStateMachine controllerId=0] Stopped replica state machine (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:40:48,923] INFO [RequestSendThread controllerId=0] Shutting down (kafka.controller.RequestSendThread) -[2023-11-03 19:40:48,923] INFO [RequestSendThread controllerId=0] Shutdown completed (kafka.controller.RequestSendThread) -[2023-11-03 19:40:48,923] INFO [RequestSendThread controllerId=0] Stopped (kafka.controller.RequestSendThread) -[2023-11-03 19:40:48,924] INFO [Controller id=0] Resigned (kafka.controller.KafkaController) -[2023-11-03 19:45:29,427] INFO [ControllerEventThread controllerId=0] Starting (kafka.controller.ControllerEventManager$ControllerEventThread) -[2023-11-03 19:45:29,441] INFO [Controller id=0] 0 successfully elected as the controller. 
Epoch incremented to 2 and epoch zk version is now 2 (kafka.controller.KafkaController) -[2023-11-03 19:45:29,445] INFO [Controller id=0] Registering handlers (kafka.controller.KafkaController) -[2023-11-03 19:45:29,449] INFO [Controller id=0] Deleting log dir event notifications (kafka.controller.KafkaController) -[2023-11-03 19:45:29,451] INFO [Controller id=0] Deleting isr change notifications (kafka.controller.KafkaController) -[2023-11-03 19:45:29,453] INFO [Controller id=0] Initializing controller context (kafka.controller.KafkaController) -[2023-11-03 19:45:29,464] INFO [Controller id=0] Initialized broker epochs cache: HashMap(0 -> 156) (kafka.controller.KafkaController) -[2023-11-03 19:45:29,476] DEBUG [Controller id=0] Register BrokerModifications handler for Set(0) (kafka.controller.KafkaController) -[2023-11-03 19:45:29,509] DEBUG [Channel manager on controller 0]: Controller 0 trying to connect to broker 0 (kafka.controller.ControllerChannelManager) -[2023-11-03 19:45:29,514] INFO [RequestSendThread controllerId=0] Starting (kafka.controller.RequestSendThread) -[2023-11-03 19:45:29,515] INFO [Controller id=0] Currently active brokers in the cluster: Set(0) (kafka.controller.KafkaController) -[2023-11-03 19:45:29,515] INFO [Controller id=0] Currently shutting brokers in the cluster: HashSet() (kafka.controller.KafkaController) -[2023-11-03 19:45:29,515] INFO [Controller id=0] Current list of topics in the cluster: HashSet(test-topic, __consumer_offsets) (kafka.controller.KafkaController) -[2023-11-03 19:45:29,516] INFO [Controller id=0] Fetching topic deletions in progress (kafka.controller.KafkaController) -[2023-11-03 19:45:29,521] INFO [Controller id=0] List of topics to be deleted: (kafka.controller.KafkaController) -[2023-11-03 19:45:29,521] INFO [Controller id=0] List of topics ineligible for deletion: (kafka.controller.KafkaController) -[2023-11-03 19:45:29,521] INFO [Controller id=0] Initializing topic deletion manager (kafka.controller.KafkaController) -[2023-11-03 19:45:29,521] INFO [Topic Deletion Manager 0] Initializing manager with initial deletions: Set(), initial ineligible deletions: HashSet() (kafka.controller.TopicDeletionManager) -[2023-11-03 19:45:29,522] INFO [Controller id=0] Sending update metadata request (kafka.controller.KafkaController) -[2023-11-03 19:45:29,532] INFO [ReplicaStateMachine controllerId=0] Initializing replica state (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:45:29,537] INFO [ReplicaStateMachine controllerId=0] Triggering online replica state changes (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:45:29,539] WARN [RequestSendThread controllerId=0] Controller 0's connection to broker ThinkPadP53:9092 (id: 0 rack: null) was unsuccessful (kafka.controller.RequestSendThread) -java.io.IOException: Connection to ThinkPadP53:9092 (id: 0 rack: null) failed. 
- at org.apache.kafka.clients.NetworkClientUtils.awaitReady(NetworkClientUtils.java:70) - at kafka.controller.RequestSendThread.brokerReady(ControllerChannelManager.scala:298) - at kafka.controller.RequestSendThread.doWork(ControllerChannelManager.scala:251) - at org.apache.kafka.server.util.ShutdownableThread.run(ShutdownableThread.java:130) -[2023-11-03 19:45:29,565] INFO [ReplicaStateMachine controllerId=0] Triggering offline replica state changes (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:45:29,565] DEBUG [ReplicaStateMachine controllerId=0] Started replica state machine with initial state -> HashMap([Topic=__consumer_offsets,Partition=40,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=27,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=49,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=47,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=3,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=18,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=44,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=8,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=34,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=25,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=14,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=24,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=36,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=42,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=45,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=11,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=32,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=12,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=30,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=9,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=39,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=38,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=23,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=19,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=17,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=41,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=37,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=48,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=29,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=10,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=46,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=1,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=16,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=5,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=15,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=4,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=6,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=7,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=43,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=0,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=20,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=31,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=28,Replica=0] -> OnlineReplica, 
[Topic=__consumer_offsets,Partition=26,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=2,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=33,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=22,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=21,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=13,Replica=0] -> OnlineReplica, [Topic=test-topic,Partition=0,Replica=0] -> OnlineReplica, [Topic=__consumer_offsets,Partition=35,Replica=0] -> OnlineReplica) (kafka.controller.ZkReplicaStateMachine) -[2023-11-03 19:45:29,567] INFO [PartitionStateMachine controllerId=0] Initializing partition state (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:45:29,570] INFO [PartitionStateMachine controllerId=0] Triggering online partition state changes (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:45:29,572] DEBUG [PartitionStateMachine controllerId=0] Started partition state machine with initial state -> HashMap(__consumer_offsets-13 -> OnlinePartition, __consumer_offsets-46 -> OnlinePartition, __consumer_offsets-9 -> OnlinePartition, __consumer_offsets-42 -> OnlinePartition, __consumer_offsets-21 -> OnlinePartition, __consumer_offsets-17 -> OnlinePartition, __consumer_offsets-30 -> OnlinePartition, __consumer_offsets-26 -> OnlinePartition, __consumer_offsets-5 -> OnlinePartition, __consumer_offsets-38 -> OnlinePartition, __consumer_offsets-1 -> OnlinePartition, __consumer_offsets-34 -> OnlinePartition, __consumer_offsets-16 -> OnlinePartition, __consumer_offsets-45 -> OnlinePartition, __consumer_offsets-12 -> OnlinePartition, __consumer_offsets-41 -> OnlinePartition, __consumer_offsets-24 -> OnlinePartition, __consumer_offsets-20 -> OnlinePartition, __consumer_offsets-49 -> OnlinePartition, __consumer_offsets-0 -> OnlinePartition, __consumer_offsets-29 -> OnlinePartition, __consumer_offsets-25 -> OnlinePartition, __consumer_offsets-8 -> OnlinePartition, __consumer_offsets-37 -> OnlinePartition, __consumer_offsets-4 -> OnlinePartition, __consumer_offsets-33 -> OnlinePartition, __consumer_offsets-15 -> OnlinePartition, __consumer_offsets-48 -> OnlinePartition, __consumer_offsets-11 -> OnlinePartition, __consumer_offsets-44 -> OnlinePartition, __consumer_offsets-23 -> OnlinePartition, __consumer_offsets-19 -> OnlinePartition, __consumer_offsets-32 -> OnlinePartition, test-topic-0 -> OnlinePartition, __consumer_offsets-28 -> OnlinePartition, __consumer_offsets-7 -> OnlinePartition, __consumer_offsets-40 -> OnlinePartition, __consumer_offsets-3 -> OnlinePartition, __consumer_offsets-36 -> OnlinePartition, __consumer_offsets-47 -> OnlinePartition, __consumer_offsets-14 -> OnlinePartition, __consumer_offsets-43 -> OnlinePartition, __consumer_offsets-10 -> OnlinePartition, __consumer_offsets-22 -> OnlinePartition, __consumer_offsets-18 -> OnlinePartition, __consumer_offsets-31 -> OnlinePartition, __consumer_offsets-27 -> OnlinePartition, __consumer_offsets-39 -> OnlinePartition, __consumer_offsets-6 -> OnlinePartition, __consumer_offsets-35 -> OnlinePartition, __consumer_offsets-2 -> OnlinePartition) (kafka.controller.ZkPartitionStateMachine) -[2023-11-03 19:45:29,572] INFO [Controller id=0] Ready to serve as the new controller with epoch 2 (kafka.controller.KafkaController) -[2023-11-03 19:45:29,579] INFO [Controller id=0] Partitions undergoing preferred replica election: (kafka.controller.KafkaController) -[2023-11-03 19:45:29,579] INFO [Controller id=0] Partitions that completed preferred replica election: 
(kafka.controller.KafkaController) -[2023-11-03 19:45:29,579] INFO [Controller id=0] Skipping preferred replica election for partitions due to topic deletion: (kafka.controller.KafkaController) -[2023-11-03 19:45:29,580] INFO [Controller id=0] Resuming preferred replica election for partitions: (kafka.controller.KafkaController) -[2023-11-03 19:45:29,581] INFO [Controller id=0] Starting replica leader election (PREFERRED) for partitions triggered by ZkTriggered (kafka.controller.KafkaController) -[2023-11-03 19:45:29,591] INFO [Controller id=0] Starting the controller scheduler (kafka.controller.KafkaController) -[2023-11-03 19:45:29,643] INFO [RequestSendThread controllerId=0] Controller 0 connected to ThinkPadP53:9092 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread) -[2023-11-03 19:45:34,592] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) -[2023-11-03 19:45:34,593] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) -[2023-11-03 19:45:34,595] DEBUG [Controller id=0] Topics not in preferred replica for broker 0 HashMap() (kafka.controller.KafkaController) -[2023-11-03 19:45:34,596] TRACE [Controller id=0] Leader imbalance ratio for broker 0 is 0.0 (kafka.controller.KafkaController) +[2023-11-06 13:26:32,055] INFO [Controller id=0] Partitions undergoing preferred replica election: (kafka.controller.KafkaController) +[2023-11-06 13:26:32,055] INFO [Controller id=0] Partitions that completed preferred replica election: (kafka.controller.KafkaController) +[2023-11-06 13:26:32,055] INFO [Controller id=0] Skipping preferred replica election for partitions due to topic deletion: (kafka.controller.KafkaController) +[2023-11-06 13:26:32,056] INFO [Controller id=0] Resuming preferred replica election for partitions: (kafka.controller.KafkaController) +[2023-11-06 13:26:32,056] INFO [Controller id=0] Starting replica leader election (PREFERRED) for partitions triggered by ZkTriggered (kafka.controller.KafkaController) +[2023-11-06 13:26:32,063] INFO [Controller id=0] Starting the controller scheduler (kafka.controller.KafkaController) +[2023-11-06 13:26:32,156] INFO [RequestSendThread controllerId=0] Controller 0 connected to localhost:9092 (id: 0 rack: null) for sending state change requests (kafka.controller.RequestSendThread) +[2023-11-06 13:26:37,064] INFO [Controller id=0] Processing automatic preferred replica leader election (kafka.controller.KafkaController) +[2023-11-06 13:26:37,064] TRACE [Controller id=0] Checking need to trigger auto leader balancing (kafka.controller.KafkaController) +[2023-11-06 13:27:16,510] INFO [Controller id=0] New topics: [Set(__consumer_offsets)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(__consumer_offsets,Some(f9d_z6FzSde58txrT_Qj9w),HashMap(__consumer_offsets-22 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-30 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-25 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-35 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-37 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-38 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-13 -> ReplicaAssignment(replicas=0, addingReplicas=, 
removingReplicas=), __consumer_offsets-8 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-21 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-4 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-27 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-7 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-9 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-46 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-41 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-33 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-23 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-49 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-47 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-16 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-28 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-31 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-36 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-42 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-3 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-18 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-15 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-24 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-17 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-48 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-19 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-11 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-2 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-43 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-6 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-14 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-20 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-44 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-39 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-12 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-45 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-1 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-5 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-26 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-29 -> 
ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-34 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-10 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-32 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=), __consumer_offsets-40 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) +[2023-11-06 13:27:16,511] INFO [Controller id=0] New partition creation callback for __consumer_offsets-22,__consumer_offsets-30,__consumer_offsets-25,__consumer_offsets-35,__consumer_offsets-37,__consumer_offsets-38,__consumer_offsets-13,__consumer_offsets-8,__consumer_offsets-21,__consumer_offsets-4,__consumer_offsets-27,__consumer_offsets-7,__consumer_offsets-9,__consumer_offsets-46,__consumer_offsets-41,__consumer_offsets-33,__consumer_offsets-23,__consumer_offsets-49,__consumer_offsets-47,__consumer_offsets-16,__consumer_offsets-28,__consumer_offsets-31,__consumer_offsets-36,__consumer_offsets-42,__consumer_offsets-3,__consumer_offsets-18,__consumer_offsets-15,__consumer_offsets-24,__consumer_offsets-17,__consumer_offsets-48,__consumer_offsets-19,__consumer_offsets-11,__consumer_offsets-2,__consumer_offsets-43,__consumer_offsets-6,__consumer_offsets-14,__consumer_offsets-20,__consumer_offsets-0,__consumer_offsets-44,__consumer_offsets-39,__consumer_offsets-12,__consumer_offsets-45,__consumer_offsets-1,__consumer_offsets-5,__consumer_offsets-26,__consumer_offsets-29,__consumer_offsets-34,__consumer_offsets-10,__consumer_offsets-32,__consumer_offsets-40 (kafka.controller.KafkaController) +[2023-11-06 13:27:32,128] INFO [Controller id=0] New topics: [Set(test-topic)], deleted topics: [HashSet()], new partition replica assignment [Set(TopicIdReplicaAssignment(test-topic,Some(Hx76FWANRJGp_-YQs8849Q),Map(test-topic-0 -> ReplicaAssignment(replicas=0, addingReplicas=, removingReplicas=))))] (kafka.controller.KafkaController) +[2023-11-06 13:27:32,128] INFO [Controller id=0] New partition creation callback for test-topic-0 (kafka.controller.KafkaController) diff --git a/logs/kafkaServer-gc.log b/logs/kafkaServer-gc.log index 17b30bc..c4bc3ac 100644 --- a/logs/kafkaServer-gc.log +++ b/logs/kafkaServer-gc.log @@ -1,125 +1,125 @@ -[2023-11-03T19:45:27.999-0400][gc] Using G1 -[2023-11-03T19:45:28.005-0400][gc,init] Version: 17.0.6+10 (release) -[2023-11-03T19:45:28.005-0400][gc,init] CPUs: 12 total, 12 available -[2023-11-03T19:45:28.005-0400][gc,init] Memory: 63941M -[2023-11-03T19:45:28.005-0400][gc,init] Large Page Support: Disabled -[2023-11-03T19:45:28.005-0400][gc,init] NUMA Support: Disabled -[2023-11-03T19:45:28.005-0400][gc,init] Compressed Oops: Enabled (32-bit) -[2023-11-03T19:45:28.005-0400][gc,init] Heap Region Size: 1M -[2023-11-03T19:45:28.005-0400][gc,init] Heap Min Capacity: 1G -[2023-11-03T19:45:28.005-0400][gc,init] Heap Initial Capacity: 1G -[2023-11-03T19:45:28.005-0400][gc,init] Heap Max Capacity: 1G -[2023-11-03T19:45:28.005-0400][gc,init] Pre-touch: Disabled -[2023-11-03T19:45:28.005-0400][gc,init] Parallel Workers: 10 -[2023-11-03T19:45:28.005-0400][gc,init] Concurrent Workers: 3 -[2023-11-03T19:45:28.005-0400][gc,init] Concurrent Refinement Workers: 10 -[2023-11-03T19:45:28.005-0400][gc,init] Periodic GC: Disabled -[2023-11-03T19:45:28.005-0400][gc,metaspace] CDS archive(s) mapped at: [0x0000000800000000-0x0000000800bd5000-0x0000000800bd5000), size 12406784, SharedBaseAddress: 0x0000000800000000, 
ArchiveRelocationMode: 0. -[2023-11-03T19:45:28.005-0400][gc,metaspace] Compressed class space mapped at: 0x0000000800c00000-0x0000000840c00000, reserved size: 1073741824 -[2023-11-03T19:45:28.005-0400][gc,metaspace] Narrow klass base: 0x0000000800000000, Narrow klass shift: 0, Narrow klass range: 0x100000000 -[2023-11-03T19:45:28.714-0400][gc,start ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) -[2023-11-03T19:45:28.715-0400][gc,task ] GC(0) Using 10 workers of 10 for evacuation -[2023-11-03T19:45:28.722-0400][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.1ms -[2023-11-03T19:45:28.722-0400][gc,phases ] GC(0) Merge Heap Roots: 0.1ms -[2023-11-03T19:45:28.722-0400][gc,phases ] GC(0) Evacuate Collection Set: 6.5ms -[2023-11-03T19:45:28.722-0400][gc,phases ] GC(0) Post Evacuate Collection Set: 0.4ms -[2023-11-03T19:45:28.722-0400][gc,phases ] GC(0) Other: 0.8ms -[2023-11-03T19:45:28.722-0400][gc,heap ] GC(0) Eden regions: 51->0(44) -[2023-11-03T19:45:28.722-0400][gc,heap ] GC(0) Survivor regions: 0->7(7) -[2023-11-03T19:45:28.722-0400][gc,heap ] GC(0) Old regions: 0->1 -[2023-11-03T19:45:28.722-0400][gc,heap ] GC(0) Archive regions: 2->2 -[2023-11-03T19:45:28.722-0400][gc,heap ] GC(0) Humongous regions: 0->0 -[2023-11-03T19:45:28.722-0400][gc,metaspace] GC(0) Metaspace: 18659K(18880K)->18659K(18880K) NonClass: 16455K(16576K)->16455K(16576K) Class: 2203K(2304K)->2203K(2304K) -[2023-11-03T19:45:28.722-0400][gc ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 51M->8M(1024M) 7.973ms -[2023-11-03T19:45:28.722-0400][gc,cpu ] GC(0) User=0.07s Sys=0.00s Real=0.01s -[2023-11-03T19:45:28.862-0400][gc,start ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) -[2023-11-03T19:45:28.862-0400][gc,task ] GC(1) Using 10 workers of 10 for evacuation -[2023-11-03T19:45:28.869-0400][gc,phases ] GC(1) Pre Evacuate Collection Set: 0.1ms -[2023-11-03T19:45:28.869-0400][gc,phases ] GC(1) Merge Heap Roots: 0.0ms -[2023-11-03T19:45:28.869-0400][gc,phases ] GC(1) Evacuate Collection Set: 6.4ms -[2023-11-03T19:45:28.869-0400][gc,phases ] GC(1) Post Evacuate Collection Set: 0.4ms -[2023-11-03T19:45:28.869-0400][gc,phases ] GC(1) Other: 0.2ms -[2023-11-03T19:45:28.869-0400][gc,heap ] GC(1) Eden regions: 10->0(50) -[2023-11-03T19:45:28.869-0400][gc,heap ] GC(1) Survivor regions: 7->1(7) -[2023-11-03T19:45:28.869-0400][gc,heap ] GC(1) Old regions: 1->8 -[2023-11-03T19:45:28.869-0400][gc,heap ] GC(1) Archive regions: 2->2 -[2023-11-03T19:45:28.869-0400][gc,heap ] GC(1) Humongous regions: 0->0 -[2023-11-03T19:45:28.869-0400][gc,metaspace] GC(1) Metaspace: 21310K(21504K)->21310K(21504K) NonClass: 18734K(18816K)->18734K(18816K) Class: 2575K(2688K)->2575K(2688K) -[2023-11-03T19:45:28.869-0400][gc ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) 18M->9M(1024M) 7.138ms -[2023-11-03T19:45:28.869-0400][gc,cpu ] GC(1) User=0.00s Sys=0.06s Real=0.01s -[2023-11-03T19:45:28.869-0400][gc ] GC(2) Concurrent Mark Cycle -[2023-11-03T19:45:28.869-0400][gc,marking ] GC(2) Concurrent Clear Claimed Marks -[2023-11-03T19:45:28.869-0400][gc,marking ] GC(2) Concurrent Clear Claimed Marks 0.011ms -[2023-11-03T19:45:28.869-0400][gc,marking ] GC(2) Concurrent Scan Root Regions -[2023-11-03T19:45:28.871-0400][gc,marking ] GC(2) Concurrent Scan Root Regions 1.137ms -[2023-11-03T19:45:28.871-0400][gc,marking ] GC(2) Concurrent Mark -[2023-11-03T19:45:28.871-0400][gc,marking ] GC(2) Concurrent Mark From Roots -[2023-11-03T19:45:28.871-0400][gc,task ] GC(2) Using 3 workers of 3 for marking 
-[2023-11-03T19:45:28.871-0400][gc,marking ] GC(2) Concurrent Mark From Roots 0.977ms -[2023-11-03T19:45:28.872-0400][gc,marking ] GC(2) Concurrent Preclean -[2023-11-03T19:45:28.872-0400][gc,marking ] GC(2) Concurrent Preclean 0.054ms -[2023-11-03T19:45:28.872-0400][gc,start ] GC(2) Pause Remark -[2023-11-03T19:45:28.873-0400][gc ] GC(2) Pause Remark 10M->10M(1024M) 0.908ms -[2023-11-03T19:45:28.873-0400][gc,cpu ] GC(2) User=0.00s Sys=0.00s Real=0.00s -[2023-11-03T19:45:28.873-0400][gc,marking ] GC(2) Concurrent Mark 2.112ms -[2023-11-03T19:45:28.873-0400][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets -[2023-11-03T19:45:28.874-0400][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets 1.239ms -[2023-11-03T19:45:28.874-0400][gc,start ] GC(2) Pause Cleanup -[2023-11-03T19:45:28.874-0400][gc ] GC(2) Pause Cleanup 10M->10M(1024M) 0.183ms -[2023-11-03T19:45:28.874-0400][gc,cpu ] GC(2) User=0.00s Sys=0.00s Real=0.00s -[2023-11-03T19:45:28.874-0400][gc,marking ] GC(2) Concurrent Cleanup for Next Mark -[2023-11-03T19:45:28.878-0400][gc,marking ] GC(2) Concurrent Cleanup for Next Mark 3.944ms -[2023-11-03T19:45:28.878-0400][gc ] GC(2) Concurrent Mark Cycle 8.835ms -[2023-11-03T19:45:29.405-0400][gc,start ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) -[2023-11-03T19:45:29.405-0400][gc,task ] GC(3) Using 10 workers of 10 for evacuation -[2023-11-03T19:45:29.410-0400][gc,phases ] GC(3) Pre Evacuate Collection Set: 0.1ms -[2023-11-03T19:45:29.410-0400][gc,phases ] GC(3) Merge Heap Roots: 0.1ms -[2023-11-03T19:45:29.410-0400][gc,phases ] GC(3) Evacuate Collection Set: 2.7ms -[2023-11-03T19:45:29.410-0400][gc,phases ] GC(3) Post Evacuate Collection Set: 1.7ms -[2023-11-03T19:45:29.410-0400][gc,phases ] GC(3) Other: 0.2ms -[2023-11-03T19:45:29.410-0400][gc,heap ] GC(3) Eden regions: 50->0(45) -[2023-11-03T19:45:29.410-0400][gc,heap ] GC(3) Survivor regions: 1->6(7) -[2023-11-03T19:45:29.410-0400][gc,heap ] GC(3) Old regions: 8->8 -[2023-11-03T19:45:29.410-0400][gc,heap ] GC(3) Archive regions: 2->2 -[2023-11-03T19:45:29.410-0400][gc,heap ] GC(3) Humongous regions: 129->129 -[2023-11-03T19:45:29.410-0400][gc,metaspace] GC(3) Metaspace: 30840K(31168K)->30840K(31168K) NonClass: 27360K(27520K)->27360K(27520K) Class: 3480K(3648K)->3480K(3648K) -[2023-11-03T19:45:29.410-0400][gc ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) 188M->143M(1024M) 4.864ms -[2023-11-03T19:45:29.410-0400][gc,cpu ] GC(3) User=0.03s Sys=0.00s Real=0.00s -[2023-11-03T19:45:29.651-0400][gc,start ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) -[2023-11-03T19:45:29.651-0400][gc,task ] GC(4) Using 10 workers of 10 for evacuation -[2023-11-03T19:45:29.656-0400][gc,phases ] GC(4) Pre Evacuate Collection Set: 0.1ms -[2023-11-03T19:45:29.656-0400][gc,phases ] GC(4) Merge Heap Roots: 0.0ms -[2023-11-03T19:45:29.656-0400][gc,phases ] GC(4) Evacuate Collection Set: 3.8ms -[2023-11-03T19:45:29.656-0400][gc,phases ] GC(4) Post Evacuate Collection Set: 0.3ms -[2023-11-03T19:45:29.656-0400][gc,phases ] GC(4) Other: 0.1ms -[2023-11-03T19:45:29.656-0400][gc,heap ] GC(4) Eden regions: 21->0(49) -[2023-11-03T19:45:29.656-0400][gc,heap ] GC(4) Survivor regions: 6->2(7) -[2023-11-03T19:45:29.656-0400][gc,heap ] GC(4) Old regions: 8->13 -[2023-11-03T19:45:29.656-0400][gc,heap ] GC(4) Archive regions: 2->2 -[2023-11-03T19:45:29.656-0400][gc,heap ] GC(4) Humongous regions: 129->129 -[2023-11-03T19:45:29.656-0400][gc,metaspace] GC(4) Metaspace: 35698K(35968K)->35698K(35968K) NonClass: 31646K(31808K)->31646K(31808K) Class: 
4051K(4160K)->4051K(4160K) -[2023-11-03T19:45:29.656-0400][gc ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) 163M->144M(1024M) 4.413ms -[2023-11-03T19:45:29.656-0400][gc,cpu ] GC(4) User=0.05s Sys=0.01s Real=0.01s -[2023-11-03T19:45:29.656-0400][gc ] GC(5) Concurrent Mark Cycle -[2023-11-03T19:45:29.656-0400][gc,marking ] GC(5) Concurrent Clear Claimed Marks -[2023-11-03T19:45:29.656-0400][gc,marking ] GC(5) Concurrent Clear Claimed Marks 0.017ms -[2023-11-03T19:45:29.656-0400][gc,marking ] GC(5) Concurrent Scan Root Regions -[2023-11-03T19:45:29.658-0400][gc,marking ] GC(5) Concurrent Scan Root Regions 1.818ms -[2023-11-03T19:45:29.658-0400][gc,marking ] GC(5) Concurrent Mark -[2023-11-03T19:45:29.658-0400][gc,marking ] GC(5) Concurrent Mark From Roots -[2023-11-03T19:45:29.658-0400][gc,task ] GC(5) Using 3 workers of 3 for marking -[2023-11-03T19:45:29.662-0400][gc,marking ] GC(5) Concurrent Mark From Roots 4.776ms -[2023-11-03T19:45:29.662-0400][gc,marking ] GC(5) Concurrent Preclean -[2023-11-03T19:45:29.663-0400][gc,marking ] GC(5) Concurrent Preclean 0.125ms -[2023-11-03T19:45:29.663-0400][gc,start ] GC(5) Pause Remark -[2023-11-03T19:45:29.664-0400][gc ] GC(5) Pause Remark 145M->145M(1024M) 0.870ms -[2023-11-03T19:45:29.664-0400][gc,cpu ] GC(5) User=0.01s Sys=0.00s Real=0.00s -[2023-11-03T19:45:29.664-0400][gc,marking ] GC(5) Concurrent Mark 5.944ms -[2023-11-03T19:45:29.664-0400][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets -[2023-11-03T19:45:29.664-0400][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets 0.004ms -[2023-11-03T19:45:29.664-0400][gc,start ] GC(5) Pause Cleanup -[2023-11-03T19:45:29.664-0400][gc ] GC(5) Pause Cleanup 145M->145M(1024M) 0.006ms -[2023-11-03T19:45:29.664-0400][gc,cpu ] GC(5) User=0.00s Sys=0.00s Real=0.00s -[2023-11-03T19:45:29.664-0400][gc,marking ] GC(5) Concurrent Cleanup for Next Mark -[2023-11-03T19:45:29.665-0400][gc,marking ] GC(5) Concurrent Cleanup for Next Mark 1.530ms -[2023-11-03T19:45:29.665-0400][gc ] GC(5) Concurrent Mark Cycle 9.512ms +[2023-11-06T13:26:30.795-0500][gc] Using G1 +[2023-11-06T13:26:30.801-0500][gc,init] Version: 17.0.8.1+1 (release) +[2023-11-06T13:26:30.801-0500][gc,init] CPUs: 12 total, 12 available +[2023-11-06T13:26:30.801-0500][gc,init] Memory: 63941M +[2023-11-06T13:26:30.801-0500][gc,init] Large Page Support: Disabled +[2023-11-06T13:26:30.801-0500][gc,init] NUMA Support: Disabled +[2023-11-06T13:26:30.801-0500][gc,init] Compressed Oops: Enabled (32-bit) +[2023-11-06T13:26:30.801-0500][gc,init] Heap Region Size: 1M +[2023-11-06T13:26:30.801-0500][gc,init] Heap Min Capacity: 1G +[2023-11-06T13:26:30.801-0500][gc,init] Heap Initial Capacity: 1G +[2023-11-06T13:26:30.801-0500][gc,init] Heap Max Capacity: 1G +[2023-11-06T13:26:30.801-0500][gc,init] Pre-touch: Disabled +[2023-11-06T13:26:30.801-0500][gc,init] Parallel Workers: 10 +[2023-11-06T13:26:30.801-0500][gc,init] Concurrent Workers: 3 +[2023-11-06T13:26:30.801-0500][gc,init] Concurrent Refinement Workers: 10 +[2023-11-06T13:26:30.801-0500][gc,init] Periodic GC: Disabled +[2023-11-06T13:26:30.805-0500][gc,metaspace] CDS archive(s) mapped at: [0x00007f77f7000000-0x00007f77f7beb000-0x00007f77f7beb000), size 12496896, SharedBaseAddress: 0x00007f77f7000000, ArchiveRelocationMode: 1. 
+[2023-11-06T13:26:30.805-0500][gc,metaspace] Compressed class space mapped at: 0x00007f77f8000000-0x00007f7838000000, reserved size: 1073741824 +[2023-11-06T13:26:30.805-0500][gc,metaspace] Narrow klass base: 0x00007f77f7000000, Narrow klass shift: 0, Narrow klass range: 0x100000000 +[2023-11-06T13:26:31.511-0500][gc,start ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) +[2023-11-06T13:26:31.512-0500][gc,task ] GC(0) Using 10 workers of 10 for evacuation +[2023-11-06T13:26:31.518-0500][gc,phases ] GC(0) Pre Evacuate Collection Set: 0.1ms +[2023-11-06T13:26:31.518-0500][gc,phases ] GC(0) Merge Heap Roots: 0.1ms +[2023-11-06T13:26:31.518-0500][gc,phases ] GC(0) Evacuate Collection Set: 5.7ms +[2023-11-06T13:26:31.518-0500][gc,phases ] GC(0) Post Evacuate Collection Set: 0.5ms +[2023-11-06T13:26:31.518-0500][gc,phases ] GC(0) Other: 0.8ms +[2023-11-06T13:26:31.518-0500][gc,heap ] GC(0) Eden regions: 51->0(44) +[2023-11-06T13:26:31.518-0500][gc,heap ] GC(0) Survivor regions: 0->7(7) +[2023-11-06T13:26:31.518-0500][gc,heap ] GC(0) Old regions: 0->1 +[2023-11-06T13:26:31.518-0500][gc,heap ] GC(0) Archive regions: 2->2 +[2023-11-06T13:26:31.518-0500][gc,heap ] GC(0) Humongous regions: 0->0 +[2023-11-06T13:26:31.518-0500][gc,metaspace] GC(0) Metaspace: 18606K(18816K)->18606K(18816K) NonClass: 16396K(16512K)->16396K(16512K) Class: 2210K(2304K)->2210K(2304K) +[2023-11-06T13:26:31.518-0500][gc ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 51M->8M(1024M) 7.181ms +[2023-11-06T13:26:31.518-0500][gc,cpu ] GC(0) User=0.05s Sys=0.00s Real=0.00s +[2023-11-06T13:26:31.655-0500][gc,start ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) +[2023-11-06T13:26:31.655-0500][gc,task ] GC(1) Using 10 workers of 10 for evacuation +[2023-11-06T13:26:31.662-0500][gc,phases ] GC(1) Pre Evacuate Collection Set: 0.1ms +[2023-11-06T13:26:31.662-0500][gc,phases ] GC(1) Merge Heap Roots: 0.1ms +[2023-11-06T13:26:31.662-0500][gc,phases ] GC(1) Evacuate Collection Set: 6.6ms +[2023-11-06T13:26:31.662-0500][gc,phases ] GC(1) Post Evacuate Collection Set: 0.5ms +[2023-11-06T13:26:31.662-0500][gc,phases ] GC(1) Other: 0.2ms +[2023-11-06T13:26:31.662-0500][gc,heap ] GC(1) Eden regions: 12->0(49) +[2023-11-06T13:26:31.662-0500][gc,heap ] GC(1) Survivor regions: 7->2(7) +[2023-11-06T13:26:31.662-0500][gc,heap ] GC(1) Old regions: 1->8 +[2023-11-06T13:26:31.662-0500][gc,heap ] GC(1) Archive regions: 2->2 +[2023-11-06T13:26:31.662-0500][gc,heap ] GC(1) Humongous regions: 129->129 +[2023-11-06T13:26:31.662-0500][gc,metaspace] GC(1) Metaspace: 21299K(21504K)->21299K(21504K) NonClass: 18739K(18816K)->18739K(18816K) Class: 2559K(2688K)->2559K(2688K) +[2023-11-06T13:26:31.662-0500][gc ] GC(1) Pause Young (Concurrent Start) (Metadata GC Threshold) 149M->138M(1024M) 7.560ms +[2023-11-06T13:26:31.662-0500][gc,cpu ] GC(1) User=0.00s Sys=0.06s Real=0.01s +[2023-11-06T13:26:31.662-0500][gc ] GC(2) Concurrent Mark Cycle +[2023-11-06T13:26:31.662-0500][gc,marking ] GC(2) Concurrent Clear Claimed Marks +[2023-11-06T13:26:31.662-0500][gc,marking ] GC(2) Concurrent Clear Claimed Marks 0.011ms +[2023-11-06T13:26:31.662-0500][gc,marking ] GC(2) Concurrent Scan Root Regions +[2023-11-06T13:26:31.664-0500][gc,marking ] GC(2) Concurrent Scan Root Regions 1.308ms +[2023-11-06T13:26:31.664-0500][gc,marking ] GC(2) Concurrent Mark +[2023-11-06T13:26:31.664-0500][gc,marking ] GC(2) Concurrent Mark From Roots +[2023-11-06T13:26:31.664-0500][gc,task ] GC(2) Using 3 workers of 3 for marking +[2023-11-06T13:26:31.665-0500][gc,marking ] GC(2) 
Concurrent Mark From Roots 1.090ms +[2023-11-06T13:26:31.665-0500][gc,marking ] GC(2) Concurrent Preclean +[2023-11-06T13:26:31.665-0500][gc,marking ] GC(2) Concurrent Preclean 0.055ms +[2023-11-06T13:26:31.665-0500][gc,start ] GC(2) Pause Remark +[2023-11-06T13:26:31.666-0500][gc ] GC(2) Pause Remark 139M->139M(1024M) 1.293ms +[2023-11-06T13:26:31.666-0500][gc,cpu ] GC(2) User=0.00s Sys=0.01s Real=0.00s +[2023-11-06T13:26:31.666-0500][gc,marking ] GC(2) Concurrent Mark 2.671ms +[2023-11-06T13:26:31.666-0500][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets +[2023-11-06T13:26:31.668-0500][gc,marking ] GC(2) Concurrent Rebuild Remembered Sets 1.262ms +[2023-11-06T13:26:31.668-0500][gc,start ] GC(2) Pause Cleanup +[2023-11-06T13:26:31.668-0500][gc ] GC(2) Pause Cleanup 139M->139M(1024M) 0.181ms +[2023-11-06T13:26:31.668-0500][gc,cpu ] GC(2) User=0.00s Sys=0.00s Real=0.00s +[2023-11-06T13:26:31.668-0500][gc,marking ] GC(2) Concurrent Cleanup for Next Mark +[2023-11-06T13:26:31.669-0500][gc,marking ] GC(2) Concurrent Cleanup for Next Mark 1.442ms +[2023-11-06T13:26:31.669-0500][gc ] GC(2) Concurrent Mark Cycle 7.132ms +[2023-11-06T13:26:32.046-0500][gc,start ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) +[2023-11-06T13:26:32.046-0500][gc,task ] GC(3) Using 10 workers of 10 for evacuation +[2023-11-06T13:26:32.048-0500][gc,phases ] GC(3) Pre Evacuate Collection Set: 0.1ms +[2023-11-06T13:26:32.048-0500][gc,phases ] GC(3) Merge Heap Roots: 0.1ms +[2023-11-06T13:26:32.048-0500][gc,phases ] GC(3) Evacuate Collection Set: 1.6ms +[2023-11-06T13:26:32.048-0500][gc,phases ] GC(3) Post Evacuate Collection Set: 0.3ms +[2023-11-06T13:26:32.048-0500][gc,phases ] GC(3) Other: 0.1ms +[2023-11-06T13:26:32.048-0500][gc,heap ] GC(3) Eden regions: 49->0(45) +[2023-11-06T13:26:32.048-0500][gc,heap ] GC(3) Survivor regions: 2->6(7) +[2023-11-06T13:26:32.048-0500][gc,heap ] GC(3) Old regions: 8->8 +[2023-11-06T13:26:32.048-0500][gc,heap ] GC(3) Archive regions: 2->2 +[2023-11-06T13:26:32.048-0500][gc,heap ] GC(3) Humongous regions: 129->129 +[2023-11-06T13:26:32.048-0500][gc,metaspace] GC(3) Metaspace: 32579K(32832K)->32579K(32832K) NonClass: 28947K(29120K)->28947K(29120K) Class: 3631K(3712K)->3631K(3712K) +[2023-11-06T13:26:32.048-0500][gc ] GC(3) Pause Young (Normal) (G1 Evacuation Pause) 187M->143M(1024M) 2.247ms +[2023-11-06T13:26:32.048-0500][gc,cpu ] GC(3) User=0.02s Sys=0.00s Real=0.00s +[2023-11-06T13:27:16.575-0500][gc,start ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) +[2023-11-06T13:27:16.575-0500][gc,task ] GC(4) Using 10 workers of 10 for evacuation +[2023-11-06T13:27:16.580-0500][gc,phases ] GC(4) Pre Evacuate Collection Set: 0.1ms +[2023-11-06T13:27:16.580-0500][gc,phases ] GC(4) Merge Heap Roots: 0.0ms +[2023-11-06T13:27:16.580-0500][gc,phases ] GC(4) Evacuate Collection Set: 4.0ms +[2023-11-06T13:27:16.580-0500][gc,phases ] GC(4) Post Evacuate Collection Set: 0.4ms +[2023-11-06T13:27:16.580-0500][gc,phases ] GC(4) Other: 0.1ms +[2023-11-06T13:27:16.580-0500][gc,heap ] GC(4) Eden regions: 17->0(49) +[2023-11-06T13:27:16.580-0500][gc,heap ] GC(4) Survivor regions: 6->2(7) +[2023-11-06T13:27:16.580-0500][gc,heap ] GC(4) Old regions: 8->14 +[2023-11-06T13:27:16.580-0500][gc,heap ] GC(4) Archive regions: 2->2 +[2023-11-06T13:27:16.580-0500][gc,heap ] GC(4) Humongous regions: 129->129 +[2023-11-06T13:27:16.580-0500][gc,metaspace] GC(4) Metaspace: 35667K(35968K)->35667K(35968K) NonClass: 31529K(31680K)->31529K(31680K) Class: 4138K(4288K)->4138K(4288K) 
+[2023-11-06T13:27:16.580-0500][gc ] GC(4) Pause Young (Concurrent Start) (Metadata GC Threshold) 159M->144M(1024M) 4.693ms +[2023-11-06T13:27:16.580-0500][gc,cpu ] GC(4) User=0.04s Sys=0.01s Real=0.01s +[2023-11-06T13:27:16.580-0500][gc ] GC(5) Concurrent Mark Cycle +[2023-11-06T13:27:16.580-0500][gc,marking ] GC(5) Concurrent Clear Claimed Marks +[2023-11-06T13:27:16.580-0500][gc,marking ] GC(5) Concurrent Clear Claimed Marks 0.047ms +[2023-11-06T13:27:16.580-0500][gc,marking ] GC(5) Concurrent Scan Root Regions +[2023-11-06T13:27:16.582-0500][gc,marking ] GC(5) Concurrent Scan Root Regions 1.685ms +[2023-11-06T13:27:16.582-0500][gc,marking ] GC(5) Concurrent Mark +[2023-11-06T13:27:16.582-0500][gc,marking ] GC(5) Concurrent Mark From Roots +[2023-11-06T13:27:16.582-0500][gc,task ] GC(5) Using 3 workers of 3 for marking +[2023-11-06T13:27:16.585-0500][gc,marking ] GC(5) Concurrent Mark From Roots 3.066ms +[2023-11-06T13:27:16.585-0500][gc,marking ] GC(5) Concurrent Preclean +[2023-11-06T13:27:16.585-0500][gc,marking ] GC(5) Concurrent Preclean 0.080ms +[2023-11-06T13:27:16.585-0500][gc,start ] GC(5) Pause Remark +[2023-11-06T13:27:16.586-0500][gc ] GC(5) Pause Remark 144M->144M(1024M) 0.978ms +[2023-11-06T13:27:16.586-0500][gc,cpu ] GC(5) User=0.01s Sys=0.00s Real=0.00s +[2023-11-06T13:27:16.586-0500][gc,marking ] GC(5) Concurrent Mark 4.710ms +[2023-11-06T13:27:16.586-0500][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets +[2023-11-06T13:27:16.589-0500][gc,marking ] GC(5) Concurrent Rebuild Remembered Sets 2.484ms +[2023-11-06T13:27:16.589-0500][gc,start ] GC(5) Pause Cleanup +[2023-11-06T13:27:16.589-0500][gc ] GC(5) Pause Cleanup 145M->145M(1024M) 0.124ms +[2023-11-06T13:27:16.589-0500][gc,cpu ] GC(5) User=0.00s Sys=0.00s Real=0.00s +[2023-11-06T13:27:16.589-0500][gc,marking ] GC(5) Concurrent Cleanup for Next Mark +[2023-11-06T13:27:16.592-0500][gc,marking ] GC(5) Concurrent Cleanup for Next Mark 2.206ms +[2023-11-06T13:27:16.592-0500][gc ] GC(5) Concurrent Mark Cycle 11.834ms diff --git a/logs/log-cleaner.log b/logs/log-cleaner.log index 36119a2..75ad872 100644 --- a/logs/log-cleaner.log +++ b/logs/log-cleaner.log @@ -1,3 +1 @@ -[2023-11-03 19:38:20,717] INFO Starting the log cleaner (kafka.log.LogCleaner) -[2023-11-03 19:40:48,894] INFO Shutting down the log cleaner. 
(kafka.log.LogCleaner) -[2023-11-03 19:45:29,067] INFO Starting the log cleaner (kafka.log.LogCleaner) +[2023-11-06 13:26:31,617] INFO Starting the log cleaner (kafka.log.LogCleaner) diff --git a/logs/server.log b/logs/server.log index 04f6372..2ed5810 100644 --- a/logs/server.log +++ b/logs/server.log @@ -1,127 +1,127 @@ -[2023-11-03 19:38:10,839] INFO Reading configuration from: ./config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,841] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,841] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,841] INFO observerMasterPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,841] INFO metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,844] INFO autopurge.snapRetainCount set to 3 (org.apache.zookeeper.server.DatadirCleanupManager) -[2023-11-03 19:38:10,844] INFO autopurge.purgeInterval set to 0 (org.apache.zookeeper.server.DatadirCleanupManager) -[2023-11-03 19:38:10,844] INFO Purge task is not scheduled. (org.apache.zookeeper.server.DatadirCleanupManager) -[2023-11-03 19:38:10,844] WARN Either no config or no quorum defined in config, running in standalone mode (org.apache.zookeeper.server.quorum.QuorumPeerMain) -[2023-11-03 19:38:10,845] INFO Log4j 1.2 jmx support not found; jmx disabled. (org.apache.zookeeper.jmx.ManagedUtil) -[2023-11-03 19:38:10,845] INFO Reading configuration from: ./config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,846] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,846] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,846] INFO observerMasterPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,846] INFO metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:38:10,846] INFO Starting server (org.apache.zookeeper.server.ZooKeeperServerMain) -[2023-11-03 19:38:10,854] INFO ServerMetrics initialized with provider org.apache.zookeeper.metrics.impl.DefaultMetricsProvider@e50a6f6 (org.apache.zookeeper.server.ServerMetrics) -[2023-11-03 19:38:10,857] INFO ACL digest algorithm is: SHA1 (org.apache.zookeeper.server.auth.DigestAuthenticationProvider) -[2023-11-03 19:38:10,857] INFO zookeeper.DigestAuthenticationProvider.enabled = true (org.apache.zookeeper.server.auth.DigestAuthenticationProvider) -[2023-11-03 19:38:10,861] INFO zookeeper.snapshot.trust.empty : false (org.apache.zookeeper.server.persistence.FileTxnSnapLog) -[2023-11-03 19:38:10,868] INFO (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,868] INFO ______ _ (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,868] INFO |___ / | | (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,868] INFO / / ___ ___ | | __ ___ ___ _ __ ___ _ __ (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,868] INFO / / / _ \ / _ \ | |/ / / _ \ / _ \ | '_ \ / _ \ | '__| (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,868] INFO / /__ | (_) | | (_) | | < | __/ | __/ | |_) | | 
__/ | | (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,868] INFO /_____| \___/ \___/ |_|\_\ \___| \___| | .__/ \___| |_| (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,869] INFO | | (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,869] INFO |_| (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,869] INFO (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:zookeeper.version=3.8.2-139d619b58292d7734b4fc83a0f44be4e7b0c986, built on 2023-07-05 19:24 UTC (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:host.name=ThinkPadP53 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.version=17.0.6 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.vendor=Eclipse Adoptium (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.home=/opt/openjdk-bin-17.0.6_p10 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.class.path=/scratch/Exemple_Kafka/bin/../libs/activation-1.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/argparse4j-0.7.0.jar:/scratch/Exemple_Kafka/bin/../libs/audience-annotations-0.12.0.jar:/scratch/Exemple_Kafka/bin/../libs/caffeine-2.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/checker-qual-3.19.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-beanutils-1.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-cli-1.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-collections-3.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-digester-2.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-io-2.11.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-lang3-3.8.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-logging-1.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-validator-1.7.jar:/scratch/Exemple_Kafka/bin/../libs/connect-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-basic-auth-extension-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-json-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-client-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-runtime-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-transforms-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/error_prone_annotations-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-api-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-locator-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-utils-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-core-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-databind-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-dataformat-csv-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-datatype-jdk8-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-base-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-json-provider-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-jaxb-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-scala_2.13-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.activation-api-1.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.inject-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.
validation-api-2.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.xml.bind-api-2.3.3.jar:/scratch/Exemple_Kafka/bin/../libs/javassist-3.29.2-GA.jar:/scratch/Exemple_Kafka/bin/../libs/javax.activation-api-1.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.annotation-api-1.3.2.jar:/scratch/Exemple_Kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/jaxb-api-2.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-client-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-common-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-core-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-hk2-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-server-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-client-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-continuation-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-http-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-io-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-security-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-server-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlet-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlets-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-ajax-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jline-3.22.0.jar:/scratch/Exemple_Kafka/bin/../libs/jopt-simple-5.0.4.jar:/scratch/Exemple_Kafka/bin/../libs/jose4j-0.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/jsr305-3.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-clients-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-group-coordinator-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-log4j-appender-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-metadata-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-raft-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-server-common-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-shell-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-examples-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-scala_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-test-utils-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/lz4-java-1.8.0.jar:/scratch/Exemple_Kafka/bin/../libs/maven-artifact-3.8.8.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-2.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-4.1.12.1.jar:/scratch/Exemple_Kafka/bin/../libs/netty-buffer-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-codec-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-handler-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-resolver-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-classes-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs
/netty-transport-native-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-unix-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/scratch/Exemple_Kafka/bin/../libs/paranamer-2.8.jar:/scratch/Exemple_Kafka/bin/../libs/pcollections-4.0.1.jar:/scratch/Exemple_Kafka/bin/../libs/plexus-utils-3.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/reflections-0.10.2.jar:/scratch/Exemple_Kafka/bin/../libs/reload4j-1.2.25.jar:/scratch/Exemple_Kafka/bin/../libs/rocksdbjni-7.9.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-collection-compat_2.13-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/scala-java8-compat_2.13-1.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-library-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/scala-logging_2.13-3.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/scala-reflect-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-api-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-reload4j-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/snappy-java-1.1.10.4.jar:/scratch/Exemple_Kafka/bin/../libs/swagger-annotations-2.2.8.jar:/scratch/Exemple_Kafka/bin/../libs/trogdor-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-jute-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zstd-jni-1.5.5-1.jar (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.io.tmpdir=/tmp (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:java.compiler= (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,872] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:os.version=6.4.3-cachyosGentooThinkPadP53 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:user.name=memartel (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:user.home=/home/memartel (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:user.dir=/scratch/Exemple_Kafka (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:os.memory.free=494MB (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO zookeeper.enableEagerACLCheck = false (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO zookeeper.digest.enabled = true (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO zookeeper.closeSessionTxn.enabled = true (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO zookeeper.flushDelay = 0 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO zookeeper.maxWriteQueuePollTime = 0 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,873] INFO zookeeper.maxBatchSize=1000 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 
19:38:10,873] INFO zookeeper.intBufferStartingSizeBytes = 1024 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,874] INFO Weighed connection throttling is disabled (org.apache.zookeeper.server.BlueThrottle) -[2023-11-03 19:38:10,875] INFO minSessionTimeout set to 6000 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,876] INFO maxSessionTimeout set to 60000 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,877] INFO getData response cache size is initialized with value 400. (org.apache.zookeeper.server.ResponseCache) -[2023-11-03 19:38:10,878] INFO getChildren response cache size is initialized with value 400. (org.apache.zookeeper.server.ResponseCache) -[2023-11-03 19:38:10,879] INFO zookeeper.pathStats.slotCapacity = 60 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:38:10,879] INFO zookeeper.pathStats.slotDuration = 15 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:38:10,879] INFO zookeeper.pathStats.maxDepth = 6 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:38:10,879] INFO zookeeper.pathStats.initialDelay = 5 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:38:10,879] INFO zookeeper.pathStats.delay = 5 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:38:10,879] INFO zookeeper.pathStats.enabled = false (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:38:10,882] INFO The max bytes for all large requests are set to 104857600 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,882] INFO The large request threshold is set to -1 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,882] INFO zookeeper.enforce.auth.enabled = false (org.apache.zookeeper.server.AuthenticationHelper) -[2023-11-03 19:38:10,882] INFO zookeeper.enforce.auth.schemes = [] (org.apache.zookeeper.server.AuthenticationHelper) -[2023-11-03 19:38:10,882] INFO Created server with tickTime 3000 ms minSessionTimeout 6000 ms maxSessionTimeout 60000 ms clientPortListenBacklog -1 datadir /tmp/zookeeper/version-2 snapdir /tmp/zookeeper/version-2 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,887] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) -[2023-11-03 19:38:10,888] WARN maxCnxns is not configured, using default value 0. (org.apache.zookeeper.server.ServerCnxnFactory) -[2023-11-03 19:38:10,889] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 2 selector thread(s), 24 worker threads, and 64 kB direct buffers. 
(org.apache.zookeeper.server.NIOServerCnxnFactory) -[2023-11-03 19:38:10,896] INFO binding to port 0.0.0.0/0.0.0.0:2181 (org.apache.zookeeper.server.NIOServerCnxnFactory) -[2023-11-03 19:38:10,908] INFO Using org.apache.zookeeper.server.watch.WatchManager as watch manager (org.apache.zookeeper.server.watch.WatchManagerFactory) -[2023-11-03 19:38:10,908] INFO Using org.apache.zookeeper.server.watch.WatchManager as watch manager (org.apache.zookeeper.server.watch.WatchManagerFactory) -[2023-11-03 19:38:10,908] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) -[2023-11-03 19:38:10,908] INFO zookeeper.commitLogCount=500 (org.apache.zookeeper.server.ZKDatabase) -[2023-11-03 19:38:10,914] INFO zookeeper.snapshot.compression.method = CHECKED (org.apache.zookeeper.server.persistence.SnapStream) -[2023-11-03 19:38:10,914] INFO Snapshotting: 0x0 to /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) -[2023-11-03 19:38:10,917] INFO Snapshot loaded in 8 ms, highest zxid is 0x0, digest is 1371985504 (org.apache.zookeeper.server.ZKDatabase) -[2023-11-03 19:38:10,918] INFO Snapshotting: 0x0 to /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) -[2023-11-03 19:38:10,918] INFO Snapshot taken in 1 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:38:10,926] INFO PrepRequestProcessor (sid:0) started, reconfigEnabled=false (org.apache.zookeeper.server.PrepRequestProcessor) -[2023-11-03 19:38:10,926] INFO zookeeper.request_throttler.shutdownTimeout = 10000 ms (org.apache.zookeeper.server.RequestThrottler) -[2023-11-03 19:38:10,939] INFO Using checkIntervalMs=60000 maxPerMinute=10000 maxNeverUsedIntervalMs=0 (org.apache.zookeeper.server.ContainerManager) -[2023-11-03 19:38:10,940] INFO ZooKeeper audit is disabled. (org.apache.zookeeper.audit.ZKAuditProvider) -[2023-11-03 19:38:20,073] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) -[2023-11-03 19:38:20,292] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) -[2023-11-03 19:38:20,354] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) -[2023-11-03 19:38:20,355] INFO starting (kafka.server.KafkaServer) -[2023-11-03 19:38:20,355] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) -[2023-11-03 19:38:20,366] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. 
(kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:38:20,371] INFO Client environment:zookeeper.version=3.8.2-139d619b58292d7734b4fc83a0f44be4e7b0c986, built on 2023-07-05 19:24 UTC (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:host.name=ThinkPadP53 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.version=17.0.6 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.vendor=Eclipse Adoptium (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.home=/opt/openjdk-bin-17.0.6_p10 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.class.path=/scratch/Exemple_Kafka/bin/../libs/activation-1.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/argparse4j-0.7.0.jar:/scratch/Exemple_Kafka/bin/../libs/audience-annotations-0.12.0.jar:/scratch/Exemple_Kafka/bin/../libs/caffeine-2.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/checker-qual-3.19.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-beanutils-1.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-cli-1.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-collections-3.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-digester-2.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-io-2.11.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-lang3-3.8.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-logging-1.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-validator-1.7.jar:/scratch/Exemple_Kafka/bin/../libs/connect-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-basic-auth-extension-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-json-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-client-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-runtime-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-transforms-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/error_prone_annotations-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-api-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-locator-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-utils-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-core-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-databind-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-dataformat-csv-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-datatype-jdk8-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-base-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-json-provider-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-jaxb-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-scala_2.13-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.activation-api-1.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.inject-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.xml.bind-api-2.3.3.jar:/scratch/Exemple_Kafka/bin/../libs/javassist-3.29.2-GA.jar:/scratch/Exemple_Kafka/bin/../libs/javax.activation-api-1.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.annotation-api-1.3.2.jar:/scratch/Exemple_Kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.ws.rs-api-2.
1.1.jar:/scratch/Exemple_Kafka/bin/../libs/jaxb-api-2.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-client-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-common-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-core-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-hk2-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-server-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-client-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-continuation-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-http-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-io-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-security-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-server-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlet-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlets-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-ajax-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jline-3.22.0.jar:/scratch/Exemple_Kafka/bin/../libs/jopt-simple-5.0.4.jar:/scratch/Exemple_Kafka/bin/../libs/jose4j-0.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/jsr305-3.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-clients-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-group-coordinator-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-log4j-appender-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-metadata-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-raft-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-server-common-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-shell-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-examples-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-scala_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-test-utils-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/lz4-java-1.8.0.jar:/scratch/Exemple_Kafka/bin/../libs/maven-artifact-3.8.8.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-2.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-4.1.12.1.jar:/scratch/Exemple_Kafka/bin/../libs/netty-buffer-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-codec-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-handler-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-resolver-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-classes-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-unix-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/scratch/Exemple_Kafka/bin/../libs/paranamer-2.8.jar:/scratch/Exemple_Kafka/bin/../libs/pcollections-4.0.1.jar:/scratch/Exemple_Kafka/bin/../libs/plexus-utils-3.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/reflections-0.10.2.jar:/scratch/Exemple_Kafka/bin/../lib
s/reload4j-1.2.25.jar:/scratch/Exemple_Kafka/bin/../libs/rocksdbjni-7.9.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-collection-compat_2.13-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/scala-java8-compat_2.13-1.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-library-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/scala-logging_2.13-3.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/scala-reflect-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-api-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-reload4j-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/snappy-java-1.1.10.4.jar:/scratch/Exemple_Kafka/bin/../libs/swagger-annotations-2.2.8.jar:/scratch/Exemple_Kafka/bin/../libs/trogdor-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-jute-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zstd-jni-1.5.5-1.jar (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:os.version=6.4.3-cachyosGentooThinkPadP53 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:user.name=memartel (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,371] INFO Client environment:user.home=/home/memartel (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,372] INFO Client environment:user.dir=/scratch/Exemple_Kafka (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,372] INFO Client environment:os.memory.free=987MB (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,372] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,372] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,374] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@3fce8fd9 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:38:20,381] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) -[2023-11-03 19:38:20,385] INFO zookeeper.request.timeout value is 0. feature enabled=false (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:38:20,386] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:38:20,387] INFO Opening socket connection to server localhost/127.0.0.1:2181. (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:38:20,389] INFO Socket connection established, initiating session, client: /127.0.0.1:41842, server: localhost/127.0.0.1:2181 (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:38:20,397] INFO Creating new log file: log.1 (org.apache.zookeeper.server.persistence.FileTxnLog) -[2023-11-03 19:38:20,402] INFO Session establishment complete on server localhost/127.0.0.1:2181, session id = 0x100008497380000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:38:20,404] INFO [ZooKeeperClient Kafka server] Connected. 
(kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:38:20,606] INFO Cluster ID = 8EdUX-SkTwmB2aSLdVSXoQ (kafka.server.KafkaServer) -[2023-11-03 19:38:20,608] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint) -[2023-11-03 19:38:20,639] INFO KafkaConfig values: +[2023-11-06 13:26:24,196] INFO Reading configuration from: ./config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,198] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,198] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,198] INFO observerMasterPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,198] INFO metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,199] INFO autopurge.snapRetainCount set to 3 (org.apache.zookeeper.server.DatadirCleanupManager) +[2023-11-06 13:26:24,199] INFO autopurge.purgeInterval set to 0 (org.apache.zookeeper.server.DatadirCleanupManager) +[2023-11-06 13:26:24,200] INFO Purge task is not scheduled. (org.apache.zookeeper.server.DatadirCleanupManager) +[2023-11-06 13:26:24,200] WARN Either no config or no quorum defined in config, running in standalone mode (org.apache.zookeeper.server.quorum.QuorumPeerMain) +[2023-11-06 13:26:24,201] INFO Log4j 1.2 jmx support not found; jmx disabled. (org.apache.zookeeper.jmx.ManagedUtil) +[2023-11-06 13:26:24,201] INFO Reading configuration from: ./config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,202] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,202] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,202] INFO observerMasterPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,202] INFO metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider (org.apache.zookeeper.server.quorum.QuorumPeerConfig) +[2023-11-06 13:26:24,202] INFO Starting server (org.apache.zookeeper.server.ZooKeeperServerMain) +[2023-11-06 13:26:24,212] INFO ServerMetrics initialized with provider org.apache.zookeeper.metrics.impl.DefaultMetricsProvider@e50a6f6 (org.apache.zookeeper.server.ServerMetrics) +[2023-11-06 13:26:24,214] INFO ACL digest algorithm is: SHA1 (org.apache.zookeeper.server.auth.DigestAuthenticationProvider) +[2023-11-06 13:26:24,214] INFO zookeeper.DigestAuthenticationProvider.enabled = true (org.apache.zookeeper.server.auth.DigestAuthenticationProvider) +[2023-11-06 13:26:24,217] INFO zookeeper.snapshot.trust.empty : false (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2023-11-06 13:26:24,224] INFO (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO ______ _ (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO |___ / | | (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO / / ___ ___ | | __ ___ ___ _ __ ___ _ __ (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO / / / _ \ / _ \ | |/ / / _ \ / _ \ | '_ \ / _ \ | '__| (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO / /__ | (_) | | (_) | | < | __/ | __/ | |_) | | __/ | | 
(org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO /_____| \___/ \___/ |_|\_\ \___| \___| | .__/ \___| |_| (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO | | (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO |_| (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,224] INFO (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,225] INFO Server environment:zookeeper.version=3.8.2-139d619b58292d7734b4fc83a0f44be4e7b0c986, built on 2023-07-05 19:24 UTC (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,225] INFO Server environment:host.name=localhost (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,225] INFO Server environment:java.version=17.0.8.1 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,225] INFO Server environment:java.vendor=Eclipse Adoptium (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:java.home=/opt/openjdk-bin-17.0.8.1_p1 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:java.class.path=/scratch/repos/Exemple_Kafka/bin/../libs/activation-1.1.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/argparse4j-0.7.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/audience-annotations-0.12.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/caffeine-2.9.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/checker-qual-3.19.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-beanutils-1.9.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-cli-1.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-collections-3.2.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-digester-2.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-io-2.11.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-lang3-3.8.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-logging-1.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-validator-1.7.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-api-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-basic-auth-extension-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-json-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-mirror-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-mirror-client-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-runtime-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-transforms-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/error_prone_annotations-2.10.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/hk2-api-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/hk2-locator-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/hk2-utils-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-annotations-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-core-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-databind-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-dataformat-csv-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-datatype-jdk8-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-jaxrs-base-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-jaxrs-json-provider-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-module-jaxb-annotations-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-module-scala_2.13-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/j
akarta.activation-api-1.2.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.inject-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.xml.bind-api-2.3.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javassist-3.29.2-GA.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.activation-api-1.2.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.annotation-api-1.3.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jaxb-api-2.3.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-client-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-common-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-container-servlet-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-container-servlet-core-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-hk2-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-server-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-client-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-continuation-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-http-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-io-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-security-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-server-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-servlet-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-servlets-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-util-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-util-ajax-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jline-3.22.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jopt-simple-5.0.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jose4j-0.9.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jsr305-3.0.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-clients-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-group-coordinator-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-log4j-appender-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-metadata-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-raft-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-server-common-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-shell-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-storage-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-storage-api-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-examples-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-scala_2.13-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-test-utils-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-tools-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-tools-api-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka_2.13-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/lz4-java-1.8.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/maven-artifact-3.8.8.jar:/scratch/repos/Exemple_Kafka/bin/../libs/metrics-core-2.2.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/metrics-core-4.1.12.1.jar:/s
cratch/repos/Exemple_Kafka/bin/../libs/netty-buffer-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-codec-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-common-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-handler-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-resolver-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-classes-epoll-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-native-epoll-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-native-unix-common-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/paranamer-2.8.jar:/scratch/repos/Exemple_Kafka/bin/../libs/pcollections-4.0.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/plexus-utils-3.3.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/reflections-0.10.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/reload4j-1.2.25.jar:/scratch/repos/Exemple_Kafka/bin/../libs/rocksdbjni-7.9.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-collection-compat_2.13-2.10.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-java8-compat_2.13-1.0.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-library-2.13.11.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-logging_2.13-3.9.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-reflect-2.13.11.jar:/scratch/repos/Exemple_Kafka/bin/../libs/slf4j-api-1.7.36.jar:/scratch/repos/Exemple_Kafka/bin/../libs/slf4j-reload4j-1.7.36.jar:/scratch/repos/Exemple_Kafka/bin/../libs/snappy-java-1.1.10.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/swagger-annotations-2.2.8.jar:/scratch/repos/Exemple_Kafka/bin/../libs/trogdor-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/zookeeper-3.8.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/zookeeper-jute-3.8.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/zstd-jni-1.5.5-1.jar (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:java.io.tmpdir=/tmp (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:java.compiler= (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:os.version=6.4.3-cachyosGentooThinkPadP53 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,226] INFO Server environment:user.name=memartel (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO Server environment:user.home=/home/memartel (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO Server environment:user.dir=/scratch/repos/Exemple_Kafka (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO Server environment:os.memory.free=494MB (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) 
+[2023-11-06 13:26:24,227] INFO zookeeper.enableEagerACLCheck = false (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO zookeeper.digest.enabled = true (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO zookeeper.closeSessionTxn.enabled = true (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO zookeeper.flushDelay = 0 ms (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO zookeeper.maxWriteQueuePollTime = 0 ms (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO zookeeper.maxBatchSize=1000 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,227] INFO zookeeper.intBufferStartingSizeBytes = 1024 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,228] INFO Weighed connection throttling is disabled (org.apache.zookeeper.server.BlueThrottle) +[2023-11-06 13:26:24,229] INFO minSessionTimeout set to 6000 ms (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,229] INFO maxSessionTimeout set to 60000 ms (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,230] INFO getData response cache size is initialized with value 400. (org.apache.zookeeper.server.ResponseCache) +[2023-11-06 13:26:24,230] INFO getChildren response cache size is initialized with value 400. (org.apache.zookeeper.server.ResponseCache) +[2023-11-06 13:26:24,231] INFO zookeeper.pathStats.slotCapacity = 60 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) +[2023-11-06 13:26:24,231] INFO zookeeper.pathStats.slotDuration = 15 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) +[2023-11-06 13:26:24,231] INFO zookeeper.pathStats.maxDepth = 6 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) +[2023-11-06 13:26:24,231] INFO zookeeper.pathStats.initialDelay = 5 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) +[2023-11-06 13:26:24,231] INFO zookeeper.pathStats.delay = 5 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) +[2023-11-06 13:26:24,231] INFO zookeeper.pathStats.enabled = false (org.apache.zookeeper.server.util.RequestPathMetricsCollector) +[2023-11-06 13:26:24,233] INFO The max bytes for all large requests are set to 104857600 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,233] INFO The large request threshold is set to -1 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,234] INFO zookeeper.enforce.auth.enabled = false (org.apache.zookeeper.server.AuthenticationHelper) +[2023-11-06 13:26:24,234] INFO zookeeper.enforce.auth.schemes = [] (org.apache.zookeeper.server.AuthenticationHelper) +[2023-11-06 13:26:24,234] INFO Created server with tickTime 3000 ms minSessionTimeout 6000 ms maxSessionTimeout 60000 ms clientPortListenBacklog -1 datadir /tmp/zookeeper/version-2 snapdir /tmp/zookeeper/version-2 (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,238] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) +[2023-11-06 13:26:24,238] WARN maxCnxns is not configured, using default value 0. (org.apache.zookeeper.server.ServerCnxnFactory) +[2023-11-06 13:26:24,239] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 2 selector thread(s), 24 worker threads, and 64 kB direct buffers. 
(org.apache.zookeeper.server.NIOServerCnxnFactory) +[2023-11-06 13:26:24,244] INFO binding to port 0.0.0.0/0.0.0.0:2181 (org.apache.zookeeper.server.NIOServerCnxnFactory) +[2023-11-06 13:26:24,254] INFO Using org.apache.zookeeper.server.watch.WatchManager as watch manager (org.apache.zookeeper.server.watch.WatchManagerFactory) +[2023-11-06 13:26:24,254] INFO Using org.apache.zookeeper.server.watch.WatchManager as watch manager (org.apache.zookeeper.server.watch.WatchManagerFactory) +[2023-11-06 13:26:24,254] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) +[2023-11-06 13:26:24,254] INFO zookeeper.commitLogCount=500 (org.apache.zookeeper.server.ZKDatabase) +[2023-11-06 13:26:24,260] INFO zookeeper.snapshot.compression.method = CHECKED (org.apache.zookeeper.server.persistence.SnapStream) +[2023-11-06 13:26:24,260] INFO Snapshotting: 0x0 to /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2023-11-06 13:26:24,262] INFO Snapshot loaded in 8 ms, highest zxid is 0x0, digest is 1371985504 (org.apache.zookeeper.server.ZKDatabase) +[2023-11-06 13:26:24,263] INFO Snapshotting: 0x0 to /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileTxnSnapLog) +[2023-11-06 13:26:24,263] INFO Snapshot taken in 0 ms (org.apache.zookeeper.server.ZooKeeperServer) +[2023-11-06 13:26:24,270] INFO PrepRequestProcessor (sid:0) started, reconfigEnabled=false (org.apache.zookeeper.server.PrepRequestProcessor) +[2023-11-06 13:26:24,271] INFO zookeeper.request_throttler.shutdownTimeout = 10000 ms (org.apache.zookeeper.server.RequestThrottler) +[2023-11-06 13:26:24,282] INFO Using checkIntervalMs=60000 maxPerMinute=10000 maxNeverUsedIntervalMs=0 (org.apache.zookeeper.server.ContainerManager) +[2023-11-06 13:26:24,282] INFO ZooKeeper audit is disabled. (org.apache.zookeeper.audit.ZKAuditProvider) +[2023-11-06 13:26:31,049] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) +[2023-11-06 13:26:31,233] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) +[2023-11-06 13:26:31,292] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) +[2023-11-06 13:26:31,293] INFO starting (kafka.server.KafkaServer) +[2023-11-06 13:26:31,293] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) +[2023-11-06 13:26:31,304] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. 
(kafka.zookeeper.ZooKeeperClient) +[2023-11-06 13:26:31,308] INFO Client environment:zookeeper.version=3.8.2-139d619b58292d7734b4fc83a0f44be4e7b0c986, built on 2023-07-05 19:24 UTC (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,308] INFO Client environment:host.name=localhost (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,308] INFO Client environment:java.version=17.0.8.1 (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,308] INFO Client environment:java.vendor=Eclipse Adoptium (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,308] INFO Client environment:java.home=/opt/openjdk-bin-17.0.8.1_p1 (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,308] INFO Client environment:java.class.path=/scratch/repos/Exemple_Kafka/bin/../libs/activation-1.1.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/argparse4j-0.7.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/audience-annotations-0.12.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/caffeine-2.9.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/checker-qual-3.19.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-beanutils-1.9.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-cli-1.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-collections-3.2.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-digester-2.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-io-2.11.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-lang3-3.8.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-logging-1.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/commons-validator-1.7.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-api-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-basic-auth-extension-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-json-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-mirror-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-mirror-client-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-runtime-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/connect-transforms-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/error_prone_annotations-2.10.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/hk2-api-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/hk2-locator-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/hk2-utils-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-annotations-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-core-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-databind-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-dataformat-csv-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-datatype-jdk8-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-jaxrs-base-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-jaxrs-json-provider-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-module-jaxb-annotations-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jackson-module-scala_2.13-2.13.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.activation-api-1.2.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.inject-2.6.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jakarta.xml.bind-api-2.3.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javassist-3.29.2-GA.jar:/
scratch/repos/Exemple_Kafka/bin/../libs/javax.activation-api-1.2.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.annotation-api-1.3.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jaxb-api-2.3.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-client-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-common-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-container-servlet-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-container-servlet-core-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-hk2-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jersey-server-2.39.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-client-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-continuation-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-http-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-io-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-security-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-server-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-servlet-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-servlets-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-util-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jetty-util-ajax-9.4.52.v20230823.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jline-3.22.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jopt-simple-5.0.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jose4j-0.9.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/jsr305-3.0.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-clients-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-group-coordinator-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-log4j-appender-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-metadata-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-raft-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-server-common-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-shell-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-storage-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-storage-api-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-examples-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-scala_2.13-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-streams-test-utils-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-tools-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka-tools-api-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/kafka_2.13-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/lz4-java-1.8.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/maven-artifact-3.8.8.jar:/scratch/repos/Exemple_Kafka/bin/../libs/metrics-core-2.2.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/metrics-core-4.1.12.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-buffer-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-codec-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-common-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-handler-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-resolver-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-4.1.94.Final.jar:/scratch/repos/Exemple_K
afka/bin/../libs/netty-transport-classes-epoll-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-native-epoll-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/netty-transport-native-unix-common-4.1.94.Final.jar:/scratch/repos/Exemple_Kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/scratch/repos/Exemple_Kafka/bin/../libs/paranamer-2.8.jar:/scratch/repos/Exemple_Kafka/bin/../libs/pcollections-4.0.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/plexus-utils-3.3.1.jar:/scratch/repos/Exemple_Kafka/bin/../libs/reflections-0.10.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/reload4j-1.2.25.jar:/scratch/repos/Exemple_Kafka/bin/../libs/rocksdbjni-7.9.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-collection-compat_2.13-2.10.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-java8-compat_2.13-1.0.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-library-2.13.11.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-logging_2.13-3.9.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/scala-reflect-2.13.11.jar:/scratch/repos/Exemple_Kafka/bin/../libs/slf4j-api-1.7.36.jar:/scratch/repos/Exemple_Kafka/bin/../libs/slf4j-reload4j-1.7.36.jar:/scratch/repos/Exemple_Kafka/bin/../libs/snappy-java-1.1.10.4.jar:/scratch/repos/Exemple_Kafka/bin/../libs/swagger-annotations-2.2.8.jar:/scratch/repos/Exemple_Kafka/bin/../libs/trogdor-3.6.0.jar:/scratch/repos/Exemple_Kafka/bin/../libs/zookeeper-3.8.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/zookeeper-jute-3.8.2.jar:/scratch/repos/Exemple_Kafka/bin/../libs/zstd-jni-1.5.5-1.jar (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:os.version=6.4.3-cachyosGentooThinkPadP53 (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:user.name=memartel (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:user.home=/home/memartel (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:user.dir=/scratch/repos/Exemple_Kafka (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:os.memory.free=987MB (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,309] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,311] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@3fce8fd9 (org.apache.zookeeper.ZooKeeper) +[2023-11-06 13:26:31,316] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) +[2023-11-06 13:26:31,321] INFO zookeeper.request.timeout value is 0. feature enabled=false (org.apache.zookeeper.ClientCnxn) +[2023-11-06 13:26:31,322] INFO [ZooKeeperClient Kafka server] Waiting until connected. 
(kafka.zookeeper.ZooKeeperClient)
+[2023-11-06 13:26:31,323] INFO Opening socket connection to server localhost/[0:0:0:0:0:0:0:1]:2181. (org.apache.zookeeper.ClientCnxn)
+[2023-11-06 13:26:31,325] INFO Socket connection established, initiating session, client: /[0:0:0:0:0:0:0:1]:60976, server: localhost/[0:0:0:0:0:0:0:1]:2181 (org.apache.zookeeper.ClientCnxn)
+[2023-11-06 13:26:31,331] INFO Creating new log file: log.1 (org.apache.zookeeper.server.persistence.FileTxnLog)
+[2023-11-06 13:26:31,336] INFO Session establishment complete on server localhost/[0:0:0:0:0:0:0:1]:2181, session id = 0x1000000d8b20000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn)
+[2023-11-06 13:26:31,338] INFO [ZooKeeperClient Kafka server] Connected. (kafka.zookeeper.ZooKeeperClient)
+[2023-11-06 13:26:31,519] INFO Cluster ID = tHet8_ZjRl6pr3Pp3utDPw (kafka.server.KafkaServer)
+[2023-11-06 13:26:31,521] WARN No meta.properties file under dir /tmp/kafka-logs/meta.properties (kafka.server.BrokerMetadataCheckpoint)
+[2023-11-06 13:26:31,553] INFO KafkaConfig values:
 	advertised.listeners = null
 	alter.config.policy.class.name = null
 	alter.log.dirs.replication.quota.window.num = 11
@@ -411,1299 +411,417 @@
 	zookeeper.ssl.truststore.password = null
 	zookeeper.ssl.truststore.type = null
  (kafka.server.KafkaConfig)
-[2023-11-03 19:38:20,668] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
-[2023-11-03 19:38:20,668] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
-[2023-11-03 19:38:20,669] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
-[2023-11-03 19:38:20,670] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
-[2023-11-03 19:38:20,683] INFO Log directory /tmp/kafka-logs not found, creating it. (kafka.log.LogManager)
-[2023-11-03 19:38:20,695] INFO Loading logs from log dirs ArraySeq(/tmp/kafka-logs) (kafka.log.LogManager)
-[2023-11-03 19:38:20,698] INFO No logs found to be loaded in /tmp/kafka-logs (kafka.log.LogManager)
-[2023-11-03 19:38:20,704] INFO Loaded 0 logs in 9ms (kafka.log.LogManager)
-[2023-11-03 19:38:20,705] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
-[2023-11-03 19:38:20,706] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
-[2023-11-03 19:38:20,739] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner$CleanerThread)
-[2023-11-03 19:38:20,749] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
-[2023-11-03 19:38:20,755] INFO Feature ZK node at path: /feature does not exist (kafka.server.FinalizedFeatureChangeListener)
-[2023-11-03 19:38:20,788] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Starting (kafka.server.BrokerToControllerRequestThread)
-[2023-11-03 19:38:20,988] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas)
-[2023-11-03 19:38:21,001] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
-[2023-11-03 19:38:21,004] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Starting (kafka.server.BrokerToControllerRequestThread)
-[2023-11-03 19:38:21,021] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,022] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,023] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,024] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,025] INFO [ExpirationReaper-0-RemoteFetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,036] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
-[2023-11-03 19:38:21,037] INFO [AddPartitionsToTxnSenderThread-0]: Starting (kafka.server.AddPartitionsToTxnManager)
-[2023-11-03 19:38:21,064] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient)
-[2023-11-03 19:38:21,083] INFO Stat of the created znode at /brokers/ids/0 is: 25,25,1699054701074,1699054701074,1,0,0,72058163510640640,206,0,25
+[2023-11-06 13:26:31,577] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
+[2023-11-06 13:26:31,577] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
+[2023-11-06 13:26:31,578] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
+[2023-11-06 13:26:31,580] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
+[2023-11-06 13:26:31,588] INFO Log directory /tmp/kafka-logs not found, creating it. (kafka.log.LogManager)
+[2023-11-06 13:26:31,599] INFO Loading logs from log dirs ArraySeq(/tmp/kafka-logs) (kafka.log.LogManager)
+[2023-11-06 13:26:31,601] INFO No logs found to be loaded in /tmp/kafka-logs (kafka.log.LogManager)
+[2023-11-06 13:26:31,607] INFO Loaded 0 logs in 8ms (kafka.log.LogManager)
+[2023-11-06 13:26:31,608] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
+[2023-11-06 13:26:31,609] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
+[2023-11-06 13:26:31,634] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner$CleanerThread)
+[2023-11-06 13:26:31,642] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread)
+[2023-11-06 13:26:31,649] INFO Feature ZK node at path: /feature does not exist (kafka.server.FinalizedFeatureChangeListener)
+[2023-11-06 13:26:31,674] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Starting (kafka.server.BrokerToControllerRequestThread)
+[2023-11-06 13:26:31,857] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas)
+[2023-11-06 13:26:31,868] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer)
+[2023-11-06 13:26:31,871] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Starting (kafka.server.BrokerToControllerRequestThread)
+[2023-11-06 13:26:31,885] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
+[2023-11-06 13:26:31,886] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
+[2023-11-06 13:26:31,888] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
+[2023-11-06 13:26:31,888] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
+[2023-11-06 13:26:31,889] INFO [ExpirationReaper-0-RemoteFetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
+[2023-11-06 13:26:31,897] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
+[2023-11-06 13:26:31,898] INFO [AddPartitionsToTxnSenderThread-0]: Starting (kafka.server.AddPartitionsToTxnManager)
+[2023-11-06 13:26:31,911] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient)
+[2023-11-06 13:26:31,924] INFO Stat of the created znode at /brokers/ids/0 is: 25,25,1699295191920,1699295191920,1,0,0,72057597673472000,202,0,25
 (kafka.zk.KafkaZkClient)
-[2023-11-03 19:38:21,083] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://ThinkPadP53:9092, czxid (broker epoch): 25 (kafka.zk.KafkaZkClient)
-[2023-11-03 19:38:21,127] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,133] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,133] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,134] INFO Successfully created /controller_epoch with initial epoch 0 (kafka.zk.KafkaZkClient)
-[2023-11-03 19:38:21,147] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator)
-[2023-11-03 19:38:21,148] INFO Feature ZK node created at path: /feature (kafka.server.FinalizedFeatureChangeListener)
-[2023-11-03 19:38:21,152] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator)
-[2023-11-03 19:38:21,164] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator)
-[2023-11-03 19:38:21,167] INFO [MetadataCache brokerId=0] Updated cache from existing None to latest Features(version=3.6-IV2, finalizedFeatures={}, finalizedFeaturesEpoch=0). (kafka.server.metadata.ZkMetadataCache)
-[2023-11-03 19:38:21,167] INFO [TxnMarkerSenderThread-0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager)
-[2023-11-03 19:38:21,168] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator)
-[2023-11-03 19:38:21,199] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
-[2023-11-03 19:38:21,219] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread)
-[2023-11-03 19:38:21,232] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Enabling request processing. (kafka.network.SocketServer)
-[2023-11-03 19:38:21,243] INFO [Controller id=0, targetBrokerId=0] Node 0 disconnected. (org.apache.kafka.clients.NetworkClient)
-[2023-11-03 19:38:21,243] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.DataPlaneAcceptor)
-[2023-11-03 19:38:21,245] WARN [Controller id=0, targetBrokerId=0] Connection to node 0 (ThinkPadP53/192.168.37.3:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient)
-[2023-11-03 19:38:21,247] INFO [Controller id=0, targetBrokerId=0] Client requested connection close from node 0 (org.apache.kafka.clients.NetworkClient)
-[2023-11-03 19:38:21,249] INFO Kafka version: 3.6.0 (org.apache.kafka.common.utils.AppInfoParser)
-[2023-11-03 19:38:21,249] INFO Kafka commitId: 60e845626d8a465a (org.apache.kafka.common.utils.AppInfoParser)
-[2023-11-03 19:38:21,249] INFO Kafka startTimeMs: 1699054701246 (org.apache.kafka.common.utils.AppInfoParser)
-[2023-11-03 19:38:21,250] INFO [KafkaServer id=0] started (kafka.server.KafkaServer)
-[2023-11-03 19:38:21,398] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Recorded new controller, from now on will use node ThinkPadP53:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread)
-[2023-11-03 19:38:21,405] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Recorded new controller, from now on will use node ThinkPadP53:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread)
-[2023-11-03 19:38:41,888] INFO Creating topic __consumer_offsets with configuration {compression.type=producer, cleanup.policy=compact, segment.bytes=104857600} and initial partition assignment HashMap(0 -> ArrayBuffer(0), 1 -> ArrayBuffer(0), 2 -> ArrayBuffer(0), 3 -> ArrayBuffer(0), 4 -> ArrayBuffer(0), 5 -> ArrayBuffer(0), 6 -> ArrayBuffer(0), 7 -> ArrayBuffer(0), 8 -> ArrayBuffer(0), 9 -> ArrayBuffer(0), 10 -> ArrayBuffer(0), 11 -> ArrayBuffer(0), 12 -> ArrayBuffer(0), 13 -> ArrayBuffer(0), 14 -> ArrayBuffer(0), 15 -> ArrayBuffer(0), 16 -> ArrayBuffer(0), 17 -> ArrayBuffer(0), 18 -> ArrayBuffer(0), 19 -> ArrayBuffer(0), 20 -> ArrayBuffer(0), 21 -> ArrayBuffer(0), 22 -> ArrayBuffer(0), 23 -> ArrayBuffer(0), 24 -> ArrayBuffer(0), 25 -> ArrayBuffer(0), 26 -> ArrayBuffer(0), 27 -> ArrayBuffer(0), 28 -> ArrayBuffer(0), 29 -> ArrayBuffer(0), 30 -> ArrayBuffer(0), 31 -> ArrayBuffer(0), 32 -> ArrayBuffer(0), 33 -> ArrayBuffer(0), 34 -> ArrayBuffer(0), 35 -> ArrayBuffer(0), 36 -> ArrayBuffer(0), 37 -> ArrayBuffer(0), 38 -> ArrayBuffer(0), 39 -> ArrayBuffer(0), 40 -> ArrayBuffer(0), 41 -> ArrayBuffer(0),
42 -> ArrayBuffer(0), 43 -> ArrayBuffer(0), 44 -> ArrayBuffer(0), 45 -> ArrayBuffer(0), 46 -> ArrayBuffer(0), 47 -> ArrayBuffer(0), 48 -> ArrayBuffer(0), 49 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) -[2023-11-03 19:38:42,029] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(__consumer_offsets-22, __consumer_offsets-30, __consumer_offsets-25, __consumer_offsets-35, __consumer_offsets-37, __consumer_offsets-38, __consumer_offsets-13, __consumer_offsets-8, __consumer_offsets-21, __consumer_offsets-4, __consumer_offsets-27, __consumer_offsets-7, __consumer_offsets-9, __consumer_offsets-46, __consumer_offsets-41, __consumer_offsets-33, __consumer_offsets-23, __consumer_offsets-49, __consumer_offsets-47, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, __consumer_offsets-17, __consumer_offsets-48, __consumer_offsets-19, __consumer_offsets-11, __consumer_offsets-2, __consumer_offsets-43, __consumer_offsets-6, __consumer_offsets-14, __consumer_offsets-20, __consumer_offsets-0, __consumer_offsets-44, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45, __consumer_offsets-1, __consumer_offsets-5, __consumer_offsets-26, __consumer_offsets-29, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager) -[2023-11-03 19:38:42,067] INFO [LogLoader partition=__consumer_offsets-3, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,077] INFO Created log for partition __consumer_offsets-3 in /tmp/kafka-logs/__consumer_offsets-3 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,079] INFO [Partition __consumer_offsets-3 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition) -[2023-11-03 19:38:42,080] INFO [Partition __consumer_offsets-3 broker=0] Log loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,088] INFO [LogLoader partition=__consumer_offsets-18, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,089] INFO Created log for partition __consumer_offsets-18 in /tmp/kafka-logs/__consumer_offsets-18 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,089] INFO [Partition __consumer_offsets-18 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition) -[2023-11-03 19:38:42,089] INFO [Partition __consumer_offsets-18 broker=0] Log loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,095] INFO [LogLoader partition=__consumer_offsets-41, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,095] INFO Created log for partition __consumer_offsets-41 in /tmp/kafka-logs/__consumer_offsets-41 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,096] INFO [Partition __consumer_offsets-41 broker=0] No 
checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition) -[2023-11-03 19:38:42,096] INFO [Partition __consumer_offsets-41 broker=0] Log loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,099] INFO [LogLoader partition=__consumer_offsets-10, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,100] INFO Created log for partition __consumer_offsets-10 in /tmp/kafka-logs/__consumer_offsets-10 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,100] INFO [Partition __consumer_offsets-10 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition) -[2023-11-03 19:38:42,100] INFO [Partition __consumer_offsets-10 broker=0] Log loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,103] INFO [LogLoader partition=__consumer_offsets-33, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,103] INFO Created log for partition __consumer_offsets-33 in /tmp/kafka-logs/__consumer_offsets-33 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,103] INFO [Partition __consumer_offsets-33 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition) -[2023-11-03 19:38:42,104] INFO [Partition __consumer_offsets-33 broker=0] Log loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,107] INFO [LogLoader partition=__consumer_offsets-48, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,108] INFO Created log for partition __consumer_offsets-48 in /tmp/kafka-logs/__consumer_offsets-48 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,108] INFO [Partition __consumer_offsets-48 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition) -[2023-11-03 19:38:42,108] INFO [Partition __consumer_offsets-48 broker=0] Log loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,113] INFO [LogLoader partition=__consumer_offsets-19, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,114] INFO Created log for partition __consumer_offsets-19 in /tmp/kafka-logs/__consumer_offsets-19 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,114] INFO [Partition __consumer_offsets-19 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition) -[2023-11-03 19:38:42,114] INFO [Partition __consumer_offsets-19 broker=0] Log loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,117] INFO [LogLoader partition=__consumer_offsets-34, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format 
version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,118] INFO Created log for partition __consumer_offsets-34 in /tmp/kafka-logs/__consumer_offsets-34 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,118] INFO [Partition __consumer_offsets-34 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition) -[2023-11-03 19:38:42,118] INFO [Partition __consumer_offsets-34 broker=0] Log loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,121] INFO [LogLoader partition=__consumer_offsets-4, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,121] INFO Created log for partition __consumer_offsets-4 in /tmp/kafka-logs/__consumer_offsets-4 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,121] INFO [Partition __consumer_offsets-4 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition) -[2023-11-03 19:38:42,122] INFO [Partition __consumer_offsets-4 broker=0] Log loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,127] INFO [LogLoader partition=__consumer_offsets-11, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,127] INFO Created log for partition __consumer_offsets-11 in /tmp/kafka-logs/__consumer_offsets-11 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,127] INFO [Partition __consumer_offsets-11 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition) -[2023-11-03 19:38:42,127] INFO [Partition __consumer_offsets-11 broker=0] Log loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,130] INFO [LogLoader partition=__consumer_offsets-26, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,131] INFO Created log for partition __consumer_offsets-26 in /tmp/kafka-logs/__consumer_offsets-26 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,131] INFO [Partition __consumer_offsets-26 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition) -[2023-11-03 19:38:42,131] INFO [Partition __consumer_offsets-26 broker=0] Log loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,134] INFO [LogLoader partition=__consumer_offsets-49, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,134] INFO Created log for partition __consumer_offsets-49 in /tmp/kafka-logs/__consumer_offsets-49 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,135] INFO [Partition __consumer_offsets-49 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-49 
(kafka.cluster.Partition) -[2023-11-03 19:38:42,135] INFO [Partition __consumer_offsets-49 broker=0] Log loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,137] INFO [LogLoader partition=__consumer_offsets-39, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,137] INFO Created log for partition __consumer_offsets-39 in /tmp/kafka-logs/__consumer_offsets-39 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,137] INFO [Partition __consumer_offsets-39 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition) -[2023-11-03 19:38:42,137] INFO [Partition __consumer_offsets-39 broker=0] Log loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,141] INFO [LogLoader partition=__consumer_offsets-9, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,142] INFO Created log for partition __consumer_offsets-9 in /tmp/kafka-logs/__consumer_offsets-9 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,143] INFO [Partition __consumer_offsets-9 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-9 (kafka.cluster.Partition) -[2023-11-03 19:38:42,143] INFO [Partition __consumer_offsets-9 broker=0] Log loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,145] INFO [LogLoader partition=__consumer_offsets-24, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,146] INFO Created log for partition __consumer_offsets-24 in /tmp/kafka-logs/__consumer_offsets-24 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,146] INFO [Partition __consumer_offsets-24 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition) -[2023-11-03 19:38:42,146] INFO [Partition __consumer_offsets-24 broker=0] Log loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,149] INFO [LogLoader partition=__consumer_offsets-31, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,149] INFO Created log for partition __consumer_offsets-31 in /tmp/kafka-logs/__consumer_offsets-31 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,149] INFO [Partition __consumer_offsets-31 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition) -[2023-11-03 19:38:42,149] INFO [Partition __consumer_offsets-31 broker=0] Log loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,152] INFO [LogLoader partition=__consumer_offsets-46, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,152] INFO Created log 
for partition __consumer_offsets-46 in /tmp/kafka-logs/__consumer_offsets-46 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,152] INFO [Partition __consumer_offsets-46 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition) -[2023-11-03 19:38:42,152] INFO [Partition __consumer_offsets-46 broker=0] Log loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,155] INFO [LogLoader partition=__consumer_offsets-1, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,156] INFO Created log for partition __consumer_offsets-1 in /tmp/kafka-logs/__consumer_offsets-1 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,156] INFO [Partition __consumer_offsets-1 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition) -[2023-11-03 19:38:42,156] INFO [Partition __consumer_offsets-1 broker=0] Log loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,162] INFO [LogLoader partition=__consumer_offsets-16, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,163] INFO Created log for partition __consumer_offsets-16 in /tmp/kafka-logs/__consumer_offsets-16 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,163] INFO [Partition __consumer_offsets-16 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition) -[2023-11-03 19:38:42,163] INFO [Partition __consumer_offsets-16 broker=0] Log loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,167] INFO [LogLoader partition=__consumer_offsets-2, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,167] INFO Created log for partition __consumer_offsets-2 in /tmp/kafka-logs/__consumer_offsets-2 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,167] INFO [Partition __consumer_offsets-2 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition) -[2023-11-03 19:38:42,167] INFO [Partition __consumer_offsets-2 broker=0] Log loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,170] INFO [LogLoader partition=__consumer_offsets-25, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,170] INFO Created log for partition __consumer_offsets-25 in /tmp/kafka-logs/__consumer_offsets-25 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,170] INFO [Partition __consumer_offsets-25 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition) -[2023-11-03 19:38:42,170] INFO [Partition __consumer_offsets-25 
broker=0] Log loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,173] INFO [LogLoader partition=__consumer_offsets-40, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,175] INFO Created log for partition __consumer_offsets-40 in /tmp/kafka-logs/__consumer_offsets-40 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,175] INFO [Partition __consumer_offsets-40 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition) -[2023-11-03 19:38:42,175] INFO [Partition __consumer_offsets-40 broker=0] Log loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,180] INFO [LogLoader partition=__consumer_offsets-47, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,181] INFO Created log for partition __consumer_offsets-47 in /tmp/kafka-logs/__consumer_offsets-47 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,181] INFO [Partition __consumer_offsets-47 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition) -[2023-11-03 19:38:42,181] INFO [Partition __consumer_offsets-47 broker=0] Log loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,185] INFO [LogLoader partition=__consumer_offsets-17, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,185] INFO Created log for partition __consumer_offsets-17 in /tmp/kafka-logs/__consumer_offsets-17 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,185] INFO [Partition __consumer_offsets-17 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition) -[2023-11-03 19:38:42,185] INFO [Partition __consumer_offsets-17 broker=0] Log loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,188] INFO [LogLoader partition=__consumer_offsets-32, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,188] INFO Created log for partition __consumer_offsets-32 in /tmp/kafka-logs/__consumer_offsets-32 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,188] INFO [Partition __consumer_offsets-32 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition) -[2023-11-03 19:38:42,188] INFO [Partition __consumer_offsets-32 broker=0] Log loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,194] INFO [LogLoader partition=__consumer_offsets-37, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,195] INFO Created log for partition __consumer_offsets-37 in /tmp/kafka-logs/__consumer_offsets-37 with 
properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,195] INFO [Partition __consumer_offsets-37 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition) -[2023-11-03 19:38:42,195] INFO [Partition __consumer_offsets-37 broker=0] Log loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,200] INFO [LogLoader partition=__consumer_offsets-7, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,201] INFO Created log for partition __consumer_offsets-7 in /tmp/kafka-logs/__consumer_offsets-7 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,201] INFO [Partition __consumer_offsets-7 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition) -[2023-11-03 19:38:42,201] INFO [Partition __consumer_offsets-7 broker=0] Log loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,204] INFO [LogLoader partition=__consumer_offsets-22, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,204] INFO Created log for partition __consumer_offsets-22 in /tmp/kafka-logs/__consumer_offsets-22 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,204] INFO [Partition __consumer_offsets-22 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition) -[2023-11-03 19:38:42,204] INFO [Partition __consumer_offsets-22 broker=0] Log loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,209] INFO [LogLoader partition=__consumer_offsets-29, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,210] INFO Created log for partition __consumer_offsets-29 in /tmp/kafka-logs/__consumer_offsets-29 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,210] INFO [Partition __consumer_offsets-29 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition) -[2023-11-03 19:38:42,210] INFO [Partition __consumer_offsets-29 broker=0] Log loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,215] INFO [LogLoader partition=__consumer_offsets-44, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,216] INFO Created log for partition __consumer_offsets-44 in /tmp/kafka-logs/__consumer_offsets-44 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,216] INFO [Partition __consumer_offsets-44 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition) -[2023-11-03 19:38:42,216] INFO [Partition __consumer_offsets-44 broker=0] Log loaded for partition __consumer_offsets-44 with initial high 
watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,220] INFO [LogLoader partition=__consumer_offsets-14, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,220] INFO Created log for partition __consumer_offsets-14 in /tmp/kafka-logs/__consumer_offsets-14 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,220] INFO [Partition __consumer_offsets-14 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition) -[2023-11-03 19:38:42,220] INFO [Partition __consumer_offsets-14 broker=0] Log loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,225] INFO [LogLoader partition=__consumer_offsets-23, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,226] INFO Created log for partition __consumer_offsets-23 in /tmp/kafka-logs/__consumer_offsets-23 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,226] INFO [Partition __consumer_offsets-23 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition) -[2023-11-03 19:38:42,226] INFO [Partition __consumer_offsets-23 broker=0] Log loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,230] INFO [LogLoader partition=__consumer_offsets-38, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,231] INFO Created log for partition __consumer_offsets-38 in /tmp/kafka-logs/__consumer_offsets-38 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,231] INFO [Partition __consumer_offsets-38 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition) -[2023-11-03 19:38:42,231] INFO [Partition __consumer_offsets-38 broker=0] Log loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,235] INFO [LogLoader partition=__consumer_offsets-8, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,236] INFO Created log for partition __consumer_offsets-8 in /tmp/kafka-logs/__consumer_offsets-8 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,236] INFO [Partition __consumer_offsets-8 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition) -[2023-11-03 19:38:42,236] INFO [Partition __consumer_offsets-8 broker=0] Log loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,238] INFO [LogLoader partition=__consumer_offsets-45, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,239] INFO Created log for partition __consumer_offsets-45 in /tmp/kafka-logs/__consumer_offsets-45 with properties {cleanup.policy=compact, compression.type="producer", 
segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,239] INFO [Partition __consumer_offsets-45 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition) -[2023-11-03 19:38:42,239] INFO [Partition __consumer_offsets-45 broker=0] Log loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,244] INFO [LogLoader partition=__consumer_offsets-15, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,244] INFO Created log for partition __consumer_offsets-15 in /tmp/kafka-logs/__consumer_offsets-15 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,244] INFO [Partition __consumer_offsets-15 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition) -[2023-11-03 19:38:42,244] INFO [Partition __consumer_offsets-15 broker=0] Log loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,248] INFO [LogLoader partition=__consumer_offsets-30, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,249] INFO Created log for partition __consumer_offsets-30 in /tmp/kafka-logs/__consumer_offsets-30 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,249] INFO [Partition __consumer_offsets-30 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition) -[2023-11-03 19:38:42,249] INFO [Partition __consumer_offsets-30 broker=0] Log loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,251] INFO [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,251] INFO Created log for partition __consumer_offsets-0 in /tmp/kafka-logs/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,252] INFO [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,252] INFO [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,254] INFO [LogLoader partition=__consumer_offsets-35, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,254] INFO Created log for partition __consumer_offsets-35 in /tmp/kafka-logs/__consumer_offsets-35 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,254] INFO [Partition __consumer_offsets-35 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition) -[2023-11-03 19:38:42,254] INFO [Partition __consumer_offsets-35 broker=0] Log loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,260] INFO 
[LogLoader partition=__consumer_offsets-5, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,260] INFO Created log for partition __consumer_offsets-5 in /tmp/kafka-logs/__consumer_offsets-5 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,261] INFO [Partition __consumer_offsets-5 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition) -[2023-11-03 19:38:42,261] INFO [Partition __consumer_offsets-5 broker=0] Log loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,264] INFO [LogLoader partition=__consumer_offsets-20, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,265] INFO Created log for partition __consumer_offsets-20 in /tmp/kafka-logs/__consumer_offsets-20 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,265] INFO [Partition __consumer_offsets-20 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition) -[2023-11-03 19:38:42,266] INFO [Partition __consumer_offsets-20 broker=0] Log loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,269] INFO [LogLoader partition=__consumer_offsets-27, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,269] INFO Created log for partition __consumer_offsets-27 in /tmp/kafka-logs/__consumer_offsets-27 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,269] INFO [Partition __consumer_offsets-27 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition) -[2023-11-03 19:38:42,270] INFO [Partition __consumer_offsets-27 broker=0] Log loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,272] INFO [LogLoader partition=__consumer_offsets-42, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,273] INFO Created log for partition __consumer_offsets-42 in /tmp/kafka-logs/__consumer_offsets-42 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,273] INFO [Partition __consumer_offsets-42 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition) -[2023-11-03 19:38:42,273] INFO [Partition __consumer_offsets-42 broker=0] Log loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,277] INFO [LogLoader partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,277] INFO Created log for partition __consumer_offsets-12 in /tmp/kafka-logs/__consumer_offsets-12 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,277] INFO [Partition 
__consumer_offsets-12 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition) -[2023-11-03 19:38:42,277] INFO [Partition __consumer_offsets-12 broker=0] Log loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,280] INFO [LogLoader partition=__consumer_offsets-21, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,280] INFO Created log for partition __consumer_offsets-21 in /tmp/kafka-logs/__consumer_offsets-21 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,280] INFO [Partition __consumer_offsets-21 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition) -[2023-11-03 19:38:42,280] INFO [Partition __consumer_offsets-21 broker=0] Log loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,284] INFO [LogLoader partition=__consumer_offsets-36, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,285] INFO Created log for partition __consumer_offsets-36 in /tmp/kafka-logs/__consumer_offsets-36 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,285] INFO [Partition __consumer_offsets-36 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition) -[2023-11-03 19:38:42,285] INFO [Partition __consumer_offsets-36 broker=0] Log loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,288] INFO [LogLoader partition=__consumer_offsets-6, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,288] INFO Created log for partition __consumer_offsets-6 in /tmp/kafka-logs/__consumer_offsets-6 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,288] INFO [Partition __consumer_offsets-6 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition) -[2023-11-03 19:38:42,288] INFO [Partition __consumer_offsets-6 broker=0] Log loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,291] INFO [LogLoader partition=__consumer_offsets-43, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,292] INFO Created log for partition __consumer_offsets-43 in /tmp/kafka-logs/__consumer_offsets-43 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,292] INFO [Partition __consumer_offsets-43 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition) -[2023-11-03 19:38:42,292] INFO [Partition __consumer_offsets-43 broker=0] Log loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,295] INFO [LogLoader partition=__consumer_offsets-13, dir=/tmp/kafka-logs] Loading producer state till 
offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,295] INFO Created log for partition __consumer_offsets-13 in /tmp/kafka-logs/__consumer_offsets-13 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,295] INFO [Partition __consumer_offsets-13 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition) -[2023-11-03 19:38:42,296] INFO [Partition __consumer_offsets-13 broker=0] Log loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,298] INFO [LogLoader partition=__consumer_offsets-28, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:42,299] INFO Created log for partition __consumer_offsets-28 in /tmp/kafka-logs/__consumer_offsets-28 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) -[2023-11-03 19:38:42,299] INFO [Partition __consumer_offsets-28 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition) -[2023-11-03 19:38:42,299] INFO [Partition __consumer_offsets-28 broker=0] Log loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:38:42,303] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 3 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,303] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-3 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 18 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-18 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 41 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-41 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 10 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 33 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-33 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 48 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-48 for epoch 0 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 19 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 34 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 4 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 11 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-11 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 26 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-26 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 49 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 39 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-39 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 9 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-9 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 24 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-24 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 31 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,304] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from 
__consumer_offsets-31 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,304] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 46 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-46 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 1 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 16 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 2 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-2 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 25 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 40 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 47 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-47 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 17 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-17 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 32 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-32 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 37 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of 
offsets and group metadata from __consumer_offsets-37 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 7 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 22 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 29 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-29 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 44 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-44 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 14 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-14 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 23 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-23 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 38 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-38 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 8 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-8 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 45 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-45 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 15 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager 
brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-15 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 30 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-30 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 0 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,305] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-0 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,305] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 35 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-35 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 5 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-5 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 20 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-20 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 27 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-27 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 42 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-42 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 12 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-12 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 21 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-21 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 36 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 
19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-36 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 6 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-6 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 43 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 13 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,306] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 28 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:38:42,306] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-28 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,310] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-3 in 4 milliseconds for epoch 0, of which 1 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,310] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-18 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,310] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-41 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 7 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-33 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-48 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-11 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-26 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,311] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-39 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-9 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-24 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,312] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-2 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-47 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-17 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-32 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-29 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-44 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,313] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-14 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-23 in 9 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-38 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-8 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-45 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-15 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-30 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-0 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-35 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-5 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-20 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-27 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-42 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-12 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-21 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,314] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-36 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,315] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-6 in 8 milliseconds for epoch 0, of which 8 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,315] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,315] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:42,315] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 9 milliseconds for epoch 0, of which 9 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:38:56,514] INFO Creating topic test-topic with configuration {} and initial partition assignment HashMap(0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) -[2023-11-03 19:38:56,527] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(test-topic-0) (kafka.server.ReplicaFetcherManager) -[2023-11-03 19:38:56,529] INFO [LogLoader partition=test-topic-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:38:56,529] INFO Created log for partition test-topic-0 in /tmp/kafka-logs/test-topic-0 with properties {} (kafka.log.LogManager) -[2023-11-03 19:38:56,530] INFO [Partition test-topic-0 broker=0] No checkpointed highwatermark is found for partition test-topic-0 (kafka.cluster.Partition) -[2023-11-03 19:38:56,530] INFO [Partition test-topic-0 broker=0] Log loaded for partition test-topic-0 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:39:07,767] INFO [GroupCoordinator 0]: Dynamic member with unknown member id joins group test-group in Empty state. Created a new member id rdkafka-d8d257cb-90fd-4aed-a191-0c8705f20f1a and request the member to rejoin with this id. (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:39:07,775] INFO [GroupCoordinator 0]: Preparing to rebalance group test-group in state PreparingRebalance with old generation 0 (__consumer_offsets-12) (reason: Adding new member rdkafka-d8d257cb-90fd-4aed-a191-0c8705f20f1a with group instance id None; client reason: not provided) (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:39:07,791] INFO [GroupCoordinator 0]: Stabilized group test-group generation 1 (__consumer_offsets-12) with 1 members (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:39:07,797] INFO [GroupCoordinator 0]: Assignment received from leader rdkafka-d8d257cb-90fd-4aed-a191-0c8705f20f1a for group test-group for generation 1. The group has 1 members, 0 of which are static. 
(kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:40:46,816] INFO [GroupCoordinator 0]: Member rdkafka-d8d257cb-90fd-4aed-a191-0c8705f20f1a in group test-group has failed, removing it from the group (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:40:46,817] INFO [GroupCoordinator 0]: Preparing to rebalance group test-group in state PreparingRebalance with old generation 1 (__consumer_offsets-12) (reason: removing member rdkafka-d8d257cb-90fd-4aed-a191-0c8705f20f1a on heartbeat expiration) (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:40:46,818] INFO [GroupCoordinator 0]: Group test-group with generation 2 is now empty (__consumer_offsets-12) (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:40:48,852] INFO Terminating process due to signal SIGTERM (org.apache.kafka.common.utils.LoggingSignalHandler) -[2023-11-03 19:40:48,853] INFO [KafkaServer id=0] shutting down (kafka.server.KafkaServer) -[2023-11-03 19:40:48,854] INFO [KafkaServer id=0] Starting controlled shutdown (kafka.server.KafkaServer) -[2023-11-03 19:40:48,865] INFO [KafkaServer id=0] Controlled shutdown request returned successfully after 5ms (kafka.server.KafkaServer) -[2023-11-03 19:40:48,867] INFO [/config/changes-event-process-thread]: Shutting down (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) -[2023-11-03 19:40:48,867] INFO [/config/changes-event-process-thread]: Stopped (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) -[2023-11-03 19:40:48,867] INFO [/config/changes-event-process-thread]: Shutdown completed (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) -[2023-11-03 19:40:48,868] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopping socket server request processors (kafka.network.SocketServer) -[2023-11-03 19:40:48,875] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Stopped socket server request processors (kafka.network.SocketServer) -[2023-11-03 19:40:48,876] INFO [data-plane Kafka Request Handler on Broker 0], shutting down (kafka.server.KafkaRequestHandlerPool) -[2023-11-03 19:40:48,876] INFO [data-plane Kafka Request Handler on Broker 0], shut down completely (kafka.server.KafkaRequestHandlerPool) -[2023-11-03 19:40:48,878] INFO [ExpirationReaper-0-AlterAcls]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,878] INFO [ExpirationReaper-0-AlterAcls]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,878] INFO [ExpirationReaper-0-AlterAcls]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,879] INFO [KafkaApi-0] Shutdown complete. (kafka.server.KafkaApis) -[2023-11-03 19:40:48,879] INFO [ExpirationReaper-0-topic]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,879] INFO [ExpirationReaper-0-topic]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,879] INFO [ExpirationReaper-0-topic]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,880] INFO [TransactionCoordinator id=0] Shutting down. 
(kafka.coordinator.transaction.TransactionCoordinator) -[2023-11-03 19:40:48,880] INFO [Transaction State Manager 0]: Shutdown complete (kafka.coordinator.transaction.TransactionStateManager) -[2023-11-03 19:40:48,880] INFO [TxnMarkerSenderThread-0]: Shutting down (kafka.coordinator.transaction.TransactionMarkerChannelManager) -[2023-11-03 19:40:48,881] INFO [TxnMarkerSenderThread-0]: Stopped (kafka.coordinator.transaction.TransactionMarkerChannelManager) -[2023-11-03 19:40:48,881] INFO [TxnMarkerSenderThread-0]: Shutdown completed (kafka.coordinator.transaction.TransactionMarkerChannelManager) -[2023-11-03 19:40:48,881] INFO [TransactionCoordinator id=0] Shutdown complete. (kafka.coordinator.transaction.TransactionCoordinator) -[2023-11-03 19:40:48,881] INFO [GroupCoordinator 0]: Shutting down. (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:40:48,882] INFO [ExpirationReaper-0-Heartbeat]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,882] INFO [ExpirationReaper-0-Heartbeat]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,882] INFO [ExpirationReaper-0-Heartbeat]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,882] INFO [ExpirationReaper-0-Rebalance]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,882] INFO [ExpirationReaper-0-Rebalance]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,882] INFO [ExpirationReaper-0-Rebalance]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,882] INFO [GroupCoordinator 0]: Shutdown complete. (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:40:48,883] INFO [ReplicaManager broker=0] Shutting down (kafka.server.ReplicaManager) -[2023-11-03 19:40:48,883] INFO [LogDirFailureHandler]: Shutting down (kafka.server.ReplicaManager$LogDirFailureHandler) -[2023-11-03 19:40:48,883] INFO [LogDirFailureHandler]: Stopped (kafka.server.ReplicaManager$LogDirFailureHandler) -[2023-11-03 19:40:48,883] INFO [LogDirFailureHandler]: Shutdown completed (kafka.server.ReplicaManager$LogDirFailureHandler) -[2023-11-03 19:40:48,884] INFO [ReplicaFetcherManager on broker 0] shutting down (kafka.server.ReplicaFetcherManager) -[2023-11-03 19:40:48,884] INFO [ReplicaFetcherManager on broker 0] shutdown completed (kafka.server.ReplicaFetcherManager) -[2023-11-03 19:40:48,884] INFO [ReplicaAlterLogDirsManager on broker 0] shutting down (kafka.server.ReplicaAlterLogDirsManager) -[2023-11-03 19:40:48,884] INFO [ReplicaAlterLogDirsManager on broker 0] shutdown completed (kafka.server.ReplicaAlterLogDirsManager) -[2023-11-03 19:40:48,884] INFO [ExpirationReaper-0-Fetch]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,884] INFO [ExpirationReaper-0-Fetch]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,884] INFO [ExpirationReaper-0-Fetch]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-RemoteFetch]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-RemoteFetch]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-RemoteFetch]: 
Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-Produce]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-Produce]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-Produce]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-DeleteRecords]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,885] INFO [ExpirationReaper-0-DeleteRecords]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,886] INFO [ExpirationReaper-0-DeleteRecords]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,886] INFO [ExpirationReaper-0-ElectLeader]: Shutting down (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,886] INFO [ExpirationReaper-0-ElectLeader]: Stopped (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,886] INFO [ExpirationReaper-0-ElectLeader]: Shutdown completed (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:40:48,889] INFO [AddPartitionsToTxnSenderThread-0]: Shutting down (kafka.server.AddPartitionsToTxnManager) -[2023-11-03 19:40:48,889] INFO [AddPartitionsToTxnSenderThread-0]: Stopped (kafka.server.AddPartitionsToTxnManager) -[2023-11-03 19:40:48,889] INFO [AddPartitionsToTxnSenderThread-0]: Shutdown completed (kafka.server.AddPartitionsToTxnManager) -[2023-11-03 19:40:48,889] INFO [ReplicaManager broker=0] Shut down completely (kafka.server.ReplicaManager) -[2023-11-03 19:40:48,890] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Shutting down (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:40:48,890] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Stopped (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:40:48,890] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Shutdown completed (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:40:48,892] INFO Broker to controller channel manager for alter-partition shutdown (kafka.server.BrokerToControllerChannelManagerImpl) -[2023-11-03 19:40:48,892] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Shutting down (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:40:48,893] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Stopped (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:40:48,893] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Shutdown completed (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:40:48,893] INFO Broker to controller channel manager for forwarding shutdown (kafka.server.BrokerToControllerChannelManagerImpl) -[2023-11-03 19:40:48,894] INFO Shutting down. 
(kafka.log.LogManager) -[2023-11-03 19:40:48,894] INFO [kafka-log-cleaner-thread-0]: Shutting down (kafka.log.LogCleaner$CleanerThread) -[2023-11-03 19:40:48,894] INFO [kafka-log-cleaner-thread-0]: Stopped (kafka.log.LogCleaner$CleanerThread) -[2023-11-03 19:40:48,894] INFO [kafka-log-cleaner-thread-0]: Shutdown completed (kafka.log.LogCleaner$CleanerThread) -[2023-11-03 19:40:48,912] INFO [ProducerStateManager partition=__consumer_offsets-12]Wrote producer snapshot at offset 7 with 0 producer ids in 0 ms. (org.apache.kafka.storage.internals.log.ProducerStateManager) -[2023-11-03 19:40:48,914] INFO [ProducerStateManager partition=test-topic-0]Wrote producer snapshot at offset 5 with 0 producer ids in 0 ms. (org.apache.kafka.storage.internals.log.ProducerStateManager) -[2023-11-03 19:40:48,920] INFO Shutdown complete. (kafka.log.LogManager) -[2023-11-03 19:40:48,925] INFO [feature-zk-node-event-process-thread]: Shutting down (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) -[2023-11-03 19:40:48,925] INFO [feature-zk-node-event-process-thread]: Stopped (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) -[2023-11-03 19:40:48,925] INFO [feature-zk-node-event-process-thread]: Shutdown completed (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) -[2023-11-03 19:40:48,925] INFO [ZooKeeperClient Kafka server] Closing. (kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:40:49,028] INFO Session: 0x100008497380000 closed (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:40:49,028] INFO EventThread shut down for session: 0x100008497380000 (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:40:49,028] INFO [ZooKeeperClient Kafka server] Closed. (kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:40:49,028] INFO [ThrottledChannelReaper-Fetch]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Fetch]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Fetch]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Produce]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Produce]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Produce]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Request]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Request]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-Request]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-ControllerMutation]: Shutting down (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-ControllerMutation]: Stopped (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,029] INFO [ThrottledChannelReaper-ControllerMutation]: Shutdown completed (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:40:49,030] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutting down socket server 
(kafka.network.SocketServer) -[2023-11-03 19:40:49,039] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Shutdown completed (kafka.network.SocketServer) -[2023-11-03 19:40:49,039] INFO Metrics scheduler closed (org.apache.kafka.common.metrics.Metrics) -[2023-11-03 19:40:49,039] INFO Closing reporter org.apache.kafka.common.metrics.JmxReporter (org.apache.kafka.common.metrics.Metrics) -[2023-11-03 19:40:49,039] INFO Metrics reporters closed (org.apache.kafka.common.metrics.Metrics) -[2023-11-03 19:40:49,040] INFO Broker and topic stats closed (kafka.server.BrokerTopicStats) -[2023-11-03 19:40:49,040] INFO App info kafka.server for 0 unregistered (org.apache.kafka.common.utils.AppInfoParser) -[2023-11-03 19:40:49,040] INFO [KafkaServer id=0] shut down completed (kafka.server.KafkaServer) -[2023-11-03 19:45:15,871] INFO Reading configuration from: ./config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,874] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,874] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,874] INFO observerMasterPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,874] INFO metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,877] INFO autopurge.snapRetainCount set to 3 (org.apache.zookeeper.server.DatadirCleanupManager) -[2023-11-03 19:45:15,877] INFO autopurge.purgeInterval set to 0 (org.apache.zookeeper.server.DatadirCleanupManager) -[2023-11-03 19:45:15,877] INFO Purge task is not scheduled. (org.apache.zookeeper.server.DatadirCleanupManager) -[2023-11-03 19:45:15,877] WARN Either no config or no quorum defined in config, running in standalone mode (org.apache.zookeeper.server.quorum.QuorumPeerMain) -[2023-11-03 19:45:15,878] INFO Log4j 1.2 jmx support not found; jmx disabled. 
(org.apache.zookeeper.jmx.ManagedUtil) -[2023-11-03 19:45:15,879] INFO Reading configuration from: ./config/zookeeper.properties (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,879] INFO clientPortAddress is 0.0.0.0:2181 (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,879] INFO secureClientPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,879] INFO observerMasterPort is not set (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,879] INFO metricsProvider.className is org.apache.zookeeper.metrics.impl.DefaultMetricsProvider (org.apache.zookeeper.server.quorum.QuorumPeerConfig) -[2023-11-03 19:45:15,879] INFO Starting server (org.apache.zookeeper.server.ZooKeeperServerMain) -[2023-11-03 19:45:15,887] INFO ServerMetrics initialized with provider org.apache.zookeeper.metrics.impl.DefaultMetricsProvider@4034c28c (org.apache.zookeeper.server.ServerMetrics) -[2023-11-03 19:45:15,888] INFO ACL digest algorithm is: SHA1 (org.apache.zookeeper.server.auth.DigestAuthenticationProvider) -[2023-11-03 19:45:15,888] INFO zookeeper.DigestAuthenticationProvider.enabled = true (org.apache.zookeeper.server.auth.DigestAuthenticationProvider) -[2023-11-03 19:45:15,891] INFO zookeeper.snapshot.trust.empty : false (org.apache.zookeeper.server.persistence.FileTxnSnapLog) -[2023-11-03 19:45:15,899] INFO (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,899] INFO ______ _ (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,899] INFO |___ / | | (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,899] INFO / / ___ ___ | | __ ___ ___ _ __ ___ _ __ (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,899] INFO / / / _ \ / _ \ | |/ / / _ \ / _ \ | '_ \ / _ \ | '__| (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,899] INFO / /__ | (_) | | (_) | | < | __/ | __/ | |_) | | __/ | | (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,899] INFO /_____| \___/ \___/ |_|\_\ \___| \___| | .__/ \___| |_| (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,900] INFO | | (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,900] INFO |_| (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,900] INFO (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:zookeeper.version=3.8.2-139d619b58292d7734b4fc83a0f44be4e7b0c986, built on 2023-07-05 19:24 UTC (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:host.name=ThinkPadP53 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:java.version=17.0.6 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:java.vendor=Eclipse Adoptium (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:java.home=/opt/openjdk-bin-17.0.6_p10 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server 
environment:java.class.path=/scratch/Exemple_Kafka/bin/../libs/activation-1.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/argparse4j-0.7.0.jar:/scratch/Exemple_Kafka/bin/../libs/audience-annotations-0.12.0.jar:/scratch/Exemple_Kafka/bin/../libs/caffeine-2.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/checker-qual-3.19.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-beanutils-1.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-cli-1.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-collections-3.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-digester-2.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-io-2.11.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-lang3-3.8.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-logging-1.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-validator-1.7.jar:/scratch/Exemple_Kafka/bin/../libs/connect-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-basic-auth-extension-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-json-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-client-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-runtime-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-transforms-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/error_prone_annotations-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-api-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-locator-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-utils-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-core-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-databind-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-dataformat-csv-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-datatype-jdk8-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-base-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-json-provider-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-jaxb-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-scala_2.13-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.activation-api-1.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.inject-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.xml.bind-api-2.3.3.jar:/scratch/Exemple_Kafka/bin/../libs/javassist-3.29.2-GA.jar:/scratch/Exemple_Kafka/bin/../libs/javax.activation-api-1.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.annotation-api-1.3.2.jar:/scratch/Exemple_Kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/jaxb-api-2.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-client-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-common-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-core-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-hk2-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-server-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-client-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-continuation-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-http-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-
io-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-security-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-server-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlet-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlets-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-ajax-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jline-3.22.0.jar:/scratch/Exemple_Kafka/bin/../libs/jopt-simple-5.0.4.jar:/scratch/Exemple_Kafka/bin/../libs/jose4j-0.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/jsr305-3.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-clients-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-group-coordinator-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-log4j-appender-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-metadata-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-raft-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-server-common-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-shell-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-examples-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-scala_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-test-utils-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/lz4-java-1.8.0.jar:/scratch/Exemple_Kafka/bin/../libs/maven-artifact-3.8.8.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-2.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-4.1.12.1.jar:/scratch/Exemple_Kafka/bin/../libs/netty-buffer-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-codec-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-handler-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-resolver-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-classes-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-unix-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/scratch/Exemple_Kafka/bin/../libs/paranamer-2.8.jar:/scratch/Exemple_Kafka/bin/../libs/pcollections-4.0.1.jar:/scratch/Exemple_Kafka/bin/../libs/plexus-utils-3.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/reflections-0.10.2.jar:/scratch/Exemple_Kafka/bin/../libs/reload4j-1.2.25.jar:/scratch/Exemple_Kafka/bin/../libs/rocksdbjni-7.9.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-collection-compat_2.13-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/scala-java8-compat_2.13-1.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-library-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/scala-logging_2.13-3.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/scala-reflect-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-api-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-reload4j-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/snappy-java-1.1.10.4.jar:/scratch/Exemple_Kafka/bin/../libs/swagger-annotations-2.2.8.jar:/scratch/Exemple_Kafka/bin/../libs/trogdor-3.6.0
.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-jute-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zstd-jni-1.5.5-1.jar (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:java.io.tmpdir=/tmp (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,902] INFO Server environment:java.compiler= (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:os.name=Linux (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:os.arch=amd64 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:os.version=6.4.3-cachyosGentooThinkPadP53 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:user.name=memartel (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:user.home=/home/memartel (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:user.dir=/scratch/Exemple_Kafka (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:os.memory.free=494MB (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:os.memory.max=512MB (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO Server environment:os.memory.total=512MB (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.enableEagerACLCheck = false (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.digest.enabled = true (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.closeSessionTxn.enabled = true (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.flushDelay = 0 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.maxWriteQueuePollTime = 0 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.maxBatchSize=1000 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,903] INFO zookeeper.intBufferStartingSizeBytes = 1024 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,904] INFO Weighed connection throttling is disabled (org.apache.zookeeper.server.BlueThrottle) -[2023-11-03 19:45:15,905] INFO minSessionTimeout set to 6000 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,905] INFO maxSessionTimeout set to 60000 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,906] INFO getData response cache size is initialized with value 400. (org.apache.zookeeper.server.ResponseCache) -[2023-11-03 19:45:15,906] INFO getChildren response cache size is initialized with value 400. 
(org.apache.zookeeper.server.ResponseCache) -[2023-11-03 19:45:15,907] INFO zookeeper.pathStats.slotCapacity = 60 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:45:15,907] INFO zookeeper.pathStats.slotDuration = 15 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:45:15,907] INFO zookeeper.pathStats.maxDepth = 6 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:45:15,907] INFO zookeeper.pathStats.initialDelay = 5 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:45:15,907] INFO zookeeper.pathStats.delay = 5 (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:45:15,907] INFO zookeeper.pathStats.enabled = false (org.apache.zookeeper.server.util.RequestPathMetricsCollector) -[2023-11-03 19:45:15,909] INFO The max bytes for all large requests are set to 104857600 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,910] INFO The large request threshold is set to -1 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,911] INFO zookeeper.enforce.auth.enabled = false (org.apache.zookeeper.server.AuthenticationHelper) -[2023-11-03 19:45:15,911] INFO zookeeper.enforce.auth.schemes = [] (org.apache.zookeeper.server.AuthenticationHelper) -[2023-11-03 19:45:15,911] INFO Created server with tickTime 3000 ms minSessionTimeout 6000 ms maxSessionTimeout 60000 ms clientPortListenBacklog -1 datadir /tmp/zookeeper/version-2 snapdir /tmp/zookeeper/version-2 (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,916] INFO Using org.apache.zookeeper.server.NIOServerCnxnFactory as server connection factory (org.apache.zookeeper.server.ServerCnxnFactory) -[2023-11-03 19:45:15,917] WARN maxCnxns is not configured, using default value 0. (org.apache.zookeeper.server.ServerCnxnFactory) -[2023-11-03 19:45:15,918] INFO Configuring NIO connection handler with 10s sessionless connection timeout, 2 selector thread(s), 24 worker threads, and 64 kB direct buffers. (org.apache.zookeeper.server.NIOServerCnxnFactory) -[2023-11-03 19:45:15,922] INFO binding to port 0.0.0.0/0.0.0.0:2181 (org.apache.zookeeper.server.NIOServerCnxnFactory) -[2023-11-03 19:45:15,935] INFO Using org.apache.zookeeper.server.watch.WatchManager as watch manager (org.apache.zookeeper.server.watch.WatchManagerFactory) -[2023-11-03 19:45:15,936] INFO Using org.apache.zookeeper.server.watch.WatchManager as watch manager (org.apache.zookeeper.server.watch.WatchManagerFactory) -[2023-11-03 19:45:15,936] INFO zookeeper.snapshotSizeFactor = 0.33 (org.apache.zookeeper.server.ZKDatabase) -[2023-11-03 19:45:15,936] INFO zookeeper.commitLogCount=500 (org.apache.zookeeper.server.ZKDatabase) -[2023-11-03 19:45:15,938] INFO zookeeper.snapshot.compression.method = CHECKED (org.apache.zookeeper.server.persistence.SnapStream) -[2023-11-03 19:45:15,938] INFO Reading snapshot /tmp/zookeeper/version-2/snapshot.0 (org.apache.zookeeper.server.persistence.FileSnap) -[2023-11-03 19:45:15,940] INFO The digest value is empty in snapshot (org.apache.zookeeper.server.DataTree) -[2023-11-03 19:45:15,971] INFO ZooKeeper audit is disabled. 
(org.apache.zookeeper.audit.ZKAuditProvider) -[2023-11-03 19:45:15,971] INFO 140 txns loaded in 26 ms (org.apache.zookeeper.server.persistence.FileTxnSnapLog) -[2023-11-03 19:45:15,971] INFO Snapshot loaded in 35 ms, highest zxid is 0x8c, digest is 307986004310 (org.apache.zookeeper.server.ZKDatabase) -[2023-11-03 19:45:15,972] INFO Snapshotting: 0x8c to /tmp/zookeeper/version-2/snapshot.8c (org.apache.zookeeper.server.persistence.FileTxnSnapLog) -[2023-11-03 19:45:15,974] INFO Snapshot taken in 2 ms (org.apache.zookeeper.server.ZooKeeperServer) -[2023-11-03 19:45:15,983] INFO zookeeper.request_throttler.shutdownTimeout = 10000 ms (org.apache.zookeeper.server.RequestThrottler) -[2023-11-03 19:45:15,983] INFO PrepRequestProcessor (sid:0) started, reconfigEnabled=false (org.apache.zookeeper.server.PrepRequestProcessor) -[2023-11-03 19:45:16,000] INFO Using checkIntervalMs=60000 maxPerMinute=10000 maxNeverUsedIntervalMs=0 (org.apache.zookeeper.server.ContainerManager) -[2023-11-03 19:45:28,280] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$) -[2023-11-03 19:45:28,451] INFO Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation (org.apache.zookeeper.common.X509Util) -[2023-11-03 19:45:28,513] INFO Registered signal handlers for TERM, INT, HUP (org.apache.kafka.common.utils.LoggingSignalHandler) -[2023-11-03 19:45:28,515] INFO starting (kafka.server.KafkaServer) -[2023-11-03 19:45:28,515] INFO Connecting to zookeeper on localhost:2181 (kafka.server.KafkaServer) -[2023-11-03 19:45:28,523] INFO [ZooKeeperClient Kafka server] Initializing a new session to localhost:2181. (kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:45:28,528] INFO Client environment:zookeeper.version=3.8.2-139d619b58292d7734b4fc83a0f44be4e7b0c986, built on 2023-07-05 19:24 UTC (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:host.name=ThinkPadP53 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:java.version=17.0.6 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:java.vendor=Eclipse Adoptium (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:java.home=/opt/openjdk-bin-17.0.6_p10 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client 
environment:java.class.path=/scratch/Exemple_Kafka/bin/../libs/activation-1.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/aopalliance-repackaged-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/argparse4j-0.7.0.jar:/scratch/Exemple_Kafka/bin/../libs/audience-annotations-0.12.0.jar:/scratch/Exemple_Kafka/bin/../libs/caffeine-2.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/checker-qual-3.19.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-beanutils-1.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-cli-1.4.jar:/scratch/Exemple_Kafka/bin/../libs/commons-collections-3.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-digester-2.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-io-2.11.0.jar:/scratch/Exemple_Kafka/bin/../libs/commons-lang3-3.8.1.jar:/scratch/Exemple_Kafka/bin/../libs/commons-logging-1.2.jar:/scratch/Exemple_Kafka/bin/../libs/commons-validator-1.7.jar:/scratch/Exemple_Kafka/bin/../libs/connect-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-basic-auth-extension-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-json-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-mirror-client-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-runtime-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/connect-transforms-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/error_prone_annotations-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-api-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-locator-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/hk2-utils-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-core-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-databind-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-dataformat-csv-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-datatype-jdk8-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-base-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-jaxrs-json-provider-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-jaxb-annotations-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jackson-module-scala_2.13-2.13.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.activation-api-1.2.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.annotation-api-1.3.5.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.inject-2.6.1.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.validation-api-2.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.ws.rs-api-2.1.6.jar:/scratch/Exemple_Kafka/bin/../libs/jakarta.xml.bind-api-2.3.3.jar:/scratch/Exemple_Kafka/bin/../libs/javassist-3.29.2-GA.jar:/scratch/Exemple_Kafka/bin/../libs/javax.activation-api-1.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.annotation-api-1.3.2.jar:/scratch/Exemple_Kafka/bin/../libs/javax.servlet-api-3.1.0.jar:/scratch/Exemple_Kafka/bin/../libs/javax.ws.rs-api-2.1.1.jar:/scratch/Exemple_Kafka/bin/../libs/jaxb-api-2.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-client-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-common-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-container-servlet-core-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-hk2-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jersey-server-2.39.1.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-client-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-continuation-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-http-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-
io-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-security-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-server-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlet-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-servlets-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jetty-util-ajax-9.4.52.v20230823.jar:/scratch/Exemple_Kafka/bin/../libs/jline-3.22.0.jar:/scratch/Exemple_Kafka/bin/../libs/jopt-simple-5.0.4.jar:/scratch/Exemple_Kafka/bin/../libs/jose4j-0.9.3.jar:/scratch/Exemple_Kafka/bin/../libs/jsr305-3.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-clients-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-group-coordinator-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-log4j-appender-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-metadata-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-raft-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-server-common-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-shell-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-storage-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-examples-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-scala_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-streams-test-utils-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka-tools-api-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/kafka_2.13-3.6.0.jar:/scratch/Exemple_Kafka/bin/../libs/lz4-java-1.8.0.jar:/scratch/Exemple_Kafka/bin/../libs/maven-artifact-3.8.8.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-2.2.0.jar:/scratch/Exemple_Kafka/bin/../libs/metrics-core-4.1.12.1.jar:/scratch/Exemple_Kafka/bin/../libs/netty-buffer-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-codec-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-handler-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-resolver-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-classes-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-epoll-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/netty-transport-native-unix-common-4.1.94.Final.jar:/scratch/Exemple_Kafka/bin/../libs/osgi-resource-locator-1.0.3.jar:/scratch/Exemple_Kafka/bin/../libs/paranamer-2.8.jar:/scratch/Exemple_Kafka/bin/../libs/pcollections-4.0.1.jar:/scratch/Exemple_Kafka/bin/../libs/plexus-utils-3.3.1.jar:/scratch/Exemple_Kafka/bin/../libs/reflections-0.10.2.jar:/scratch/Exemple_Kafka/bin/../libs/reload4j-1.2.25.jar:/scratch/Exemple_Kafka/bin/../libs/rocksdbjni-7.9.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-collection-compat_2.13-2.10.0.jar:/scratch/Exemple_Kafka/bin/../libs/scala-java8-compat_2.13-1.0.2.jar:/scratch/Exemple_Kafka/bin/../libs/scala-library-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/scala-logging_2.13-3.9.4.jar:/scratch/Exemple_Kafka/bin/../libs/scala-reflect-2.13.11.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-api-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/slf4j-reload4j-1.7.36.jar:/scratch/Exemple_Kafka/bin/../libs/snappy-java-1.1.10.4.jar:/scratch/Exemple_Kafka/bin/../libs/swagger-annotations-2.2.8.jar:/scratch/Exemple_Kafka/bin/../libs/trogdor-3.6.0
.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zookeeper-jute-3.8.2.jar:/scratch/Exemple_Kafka/bin/../libs/zstd-jni-1.5.5-1.jar (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:java.compiler= (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,529] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:os.version=6.4.3-cachyosGentooThinkPadP53 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:user.name=memartel (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:user.home=/home/memartel (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:user.dir=/scratch/Exemple_Kafka (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:os.memory.free=987MB (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:os.memory.max=1024MB (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,530] INFO Client environment:os.memory.total=1024MB (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,532] INFO Initiating client connection, connectString=localhost:2181 sessionTimeout=18000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@3fce8fd9 (org.apache.zookeeper.ZooKeeper) -[2023-11-03 19:45:28,535] INFO jute.maxbuffer value is 4194304 Bytes (org.apache.zookeeper.ClientCnxnSocket) -[2023-11-03 19:45:28,540] INFO zookeeper.request.timeout value is 0. feature enabled=false (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:45:28,542] INFO [ZooKeeperClient Kafka server] Waiting until connected. (kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:45:28,542] INFO Opening socket connection to server localhost/[0:0:0:0:0:0:0:1]:2181. (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:45:28,544] INFO Socket connection established, initiating session, client: /[0:0:0:0:0:0:0:1]:60254, server: localhost/[0:0:0:0:0:0:0:1]:2181 (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:45:28,550] INFO Creating new log file: log.8d (org.apache.zookeeper.server.persistence.FileTxnLog) -[2023-11-03 19:45:28,553] INFO Session establishment complete on server localhost/[0:0:0:0:0:0:0:1]:2181, session id = 0x100008b139a0000, negotiated timeout = 18000 (org.apache.zookeeper.ClientCnxn) -[2023-11-03 19:45:28,555] INFO [ZooKeeperClient Kafka server] Connected. 
(kafka.zookeeper.ZooKeeperClient) -[2023-11-03 19:45:28,704] INFO Cluster ID = 8EdUX-SkTwmB2aSLdVSXoQ (kafka.server.KafkaServer) -[2023-11-03 19:45:28,753] INFO KafkaConfig values: - advertised.listeners = null - alter.config.policy.class.name = null - alter.log.dirs.replication.quota.window.num = 11 - alter.log.dirs.replication.quota.window.size.seconds = 1 - authorizer.class.name = - auto.create.topics.enable = true - auto.include.jmx.reporter = true - auto.leader.rebalance.enable = true - background.threads = 10 - broker.heartbeat.interval.ms = 2000 - broker.id = 0 - broker.id.generation.enable = true - broker.rack = null - broker.session.timeout.ms = 9000 - client.quota.callback.class = null - compression.type = producer - connection.failed.authentication.delay.ms = 100 - connections.max.idle.ms = 600000 - connections.max.reauth.ms = 0 - control.plane.listener.name = null - controlled.shutdown.enable = true - controlled.shutdown.max.retries = 3 - controlled.shutdown.retry.backoff.ms = 5000 - controller.listener.names = null - controller.quorum.append.linger.ms = 25 - controller.quorum.election.backoff.max.ms = 1000 - controller.quorum.election.timeout.ms = 1000 - controller.quorum.fetch.timeout.ms = 2000 - controller.quorum.request.timeout.ms = 2000 - controller.quorum.retry.backoff.ms = 20 - controller.quorum.voters = [] - controller.quota.window.num = 11 - controller.quota.window.size.seconds = 1 - controller.socket.timeout.ms = 30000 - create.topic.policy.class.name = null - default.replication.factor = 1 - delegation.token.expiry.check.interval.ms = 3600000 - delegation.token.expiry.time.ms = 86400000 - delegation.token.master.key = null - delegation.token.max.lifetime.ms = 604800000 - delegation.token.secret.key = null - delete.records.purgatory.purge.interval.requests = 1 - delete.topic.enable = true - early.start.listeners = null - fetch.max.bytes = 57671680 - fetch.purgatory.purge.interval.requests = 1000 - group.consumer.assignors = [org.apache.kafka.coordinator.group.assignor.RangeAssignor] - group.consumer.heartbeat.interval.ms = 5000 - group.consumer.max.heartbeat.interval.ms = 15000 - group.consumer.max.session.timeout.ms = 60000 - group.consumer.max.size = 2147483647 - group.consumer.min.heartbeat.interval.ms = 5000 - group.consumer.min.session.timeout.ms = 45000 - group.consumer.session.timeout.ms = 45000 - group.coordinator.new.enable = false - group.coordinator.threads = 1 - group.initial.rebalance.delay.ms = 0 - group.max.session.timeout.ms = 1800000 - group.max.size = 2147483647 - group.min.session.timeout.ms = 6000 - initial.broker.registration.timeout.ms = 60000 - inter.broker.listener.name = null - inter.broker.protocol.version = 3.6-IV2 - kafka.metrics.polling.interval.secs = 10 - kafka.metrics.reporters = [] - leader.imbalance.check.interval.seconds = 300 - leader.imbalance.per.broker.percentage = 10 - listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL - listeners = PLAINTEXT://:9092 - log.cleaner.backoff.ms = 15000 - log.cleaner.dedupe.buffer.size = 134217728 - log.cleaner.delete.retention.ms = 86400000 - log.cleaner.enable = true - log.cleaner.io.buffer.load.factor = 0.9 - log.cleaner.io.buffer.size = 524288 - log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308 - log.cleaner.max.compaction.lag.ms = 9223372036854775807 - log.cleaner.min.cleanable.ratio = 0.5 - log.cleaner.min.compaction.lag.ms = 0 - log.cleaner.threads = 1 - log.cleanup.policy = [delete] - log.dir = /tmp/kafka-logs - log.dirs = 
/tmp/kafka-logs - log.flush.interval.messages = 9223372036854775807 - log.flush.interval.ms = null - log.flush.offset.checkpoint.interval.ms = 60000 - log.flush.scheduler.interval.ms = 9223372036854775807 - log.flush.start.offset.checkpoint.interval.ms = 60000 - log.index.interval.bytes = 4096 - log.index.size.max.bytes = 10485760 - log.local.retention.bytes = -2 - log.local.retention.ms = -2 - log.message.downconversion.enable = true - log.message.format.version = 3.0-IV1 - log.message.timestamp.after.max.ms = 9223372036854775807 - log.message.timestamp.before.max.ms = 9223372036854775807 - log.message.timestamp.difference.max.ms = 9223372036854775807 - log.message.timestamp.type = CreateTime - log.preallocate = false - log.retention.bytes = -1 - log.retention.check.interval.ms = 300000 - log.retention.hours = 168 - log.retention.minutes = null - log.retention.ms = null - log.roll.hours = 168 - log.roll.jitter.hours = 0 - log.roll.jitter.ms = null - log.roll.ms = null - log.segment.bytes = 1073741824 - log.segment.delete.delay.ms = 60000 - max.connection.creation.rate = 2147483647 - max.connections = 2147483647 - max.connections.per.ip = 2147483647 - max.connections.per.ip.overrides = - max.incremental.fetch.session.cache.slots = 1000 - message.max.bytes = 1048588 - metadata.log.dir = null - metadata.log.max.record.bytes.between.snapshots = 20971520 - metadata.log.max.snapshot.interval.ms = 3600000 - metadata.log.segment.bytes = 1073741824 - metadata.log.segment.min.bytes = 8388608 - metadata.log.segment.ms = 604800000 - metadata.max.idle.interval.ms = 500 - metadata.max.retention.bytes = 104857600 - metadata.max.retention.ms = 604800000 - metric.reporters = [] - metrics.num.samples = 2 - metrics.recording.level = INFO - metrics.sample.window.ms = 30000 - min.insync.replicas = 1 - node.id = 0 - num.io.threads = 8 - num.network.threads = 3 - num.partitions = 1 - num.recovery.threads.per.data.dir = 1 - num.replica.alter.log.dirs.threads = null - num.replica.fetchers = 1 - offset.metadata.max.bytes = 4096 - offsets.commit.required.acks = -1 - offsets.commit.timeout.ms = 5000 - offsets.load.buffer.size = 5242880 - offsets.retention.check.interval.ms = 600000 - offsets.retention.minutes = 10080 - offsets.topic.compression.codec = 0 - offsets.topic.num.partitions = 50 - offsets.topic.replication.factor = 1 - offsets.topic.segment.bytes = 104857600 - password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding - password.encoder.iterations = 4096 - password.encoder.key.length = 128 - password.encoder.keyfactory.algorithm = null - password.encoder.old.secret = null - password.encoder.secret = null - principal.builder.class = class org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder - process.roles = [] - producer.id.expiration.check.interval.ms = 600000 - producer.id.expiration.ms = 86400000 - producer.purgatory.purge.interval.requests = 1000 - queued.max.request.bytes = -1 - queued.max.requests = 500 - quota.window.num = 11 - quota.window.size.seconds = 1 - remote.log.index.file.cache.total.size.bytes = 1073741824 - remote.log.manager.task.interval.ms = 30000 - remote.log.manager.task.retry.backoff.max.ms = 30000 - remote.log.manager.task.retry.backoff.ms = 500 - remote.log.manager.task.retry.jitter = 0.2 - remote.log.manager.thread.pool.size = 10 - remote.log.metadata.custom.metadata.max.bytes = 128 - remote.log.metadata.manager.class.name = org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManager - remote.log.metadata.manager.class.path = null 
- remote.log.metadata.manager.impl.prefix = rlmm.config. - remote.log.metadata.manager.listener.name = null - remote.log.reader.max.pending.tasks = 100 - remote.log.reader.threads = 10 - remote.log.storage.manager.class.name = null - remote.log.storage.manager.class.path = null - remote.log.storage.manager.impl.prefix = rsm.config. - remote.log.storage.system.enable = false - replica.fetch.backoff.ms = 1000 - replica.fetch.max.bytes = 1048576 - replica.fetch.min.bytes = 1 - replica.fetch.response.max.bytes = 10485760 - replica.fetch.wait.max.ms = 500 - replica.high.watermark.checkpoint.interval.ms = 5000 - replica.lag.time.max.ms = 30000 - replica.selector.class = null - replica.socket.receive.buffer.bytes = 65536 - replica.socket.timeout.ms = 30000 - replication.quota.window.num = 11 - replication.quota.window.size.seconds = 1 - request.timeout.ms = 30000 - reserved.broker.max.id = 1000 - sasl.client.callback.handler.class = null - sasl.enabled.mechanisms = [GSSAPI] - sasl.jaas.config = null - sasl.kerberos.kinit.cmd = /usr/bin/kinit - sasl.kerberos.min.time.before.relogin = 60000 - sasl.kerberos.principal.to.local.rules = [DEFAULT] - sasl.kerberos.service.name = null - sasl.kerberos.ticket.renew.jitter = 0.05 - sasl.kerberos.ticket.renew.window.factor = 0.8 - sasl.login.callback.handler.class = null - sasl.login.class = null - sasl.login.connect.timeout.ms = null - sasl.login.read.timeout.ms = null - sasl.login.refresh.buffer.seconds = 300 - sasl.login.refresh.min.period.seconds = 60 - sasl.login.refresh.window.factor = 0.8 - sasl.login.refresh.window.jitter = 0.05 - sasl.login.retry.backoff.max.ms = 10000 - sasl.login.retry.backoff.ms = 100 - sasl.mechanism.controller.protocol = GSSAPI - sasl.mechanism.inter.broker.protocol = GSSAPI - sasl.oauthbearer.clock.skew.seconds = 30 - sasl.oauthbearer.expected.audience = null - sasl.oauthbearer.expected.issuer = null - sasl.oauthbearer.jwks.endpoint.refresh.ms = 3600000 - sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms = 10000 - sasl.oauthbearer.jwks.endpoint.retry.backoff.ms = 100 - sasl.oauthbearer.jwks.endpoint.url = null - sasl.oauthbearer.scope.claim.name = scope - sasl.oauthbearer.sub.claim.name = sub - sasl.oauthbearer.token.endpoint.url = null - sasl.server.callback.handler.class = null - sasl.server.max.receive.size = 524288 - security.inter.broker.protocol = PLAINTEXT - security.providers = null - server.max.startup.time.ms = 9223372036854775807 - socket.connection.setup.timeout.max.ms = 30000 - socket.connection.setup.timeout.ms = 10000 - socket.listen.backlog.size = 50 - socket.receive.buffer.bytes = 102400 - socket.request.max.bytes = 104857600 - socket.send.buffer.bytes = 102400 - ssl.cipher.suites = [] - ssl.client.auth = none - ssl.enabled.protocols = [TLSv1.2, TLSv1.3] - ssl.endpoint.identification.algorithm = https - ssl.engine.factory.class = null - ssl.key.password = null - ssl.keymanager.algorithm = SunX509 - ssl.keystore.certificate.chain = null - ssl.keystore.key = null - ssl.keystore.location = null - ssl.keystore.password = null - ssl.keystore.type = JKS - ssl.principal.mapping.rules = DEFAULT - ssl.protocol = TLSv1.3 - ssl.provider = null - ssl.secure.random.implementation = null - ssl.trustmanager.algorithm = PKIX - ssl.truststore.certificates = null - ssl.truststore.location = null - ssl.truststore.password = null - ssl.truststore.type = JKS - transaction.abort.timed.out.transaction.cleanup.interval.ms = 10000 - transaction.max.timeout.ms = 900000 - transaction.partition.verification.enable = true - 
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000 - transaction.state.log.load.buffer.size = 5242880 - transaction.state.log.min.isr = 1 - transaction.state.log.num.partitions = 50 - transaction.state.log.replication.factor = 1 - transaction.state.log.segment.bytes = 104857600 - transactional.id.expiration.ms = 604800000 - unclean.leader.election.enable = false - unstable.api.versions.enable = false - zookeeper.clientCnxnSocket = null - zookeeper.connect = localhost:2181 - zookeeper.connection.timeout.ms = 18000 - zookeeper.max.in.flight.requests = 10 - zookeeper.metadata.migration.enable = false - zookeeper.session.timeout.ms = 18000 - zookeeper.set.acl = false - zookeeper.ssl.cipher.suites = null - zookeeper.ssl.client.enable = false - zookeeper.ssl.crl.enable = false - zookeeper.ssl.enabled.protocols = null - zookeeper.ssl.endpoint.identification.algorithm = HTTPS - zookeeper.ssl.keystore.location = null - zookeeper.ssl.keystore.password = null - zookeeper.ssl.keystore.type = null - zookeeper.ssl.ocsp.enable = false - zookeeper.ssl.protocol = TLSv1.2 - zookeeper.ssl.truststore.location = null - zookeeper.ssl.truststore.password = null - zookeeper.ssl.truststore.type = null - (kafka.server.KafkaConfig) -[2023-11-03 19:45:28,782] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:45:28,782] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:45:28,783] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:45:28,784] INFO [ThrottledChannelReaper-ControllerMutation]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper) -[2023-11-03 19:45:28,816] INFO Loading logs from log dirs ArraySeq(/tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,825] INFO Skipping recovery of 51 logs from /tmp/kafka-logs since clean shutdown file was found (kafka.log.LogManager) -[2023-11-03 19:45:28,862] INFO [LogLoader partition=test-topic-0, dir=/tmp/kafka-logs] Loading producer state till offset 5 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,870] INFO [LogLoader partition=test-topic-0, dir=/tmp/kafka-logs] Reloading from producer snapshot and rebuilding producer state from offset 5 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,870] INFO [ProducerStateManager partition=test-topic-0]Loading producer state from snapshot file 'SnapshotFile(offset=5, file=/tmp/kafka-logs/test-topic-0/00000000000000000005.snapshot)' (org.apache.kafka.storage.internals.log.ProducerStateManager) -[2023-11-03 19:45:28,880] INFO [LogLoader partition=test-topic-0, dir=/tmp/kafka-logs] Producer state recovery took 10ms for snapshot load and 0ms for segment recovery from offset 5 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,894] INFO Completed load of Log(dir=/tmp/kafka-logs/test-topic-0, topicId=Pb9zfnlKRkmTGaMQyAABkw, topic=test-topic, partition=0, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=5) with 1 segments, local-log-start-offset 0 and log-end-offset 5 in 61ms (1/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,897] INFO [LogLoader partition=__consumer_offsets-28, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,899] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-28, topicId=vfIIZeo7TYKNeTmKGzMlyg, 
topic=__consumer_offsets, partition=28, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 5ms (2/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,901] INFO [LogLoader partition=__consumer_offsets-13, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,903] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-13, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=13, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (3/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,905] INFO [LogLoader partition=__consumer_offsets-43, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,907] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-43, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=43, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (4/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,911] INFO [LogLoader partition=__consumer_offsets-6, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,913] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-6, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=6, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 6ms (5/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,915] INFO [LogLoader partition=__consumer_offsets-36, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,917] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-36, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=36, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (6/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,919] INFO [LogLoader partition=__consumer_offsets-21, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,920] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-21, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=21, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (7/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,922] INFO [LogLoader partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Loading producer state till offset 7 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,922] INFO [LogLoader partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Reloading from producer snapshot and rebuilding producer state from offset 7 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,922] INFO [ProducerStateManager partition=__consumer_offsets-12]Loading producer state from snapshot file 
'SnapshotFile(offset=7, file=/tmp/kafka-logs/__consumer_offsets-12/00000000000000000007.snapshot)' (org.apache.kafka.storage.internals.log.ProducerStateManager) -[2023-11-03 19:45:28,923] INFO [LogLoader partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Producer state recovery took 1ms for snapshot load and 0ms for segment recovery from offset 7 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,924] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-12, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=12, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=7) with 1 segments, local-log-start-offset 0 and log-end-offset 7 in 4ms (8/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,927] INFO [LogLoader partition=__consumer_offsets-42, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,928] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-42, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=42, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (9/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,930] INFO [LogLoader partition=__consumer_offsets-27, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,932] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-27, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=27, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (10/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,934] INFO [LogLoader partition=__consumer_offsets-20, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,935] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-20, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=20, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (11/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,937] INFO [LogLoader partition=__consumer_offsets-5, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,938] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-5, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=5, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (12/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,940] INFO [LogLoader partition=__consumer_offsets-35, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,942] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-35, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=35, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (13/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 
19:45:28,944] INFO [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,946] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-0, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=0, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (14/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,948] INFO [LogLoader partition=__consumer_offsets-30, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,949] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-30, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=30, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (15/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,951] INFO [LogLoader partition=__consumer_offsets-15, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,952] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-15, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=15, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (16/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,954] INFO [LogLoader partition=__consumer_offsets-45, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,955] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-45, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=45, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (17/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,958] INFO [LogLoader partition=__consumer_offsets-8, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,960] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-8, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=8, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 5ms (18/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,962] INFO [LogLoader partition=__consumer_offsets-38, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,963] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-38, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=38, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (19/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,965] INFO [LogLoader partition=__consumer_offsets-23, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,966] INFO 
Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-23, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=23, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (20/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,968] INFO [LogLoader partition=__consumer_offsets-14, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,969] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-14, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=14, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (21/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,970] INFO [LogLoader partition=__consumer_offsets-44, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,971] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-44, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=44, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (22/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,973] INFO [LogLoader partition=__consumer_offsets-29, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,975] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-29, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=29, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (23/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,978] INFO [LogLoader partition=__consumer_offsets-22, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,980] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-22, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=22, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 5ms (24/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,981] INFO [LogLoader partition=__consumer_offsets-7, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,982] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-7, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=7, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (25/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,984] INFO [LogLoader partition=__consumer_offsets-37, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,985] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-37, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=37, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) 
with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (26/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,987] INFO [LogLoader partition=__consumer_offsets-32, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,987] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-32, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=32, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (27/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,989] INFO [LogLoader partition=__consumer_offsets-17, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,991] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-17, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=17, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (28/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,995] INFO [LogLoader partition=__consumer_offsets-47, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,996] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-47, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=47, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (29/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:28,998] INFO [LogLoader partition=__consumer_offsets-40, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:28,999] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-40, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=40, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (30/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,001] INFO [LogLoader partition=__consumer_offsets-25, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,002] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-25, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=25, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (31/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,004] INFO [LogLoader partition=__consumer_offsets-2, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,005] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-2, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=2, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (32/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,006] INFO [LogLoader partition=__consumer_offsets-16, 
dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,008] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-16, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=16, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (33/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,011] INFO [LogLoader partition=__consumer_offsets-1, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,012] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-1, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=1, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 5ms (34/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,014] INFO [LogLoader partition=__consumer_offsets-46, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,016] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-46, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=46, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (35/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,017] INFO [LogLoader partition=__consumer_offsets-31, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,018] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-31, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=31, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (36/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,019] INFO [LogLoader partition=__consumer_offsets-24, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,020] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-24, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=24, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (37/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,022] INFO [LogLoader partition=__consumer_offsets-9, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,023] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-9, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=9, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (38/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,026] INFO [LogLoader partition=__consumer_offsets-39, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,027] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-39, 
topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=39, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (39/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,029] INFO [LogLoader partition=__consumer_offsets-49, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,030] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-49, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=49, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (40/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,032] INFO [LogLoader partition=__consumer_offsets-26, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,033] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-26, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=26, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (41/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,035] INFO [LogLoader partition=__consumer_offsets-11, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,036] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-11, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=11, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 3ms (42/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,037] INFO [LogLoader partition=__consumer_offsets-4, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,038] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-4, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=4, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (43/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,040] INFO [LogLoader partition=__consumer_offsets-34, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,042] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-34, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=34, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 4ms (44/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,044] INFO [LogLoader partition=__consumer_offsets-19, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,045] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-19, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=19, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 
2ms (45/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,046] INFO [LogLoader partition=__consumer_offsets-48, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,046] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-48, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=48, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (46/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,047] INFO [LogLoader partition=__consumer_offsets-33, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,048] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-33, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=33, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 1ms (47/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,049] INFO [LogLoader partition=__consumer_offsets-10, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,050] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-10, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=10, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (48/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,051] INFO [LogLoader partition=__consumer_offsets-41, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,052] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-41, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=41, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (49/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,053] INFO [LogLoader partition=__consumer_offsets-18, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,053] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-18, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=18, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 2ms (50/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,054] INFO [LogLoader partition=__consumer_offsets-3, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) -[2023-11-03 19:45:29,055] INFO Completed load of Log(dir=/tmp/kafka-logs/__consumer_offsets-3, topicId=vfIIZeo7TYKNeTmKGzMlyg, topic=__consumer_offsets, partition=3, highWatermark=0, lastStableOffset=0, logStartOffset=0, logEndOffset=0) with 1 segments, local-log-start-offset 0 and log-end-offset 0 in 1ms (51/51 completed in /tmp/kafka-logs) (kafka.log.LogManager) -[2023-11-03 19:45:29,058] INFO Loaded 51 logs in 241ms (kafka.log.LogManager) -[2023-11-03 19:45:29,060] INFO Starting log cleanup with a period 
of 300000 ms. (kafka.log.LogManager) -[2023-11-03 19:45:29,060] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager) -[2023-11-03 19:45:29,089] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner$CleanerThread) -[2023-11-03 19:45:29,102] INFO [feature-zk-node-event-process-thread]: Starting (kafka.server.FinalizedFeatureChangeListener$ChangeNotificationProcessorThread) -[2023-11-03 19:45:29,115] INFO [MetadataCache brokerId=0] Updated cache from existing None to latest Features(version=3.6-IV2, finalizedFeatures={}, finalizedFeaturesEpoch=0). (kafka.server.metadata.ZkMetadataCache) -[2023-11-03 19:45:29,131] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Starting (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:45:29,305] INFO Updated connection-accept-rate max connection creation rate to 2147483647 (kafka.network.ConnectionQuotas) -[2023-11-03 19:45:29,317] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Created data-plane acceptor and processors for endpoint : ListenerName(PLAINTEXT) (kafka.network.SocketServer) -[2023-11-03 19:45:29,319] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Starting (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:45:29,335] INFO [ExpirationReaper-0-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,336] INFO [ExpirationReaper-0-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,336] INFO [ExpirationReaper-0-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,336] INFO [ExpirationReaper-0-ElectLeader]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,337] INFO [ExpirationReaper-0-RemoteFetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,346] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler) -[2023-11-03 19:45:29,346] INFO [AddPartitionsToTxnSenderThread-0]: Starting (kafka.server.AddPartitionsToTxnManager) -[2023-11-03 19:45:29,373] INFO Creating /brokers/ids/0 (is it secure? false) (kafka.zk.KafkaZkClient) -[2023-11-03 19:45:29,390] INFO Stat of the created znode at /brokers/ids/0 is: 156,156,1699055129385,1699055129385,1,0,0,72058191367241728,206,0,156 - (kafka.zk.KafkaZkClient) -[2023-11-03 19:45:29,390] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://ThinkPadP53:9092, czxid (broker epoch): 156 (kafka.zk.KafkaZkClient) -[2023-11-03 19:45:29,432] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,436] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,437] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,451] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,462] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,473] INFO [TransactionCoordinator id=0] Starting up. 
(kafka.coordinator.transaction.TransactionCoordinator) -[2023-11-03 19:45:29,479] INFO [TxnMarkerSenderThread-0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) -[2023-11-03 19:45:29,479] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator) -[2023-11-03 19:45:29,515] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) -[2023-11-03 19:45:29,536] INFO [Controller id=0, targetBrokerId=0] Node 0 disconnected. (org.apache.kafka.clients.NetworkClient) -[2023-11-03 19:45:29,538] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) -[2023-11-03 19:45:29,538] WARN [Controller id=0, targetBrokerId=0] Connection to node 0 (ThinkPadP53/192.168.37.3:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) -[2023-11-03 19:45:29,541] INFO [Controller id=0, targetBrokerId=0] Client requested connection close from node 0 (org.apache.kafka.clients.NetworkClient) -[2023-11-03 19:45:29,559] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Enabling request processing. (kafka.network.SocketServer) -[2023-11-03 19:45:29,562] INFO Awaiting socket connections on 0.0.0.0:9092. (kafka.network.DataPlaneAcceptor) -[2023-11-03 19:45:29,567] INFO Kafka version: 3.6.0 (org.apache.kafka.common.utils.AppInfoParser) -[2023-11-03 19:45:29,567] INFO Kafka commitId: 60e845626d8a465a (org.apache.kafka.common.utils.AppInfoParser) -[2023-11-03 19:45:29,567] INFO Kafka startTimeMs: 1699055129564 (org.apache.kafka.common.utils.AppInfoParser) -[2023-11-03 19:45:29,568] INFO [KafkaServer id=0] started (kafka.server.KafkaServer) -[2023-11-03 19:45:29,710] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(__consumer_offsets-22, __consumer_offsets-30, __consumer_offsets-25, __consumer_offsets-35, __consumer_offsets-37, __consumer_offsets-38, __consumer_offsets-13, test-topic-0, __consumer_offsets-8, __consumer_offsets-21, __consumer_offsets-4, __consumer_offsets-27, __consumer_offsets-7, __consumer_offsets-9, __consumer_offsets-46, __consumer_offsets-41, __consumer_offsets-33, __consumer_offsets-23, __consumer_offsets-49, __consumer_offsets-47, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, __consumer_offsets-17, __consumer_offsets-48, __consumer_offsets-19, __consumer_offsets-11, __consumer_offsets-2, __consumer_offsets-43, __consumer_offsets-6, __consumer_offsets-14, __consumer_offsets-20, __consumer_offsets-0, __consumer_offsets-44, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45, __consumer_offsets-1, __consumer_offsets-5, __consumer_offsets-26, __consumer_offsets-29, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager) -[2023-11-03 19:45:29,717] INFO [Partition __consumer_offsets-3 broker=0] Log loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,721] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Recorded new controller, from now on will use node ThinkPadP53:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:45:29,721] INFO [Partition __consumer_offsets-18 broker=0] Log 
loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,722] INFO [Partition __consumer_offsets-41 broker=0] Log loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,722] INFO [Partition __consumer_offsets-10 broker=0] Log loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,722] INFO [Partition __consumer_offsets-33 broker=0] Log loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,723] INFO [Partition __consumer_offsets-48 broker=0] Log loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,723] INFO [Partition __consumer_offsets-19 broker=0] Log loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,723] INFO [Partition __consumer_offsets-34 broker=0] Log loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,724] INFO [Partition __consumer_offsets-4 broker=0] Log loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,724] INFO [Partition __consumer_offsets-11 broker=0] Log loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,724] INFO [Partition __consumer_offsets-26 broker=0] Log loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,724] INFO [Partition __consumer_offsets-49 broker=0] Log loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,725] INFO [Partition __consumer_offsets-39 broker=0] Log loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,725] INFO [Partition __consumer_offsets-9 broker=0] Log loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,725] INFO [Partition __consumer_offsets-24 broker=0] Log loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,726] INFO [Partition __consumer_offsets-31 broker=0] Log loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,726] INFO [Partition __consumer_offsets-46 broker=0] Log loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,726] INFO [Partition __consumer_offsets-1 broker=0] Log loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,727] INFO [Partition __consumer_offsets-16 broker=0] Log loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,727] INFO [Partition __consumer_offsets-2 broker=0] Log loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,728] INFO [Partition __consumer_offsets-25 broker=0] Log loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,728] INFO [Partition __consumer_offsets-40 broker=0] Log 
loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,728] INFO [Partition __consumer_offsets-47 broker=0] Log loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,729] INFO [Partition __consumer_offsets-17 broker=0] Log loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,729] INFO [Partition __consumer_offsets-32 broker=0] Log loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,730] INFO [Partition __consumer_offsets-37 broker=0] Log loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,730] INFO [Partition __consumer_offsets-7 broker=0] Log loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,730] INFO [Partition __consumer_offsets-22 broker=0] Log loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,730] INFO [Partition __consumer_offsets-29 broker=0] Log loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,731] INFO [Partition __consumer_offsets-44 broker=0] Log loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,731] INFO [Partition __consumer_offsets-14 broker=0] Log loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,731] INFO [Partition __consumer_offsets-23 broker=0] Log loaded for partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,732] INFO [Partition __consumer_offsets-38 broker=0] Log loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,732] INFO [Partition __consumer_offsets-8 broker=0] Log loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,732] INFO [Partition __consumer_offsets-45 broker=0] Log loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,732] INFO [Partition __consumer_offsets-15 broker=0] Log loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,733] INFO [Partition __consumer_offsets-30 broker=0] Log loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,733] INFO [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,733] INFO [Partition __consumer_offsets-35 broker=0] Log loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,734] INFO [Partition __consumer_offsets-5 broker=0] Log loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,734] INFO [Partition __consumer_offsets-20 broker=0] Log loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,734] INFO 
[zk-broker-0-to-controller-forwarding-channel-manager]: Recorded new controller, from now on will use node ThinkPadP53:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread) -[2023-11-03 19:45:29,734] INFO [Partition __consumer_offsets-27 broker=0] Log loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,734] INFO [Partition __consumer_offsets-42 broker=0] Log loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,734] INFO [Partition __consumer_offsets-12 broker=0] Log loaded for partition __consumer_offsets-12 with initial high watermark 7 (kafka.cluster.Partition) -[2023-11-03 19:45:29,735] INFO [Partition __consumer_offsets-21 broker=0] Log loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,735] INFO [Partition test-topic-0 broker=0] Log loaded for partition test-topic-0 with initial high watermark 5 (kafka.cluster.Partition) -[2023-11-03 19:45:29,735] INFO [Partition __consumer_offsets-36 broker=0] Log loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,736] INFO [Partition __consumer_offsets-6 broker=0] Log loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,736] INFO [Partition __consumer_offsets-43 broker=0] Log loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,736] INFO [Partition __consumer_offsets-13 broker=0] Log loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,736] INFO [Partition __consumer_offsets-28 broker=0] Log loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Partition) -[2023-11-03 19:45:29,740] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 3 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,741] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-3 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 18 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-18 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 41 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-41 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 10 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 33 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 
19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-33 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 48 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-48 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 19 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 34 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 4 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 11 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-11 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 26 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-26 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 49 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 39 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-39 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,743] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 9 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,743] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-9 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 24 in epoch 0 
(kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-24 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 31 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-31 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 46 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-46 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 1 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 16 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 2 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-2 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 25 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 40 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 47 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-47 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 17 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-17 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group 
coordinator for partition 32 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-32 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 37 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-37 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 7 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,744] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 22 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,744] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 29 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-29 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 44 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-44 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 14 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-14 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 23 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-23 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 38 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-38 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 8 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-8 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO 
[GroupCoordinator 0]: Elected as the group coordinator for partition 45 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-45 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 15 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-15 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 30 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-30 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 0 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-0 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 35 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-35 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 5 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-5 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 20 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-20 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 27 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-27 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 42 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-42 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 12 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-12 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) 
-[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 21 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-21 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,745] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 36 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,745] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-36 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,746] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 6 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,746] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-6 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,746] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 43 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,746] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,746] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 13 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,746] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,746] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 28 in epoch 0 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,746] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-28 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,747] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-3 in 5 milliseconds for epoch 0, of which 2 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,747] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-18 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-41 in 5 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-33 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-48 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,748] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-11 in 6 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-26 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-39 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-9 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-24 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,749] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 6 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-2 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-47 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-17 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,750] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-32 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 7 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-29 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-44 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-14 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-23 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-38 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,751] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-8 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-45 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-15 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-30 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-0 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-35 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-5 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-20 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-27 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,752] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-42 in 7 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,771] INFO Loaded member MemberMetadata(memberId=rdkafka-d8d257cb-90fd-4aed-a191-0c8705f20f1a, groupInstanceId=None, clientId=rdkafka, clientHost=/192.168.37.3, sessionTimeoutMs=45000, rebalanceTimeoutMs=300000, supportedProtocols=List(range)) in group test-group with generation 1. 
(kafka.coordinator.group.GroupMetadata$) -[2023-11-03 19:45:29,781] INFO [GroupCoordinator 0]: Loading group metadata for test-group with generation 2 (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:29,782] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-12 in 37 milliseconds for epoch 0, of which 7 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,782] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-21 in 37 milliseconds for epoch 0, of which 37 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,782] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-36 in 36 milliseconds for epoch 0, of which 36 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,782] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-6 in 36 milliseconds for epoch 0, of which 36 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,782] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 36 milliseconds for epoch 0, of which 36 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,783] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 37 milliseconds for epoch 0, of which 36 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:29,783] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 37 milliseconds for epoch 0, of which 37 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) -[2023-11-03 19:45:47,781] INFO [GroupCoordinator 0]: Dynamic member with unknown member id joins group test-group in Empty state. Created a new member id rdkafka-cf033a01-fd53-46c7-9ac9-d92cbfe8c9d8 and request the member to rejoin with this id. (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:47,789] INFO [GroupCoordinator 0]: Preparing to rebalance group test-group in state PreparingRebalance with old generation 2 (__consumer_offsets-12) (reason: Adding new member rdkafka-cf033a01-fd53-46c7-9ac9-d92cbfe8c9d8 with group instance id None; client reason: not provided) (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:47,795] INFO [GroupCoordinator 0]: Stabilized group test-group generation 3 (__consumer_offsets-12) with 1 members (kafka.coordinator.group.GroupCoordinator) -[2023-11-03 19:45:47,801] INFO [GroupCoordinator 0]: Assignment received from leader rdkafka-cf033a01-fd53-46c7-9ac9-d92cbfe8c9d8 for group test-group for generation 3. The group has 1 members, 0 of which are static. 
(kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:26:31,925] INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT://localhost:9092, czxid (broker epoch): 25 (kafka.zk.KafkaZkClient) +[2023-11-06 13:26:31,966] INFO [ExpirationReaper-0-topic]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2023-11-06 13:26:31,971] INFO [ExpirationReaper-0-Heartbeat]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2023-11-06 13:26:31,972] INFO [ExpirationReaper-0-Rebalance]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2023-11-06 13:26:31,972] INFO Successfully created /controller_epoch with initial epoch 0 (kafka.zk.KafkaZkClient) +[2023-11-06 13:26:31,981] INFO Feature ZK node created at path: /feature (kafka.server.FinalizedFeatureChangeListener) +[2023-11-06 13:26:31,982] INFO [GroupCoordinator 0]: Starting up. (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:26:31,986] INFO [GroupCoordinator 0]: Startup complete. (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:26:31,995] INFO [TransactionCoordinator id=0] Starting up. (kafka.coordinator.transaction.TransactionCoordinator) +[2023-11-06 13:26:31,997] INFO [MetadataCache brokerId=0] Updated cache from existing None to latest Features(version=3.6-IV2, finalizedFeatures={}, finalizedFeaturesEpoch=0). (kafka.server.metadata.ZkMetadataCache) +[2023-11-06 13:26:31,998] INFO [TxnMarkerSenderThread-0]: Starting (kafka.coordinator.transaction.TransactionMarkerChannelManager) +[2023-11-06 13:26:31,998] INFO [TransactionCoordinator id=0] Startup complete. (kafka.coordinator.transaction.TransactionCoordinator) +[2023-11-06 13:26:32,022] INFO [ExpirationReaper-0-AlterAcls]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper) +[2023-11-06 13:26:32,049] INFO [/config/changes-event-process-thread]: Starting (kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread) +[2023-11-06 13:26:32,051] INFO [Controller id=0, targetBrokerId=0] Node 0 disconnected. (org.apache.kafka.clients.NetworkClient) +[2023-11-06 13:26:32,053] WARN [Controller id=0, targetBrokerId=0] Connection to node 0 (localhost/127.0.0.1:9092) could not be established. Broker may not be available. (org.apache.kafka.clients.NetworkClient) +[2023-11-06 13:26:32,055] INFO [Controller id=0, targetBrokerId=0] Client requested connection close from node 0 (org.apache.kafka.clients.NetworkClient) +[2023-11-06 13:26:32,056] INFO [SocketServer listenerType=ZK_BROKER, nodeId=0] Enabling request processing. (kafka.network.SocketServer) +[2023-11-06 13:26:32,058] INFO Awaiting socket connections on 0.0.0.0:9092. 
(kafka.network.DataPlaneAcceptor) +[2023-11-06 13:26:32,063] INFO Kafka version: 3.6.0 (org.apache.kafka.common.utils.AppInfoParser) +[2023-11-06 13:26:32,063] INFO Kafka commitId: 60e845626d8a465a (org.apache.kafka.common.utils.AppInfoParser) +[2023-11-06 13:26:32,063] INFO Kafka startTimeMs: 1699295192060 (org.apache.kafka.common.utils.AppInfoParser) +[2023-11-06 13:26:32,064] INFO [KafkaServer id=0] started (kafka.server.KafkaServer) +[2023-11-06 13:26:32,273] INFO [zk-broker-0-to-controller-alter-partition-channel-manager]: Recorded new controller, from now on will use node localhost:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread) +[2023-11-06 13:26:32,278] INFO [zk-broker-0-to-controller-forwarding-channel-manager]: Recorded new controller, from now on will use node localhost:9092 (id: 0 rack: null) (kafka.server.BrokerToControllerRequestThread) +[2023-11-06 13:27:16,491] INFO Creating topic __consumer_offsets with configuration {compression.type=producer, cleanup.policy=compact, segment.bytes=104857600} and initial partition assignment HashMap(0 -> ArrayBuffer(0), 1 -> ArrayBuffer(0), 2 -> ArrayBuffer(0), 3 -> ArrayBuffer(0), 4 -> ArrayBuffer(0), 5 -> ArrayBuffer(0), 6 -> ArrayBuffer(0), 7 -> ArrayBuffer(0), 8 -> ArrayBuffer(0), 9 -> ArrayBuffer(0), 10 -> ArrayBuffer(0), 11 -> ArrayBuffer(0), 12 -> ArrayBuffer(0), 13 -> ArrayBuffer(0), 14 -> ArrayBuffer(0), 15 -> ArrayBuffer(0), 16 -> ArrayBuffer(0), 17 -> ArrayBuffer(0), 18 -> ArrayBuffer(0), 19 -> ArrayBuffer(0), 20 -> ArrayBuffer(0), 21 -> ArrayBuffer(0), 22 -> ArrayBuffer(0), 23 -> ArrayBuffer(0), 24 -> ArrayBuffer(0), 25 -> ArrayBuffer(0), 26 -> ArrayBuffer(0), 27 -> ArrayBuffer(0), 28 -> ArrayBuffer(0), 29 -> ArrayBuffer(0), 30 -> ArrayBuffer(0), 31 -> ArrayBuffer(0), 32 -> ArrayBuffer(0), 33 -> ArrayBuffer(0), 34 -> ArrayBuffer(0), 35 -> ArrayBuffer(0), 36 -> ArrayBuffer(0), 37 -> ArrayBuffer(0), 38 -> ArrayBuffer(0), 39 -> ArrayBuffer(0), 40 -> ArrayBuffer(0), 41 -> ArrayBuffer(0), 42 -> ArrayBuffer(0), 43 -> ArrayBuffer(0), 44 -> ArrayBuffer(0), 45 -> ArrayBuffer(0), 46 -> ArrayBuffer(0), 47 -> ArrayBuffer(0), 48 -> ArrayBuffer(0), 49 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) +[2023-11-06 13:27:16,613] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions HashSet(__consumer_offsets-22, __consumer_offsets-30, __consumer_offsets-25, __consumer_offsets-35, __consumer_offsets-37, __consumer_offsets-38, __consumer_offsets-13, __consumer_offsets-8, __consumer_offsets-21, __consumer_offsets-4, __consumer_offsets-27, __consumer_offsets-7, __consumer_offsets-9, __consumer_offsets-46, __consumer_offsets-41, __consumer_offsets-33, __consumer_offsets-23, __consumer_offsets-49, __consumer_offsets-47, __consumer_offsets-16, __consumer_offsets-28, __consumer_offsets-31, __consumer_offsets-36, __consumer_offsets-42, __consumer_offsets-3, __consumer_offsets-18, __consumer_offsets-15, __consumer_offsets-24, __consumer_offsets-17, __consumer_offsets-48, __consumer_offsets-19, __consumer_offsets-11, __consumer_offsets-2, __consumer_offsets-43, __consumer_offsets-6, __consumer_offsets-14, __consumer_offsets-20, __consumer_offsets-0, __consumer_offsets-44, __consumer_offsets-39, __consumer_offsets-12, __consumer_offsets-45, __consumer_offsets-1, __consumer_offsets-5, __consumer_offsets-26, __consumer_offsets-29, __consumer_offsets-34, __consumer_offsets-10, __consumer_offsets-32, __consumer_offsets-40) (kafka.server.ReplicaFetcherManager) +[2023-11-06 13:27:16,652] INFO [LogLoader 
partition=__consumer_offsets-3, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,662] INFO Created log for partition __consumer_offsets-3 in /tmp/kafka-logs/__consumer_offsets-3 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,662] INFO [Partition __consumer_offsets-3 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-3 (kafka.cluster.Partition) +[2023-11-06 13:27:16,663] INFO [Partition __consumer_offsets-3 broker=0] Log loaded for partition __consumer_offsets-3 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,671] INFO [LogLoader partition=__consumer_offsets-18, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,672] INFO Created log for partition __consumer_offsets-18 in /tmp/kafka-logs/__consumer_offsets-18 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,672] INFO [Partition __consumer_offsets-18 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-18 (kafka.cluster.Partition) +[2023-11-06 13:27:16,672] INFO [Partition __consumer_offsets-18 broker=0] Log loaded for partition __consumer_offsets-18 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,675] INFO [LogLoader partition=__consumer_offsets-41, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,675] INFO Created log for partition __consumer_offsets-41 in /tmp/kafka-logs/__consumer_offsets-41 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,675] INFO [Partition __consumer_offsets-41 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-41 (kafka.cluster.Partition) +[2023-11-06 13:27:16,675] INFO [Partition __consumer_offsets-41 broker=0] Log loaded for partition __consumer_offsets-41 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,678] INFO [LogLoader partition=__consumer_offsets-10, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,679] INFO Created log for partition __consumer_offsets-10 in /tmp/kafka-logs/__consumer_offsets-10 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,679] INFO [Partition __consumer_offsets-10 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-10 (kafka.cluster.Partition) +[2023-11-06 13:27:16,679] INFO [Partition __consumer_offsets-10 broker=0] Log loaded for partition __consumer_offsets-10 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,681] INFO [LogLoader partition=__consumer_offsets-33, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,682] INFO Created log for partition __consumer_offsets-33 in /tmp/kafka-logs/__consumer_offsets-33 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,682] INFO [Partition 
__consumer_offsets-33 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-33 (kafka.cluster.Partition) +[2023-11-06 13:27:16,682] INFO [Partition __consumer_offsets-33 broker=0] Log loaded for partition __consumer_offsets-33 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,685] INFO [LogLoader partition=__consumer_offsets-48, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,685] INFO Created log for partition __consumer_offsets-48 in /tmp/kafka-logs/__consumer_offsets-48 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,686] INFO [Partition __consumer_offsets-48 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-48 (kafka.cluster.Partition) +[2023-11-06 13:27:16,686] INFO [Partition __consumer_offsets-48 broker=0] Log loaded for partition __consumer_offsets-48 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,688] INFO [LogLoader partition=__consumer_offsets-19, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,689] INFO Created log for partition __consumer_offsets-19 in /tmp/kafka-logs/__consumer_offsets-19 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,689] INFO [Partition __consumer_offsets-19 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-19 (kafka.cluster.Partition) +[2023-11-06 13:27:16,689] INFO [Partition __consumer_offsets-19 broker=0] Log loaded for partition __consumer_offsets-19 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,692] INFO [LogLoader partition=__consumer_offsets-34, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,692] INFO Created log for partition __consumer_offsets-34 in /tmp/kafka-logs/__consumer_offsets-34 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,692] INFO [Partition __consumer_offsets-34 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-34 (kafka.cluster.Partition) +[2023-11-06 13:27:16,693] INFO [Partition __consumer_offsets-34 broker=0] Log loaded for partition __consumer_offsets-34 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,695] INFO [LogLoader partition=__consumer_offsets-4, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,696] INFO Created log for partition __consumer_offsets-4 in /tmp/kafka-logs/__consumer_offsets-4 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,696] INFO [Partition __consumer_offsets-4 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-4 (kafka.cluster.Partition) +[2023-11-06 13:27:16,696] INFO [Partition __consumer_offsets-4 broker=0] Log loaded for partition __consumer_offsets-4 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,699] INFO [LogLoader partition=__consumer_offsets-11, dir=/tmp/kafka-logs] Loading producer state till 
offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,699] INFO Created log for partition __consumer_offsets-11 in /tmp/kafka-logs/__consumer_offsets-11 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,699] INFO [Partition __consumer_offsets-11 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-11 (kafka.cluster.Partition) +[2023-11-06 13:27:16,699] INFO [Partition __consumer_offsets-11 broker=0] Log loaded for partition __consumer_offsets-11 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,702] INFO [LogLoader partition=__consumer_offsets-26, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,703] INFO Created log for partition __consumer_offsets-26 in /tmp/kafka-logs/__consumer_offsets-26 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,703] INFO [Partition __consumer_offsets-26 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-26 (kafka.cluster.Partition) +[2023-11-06 13:27:16,703] INFO [Partition __consumer_offsets-26 broker=0] Log loaded for partition __consumer_offsets-26 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,706] INFO [LogLoader partition=__consumer_offsets-49, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,706] INFO Created log for partition __consumer_offsets-49 in /tmp/kafka-logs/__consumer_offsets-49 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,706] INFO [Partition __consumer_offsets-49 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-49 (kafka.cluster.Partition) +[2023-11-06 13:27:16,706] INFO [Partition __consumer_offsets-49 broker=0] Log loaded for partition __consumer_offsets-49 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,709] INFO [LogLoader partition=__consumer_offsets-39, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,710] INFO Created log for partition __consumer_offsets-39 in /tmp/kafka-logs/__consumer_offsets-39 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,710] INFO [Partition __consumer_offsets-39 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-39 (kafka.cluster.Partition) +[2023-11-06 13:27:16,710] INFO [Partition __consumer_offsets-39 broker=0] Log loaded for partition __consumer_offsets-39 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,713] INFO [LogLoader partition=__consumer_offsets-9, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,713] INFO Created log for partition __consumer_offsets-9 in /tmp/kafka-logs/__consumer_offsets-9 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,713] INFO [Partition __consumer_offsets-9 broker=0] No checkpointed highwatermark is found for partition 
__consumer_offsets-9 (kafka.cluster.Partition) +[2023-11-06 13:27:16,713] INFO [Partition __consumer_offsets-9 broker=0] Log loaded for partition __consumer_offsets-9 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,716] INFO [LogLoader partition=__consumer_offsets-24, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,716] INFO Created log for partition __consumer_offsets-24 in /tmp/kafka-logs/__consumer_offsets-24 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,716] INFO [Partition __consumer_offsets-24 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-24 (kafka.cluster.Partition) +[2023-11-06 13:27:16,716] INFO [Partition __consumer_offsets-24 broker=0] Log loaded for partition __consumer_offsets-24 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,720] INFO [LogLoader partition=__consumer_offsets-31, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,720] INFO Created log for partition __consumer_offsets-31 in /tmp/kafka-logs/__consumer_offsets-31 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,720] INFO [Partition __consumer_offsets-31 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-31 (kafka.cluster.Partition) +[2023-11-06 13:27:16,720] INFO [Partition __consumer_offsets-31 broker=0] Log loaded for partition __consumer_offsets-31 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,723] INFO [LogLoader partition=__consumer_offsets-46, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,723] INFO Created log for partition __consumer_offsets-46 in /tmp/kafka-logs/__consumer_offsets-46 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,724] INFO [Partition __consumer_offsets-46 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-46 (kafka.cluster.Partition) +[2023-11-06 13:27:16,724] INFO [Partition __consumer_offsets-46 broker=0] Log loaded for partition __consumer_offsets-46 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,727] INFO [LogLoader partition=__consumer_offsets-1, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,727] INFO Created log for partition __consumer_offsets-1 in /tmp/kafka-logs/__consumer_offsets-1 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,727] INFO [Partition __consumer_offsets-1 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-1 (kafka.cluster.Partition) +[2023-11-06 13:27:16,727] INFO [Partition __consumer_offsets-1 broker=0] Log loaded for partition __consumer_offsets-1 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,731] INFO [LogLoader partition=__consumer_offsets-16, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,731] 
INFO Created log for partition __consumer_offsets-16 in /tmp/kafka-logs/__consumer_offsets-16 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,731] INFO [Partition __consumer_offsets-16 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-16 (kafka.cluster.Partition) +[2023-11-06 13:27:16,731] INFO [Partition __consumer_offsets-16 broker=0] Log loaded for partition __consumer_offsets-16 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,736] INFO [LogLoader partition=__consumer_offsets-2, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,736] INFO Created log for partition __consumer_offsets-2 in /tmp/kafka-logs/__consumer_offsets-2 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,736] INFO [Partition __consumer_offsets-2 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-2 (kafka.cluster.Partition) +[2023-11-06 13:27:16,736] INFO [Partition __consumer_offsets-2 broker=0] Log loaded for partition __consumer_offsets-2 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,740] INFO [LogLoader partition=__consumer_offsets-25, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,740] INFO Created log for partition __consumer_offsets-25 in /tmp/kafka-logs/__consumer_offsets-25 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,740] INFO [Partition __consumer_offsets-25 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-25 (kafka.cluster.Partition) +[2023-11-06 13:27:16,740] INFO [Partition __consumer_offsets-25 broker=0] Log loaded for partition __consumer_offsets-25 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,744] INFO [LogLoader partition=__consumer_offsets-40, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,744] INFO Created log for partition __consumer_offsets-40 in /tmp/kafka-logs/__consumer_offsets-40 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,744] INFO [Partition __consumer_offsets-40 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-40 (kafka.cluster.Partition) +[2023-11-06 13:27:16,744] INFO [Partition __consumer_offsets-40 broker=0] Log loaded for partition __consumer_offsets-40 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,748] INFO [LogLoader partition=__consumer_offsets-47, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,748] INFO Created log for partition __consumer_offsets-47 in /tmp/kafka-logs/__consumer_offsets-47 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,748] INFO [Partition __consumer_offsets-47 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-47 (kafka.cluster.Partition) +[2023-11-06 13:27:16,748] INFO [Partition 
__consumer_offsets-47 broker=0] Log loaded for partition __consumer_offsets-47 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,752] INFO [LogLoader partition=__consumer_offsets-17, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,753] INFO Created log for partition __consumer_offsets-17 in /tmp/kafka-logs/__consumer_offsets-17 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,753] INFO [Partition __consumer_offsets-17 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-17 (kafka.cluster.Partition) +[2023-11-06 13:27:16,753] INFO [Partition __consumer_offsets-17 broker=0] Log loaded for partition __consumer_offsets-17 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,756] INFO [LogLoader partition=__consumer_offsets-32, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,757] INFO Created log for partition __consumer_offsets-32 in /tmp/kafka-logs/__consumer_offsets-32 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,757] INFO [Partition __consumer_offsets-32 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-32 (kafka.cluster.Partition) +[2023-11-06 13:27:16,757] INFO [Partition __consumer_offsets-32 broker=0] Log loaded for partition __consumer_offsets-32 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,759] INFO [LogLoader partition=__consumer_offsets-37, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,760] INFO Created log for partition __consumer_offsets-37 in /tmp/kafka-logs/__consumer_offsets-37 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,760] INFO [Partition __consumer_offsets-37 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-37 (kafka.cluster.Partition) +[2023-11-06 13:27:16,760] INFO [Partition __consumer_offsets-37 broker=0] Log loaded for partition __consumer_offsets-37 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,763] INFO [LogLoader partition=__consumer_offsets-7, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,763] INFO Created log for partition __consumer_offsets-7 in /tmp/kafka-logs/__consumer_offsets-7 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,763] INFO [Partition __consumer_offsets-7 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-7 (kafka.cluster.Partition) +[2023-11-06 13:27:16,763] INFO [Partition __consumer_offsets-7 broker=0] Log loaded for partition __consumer_offsets-7 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,766] INFO [LogLoader partition=__consumer_offsets-22, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,766] INFO Created log for partition __consumer_offsets-22 in 
/tmp/kafka-logs/__consumer_offsets-22 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,766] INFO [Partition __consumer_offsets-22 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-22 (kafka.cluster.Partition) +[2023-11-06 13:27:16,766] INFO [Partition __consumer_offsets-22 broker=0] Log loaded for partition __consumer_offsets-22 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,769] INFO [LogLoader partition=__consumer_offsets-29, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,770] INFO Created log for partition __consumer_offsets-29 in /tmp/kafka-logs/__consumer_offsets-29 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,770] INFO [Partition __consumer_offsets-29 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-29 (kafka.cluster.Partition) +[2023-11-06 13:27:16,770] INFO [Partition __consumer_offsets-29 broker=0] Log loaded for partition __consumer_offsets-29 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,773] INFO [LogLoader partition=__consumer_offsets-44, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,774] INFO Created log for partition __consumer_offsets-44 in /tmp/kafka-logs/__consumer_offsets-44 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,774] INFO [Partition __consumer_offsets-44 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-44 (kafka.cluster.Partition) +[2023-11-06 13:27:16,774] INFO [Partition __consumer_offsets-44 broker=0] Log loaded for partition __consumer_offsets-44 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,777] INFO [LogLoader partition=__consumer_offsets-14, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,778] INFO Created log for partition __consumer_offsets-14 in /tmp/kafka-logs/__consumer_offsets-14 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,778] INFO [Partition __consumer_offsets-14 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-14 (kafka.cluster.Partition) +[2023-11-06 13:27:16,778] INFO [Partition __consumer_offsets-14 broker=0] Log loaded for partition __consumer_offsets-14 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,781] INFO [LogLoader partition=__consumer_offsets-23, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,781] INFO Created log for partition __consumer_offsets-23 in /tmp/kafka-logs/__consumer_offsets-23 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,781] INFO [Partition __consumer_offsets-23 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-23 (kafka.cluster.Partition) +[2023-11-06 13:27:16,781] INFO [Partition __consumer_offsets-23 broker=0] Log loaded for 
partition __consumer_offsets-23 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,784] INFO [LogLoader partition=__consumer_offsets-38, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,784] INFO Created log for partition __consumer_offsets-38 in /tmp/kafka-logs/__consumer_offsets-38 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,784] INFO [Partition __consumer_offsets-38 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-38 (kafka.cluster.Partition) +[2023-11-06 13:27:16,784] INFO [Partition __consumer_offsets-38 broker=0] Log loaded for partition __consumer_offsets-38 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,787] INFO [LogLoader partition=__consumer_offsets-8, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,787] INFO Created log for partition __consumer_offsets-8 in /tmp/kafka-logs/__consumer_offsets-8 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,787] INFO [Partition __consumer_offsets-8 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-8 (kafka.cluster.Partition) +[2023-11-06 13:27:16,788] INFO [Partition __consumer_offsets-8 broker=0] Log loaded for partition __consumer_offsets-8 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,791] INFO [LogLoader partition=__consumer_offsets-45, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,791] INFO Created log for partition __consumer_offsets-45 in /tmp/kafka-logs/__consumer_offsets-45 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,792] INFO [Partition __consumer_offsets-45 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-45 (kafka.cluster.Partition) +[2023-11-06 13:27:16,792] INFO [Partition __consumer_offsets-45 broker=0] Log loaded for partition __consumer_offsets-45 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,795] INFO [LogLoader partition=__consumer_offsets-15, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,795] INFO Created log for partition __consumer_offsets-15 in /tmp/kafka-logs/__consumer_offsets-15 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,795] INFO [Partition __consumer_offsets-15 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-15 (kafka.cluster.Partition) +[2023-11-06 13:27:16,796] INFO [Partition __consumer_offsets-15 broker=0] Log loaded for partition __consumer_offsets-15 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,798] INFO [LogLoader partition=__consumer_offsets-30, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,799] INFO Created log for partition __consumer_offsets-30 in /tmp/kafka-logs/__consumer_offsets-30 with properties {cleanup.policy=compact, 
compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,799] INFO [Partition __consumer_offsets-30 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-30 (kafka.cluster.Partition) +[2023-11-06 13:27:16,799] INFO [Partition __consumer_offsets-30 broker=0] Log loaded for partition __consumer_offsets-30 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,802] INFO [LogLoader partition=__consumer_offsets-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,803] INFO Created log for partition __consumer_offsets-0 in /tmp/kafka-logs/__consumer_offsets-0 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,803] INFO [Partition __consumer_offsets-0 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,803] INFO [Partition __consumer_offsets-0 broker=0] Log loaded for partition __consumer_offsets-0 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,806] INFO [LogLoader partition=__consumer_offsets-35, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,806] INFO Created log for partition __consumer_offsets-35 in /tmp/kafka-logs/__consumer_offsets-35 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,806] INFO [Partition __consumer_offsets-35 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-35 (kafka.cluster.Partition) +[2023-11-06 13:27:16,807] INFO [Partition __consumer_offsets-35 broker=0] Log loaded for partition __consumer_offsets-35 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,810] INFO [LogLoader partition=__consumer_offsets-5, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,810] INFO Created log for partition __consumer_offsets-5 in /tmp/kafka-logs/__consumer_offsets-5 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,810] INFO [Partition __consumer_offsets-5 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-5 (kafka.cluster.Partition) +[2023-11-06 13:27:16,810] INFO [Partition __consumer_offsets-5 broker=0] Log loaded for partition __consumer_offsets-5 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,813] INFO [LogLoader partition=__consumer_offsets-20, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,814] INFO Created log for partition __consumer_offsets-20 in /tmp/kafka-logs/__consumer_offsets-20 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,814] INFO [Partition __consumer_offsets-20 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-20 (kafka.cluster.Partition) +[2023-11-06 13:27:16,814] INFO [Partition __consumer_offsets-20 broker=0] Log loaded for partition __consumer_offsets-20 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 
13:27:16,817] INFO [LogLoader partition=__consumer_offsets-27, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,817] INFO Created log for partition __consumer_offsets-27 in /tmp/kafka-logs/__consumer_offsets-27 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,817] INFO [Partition __consumer_offsets-27 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-27 (kafka.cluster.Partition) +[2023-11-06 13:27:16,817] INFO [Partition __consumer_offsets-27 broker=0] Log loaded for partition __consumer_offsets-27 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,821] INFO [LogLoader partition=__consumer_offsets-42, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,821] INFO Created log for partition __consumer_offsets-42 in /tmp/kafka-logs/__consumer_offsets-42 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,821] INFO [Partition __consumer_offsets-42 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-42 (kafka.cluster.Partition) +[2023-11-06 13:27:16,821] INFO [Partition __consumer_offsets-42 broker=0] Log loaded for partition __consumer_offsets-42 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,825] INFO [LogLoader partition=__consumer_offsets-12, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,825] INFO Created log for partition __consumer_offsets-12 in /tmp/kafka-logs/__consumer_offsets-12 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,825] INFO [Partition __consumer_offsets-12 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-12 (kafka.cluster.Partition) +[2023-11-06 13:27:16,825] INFO [Partition __consumer_offsets-12 broker=0] Log loaded for partition __consumer_offsets-12 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,828] INFO [LogLoader partition=__consumer_offsets-21, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,829] INFO Created log for partition __consumer_offsets-21 in /tmp/kafka-logs/__consumer_offsets-21 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,829] INFO [Partition __consumer_offsets-21 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-21 (kafka.cluster.Partition) +[2023-11-06 13:27:16,829] INFO [Partition __consumer_offsets-21 broker=0] Log loaded for partition __consumer_offsets-21 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,832] INFO [LogLoader partition=__consumer_offsets-36, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,832] INFO Created log for partition __consumer_offsets-36 in /tmp/kafka-logs/__consumer_offsets-36 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 
13:27:16,832] INFO [Partition __consumer_offsets-36 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-36 (kafka.cluster.Partition) +[2023-11-06 13:27:16,832] INFO [Partition __consumer_offsets-36 broker=0] Log loaded for partition __consumer_offsets-36 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,835] INFO [LogLoader partition=__consumer_offsets-6, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,836] INFO Created log for partition __consumer_offsets-6 in /tmp/kafka-logs/__consumer_offsets-6 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,836] INFO [Partition __consumer_offsets-6 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-6 (kafka.cluster.Partition) +[2023-11-06 13:27:16,836] INFO [Partition __consumer_offsets-6 broker=0] Log loaded for partition __consumer_offsets-6 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,839] INFO [LogLoader partition=__consumer_offsets-43, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,840] INFO Created log for partition __consumer_offsets-43 in /tmp/kafka-logs/__consumer_offsets-43 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,840] INFO [Partition __consumer_offsets-43 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-43 (kafka.cluster.Partition) +[2023-11-06 13:27:16,840] INFO [Partition __consumer_offsets-43 broker=0] Log loaded for partition __consumer_offsets-43 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,843] INFO [LogLoader partition=__consumer_offsets-13, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,844] INFO Created log for partition __consumer_offsets-13 in /tmp/kafka-logs/__consumer_offsets-13 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,844] INFO [Partition __consumer_offsets-13 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-13 (kafka.cluster.Partition) +[2023-11-06 13:27:16,844] INFO [Partition __consumer_offsets-13 broker=0] Log loaded for partition __consumer_offsets-13 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,846] INFO [LogLoader partition=__consumer_offsets-28, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:16,846] INFO Created log for partition __consumer_offsets-28 in /tmp/kafka-logs/__consumer_offsets-28 with properties {cleanup.policy=compact, compression.type="producer", segment.bytes=104857600} (kafka.log.LogManager) +[2023-11-06 13:27:16,846] INFO [Partition __consumer_offsets-28 broker=0] No checkpointed highwatermark is found for partition __consumer_offsets-28 (kafka.cluster.Partition) +[2023-11-06 13:27:16,847] INFO [Partition __consumer_offsets-28 broker=0] Log loaded for partition __consumer_offsets-28 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:16,851] INFO [GroupCoordinator 0]: Elected as the group coordinator for 
partition 3 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,851] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-3 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 18 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-18 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 41 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-41 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 10 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-10 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 33 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-33 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 48 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-48 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 19 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-19 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 34 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-34 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 4 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-4 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 11 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-11 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: 
Elected as the group coordinator for partition 26 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-26 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 49 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-49 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 39 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-39 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 9 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-9 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 24 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-24 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 31 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-31 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 46 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-46 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 1 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-1 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 16 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-16 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 2 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-2 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 
13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 25 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-25 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 40 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-40 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 47 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-47 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 17 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-17 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 32 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-32 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 37 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-37 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 7 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-7 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 22 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-22 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 29 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-29 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 44 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-44 for epoch 0 
(kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 14 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-14 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 23 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,852] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-23 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,852] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 38 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-38 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 8 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-8 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 45 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-45 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 15 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-15 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 30 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-30 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 0 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-0 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 35 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-35 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 5 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from 
__consumer_offsets-5 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 20 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-20 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 27 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-27 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 42 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-42 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 12 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-12 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 21 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-21 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 36 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-36 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 6 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-6 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 43 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-43 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 13 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of offsets and group metadata from __consumer_offsets-13 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,853] INFO [GroupCoordinator 0]: Elected as the group coordinator for partition 28 in epoch 0 (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:16,853] INFO [GroupMetadataManager brokerId=0] Scheduling loading of 
offsets and group metadata from __consumer_offsets-28 for epoch 0 (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-3 in 3 milliseconds for epoch 0, of which 1 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-18 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-41 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-10 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-33 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-48 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-19 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-34 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-4 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-11 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,855] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-26 in 3 milliseconds for epoch 0, of which 3 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-49 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-39 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-9 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-24 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-31 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-46 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-1 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-16 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-2 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-25 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-40 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-47 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-17 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,856] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-32 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-37 in 5 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-7 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-22 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-29 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-44 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-14 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-23 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-38 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-8 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-45 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-15 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-30 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,857] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-0 in 4 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-35 in 5 milliseconds for epoch 0, of which 4 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-5 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-20 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-27 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-42 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-12 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-21 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-36 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-6 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-43 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,858] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-13 in 5 milliseconds for epoch 0, of which 5 milliseconds was spent in the scheduler. (kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:16,859] INFO [GroupMetadataManager brokerId=0] Finished loading offsets and group metadata from __consumer_offsets-28 in 6 milliseconds for epoch 0, of which 6 milliseconds was spent in the scheduler. 
(kafka.coordinator.group.GroupMetadataManager) +[2023-11-06 13:27:32,123] INFO Creating topic test-topic with configuration {} and initial partition assignment HashMap(0 -> ArrayBuffer(0)) (kafka.zk.AdminZkClient) +[2023-11-06 13:27:32,133] INFO [ReplicaFetcherManager on broker 0] Removed fetcher for partitions Set(test-topic-0) (kafka.server.ReplicaFetcherManager) +[2023-11-06 13:27:32,134] INFO [LogLoader partition=test-topic-0, dir=/tmp/kafka-logs] Loading producer state till offset 0 with message format version 2 (kafka.log.UnifiedLog$) +[2023-11-06 13:27:32,135] INFO Created log for partition test-topic-0 in /tmp/kafka-logs/test-topic-0 with properties {} (kafka.log.LogManager) +[2023-11-06 13:27:32,136] INFO [Partition test-topic-0 broker=0] No checkpointed highwatermark is found for partition test-topic-0 (kafka.cluster.Partition) +[2023-11-06 13:27:32,136] INFO [Partition test-topic-0 broker=0] Log loaded for partition test-topic-0 with initial high watermark 0 (kafka.cluster.Partition) +[2023-11-06 13:27:34,588] INFO [GroupCoordinator 0]: Dynamic member with unknown member id joins group test-group in Empty state. Created a new member id rdkafka-ca297002-849e-4f49-bd14-13700d89fb0f and request the member to rejoin with this id. (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:34,594] INFO [GroupCoordinator 0]: Preparing to rebalance group test-group in state PreparingRebalance with old generation 0 (__consumer_offsets-12) (reason: Adding new member rdkafka-ca297002-849e-4f49-bd14-13700d89fb0f with group instance id None; client reason: not provided) (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:34,608] INFO [GroupCoordinator 0]: Stabilized group test-group generation 1 (__consumer_offsets-12) with 1 members (kafka.coordinator.group.GroupCoordinator) +[2023-11-06 13:27:34,614] INFO [GroupCoordinator 0]: Assignment received from leader rdkafka-ca297002-849e-4f49-bd14-13700d89fb0f for group test-group for generation 1. The group has 1 members, 0 of which are static. 
(kafka.coordinator.group.GroupCoordinator) diff --git a/logs/state-change.log b/logs/state-change.log index 14e0dc6..c5dfb2f 100644 --- a/logs/state-change.log +++ b/logs/state-change.log @@ -1,231 +1,172 @@ -[2023-11-03 19:38:21,217] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 0 partitions (state.change.logger) -[2023-11-03 19:38:41,912] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-22 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,912] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-30 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,912] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-25 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,912] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-35 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-37 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-38 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-13 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-8 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-21 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-4 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-27 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-7 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-9 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-46 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-41 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-33 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-23 state from 
NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-49 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-47 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-16 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-28 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-31 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-36 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-42 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-3 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-18 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-15 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-24 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-17 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-48 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-19 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-11 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-2 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-43 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-6 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) 
-[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-14 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-20 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,913] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-44 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-39 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-12 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-45 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-1 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-5 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-26 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-29 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-34 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-10 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-32 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-40 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:41,914] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:38:41,917] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-22 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller 
id=0 epoch=1] Changed partition __consumer_offsets-30 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-25 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-35 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-37 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-38 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-13 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-8 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-21 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-4 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-27 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-7 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed 
partition __consumer_offsets-9 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-46 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-41 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-33 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-23 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-49 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-47 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-16 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-28 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-31 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-36 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition 
__consumer_offsets-42 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-3 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,973] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-18 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-15 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-24 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-17 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-48 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-19 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-11 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-2 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-43 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-6 from 
NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-14 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-20 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-0 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-44 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,974] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-39 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-12 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-45 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-1 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-5 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-26 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-29 from NewPartition to 
OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-34 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-10 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-32 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,975] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-40 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:41,978] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 50 become-leader and 0 become-follower partitions (state.change.logger) -[2023-11-03 19:38:41,980] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 50 partitions (state.change.logger) -[2023-11-03 19:38:41,981] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:38:41,985] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 for 50 partitions (state.change.logger) -[2023-11-03 19:38:42,030] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 1 from controller 0 epoch 1 as part of the become-leader transition for 50 partitions (state.change.logger) -[2023-11-03 19:38:42,082] INFO [Broker id=0] Leader __consumer_offsets-3 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,089] INFO [Broker id=0] Leader __consumer_offsets-18 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,096] INFO [Broker id=0] Leader __consumer_offsets-41 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:42,100] INFO [Broker id=0] Leader __consumer_offsets-10 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,104] INFO [Broker id=0] Leader __consumer_offsets-33 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,108] INFO [Broker id=0] Leader __consumer_offsets-48 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,114] INFO [Broker id=0] Leader __consumer_offsets-19 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,118] INFO [Broker id=0] Leader __consumer_offsets-34 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,122] INFO [Broker id=0] Leader __consumer_offsets-4 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,127] INFO [Broker id=0] Leader __consumer_offsets-11 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,131] INFO [Broker id=0] Leader __consumer_offsets-26 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,135] INFO [Broker id=0] Leader __consumer_offsets-49 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,138] INFO [Broker id=0] Leader __consumer_offsets-39 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:42,143] INFO [Broker id=0] Leader __consumer_offsets-9 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,146] INFO [Broker id=0] Leader __consumer_offsets-24 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,149] INFO [Broker id=0] Leader __consumer_offsets-31 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,153] INFO [Broker id=0] Leader __consumer_offsets-46 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,156] INFO [Broker id=0] Leader __consumer_offsets-1 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,163] INFO [Broker id=0] Leader __consumer_offsets-16 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,167] INFO [Broker id=0] Leader __consumer_offsets-2 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,171] INFO [Broker id=0] Leader __consumer_offsets-25 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,176] INFO [Broker id=0] Leader __consumer_offsets-40 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,181] INFO [Broker id=0] Leader __consumer_offsets-47 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:42,185] INFO [Broker id=0] Leader __consumer_offsets-17 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,188] INFO [Broker id=0] Leader __consumer_offsets-32 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,195] INFO [Broker id=0] Leader __consumer_offsets-37 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,201] INFO [Broker id=0] Leader __consumer_offsets-7 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,204] INFO [Broker id=0] Leader __consumer_offsets-22 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,210] INFO [Broker id=0] Leader __consumer_offsets-29 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,216] INFO [Broker id=0] Leader __consumer_offsets-44 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,220] INFO [Broker id=0] Leader __consumer_offsets-14 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,226] INFO [Broker id=0] Leader __consumer_offsets-23 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,231] INFO [Broker id=0] Leader __consumer_offsets-38 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:42,236] INFO [Broker id=0] Leader __consumer_offsets-8 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,239] INFO [Broker id=0] Leader __consumer_offsets-45 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,244] INFO [Broker id=0] Leader __consumer_offsets-15 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,249] INFO [Broker id=0] Leader __consumer_offsets-30 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,252] INFO [Broker id=0] Leader __consumer_offsets-0 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,255] INFO [Broker id=0] Leader __consumer_offsets-35 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,261] INFO [Broker id=0] Leader __consumer_offsets-5 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,266] INFO [Broker id=0] Leader __consumer_offsets-20 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,270] INFO [Broker id=0] Leader __consumer_offsets-27 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,273] INFO [Broker id=0] Leader __consumer_offsets-42 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:42,277] INFO [Broker id=0] Leader __consumer_offsets-12 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,280] INFO [Broker id=0] Leader __consumer_offsets-21 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,285] INFO [Broker id=0] Leader __consumer_offsets-36 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,288] INFO [Broker id=0] Leader __consumer_offsets-6 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,292] INFO [Broker id=0] Leader __consumer_offsets-43 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,296] INFO [Broker id=0] Leader __consumer_offsets-13 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:38:42,299] INFO [Broker id=0] Leader __consumer_offsets-28 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:42,310] INFO [Broker id=0] Finished LeaderAndIsr request in 325ms correlationId 1 from controller 0 for 50 partitions (state.change.logger) -[2023-11-03 19:38:42,318] INFO [Broker id=0] Add 50 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger) -[2023-11-03 19:38:56,520] INFO [Controller id=0 epoch=1] Changed partition test-topic-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) -[2023-11-03 19:38:56,520] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:38:56,520] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:38:56,526] INFO [Controller id=0 epoch=1] Changed partition test-topic-0 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) -[2023-11-03 19:38:56,526] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 1 become-leader and 0 become-follower partitions (state.change.logger) -[2023-11-03 19:38:56,526] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 1 partitions (state.change.logger) -[2023-11-03 19:38:56,526] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:38:56,526] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 3 from controller 0 for 1 partitions (state.change.logger) -[2023-11-03 19:38:56,527] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 3 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger) -[2023-11-03 19:38:56,530] INFO [Broker id=0] Leader test-topic-0 with topic id Some(Pb9zfnlKRkmTGaMQyAABkw) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:38:56,530] INFO [Broker id=0] Finished LeaderAndIsr request in 4ms correlationId 3 from controller 0 for 1 partitions (state.change.logger) -[2023-11-03 19:38:56,532] INFO [Broker id=0] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger) -[2023-11-03 19:40:48,863] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) -[2023-11-03 19:45:29,526] INFO [Controller id=0 epoch=2] Sending UpdateMetadata request to brokers HashSet(0) for 0 partitions (state.change.logger) -[2023-11-03 19:45:29,562] INFO [Controller id=0 epoch=2] Sending LeaderAndIsr request to broker 0 with 51 become-leader and 0 become-follower partitions (state.change.logger) -[2023-11-03 19:45:29,564] INFO [Controller id=0 epoch=2] Sending UpdateMetadata request to brokers HashSet(0) for 51 partitions (state.change.logger) -[2023-11-03 19:45:29,686] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 for 51 partitions (state.change.logger) -[2023-11-03 19:45:29,711] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 1 from controller 0 epoch 2 as part of the become-leader transition for 51 partitions (state.change.logger) -[2023-11-03 19:45:29,719] INFO [Broker id=0] Leader __consumer_offsets-3 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,722] INFO [Broker id=0] Leader __consumer_offsets-18 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,722] INFO [Broker id=0] Leader __consumer_offsets-41 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,722] INFO [Broker id=0] Leader __consumer_offsets-10 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,722] INFO [Broker id=0] Leader __consumer_offsets-33 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,723] INFO [Broker id=0] Leader __consumer_offsets-48 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:45:29,723] INFO [Broker id=0] Leader __consumer_offsets-19 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,723] INFO [Broker id=0] Leader __consumer_offsets-34 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,724] INFO [Broker id=0] Leader __consumer_offsets-4 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,724] INFO [Broker id=0] Leader __consumer_offsets-11 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,724] INFO [Broker id=0] Leader __consumer_offsets-26 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,724] INFO [Broker id=0] Leader __consumer_offsets-49 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,725] INFO [Broker id=0] Leader __consumer_offsets-39 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,725] INFO [Broker id=0] Leader __consumer_offsets-9 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,726] INFO [Broker id=0] Leader __consumer_offsets-24 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,726] INFO [Broker id=0] Leader __consumer_offsets-31 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:45:29,726] INFO [Broker id=0] Leader __consumer_offsets-46 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,726] INFO [Broker id=0] Leader __consumer_offsets-1 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,727] INFO [Broker id=0] Leader __consumer_offsets-16 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,727] INFO [Broker id=0] Leader __consumer_offsets-2 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,728] INFO [Broker id=0] Leader __consumer_offsets-25 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,728] INFO [Broker id=0] Leader __consumer_offsets-40 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,729] INFO [Broker id=0] Leader __consumer_offsets-47 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,729] INFO [Broker id=0] Leader __consumer_offsets-17 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,729] INFO [Broker id=0] Leader __consumer_offsets-32 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,730] INFO [Broker id=0] Leader __consumer_offsets-37 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:45:29,730] INFO [Broker id=0] Leader __consumer_offsets-7 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,730] INFO [Broker id=0] Leader __consumer_offsets-22 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,730] INFO [Broker id=0] Leader __consumer_offsets-29 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,731] INFO [Broker id=0] Leader __consumer_offsets-44 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,731] INFO [Broker id=0] Leader __consumer_offsets-14 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,732] INFO [Broker id=0] Leader __consumer_offsets-23 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,732] INFO [Broker id=0] Leader __consumer_offsets-38 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,732] INFO [Broker id=0] Leader __consumer_offsets-8 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,732] INFO [Broker id=0] Leader __consumer_offsets-45 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,732] INFO [Broker id=0] Leader __consumer_offsets-15 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:45:29,733] INFO [Broker id=0] Leader __consumer_offsets-30 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,733] INFO [Broker id=0] Leader __consumer_offsets-0 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,733] INFO [Broker id=0] Leader __consumer_offsets-35 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,734] INFO [Broker id=0] Leader __consumer_offsets-5 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,734] INFO [Broker id=0] Leader __consumer_offsets-20 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,734] INFO [Broker id=0] Leader __consumer_offsets-27 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,734] INFO [Broker id=0] Leader __consumer_offsets-42 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,735] INFO [Broker id=0] Leader __consumer_offsets-12 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 7 with partition epoch 0, high watermark 7, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,735] INFO [Broker id=0] Leader __consumer_offsets-21 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,735] INFO [Broker id=0] Leader test-topic-0 with topic id Some(Pb9zfnlKRkmTGaMQyAABkw) starts at leader epoch 0 from offset 5 with partition epoch 0, high watermark 5, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. 
(state.change.logger) -[2023-11-03 19:45:29,735] INFO [Broker id=0] Leader __consumer_offsets-36 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,736] INFO [Broker id=0] Leader __consumer_offsets-6 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,736] INFO [Broker id=0] Leader __consumer_offsets-43 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,736] INFO [Broker id=0] Leader __consumer_offsets-13 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,736] INFO [Broker id=0] Leader __consumer_offsets-28 with topic id Some(vfIIZeo7TYKNeTmKGzMlyg) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) -[2023-11-03 19:45:29,749] INFO [Broker id=0] Finished LeaderAndIsr request in 63ms correlationId 1 from controller 0 for 51 partitions (state.change.logger) -[2023-11-03 19:45:29,757] INFO [Broker id=0] Add 51 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 2 with correlation id 2 (state.change.logger) +[2023-11-06 13:26:32,032] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 0 partitions (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-22 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-30 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-25 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-35 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-37 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-38 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-13 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) 
+[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-8 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-21 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-4 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-27 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-7 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-9 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-46 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,513] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-41 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-33 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-23 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-49 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-47 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-16 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-28 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-31 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-36 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-42 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-3 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition 
__consumer_offsets-18 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-15 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-24 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-17 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-48 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-19 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-11 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-2 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-43 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-6 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-14 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-20 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-44 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-39 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-12 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-45 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-1 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-5 state from NonExistentPartition to NewPartition with assigned 
replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-26 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-29 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-34 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-10 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-32 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-40 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger) +[2023-11-06 13:27:16,514] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2023-11-06 13:27:16,517] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-22 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-30 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-25 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-35 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-37 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-38 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,564] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-13 from NewPartition to OnlinePartition with 
state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-8 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-21 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-4 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-27 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-7 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-9 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-46 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-41 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-33 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-23 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-49 from NewPartition to OnlinePartition with state 
LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-47 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-16 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-28 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-31 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-36 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-42 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-3 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-18 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-15 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-24 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-17 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, 
leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-48 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-19 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-11 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-2 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-43 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-6 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-14 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-20 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-0 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-44 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-39 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, 
isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-12 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-45 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-1 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-5 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-26 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-29 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,565] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-34 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,566] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-10 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,566] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-32 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,566] INFO [Controller id=0 epoch=1] Changed partition __consumer_offsets-40 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger) +[2023-11-06 13:27:16,567] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 50 become-leader and 0 become-follower partitions (state.change.logger) +[2023-11-06 13:27:16,568] INFO 
[Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 50 partitions (state.change.logger) +[2023-11-06 13:27:16,569] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger) +[2023-11-06 13:27:16,574] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 1 from controller 0 for 50 partitions (state.change.logger) +[2023-11-06 13:27:16,614] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 1 from controller 0 epoch 1 as part of the become-leader transition for 50 partitions (state.change.logger) +[2023-11-06 13:27:16,665] INFO [Broker id=0] Leader __consumer_offsets-3 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,672] INFO [Broker id=0] Leader __consumer_offsets-18 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,675] INFO [Broker id=0] Leader __consumer_offsets-41 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,679] INFO [Broker id=0] Leader __consumer_offsets-10 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,682] INFO [Broker id=0] Leader __consumer_offsets-33 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,686] INFO [Broker id=0] Leader __consumer_offsets-48 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,689] INFO [Broker id=0] Leader __consumer_offsets-19 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,693] INFO [Broker id=0] Leader __consumer_offsets-34 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger) +[2023-11-06 13:27:16,696] INFO [Broker id=0] Leader __consumer_offsets-4 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . 
Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,699] INFO [Broker id=0] Leader __consumer_offsets-11 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,703] INFO [Broker id=0] Leader __consumer_offsets-26 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,706] INFO [Broker id=0] Leader __consumer_offsets-49 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,710] INFO [Broker id=0] Leader __consumer_offsets-39 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,713] INFO [Broker id=0] Leader __consumer_offsets-9 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,716] INFO [Broker id=0] Leader __consumer_offsets-24 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,720] INFO [Broker id=0] Leader __consumer_offsets-31 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,724] INFO [Broker id=0] Leader __consumer_offsets-46 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,727] INFO [Broker id=0] Leader __consumer_offsets-1 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,732] INFO [Broker id=0] Leader __consumer_offsets-16 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,736] INFO [Broker id=0] Leader __consumer_offsets-2 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,740] INFO [Broker id=0] Leader __consumer_offsets-25 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,745] INFO [Broker id=0] Leader __consumer_offsets-40 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,749] INFO [Broker id=0] Leader __consumer_offsets-47 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,753] INFO [Broker id=0] Leader __consumer_offsets-17 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,757] INFO [Broker id=0] Leader __consumer_offsets-32 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,760] INFO [Broker id=0] Leader __consumer_offsets-37 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,764] INFO [Broker id=0] Leader __consumer_offsets-7 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,766] INFO [Broker id=0] Leader __consumer_offsets-22 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,770] INFO [Broker id=0] Leader __consumer_offsets-29 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,774] INFO [Broker id=0] Leader __consumer_offsets-44 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,778] INFO [Broker id=0] Leader __consumer_offsets-14 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,781] INFO [Broker id=0] Leader __consumer_offsets-23 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,784] INFO [Broker id=0] Leader __consumer_offsets-38 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,788] INFO [Broker id=0] Leader __consumer_offsets-8 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,792] INFO [Broker id=0] Leader __consumer_offsets-45 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,796] INFO [Broker id=0] Leader __consumer_offsets-15 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,799] INFO [Broker id=0] Leader __consumer_offsets-30 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,803] INFO [Broker id=0] Leader __consumer_offsets-0 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,807] INFO [Broker id=0] Leader __consumer_offsets-35 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,810] INFO [Broker id=0] Leader __consumer_offsets-5 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,814] INFO [Broker id=0] Leader __consumer_offsets-20 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,817] INFO [Broker id=0] Leader __consumer_offsets-27 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,821] INFO [Broker id=0] Leader __consumer_offsets-42 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,825] INFO [Broker id=0] Leader __consumer_offsets-12 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,829] INFO [Broker id=0] Leader __consumer_offsets-21 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,832] INFO [Broker id=0] Leader __consumer_offsets-36 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,836] INFO [Broker id=0] Leader __consumer_offsets-6 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,840] INFO [Broker id=0] Leader __consumer_offsets-43 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,844] INFO [Broker id=0] Leader __consumer_offsets-13 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,847] INFO [Broker id=0] Leader __consumer_offsets-28 with topic id Some(f9d_z6FzSde58txrT_Qj9w) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:16,855] INFO [Broker id=0] Finished LeaderAndIsr request in 282ms correlationId 1 from controller 0 for 50 partitions (state.change.logger)
+[2023-11-06 13:27:16,861] INFO [Broker id=0] Add 50 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 2 (state.change.logger)
+[2023-11-06 13:27:32,128] INFO [Controller id=0 epoch=1] Changed partition test-topic-0 state from NonExistentPartition to NewPartition with assigned replicas 0 (state.change.logger)
+[2023-11-06 13:27:32,128] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger)
+[2023-11-06 13:27:32,128] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger)
+[2023-11-06 13:27:32,131] INFO [Controller id=0 epoch=1] Changed partition test-topic-0 from NewPartition to OnlinePartition with state LeaderAndIsr(leader=0, leaderEpoch=0, isrWithBrokerEpoch=List(BrokerState(brokerId=0, brokerEpoch=-1)), leaderRecoveryState=RECOVERED, partitionEpoch=0) (state.change.logger)
+[2023-11-06 13:27:32,131] INFO [Controller id=0 epoch=1] Sending LeaderAndIsr request to broker 0 with 1 become-leader and 0 become-follower partitions (state.change.logger)
+[2023-11-06 13:27:32,131] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet(0) for 1 partitions (state.change.logger)
+[2023-11-06 13:27:32,132] INFO [Controller id=0 epoch=1] Sending UpdateMetadata request to brokers HashSet() for 0 partitions (state.change.logger)
+[2023-11-06 13:27:32,132] INFO [Broker id=0] Handling LeaderAndIsr request correlationId 3 from controller 0 for 1 partitions (state.change.logger)
+[2023-11-06 13:27:32,133] INFO [Broker id=0] Stopped fetchers as part of LeaderAndIsr request correlationId 3 from controller 0 epoch 1 as part of the become-leader transition for 1 partitions (state.change.logger)
+[2023-11-06 13:27:32,136] INFO [Broker id=0] Leader test-topic-0 with topic id Some(Hx76FWANRJGp_-YQs8849Q) starts at leader epoch 0 from offset 0 with partition epoch 0, high watermark 0, ISR [0], adding replicas [] and removing replicas [] . Previous leader None and previous leader epoch was -1. (state.change.logger)
+[2023-11-06 13:27:32,136] INFO [Broker id=0] Finished LeaderAndIsr request in 4ms correlationId 3 from controller 0 for 1 partitions (state.change.logger)
+[2023-11-06 13:27:32,137] INFO [Broker id=0] Add 1 partitions and deleted 0 partitions from metadata cache in response to UpdateMetadata request sent by controller 0 epoch 1 with correlation id 4 (state.change.logger)
diff --git a/logs/zookeeper-gc.log b/logs/zookeeper-gc.log
index fc9ec45..b823933 100644
--- a/logs/zookeeper-gc.log
+++ b/logs/zookeeper-gc.log
@@ -1,34 +1,34 @@
-[2023-11-03T19:45:15.598-0400][gc] Using G1
-[2023-11-03T19:45:15.601-0400][gc,init] Version: 17.0.6+10 (release)
-[2023-11-03T19:45:15.601-0400][gc,init] CPUs: 12 total, 12 available
-[2023-11-03T19:45:15.601-0400][gc,init] Memory: 63941M
-[2023-11-03T19:45:15.601-0400][gc,init] Large Page Support: Disabled
-[2023-11-03T19:45:15.601-0400][gc,init] NUMA Support: Disabled
-[2023-11-03T19:45:15.601-0400][gc,init] Compressed Oops: Enabled (32-bit)
-[2023-11-03T19:45:15.601-0400][gc,init] Heap Region Size: 1M
-[2023-11-03T19:45:15.601-0400][gc,init] Heap Min Capacity: 512M
-[2023-11-03T19:45:15.601-0400][gc,init] Heap Initial Capacity: 512M
-[2023-11-03T19:45:15.601-0400][gc,init] Heap Max Capacity: 512M
-[2023-11-03T19:45:15.601-0400][gc,init] Pre-touch: Disabled
-[2023-11-03T19:45:15.601-0400][gc,init] Parallel Workers: 10
-[2023-11-03T19:45:15.601-0400][gc,init] Concurrent Workers: 3
-[2023-11-03T19:45:15.601-0400][gc,init] Concurrent Refinement Workers: 10
-[2023-11-03T19:45:15.601-0400][gc,init] Periodic GC: Disabled
-[2023-11-03T19:45:15.601-0400][gc,metaspace] CDS archive(s) mapped at: [0x0000000800000000-0x0000000800bd5000-0x0000000800bd5000), size 12406784, SharedBaseAddress: 0x0000000800000000, ArchiveRelocationMode: 0.
-[2023-11-03T19:45:15.601-0400][gc,metaspace] Compressed class space mapped at: 0x0000000800c00000-0x0000000840c00000, reserved size: 1073741824
-[2023-11-03T19:45:15.601-0400][gc,metaspace] Narrow klass base: 0x0000000800000000, Narrow klass shift: 0, Narrow klass range: 0x100000000
-[2023-11-03T19:45:28.577-0400][gc,start    ] GC(0) Pause Young (Normal) (G1 Evacuation Pause)
-[2023-11-03T19:45:28.578-0400][gc,task     ] GC(0) Using 10 workers of 10 for evacuation
-[2023-11-03T19:45:28.587-0400][gc,phases   ] GC(0) Pre Evacuate Collection Set: 0.1ms
-[2023-11-03T19:45:28.587-0400][gc,phases   ] GC(0) Merge Heap Roots: 0.1ms
-[2023-11-03T19:45:28.587-0400][gc,phases   ] GC(0) Evacuate Collection Set: 8.0ms
-[2023-11-03T19:45:28.587-0400][gc,phases   ] GC(0) Post Evacuate Collection Set: 0.3ms
-[2023-11-03T19:45:28.587-0400][gc,phases   ] GC(0) Other: 1.0ms
-[2023-11-03T19:45:28.587-0400][gc,heap     ] GC(0) Eden regions: 25->0(21)
-[2023-11-03T19:45:28.587-0400][gc,heap     ] GC(0) Survivor regions: 0->4(4)
-[2023-11-03T19:45:28.587-0400][gc,heap     ] GC(0) Old regions: 0->4
-[2023-11-03T19:45:28.587-0400][gc,heap     ] GC(0) Archive regions: 2->2
-[2023-11-03T19:45:28.587-0400][gc,heap     ] GC(0) Humongous regions: 0->0
-[2023-11-03T19:45:28.587-0400][gc,metaspace] GC(0) Metaspace: 8164K(8320K)->8164K(8320K) NonClass: 7262K(7360K)->7262K(7360K) Class: 902K(960K)->902K(960K)
-[2023-11-03T19:45:28.587-0400][gc          ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 25M->8M(512M) 9.440ms
-[2023-11-03T19:45:28.587-0400][gc,cpu      ] GC(0) User=0.00s Sys=0.08s Real=0.01s
+[2023-11-06T13:26:23.854-0500][gc] Using G1
+[2023-11-06T13:26:23.857-0500][gc,init] Version: 17.0.8.1+1 (release)
+[2023-11-06T13:26:23.857-0500][gc,init] CPUs: 12 total, 12 available
+[2023-11-06T13:26:23.857-0500][gc,init] Memory: 63941M
+[2023-11-06T13:26:23.857-0500][gc,init] Large Page Support: Disabled
+[2023-11-06T13:26:23.857-0500][gc,init] NUMA Support: Disabled
+[2023-11-06T13:26:23.857-0500][gc,init] Compressed Oops: Enabled (32-bit)
+[2023-11-06T13:26:23.857-0500][gc,init] Heap Region Size: 1M
+[2023-11-06T13:26:23.857-0500][gc,init] Heap Min Capacity: 512M
+[2023-11-06T13:26:23.857-0500][gc,init] Heap Initial Capacity: 512M
+[2023-11-06T13:26:23.857-0500][gc,init] Heap Max Capacity: 512M
+[2023-11-06T13:26:23.857-0500][gc,init] Pre-touch: Disabled
+[2023-11-06T13:26:23.857-0500][gc,init] Parallel Workers: 10
+[2023-11-06T13:26:23.857-0500][gc,init] Concurrent Workers: 3
+[2023-11-06T13:26:23.857-0500][gc,init] Concurrent Refinement Workers: 10
+[2023-11-06T13:26:23.857-0500][gc,init] Periodic GC: Disabled
+[2023-11-06T13:26:23.861-0500][gc,metaspace] CDS archive(s) mapped at: [0x00007f767f000000-0x00007f767fbeb000-0x00007f767fbeb000), size 12496896, SharedBaseAddress: 0x00007f767f000000, ArchiveRelocationMode: 1.
+[2023-11-06T13:26:23.861-0500][gc,metaspace] Compressed class space mapped at: 0x00007f7680000000-0x00007f76c0000000, reserved size: 1073741824
+[2023-11-06T13:26:23.861-0500][gc,metaspace] Narrow klass base: 0x00007f767f000000, Narrow klass shift: 0, Narrow klass range: 0x100000000
+[2023-11-06T13:26:31.373-0500][gc,start    ] GC(0) Pause Young (Normal) (G1 Evacuation Pause)
+[2023-11-06T13:26:31.374-0500][gc,task     ] GC(0) Using 10 workers of 10 for evacuation
+[2023-11-06T13:26:31.383-0500][gc,phases   ] GC(0) Pre Evacuate Collection Set: 0.1ms
+[2023-11-06T13:26:31.383-0500][gc,phases   ] GC(0) Merge Heap Roots: 0.1ms
+[2023-11-06T13:26:31.383-0500][gc,phases   ] GC(0) Evacuate Collection Set: 8.2ms
+[2023-11-06T13:26:31.383-0500][gc,phases   ] GC(0) Post Evacuate Collection Set: 0.6ms
+[2023-11-06T13:26:31.383-0500][gc,phases   ] GC(0) Other: 1.0ms
+[2023-11-06T13:26:31.383-0500][gc,heap     ] GC(0) Eden regions: 25->0(21)
+[2023-11-06T13:26:31.383-0500][gc,heap     ] GC(0) Survivor regions: 0->4(4)
+[2023-11-06T13:26:31.383-0500][gc,heap     ] GC(0) Old regions: 0->4
+[2023-11-06T13:26:31.383-0500][gc,heap     ] GC(0) Archive regions: 2->2
+[2023-11-06T13:26:31.383-0500][gc,heap     ] GC(0) Humongous regions: 0->0
+[2023-11-06T13:26:31.383-0500][gc,metaspace] GC(0) Metaspace: 8086K(8320K)->8086K(8320K) NonClass: 7171K(7296K)->7171K(7296K) Class: 914K(1024K)->914K(1024K)
+[2023-11-06T13:26:31.383-0500][gc          ] GC(0) Pause Young (Normal) (G1 Evacuation Pause) 25M->8M(512M) 10.024ms
+[2023-11-06T13:26:31.383-0500][gc,cpu      ] GC(0) User=0.02s Sys=0.07s Real=0.01s