<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.7.1#970001-sha1:2222b88b221c4928ef0de3161136cc90c8356a66) at Thu Feb 08 09:05:36 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>MongoDB Jira</title>
    <link>https://jira.mongodb.org</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.7.1</version>
        <build-number>970001</build-number>
        <build-date>13-04-2023</build-date>
    </build-info>


<item>
            <title>[KAFKA-105] Support errors.tolerance</title>
                <link>https://jira.mongodb.org/browse/KAFKA-105</link>
                <project id="16285" key="KAFKA">Kafka Connector</project>
                    <description>&lt;p&gt;Look to support &lt;tt&gt;errors.tolerance&lt;/tt&gt; configuration. &lt;/p&gt;

&lt;p&gt;For the Sink bulk write errors (eg: Duplicate key) should head to the DLQ  if configured rather than stop the world.&lt;/p&gt;

&lt;p&gt;For the Source connector there is no DLQ - but perhaps invalid configurations could push the original data to a Sink style DLQ if any of the conversions fail. Also test messages over 16MB (&lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-127&quot; title=&quot;Kafka Source connector handling documents greater than 16MB BSON&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-127&quot;&gt;&lt;del&gt;KAFKA-127&lt;/del&gt;&lt;/a&gt;)&lt;/p&gt;</description>
                <environment></environment>
        <key id="1341837">KAFKA-105</key>
            <summary>Support errors.tolerance</summary>
                <type id="4" iconUrl="https://jira.mongodb.org/secure/viewavatar?size=xsmall&amp;avatarId=14710&amp;avatarType=issuetype">Improvement</type>
                                            <priority id="3" iconUrl="https://jira.mongodb.org/images/icons/priorities/major.svg">Major - P3</priority>
                        <status id="6" iconUrl="https://jira.mongodb.org/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="13201">Fixed</resolution>
                                        <assignee username="ross@mongodb.com">Ross Lawley</assignee>
                                    <reporter username="ross@mongodb.com">Ross Lawley</reporter>
                        <labels>
                    </labels>
                <created>Wed, 6 May 2020 13:30:06 +0000</created>
                <updated>Sat, 28 Oct 2023 10:46:00 +0000</updated>
                            <resolved>Mon, 21 Sep 2020 13:05:26 +0000</resolved>
                                    <version>1.1</version>
                                    <fixVersion>1.3.0</fixVersion>
                                                        <votes>5</votes>
                                    <watches>6</watches>
                                                                                                                <comments>
                            <comment id="3418367" author="xgen-internal-githook" created="Wed, 30 Sep 2020 13:14:50 +0000"  >&lt;p&gt;Author:&lt;/p&gt;
{&apos;name&apos;: &apos;Ross Lawley&apos;, &apos;email&apos;: &apos;ross.lawley@gmail.com&apos;, &apos;username&apos;: &apos;rozza&apos;}
&lt;p&gt;Message: Ensure resume token missing errors respect errors.tolerance&lt;/p&gt;

&lt;p&gt;Throw an exception if there is no errors tolerance and&lt;br/&gt;
the resume token is missing.&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-105&quot; title=&quot;Support errors.tolerance&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-105&quot;&gt;&lt;del&gt;KAFKA-105&lt;/del&gt;&lt;/a&gt;&lt;br/&gt;
Branch: master&lt;br/&gt;
&lt;a href=&quot;https://github.com/mongodb/mongo-kafka/commit/a5b269c084b057fbe65ecebc263dcc37cc67d016&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/mongodb/mongo-kafka/commit/a5b269c084b057fbe65ecebc263dcc37cc67d016&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="3402960" author="xgen-internal-githook" created="Mon, 21 Sep 2020 13:03:42 +0000"  >&lt;p&gt;Author:&lt;/p&gt;
{&apos;name&apos;: &apos;Ross Lawley&apos;, &apos;email&apos;: &apos;ross.lawley@gmail.com&apos;, &apos;username&apos;: &apos;rozza&apos;}
&lt;p&gt;Message: Support errors.tolerance&lt;/p&gt;

&lt;p&gt;Scenarios covered:&lt;/p&gt;

&lt;p&gt;Source&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;Missing / invalid / not found Resume Tokens (Integration test with a mocked offsetStorageReader)&lt;/li&gt;
	&lt;li&gt;Poison pill message - invalid schema&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Sink&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;Poison pill messages - Invalid Key / Values types &amp;amp; invalid documents&lt;/li&gt;
	&lt;li&gt;Errors thrown by PostProcessors&lt;/li&gt;
	&lt;li&gt;Debezium CDC handler errors / poison pills&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;&lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-105&quot; title=&quot;Support errors.tolerance&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-105&quot;&gt;&lt;del&gt;KAFKA-105&lt;/del&gt;&lt;/a&gt;&lt;br/&gt;
Branch: master&lt;br/&gt;
&lt;a href=&quot;https://github.com/mongodb/mongo-kafka/commit/17ef77bc10174500f9629f26c04216a95d5bb0fc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/mongodb/mongo-kafka/commit/17ef77bc10174500f9629f26c04216a95d5bb0fc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="3396548" author="ross@10gen.com" created="Wed, 16 Sep 2020 13:33:15 +0000"  >&lt;p&gt;PR: &lt;a href=&quot;https://github.com/mongodb/mongo-kafka/pull/38&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/mongodb/mongo-kafka/pull/38&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="3287616" author="ross@10gen.com" created="Thu, 16 Jul 2020 09:45:27 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.mongodb.org/secure/ViewProfile.jspa?name=sabari.mgn%40gmail.com&quot; class=&quot;user-hover&quot; rel=&quot;sabari.mgn@gmail.com&quot;&gt;sabari.mgn@gmail.com&lt;/a&gt; no set release date for 1.3 but work has started on 1.3.&lt;/p&gt;</comment>
                            <comment id="3215869" author="sabari.mgn@gmail.com" created="Fri, 19 Jun 2020 13:30:30 +0000"  >&lt;p&gt;Hi Ross, Yes I see dead letter queue support isn&apos;t supported for source connectors:&#160;&lt;a href=&quot;https://www.confluent.io/blog/kafka-connect-deep-dive-error-handling-dead-letter-queues/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://www.confluent.io/blog/kafka-connect-deep-dive-error-handling-dead-letter-queues/&lt;/a&gt;&#160;. But&#160;errors.tolerance can ensure that connectors are resilient for failures. Can you please share your thoughts whether it will be supported? Also, can you please provide a tentative release date for 1.3.&lt;/p&gt;</comment>
                            <comment id="3214318" author="sabari.mgn@gmail.com" created="Thu, 18 Jun 2020 15:29:32 +0000"  >&lt;p&gt;Hi Ross, Thanks for the response. yes, max.request.size also have to be configured on the broker side. But my question was even with the configuration (errors.tolerance: all) enabled in the connector it goes into a failed state. But was checking if the processing can still continue after the error.&#160;&lt;/p&gt;</comment>
                            <comment id="3211785" author="ross@10gen.com" created="Wed, 17 Jun 2020 07:43:51 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.mongodb.org/secure/ViewProfile.jspa?name=sabari.mgn%40gmail.com&quot; class=&quot;user-hover&quot; rel=&quot;sabari.mgn@gmail.com&quot;&gt;sabari.mgn@gmail.com&lt;/a&gt; that is a KAFKA limitation of how it supports errors.tolerance and the dlq.&lt;/p&gt;</comment>
                            <comment id="3210795" author="sabari.mgn@gmail.com" created="Tue, 16 Jun 2020 20:52:34 +0000"  >&lt;p&gt;The source connector also lacks the support for errors.tolerance and dead letter queue. If there are conversion issues or&#160;max.request.size issue the connector fails.&lt;/p&gt;</comment>
                            <comment id="3104327" author="yaramati@adobe.com" created="Fri, 22 May 2020 14:16:54 +0000"  >&lt;p&gt;Moving my comment from&#160;&lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-98&quot; class=&quot;external-link&quot; rel=&quot;nofollow&quot;&gt;https://jira.mongodb.org/browse/KAFKA-98&lt;/a&gt;&#160;to here.&lt;/p&gt;

&lt;p&gt;My Environment:&lt;/p&gt;

&lt;p&gt;Kafka version 2.4.0&lt;/p&gt;

&lt;p&gt;Source MongoDB 3.6.8&lt;br/&gt;
Target MongoDB 3.6.8&lt;br/&gt;
Source connector debezium&lt;br/&gt;
Sink connector MongoDB Kafka Sink Connector version 1.0&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;As per my understanding, fix should handle sink connector to continue with the next message after logging error message into logfile.&lt;/p&gt;

&lt;p&gt;&lt;b&gt;Error from sink connector:&lt;/b&gt;&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,176&amp;#93;&lt;/span&gt;&#160;INFO&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;Opened connection&#160;[connectionId&lt;br class=&quot;atl-forced-newline&quot; /&gt;&lt;/p&gt;
{localValue:2}
&lt;p&gt;]&#160;to 10.74.1.50:27021 (org.mongodb.driver.connection:71)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,176&amp;#93;&lt;/span&gt;&#160;INFO&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;Opened connection&#160;[connectionId&lt;br class=&quot;atl-forced-newline&quot; /&gt;&lt;/p&gt;
{localValue:2}
&lt;p&gt;]&#160;to 10.74.1.50:27021 (org.mongodb.driver.connection:71)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,260&amp;#93;&lt;/span&gt;&#160;ERROR&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;Mongodb bulk write (partially) failed (com.mongodb.kafka.connect.sink.MongoSinkTask:184)com.mongodb.MongoBulkWriteException: Bulk write operation error on server xxx.xxx.xxx.xxx:27021. Write errors: [BulkWriteError{index=67, code=28, message=&apos;Cannot create field &apos;renditions&apos; in element&lt;/p&gt;

{12: null}

&lt;p&gt;&apos;, details={}}]. at com.mongodb.connection.BulkWriteBatchCombiner.getError(BulkWriteBatchCombiner.java:173) at com.mongodb.connection.BulkWriteBatchCombiner.throwOnError(BulkWriteBatchCombiner.java:202) at com.mongodb.connection.BulkWriteBatchCombiner.getResult(BulkWriteBatchCombiner.java:143) at com.mongodb.operation.BulkWriteBatch.getResult(BulkWriteBatch.java:227) at com.mongodb.operation.MixedBulkWriteOperation.executeBulkWriteBatch(MixedBulkWriteOperation.java:282) at com.mongodb.operation.MixedBulkWriteOperation.access$700(MixedBulkWriteOperation.java:72) at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:205) at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:196) at com.mongodb.operation.OperationHelper.withReleasableConnection(OperationHelper.java:501) at com.mongodb.operation.MixedBulkWriteOperation.execute(MixedBulkWriteOperation.java:196) at com.mongodb.operation.MixedBulkWriteOperation.execute(MixedBulkWriteOperation.java:71) at com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.execute(MongoClientDelegate.java:213) at com.mongodb.client.internal.MongoCollectionImpl.executeBulkWrite(MongoCollectionImpl.java:476) at com.mongodb.client.internal.MongoCollectionImpl.bulkWrite(MongoCollectionImpl.java:456) at com.mongodb.kafka.connect.sink.MongoSinkTask.processSinkRecords(MongoSinkTask.java:180) at com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$null$2(MongoSinkTask.java:120) at java.base/java.util.ArrayList.forEach(ArrayList.java:1507) at com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$put$3(MongoSinkTask.java:119) at java.base/java.util.HashMap.forEach(HashMap.java:1338) at com.mongodb.kafka.connect.sink.MongoSinkTask.put(MongoSinkTask.java:117) at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:539) at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322) at 
org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224) at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192) at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:177) at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:227) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:830)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,261&amp;#93;&lt;/span&gt;&#160;ERROR&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;WriteResult: AcknowledgedBulkWriteResult{insertedCount=0, matchedCount=38, removedCount=0, modifiedCount=9, upserts=[]} (com.mongodb.kafka.connect.sink.MongoSinkTask:185)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,261&amp;#93;&lt;/span&gt;&#160;ERROR&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;WriteErrors: [BulkWriteError{index=67, code=28, message=&apos;Cannot create field &apos;renditions&apos; in element&lt;/p&gt;

{12: null}

&lt;p&gt;&apos;, details={}}] (com.mongodb.kafka.connect.sink.MongoSinkTask:186)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,261&amp;#93;&lt;/span&gt;&#160;ERROR&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;WriteConcernError: null (com.mongodb.kafka.connect.sink.MongoSinkTask:187)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,262&amp;#93;&lt;/span&gt;&#160;ERROR&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt;&#160;WorkerSinkTask{id=mongo-sink-assets-shard24-new-0} RetriableException from SinkTask: (org.apache.kafka.connect.runtime.WorkerSinkTask:552)org.apache.kafka.connect.errors.RetriableException: Bulk write operation error on server xxx.xxx.xxx.xxx:27021. Write errors: [BulkWriteError{index=67, code=28, message=&apos;Cannot create field &apos;renditions&apos; in element&lt;/p&gt;

{12: null}

&lt;p&gt;&apos;, details={}}]. at com.mongodb.kafka.connect.sink.MongoSinkTask.checkRetriableException(MongoSinkTask.java:212) at com.mongodb.kafka.connect.sink.MongoSinkTask.processSinkRecords(MongoSinkTask.java:188)&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&lt;b&gt;Error from mongod log:&lt;/b&gt;&lt;/p&gt;

&lt;p&gt;2020-05-20T20:33:37.358-0700 D NETWORK&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;Compressing message with snappy&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D WRITE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;UpdateResult &#8211; upserted: {} modifiers: 1 existing: 0 numDocsModified: 0 numMatched: 0&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;WT rollback_transaction for snapshot id 980803764&lt;br/&gt;
2020-05-20T20:33:37.359-0700 I WRITE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;update poc_oz_prod.poc_assets_new command: { q:&lt;/p&gt;

{ _id: &quot;50efcb5b9d0a431cbe7c3162bac6374e&quot; }

&lt;p&gt;, u: { $set: { dna:&lt;/p&gt;

{ match: false, date: &quot;2020-05-07T00:01:57.957556Z&quot; }

&lt;p&gt;} }, multi: false, upsert: false } planSummary: IDHACK keysExamined:0 docsExamined:0 nMatched:0 nModified:0 numYields:0 locks:{ Global: { acquireCount:&lt;/p&gt;

{ r: 1, w: 1 }

&lt;p&gt;}, Database: { acquireCount:&lt;/p&gt;

{ w: 1 }

&lt;p&gt;}, Collection: { acquireCount:&lt;/p&gt;

{ w: 1 }

&lt;p&gt;} } 0ms&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D QUERY&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;Using idhack: { _id: &quot;84725dbcc52bd2019a08f0f9327e3fee&quot; }&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;WT begin_transaction for snapshot id 980803766&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D -&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;User Assertion: 28:Cannot create field &apos;renditions&apos; in element {12: null} src/mongo/db/update/modifier_node.cpp 239&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;WT rollback_transaction for snapshot id 980803766&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D WRITE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;Caught Assertion in update: PathNotViable: Cannot create field &apos;renditions&apos; in element {12: null}&lt;br/&gt;
2020-05-20T20:33:37.359-0700 I WRITE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;update poc_oz_prod.poc_assets_new command: { q:&lt;/p&gt;

{ _id: &quot;84725dbcc52bd2019a08f0f9327e3fee&quot; }

&lt;p&gt;, u: { $set: { revisions.12.renditions.thumbnail2x:&lt;/p&gt;

{ id: &quot;1e5dbcda2d290412a0f88928821c310c&quot;, created_by: &quot;d49d14c1bfb74dbf2334c027fe3c1c61&quot;, created: 1588809718024706, created_by_ip: &quot;73.185.39.99, 10.92.211.1, 10.92.214.94, 34.213.157.249, 35.160.101.122&quot; }

&lt;p&gt;, revisions.12.updated: 1588809718024706 } }, multi: false, upsert: false } planSummary: IDHACK exception: Cannot create field &apos;renditions&apos; in element {12: null} code:PathNotViable numYields:0 locks:{ Global: { acquireCount:&lt;/p&gt;

{ r: 2, w: 2 }

&lt;p&gt;}, Database: { acquireCount:&lt;/p&gt;

{ w: 2 }

&lt;p&gt;}, Collection: { acquireCount:&lt;/p&gt;

{ w: 2 }

&lt;p&gt;} } 0ms&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D REPL&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;Waiting for write concern. OpTime: { ts: Timestamp(1590032017, 49), t: 2 }, write concern: { w: 1, wtimeout: 0 }&lt;br/&gt;
2020-05-20T20:33:37.359-0700 I COMMAND&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;command poc_oz_prod.$cmd command: update { update: &quot;poc_assets_new&quot;, bypassDocumentValidation: false, ordered: true, stmtIds: [ 0, 1, 2, 3, 4 ], updates: 5, shardVersion: [ Timestamp(1, 92037), ObjectId(&apos;5ea31e40cd8c9318b72b5e16&apos;) ], lsid:&lt;/p&gt;

{ id: UUID(&quot;0a2dba1e-be97-4eb8-aff8-978182ff58c6&quot;), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }

&lt;p&gt;, txnNumber: 35, $clusterTime: { clusterTime: Timestamp(1590032017, 49), signature:&lt;/p&gt;

{ hash: BinData(0, 9782D442613737E518ABEAB61EC775B9B4E30F9B), keyId: 6818528695733452825 }

&lt;p&gt;}, $client: { driver:&lt;/p&gt;

{ name: &quot;mongo-java-driver|sync|mongo-kafka|sink&quot;, version: &quot;3.12.4|1.1.0-5-g32f5458-dirty&quot; }

&lt;p&gt;, os: { type: &quot;Linux&quot;, name: &quot;Linux&quot;, architecture: &quot;amd64&quot;, version: &quot;4.4.0-1095-aws&quot; }, platform: &quot;Java/Oracle Corporation/13.0.2+8&quot;, mongos: { host: &quot;poc-config-mongos:27021&quot;, client: &quot;10.74.1.240:33674&quot;, version: &quot;3.6.8&quot; } }, $configServerState: { opTime:&lt;/p&gt;

{ ts: Timestamp(1590032011, 1), t: 1 }

&lt;p&gt;}, $db: &quot;poc_oz_prod&quot; } numYields:0 reslen:484 locks:{ Global: { acquireCount:&lt;/p&gt;

{ r: 2, w: 2 }

&lt;p&gt;}, Database: { acquireCount:&lt;/p&gt;

{ w: 2 }

&lt;p&gt;}, Collection: { acquireCount:&lt;/p&gt;

{ w: 2 }

&lt;p&gt;} } protocol:op_msg 1ms&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D NETWORK&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt;&#160;Compressing message with snappy&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D NETWORK&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;Decompressing message with snappy&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D COMMAND&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;run command local.$cmd { getMore: 38469818473, collection: &quot;oplog.rs&quot;, batchSize: 13981010, maxTimeMS: 5000, term: 2, lastKnownCommittedOpTime:&lt;/p&gt;

{ ts: Timestamp(1590032017, 48), t: 2 }

&lt;p&gt;, $replData: 1, $oplogQueryData: 1, $readPreference: { mode: &quot;secondaryPreferred&quot; }, $clusterTime: { clusterTime: Timestamp(1590032017, 49), signature:&lt;/p&gt;

{ hash: BinData(0, 9782D442613737E518ABEAB61EC775B9B4E30F9B), keyId: 6818528695733452825 }

&lt;p&gt;}, $db: &quot;local&quot; }&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;NamespaceUUIDCache: registered namespace local.oplog.rs with UUID 86258ee6-3d26-4417-886e-fcaaf736a2c4&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;WT begin_transaction for snapshot id 980803768&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;WT rollback_transaction for snapshot id 980803768&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;WT begin_transaction for snapshot id 980803769&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt;&#160;WT rollback_transaction for snapshot id 980803769&lt;br/&gt;
2020-05-20T20:33:37.365-0700 D NETWORK&#160;&lt;span class=&quot;error&quot;&gt;&amp;#91;conn885&amp;#93;&lt;/span&gt;&#160;Decompressing message with snappy&lt;/p&gt;</comment>
                            <comment id="3096962" author="martin.andersson@kambi.com" created="Wed, 20 May 2020 11:53:28 +0000"  >&lt;p&gt;Duplicate of &lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-78&quot; title=&quot;Publish error messages to a topic&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-78&quot;&gt;&lt;del&gt;KAFKA-78&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10320">
                    <name>Documented</name>
                                                                <inwardlinks description="is documented by">
                                                        </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="1309483">KAFKA-96</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="1406811">KAFKA-127</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="1437641">KAFKA-140</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10012">
                    <name>Related</name>
                                            <outwardlinks description="related to">
                                        <issuelink>
            <issuekey id="1380440">KAFKA-115</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="1667744">KAFKA-215</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                                                        <customfield id="customfield_13552" key="com.go2group.jira.plugin.crm:crm_generic_field">
                        <customfieldname>Case</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue><![CDATA[[5002K00000pEUpNQAW, 5002K00000pkt9sQAA]]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                    <customfield id="customfield_15850" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10257" key="com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons">
                        <customfieldname>Documentation Changes</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10250"><![CDATA[Needed]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_14266" key="com.atlassian.jira.plugin.system.customfieldtypes:textarea">
                        <customfieldname>Documentation Changes Summary</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>&lt;p&gt;There will be a change in configuration&lt;/p&gt;</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10857" key="com.pyxis.greenhopper.jira:gh-epic-link">
                        <customfieldname>Epic Link</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>KAFKA-134</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                    <customfield id="customfield_12550" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>2|hx76rj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10558" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>