<?xml version="1.0" encoding="UTF-8"?>
<!--
RSS generated by JIRA (9.7.1#970001-sha1:2222b88b221c4928ef0de3161136cc90c8356a66) at Thu Feb 08 09:05:34 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>MongoDB Jira</title>
    <link>https://jira.mongodb.org</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.7.1</version>
        <build-number>970001</build-number>
        <build-date>13-04-2023</build-date>
    </build-info>


<item>
            <title>[KAFKA-98] MongoSinkTask stops with java.lang.NullPointerException</title>
                <link>https://jira.mongodb.org/browse/KAFKA-98</link>
                <project id="16285" key="KAFKA">Kafka Connector</project>
                    <description>&lt;p&gt;I am doing migration of data about 330 million records and about after completion if 85% I got below exception from sink connector.&#160;&lt;/p&gt;

&lt;p&gt;{&lt;br/&gt;
 &quot;name&quot;: &quot;mongo-sink-assets-shard24&quot;,&lt;br/&gt;
 &quot;connector&quot;: &lt;/p&gt;
{
 &quot;state&quot;: &quot;RUNNING&quot;,
 &quot;worker_id&quot;: &quot;xxx.xxx.xxx.xxx:9083&quot;
 }
&lt;p&gt;,&lt;br/&gt;
 &quot;tasks&quot;: [&lt;/p&gt;
 {
 &quot;id&quot;: 0,
 &quot;state&quot;: &quot;FAILED&quot;,
 &quot;worker_id&quot;: &quot;xxx.xxx.xxx.xxx:9083&quot;,
 &quot;trace&quot;: &quot;org.apache.kafka.connect.errors.ConnectException: Exiting WorkerSinkTask due to unrecoverable exception.\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:561)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:177)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:227)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:830)\nCaused by: java.lang.NullPointerException\n\tat com.mongodb.kafka.connect.sink.MongoSinkTask.processSinkRecords(MongoSinkTask.java:184)\n\tat com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$put$2(MongoSinkTask.java:117)\n\tat java.base/java.util.ArrayList.forEach(ArrayList.java:1507)\n\tat com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$put$3(MongoSinkTask.java:116)\n\tat java.base/java.util.HashMap.forEach(HashMap.java:1338)\n\tat com.mongodb.kafka.connect.sink.MongoSinkTask.put(MongoSinkTask.java:114)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:539)\n\t... 10 more\n&quot;
 }
&lt;p&gt; ],&lt;br/&gt;
 &quot;type&quot;: &quot;sink&quot;&lt;br/&gt;
}&lt;/p&gt;

&lt;p&gt;Error from connect.log&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-04-19 17:09:41,322&amp;#93;&lt;/span&gt; ERROR Mongodb bulk write (partially) failed (com.mongodb.kafka.connect.sink.MongoSinkTask:181)&lt;br/&gt;
com.mongodb.MongoBulkWriteException: Bulk write operation error on server 10.74.1.50:27021. Write errors: [BulkWriteError{index=305, code=28, message=&apos;Cannot create field &apos;sha256&apos; in element &lt;/p&gt;
{xmpCameraRaw: &quot;&amp;lt;x:xmpmeta xmlns:x=&quot;adobe:ns:meta/&quot; x:xmptk=&quot;Adobe XMP Core 5.6-c140 79.160451, 2017/05/06-01:08:21 &quot;&amp;gt;
 &amp;lt;rdf:RDF xmlns:rdf=&quot;http://www.w3.org/1...&quot;}
&lt;p&gt;&apos;, details={}}].&lt;br/&gt;
 at com.mongodb.connection.BulkWriteBatchCombiner.getError(BulkWriteBatchCombiner.java:173)&lt;br/&gt;
 at com.mongodb.connection.BulkWriteBatchCombiner.throwOnError(BulkWriteBatchCombiner.java:202)&lt;br/&gt;
 at com.mongodb.connection.BulkWriteBatchCombiner.getResult(BulkWriteBatchCombiner.java:143)&lt;br/&gt;
 at com.mongodb.operation.BulkWriteBatch.getResult(BulkWriteBatch.java:227)&lt;br/&gt;
 at com.mongodb.operation.MixedBulkWriteOperation.executeBulkWriteBatch(MixedBulkWriteOperation.java:282)&lt;br/&gt;
 at com.mongodb.operation.MixedBulkWriteOperation.access$700(MixedBulkWriteOperation.java:72)&lt;br/&gt;
 at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:205)&lt;br/&gt;
 at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:196)&lt;br/&gt;
 at com.mongodb.operation.OperationHelper.withReleasableConnection(OperationHelper.java:501)&lt;br/&gt;
 at com.mongodb.operation.MixedBulkWriteOperation.execute(MixedBulkWriteOperation.java:196)&lt;br/&gt;
 at com.mongodb.operation.MixedBulkWriteOperation.execute(MixedBulkWriteOperation.java:71)&lt;br/&gt;
 at com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.execute(MongoClientDelegate.java:213)&lt;br/&gt;
 at com.mongodb.client.internal.MongoCollectionImpl.executeBulkWrite(MongoCollectionImpl.java:476)&lt;br/&gt;
 at com.mongodb.client.internal.MongoCollectionImpl.bulkWrite(MongoCollectionImpl.java:456)&lt;br/&gt;
 at com.mongodb.kafka.connect.sink.MongoSinkTask.processSinkRecords(MongoSinkTask.java:177)&lt;br/&gt;
 at com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$put$2(MongoSinkTask.java:117)&lt;br/&gt;
 at java.base/java.util.ArrayList.forEach(ArrayList.java:1507)&lt;br/&gt;
 at com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$put$3(MongoSinkTask.java:116)&lt;br/&gt;
 at java.base/java.util.HashMap.forEach(HashMap.java:1338)&lt;br/&gt;
 at com.mongodb.kafka.connect.sink.MongoSinkTask.put(MongoSinkTask.java:114)&lt;br/&gt;
 at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:539)&lt;br/&gt;
 at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)&lt;br/&gt;
 at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224)&lt;br/&gt;
 at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192)&lt;br/&gt;
 at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:177)&lt;br/&gt;
 at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:227)&lt;br/&gt;
 at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)&lt;br/&gt;
 at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)&lt;br/&gt;
 at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)&lt;br/&gt;
 at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)&lt;br/&gt;
 at java.base/java.lang.Thread.run(Thread.java:830)&lt;/p&gt;

&lt;p&gt;&lt;b&gt;I am using &quot;writemodel.strategy&quot; default value.&lt;/b&gt;&#160;&lt;/p&gt;

&lt;p&gt;Can you please suggest what could cause this?&#160;&lt;/p&gt;

&lt;p&gt;Thanks,&lt;/p&gt;

&lt;p&gt;Rajaramesh.&#160;&lt;/p&gt;</description>
                <environment>Source MongoDB 3.6.8&lt;br/&gt;
Target MongoDB 3.6.8&lt;br/&gt;
Source connector debezium &lt;br/&gt;
Sink connector MongoDB Kafka Sink Connector</environment>
        <key id="1323142">KAFKA-98</key>
            <summary>MongoSinkTask stops with java.lang.NullPointerException</summary>
                <type id="1" iconUrl="https://jira.mongodb.org/secure/viewavatar?size=xsmall&amp;avatarId=14703&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.mongodb.org/images/icons/priorities/major.svg">Major - P3</priority>
                        <status id="6" iconUrl="https://jira.mongodb.org/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="13201">Fixed</resolution>
                                        <assignee username="ross@mongodb.com">Ross Lawley</assignee>
                                    <reporter username="yaramati@adobe.com">Rajaramesh Yaramati</reporter>
                        <labels>
                    </labels>
                <created>Mon, 20 Apr 2020 17:17:27 +0000</created>
                <updated>Sat, 28 Oct 2023 10:46:26 +0000</updated>
                            <resolved>Thu, 7 May 2020 08:42:53 +0000</resolved>
                                    <version>1.0</version>
                                    <fixVersion>1.2.0</fixVersion>
                                    <component>Sink</component>
                                        <votes>0</votes>
                                    <watches>3</watches>
                                                                                                                <comments>
                            <comment id="3104024" author="ross@10gen.com" created="Fri, 22 May 2020 07:29:36 +0000"  >&lt;p&gt;Hi &lt;a href=&quot;https://jira.mongodb.org/secure/ViewProfile.jspa?name=yaramati%40adobe.com&quot; class=&quot;user-hover&quot; rel=&quot;yaramati@adobe.com&quot;&gt;yaramati@adobe.com&lt;/a&gt;,&lt;/p&gt;

&lt;p&gt;The NPE error has been fixed but not yet released, so this ticket has been closed. &lt;/p&gt;

&lt;p&gt;However, the cause remains and was due to the bulk operation failing.  &lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-105&quot; title=&quot;Support errors.tolerance&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-105&quot;&gt;&lt;del&gt;KAFKA-105&lt;/del&gt;&lt;/a&gt; &amp;amp; &lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-106&quot; title=&quot;Review error retrying&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-106&quot;&gt;&lt;del&gt;KAFKA-106&lt;/del&gt;&lt;/a&gt; are tickets to look at error tolerance and handling of errors.  The expected behaviour is the error should still be logged and Kafka itself should then process as per the error.tolerance configuration.&lt;/p&gt;

&lt;p&gt;Please could you post your config in &lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-105&quot; title=&quot;Support errors.tolerance&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-105&quot;&gt;&lt;del&gt;KAFKA-105&lt;/del&gt;&lt;/a&gt; and the error there as that ticket is open and ready for future work. Also please state the Kafka version as well.&lt;/p&gt;</comment>
                            <comment id="3098731" author="yaramati@adobe.com" created="Thu, 21 May 2020 04:12:36 +0000"  >&lt;p&gt;Ross Lawley,&lt;/p&gt;

&lt;p&gt;I just tested your changes related to &quot;Fix possible NPE when logging&quot; and I am still getting the same issue and sink connector terminates after retry limit reaches. As per my understanding, your fix should handle sink connector to continue with the next message after logging error message into logfile.&lt;/p&gt;

&lt;p&gt;I have log files to share with you but I am not able to attach here getting some token missing error.&lt;/p&gt;

&lt;p&gt;&lt;b&gt;Error from sink connector:&lt;/b&gt;&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,176&amp;#93;&lt;/span&gt; INFO &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; Opened connection &lt;span class=&quot;error&quot;&gt;&amp;#91;connectionId\{localValue:2}&amp;#93;&lt;/span&gt; to 10.74.1.50:27021 (org.mongodb.driver.connection:71)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,176&amp;#93;&lt;/span&gt; INFO &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; Opened connection &lt;span class=&quot;error&quot;&gt;&amp;#91;connectionId\{localValue:2}&amp;#93;&lt;/span&gt; to 10.74.1.50:27021 (org.mongodb.driver.connection:71)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,260&amp;#93;&lt;/span&gt; ERROR &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; Mongodb bulk write (partially) failed (com.mongodb.kafka.connect.sink.MongoSinkTask:184)com.mongodb.MongoBulkWriteException: Bulk write operation error on server 10.74.1.50:27021. Write errors: [BulkWriteError{index=67, code=28, message=&apos;Cannot create field &apos;renditions&apos; in element &lt;/p&gt;
{12: null}
&lt;p&gt;&apos;, details={}}]. at com.mongodb.connection.BulkWriteBatchCombiner.getError(BulkWriteBatchCombiner.java:173) at com.mongodb.connection.BulkWriteBatchCombiner.throwOnError(BulkWriteBatchCombiner.java:202) at com.mongodb.connection.BulkWriteBatchCombiner.getResult(BulkWriteBatchCombiner.java:143) at com.mongodb.operation.BulkWriteBatch.getResult(BulkWriteBatch.java:227) at com.mongodb.operation.MixedBulkWriteOperation.executeBulkWriteBatch(MixedBulkWriteOperation.java:282) at com.mongodb.operation.MixedBulkWriteOperation.access$700(MixedBulkWriteOperation.java:72) at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:205) at com.mongodb.operation.MixedBulkWriteOperation$1.call(MixedBulkWriteOperation.java:196) at com.mongodb.operation.OperationHelper.withReleasableConnection(OperationHelper.java:501) at com.mongodb.operation.MixedBulkWriteOperation.execute(MixedBulkWriteOperation.java:196) at com.mongodb.operation.MixedBulkWriteOperation.execute(MixedBulkWriteOperation.java:71) at com.mongodb.client.internal.MongoClientDelegate$DelegateOperationExecutor.execute(MongoClientDelegate.java:213) at com.mongodb.client.internal.MongoCollectionImpl.executeBulkWrite(MongoCollectionImpl.java:476) at com.mongodb.client.internal.MongoCollectionImpl.bulkWrite(MongoCollectionImpl.java:456) at com.mongodb.kafka.connect.sink.MongoSinkTask.processSinkRecords(MongoSinkTask.java:180) at com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$null$2(MongoSinkTask.java:120) at java.base/java.util.ArrayList.forEach(ArrayList.java:1507) at com.mongodb.kafka.connect.sink.MongoSinkTask.lambda$put$3(MongoSinkTask.java:119) at java.base/java.util.HashMap.forEach(HashMap.java:1338) at com.mongodb.kafka.connect.sink.MongoSinkTask.put(MongoSinkTask.java:117) at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:539) at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322) at 
org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224) at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192) at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:177) at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:227) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base/java.lang.Thread.run(Thread.java:830)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,261&amp;#93;&lt;/span&gt; ERROR &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; WriteResult: AcknowledgedBulkWriteResult{insertedCount=0, matchedCount=38, removedCount=0, modifiedCount=9, upserts=[]} (com.mongodb.kafka.connect.sink.MongoSinkTask:185)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,261&amp;#93;&lt;/span&gt; ERROR &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; WriteErrors: [BulkWriteError{index=67, code=28, message=&apos;Cannot create field &apos;renditions&apos; in element &lt;/p&gt;
{12: null}
&lt;p&gt;&apos;, details={}}] (com.mongodb.kafka.connect.sink.MongoSinkTask:186)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,261&amp;#93;&lt;/span&gt; ERROR &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; WriteConcernError: null (com.mongodb.kafka.connect.sink.MongoSinkTask:187)&lt;span class=&quot;error&quot;&gt;&amp;#91;2020-05-20 20:50:49,262&amp;#93;&lt;/span&gt; ERROR &lt;span class=&quot;error&quot;&gt;&amp;#91;mongo-sink-assets-shard24-new|task-0&amp;#93;&lt;/span&gt; WorkerSinkTask{id=mongo-sink-assets-shard24-new-0} RetriableException from SinkTask: (org.apache.kafka.connect.runtime.WorkerSinkTask:552)org.apache.kafka.connect.errors.RetriableException: Bulk write operation error on server 10.74.1.50:27021. Write errors: [BulkWriteError{index=67, code=28, message=&apos;Cannot create field &apos;renditions&apos; in element &lt;/p&gt;
{12: null}
&lt;p&gt;&apos;, details={}}]. at com.mongodb.kafka.connect.sink.MongoSinkTask.checkRetriableException(MongoSinkTask.java:212) at com.mongodb.kafka.connect.sink.MongoSinkTask.processSinkRecords(MongoSinkTask.java:188)&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&lt;b&gt;Error from mongod log:&lt;/b&gt;&lt;/p&gt;

&lt;p&gt;2020-05-20T20:33:37.358-0700 D NETWORK &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; Compressing message with snappy&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D WRITE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; UpdateResult &amp;#8211; upserted: {} modifiers: 1 existing: 0 numDocsModified: 0 numMatched: 0&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; WT rollback_transaction for snapshot id 980803764&lt;br/&gt;
2020-05-20T20:33:37.359-0700 I WRITE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; update poc_oz_prod.poc_assets_new command: { q: &lt;/p&gt;
{ _id: &quot;50efcb5b9d0a431cbe7c3162bac6374e&quot; }
&lt;p&gt;, u: { $set: { dna: &lt;/p&gt;
{ match: false, date: &quot;2020-05-07T00:01:57.957556Z&quot; }
&lt;p&gt; } }, multi: false, upsert: false } planSummary: IDHACK keysExamined:0 docsExamined:0 nMatched:0 nModified:0 numYields:0 locks:{ Global: { acquireCount: &lt;/p&gt;
{ r: 1, w: 1 }
&lt;p&gt; }, Database: { acquireCount: &lt;/p&gt;
{ w: 1 }
&lt;p&gt; }, Collection: { acquireCount: &lt;/p&gt;
{ w: 1 }
&lt;p&gt; } } 0ms&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D QUERY &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; Using idhack: { _id: &quot;84725dbcc52bd2019a08f0f9327e3fee&quot; }&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; WT begin_transaction for snapshot id 980803766&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D - &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; User Assertion: 28:Cannot create field &apos;renditions&apos; in element {12: null} src/mongo/db/update/modifier_node.cpp 239&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; WT rollback_transaction for snapshot id 980803766&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D WRITE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; Caught Assertion in update: PathNotViable: Cannot create field &apos;renditions&apos; in element {12: null}&lt;br/&gt;
2020-05-20T20:33:37.359-0700 I WRITE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; update poc_oz_prod.poc_assets_new command: { q: &lt;/p&gt;
{ _id: &quot;84725dbcc52bd2019a08f0f9327e3fee&quot; }
&lt;p&gt;, u: { $set: { revisions.12.renditions.thumbnail2x: &lt;/p&gt;
{ id: &quot;1e5dbcda2d290412a0f88928821c310c&quot;, created_by: &quot;d49d14c1bfb74dbf2334c027fe3c1c61&quot;, created: 1588809718024706, created_by_ip: &quot;73.185.39.99, 10.92.211.1, 10.92.214.94, 34.213.157.249, 35.160.101.122&quot; }
&lt;p&gt;, revisions.12.updated: 1588809718024706 } }, multi: false, upsert: false } planSummary: IDHACK exception: Cannot create field &apos;renditions&apos; in element {12: null} code:PathNotViable numYields:0 locks:{ Global: { acquireCount: &lt;/p&gt;
{ r: 2, w: 2 }
&lt;p&gt; }, Database: { acquireCount: &lt;/p&gt;
{ w: 2 }
&lt;p&gt; }, Collection: { acquireCount: &lt;/p&gt;
{ w: 2 }
&lt;p&gt; } } 0ms&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D REPL &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; Waiting for write concern. OpTime: { ts: Timestamp(1590032017, 49), t: 2 }, write concern: { w: 1, wtimeout: 0 }&lt;br/&gt;
2020-05-20T20:33:37.359-0700 I COMMAND &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; command poc_oz_prod.$cmd command: update { update: &quot;poc_assets_new&quot;, bypassDocumentValidation: false, ordered: true, stmtIds: [ 0, 1, 2, 3, 4 ], updates: 5, shardVersion: [ Timestamp(1, 92037), ObjectId(&apos;5ea31e40cd8c9318b72b5e16&apos;) ], lsid: &lt;/p&gt;
{ id: UUID(&quot;0a2dba1e-be97-4eb8-aff8-978182ff58c6&quot;), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }
&lt;p&gt;, txnNumber: 35, $clusterTime: { clusterTime: Timestamp(1590032017, 49), signature: &lt;/p&gt;
{ hash: BinData(0, 9782D442613737E518ABEAB61EC775B9B4E30F9B), keyId: 6818528695733452825 }
&lt;p&gt; }, $client: { driver: &lt;/p&gt;
{ name: &quot;mongo-java-driver|sync|mongo-kafka|sink&quot;, version: &quot;3.12.4|1.1.0-5-g32f5458-dirty&quot; }
&lt;p&gt;, os: { type: &quot;Linux&quot;, name: &quot;Linux&quot;, architecture: &quot;amd64&quot;, version: &quot;4.4.0-1095-aws&quot; }, platform: &quot;Java/Oracle Corporation/13.0.2+8&quot;, mongos: { host: &quot;poc-config-mongos:27021&quot;, client: &quot;10.74.1.240:33674&quot;, version: &quot;3.6.8&quot; } }, $configServerState: { opTime: &lt;/p&gt;
{ ts: Timestamp(1590032011, 1), t: 1 }
&lt;p&gt; }, $db: &quot;poc_oz_prod&quot; } numYields:0 reslen:484 locks:{ Global: { acquireCount: &lt;/p&gt;
{ r: 2, w: 2 }
&lt;p&gt; }, Database: { acquireCount: &lt;/p&gt;
{ w: 2 }
&lt;p&gt; }, Collection: { acquireCount: &lt;/p&gt;
{ w: 2 }
&lt;p&gt; } } protocol:op_msg 1ms&lt;br/&gt;
2020-05-20T20:33:37.359-0700 D NETWORK &lt;span class=&quot;error&quot;&gt;&amp;#91;conn894&amp;#93;&lt;/span&gt; Compressing message with snappy&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D NETWORK &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; Decompressing message with snappy&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D COMMAND &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; run command local.$cmd { getMore: 38469818473, collection: &quot;oplog.rs&quot;, batchSize: 13981010, maxTimeMS: 5000, term: 2, lastKnownCommittedOpTime: &lt;/p&gt;
{ ts: Timestamp(1590032017, 48), t: 2 }
&lt;p&gt;, $replData: 1, $oplogQueryData: 1, $readPreference: { mode: &quot;secondaryPreferred&quot; }, $clusterTime: { clusterTime: Timestamp(1590032017, 49), signature: &lt;/p&gt;
{ hash: BinData(0, 9782D442613737E518ABEAB61EC775B9B4E30F9B), keyId: 6818528695733452825 }
&lt;p&gt; }, $db: &quot;local&quot; }&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; NamespaceUUIDCache: registered namespace local.oplog.rs with UUID 86258ee6-3d26-4417-886e-fcaaf736a2c4&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; WT begin_transaction for snapshot id 980803768&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; WT rollback_transaction for snapshot id 980803768&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; WT begin_transaction for snapshot id 980803769&lt;br/&gt;
2020-05-20T20:33:37.361-0700 D STORAGE &lt;span class=&quot;error&quot;&gt;&amp;#91;conn658&amp;#93;&lt;/span&gt; WT rollback_transaction for snapshot id 980803769&lt;br/&gt;
2020-05-20T20:33:37.365-0700 D NETWORK &lt;span class=&quot;error&quot;&gt;&amp;#91;conn885&amp;#93;&lt;/span&gt; Decompressing message with snappy&lt;/p&gt;</comment>
                            <comment id="3073271" author="xgen-internal-githook" created="Thu, 7 May 2020 08:42:42 +0000"  >&lt;p&gt;Author:&lt;/p&gt;
{&apos;name&apos;: &apos;Ross Lawley&apos;, &apos;email&apos;: &apos;ross.lawley@gmail.com&apos;, &apos;username&apos;: &apos;rozza&apos;}
&lt;p&gt;Message: Fix possible NPE when logging&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.mongodb.org/browse/KAFKA-98&quot; title=&quot;MongoSinkTask stops with java.lang.NullPointerException&quot; class=&quot;issue-link&quot; data-issue-key=&quot;KAFKA-98&quot;&gt;&lt;del&gt;KAFKA-98&lt;/del&gt;&lt;/a&gt;&lt;br/&gt;
Branch: master&lt;br/&gt;
&lt;a href=&quot;https://github.com/mongodb/mongo-kafka/commit/68ea3055c346d5a8b23b0b391c854d2c2a92431f&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/mongodb/mongo-kafka/commit/68ea3055c346d5a8b23b0b391c854d2c2a92431f&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="3058357" author="yaramati@adobe.com" created="Tue, 28 Apr 2020 14:04:47 +0000"  >&lt;p&gt;Thank you, Ross Lawley, for looking into and update.&#160;&lt;/p&gt;

&lt;p&gt;Do you know TBD of release 1.2.0?&#160;&lt;/p&gt;

&lt;p&gt;And just want to check, is there a plan to handle errors.tolerence in future releases?&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;Thanks,&lt;/p&gt;

&lt;p&gt;Rajaramesh.&lt;/p&gt;</comment>
                            <comment id="3058340" author="ross@10gen.com" created="Tue, 28 Apr 2020 13:56:56 +0000"  >&lt;p&gt;Hi &lt;a href=&quot;https://jira.mongodb.org/secure/ViewProfile.jspa?name=yaramati%40adobe.com&quot; class=&quot;user-hover&quot; rel=&quot;yaramati@adobe.com&quot;&gt;yaramati@adobe.com&lt;/a&gt;,&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;Looks like there was a write error when trying to write the data to mongodb.&lt;/p&gt;

&lt;p/&gt;
&lt;div id=&quot;syntaxplugin&quot; class=&quot;syntaxplugin&quot; style=&quot;border: 1px dashed #bbb; border-radius: 5px !important; overflow: auto; max-height: 30em;&quot;&gt;
&lt;table cellspacing=&quot;0&quot; cellpadding=&quot;0&quot; border=&quot;0&quot; width=&quot;100%&quot; style=&quot;font-size: 1em; line-height: 1.4em !important; font-weight: normal; font-style: normal; color: black;&quot;&gt;
		&lt;tbody &gt;
				&lt;tr id=&quot;syntaxplugin_code_and_gutter&quot;&gt;
						&lt;td  style=&quot; line-height: 1.4em !important; padding: 0em; vertical-align: top;&quot;&gt;
					&lt;pre style=&quot;font-size: 1em; margin: 0 10px;  margin-top: 10px;   width: auto; padding: 0;&quot;&gt;&lt;span style=&quot;color: black; font-family: &apos;Consolas&apos;, &apos;Bitstream Vera Sans Mono&apos;, &apos;Courier New&apos;, Courier, monospace !important;&quot;&gt;com.mongodb.MongoBulkWriteException: Bulk write operation error on server 10.74.1.50:27021. Write errors: [BulkWriteError{index=305, code=28, message=&apos;Cannot create field &apos;sha256&apos; in element&lt;/span&gt;&lt;/pre&gt;
			&lt;/td&gt;
		&lt;/tr&gt;
				&lt;tr id=&quot;syntaxplugin_code_and_gutter&quot;&gt;
						&lt;td  style=&quot; line-height: 1.4em !important; padding: 0em; vertical-align: top;&quot;&gt;
					&lt;pre style=&quot;font-size: 1em; margin: 0 10px;   width: auto; padding: 0;&quot;&gt;&amp;nbsp;&lt;/pre&gt;
			&lt;/td&gt;
		&lt;/tr&gt;
				&lt;tr id=&quot;syntaxplugin_code_and_gutter&quot;&gt;
						&lt;td  style=&quot; line-height: 1.4em !important; padding: 0em; vertical-align: top;&quot;&gt;
					&lt;pre style=&quot;font-size: 1em; margin: 0 10px;   width: auto; padding: 0;&quot;&gt;&lt;span style=&quot;color: black; font-family: &apos;Consolas&apos;, &apos;Bitstream Vera Sans Mono&apos;, &apos;Courier New&apos;, Courier, monospace !important;&quot;&gt;{xmpCameraRaw: &quot;&amp;lt;x:xmpmeta xmlns:x=&quot;adobe:ns:meta/&quot; x:xmptk=&quot;Adobe XMP Core 5.6-c140 79.160451, 2017/05/06-01:08:21 &quot;&amp;gt; &amp;lt;rdf:RDF xmlns:rdf=&quot;http://www.w3.org/1...&quot;}&lt;/span&gt;&lt;/pre&gt;
			&lt;/td&gt;
		&lt;/tr&gt;
				&lt;tr id=&quot;syntaxplugin_code_and_gutter&quot;&gt;
						&lt;td  style=&quot; line-height: 1.4em !important; padding: 0em; vertical-align: top;&quot;&gt;
					&lt;pre style=&quot;font-size: 1em; margin: 0 10px;   width: auto; padding: 0;&quot;&gt;&amp;nbsp;&lt;/pre&gt;
			&lt;/td&gt;
		&lt;/tr&gt;
				&lt;tr id=&quot;syntaxplugin_code_and_gutter&quot;&gt;
						&lt;td  style=&quot; line-height: 1.4em !important; padding: 0em; vertical-align: top;&quot;&gt;
					&lt;pre style=&quot;font-size: 1em; margin: 0 10px;   width: auto; padding: 0;&quot;&gt;&lt;span style=&quot;color: black; font-family: &apos;Consolas&apos;, &apos;Bitstream Vera Sans Mono&apos;, &apos;Courier New&apos;, Courier, monospace !important;&quot;&gt;&apos;, details={}}].&lt;/span&gt;&lt;/pre&gt;
			&lt;/td&gt;
		&lt;/tr&gt;
				&lt;tr id=&quot;syntaxplugin_code_and_gutter&quot;&gt;
						&lt;td  style=&quot; line-height: 1.4em !important; padding: 0em; vertical-align: top;&quot;&gt;
					&lt;pre style=&quot;font-size: 1em; margin: 0 10px;   margin-bottom: 10px;  width: auto; padding: 0;&quot;&gt;&lt;span style=&quot;color: black; font-family: &apos;Consolas&apos;, &apos;Bitstream Vera Sans Mono&apos;, &apos;Courier New&apos;, Courier, monospace !important;&quot;&gt; &lt;/span&gt;&lt;/pre&gt;
			&lt;/td&gt;
		&lt;/tr&gt;
			&lt;/tbody&gt;
&lt;/table&gt;
&lt;/div&gt;
&lt;p/&gt;

&lt;p&gt;It looks like the NPE was caused by a logging error which will be fixed in the next release.&lt;/p&gt;

&lt;p&gt;Unfortunately, &lt;tt&gt;errors.tolerance=all&lt;/tt&gt; does not include errors created inside a connector.&lt;/p&gt;

&lt;p&gt;Ross&lt;/p&gt;</comment>
                            <comment id="3047583" author="yaramati@adobe.com" created="Mon, 20 Apr 2020 17:42:57 +0000"  >&lt;p&gt;Also, I am using a parameter to&#160;errors.tolerance=all to continue the sink. But sink connector still stops upon this error. Here is my sink configuration if it is useful for you.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

{
 &quot;connector.class&quot;: &quot;com.mongodb.kafka.connect.MongoSinkConnector&quot;,
 &quot;errors.log.include.messages&quot;: &quot;true&quot;,
 &quot;topics&quot;: &quot;shard24.oz_mongo.oz_prod.assets&quot;,
 &quot;tasks.max&quot;: &quot;1&quot;,
 &quot;max.num.retries&quot;: &quot;3&quot;,
 &quot;collection&quot;: &quot;poc_assets&quot;,
 &quot;internal.key.converter.schemas.enable&quot;: &quot;false&quot;,
 &quot;errors.deadletterqueue.context.headers.enable&quot;: &quot;true&quot;,
 &quot;change.data.capture.handler&quot;: &quot;com.mongodb.kafka.connect.sink.cdc.debezium.mongodb.MongoDbHandler&quot;,
 &quot;key.converter.schemas.enable&quot;: &quot;false&quot;,
 &quot;internal.key.converter&quot;: &quot;org.apache.kafka.connect.json.JsonConverter&quot;,
 &quot;database&quot;: &quot;poc_oz_prod&quot;,
 &quot;errors.deadletterqueue.topic.name&quot;: &quot;error-messages&quot;,
 &quot;internal.value.converter.schemas.enable&quot;: &quot;false&quot;,
 &quot;value.converter.schemas.enable&quot;: &quot;false&quot;,
 &quot;internal.value.converter&quot;: &quot;org.apache.kafka.connect.json.JsonConverter&quot;,
 &quot;connection.uri&quot;: &quot;mongodb://xxx.xxx.xxx.xxx:27021&quot;,
 &quot;name&quot;: &quot;mongo-sink-assets-shard24&quot;,
 &quot;errors.tolerance&quot;: &quot;all&quot;,
 &quot;value.converter&quot;: &quot;org.apache.kafka.connect.json.JsonConverter&quot;,
 &quot;retries.defer.timeout&quot;: &quot;5000&quot;,
 &quot;session.timeout.ms&quot;: &quot;25000&quot;,
 &quot;errors.log.enable&quot;: &quot;true&quot;,
 &quot;key.converter&quot;: &quot;org.apache.kafka.connect.json.JsonConverter&quot;
}</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_15850" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                    <customfield id="customfield_12550" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>2|hx44kf:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10558" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>