- Type: Build Failure
- Resolution: Unresolved
- Priority: Unknown
- Affects Version/s: None
- Component/s: None
- Python Drivers
Name of Failure:
test.test_client_bulk_write.TestClientBulkWriteCSOT.test_timeout_in_multi_batch_bulk_write, test.asynchronous.test_client_bulk_write.TestClientBulkWriteCSOT.test_timeout_in_multi_batch_bulk_write
Link to task:
Context of when and why the failure occurred:
Stack trace:
Sync:
[2025/04/08 10:19:57.112] FAILURE: AssertionError: 1 != 2 ()
[2025/04/08 10:19:57.112] self = <test.test_client_bulk_write.TestClientBulkWriteCSOT testMethod=test_timeout_in_multi_batch_bulk_write>
[2025/04/08 10:19:57.112]     @client_context.require_version_min(8, 0, 0, -24)
[2025/04/08 10:19:57.112]     @client_context.require_no_serverless
[2025/04/08 10:19:57.112]     @client_context.require_failCommand_fail_point
[2025/04/08 10:19:57.112]     def test_timeout_in_multi_batch_bulk_write(self):
[2025/04/08 10:19:57.112]         _OVERHEAD = 500
[2025/04/08 10:19:57.112]
[2025/04/08 10:19:57.112]         internal_client = self.rs_or_single_client(timeoutMS=None)
[2025/04/08 10:19:57.112]
[2025/04/08 10:19:57.112]         collection = internal_client.db["coll"]
[2025/04/08 10:19:57.112]         self.addCleanup(collection.drop)
[2025/04/08 10:19:57.112]         collection.drop()
[2025/04/08 10:19:57.112]
[2025/04/08 10:19:57.112]         fail_command = {
[2025/04/08 10:19:57.112]             "configureFailPoint": "failCommand",
[2025/04/08 10:19:57.112]             "mode": {"times": 2},
[2025/04/08 10:19:57.112]             "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010},
[2025/04/08 10:19:57.112]         }
[2025/04/08 10:19:57.112]         with self.fail_point(fail_command):
[2025/04/08 10:19:57.112]             models = []
[2025/04/08 10:19:57.112]             num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1)
[2025/04/08 10:19:57.112]             b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD)
[2025/04/08 10:19:57.112]             for _ in range(num_models):
[2025/04/08 10:19:57.112]                 models.append(
[2025/04/08 10:19:57.112]                     InsertOne(
[2025/04/08 10:19:57.112]                         namespace="db.coll",
[2025/04/08 10:19:57.112]                         document={"a": b_repeated},
[2025/04/08 10:19:57.112]                     )
[2025/04/08 10:19:57.112]                 )
[2025/04/08 10:19:57.112]
[2025/04/08 10:19:57.112]             listener = OvertCommandListener()
[2025/04/08 10:19:57.112]             client = self.rs_or_single_client(
[2025/04/08 10:19:57.112]                 event_listeners=[listener],
[2025/04/08 10:19:57.112]                 readConcernLevel="majority",
[2025/04/08 10:19:57.112]                 readPreference="primary",
[2025/04/08 10:19:57.112]                 timeoutMS=2000,
[2025/04/08 10:19:57.112]                 w="majority",
[2025/04/08 10:19:57.112]             )
[2025/04/08 10:19:57.112]             # Initialize the client with a larger timeout to help make test less flakey
[2025/04/08 10:19:57.112]             with pymongo.timeout(10):
[2025/04/08 10:19:57.112]                 client.admin.command("ping")
[2025/04/08 10:19:57.112]             with self.assertRaises(ClientBulkWriteException) as context:
[2025/04/08 10:19:57.112]                 client.bulk_write(models=models)
[2025/04/08 10:19:57.112]             self.assertIsInstance(context.exception.error, NetworkTimeout)
[2025/04/08 10:19:57.112]
[2025/04/08 10:19:57.112]             bulk_write_events = []
[2025/04/08 10:19:57.112]             for event in listener.started_events:
[2025/04/08 10:19:57.112]                 if event.command_name == "bulkWrite":
[2025/04/08 10:19:57.112]                     bulk_write_events.append(event)
[2025/04/08 10:19:57.112] >           self.assertEqual(len(bulk_write_events), 2)
[2025/04/08 10:19:57.112] E           AssertionError: 1 != 2
[2025/04/08 10:19:57.112] test/test_client_bulk_write.py:691: AssertionError
Async:
[2025/04/08 10:06:15.748] FAILURE: AssertionError: 1 != 2 ()
[2025/04/08 10:06:15.748] self = <test.asynchronous.test_client_bulk_write.TestClientBulkWriteCSOT testMethod=test_timeout_in_multi_batch_bulk_write>
[2025/04/08 10:06:15.748]     @async_client_context.require_version_min(8, 0, 0, -24)
[2025/04/08 10:06:15.748]     @async_client_context.require_no_serverless
[2025/04/08 10:06:15.748]     @async_client_context.require_failCommand_fail_point
[2025/04/08 10:06:15.748]     async def test_timeout_in_multi_batch_bulk_write(self):
[2025/04/08 10:06:15.748]         _OVERHEAD = 500
[2025/04/08 10:06:15.748]
[2025/04/08 10:06:15.748]         internal_client = await self.async_rs_or_single_client(timeoutMS=None)
[2025/04/08 10:06:15.748]
[2025/04/08 10:06:15.748]         collection = internal_client.db["coll"]
[2025/04/08 10:06:15.748]         self.addAsyncCleanup(collection.drop)
[2025/04/08 10:06:15.748]         await collection.drop()
[2025/04/08 10:06:15.748]
[2025/04/08 10:06:15.748]         fail_command = {
[2025/04/08 10:06:15.748]             "configureFailPoint": "failCommand",
[2025/04/08 10:06:15.748]             "mode": {"times": 2},
[2025/04/08 10:06:15.748]             "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010},
[2025/04/08 10:06:15.748]         }
[2025/04/08 10:06:15.748]         async with self.fail_point(fail_command):
[2025/04/08 10:06:15.748]             models = []
[2025/04/08 10:06:15.748]             num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1)
[2025/04/08 10:06:15.748]             b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD)
[2025/04/08 10:06:15.748]             for _ in range(num_models):
[2025/04/08 10:06:15.748]                 models.append(
[2025/04/08 10:06:15.748]                     InsertOne(
[2025/04/08 10:06:15.748]                         namespace="db.coll",
[2025/04/08 10:06:15.748]                         document={"a": b_repeated},
[2025/04/08 10:06:15.748]                     )
[2025/04/08 10:06:15.748]                 )
[2025/04/08 10:06:15.748]
[2025/04/08 10:06:15.748]             listener = OvertCommandListener()
[2025/04/08 10:06:15.748]             client = await self.async_rs_or_single_client(
[2025/04/08 10:06:15.748]                 event_listeners=[listener],
[2025/04/08 10:06:15.748]                 readConcernLevel="majority",
[2025/04/08 10:06:15.748]                 readPreference="primary",
[2025/04/08 10:06:15.748]                 timeoutMS=2000,
[2025/04/08 10:06:15.748]                 w="majority",
[2025/04/08 10:06:15.748]             )
[2025/04/08 10:06:15.748]             # Initialize the client with a larger timeout to help make test less flakey
[2025/04/08 10:06:15.748]             with pymongo.timeout(10):
[2025/04/08 10:06:15.748]                 await client.admin.command("ping")
[2025/04/08 10:06:15.748]             with self.assertRaises(ClientBulkWriteException) as context:
[2025/04/08 10:06:15.748]                 await client.bulk_write(models=models)
[2025/04/08 10:06:15.748]             self.assertIsInstance(context.exception.error, NetworkTimeout)
[2025/04/08 10:06:15.748]
[2025/04/08 10:06:15.748]             bulk_write_events = []
[2025/04/08 10:06:15.748]             for event in listener.started_events:
[2025/04/08 10:06:15.748]                 if event.command_name == "bulkWrite":
[2025/04/08 10:06:15.748]                     bulk_write_events.append(event)
[2025/04/08 10:06:15.748] >           self.assertEqual(len(bulk_write_events), 2)
[2025/04/08 10:06:15.748] E           AssertionError: 1 != 2
[2025/04/08 10:06:15.748] test/asynchronous/test_client_bulk_write.py:695: AssertionError
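
For context, the failing assertion counts "bulkWrite" command-started events captured by the test's OvertCommandListener: the oversized batch of models is expected to be split into two bulkWrite commands, but only one started event is observed. Below is a minimal sketch of the same event counting using PyMongo's public command monitoring API; it is an illustration only (a plain CommandListener stands in for the test helper, the URI is a placeholder, and MongoClient.bulk_write requires MongoDB 8.0+):

    from pymongo import InsertOne, MongoClient, monitoring


    class BulkWriteCounter(monitoring.CommandListener):
        """Count started bulkWrite commands, mirroring what the failing assertion checks."""

        def __init__(self):
            self.bulk_write_started = 0

        def started(self, event):
            if event.command_name == "bulkWrite":
                self.bulk_write_started += 1

        def succeeded(self, event):
            pass

        def failed(self, event):
            pass


    counter = BulkWriteCounter()
    # Placeholder URI; timeoutMS mirrors the CSOT budget used by the test.
    client = MongoClient("mongodb://localhost:27017", event_listeners=[counter], timeoutMS=2000)
    # Small illustrative batch; the real test builds one large enough that the
    # driver must split it into two bulkWrite commands.
    models = [InsertOne(namespace="db.coll", document={"a": "b" * 1000}) for _ in range(10)]
    client.bulk_write(models=models)
    # The test expects two started bulkWrite events; the failing runs above only
    # record one before the client-side timeout fires.
    print(counter.bulk_write_started)
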
Also seeing an ExecutionTimeout ("operation would exceed time limit") for the same test in https://evergreen.mongodb.com/test_log/mongo_python_driver_mongodb_v4.0_test_pypy3.10_auth_nossl_replica_set_patch_e6a4a7145eccb0b6c76c84aa539f29b40f3424b3_67fec6138250720007260f95_25_04_15_20_49_09/0?test_name=2720a96659569f45e784e26287defb3d#L0:
 [2025/04/15 14:11:13.061] FAILURE: pymongo.errors.ExecutionTimeout: operation would exceed time limit, remaining timeout:-0.00514 <= network round trip time:0.00000  (configured timeouts: timeoutMS: 2000.0ms, connectTimeoutMS: 20000.0ms), full error: {'ok': 0, 'errmsg': 'operation would exceed time limit, remaining timeout:-0.00514 <= network round trip time:0.00000  (configured timeouts: timeoutMS: 2000.0ms, connectTimeoutMS: 20000.0ms)', 'code': 50} ()
 [2025/04/15 14:11:13.061] self = <test.asynchronous.test_client_bulk_write.TestClientBulkWriteCSOT testMethod=test_timeout_in_multi_batch_bulk_write>
 [2025/04/15 14:11:13.061]     @async_client_context.require_version_min(8, 0, 0, -24)
 [2025/04/15 14:11:13.061]     @async_client_context.require_no_serverless
 [2025/04/15 14:11:13.061]     @async_client_context.require_failCommand_fail_point
 [2025/04/15 14:11:13.061]     async def test_timeout_in_multi_batch_bulk_write(self):
 [2025/04/15 14:11:13.061]         _OVERHEAD = 500
 [2025/04/15 14:11:13.061]     
 [2025/04/15 14:11:13.061]         internal_client = await self.async_rs_or_single_client(timeoutMS=None)
 [2025/04/15 14:11:13.061]     
 [2025/04/15 14:11:13.061]         collection = internal_client.db["coll"]
 [2025/04/15 14:11:13.061]         self.addAsyncCleanup(collection.drop)
 [2025/04/15 14:11:13.061]         await collection.drop()
 [2025/04/15 14:11:13.061]     
 [2025/04/15 14:11:13.061]         fail_command = {
 [2025/04/15 14:11:13.061]             "configureFailPoint": "failCommand",
 [2025/04/15 14:11:13.061]             "mode": {"times": 2},
 [2025/04/15 14:11:13.061]             "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010},
 [2025/04/15 14:11:13.061]         }
 [2025/04/15 14:11:13.061]         async with self.fail_point(fail_command):
 [2025/04/15 14:11:13.061]             models = []
 [2025/04/15 14:11:13.061]             num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1)
 [2025/04/15 14:11:13.061]             b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD)
 [2025/04/15 14:11:13.061]             for _ in range(num_models):
 [2025/04/15 14:11:13.061]                 models.append(
 [2025/04/15 14:11:13.061]                     InsertOne(
 [2025/04/15 14:11:13.061]                         namespace="db.coll",
 [2025/04/15 14:11:13.061]                         document={"a": b_repeated},
 [2025/04/15 14:11:13.061]                     )
 [2025/04/15 14:11:13.061]                 )
 [2025/04/15 14:11:13.061]     
 [2025/04/15 14:11:13.061]             listener = OvertCommandListener()
 [2025/04/15 14:11:13.061]             client = await self.async_rs_or_single_client(
 [2025/04/15 14:11:13.061]                 event_listeners=[listener],
 [2025/04/15 14:11:13.061]                 readConcernLevel="majority",
 [2025/04/15 14:11:13.061]                 readPreference="primary",
 [2025/04/15 14:11:13.061]                 timeoutMS=2000,
 [2025/04/15 14:11:13.061]                 w="majority",
 [2025/04/15 14:11:13.061]             )
 [2025/04/15 14:11:13.061]             # Initialize the client with a larger timeout to help make test less flakey
 [2025/04/15 14:11:13.061]             with pymongo.timeout(10):
 [2025/04/15 14:11:13.061]                 await client.admin.command("ping")
 [2025/04/15 14:11:13.061]             with self.assertRaises(ClientBulkWriteException) as context:
 [2025/04/15 14:11:13.061] >               await client.bulk_write(models=models)
 [2025/04/15 14:11:13.061] test/asynchronous/test_client_bulk_write.py:688: 
 [2025/04/15 14:11:13.061] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
 [2025/04/15 14:11:13.061] pymongo/_csot.py:108: in csot_wrapper
 [2025/04/15 14:11:13.061]     return await func(self, *args, **kwargs)
 [2025/04/15 14:11:13.061] pymongo/asynchronous/mongo_client.py:2535: in bulk_write
 [2025/04/15 14:11:13.061]     return await blk.execute(session, _Op.BULK_WRITE)
 [2025/04/15 14:11:13.061] pymongo/asynchronous/client_bulk.py:750: in execute
 [2025/04/15 14:11:13.061]     result = await self.execute_command(session, operation)
 [2025/04/15 14:11:13.061] pymongo/asynchronous/client_bulk.py:656: in execute_command
 [2025/04/15 14:11:13.061]     await self.client._retryable_write(
 [2025/04/15 14:11:13.061] pymongo/asynchronous/mongo_client.py:2046: in _retryable_write
 [2025/04/15 14:11:13.061]     return await self._retry_with_session(retryable, func, s, bulk, operation, operation_id)
 [2025/04/15 14:11:13.061] pymongo/asynchronous/mongo_client.py:1932: in _retry_with_session
 [2025/04/15 14:11:13.061]     return await self._retry_internal(
 [2025/04/15 14:11:13.061] pymongo/_csot.py:109: in csot_wrapper
 [2025/04/15 14:11:13.061]     return await func(self, *args, **kwargs)
 [2025/04/15 14:11:13.061] pymongo/asynchronous/mongo_client.py:1967: in _retry_internal
 [2025/04/15 14:11:13.061]     return await _ClientConnectionRetryable(
 [2025/04/15 14:11:13.061] pymongo/asynchronous/mongo_client.py:2723: in run
 [2025/04/15 14:11:13.061]     return await self._read() if self._is_read else await self._write()
 [2025/04/15 14:11:13.061] pymongo/asynchronous/mongo_client.py:2855: in _write
 [2025/04/15 14:11:13.061]     return await self._func(self._session, conn, self._retryable)  # type: ignore
 [2025/04/15 14:11:13.061] pymongo/asynchronous/client_bulk.py:647: in retryable_bulk
 [2025/04/15 14:11:13.061]     await self._execute_command(
 [2025/04/15 14:11:13.061] pymongo/asynchronous/client_bulk.py:544: in _execute_command
 [2025/04/15 14:11:13.061]     conn.apply_timeout(self.client, cmd)
 [2025/04/15 14:11:13.061] _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
 [2025/04/15 14:11:13.061]     def apply_timeout(
 [2025/04/15 14:11:13.061]         self, client: AsyncMongoClient, cmd: Optional[MutableMapping[str, Any]]
 [2025/04/15 14:11:13.061]     ) -> Optional[float]:
 [2025/04/15 14:11:13.061]         # CSOT: use remaining timeout when set.
 [2025/04/15 14:11:13.061]         timeout = _csot.remaining()
 [2025/04/15 14:11:13.061]         if timeout is None:
 [2025/04/15 14:11:13.061]             # Reset the socket timeout unless we're performing a streaming monitor check.
 [2025/04/15 14:11:13.061]             if not self.more_to_come:
 [2025/04/15 14:11:13.061]                 self.set_conn_timeout(self.opts.socket_timeout)
 [2025/04/15 14:11:13.061]             return None
 [2025/04/15 14:11:13.061]         # RTT validation.
 [2025/04/15 14:11:13.061]         rtt = _csot.get_rtt()
 [2025/04/15 14:11:13.061]         if rtt is None:
 [2025/04/15 14:11:13.061]             rtt = self.connect_rtt
 [2025/04/15 14:11:13.061]         max_time_ms = timeout - rtt
 [2025/04/15 14:11:13.061]         if max_time_ms < 0:
 [2025/04/15 14:11:13.061]             timeout_details = _get_timeout_details(self.opts)
 [2025/04/15 14:11:13.061]             formatted = format_timeout_details(timeout_details)
 [2025/04/15 14:11:13.061]             # CSOT: raise an error without running the command since we know it will time out.
 [2025/04/15 14:11:13.061]             errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}"
 [2025/04/15 14:11:13.061] >           raise ExecutionTimeout(
 [2025/04/15 14:11:13.061]                 errmsg,
 [2025/04/15 14:11:13.061]                 50,
 [2025/04/15 14:11:13.061]                 {"ok": 0, "errmsg": errmsg, "code": 50},
 [2025/04/15 14:11:13.061]                 self.max_wire_version,
 [2025/04/15 14:11:13.061]             )
 [2025/04/15 14:11:13.061] E           pymongo.errors.ExecutionTimeout: operation would exceed time limit, remaining timeout:-0.00514 <= network round trip time:0.00000  (configured timeouts: timeoutMS: 2000.0ms, connectTimeoutMS: 20000.0ms), full error: {'ok': 0, 'errmsg': 'operation would exceed time limit, remaining timeout:-0.00514 <= network round trip time:0.00000  (configured timeouts: timeoutMS: 2000.0ms, connectTimeoutMS: 20000.0ms)', 'code': 50}
 [2025/04/15 14:11:13.061] pymongo/asynchronous/pool.py:219: ExecutionTimeout
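
For reference, the ExecutionTimeout above is raised by a purely client-side pre-check in apply_timeout (quoted in the trace): if the remaining CSOT budget minus the expected network round-trip time is negative, the driver rejects the command with error code 50 without sending it. A standalone sketch of that arithmetic using the numbers from this failure (the helper below is an illustration, not PyMongo internals):

    def would_exceed_time_limit(remaining_timeout_s: float, rtt_s: float) -> bool:
        """Mirror the client-side CSOT pre-check quoted above: reject the command
        when the remaining budget, less the expected round trip, drops below zero."""
        return remaining_timeout_s - rtt_s < 0


    # Values from the ExecutionTimeout message: remaining timeout -0.00514s, RTT 0.00000s.
    # The 2000ms timeoutMS budget was already consumed by the first blocked batch,
    # so the next bulkWrite attempt is rejected client-side with code 50.
    print(would_exceed_time_limit(-0.00514, 0.0))  # True
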