def _sync_oplog(self, oplog_start):
""" Replay oplog.
"""
self._last_optime = oplog_start
n_total = 0
n_skip = 0
while True:
# try to get cursor until success
try:
start_optime_valid = False
host, port = self._src.client().address
log.info('try to sync oplog from %s on %s:%d' % (self._last_optime, host, port))
# set codec options to guarantee the order of keys in command
coll = self._src.client()['local'].get_collection('oplog.rs',
codec_options=bson.codec_options.CodecOptions(document_class=bson.son.SON))
cursor = coll.find({'ts': {'$gte': oplog_start}},
cursor_type=pymongo.cursor.CursorType.TAILABLE_AWAIT,
no_cursor_timeout=True)
# New in version 3.2
# src_version = mongo_utils.get_version(self._src.client())
# if mongo_utils.version_higher_or_equal(src_version, '3.2.0'):
# cursor.max_await_time_ms(1000)
while True:
try:
if not cursor.alive:
log.error('cursor is dead')
raise pymongo.errors.AutoReconnect
NOTE (review question): When the sync loop re-creates the tailable cursor after a
reconnect, why is it created from `oplog_start` instead of `self._last_optime`?
Resuming from `self._last_optime` would avoid re-reading (and re-skipping)
oplog entries that were already applied, making oplog sync noticeably more
efficient under a flaky network where reconnects are frequent.