From 32f220e02c9135669beee7d95093c8fc0fd650b6 Mon Sep 17 00:00:00 2001
From: "Richard Kuo (Danswer)"
Date: Wed, 22 Jan 2025 16:23:24 -0800
Subject: [PATCH] remove debugging for specific problem tenants

---
 .../background/celery/tasks/indexing/tasks.py | 103 ------------------
 1 file changed, 103 deletions(-)

diff --git a/backend/onyx/background/celery/tasks/indexing/tasks.py b/backend/onyx/background/celery/tasks/indexing/tasks.py
index 650b3da83f..e3987e490b 100644
--- a/backend/onyx/background/celery/tasks/indexing/tasks.py
+++ b/backend/onyx/background/celery/tasks/indexing/tasks.py
@@ -68,10 +68,6 @@ logger = setup_logger()
 def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
     """a lightweight task used to kick off indexing tasks.
     Occcasionally does some validation of existing state to clear up error conditions"""
-    debug_tenants = {
-        "tenant_i-043470d740845ec56",
-        "tenant_82b497ce-88aa-4fbd-841a-92cae43529c8",
-    }
     time_start = time.monotonic()
 
     tasks_created = 0
@@ -123,16 +119,6 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
 
         # kick off index attempts
         for cc_pair_id in cc_pair_ids:
-            # debugging logic - remove after we're done
-            if tenant_id in debug_tenants:
-                ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                task_logger.info(
-                    f"check_for_indexing cc_pair lock: "
-                    f"tenant={tenant_id} "
-                    f"cc_pair={cc_pair_id} "
-                    f"ttl={ttl}"
-                )
-
             lock_beat.reacquire()
 
             redis_connector = RedisConnector(tenant_id, cc_pair_id)
@@ -141,30 +127,12 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
                     db_session
                 )
                 for search_settings_instance in search_settings_list:
-                    if tenant_id in debug_tenants:
-                        ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                        task_logger.info(
-                            f"check_for_indexing cc_pair search settings lock: "
-                            f"tenant={tenant_id} "
-                            f"cc_pair={cc_pair_id} "
-                            f"ttl={ttl}"
-                        )
-
                     redis_connector_index = redis_connector.new_index(
                         search_settings_instance.id
                     )
                     if redis_connector_index.fenced:
                         continue
 
-                    if tenant_id in debug_tenants:
-                        ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                        task_logger.info(
-                            f"check_for_indexing get_connector_credential_pair_from_id: "
-                            f"tenant={tenant_id} "
-                            f"cc_pair={cc_pair_id} "
-                            f"ttl={ttl}"
-                        )
-
                     cc_pair = get_connector_credential_pair_from_id(
                         db_session=db_session,
                         cc_pair_id=cc_pair_id,
@@ -172,28 +140,10 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
                     if not cc_pair:
                         continue
 
-                    if tenant_id in debug_tenants:
-                        ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                        task_logger.info(
-                            f"check_for_indexing get_last_attempt_for_cc_pair: "
-                            f"tenant={tenant_id} "
-                            f"cc_pair={cc_pair_id} "
-                            f"ttl={ttl}"
-                        )
-
                     last_attempt = get_last_attempt_for_cc_pair(
                         cc_pair.id, search_settings_instance.id, db_session
                     )
 
-                    if tenant_id in debug_tenants:
-                        ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                        task_logger.info(
-                            f"check_for_indexing cc_pair should index: "
-                            f"tenant={tenant_id} "
-                            f"cc_pair={cc_pair_id} "
-                            f"ttl={ttl}"
-                        )
-
                     search_settings_primary = False
                     if search_settings_instance.id == search_settings_list[0].id:
                         search_settings_primary = True
@@ -226,15 +176,6 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
                                 cc_pair.id, None, db_session
                             )
 
-                    if tenant_id in debug_tenants:
-                        ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                        task_logger.info(
-                            f"check_for_indexing cc_pair try_creating_indexing_task: "
-                            f"tenant={tenant_id} "
-                            f"cc_pair={cc_pair_id} "
-                            f"ttl={ttl}"
-                        )
-
                     # using a task queue and only allowing one task per cc_pair/search_setting
                     # prevents us from starving out certain attempts
                     attempt_id = try_creating_indexing_task(
@@ -255,24 +196,6 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
                         )
                         tasks_created += 1
 
-                    if tenant_id in debug_tenants:
-                        ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                        task_logger.info(
-                            f"check_for_indexing cc_pair try_creating_indexing_task finished: "
-                            f"tenant={tenant_id} "
-                            f"cc_pair={cc_pair_id} "
-                            f"ttl={ttl}"
-                        )
-
-        # debugging logic - remove after we're done
-        if tenant_id in debug_tenants:
-            ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-            task_logger.info(
-                f"check_for_indexing unfenced lock: "
-                f"tenant={tenant_id} "
-                f"ttl={ttl}"
-            )
-
         lock_beat.reacquire()
 
         # Fail any index attempts in the DB that don't have fences
@@ -282,24 +205,7 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
                 db_session, redis_client
            )
 
-            if tenant_id in debug_tenants:
-                ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                task_logger.info(
-                    f"check_for_indexing after get unfenced lock: "
-                    f"tenant={tenant_id} "
-                    f"ttl={ttl}"
-                )
-
             for attempt_id in unfenced_attempt_ids:
-                # debugging logic - remove after we're done
-                if tenant_id in debug_tenants:
-                    ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-                    task_logger.info(
-                        f"check_for_indexing unfenced attempt id lock: "
-                        f"tenant={tenant_id} "
-                        f"ttl={ttl}"
-                    )
-
                 lock_beat.reacquire()
 
                 attempt = get_index_attempt(db_session, attempt_id)
@@ -317,15 +223,6 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
                     attempt.id, db_session, failure_reason=failure_reason
                 )
 
-        # debugging logic - remove after we're done
-        if tenant_id in debug_tenants:
-            ttl = redis_client.ttl(OnyxRedisLocks.CHECK_INDEXING_BEAT_LOCK)
-            task_logger.info(
-                f"check_for_indexing validate fences lock: "
-                f"tenant={tenant_id} "
-                f"ttl={ttl}"
-            )
-
         lock_beat.reacquire()
         # we want to run this less frequently than the overall task
         if not redis_client.exists(OnyxRedisSignals.VALIDATE_INDEXING_FENCES):