WARNING: THIS SITE IS A MIRROR OF GITHUB.COM / IT CANNOT LOG IN OR REGISTER ACCOUNTS / THE CONTENTS ARE PROVIDED AS-IS / THIS SITE ASSUMES NO RESPONSIBILITY FOR ANY DISPLAYED CONTENT OR LINKS / IF YOU FIND SOMETHING THAT MAY NOT BE APPROPRIATE FOR EVERYONE, CONTACT THE ADMIN AT ilovescratch@foxmail.com
Skip to content

Commit b081221

Browse files
CCMRG-1903: Centralize lock logging in BaseCodecovTask (#588)
* CCMRG-1903: Centralize lock logging in BaseCodecovTask - Add with_logged_lock() context manager to BaseCodecovTask for consistent lock logging - Refactor all tasks to use centralized lock logging method - Remove duplicate lock logging code from: - upload.py - preprocess_upload.py - sync_repos.py - sync_pull.py - manual_trigger.py - crontasks.py - upload_finisher.py - All locks now log: Acquiring lock, Acquired lock, Releasing lock (with duration) - No changes to actual lock logic, only added observability * CCMRG-1903: Add tests for with_logged_lock() method - Add comprehensive test suite for with_logged_lock() context manager - Test that 'Acquiring lock' is logged before lock acquisition - Test that 'Acquired lock' is logged after successful acquisition - Test that 'Releasing lock' is logged with duration after release - Test that extra context is included in all log messages - Test that code executes correctly within lock context - Test that LockError propagates correctly (not caught) - Test that 'Releasing lock' is logged even on exceptions * Fix ruff lint error: rename loop variable to avoid shadowing imported 'call' * Fix floating point precision issue in lock duration tests Use approximate comparison instead of exact equality for lock_duration_seconds to handle floating point precision issues * Refactor lock duration assertions to resolve flakes Updated assertions in test_base.py to use multi-line format for better clarity in floating point precision checks for lock_duration_seconds. * Fix maintainability issue: ensure lock_name matches actual lock Modify get_report_lock to return both lock and lock_name as a tuple, eliminating duplication and ensuring the lock_name passed to with_logged_lock always matches the actual lock being used. This prevents potential bugs if get_report_lock's internal lock naming logic changes in the future. * Remove commented log verification in test_base.py Removed comments verifying log messages in unit tests.
1 parent b980824 commit b081221

File tree

9 files changed

+301
-28
lines changed

9 files changed

+301
-28
lines changed

apps/worker/tasks/base.py

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
import logging
2+
import time
3+
from contextlib import contextmanager
24
from datetime import datetime
35

46
import sentry_sdk
@@ -533,6 +535,60 @@ def get_repo_provider_service(
533535

534536
return None
535537

538+
@contextmanager
def with_logged_lock(self, lock, lock_name: str, **extra_log_context):
    """Wrap a Redis lock so its lifecycle is logged consistently.

    Emits three log records around the lock's lifetime:

    - "Acquiring lock" just before attempting acquisition
    - "Acquired lock" once the lock has been entered
    - "Releasing lock" (with ``lock_duration_seconds``) on exit, even
      when the guarded code raises

    Lock acquisition failures (e.g. ``LockError``) propagate to the
    caller unchanged; this wrapper only adds observability.

    Args:
        lock: A Redis lock object (from redis_connection.lock())
        lock_name: Name of the lock for logging purposes
        **extra_log_context: Additional context to include in all log messages

    Example:
        with self.with_logged_lock(
            redis_connection.lock(lock_name, timeout=300),
            lock_name=lock_name,
            repoid=repoid,
            commitid=commitid,
        ):
            ...
    """

    def _log(message, **leading_fields):
        # Field order mirrors the call sites: lock_name first, any
        # message-specific fields next, caller context last (so callers
        # may deliberately override earlier keys).
        _extra = {"lock_name": lock_name, **leading_fields, **extra_log_context}
        log.info(message, extra=_extra)

    _log("Acquiring lock")
    with lock:
        # NOTE(review): time.time() is wall-clock and can jump under NTP
        # adjustments; time.monotonic() would be the canonical duration
        # clock, but the unit tests patch tasks.base.time.time — confirm
        # before switching.
        acquired_at = time.time()
        _log("Acquired lock")
        try:
            yield
        finally:
            _log(
                "Releasing lock",
                lock_duration_seconds=time.time() - acquired_at,
            )
591+
536592
def _call_upload_breadcrumb_task(
537593
self,
538594
commit_sha: str,

apps/worker/tasks/crontasks.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,14 @@ def run_impl(self, db_session, *args, cron_task_generation_time_iso, **kwargs):
3737
redis_connection = get_redis_connection()
3838
generation_time = datetime.fromisoformat(cron_task_generation_time_iso)
3939
try:
40-
with redis_connection.lock(
41-
lock_name,
42-
timeout=max(60 * 5, self.hard_time_limit_task),
43-
blocking_timeout=1,
40+
with self.with_logged_lock(
41+
redis_connection.lock(
42+
lock_name,
43+
timeout=max(60 * 5, self.hard_time_limit_task),
44+
blocking_timeout=1,
45+
),
46+
lock_name=lock_name,
47+
task_name=self.name,
4448
):
4549
min_seconds_interval = (
4650
self.get_min_seconds_interval_between_executions()

apps/worker/tasks/manual_trigger.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,15 @@ def run_impl(
4141
lock_name = f"manual_trigger_lock_{repoid}_{commitid}"
4242
redis_connection = get_redis_connection()
4343
try:
44-
with redis_connection.lock(
45-
lock_name,
46-
timeout=60 * 5,
47-
blocking_timeout=5,
44+
with self.with_logged_lock(
45+
redis_connection.lock(
46+
lock_name,
47+
timeout=60 * 5,
48+
blocking_timeout=5,
49+
),
50+
lock_name=lock_name,
51+
repoid=repoid,
52+
commitid=commitid,
4853
):
4954
return self.process_impl_within_lock(
5055
db_session=db_session,

apps/worker/tasks/preprocess_upload.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -57,10 +57,15 @@ def run_impl(self, db_session, *, repoid: int, commitid: str, **kwargs):
5757
)
5858
return {"preprocessed_upload": False, "reason": "already_running"}
5959
try:
60-
with redis_connection.lock(
61-
lock_name,
62-
timeout=60 * 5,
63-
blocking_timeout=None,
60+
with self.with_logged_lock(
61+
redis_connection.lock(
62+
lock_name,
63+
timeout=60 * 5,
64+
blocking_timeout=None,
65+
),
66+
lock_name=lock_name,
67+
repoid=repoid,
68+
commitid=commitid,
6469
):
6570
return self.process_impl_within_lock(
6671
db_session=db_session,

apps/worker/tasks/sync_pull.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -77,10 +77,15 @@ def run_impl(
7777
lock_name = f"pullsync_{repoid}_{pullid}"
7878
start_wait = time.monotonic()
7979
try:
80-
with redis_connection.lock(
81-
lock_name,
82-
timeout=60 * 5,
83-
blocking_timeout=0.5,
80+
with self.with_logged_lock(
81+
redis_connection.lock(
82+
lock_name,
83+
timeout=60 * 5,
84+
blocking_timeout=0.5,
85+
),
86+
lock_name=lock_name,
87+
repoid=repoid,
88+
pullid=pullid,
8489
):
8590
return self.run_impl_within_lock(
8691
db_session,

apps/worker/tasks/sync_repos.py

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -91,10 +91,15 @@ def run_impl(
9191
lock_name = f"syncrepos_lock_{ownerid}_{using_integration}"
9292
redis_connection = get_redis_connection()
9393
try:
94-
with redis_connection.lock(
95-
lock_name,
96-
timeout=max(300, self.hard_time_limit_task),
97-
blocking_timeout=5,
94+
with self.with_logged_lock(
95+
redis_connection.lock(
96+
lock_name,
97+
timeout=max(300, self.hard_time_limit_task),
98+
blocking_timeout=5,
99+
),
100+
lock_name=lock_name,
101+
ownerid=ownerid,
102+
using_integration=using_integration,
98103
):
99104
git = get_owner_provider_service(
100105
owner,

apps/worker/tasks/tests/unit/test_base.py

Lines changed: 165 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@
88
from celery.contrib.testing.mocks import TaskMessage
99
from celery.exceptions import MaxRetriesExceededError, Retry, SoftTimeLimitExceeded
1010
from prometheus_client import REGISTRY
11+
from redis.exceptions import LockError
12+
from redis.lock import Lock
1113
from sqlalchemy.exc import (
1214
DBAPIError,
1315
IntegrityError,
@@ -917,3 +919,166 @@ def test_real_example_override_from_upload(
917919
time_limit=450,
918920
user_plan="users-enterprisey",
919921
)
922+
923+
924+
@pytest.mark.django_db(databases={"default", "timeseries"})
class TestBaseCodecovTaskWithLoggedLock:
    """Unit tests for BaseCodecovTask.with_logged_lock().

    Each test patches the module-level logger in tasks.base and drives
    with_logged_lock() with a MagicMock standing in for a redis Lock,
    then inspects log.info.call_args_list for the expected messages and
    `extra` context.
    """

    def test_with_logged_lock_logs_acquiring_and_acquired(self, mocker):
        """Test that with_logged_lock logs 'Acquiring lock' and 'Acquired lock'."""
        mock_log = mocker.patch("tasks.base.log")
        mock_lock = mocker.MagicMock(spec=Lock)
        # spec=Lock does not configure the context-manager protocol, so
        # __enter__/__exit__ are installed explicitly.
        mock_lock.__enter__ = mocker.MagicMock(return_value=None)
        mock_lock.__exit__ = mocker.MagicMock(return_value=None)

        task = BaseCodecovTask()
        with task.with_logged_lock(mock_lock, lock_name="test_lock", repoid=123):
            pass

        # call[0][0] is the positional log message; call[1]["extra"] the kwargs.
        acquiring_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Acquiring lock"
        ]
        assert len(acquiring_calls) == 1
        assert acquiring_calls[0][1]["extra"]["lock_name"] == "test_lock"
        assert acquiring_calls[0][1]["extra"]["repoid"] == 123

        acquired_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Acquired lock"
        ]
        assert len(acquired_calls) == 1
        assert acquired_calls[0][1]["extra"]["lock_name"] == "test_lock"
        assert acquired_calls[0][1]["extra"]["repoid"] == 123

    def test_with_logged_lock_logs_releasing_with_duration(self, mocker):
        """Test that with_logged_lock logs 'Releasing lock' with duration."""
        mock_log = mocker.patch("tasks.base.log")
        mock_lock = mocker.MagicMock(spec=Lock)
        mock_lock.__enter__ = mocker.MagicMock(return_value=None)
        mock_lock.__exit__ = mocker.MagicMock(return_value=None)

        # Mock time.time to control duration: first value is sampled on
        # acquisition, second on release.
        mock_time = mocker.patch("tasks.base.time.time")
        mock_time.side_effect = [1000.0, 1000.5]  # 0.5 second duration

        task = BaseCodecovTask()
        with task.with_logged_lock(mock_lock, lock_name="test_lock", commitid="abc123"):
            pass

        releasing_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Releasing lock"
        ]
        assert len(releasing_calls) == 1
        assert releasing_calls[0][1]["extra"]["lock_name"] == "test_lock"
        assert releasing_calls[0][1]["extra"]["commitid"] == "abc123"
        # Use approximate comparison due to floating point precision
        assert (
            abs(releasing_calls[0][1]["extra"]["lock_duration_seconds"] - 0.5) < 0.001
        )

    def test_with_logged_lock_includes_extra_context(self, mocker):
        """Test that with_logged_lock includes all extra context in logs."""
        mock_log = mocker.patch("tasks.base.log")
        mock_lock = mocker.MagicMock(spec=Lock)
        mock_lock.__enter__ = mocker.MagicMock(return_value=None)
        mock_lock.__exit__ = mocker.MagicMock(return_value=None)

        task = BaseCodecovTask()
        with task.with_logged_lock(
            mock_lock,
            lock_name="test_lock",
            repoid=123,
            commitid="abc123",
            report_type="coverage",
            custom_field="custom_value",
        ):
            pass

        # Check that all extra context is included in all log calls
        for log_call in mock_log.info.call_args_list:
            extra = log_call[1]["extra"]
            assert extra["lock_name"] == "test_lock"
            assert extra["repoid"] == 123
            assert extra["commitid"] == "abc123"
            assert extra["report_type"] == "coverage"
            assert extra["custom_field"] == "custom_value"

    def test_with_logged_lock_executes_code_within_lock(self, mocker):
        """Test that code within with_logged_lock executes correctly."""
        mock_log = mocker.patch("tasks.base.log")
        mock_lock = mocker.MagicMock(spec=Lock)
        mock_lock.__enter__ = mocker.MagicMock(return_value=None)
        mock_lock.__exit__ = mocker.MagicMock(return_value=None)

        task = BaseCodecovTask()
        result = None
        with task.with_logged_lock(mock_lock, lock_name="test_lock"):
            result = "executed"

        assert result == "executed"
        # The wrapped lock itself must be entered/exited exactly once.
        mock_lock.__enter__.assert_called_once()
        mock_lock.__exit__.assert_called_once()

    def test_with_logged_lock_propagates_lock_error(self, mocker):
        """Test that LockError from lock acquisition is not caught by with_logged_lock."""
        mock_log = mocker.patch("tasks.base.log")
        mock_lock = mocker.MagicMock(spec=Lock)
        mock_lock.__enter__ = mocker.MagicMock(side_effect=LockError("Lock failed"))

        task = BaseCodecovTask()
        with pytest.raises(LockError, match="Lock failed"):
            with task.with_logged_lock(mock_lock, lock_name="test_lock"):
                pass

        # Should have logged "Acquiring lock" but not "Acquired lock" or "Releasing lock"
        acquiring_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Acquiring lock"
        ]
        assert len(acquiring_calls) == 1

        acquired_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Acquired lock"
        ]
        assert len(acquired_calls) == 0

        releasing_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Releasing lock"
        ]
        assert len(releasing_calls) == 0

    def test_with_logged_lock_logs_release_even_on_exception(self, mocker):
        """Test that 'Releasing lock' is logged even if code within raises an exception."""
        mock_log = mocker.patch("tasks.base.log")
        mock_lock = mocker.MagicMock(spec=Lock)
        mock_lock.__enter__ = mocker.MagicMock(return_value=None)
        mock_lock.__exit__ = mocker.MagicMock(return_value=None)

        # Mock time.time to control duration
        mock_time = mocker.patch("tasks.base.time.time")
        mock_time.side_effect = [1000.0, 1000.2]  # 0.2 second duration

        task = BaseCodecovTask()
        with pytest.raises(ValueError):
            with task.with_logged_lock(mock_lock, lock_name="test_lock"):
                raise ValueError("Test exception")

        releasing_calls = [
            call
            for call in mock_log.info.call_args_list
            if call[0][0] == "Releasing lock"
        ]
        assert len(releasing_calls) == 1
        # Use approximate comparison due to floating point precision
        assert (
            abs(releasing_calls[0][1]["extra"]["lock_duration_seconds"] - 0.2) < 0.001
        )

apps/worker/tasks/upload.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -334,10 +334,16 @@ def run_impl(
334334

335335
lock_name = upload_context.lock_name("upload")
336336
try:
337-
with upload_context.redis_connection.lock(
338-
lock_name,
339-
timeout=max(300, self.hard_time_limit_task),
340-
blocking_timeout=5,
337+
with self.with_logged_lock(
338+
upload_context.redis_connection.lock(
339+
lock_name,
340+
timeout=max(300, self.hard_time_limit_task),
341+
blocking_timeout=5,
342+
),
343+
lock_name=lock_name,
344+
repoid=repoid,
345+
commitid=commitid,
346+
report_type=report_type,
341347
):
342348
# Check whether a different `Upload` task has "stolen" our uploads
343349
if not upload_context.has_pending_jobs():

0 commit comments

Comments
 (0)