Skip to content

Commit 6ebf9d2

Browse files
Strip internal issue-tracker references from tests and comments
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
1 parent eaf304b commit 6ebf9d2

12 files changed

+76
-79
lines changed

src/dqliteclient/cluster.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
# Default attempt count for connect(). Three attempts cover one leader
2626
# change plus one transport hiccup; substantially higher counts risk
2727
# hiding genuine cluster instability under what looks like "a slow
28-
# connect" (ISSUE-109). Operators can override via ClusterClient.connect(
28+
# connect". Operators can override via ClusterClient.connect(
2929
# max_attempts=...).
3030
_DEFAULT_CONNECT_MAX_ATTEMPTS = 3
3131

@@ -180,12 +180,12 @@ async def connect(
180180
governors from one place.
181181
182182
``max_attempts`` overrides the default
183-
:data:`_DEFAULT_CONNECT_MAX_ATTEMPTS` (ISSUE-109).
183+
:data:`_DEFAULT_CONNECT_MAX_ATTEMPTS`.
184184
185185
Each attempt's failure is logged at DEBUG level with the
186186
attempted leader address and the error, so operators can
187187
enable debug logging to diagnose cluster churn instead of
188-
seeing only the final exception (ISSUE-78).
188+
seeing only the final exception.
189189
"""
190190
attempts_cap = max_attempts if max_attempts is not None else _DEFAULT_CONNECT_MAX_ATTEMPTS
191191
if attempts_cap < 1:

src/dqliteclient/connection.py

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -96,13 +96,12 @@ def __init__(
9696
max_continuation_frames: Maximum number of continuation
9797
frames in a single query result. Caps the per-query
9898
Python-side decode work a hostile server can inflict
99-
by sending many 1-row frames (ISSUE-98). Set to
99+
by sending many 1-row frames. Set to
100100
``None`` to disable.
101101
trust_server_heartbeat: When True, widen the per-read
102102
deadline to the server-advertised heartbeat (subject
103103
to a 300 s hard cap). When False (default), ``timeout``
104-
is authoritative — the server value cannot amplify it
105-
(ISSUE-101).
104+
is authoritative — the server value cannot amplify it.
106105
"""
107106
if not math.isfinite(timeout) or timeout <= 0:
108107
raise ValueError(f"timeout must be a positive finite number, got {timeout}")
@@ -213,11 +212,11 @@ async def _abort_protocol(self) -> None:
213212
214213
Close the writer, then give ``wait_closed`` a bounded budget so
215214
the transport drains under normal conditions but never hangs on
216-
an unresponsive peer (ISSUE-72). Cycle-2 ISSUE-38 rejected an
217-
unbounded ``wait_closed`` in the leader-query finally block
218-
because leader-query is a discovery path that runs against
219-
arbitrary nodes; connect is a retry-loop path that benefits
220-
from draining, so the two sites take different decisions.
215+
an unresponsive peer. The leader-query finally block deliberately avoids an
216+
unbounded ``wait_closed`` because leader-query is a discovery
217+
path that runs against arbitrary nodes; connect is a retry-loop
218+
path that benefits from draining, so the two sites take
219+
different decisions.
221220
"""
222221
protocol = self._protocol
223222
if protocol is None:
@@ -402,7 +401,7 @@ async def fetchval(self, sql: str, params: Sequence[Any] | None = None) -> Any:
402401
async def transaction(self) -> AsyncIterator[None]:
403402
"""Context manager for transactions.
404403
405-
Cancellation contract (ISSUE-75 / ISSUE-79):
404+
Cancellation contract:
406405
- Cancellation during BEGIN: state cleared, CancelledError
407406
propagates.
408407
- Cancellation during the body: ROLLBACK is attempted. If
@@ -415,7 +414,7 @@ async def transaction(self) -> AsyncIterator[None]:
415414
invalidated, CancelledError propagates and supersedes the
416415
body exception (Python chains it via ``__context__``).
417416
418-
Non-cancellation ROLLBACK failure (ISSUE-73): connection is
417+
Non-cancellation ROLLBACK failure: connection is
419418
invalidated so the pool discards it instead of reusing a
420419
Python-side "_in_transaction=False" connection with live
421420
server-side transaction state.
@@ -451,13 +450,13 @@ async def transaction(self) -> AsyncIterator[None]:
451450
# CancelledError / KeyboardInterrupt / SystemExit must
452451
# propagate. Previously ``suppress(BaseException)``
453452
# swallowed cancellation, breaking structured-concurrency
454-
# contracts (ISSUE-75).
453+
# contracts.
455454
#
456455
# If ROLLBACK fails for any reason (including the narrow
457456
# cancellation catch below), the connection's transaction
458457
# state is unknowable from our side and the connection
459458
# must be invalidated so the pool discards it on return
460-
# (ISSUE-73). The original body exception is still the
459+
# to the caller. The original body exception is still the
461460
# one that propagates, except for cancellation which
462461
# takes precedence.
463462
try:

src/dqliteclient/pool.py

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -101,16 +101,16 @@ def __init__(
101101
connection inherits the same governor. ``None`` disables
102102
the cap entirely (not recommended in production —
103103
bounds memory against slow-drip attacks).
104-
max_continuation_frames: Per-query continuation-frame cap
105-
(ISSUE-98). Complements ``max_total_rows``: a server
104+
max_continuation_frames: Per-query continuation-frame cap.
105+
Complements ``max_total_rows``: a server
106106
sending 1-row-per-frame can inflict O(n) Python decode
107107
work where n is the row cap; the frame cap bounds that.
108108
Forwarded to every :class:`DqliteConnection`.
109109
trust_server_heartbeat: When True, the per-read deadline on
110110
every connection widens to the server-advertised
111111
heartbeat (up to a 300 s hard cap). Default False —
112112
operator-configured ``timeout`` is authoritative and
113-
the server cannot amplify it (ISSUE-101).
113+
the server cannot amplify it.
114114
"""
115115
if min_size < 0:
116116
raise ValueError(f"min_size must be non-negative, got {min_size}")
@@ -155,7 +155,7 @@ async def initialize(self) -> None:
155155
Idempotent: concurrent callers share the same initialization —
156156
only one performs the TCP work, the others await its result.
157157
158-
Partial-failure behavior (ISSUE-77): ``asyncio.gather`` with the
158+
Partial-failure behavior: ``asyncio.gather`` with the
159159
default ``return_exceptions=False`` cancels sibling tasks on
160160
first failure but does NOT close connections that already
161161
succeeded — they leak as orphaned transports. Use
@@ -315,7 +315,7 @@ async def acquire(self) -> AsyncIterator[DqliteConnection]:
315315
# state change (close, size decrement, drain) wakes waiters
316316
# promptly. The check-_closed-then-clear pair runs under
317317
# the lock so a concurrent close() can't set() the event
318-
# between our read and our clear (ISSUE-74).
318+
# between our read and our clear.
319319
async with self._lock:
320320
closed_event = self._get_closed_event()
321321
if self._closed:
@@ -362,7 +362,7 @@ async def acquire(self) -> AsyncIterator[DqliteConnection]:
362362
yield conn
363363
except BaseException:
364364
# Cleanup must complete even if a second cancellation lands
365-
# mid-await (ISSUE-76). ``returned_to_queue`` tracks whether
365+
# mid-await. ``returned_to_queue`` tracks whether
366366
# the reservation was transferred to an in-queue connection;
367367
# if not, the ``finally`` below releases it. ``asyncio.shield``
368368
# around ``_release_reservation`` makes the decrement itself
@@ -452,9 +452,8 @@ async def _release(self, conn: DqliteConnection) -> None:
452452
453453
``conn.close()`` has an early-return guard against
454454
``_pool_released=True``, so close MUST run before the flag is
455-
set — otherwise the transport leaks (ISSUE-76 review found
456-
this bug across every branch that closes a pool-owned
457-
connection).
455+
set — otherwise the transport leaks (a bug that affects every
456+
branch that closes a pool-owned connection).
458457
"""
459458
if self._closed:
460459
await conn.close()

src/dqliteclient/protocol.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@
3535
def _validate_positive_int_or_none(value: int | None, name: str) -> int | None:
3636
"""Shared validation for positive-int-or-None parameters.
3737
38-
Used for both ``max_total_rows`` and ``max_continuation_frames``
39-
(ISSUE-98). None disables the cap; any int value must be > 0.
38+
Used for both ``max_total_rows`` and ``max_continuation_frames``.
39+
None disables the cap; any int value must be > 0.
4040
"""
4141
if value is None:
4242
return None
@@ -89,7 +89,7 @@ def __init__(
8989
# Per-query frame cap. Complements max_total_rows: a server
9090
# sending 10M 1-row frames to reach the row cap would still
9191
# burn 10M × decode-cost of Python work; the frame cap bounds
92-
# that at ~100k iterations (ISSUE-98).
92+
# that at ~100k iterations.
9393
self._max_continuation_frames = _validate_positive_int_or_none(
9494
max_continuation_frames, "max_continuation_frames"
9595
)
@@ -98,8 +98,7 @@ def __init__(
9898
# hard cap). When False (default), the server value is recorded
9999
# for diagnostics only and the operator-configured ``timeout``
100100
# is authoritative. Opt-in protects operators whose timeout is
101-
# a latency-SLO boundary from server-induced amplification
102-
# (ISSUE-101).
101+
# a latency-SLO boundary from server-induced amplification.
103102
self._trust_server_heartbeat = trust_server_heartbeat
104103

105104
async def handshake(self, client_id: int | None = None) -> int:
@@ -137,8 +136,8 @@ async def handshake(self, client_id: int | None = None) -> int:
137136
# Use the server-advertised heartbeat only when explicitly
138137
# trusted. Previously we always widened ``self._timeout`` up
139138
# to 300 s based on the server value, which let a hostile
140-
# server amplify the operator's configured timeout up to 30×
141-
# (ISSUE-101). Default is now opt-out: the server value is
139+
# server amplify the operator's configured timeout up to 30×.
140+
# Default is now opt-out: the server value is
142141
# recorded for diagnostics but does not change the per-read
143142
# deadline.
144143
if self._trust_server_heartbeat and response.heartbeat_timeout > 0:
@@ -283,7 +282,7 @@ async def _drain_continuations(
283282
"ROWS continuation made no progress: frame had 0 rows and has_more=True"
284283
)
285284
if self._max_continuation_frames is not None and frames > self._max_continuation_frames:
286-
# Per-frame cap complements max_total_rows (ISSUE-98): a
285+
# Per-frame cap complements max_total_rows: a
287286
# slow-drip server sending 1-row-per-frame would
288287
# otherwise pin a client CPU with O(n) iterations of
289288
# decode work, where n is max_total_rows.

tests/integration/test_continuation_frames.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
"""Integration test for multi-frame rows continuation (ISSUE-66).
1+
"""Integration test for multi-frame rows continuation.
22
33
The dqlite server batches query results into frames that fit in its
44
per-response buffer. For result sets larger than one frame's worth of

tests/test_cluster.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,7 @@ async def test_update_nodes(self) -> None:
371371

372372

373373
class TestConnectMaxAttempts:
374-
"""ISSUE-109: connect() exposes a max_attempts parameter.
374+
"""connect() exposes a max_attempts parameter.
375375
376376
The previous hardcoded ``max_attempts=3`` forced operators to patch
377377
the library to tune retry behavior. The default is unchanged; the
@@ -407,7 +407,7 @@ async def test_max_attempts_zero_rejected(self) -> None:
407407

408408

409409
class TestConnectObservability:
410-
"""ISSUE-78: per-attempt failures are logged at DEBUG for diagnosis."""
410+
"""Per-attempt failures are logged at DEBUG for diagnosis."""
411411

412412
async def test_failed_attempts_logged(self, caplog: pytest.LogCaptureFixture) -> None:
413413
import logging

tests/test_max_total_rows_cap.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
"""max_total_rows cap actually fires in the continuation-drain loop.
22
3-
Cycle 9 wired the cap through the layers; cycle 16 adds the missing
4-
test that exercises the enforcement itself. Uses a mocked protocol
5-
response stream so we don't need a cluster to deliver millions of rows.
3+
The cap is wired through every layer; this test exercises the
4+
enforcement itself. Uses a mocked protocol response stream so we
5+
don't need a cluster to deliver millions of rows.
66
"""
77

88
import asyncio
@@ -72,7 +72,7 @@ def test_none_disables_cap(self) -> None:
7272

7373

7474
class TestMaxContinuationFramesEnforcement:
75-
"""ISSUE-98: per-frame cap complements max_total_rows.
75+
"""Per-frame cap complements max_total_rows.
7676
7777
A slow-drip server sending many 1-row frames could pin a client CPU
7878
with ~max_total_rows iterations of Python-level decode work. The
@@ -139,7 +139,7 @@ def test_none_disables_frame_cap(self) -> None:
139139

140140

141141
class TestTrustServerHeartbeat:
142-
"""ISSUE-101: server heartbeat no longer widens client timeout by default."""
142+
"""Server heartbeat no longer widens client timeout by default."""
143143

144144
def test_default_does_not_amplify_timeout(self) -> None:
145145
reader = MagicMock()
@@ -163,7 +163,7 @@ def test_opt_in_amplifies_timeout_via_handshake(self) -> None:
163163
"""With trust_server_heartbeat=True, a handshake reply with a
164164
larger-than-local heartbeat widens the per-read deadline (up to
165165
the 300 s hard cap). Exercises the actual handshake code path
166-
rather than just checking the flag is set (ISSUE-101 review)."""
166+
rather than just checking the flag is set."""
167167
from dqlitewire.messages import WelcomeResponse
168168

169169
reader = AsyncMock()

tests/test_pool.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -302,7 +302,7 @@ async def waiter():
302302
async def test_dead_conn_replacement_respects_max_size(self) -> None:
303303
"""Dead-connection replacement must not allow _size to exceed max_size.
304304
305-
ISSUE-34/58 changed the pool to a reservation pattern: the
305+
An earlier change introduced the reservation pattern: the
306306
lock is released before the TCP handshake. The invariant we
307307
assert here is the one that actually matters — that
308308
``_size`` never exceeds ``_max_size`` at any observation

tests/test_pool_cancellation.py

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -3,21 +3,21 @@
33
Pins the ``ConnectionPool`` lifecycle guarantees against cancellation
44
and partial failures:
55
6-
- Cancelling a parked ``acquire()`` caller must not leak ``_size``
7-
(ISSUE-71 + ISSUE-76). ``_size`` must reflect reality at all times.
6+
- Cancelling a parked ``acquire()`` caller must not leak ``_size``.
7+
``_size`` must reflect reality at all times.
88
- ``pool.close()`` must wake every parked acquirer via the close
9-
signal, not via the per-poll timeout (ISSUE-74). Waking via timeout
10-
would produce "Timed out waiting for a connection" instead of
9+
signal, not via the per-poll timeout. Waking via timeout would
10+
produce "Timed out waiting for a connection" instead of
1111
"Pool is closed" — the message is the observable signal.
1212
- ``_reset_connection`` failure during cleanup must release the
13-
reservation and effectively close the connection (ISSUE-76).
14-
``_FakeConn`` replicates the real ``DqliteConnection.close``
15-
early-return guard on ``_pool_released``, so any regression that
16-
flips the flag before calling close() surfaces as a failed
17-
``close_effective`` assertion.
13+
reservation and effectively close the connection. ``_FakeConn``
14+
replicates the real ``DqliteConnection.close`` early-return guard
15+
on ``_pool_released``, so any regression that flips the flag
16+
before calling close() surfaces as a failed ``close_effective``
17+
assertion.
1818
- ``initialize()`` partial failure: sibling connections that already
1919
succeeded must be effectively closed before ``gather()`` propagates
20-
the first failure (ISSUE-77). The previous default
20+
the first failure. The previous default
2121
``return_exceptions=False`` cancelled siblings but did NOT close
2222
them, leaking transports.
2323
@@ -120,10 +120,10 @@ async def _default_connect(**kwargs: Any) -> _FakeConn:
120120

121121

122122
class TestInitializePartialFailureClosesSurvivors:
123-
"""ISSUE-77: if a single _create_connection fails in initialize's
124-
gather, the connections that already succeeded must be closed.
125-
Currently they leak (asyncio.gather cancels siblings but does not
126-
await their .close()).
123+
"""If a single _create_connection fails in initialize's gather,
124+
the connections that already succeeded must be closed. Previously
125+
they leaked (asyncio.gather cancels siblings but does not await
126+
their .close()).
127127
"""
128128

129129
async def test_survivors_closed_on_partial_init_failure(self) -> None:
@@ -170,10 +170,10 @@ async def _flaky_connect(**kwargs: Any) -> _FakeConn:
170170

171171

172172
class TestAcquireCancellationRestoresSize:
173-
"""ISSUE-71 + ISSUE-76: a caller cancelled while parked in acquire()
174-
must not leak the pool reservation. If a connection was pulled off
175-
the queue and handed to the cancelled task's get_task, it must either
176-
be returned to the pool or closed with _size decremented. Likewise
173+
"""A caller cancelled while parked in acquire() must not leak the
174+
pool reservation. If a connection was pulled off the queue and
175+
handed to the cancelled task's get_task, it must either be
176+
returned to the pool or closed with _size decremented. Likewise
177177
for body-raised cancellation during yield cleanup.
178178
"""
179179

@@ -215,9 +215,9 @@ async def waiter() -> None:
215215
await pool.close()
216216

217217
async def test_reset_connection_failure_releases_reservation(self) -> None:
218-
"""ISSUE-76: when the pool's cleanup path runs _reset_connection
219-
and it fails (e.g. ROLLBACK raises), the reservation must be
220-
released and the connection closed — never leaked.
218+
"""When the pool's cleanup path runs _reset_connection and it
219+
fails (e.g. ROLLBACK raises), the reservation must be released
220+
and the connection closed — never leaked.
221221
"""
222222

223223
class _RollbackFailingConn(_FakeConn):
@@ -271,8 +271,8 @@ class _BodyError(Exception):
271271

272272

273273
class TestCloseWakesAllWaiters:
274-
"""ISSUE-74: when pool.close() runs, every parked acquire() must
275-
return DqliteConnectionError promptly. The current clear()-then-wait
274+
"""When pool.close() runs, every parked acquire() must return
275+
DqliteConnectionError promptly. The current clear()-then-wait
276276
pattern has a tiny window where close()'s set() can be erased,
277277
leaving waiters stalled until timeout.
278278
"""

tests/test_pool_size_invariants.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
"""ISSUE-34 / ISSUE-58: concurrent-access invariants of ConnectionPool.
1+
"""Concurrent-access invariants of ConnectionPool.
22
33
Verifies that _size never exceeds max_size under concurrent acquires,
44
even when _create_connection is slow (simulating TCP handshake

0 commit comments

Comments
 (0)