Make geo lookups non-blocking with bulk DB writes and background tasks

2026-03-12 18:10:00 +01:00
parent a61c9dc969
commit 28f7b1cfcd
8 changed files with 496 additions and 36 deletions

@@ -435,6 +435,41 @@ async def lookup(
# ---------------------------------------------------------------------------
def lookup_cached_only(
    ips: list[str],
) -> tuple[dict[str, GeoInfo], list[str]]:
    """Return cached geo data for *ips* without making any external API calls.

    Used by callers that want to return a fast response using only what is
    already in memory, while deferring resolution of uncached IPs to a
    background task.

    Args:
        ips: IP address strings to look up.

    Returns:
        A ``(geo_map, uncached)`` tuple where *geo_map* maps every IP that
        was already in the in-memory cache to its :class:`GeoInfo`, and
        *uncached* is the list of IPs that were not found in the cache.
        Entries in the negative cache (recently failed) are **not** included
        in *uncached* so they are not re-queued immediately.
    """
    geo_map: dict[str, GeoInfo] = {}
    uncached: list[str] = []
    now = time.monotonic()
    for ip in dict.fromkeys(ips):  # deduplicate, preserve order
        if ip in _cache:
            geo_map[ip] = _cache[ip]
        elif ip in _neg_cache and (now - _neg_cache[ip]) < _NEG_CACHE_TTL:
            # Still within the cool-down window — do not re-queue.
            pass
        else:
            uncached.append(ip)
    return geo_map, uncached


async def lookup_batch(
    ips: list[str],
    http_session: aiohttp.ClientSession,
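
The docstring of lookup_cached_only describes the intended division of labour: answer immediately from cache, then let a background task fill in the misses. From the caller's side, a minimal sketch of that pattern, assuming a hypothetical handler get_connections, module-level http_session and db handles, and that lookup_batch accepts db as an optional keyword (none of that wiring is shown in this commit):

import asyncio

_bg_tasks: set[asyncio.Task] = set()  # keep strong refs so pending tasks aren't GC'd


async def get_connections(ips: list[str]) -> dict[str, GeoInfo]:
    # Fast path: answer from the in-memory cache only; never block on the API.
    geo_map, uncached = lookup_cached_only(ips)
    if uncached:
        # Resolve the misses in the background; later requests hit the cache.
        task = asyncio.create_task(lookup_batch(uncached, http_session, db=db))
        _bg_tasks.add(task)
        task.add_done_callback(_bg_tasks.discard)
    return geo_map
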
@@ -447,7 +482,9 @@ async def lookup_batch(
    ``http://ip-api.com/batch`` in chunks of up to :data:`_BATCH_SIZE`.

    Only successful resolutions (``country_code is not None``) are written to
    the persistent cache when *db* is provided.
    the persistent cache when *db* is provided. Both positive and negative
    entries are written in bulk using ``executemany`` (one round-trip per
    chunk) rather than one ``execute`` per IP.

    Args:
        ips: List of IP address strings to resolve. Duplicates are ignored.
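
The bulk-write claim in that docstring reduces to a simple substitution. A sketch of the two variants, assuming the db handle is an aiosqlite connection (which the SQLite-flavoured SQL later in this diff suggests); persist_chunk is an illustrative name, and the plain INSERT omits the ON CONFLICT clause the real statement carries:

import aiosqlite

_INSERT = (
    "INSERT INTO geo_cache (ip, country_code, country_name, asn, org) "
    "VALUES (?, ?, ?, ?, ?)"
)


async def persist_chunk(db: aiosqlite.Connection, rows: list[tuple]) -> None:
    # Per-IP variant this commit replaces: one awaited round-trip per row.
    #     for row in rows:
    #         await db.execute(_INSERT, row)
    # Bulk variant: the whole chunk goes through a single executemany call.
    await db.executemany(_INSERT, rows)
    await db.commit()
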
@@ -509,16 +546,19 @@ async def lookup_batch(
        assert chunk_result is not None  # noqa: S101

        # Collect bulk-write rows instead of one execute per IP.
        pos_rows: list[tuple[str, str | None, str | None, str | None, str | None]] = []
        neg_ips: list[str] = []

        for ip, info in chunk_result.items():
            if info.country_code is not None:
                # Successful API resolution.
                _store(ip, info)
                geo_result[ip] = info
                if db is not None:
                    try:
                        await _persist_entry(db, ip, info)
                    except Exception as exc:  # noqa: BLE001
                        log.warning("geo_persist_failed", ip=ip, error=str(exc))
                pos_rows.append(
                    (ip, info.country_code, info.country_name, info.asn, info.org)
                )
            else:
                # API failed — try local GeoIP fallback.
                fallback = _geoip_lookup(ip)
@@ -526,19 +566,56 @@ async def lookup_batch(
                    _store(ip, fallback)
                    geo_result[ip] = fallback
                    if db is not None:
                        try:
                            await _persist_entry(db, ip, fallback)
                        except Exception as exc:  # noqa: BLE001
                            log.warning("geo_persist_failed", ip=ip, error=str(exc))
                    pos_rows.append(
                        (
                            ip,
                            fallback.country_code,
                            fallback.country_name,
                            fallback.asn,
                            fallback.org,
                        )
                    )
                else:
                    # Both resolvers failed — record in negative cache.
                    _neg_cache[ip] = time.monotonic()
                    geo_result[ip] = _empty
                    if db is not None:
                        try:
                            await _persist_neg_entry(db, ip)
                        except Exception as exc:  # noqa: BLE001
                            log.warning("geo_persist_neg_failed", ip=ip, error=str(exc))
                    neg_ips.append(ip)

        if db is not None:
            if pos_rows:
                try:
                    await db.executemany(
                        """
                        INSERT INTO geo_cache (ip, country_code, country_name, asn, org)
                        VALUES (?, ?, ?, ?, ?)
                        ON CONFLICT(ip) DO UPDATE SET
                            country_code = excluded.country_code,
                            country_name = excluded.country_name,
                            asn = excluded.asn,
                            org = excluded.org,
                            cached_at = strftime('%Y-%m-%dT%H:%M:%fZ', 'now')
                        """,
                        pos_rows,
                    )
                except Exception as exc:  # noqa: BLE001
                    log.warning(
                        "geo_batch_persist_failed",
                        count=len(pos_rows),
                        error=str(exc),
                    )
            if neg_ips:
                try:
                    await db.executemany(
                        "INSERT OR IGNORE INTO geo_cache (ip) VALUES (?)",
                        [(ip,) for ip in neg_ips],
                    )
                except Exception as exc:  # noqa: BLE001
                    log.warning(
                        "geo_batch_persist_neg_failed",
                        count=len(neg_ips),
                        error=str(exc),
                    )

        if db is not None:
            try:
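
Both bulk statements assume a geo_cache table keyed on ip with a cached_at default. The DDL is not part of this diff, so the following is an inferred sketch rather than the project's actual migration:

import aiosqlite

# Hypothetical schema inferred from the INSERT/UPSERT statements above;
# the real migration is not included in this commit.
GEO_CACHE_DDL = """
CREATE TABLE IF NOT EXISTS geo_cache (
    ip           TEXT PRIMARY KEY,
    country_code TEXT,
    country_name TEXT,
    asn          TEXT,
    org          TEXT,
    cached_at    TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ', 'now'))
)
"""


async def ensure_schema(db: aiosqlite.Connection) -> None:
    await db.execute(GEO_CACHE_DDL)
    await db.commit()

With ip as the primary key, the ip-only INSERT OR IGNORE used for negative entries leaves any existing row untouched, so a temporary resolver failure never overwrites a previously cached positive result.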