Compare commits

..

17 Commits

62 changed files with 2835 additions and 983 deletions
+19 -9
View File
@@ -111,8 +111,8 @@ optional; all values have defaults. Unknown keys are rejected with an error.
```toml ```toml
[general] [general]
preferred_player = "spotify" # preferred MPRIS player when multiple are active preferred_player = "" # preferred MPRIS player when multiple are active
player_blacklist = ["firefox", "zen", "chrome", "chromium", "vivaldi", "edge", "opera", "mpv"] player_blacklist = ["firefox", "zen", "chrome", "chromium", "vivaldi", "edge", "opera", "mpv"] # bypassed by --player/-p
http_timeout = 10.0 # seconds http_timeout = 10.0 # seconds
[credentials] [credentials]
@@ -133,7 +133,8 @@ socket_path = "" # Unix socket path; defaults to <cache_dir>/
for the Spotify source; leave empty to disable it. for the Spotify source; leave empty to disable it.
- `musixmatch_usertoken` — found at - `musixmatch_usertoken` — found at
[Curators Settings Page](https://curators.musixmatch.com/settings) → Login → "Copy debug info". [Curators Settings Page](https://curators.musixmatch.com/settings) → Login → "Copy debug info".
If empty, an anonymous token is fetched at runtime. If empty, an anonymous token will be fetched at runtime, which could be more likely to
hit the rate limits.
- `qq_music_api_url` — base URL of a self-hosted - `qq_music_api_url` — base URL of a self-hosted
[qq-music-api](https://github.com/tooplick/qq-music-api) (compatible) instance. Required [qq-music-api](https://github.com/tooplick/qq-music-api) (compatible) instance. Required
for the QQ Music source; leave empty to disable it. for the QQ Music source; leave empty to disable it.
@@ -143,8 +144,8 @@ socket_path = "" # Unix socket path; defaults to <cache_dir>/
Clone this repository: Clone this repository:
```bash ```bash
git clone https://github.com/Uyanide/LRX-CLI.git git clone https://github.com/Uyanide/lrx-cli.git
cd LRX-CLI cd lrx-cli
``` ```
Create a virtual environment and install dependencies (for example, using uv): Create a virtual environment and install dependencies (for example, using uv):
@@ -154,16 +155,25 @@ uv venv .venv
uv sync uv sync
``` ```
Run tests without network calls Run tests (without network access):
```bash ```bash
uv run pytest -m "not network" uv run poe test
``` ```
or full tests: Run tests including **REAL EXTERNAL** API calls. Some of them will be skipped
if the required credentials are not configured as [above](#configuration). This might be useful
to verify whether the lyric sources are still valid and working as expected:
```bash ```bash
uv run pytest uv run poe test-api
```
Other unified tasks:
```bash
uv run poe fmt # ruff format
uv run poe lint # ruff check + pyright
``` ```
Run the CLI: Run the CLI:
+2
View File
@@ -0,0 +1,2 @@
*
!.gitignore
+343
View File
@@ -0,0 +1,343 @@
from __future__ import annotations
import argparse
import asyncio
import json
import traceback
from dataclasses import asdict
from pathlib import Path
from typing import Any, Awaitable, Callable
import httpx
from lrx_cli.authenticators import create_authenticators
from lrx_cli.cache import CacheEngine
from lrx_cli.config import AppConfig, load_config
from lrx_cli.fetchers import (
create_fetchers,
LrclibFetcher,
LrclibSearchFetcher,
NeteaseFetcher,
SpotifyFetcher,
QQMusicFetcher,
MusixmatchFetcher,
MusixmatchSpotifyFetcher,
)
from lrx_cli.models import TrackMeta
# Fixed sample input used for every capture call so request/response dumps are
# reproducible. The trackid/url point at a real Spotify track — TODO confirm it
# stays resolvable, since credentialed endpoints look it up live.
SAMPLE_TRACK = TrackMeta(
    title="One Last Kiss",
    artist="Hikaru Utada",
    album="One Last Kiss",
    length=252026,  # presumably milliseconds — verify against TrackMeta's contract
    trackid="5RhWszHMSKzb7KiXk4Ae0M",
    url="https://open.spotify.com/track/5RhWszHMSKzb7KiXk4Ae0M",
)
def _jsonable(value: Any) -> Any:
if isinstance(value, (str, int, float, bool)) or value is None:
return value
if isinstance(value, dict):
return {str(k): _jsonable(v) for k, v in value.items()}
if isinstance(value, (list, tuple)):
return [_jsonable(v) for v in value]
if isinstance(value, bytes):
try:
return value.decode("utf-8")
except Exception:
return value.hex()
if hasattr(value, "model_dump"):
return _jsonable(value.model_dump())
if hasattr(value, "__dict__"):
return _jsonable(vars(value))
return repr(value)
def _write_json(path: Path, payload: Any) -> None:
    """Serialize *payload* (via _jsonable) to *path* as pretty-printed UTF-8 JSON.

    Parent directories are created on demand; a trailing newline is appended.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    rendered = json.dumps(_jsonable(payload), ensure_ascii=False, indent=2)
    path.write_text(rendered + "\n", encoding="utf-8")
def _clear_output_files(out_dir: Path) -> None:
for pattern in ("*.json", "*.db"):
for path in out_dir.glob(pattern):
if path.is_file():
path.unlink()
def _new_runtime(config: AppConfig, db_path: Path):
    """Build a (fetchers, authenticators) pair backed by an isolated cache DB.

    A dedicated SQLite path keeps capture runs from touching the normal
    runtime cache.
    """
    engine = CacheEngine(str(db_path))
    auths = create_authenticators(engine, config)
    return create_fetchers(engine, auths, config), auths
async def _response_dump(resp: httpx.Response) -> dict[str, Any]:
    """Summarize an httpx response: status, headers, request URL/method, body.

    The body is stored under ``"json"`` when it parses as JSON, otherwise
    under ``"text"`` as-is.
    """
    summary: dict[str, Any] = {
        "status_code": resp.status_code,
        "headers": dict(resp.headers),
        "url": str(resp.request.url),
        "method": resp.request.method,
    }
    try:
        summary["json"] = resp.json()
    except Exception:
        summary["text"] = resp.text
    return summary
def _decode_body(content: bytes) -> str:
if not content:
return ""
try:
return content.decode("utf-8")
except Exception:
return content.hex()
def _dump_request(req: httpx.Request) -> dict[str, Any]:
    """Snapshot an outgoing httpx request as a JSON-friendly dict.

    Note: duplicate query keys collapse to the last occurrence, matching the
    behavior of the original dict-comprehension over ``multi_items()``.
    """
    return {
        "method": req.method,
        "url": str(req.url),
        "headers": dict(req.headers),
        "query_params": dict(req.url.params.multi_items()),
        "body": _decode_body(req.content),
    }
async def run_capture(out_dir: Path, timeout: float, strict: bool) -> int:
    """Exercise every provider API once with SAMPLE_TRACK and persist the traffic.

    For each registered call, writes ``NNN_<name>.request.json`` (call input plus
    every raw HTTP request captured during the call) and
    ``NNN_<name>.response.json`` (the provider payload, or error + traceback)
    into *out_dir*.

    Args:
        out_dir: Destination directory; pre-existing ``*.json``/``*.db`` files
            are cleared first.
        timeout: Timeout for the shared httpx client passed to client-taking
            fetcher calls.
        strict: Stop at the first failed call instead of continuing.

    Returns:
        The number of failed calls.

    NOTE(review): intentionally reaches into private ``_api_*`` fetcher methods
    to capture raw payloads rather than post-processed lyrics.
    """
    out_dir.mkdir(parents=True, exist_ok=True)
    _clear_output_files(out_dir)
    # Use isolated cache DBs to avoid polluting normal runtime cache.
    # anon runtime: default (empty) config; cred runtime: user's real config.
    anon_fetchers, _ = _new_runtime(AppConfig(), out_dir / ".capture-anon.db")
    cred_fetchers, _ = _new_runtime(load_config(), out_dir / ".capture-cred.db")

    # Each entry: (output file stem suffix, JSON-able call input, zero-arg coroutine factory).
    calls: list[tuple[str, dict[str, Any], Callable[[], Awaitable[Any]]]] = []
    captured_requests: list[dict[str, Any]] = []
    original_send = httpx.AsyncClient.send

    # Monkeypatch AsyncClient.send process-wide so that *every* outgoing HTTP
    # request made during a call — including ones issued by internally created
    # clients (auth token fetches, etc.) — is recorded. Restored in `finally`.
    async def _patched_send(
        self: httpx.AsyncClient,
        request: httpx.Request,
        *args: Any,
        **kwargs: Any,
    ) -> httpx.Response:
        captured_requests.append(_dump_request(request))
        return await original_send(self, request, *args, **kwargs)

    httpx.AsyncClient.send = _patched_send  # type: ignore[method-assign]
    async with httpx.AsyncClient(timeout=timeout) as client:
        # LRCLIB — anonymous API, takes the shared client explicitly.
        lrclib = anon_fetchers["lrclib"]
        assert isinstance(lrclib, LrclibFetcher)
        calls.append(
            (
                "lrclib_get",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: lrclib._api_get(client, SAMPLE_TRACK),
            )
        )
        lrclib_search = anon_fetchers["lrclib-search"]
        assert isinstance(lrclib_search, LrclibSearchFetcher)
        calls.append(
            (
                "lrclib_search_candidates",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: lrclib_search._api_candidates(client, SAMPLE_TRACK),
            )
        )
        # Netease — anonymous API.
        netease = anon_fetchers["netease"]
        assert isinstance(netease, NeteaseFetcher)
        calls.append(
            (
                "netease_search_track",
                {"track": asdict(SAMPLE_TRACK), "limit": 5},
                lambda: netease._api_search_track(client, SAMPLE_TRACK, 5),
            )
        )
        calls.append(
            (
                "netease_lyric_track",
                {"track": asdict(SAMPLE_TRACK), "limit": 5},
                lambda: netease._api_lyric_track(client, SAMPLE_TRACK, 5),
            )
        )
        # Spotify (credentialed runtime) — manages its own HTTP client internally.
        spotify = cred_fetchers["spotify"]
        assert isinstance(spotify, SpotifyFetcher)
        calls.append(
            (
                "spotify_lyrics",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: spotify._api_lyrics(SAMPLE_TRACK),
            )
        )
        # QQMusic (credentialed runtime) — requires qq_music_api_url in config.
        qq = cred_fetchers["qqmusic"]
        assert isinstance(qq, QQMusicFetcher)
        calls.append(
            (
                "qqmusic_search_track",
                {"track": asdict(SAMPLE_TRACK), "limit": 10},
                lambda: qq._api_search(SAMPLE_TRACK, 10),
            )
        )
        calls.append(
            (
                "qqmusic_lyric_track",
                {"track": asdict(SAMPLE_TRACK), "limit": 10},
                lambda: qq._api_lyric_track(SAMPLE_TRACK, 10),
            )
        )
        # Musixmatch anonymous — uses a runtime-fetched token.
        mxm_anon = anon_fetchers["musixmatch"]
        mxm_sp_anon = anon_fetchers["musixmatch-spotify"]
        assert isinstance(mxm_anon, MusixmatchFetcher)
        assert isinstance(mxm_sp_anon, MusixmatchSpotifyFetcher)
        calls.append(
            (
                "musixmatch_anonymous_search_track",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: mxm_anon._api_search_track(SAMPLE_TRACK),
            )
        )
        calls.append(
            (
                "musixmatch_anonymous_macro_track",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: mxm_anon._api_macro_track(SAMPLE_TRACK),
            )
        )
        calls.append(
            (
                "musixmatch_spotify_anonymous_macro_track",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: mxm_sp_anon._api_macro_track(SAMPLE_TRACK),
            )
        )
        # Musixmatch credentialed (if token configured, this uses it)
        mxm_cred = cred_fetchers["musixmatch"]
        mxm_sp_cred = cred_fetchers["musixmatch-spotify"]
        assert isinstance(mxm_cred, MusixmatchFetcher)
        assert isinstance(mxm_sp_cred, MusixmatchSpotifyFetcher)
        calls.append(
            (
                "musixmatch_token_search_track",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: mxm_cred._api_search_track(SAMPLE_TRACK),
            )
        )
        calls.append(
            (
                "musixmatch_token_macro_track",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: mxm_cred._api_macro_track(SAMPLE_TRACK),
            )
        )
        calls.append(
            (
                "musixmatch_spotify_token_macro_track",
                {"track": asdict(SAMPLE_TRACK)},
                lambda: mxm_sp_cred._api_macro_track(SAMPLE_TRACK),
            )
        )

        failures = 0
        try:
            for idx, (name, request_payload, fn) in enumerate(calls, start=1):
                # Zero-padded index keeps output files sorted in call order.
                stem = f"{idx:03d}_{name}"
                req_path = out_dir / f"{stem}.request.json"
                resp_path = out_dir / f"{stem}.response.json"
                # Reset the shared capture buffer so each dump holds only
                # the HTTP requests made by this one call.
                captured_requests.clear()
                try:
                    result = await fn()
                    if isinstance(result, httpx.Response):
                        payload = await _response_dump(result)
                    else:
                        payload = _jsonable(result)
                    _write_json(
                        req_path,
                        {
                            "call": name,
                            "input": request_payload,
                            "http_requests": _jsonable(captured_requests),
                        },
                    )
                    _write_json(resp_path, {"ok": True, "response": payload})
                except Exception as exc:
                    failures += 1
                    # Still dump whatever requests were issued before the failure.
                    _write_json(
                        req_path,
                        {
                            "call": name,
                            "input": request_payload,
                            "http_requests": _jsonable(captured_requests),
                        },
                    )
                    _write_json(
                        resp_path,
                        {
                            "ok": False,
                            "error": str(exc),
                            "traceback": traceback.format_exc(),
                        },
                    )
                    if strict:
                        break
        finally:
            # Always undo the process-wide monkeypatch, even on failure.
            httpx.AsyncClient.send = original_send  # type: ignore[method-assign]
    return failures
def main() -> int:
    """CLI entry point: parse options, run the capture, report the outcome.

    Returns a nonzero exit code only when ``--strict`` was given and at least
    one call failed.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Call external provider APIs with sample data and save request/response "
            "pairs for API reference."
        )
    )
    option_specs: list[tuple[str, dict[str, Any]]] = [
        (
            "--out-dir",
            {
                "type": Path,
                "default": Path("misc/api_ref"),
                "help": "Output directory for request/response files.",
            },
        ),
        (
            "--timeout",
            {
                "type": float,
                "default": 20.0,
                "help": "HTTP timeout in seconds.",
            },
        ),
        (
            "--strict",
            {
                "action": "store_true",
                "help": "Stop on first failed call.",
            },
        ),
    ]
    for flag, spec in option_specs:
        parser.add_argument(flag, **spec)
    opts = parser.parse_args()
    failures = asyncio.run(run_capture(opts.out_dir, opts.timeout, opts.strict))
    print(f"capture finished: failures={failures}, out_dir={opts.out_dir}")
    if opts.strict and failures > 0:
        return 1
    return 0
# Script entry: propagate main()'s exit status to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
+18 -2
View File
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project] [project]
name = "lrx-cli" name = "lrx-cli"
version = "0.7.1" version = "0.7.9"
description = "Fetch line-synced lyrics for your music player." description = "Fetch line-synced lyrics for your music player."
readme = "README.md" readme = "README.md"
requires-python = ">=3.13" requires-python = ">=3.13"
@@ -24,4 +24,20 @@ lrx = "lrx_cli.cli:run"
ignore = ["E402"] # Since there are headers ignore = ["E402"] # Since there are headers
[dependency-groups] [dependency-groups]
dev = ["pytest>=9.0.2", "ruff>=0.15.8"] dev = [
"poethepoet>=0.44.0",
"pyright>=1.1.406",
"pytest>=9.0.2",
"ruff>=0.15.8",
]
[tool.poe.tasks]
fmt = "ruff format ."
lint = { shell = "ruff check . && pyright" }
test = "pytest"
test-api = "pytest -m 'network or not network'"
[tool.pyright]
pythonVersion = "3.13"
include = ["src", "tests", "misc"]
typeCheckingMode = "standard"
+1
View File
@@ -1,2 +1,3 @@
[pytest] [pytest]
addopts = -m "not network"
markers = network: marks tests that require real network access to external APIs markers = network: marks tests that require real network access to external APIs
+80 -35
View File
@@ -21,9 +21,9 @@ colorama==0.4.6 ; sys_platform == 'win32' \
# via # via
# loguru # loguru
# pytest # pytest
cyclopts==4.10.1 \ cyclopts==4.10.2 \
--hash=sha256:35f37257139380a386d9fe4475e1e7c87ca7795765ef4f31abba579fcfcb6ecd \ --hash=sha256:a1f2d6f8f7afac9456b48f75a40b36658778ddc9c6d406b520d017ae32c990fe \
--hash=sha256:ad4e4bb90576412d32276b14a76f55d43353753d16217f2c3cd5bdceba7f15a0 --hash=sha256:d7b950457ef2563596d56331f80cbbbf86a2772535fb8b315c4f03bc7e6127f1
# via lrx-cli # via lrx-cli
dbus-next==0.2.3 \ dbus-next==0.2.3 \
--hash=sha256:58948f9aff9db08316734c0be2a120f6dc502124d9642f55e90ac82ffb16a18b \ --hash=sha256:58948f9aff9db08316734c0be2a120f6dc502124d9642f55e90ac82ffb16a18b \
@@ -75,31 +75,72 @@ mutagen==1.47.0 \
--hash=sha256:719fadef0a978c31b4cf3c956261b3c58b6948b32023078a2117b1de09f0fc99 \ --hash=sha256:719fadef0a978c31b4cf3c956261b3c58b6948b32023078a2117b1de09f0fc99 \
--hash=sha256:edd96f50c5907a9539d8e5bba7245f62c9f520aef333d13392a79a4f70aca719 --hash=sha256:edd96f50c5907a9539d8e5bba7245f62c9f520aef333d13392a79a4f70aca719
# via lrx-cli # via lrx-cli
nodeenv==1.10.0 \
--hash=sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827 \
--hash=sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb
# via pyright
packaging==26.0 \ packaging==26.0 \
--hash=sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4 \ --hash=sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4 \
--hash=sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529 --hash=sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529
# via pytest # via pytest
platformdirs==4.9.4 \ pastel==0.2.1 \
--hash=sha256:1ec356301b7dc906d83f371c8f487070e99d3ccf9e501686456394622a01a934 \ --hash=sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364 \
--hash=sha256:68a9a4619a666ea6439f2ff250c12a853cd1cbd5158d258bd824a7df6be2f868 --hash=sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d
# via poethepoet
platformdirs==4.9.6 \
--hash=sha256:3bfa75b0ad0db84096ae777218481852c0ebc6c727b3168c1b9e0118e458cf0a \
--hash=sha256:e61adb1d5e5cb3441b4b7710bea7e4c12250ca49439228cc1021c00dcfac0917
# via lrx-cli # via lrx-cli
pluggy==1.6.0 \ pluggy==1.6.0 \
--hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \
--hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746 --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746
# via pytest # via pytest
pygments==2.19.2 \ poethepoet==0.44.0 \
--hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \ --hash=sha256:36d3d834708ed069ac1e4f8ed77915c55265b7b6e01aeb2fe617c9fe9cfd524a \
--hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b --hash=sha256:c2667b513621788fb46482e371cdf81c0b04344e0e0bcb7aa8af45f84c2fce7b
pygments==2.20.0 \
--hash=sha256:6757cd03768053ff99f3039c1a36d6c0aa0b263438fcab17520b30a303a82b5f \
--hash=sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176
# via # via
# pytest # pytest
# rich # rich
pytest==9.0.2 \ pyright==1.1.408 \
--hash=sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b \ --hash=sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1 \
--hash=sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11 --hash=sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684
python-dotenv==1.2.2 \ pytest==9.0.3 \
--hash=sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a \ --hash=sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9 \
--hash=sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3 --hash=sha256:b86ada508af81d19edeb213c681b1d48246c1a91d304c6c81a427674c17eb91c
# via lrx-cli pyyaml==6.0.3 \
--hash=sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c \
--hash=sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3 \
--hash=sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6 \
--hash=sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65 \
--hash=sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1 \
--hash=sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310 \
--hash=sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac \
--hash=sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9 \
--hash=sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7 \
--hash=sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35 \
--hash=sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb \
--hash=sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065 \
--hash=sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c \
--hash=sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c \
--hash=sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764 \
--hash=sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac \
--hash=sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8 \
--hash=sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3 \
--hash=sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5 \
--hash=sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702 \
--hash=sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788 \
--hash=sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba \
--hash=sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5 \
--hash=sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26 \
--hash=sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f \
--hash=sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b \
--hash=sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be \
--hash=sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c \
--hash=sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6
# via poethepoet
rich==14.3.3 \ rich==14.3.3 \
--hash=sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d \ --hash=sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d \
--hash=sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b --hash=sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b
@@ -110,25 +151,29 @@ rich-rst==1.3.2 \
--hash=sha256:a1196fdddf1e364b02ec68a05e8ff8f6914fee10fbca2e6b6735f166bb0da8d4 \ --hash=sha256:a1196fdddf1e364b02ec68a05e8ff8f6914fee10fbca2e6b6735f166bb0da8d4 \
--hash=sha256:a99b4907cbe118cf9d18b0b44de272efa61f15117c61e39ebdc431baf5df722a --hash=sha256:a99b4907cbe118cf9d18b0b44de272efa61f15117c61e39ebdc431baf5df722a
# via cyclopts # via cyclopts
ruff==0.15.8 \ ruff==0.15.10 \
--hash=sha256:04f79eff02a72db209d47d665ba7ebcad609d8918a134f86cb13dd132159fc89 \ --hash=sha256:0744e31482f8f7d0d10a11fcbf897af272fefdfcb10f5af907b18c2813ff4d5f \
--hash=sha256:0f29b989a55572fb885b77464cf24af05500806ab4edf9a0fd8977f9759d85b1 \ --hash=sha256:0ee3ef42dab7078bda5ff6a1bcba8539e9857deb447132ad5566a038674540d0 \
--hash=sha256:12e617fc01a95e5821648a6df341d80456bd627bfab8a829f7cfc26a14a4b4a3 \ --hash=sha256:136c00ca2f47b0018b073f28cb5c1506642a830ea941a60354b0e8bc8076b151 \
--hash=sha256:2033f963c43949d51e6fdccd3946633c6b37c484f5f98c3035f49c27395a8ab8 \ --hash=sha256:28cb32d53203242d403d819fd6983152489b12e4a3ae44993543d6fe62ab42ed \
--hash=sha256:432701303b26416d22ba696c39f2c6f12499b89093b61360abc34bcc9bf07762 \ --hash=sha256:51cb8cc943e891ba99989dd92d61e29b1d231e14811db9be6440ecf25d5c1609 \
--hash=sha256:6ee3ae5c65a42f273f126686353f2e08ff29927b7b7e203b711514370d500de3 \ --hash=sha256:601d1610a9e1f1c2165a4f561eeaa2e2ea1e97f3287c5aa258d3dab8b57c6188 \
--hash=sha256:75e5cd06b1cf3f47a3996cfc999226b19aa92e7cce682dcd62f80d7035f98f49 \ --hash=sha256:8154d43684e4333360fedd11aaa40b1b08a4e37d8ffa9d95fee6fa5b37b6fab1 \
--hash=sha256:8d9a5b8ea13f26ae90838afc33f91b547e61b794865374f114f349e9036835fb \ --hash=sha256:83e1dd04312997c99ea6965df66a14fb4f03ba978564574ffc68b0d61fd3989e \
--hash=sha256:995f11f63597ee362130d1d5a327a87cb6f3f5eae3094c620bcc632329a4d26e \ --hash=sha256:8ab88715f3a6deb6bde6c227f3a123410bec7b855c3ae331b4c006189e895cef \
--hash=sha256:ac51d486bf457cdc985a412fb1801b2dfd1bd8838372fc55de64b1510eff4bec \ --hash=sha256:8b80a2f3c9c8a950d6237f2ca12b206bccff626139be9fa005f14feb881a1ae8 \
--hash=sha256:bc1f0a51254ba21767bfa9a8b5013ca8149dcf38092e6a9eb704d876de94dc34 \ --hash=sha256:93cc06a19e5155b4441dd72808fdf84290d84ad8a39ca3b0f994363ade4cebb1 \
--hash=sha256:c2a33a529fb3cbc23a7124b5c6ff121e4d6228029cba374777bd7649cc8598b8 \ --hash=sha256:a768ff5969b4f44c349d48edf4ab4f91eddb27fd9d77799598e130fb628aa158 \
--hash=sha256:c9861eb959edab053c10ad62c278835ee69ca527b6dcd72b47d5c1e5648964f6 \ --hash=sha256:b0c52744cf9f143a393e284125d2576140b68264a93c6716464e129a3e9adb48 \
--hash=sha256:cbe05adeba76d58162762d6b239c9056f1a15a55bd4b346cfd21e26cd6ad7bc7 \ --hash=sha256:b1e7c16ea0ff5a53b7c2df52d947e685973049be1cdfe2b59a9c43601897b22e \
--hash=sha256:cf891fa8e3bb430c0e7fac93851a5978fc99c8fa2c053b57b118972866f8e5f2 \ --hash=sha256:d1f86e67ebfdef88e00faefa1552b5e510e1d35f3be7d423dc7e84e63788c94e \
--hash=sha256:d3e3d0b6ba8dca1b7ef9ab80a28e840a20070c4b62e56d675c24f366ef330570 \ --hash=sha256:d4272e87e801e9a27a2e8df7b21011c909d9ddd82f4f3281d269b6ba19789ca5 \
--hash=sha256:d910ae974b7a06a33a057cb87d2a10792a3b2b3b35e33d2699fdf63ec8f6b17a \ --hash=sha256:e3e53c588164dc025b671c9df2462429d60357ea91af7e92e9d56c565a9f1b07 \
--hash=sha256:fdce027ada77baa448077ccc6ebb2fa9c3c62fd110d8659d601cf2f475858d94 --hash=sha256:e59c9bdc056a320fb9ea1700a8d591718b8faf78af065484e801258d3a76bc3f
typing-extensions==4.15.0 \
--hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \
--hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548
# via pyright
win32-setctime==1.2.0 ; sys_platform == 'win32' \ win32-setctime==1.2.0 ; sys_platform == 'win32' \
--hash=sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390 \ --hash=sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390 \
--hash=sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0 --hash=sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0
+21
View File
@@ -0,0 +1,21 @@
"""Public package interface.

Re-exports the configuration types and loader, the core lyric manager, data
models, LRC parsing types, the fetcher method type alias, and the sidecar-path
helper, so library consumers can import everything from the package root.
"""

from .config import AppConfig, GeneralConfig, CredentialConfig, load_config
from .core import LrcManager
from .models import CacheStatus, TrackMeta, LyricResult
from .lrc import LRCData, LyricLine
from .fetchers import FetcherMethodType
from .utils import get_sidecar_path

# Explicit public API for `from <pkg> import *` and documentation tools.
__all__ = [
    "AppConfig",
    "GeneralConfig",
    "CredentialConfig",
    "load_config",
    "LrcManager",
    "CacheStatus",
    "TrackMeta",
    "LRCData",
    "LyricLine",
    "LyricResult",
    "FetcherMethodType",
    "get_sidecar_path",
]
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-04-06 08:19:54
Description: The entry point. Description: The entry point.
""" """
from __future__ import annotations
from .cli import run from .cli import run
if __name__ == "__main__": if __name__ == "__main__":
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-04-06 08:21:01
Description: Credential authenticators for third-party provider APIs Description: Credential authenticators for third-party provider APIs
""" """
from __future__ import annotations
from lrx_cli.authenticators.qqmusic import QQMusicAuthenticator from lrx_cli.authenticators.qqmusic import QQMusicAuthenticator
from .base import BaseAuthenticator from .base import BaseAuthenticator
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-04-05 03:18:14
Description: Base class for credential authenticators. Description: Base class for credential authenticators.
""" """
from __future__ import annotations
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Optional from typing import Optional
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-04-05 03:36:44
Description: A dummy authenticator that does nothing and always reports as configured. Description: A dummy authenticator that does nothing and always reports as configured.
""" """
from __future__ import annotations
from .base import BaseAuthenticator from .base import BaseAuthenticator
+14 -5
View File
@@ -4,6 +4,8 @@ Date: 2026-04-05 03:27:56
Description: Musixmatch authenticator — token management, 401 retry, and cooldown. Description: Musixmatch authenticator — token management, 401 retry, and cooldown.
""" """
from __future__ import annotations
import time import time
from typing import Optional from typing import Optional
from urllib.parse import urlencode from urllib.parse import urlencode
@@ -23,6 +25,13 @@ _MXM_BASE_PARAMS = {
} }
def _new_mxm_client(timeout: float) -> httpx.AsyncClient:
    """Create an AsyncClient preloaded with Musixmatch headers.

    httpx's stock ``User-Agent`` header is stripped so only the headers from
    ``_MXM_HEADERS`` (if any) identify the client.
    """
    mxm_client = httpx.AsyncClient(timeout=timeout, headers=_MXM_HEADERS)
    mxm_client.headers.pop("User-Agent", None)
    return mxm_client
class MusixmatchAuthenticator(BaseAuthenticator): class MusixmatchAuthenticator(BaseAuthenticator):
def __init__( def __init__(
self, cache: CacheEngine, credentials: CredentialConfig, general: GeneralConfig self, cache: CacheEngine, credentials: CredentialConfig, general: GeneralConfig
@@ -79,8 +88,8 @@ class MusixmatchAuthenticator(BaseAuthenticator):
logger.debug("Musixmatch: fetching anonymous token") logger.debug("Musixmatch: fetching anonymous token")
try: try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client: async with _new_mxm_client(self._general.http_timeout) as client:
resp = await client.get(url, headers=_MXM_HEADERS) resp = await client.get(url)
resp.raise_for_status() resp.raise_for_status()
data = resp.json() data = resp.json()
except Exception as e: except Exception as e:
@@ -141,9 +150,9 @@ class MusixmatchAuthenticator(BaseAuthenticator):
self._set_cooldown() self._set_cooldown()
return None return None
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client: async with _new_mxm_client(self._general.http_timeout) as client:
url = f"{url_base}?{urlencode({**_MXM_BASE_PARAMS, **params, 'usertoken': token})}" url = f"{url_base}?{urlencode({**_MXM_BASE_PARAMS, **params, 'usertoken': token})}"
resp = await client.get(url, headers=_MXM_HEADERS) resp = await client.get(url)
if resp.status_code == 401: if resp.status_code == 401:
logger.debug("Musixmatch: 401 received, refreshing token") logger.debug("Musixmatch: 401 received, refreshing token")
@@ -153,7 +162,7 @@ class MusixmatchAuthenticator(BaseAuthenticator):
self._set_cooldown() self._set_cooldown()
return None return None
url = f"{url_base}?{urlencode({**_MXM_BASE_PARAMS, **params, 'usertoken': token})}" url = f"{url_base}?{urlencode({**_MXM_BASE_PARAMS, **params, 'usertoken': token})}"
resp = await client.get(url, headers=_MXM_HEADERS) resp = await client.get(url)
resp.raise_for_status() resp.raise_for_status()
return resp.json() return resp.json()
+46
View File
@@ -4,7 +4,11 @@ Date: 2026-04-05 03:47:30
Description: QQ Music API authenticator - currently only a proxy. Description: QQ Music API authenticator - currently only a proxy.
""" """
from __future__ import annotations
from typing import Optional from typing import Optional
import httpx
from loguru import logger
from .base import BaseAuthenticator from .base import BaseAuthenticator
from ..cache import CacheEngine from ..cache import CacheEngine
@@ -26,3 +30,45 @@ class QQMusicAuthenticator(BaseAuthenticator):
async def authenticate(self) -> Optional[str]: async def authenticate(self) -> Optional[str]:
return self._credentials.qq_music_api_url.rstrip("/") or None return self._credentials.qq_music_api_url.rstrip("/") or None
async def search(self, keyword: str, num: int) -> dict | None:
    """Query the qq-music-api search endpoint and return its raw JSON payload.

    Returns None when no base URL is configured, on any request/parse error,
    or when the payload is not a JSON object.
    NOTE(review): relies on ``self._general.http_timeout`` — confirm the base
    class sets ``_general``.
    """
    base_url = await self.authenticate()
    if not base_url:
        return None
    query = {"keyword": keyword, "type": "song", "num": num}
    try:
        async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
            resp = await client.get(f"{base_url}/api/search", params=query)
            resp.raise_for_status()
            payload = resp.json()
    except Exception as e:
        logger.error(f"QQMusic: search request failed: {e}")
        return None
    return payload if isinstance(payload, dict) else None
async def get_lyric(self, mid: str) -> dict | None:
    """Query the qq-music-api lyric endpoint for *mid* and return raw JSON.

    Returns None when no base URL is configured, on any request/parse error,
    or when the payload is not a JSON object.
    """
    base_url = await self.authenticate()
    if not base_url:
        return None
    try:
        async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
            resp = await client.get(f"{base_url}/api/lyric", params={"mid": mid})
            resp.raise_for_status()
            payload = resp.json()
    except Exception as e:
        logger.error(f"QQMusic: lyric request failed for mid={mid}: {e}")
        return None
    return payload if isinstance(payload, dict) else None
+35
View File
@@ -4,6 +4,8 @@ Date: 2026-04-05 03:18:14
Description: Spotify authenticator — TOTP-based access token via SP_DC cookie. Description: Spotify authenticator — TOTP-based access token via SP_DC cookie.
""" """
from __future__ import annotations
import hashlib import hashlib
import hmac import hmac
import struct import struct
@@ -18,6 +20,7 @@ from ..config import CredentialConfig, GeneralConfig, UA_BROWSER
_SPOTIFY_TOKEN_URL = "https://open.spotify.com/api/token" _SPOTIFY_TOKEN_URL = "https://open.spotify.com/api/token"
_SPOTIFY_SERVER_TIME_URL = "https://open.spotify.com/api/server-time" _SPOTIFY_SERVER_TIME_URL = "https://open.spotify.com/api/server-time"
_SPOTIFY_LYRICS_URL = "https://spclient.wg.spotify.com/color-lyrics/v2/track/"
_SPOTIFY_SECRET_URL = ( _SPOTIFY_SECRET_URL = (
"https://raw.githubusercontent.com/xyloflake/spot-secrets-go" "https://raw.githubusercontent.com/xyloflake/spot-secrets-go"
"/refs/heads/main/secrets/secrets.json" "/refs/heads/main/secrets/secrets.json"
@@ -208,3 +211,35 @@ class SpotifyAuthenticator(BaseAuthenticator):
except Exception as e: except Exception as e:
logger.error(f"Spotify: token request failed: {e}") logger.error(f"Spotify: token request failed: {e}")
return None return None
async def get_lyrics(self, track_id: str) -> dict | None:
    """Fetch the raw lyrics JSON payload for a Spotify *track_id*.

    Returns None when authentication fails, the track has no lyrics (404),
    the API responds with any non-200 status, the body is not a JSON object,
    or the request errors out.
    """
    token = await self.authenticate()
    if not token:
        return None
    url = (
        f"{_SPOTIFY_LYRICS_URL}{track_id}"
        "?format=json&vocalRemoval=false&market=from_token"
    )
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {token}",
        **SPOTIFY_BASE_HEADERS,
    }
    try:
        async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
            res = await client.get(url, headers=headers)
            # 404 means "no lyrics for this track" — not worth an error log.
            if res.status_code == 404:
                return None
            if res.status_code != 200:
                logger.error(f"Spotify: lyrics API returned {res.status_code}")
                return None
            payload = res.json()
    except Exception as e:
        logger.error(f"Spotify: lyrics fetch failed: {e}")
        return None
    return payload if isinstance(payload, dict) else None
+3 -1
View File
@@ -5,6 +5,8 @@ Description: SQLite-based lyric cache with per-source slot rows, TTL expiration,
and schema migrations (confidence versioning + slot migration). and schema migrations (confidence versioning + slot migration).
""" """
from __future__ import annotations
import json import json
import sqlite3 import sqlite3
import hashlib import hashlib
@@ -22,7 +24,7 @@ from .config import (
SLOT_UNSYNCED, SLOT_UNSYNCED,
) )
from .models import TrackMeta, LyricResult, CacheStatus from .models import TrackMeta, LyricResult, CacheStatus
from .ranking import is_positive_status, select_best_positive from .utils import is_positive_status, select_best_positive
_ALL_SLOTS = (SLOT_SYNCED, SLOT_UNSYNCED) _ALL_SLOTS = (SLOT_SYNCED, SLOT_UNSYNCED)
+49 -5
View File
@@ -4,6 +4,8 @@ Date: 2026-03-26 02:04:39
Description: CLI interface. Description: CLI interface.
""" """
from __future__ import annotations
import sys import sys
import time import time
import os import os
@@ -21,14 +23,15 @@ from .config import (
load_config, load_config,
enable_debug, enable_debug,
) )
from .utils import get_sidecar_path
from .models import TrackMeta from .models import TrackMeta
from .mpris import get_current_track from .mpris import get_current_track
from .core import LrcManager from .core import LrcManager
from .fetchers import FetcherMethodType from .fetchers import FetcherMethodType
from .lrc import get_sidecar_path
from .watch import WatchCoordinator from .watch import WatchCoordinator
from .watch.control import ControlClient, parse_delta from .watch.control import ControlClient, parse_delta
from .watch.view.pipe import PipeOutput from .watch.view.pipe import PipeOutput
from .watch.view.print import PrintOutput
app = cyclopts.App( app = cyclopts.App(
@@ -68,7 +71,7 @@ def launcher(
str | None, str | None,
cyclopts.Parameter( cyclopts.Parameter(
name=["--player", "-p"], name=["--player", "-p"],
help="Target a specific MPRIS player using its DBus name or a portion thereof.", help="Target a specific MPRIS player using its DBus name or a portion thereof. Bypasses player_blacklist.",
), ),
] = None, ] = None,
db_path: Annotated[ db_path: Annotated[
@@ -390,24 +393,65 @@ def pipe(
before: Annotated[ before: Annotated[
int, int,
cyclopts.Parameter( cyclopts.Parameter(
name="--before", name=["--before", "-b"],
help="Number of lyric lines to show before current line.", help="Number of lyric lines to show before current line.",
), ),
] = 0, ] = 0,
after: Annotated[ after: Annotated[
int, int,
cyclopts.Parameter( cyclopts.Parameter(
name="--after", name=["--after", "-a"],
help="Number of lyric lines to show after current line.", help="Number of lyric lines to show after current line.",
), ),
] = 0, ] = 0,
no_newline: Annotated[
bool,
cyclopts.Parameter(
name=["--no-newline", "-n"],
negative="",
help="Do not append a new line after the lyric output.",
),
] = False,
): ):
"""Watch active player and continuously print lyric window to stdout.""" """Watch active player and continuously print lyric window to stdout."""
logger.info( logger.info(
"Starting watch pipe (player filter: {})", "Starting watch pipe (player filter: {})",
_player or "<none>", _player or "<none>",
) )
output = PipeOutput(before=max(0, before), after=max(0, after)) output = PipeOutput(
before=max(0, before), after=max(0, after), no_newline=no_newline
)
try:
session = WatchCoordinator(
manager,
output,
player_hint=_player,
config=_app_config,
)
success = asyncio.run(session.run())
if not success:
sys.exit(1)
except KeyboardInterrupt:
logger.info("Watch stopped.")
@watch_app.command(name="print")
def watch_print(
plain: Annotated[
bool,
cyclopts.Parameter(
name="--plain",
negative="",
help="Output plain text (strips all tags). Takes priority over --normalize.",
),
] = False,
) -> None:
"""Watch active player and print all lyrics to stdout once per track change."""
logger.info(
"Starting watch print (player filter: {})",
_player or "<none>",
)
output = PrintOutput(plain=plain)
try: try:
session = WatchCoordinator( session = WatchCoordinator(
manager, manager,
+3 -1
View File
@@ -4,6 +4,8 @@ Date: 2026-03-25 10:17:56
Description: Global configuration constants, typed config dataclasses, and logger setup. Description: Global configuration constants, typed config dataclasses, and logger setup.
""" """
from __future__ import annotations
import dataclasses import dataclasses
import os import os
import sys import sys
@@ -67,7 +69,7 @@ MUSIXMATCH_COOLDOWN_MS = 600_000 # 10 minutes
os.makedirs(CACHE_DIR, exist_ok=True) os.makedirs(CACHE_DIR, exist_ok=True)
DEFAULT_PREFERRED_PLAYER = "spotify" DEFAULT_PREFERRED_PLAYER = ""
DEFAULT_PLAYER_BLACKLIST: tuple[str, ...] = ( DEFAULT_PLAYER_BLACKLIST: tuple[str, ...] = (
"firefox", "firefox",
"zen", "zen",
+3 -1
View File
@@ -5,6 +5,8 @@ Description: Core orchestrator — coordinates fetchers with cache-aware fallbac
Also handles enrichers & authenticators & … Also handles enrichers & authenticators & …
""" """
from __future__ import annotations
import asyncio import asyncio
from typing import Optional from typing import Optional
from loguru import logger from loguru import logger
@@ -26,7 +28,7 @@ from .config import (
) )
from .models import TrackMeta, LyricResult, CacheStatus from .models import TrackMeta, LyricResult, CacheStatus
from .enrichers import create_enrichers, enrich_track from .enrichers import create_enrichers, enrich_track
from .ranking import is_better_result, select_best_positive from .utils import is_better_result, select_best_positive
# Maps CacheStatus to the default TTL used when storing results # Maps CacheStatus to the default TTL used when storing results
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-03-31 06:09:11
Description: Metadata enrichment pipeline Description: Metadata enrichment pipeline
""" """
from __future__ import annotations
from loguru import logger from loguru import logger
from .base import BaseEnricher from .base import BaseEnricher
+3 -1
View File
@@ -4,13 +4,15 @@ Date: 2026-03-31 06:11:27
Description: Enricher that reads metadata from audio file tags. Description: Enricher that reads metadata from audio file tags.
""" """
from __future__ import annotations
from typing import Optional from typing import Optional
from loguru import logger from loguru import logger
from mutagen._file import File, FileType from mutagen._file import File, FileType
from .base import BaseEnricher from .base import BaseEnricher
from ..models import TrackMeta from ..models import TrackMeta
from ..lrc import get_audio_path from ..utils import get_audio_path
class AudioTagEnricher(BaseEnricher): class AudioTagEnricher(BaseEnricher):
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-03-31 06:08:16
Description: Base class for metadata enrichers. Description: Base class for metadata enrichers.
""" """
from __future__ import annotations
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Optional from typing import Optional
+3 -1
View File
@@ -4,13 +4,15 @@ Date: 2026-03-31 06:08:44
Description: Enricher that parses metadata from the audio file path. Description: Enricher that parses metadata from the audio file path.
""" """
from __future__ import annotations
import re import re
from typing import Optional from typing import Optional
from loguru import logger from loguru import logger
from .base import BaseEnricher from .base import BaseEnricher
from ..models import TrackMeta from ..models import TrackMeta
from ..lrc import get_audio_path from ..utils import get_audio_path
# Common track-number prefixes: "01 - ", "01. ", "1 - ", etc. # Common track-number prefixes: "01 - ", "01. ", "1 - ", etc.
+2 -1
View File
@@ -4,8 +4,9 @@ Date: 2026-04-05 02:13:49
Description: Musixmatch metadata enricher (matcher.track.get by Spotify track ID). Description: Musixmatch metadata enricher (matcher.track.get by Spotify track ID).
""" """
from typing import Optional from __future__ import annotations
from typing import Optional
from loguru import logger from loguru import logger
from .base import BaseEnricher from .base import BaseEnricher
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-03-25 02:33:26
Description: Fetcher pipeline — registry and types. Description: Fetcher pipeline — registry and types.
""" """
from __future__ import annotations
from typing import Literal, Optional from typing import Literal, Optional
from loguru import logger from loguru import logger
+2
View File
@@ -4,6 +4,8 @@ Date: 2026-03-25 02:33:26
Description: Base fetcher class and common interfaces. Description: Base fetcher class and common interfaces.
""" """
from __future__ import annotations
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Optional from typing import Optional
from dataclasses import dataclass from dataclasses import dataclass
+2 -1
View File
@@ -8,10 +8,11 @@ Description: Cache-search fetcher — cross-album fuzzy lookup in the local cach
albums or is played from different players. albums or is played from different players.
""" """
from __future__ import annotations
from typing import Optional from typing import Optional
from loguru import logger from loguru import logger
from .base import BaseFetcher, FetchResult from .base import BaseFetcher, FetchResult
from .selection import SearchCandidate, select_best from .selection import SearchCandidate, select_best
from ..models import TrackMeta, LyricResult, CacheStatus from ..models import TrackMeta, LyricResult, CacheStatus
+4 -1
View File
@@ -7,6 +7,8 @@ Description: Local fetcher — reads lyrics from .lrc sidecar files or embedded
2. Embedded lyrics in audio metadata (FLAC, MP3 USLT/SYLT tags) 2. Embedded lyrics in audio metadata (FLAC, MP3 USLT/SYLT tags)
""" """
from __future__ import annotations
from typing import Optional from typing import Optional
from loguru import logger from loguru import logger
from mutagen._file import File from mutagen._file import File
@@ -14,7 +16,8 @@ from mutagen.flac import FLAC
from .base import BaseFetcher, FetchResult from .base import BaseFetcher, FetchResult
from ..models import CacheStatus, TrackMeta, LyricResult from ..models import CacheStatus, TrackMeta, LyricResult
from ..lrc import get_audio_path, get_sidecar_path, LRCData from ..lrc import LRCData
from ..utils import get_audio_path, get_sidecar_path
class LocalFetcher(BaseFetcher): class LocalFetcher(BaseFetcher):
+57 -36
View File
@@ -5,6 +5,8 @@ Description: LRCLIB fetcher — queries lrclib.net for synced/plain lyrics.
Requires complete track metadata (artist, title, album, duration). Requires complete track metadata (artist, title, album, duration).
""" """
from __future__ import annotations
import httpx import httpx
from loguru import logger from loguru import logger
from urllib.parse import urlencode from urllib.parse import urlencode
@@ -21,6 +23,38 @@ from ..config import (
_LRCLIB_API_URL = "https://lrclib.net/api/get" _LRCLIB_API_URL = "https://lrclib.net/api/get"
def _parse_lrclib_response(data: dict) -> FetchResult:
    """Map an LRCLIB /api/get JSON payload to a FetchResult.

    Each slot (synced / unsynced) defaults to NOT_FOUND with the not-found
    TTL and is upgraded only when the corresponding field holds a non-blank
    string.
    """
    raw_synced = data.get("syncedLyrics")
    raw_unsynced = data.get("plainLyrics")

    if isinstance(raw_synced, str) and raw_synced.strip():
        synced_result = LyricResult(
            status=CacheStatus.SUCCESS_SYNCED,
            lyrics=LRCData(raw_synced),
            source="lrclib",
        )
    else:
        synced_result = LyricResult(status=CacheStatus.NOT_FOUND, ttl=TTL_NOT_FOUND)

    if isinstance(raw_unsynced, str) and raw_unsynced.strip():
        unsynced_result = LyricResult(
            status=CacheStatus.SUCCESS_UNSYNCED,
            lyrics=LRCData(raw_unsynced),
            source="lrclib",
            ttl=TTL_UNSYNCED,
        )
    else:
        unsynced_result = LyricResult(status=CacheStatus.NOT_FOUND, ttl=TTL_NOT_FOUND)

    return FetchResult(synced=synced_result, unsynced=unsynced_result)
class LrclibFetcher(BaseFetcher): class LrclibFetcher(BaseFetcher):
@property @property
def source_name(self) -> str: def source_name(self) -> str:
@@ -29,12 +63,12 @@ class LrclibFetcher(BaseFetcher):
def is_available(self, track: TrackMeta) -> bool: def is_available(self, track: TrackMeta) -> bool:
return track.is_complete return track.is_complete
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult: async def _api_get(
"""Fetch lyrics from LRCLIB. Requires complete metadata.""" self,
if not track.is_complete: client: httpx.AsyncClient,
logger.debug("LRCLIB: skipped — incomplete metadata") track: TrackMeta,
return FetchResult() ) -> httpx.Response:
"""Issue one LRCLIB get request using the same path as production fetch."""
params = { params = {
"track_name": track.title, "track_name": track.title,
"artist_name": track.artist, "artist_name": track.artist,
@@ -42,11 +76,19 @@ class LrclibFetcher(BaseFetcher):
"duration": track.length / 1000.0 if track.length else 0, "duration": track.length / 1000.0 if track.length else 0,
} }
url = f"{_LRCLIB_API_URL}?{urlencode(params)}" url = f"{_LRCLIB_API_URL}?{urlencode(params)}"
return await client.get(url, headers={"User-Agent": UA_LRX})
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult:
"""Fetch lyrics from LRCLIB. Requires complete metadata."""
if not track.is_complete:
logger.debug("LRCLIB: skipped — incomplete metadata")
return FetchResult()
logger.info(f"LRCLIB: fetching lyrics for {track.display_name()}") logger.info(f"LRCLIB: fetching lyrics for {track.display_name()}")
try: try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client: async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
resp = await client.get(url, headers={"User-Agent": UA_LRX}) resp = await self._api_get(client, track)
if resp.status_code == 404: if resp.status_code == 404:
logger.debug(f"LRCLIB: not found for {track.display_name()}") logger.debug(f"LRCLIB: not found for {track.display_name()}")
@@ -60,37 +102,16 @@ class LrclibFetcher(BaseFetcher):
if not isinstance(data, dict): if not isinstance(data, dict):
logger.error(f"LRCLIB: unexpected response type: {type(data).__name__}") logger.error(f"LRCLIB: unexpected response type: {type(data).__name__}")
return FetchResult.from_network_error() return FetchResult.from_network_error()
result = _parse_lrclib_response(data)
synced = data.get("syncedLyrics") if result.synced and result.synced.lyrics:
unsynced = data.get("plainLyrics") logger.info(
f"LRCLIB: got synced lyrics ({len(result.synced.lyrics)} lines)"
res_synced: LyricResult = LyricResult(
status=CacheStatus.NOT_FOUND, ttl=TTL_NOT_FOUND
) )
res_unsynced: LyricResult = LyricResult( if result.unsynced and result.unsynced.lyrics:
status=CacheStatus.NOT_FOUND, ttl=TTL_NOT_FOUND logger.info(
f"LRCLIB: got unsynced lyrics ({len(result.unsynced.lyrics)} lines)"
) )
return result
if isinstance(synced, str) and synced.strip():
lyrics = LRCData(synced)
logger.info(f"LRCLIB: got synced lyrics ({len(lyrics)} lines)")
res_synced = LyricResult(
status=CacheStatus.SUCCESS_SYNCED,
lyrics=lyrics,
source=self.source_name,
)
if isinstance(unsynced, str) and unsynced.strip():
lyrics = LRCData(unsynced)
logger.info(f"LRCLIB: got unsynced lyrics ({len(lyrics)} lines)")
res_unsynced = LyricResult(
status=CacheStatus.SUCCESS_UNSYNCED,
lyrics=lyrics,
source=self.source_name,
ttl=TTL_UNSYNCED,
)
return FetchResult(synced=res_synced, unsynced=res_unsynced)
except httpx.HTTPError as e: except httpx.HTTPError as e:
logger.error(f"LRCLIB: HTTP error: {e}") logger.error(f"LRCLIB: HTTP error: {e}")
+53 -32
View File
@@ -5,6 +5,8 @@ Description: LRCLIB search fetcher — fuzzy search via lrclib.net /api/search.
Used when metadata is incomplete (no album or duration) but title is available. Used when metadata is incomplete (no album or duration) but title is available.
""" """
from __future__ import annotations
import asyncio import asyncio
import httpx import httpx
from loguru import logger from loguru import logger
@@ -23,6 +25,24 @@ from ..config import (
_LRCLIB_SEARCH_URL = "https://lrclib.net/api/search" _LRCLIB_SEARCH_URL = "https://lrclib.net/api/search"
def _parse_lrclib_search_results(items: list[dict]) -> list[SearchCandidate[dict]]:
"""Map LRCLIB search JSON items to normalized SearchCandidate entries."""
return [
SearchCandidate(
item=item,
duration_ms=item["duration"] * 1000
if isinstance(item.get("duration"), (int, float))
else None,
is_synced=isinstance(item.get("syncedLyrics"), str)
and bool(item["syncedLyrics"].strip()),
title=item.get("trackName"),
artist=item.get("artistName"),
album=item.get("albumName"),
)
for item in items
]
class LrclibSearchFetcher(BaseFetcher): class LrclibSearchFetcher(BaseFetcher):
@property @property
def source_name(self) -> str: def source_name(self) -> str:
@@ -59,22 +79,12 @@ class LrclibSearchFetcher(BaseFetcher):
return queries return queries
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult: async def _api_query(
if not track.title: self,
logger.debug("LRCLIB-search: skipped — no title") client: httpx.AsyncClient,
return FetchResult() params: dict[str, str],
) -> tuple[list[dict], bool]:
queries = self._build_queries(track) """Issue one LRCLIB search query using production request path."""
logger.info(f"LRCLIB-search: searching for {track.display_name()}")
seen_ids: set[int] = set()
candidates: list[dict] = []
had_error = False
try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
async def _query(params: dict[str, str]) -> tuple[list[dict], bool]:
url = f"{_LRCLIB_SEARCH_URL}?{urlencode(params)}" url = f"{_LRCLIB_SEARCH_URL}?{urlencode(params)}"
logger.debug(f"LRCLIB-search: query {params}") logger.debug(f"LRCLIB-search: query {params}")
try: try:
@@ -90,8 +100,20 @@ class LrclibSearchFetcher(BaseFetcher):
return [], False return [], False
return [item for item in data if isinstance(item, dict)], False return [item for item in data if isinstance(item, dict)], False
all_results = await asyncio.gather(*(_query(p) for p in queries)) async def _api_candidates(
self,
client: httpx.AsyncClient,
track: TrackMeta,
) -> tuple[list[dict], bool]:
"""Request and merge LRCLIB-search candidates using built-in query strategy."""
queries = self._build_queries(track)
all_results = await asyncio.gather(
*(self._api_query(client, p) for p in queries)
)
seen_ids: set[int] = set()
candidates: list[dict] = []
had_error = False
for items, err in all_results: for items, err in all_results:
if err: if err:
had_error = True had_error = True
@@ -102,6 +124,18 @@ class LrclibSearchFetcher(BaseFetcher):
if item_id is not None: if item_id is not None:
seen_ids.add(item_id) seen_ids.add(item_id)
candidates.append(item) candidates.append(item)
return candidates, had_error
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult:
if not track.title:
logger.debug("LRCLIB-search: skipped — no title")
return FetchResult()
logger.info(f"LRCLIB-search: searching for {track.display_name()}")
try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
candidates, had_error = await self._api_candidates(client, track)
if not candidates: if not candidates:
if had_error: if had_error:
@@ -111,23 +145,10 @@ class LrclibSearchFetcher(BaseFetcher):
logger.debug( logger.debug(
f"LRCLIB-search: got {len(candidates)} unique candidates " f"LRCLIB-search: got {len(candidates)} unique candidates "
f"from {len(queries)} queries" f"from {len(self._build_queries(track))} queries"
) )
mapped = [ mapped = _parse_lrclib_search_results(candidates)
SearchCandidate(
item=item,
duration_ms=item["duration"] * 1000
if isinstance(item.get("duration"), (int, float))
else None,
is_synced=isinstance(item.get("syncedLyrics"), str)
and bool(item["syncedLyrics"].strip()),
title=item.get("trackName"),
artist=item.get("artistName"),
album=item.get("albumName"),
)
for item in candidates
]
best, confidence = select_best( best, confidence = select_best(
mapped, mapped,
track.length, track.length,
+115 -53
View File
@@ -11,6 +11,8 @@ Description: Musixmatch fetchers (desktop API, anonymous or usertoken auth).
musixmatch — metadata search + best-candidate fallback musixmatch — metadata search + best-candidate fallback
""" """
from __future__ import annotations
import json import json
from typing import Optional from typing import Optional
from loguru import logger from loguru import logger
@@ -83,21 +85,8 @@ def _parse_subtitle(body: str) -> Optional[str]:
return None return None
async def _fetch_macro( def _parse_mxm_macro(data: dict) -> LRCData | None:
auth: MusixmatchAuthenticator, """Parse macro.subtitles.get payload into LRCData (richsync preferred)."""
params: dict,
) -> Optional[LRCData]:
"""Call macro.subtitles.get via auth.get_json.
Returns LRCData (richsync preferred over subtitle), or None when no usable
lyrics are found. Raises on HTTP/network errors.
"""
logger.debug(f"Musixmatch: macro call with {list(params.keys())}")
data = await auth.get_json(_MUSIXMATCH_MACRO_URL, {**_MXM_MACRO_PARAMS, **params})
if data is None:
return None
# Musixmatch returns body=[] (not {}) when the track is not found
body = data.get("message", {}).get("body", {}) body = data.get("message", {}).get("body", {})
if not isinstance(body, dict): if not isinstance(body, dict):
return None return None
@@ -105,7 +94,6 @@ async def _fetch_macro(
if not isinstance(macro_calls, dict): if not isinstance(macro_calls, dict):
return None return None
# Prefer richsync (word-level timing)
richsync_msg = macro_calls.get("track.richsync.get", {}).get("message", {}) richsync_msg = macro_calls.get("track.richsync.get", {}).get("message", {})
if ( if (
isinstance(richsync_msg, dict) isinstance(richsync_msg, dict)
@@ -119,10 +107,8 @@ async def _fetch_macro(
if lrc_text: if lrc_text:
lrc = LRCData(lrc_text) lrc = LRCData(lrc_text)
if lrc: if lrc:
logger.debug("Musixmatch: got richsync lyrics")
return lrc return lrc
# Fall back to subtitle (line-level timing)
subtitle_msg = macro_calls.get("track.subtitles.get", {}).get("message", {}) subtitle_msg = macro_calls.get("track.subtitles.get", {}).get("message", {})
if ( if (
isinstance(subtitle_msg, dict) isinstance(subtitle_msg, dict)
@@ -136,13 +122,36 @@ async def _fetch_macro(
if lrc_text: if lrc_text:
lrc = LRCData(lrc_text) lrc = LRCData(lrc_text)
if lrc: if lrc:
logger.debug("Musixmatch: got subtitle lyrics")
return lrc return lrc
logger.debug("Musixmatch: no usable lyrics in macro response")
return None return None
def _parse_mxm_search(data: dict) -> list[SearchCandidate[int]]:
"""Parse track.search payload to normalized candidates."""
track_list = data.get("message", {}).get("body", {}).get("track_list", [])
if not isinstance(track_list, list) or not track_list:
return []
return [
SearchCandidate(
item=int(t["commontrack_id"]),
duration_ms=(
float(t["track_length"]) * 1000 if t.get("track_length") else None
),
is_synced=bool(t.get("has_subtitles") or t.get("has_richsync")),
title=t.get("track_name"),
artist=t.get("artist_name"),
album=t.get("album_name"),
)
for item in track_list
if isinstance(item, dict)
and isinstance(t := item.get("track", {}), dict)
and isinstance(t.get("commontrack_id"), int)
and not t.get("instrumental")
]
class MusixmatchSpotifyFetcher(BaseFetcher): class MusixmatchSpotifyFetcher(BaseFetcher):
"""Direct lookup by Spotify track ID — no search, single request.""" """Direct lookup by Spotify track ID — no search, single request."""
@@ -158,14 +167,36 @@ class MusixmatchSpotifyFetcher(BaseFetcher):
def is_available(self, track: TrackMeta) -> bool: def is_available(self, track: TrackMeta) -> bool:
return bool(track.trackid) and not self._auth.is_cooldown() return bool(track.trackid) and not self._auth.is_cooldown()
async def _api_macro(self, params: dict) -> dict | None:
"""Request macro payload through authenticator using production path."""
return await self._auth.get_json(
_MUSIXMATCH_MACRO_URL, {**_MXM_MACRO_PARAMS, **params}
)
async def _api_macro_track(self, track: TrackMeta) -> dict | None:
"""Request macro payload for one track using Spotify ID lookup path."""
if not track.trackid:
return None
return await self._api_macro({"track_spotify_id": track.trackid})
async def _fetch_macro(self, params: dict) -> LRCData | None:
"""Request and parse Musixmatch macro lyrics payload."""
logger.debug(f"Musixmatch: macro call with {list(params.keys())}")
data = await self._api_macro(params)
if data is None:
return None
lrc = _parse_mxm_macro(data)
if lrc is None:
logger.debug("Musixmatch: no usable lyrics in macro response")
return None
logger.debug("Musixmatch: parsed macro lyrics")
return lrc
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult: async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult:
logger.info(f"Musixmatch-Spotify: fetching lyrics for {track.display_name()}") logger.info(f"Musixmatch-Spotify: fetching lyrics for {track.display_name()}")
try: try:
lrc = await _fetch_macro( lrc = await self._fetch_macro({"track_spotify_id": track.trackid}) # type: ignore[dict-item]
self._auth,
{"track_spotify_id": track.trackid}, # type: ignore[dict-item]
)
except AttributeError: except AttributeError:
return FetchResult.from_not_found() return FetchResult.from_not_found()
except Exception as e: except Exception as e:
@@ -210,9 +241,13 @@ class MusixmatchFetcher(BaseFetcher):
def is_available(self, track: TrackMeta) -> bool: def is_available(self, track: TrackMeta) -> bool:
return bool(track.title) and not self._auth.is_cooldown() return bool(track.title) and not self._auth.is_cooldown()
async def _search(self, track: TrackMeta) -> tuple[Optional[int], float]: async def _api_search(self, params: dict) -> dict | None:
"""Search for track metadata. Raises on network/HTTP errors.""" """Request search payload through authenticator using production path."""
params: dict = { return await self._auth.get_json(_MUSIXMATCH_SEARCH_URL, params)
def _build_search_params(self, track: TrackMeta) -> dict[str, str]:
"""Build Musixmatch search params for one track."""
params: dict[str, str] = {
"q_track": track.title or "", "q_track": track.title or "",
"page_size": "10", "page_size": "10",
"f_has_lyrics": "1", "f_has_lyrics": "1",
@@ -221,36 +256,66 @@ class MusixmatchFetcher(BaseFetcher):
params["q_artist"] = track.artist params["q_artist"] = track.artist
if track.album: if track.album:
params["q_album"] = track.album params["q_album"] = track.album
return params
async def _api_search_track(self, track: TrackMeta) -> dict | None:
"""Request search payload for one track using production path."""
return await self._api_search(self._build_search_params(track))
async def _api_macro(self, params: dict) -> dict | None:
"""Request macro payload through authenticator using production path."""
return await self._auth.get_json(
_MUSIXMATCH_MACRO_URL, {**_MXM_MACRO_PARAMS, **params}
)
async def _api_macro_track(self, track: TrackMeta) -> dict | None:
"""Request macro payload for top-ranked search candidate of one track."""
search_data = await self._api_search_track(track)
if search_data is None:
return None
candidates = _parse_mxm_search(search_data)
if not candidates:
return None
commontrack_id, _confidence = select_best(
candidates,
track.length,
title=track.title,
artist=track.artist,
album=track.album,
)
if commontrack_id is None:
return None
return await self._api_macro({"commontrack_id": str(commontrack_id)})
async def _fetch_macro(self, params: dict) -> LRCData | None:
"""Request and parse Musixmatch macro lyrics payload."""
logger.debug(f"Musixmatch: macro call with {list(params.keys())}")
data = await self._api_macro(params)
if data is None:
return None
lrc = _parse_mxm_macro(data)
if lrc is None:
logger.debug("Musixmatch: no usable lyrics in macro response")
return None
logger.debug("Musixmatch: parsed macro lyrics")
return lrc
async def _search(self, track: TrackMeta) -> tuple[Optional[int], float]:
"""Search for track metadata. Raises on network/HTTP errors."""
logger.debug(f"Musixmatch: searching for '{track.display_name()}'") logger.debug(f"Musixmatch: searching for '{track.display_name()}'")
data = await self._auth.get_json(_MUSIXMATCH_SEARCH_URL, params) data = await self._api_search_track(track)
if data is None: if data is None:
return None, 0.0 return None, 0.0
track_list = data.get("message", {}).get("body", {}).get("track_list", []) candidates = _parse_mxm_search(data)
if not isinstance(track_list, list) or not track_list: if not candidates:
logger.debug("Musixmatch: search returned 0 results") logger.debug("Musixmatch: search returned 0 results")
return None, 0.0 return None, 0.0
logger.debug(f"Musixmatch: search returned {len(track_list)} candidates") logger.debug(f"Musixmatch: search returned {len(candidates)} candidates")
candidates = [
SearchCandidate(
item=int(t["commontrack_id"]),
duration_ms=(
float(t["track_length"]) * 1000 if t.get("track_length") else None
),
is_synced=bool(t.get("has_subtitles") or t.get("has_richsync")),
title=t.get("track_name"),
artist=t.get("artist_name"),
album=t.get("album_name"),
)
for item in track_list
if isinstance(item, dict)
and isinstance(t := item.get("track", {}), dict)
and isinstance(t.get("commontrack_id"), int)
and not t.get("instrumental")
]
best_id, confidence = select_best( best_id, confidence = select_best(
candidates, candidates,
@@ -274,10 +339,7 @@ class MusixmatchFetcher(BaseFetcher):
logger.debug(f"Musixmatch: no match found for {track.display_name()}") logger.debug(f"Musixmatch: no match found for {track.display_name()}")
return FetchResult.from_not_found() return FetchResult.from_not_found()
lrc = await _fetch_macro( lrc = await self._fetch_macro({"commontrack_id": str(commontrack_id)})
self._auth,
{"commontrack_id": str(commontrack_id)},
)
except AttributeError: except AttributeError:
return FetchResult.from_not_found() return FetchResult.from_not_found()
except Exception as e: except Exception as e:
+131 -66
View File
@@ -7,6 +7,8 @@ Description: Netease Cloud Music fetcher.
retrieving lyrics. No authentication required. retrieving lyrics. No authentication required.
""" """
from __future__ import annotations
import asyncio import asyncio
import httpx import httpx
from loguru import logger from loguru import logger
@@ -30,6 +32,42 @@ _NETEASE_BASE_HEADERS = {
} }
def _parse_netease_search(data: dict) -> list[SearchCandidate[int]]:
"""Parse Netease search response into scored candidates."""
result_body = data.get("result")
if not isinstance(result_body, dict):
return []
songs = result_body.get("songs")
if not isinstance(songs, list) or len(songs) == 0:
return []
return [
SearchCandidate(
item=song_id,
duration_ms=float(song["dt"]) if isinstance(song.get("dt"), int) else None,
title=song.get("name"),
artist=", ".join(a.get("name", "") for a in song.get("ar", [])) or None,
album=(song.get("al") or {}).get("name"),
)
for song in songs
if isinstance(song, dict) and isinstance(song_id := song.get("id"), int)
]
def _parse_netease_lyrics(data: dict) -> LRCData | None:
"""Parse Netease lyric response to LRCData."""
lrc_obj = data.get("lrc")
if not isinstance(lrc_obj, dict):
return None
lrc = lrc_obj.get("lyric", "")
if not isinstance(lrc, str) or not lrc.strip():
return None
return LRCData(lrc)
class NeteaseFetcher(BaseFetcher): class NeteaseFetcher(BaseFetcher):
@property @property
def source_name(self) -> str: def source_name(self) -> str:
@@ -38,6 +76,88 @@ class NeteaseFetcher(BaseFetcher):
def is_available(self, track: TrackMeta) -> bool: def is_available(self, track: TrackMeta) -> bool:
return bool(track.title) return bool(track.title)
async def _api_search(
self,
client: httpx.AsyncClient,
query: str,
limit: int,
) -> dict | None:
"""Issue one Netease search request and return JSON payload."""
resp = await client.post(
_NETEASE_SEARCH_URL,
headers=_NETEASE_BASE_HEADERS,
data={"s": query, "type": "1", "limit": str(limit), "offset": "0"},
)
resp.raise_for_status()
data = resp.json()
if not isinstance(data, dict):
return None
return data
async def _api_search_track(
self,
client: httpx.AsyncClient,
track: TrackMeta,
limit: int,
) -> dict | None:
"""Request Netease search payload for one track using production query strategy."""
query = f"{track.artist or ''} {track.title or ''}".strip()
if not query:
return None
return await self._api_search(client, query, limit)
async def _api_lyric(
self,
client: httpx.AsyncClient,
song_id: int,
) -> dict | None:
"""Issue one Netease lyric request and return JSON payload."""
resp = await client.post(
_NETEASE_LYRIC_URL,
headers=_NETEASE_BASE_HEADERS,
data={
"id": str(song_id),
"cp": "false",
"tv": "0",
"lv": "0",
"rv": "0",
"kv": "0",
"yv": "0",
"ytv": "0",
"yrv": "0",
},
)
resp.raise_for_status()
data = resp.json()
if not isinstance(data, dict):
return None
return data
async def _api_lyric_track(
    self,
    client: httpx.AsyncClient,
    track: TrackMeta,
    limit: int,
) -> dict | None:
    """Search, rank, and fetch the lyric payload for the best candidate.

    Returns None when the search yields nothing usable or ranking rejects
    every candidate.
    """
    payload = await self._api_search_track(client, track, limit)
    if payload is None:
        return None
    parsed = _parse_netease_search(payload)
    if not parsed:
        return None
    ranked = select_ranked(
        parsed,
        track.length,
        title=track.title,
        artist=track.artist,
        album=track.album,
    )
    if not ranked:
        return None
    best_id, _score = ranked[0]
    return await self._api_lyric(client, best_id)
async def _search( async def _search(
self, track: TrackMeta, limit: int = 10 self, track: TrackMeta, limit: int = 10
) -> list[tuple[int, float]]: ) -> list[tuple[int, float]]:
@@ -49,46 +169,18 @@ class NeteaseFetcher(BaseFetcher):
try: try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client: async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
resp = await client.post( result = await self._api_search_track(client, track, limit)
_NETEASE_SEARCH_URL,
headers=_NETEASE_BASE_HEADERS,
data={"s": query, "type": "1", "limit": str(limit), "offset": "0"},
)
resp.raise_for_status()
result = resp.json()
if not isinstance(result, dict): if result is None:
logger.error( logger.error("Netease: search returned non-dict payload")
f"Netease: search returned non-dict: {type(result).__name__}"
)
return [] return []
result_body = result.get("result") candidates = _parse_netease_search(result)
if not isinstance(result_body, dict): if not candidates:
logger.debug("Netease: search 'result' field missing or invalid")
return []
songs = result_body.get("songs")
if not isinstance(songs, list) or len(songs) == 0:
logger.debug("Netease: search returned 0 results") logger.debug("Netease: search returned 0 results")
return [] return []
logger.debug(f"Netease: search returned {len(songs)} candidates") logger.debug(f"Netease: search returned {len(candidates)} candidates")
candidates = [
SearchCandidate(
item=song_id,
duration_ms=float(song["dt"])
if isinstance(song.get("dt"), int)
else None,
title=song.get("name"),
artist=", ".join(a.get("name", "") for a in song.get("ar", []))
or None,
album=(song.get("al") or {}).get("name"),
)
for song in songs
if isinstance(song, dict) and isinstance(song_id := song.get("id"), int)
]
ranked = select_ranked( ranked = select_ranked(
candidates, candidates,
track.length, track.length,
@@ -114,43 +206,16 @@ class NeteaseFetcher(BaseFetcher):
try: try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client: async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
resp = await client.post( data = await self._api_lyric(client, song_id)
_NETEASE_LYRIC_URL,
headers=_NETEASE_BASE_HEADERS,
data={
"id": str(song_id),
"cp": "false",
"tv": "0",
"lv": "0",
"rv": "0",
"kv": "0",
"yv": "0",
"ytv": "0",
"yrv": "0",
},
)
resp.raise_for_status()
data = resp.json()
if not isinstance(data, dict): if data is None:
logger.error( logger.error("Netease: lyric response is not dict")
f"Netease: lyric response is not dict: {type(data).__name__}"
)
return FetchResult.from_network_error() return FetchResult.from_network_error()
lrc_obj = data.get("lrc") lrcdata = _parse_netease_lyrics(data)
if not isinstance(lrc_obj, dict): if lrcdata is None:
logger.debug(
f"Netease: no 'lrc' object in response for song_id={song_id}"
)
return FetchResult.from_not_found()
lrc: str = lrc_obj.get("lyric", "")
if not isinstance(lrc, str) or not lrc.strip():
logger.debug(f"Netease: empty lyrics for song_id={song_id}") logger.debug(f"Netease: empty lyrics for song_id={song_id}")
return FetchResult.from_not_found() return FetchResult.from_not_found()
lrcdata = LRCData(lrc)
status = lrcdata.detect_sync_status() status = lrcdata.detect_sync_status()
logger.info( logger.info(
f"Netease: got {status.value} lyrics for song_id={song_id} " f"Netease: got {status.value} lyrics for song_id={song_id} "
+98 -60
View File
@@ -9,12 +9,14 @@ Description: QQ Music fetcher via self-hosted API proxy.
Search → pick best match → fetch LRC lyrics. Search → pick best match → fetch LRC lyrics.
""" """
from __future__ import annotations
import asyncio import asyncio
import httpx
from loguru import logger from loguru import logger
from .base import BaseFetcher, FetchResult from .base import BaseFetcher, FetchResult
from .selection import SearchCandidate, select_ranked from .selection import SearchCandidate, select_ranked
from ..authenticators import QQMusicAuthenticator
from ..models import TrackMeta, LyricResult, CacheStatus from ..models import TrackMeta, LyricResult, CacheStatus
from ..lrc import LRCData from ..lrc import LRCData
from ..config import ( from ..config import (
@@ -23,9 +25,40 @@ from ..config import (
MULTI_CANDIDATE_DELAY_S, MULTI_CANDIDATE_DELAY_S,
) )
def _parse_qq_search(data: dict) -> list[SearchCandidate[str]]:
    """Parse a QQMusic search response into normalized candidates.

    Returns an empty list on an API error (``code != 0``) or a malformed
    song list. Songs without a string ``mid`` are skipped; ``interval`` is
    reported in seconds by the API and converted to milliseconds here.

    NOTE(review): removed the leftover ``_QQ_MUSIC_API_*_ENDPOINT``
    constants — the fetcher now routes every call through the
    authenticator, so they were dead code.
    """
    if data.get("code") != 0:
        return []
    songs = data.get("data", {}).get("list", [])
    if not isinstance(songs, list):
        return []
    return [
        SearchCandidate(
            item=mid,
            duration_ms=float(song["interval"]) * 1000
            if isinstance(song.get("interval"), int)
            else None,
            title=song.get("name"),
            artist=", ".join(s.get("name", "") for s in song.get("singer", [])) or None,
            album=(song.get("album") or {}).get("name"),
        )
        for song in songs
        if isinstance(song, dict) and isinstance(mid := song.get("mid"), str)
    ]
def _parse_qq_lyrics(data: dict) -> LRCData | None:
    """Extract LRC text from a QQMusic lyric payload.

    Returns None on an API error (``code != 0``) or when the lyric text is
    missing, non-string, or blank.
    """
    if data.get("code") != 0:
        return None
    raw = data.get("data", {}).get("lyric", "")
    if not isinstance(raw, str) or not raw.strip():
        return None
    return LRCData(raw)
class QQMusicFetcher(BaseFetcher): class QQMusicFetcher(BaseFetcher):
@@ -41,49 +74,73 @@ class QQMusicFetcher(BaseFetcher):
def is_available(self, track: TrackMeta) -> bool: def is_available(self, track: TrackMeta) -> bool:
return bool(track.title) and self._auth.is_configured() return bool(track.title) and self._auth.is_configured()
async def _api_search(
    self,
    track: TrackMeta,
    limit: int,
) -> dict | None:
    """Run a QQMusic search for one track through the authenticator.

    Returns None when no query can be built from the track metadata or the
    authenticator does not return a dict payload.
    """
    terms = f"{track.artist or ''} {track.title or ''}".strip()
    if not terms:
        return None
    payload = await self._auth.search(terms, limit)
    return payload if isinstance(payload, dict) else None
async def _api_lyric(
    self,
    mid: str,
) -> dict | None:
    """Fetch the raw QQMusic lyric payload for one song MID.

    Returns None when the authenticator does not produce a dict payload.
    """
    payload = await self._auth.get_lyric(mid)
    return payload if isinstance(payload, dict) else None
async def _api_lyric_track(
    self,
    track: TrackMeta,
    limit: int,
) -> dict | None:
    """Search, rank, and return the lyric payload for the best-matching song.

    Returns None when search, parsing, or ranking produces no candidate.
    """
    payload = await self._api_search(track, limit)
    if payload is None:
        return None
    parsed = _parse_qq_search(payload)
    if not parsed:
        return None
    ranked = select_ranked(
        parsed,
        track.length,
        title=track.title,
        artist=track.artist,
        album=track.album,
    )
    if not ranked:
        return None
    best_mid, _score = ranked[0]
    return await self._api_lyric(best_mid)
async def _search( async def _search(
self, track: TrackMeta, limit: int = 10 self, track: TrackMeta, limit: int = 10
) -> list[tuple[str, float]]: ) -> list[tuple[str, float]]:
query = f"{track.artist or ''} {track.title or ''}".strip() search_data = await self._api_search(track, limit)
if not query: if search_data is None:
return [] return []
query = f"{track.artist or ''} {track.title or ''}".strip()
logger.debug(f"QQMusic: searching for '{query}' (limit={limit})") logger.debug(f"QQMusic: searching for '{query}' (limit={limit})")
try: candidates = _parse_qq_search(search_data)
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client: if not candidates:
resp = await client.get(
f"{await self._auth.authenticate()}{_QQ_MUSIC_API_SEARCH_ENDPOINT}",
params={"keyword": query, "type": "song", "num": limit},
)
resp.raise_for_status()
data = resp.json()
if data.get("code") != 0:
logger.error(f"QQMusic: search API error: {data}")
return []
songs = data.get("data", {}).get("list", [])
if not songs:
logger.debug("QQMusic: search returned 0 results") logger.debug("QQMusic: search returned 0 results")
return [] return []
logger.debug(f"QQMusic: search returned {len(songs)} candidates") logger.debug(f"QQMusic: search returned {len(candidates)} candidates")
candidates = [
SearchCandidate(
item=mid,
duration_ms=float(song["interval"]) * 1000
if isinstance(song.get("interval"), int)
else None,
title=song.get("name"),
artist=", ".join(s.get("name", "") for s in song.get("singer", []))
or None,
album=(song.get("album") or {}).get("name"),
)
for song in songs
if isinstance(song, dict) and isinstance(mid := song.get("mid"), str)
]
ranked = select_ranked( ranked = select_ranked(
candidates, candidates,
track.length, track.length,
@@ -100,32 +157,17 @@ class QQMusicFetcher(BaseFetcher):
logger.debug("QQMusic: no suitable candidate found") logger.debug("QQMusic: no suitable candidate found")
return ranked return ranked
except Exception as e:
logger.error(f"QQMusic: search failed: {e}")
return []
async def _get_lyric(self, mid: str, confidence: float = 0.0) -> FetchResult: async def _get_lyric(self, mid: str, confidence: float = 0.0) -> FetchResult:
logger.debug(f"QQMusic: fetching lyrics for mid={mid}") logger.debug(f"QQMusic: fetching lyrics for mid={mid}")
data = await self._api_lyric(mid)
try: if data is None:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
resp = await client.get(
f"{await self._auth.authenticate()}{_QQ_MUSIC_API_LYRIC_ENDPOINT}",
params={"mid": mid},
)
resp.raise_for_status()
data = resp.json()
if data.get("code") != 0:
logger.error(f"QQMusic: lyric API error: {data}")
return FetchResult.from_network_error() return FetchResult.from_network_error()
lrc = data.get("data", {}).get("lyric", "") lrcdata = _parse_qq_lyrics(data)
if not isinstance(lrc, str) or not lrc.strip(): if lrcdata is None:
logger.debug(f"QQMusic: empty lyrics for mid={mid}") logger.debug(f"QQMusic: empty lyrics for mid={mid}")
return FetchResult.from_not_found() return FetchResult.from_not_found()
lrcdata = LRCData(lrc)
status = lrcdata.detect_sync_status() status = lrcdata.detect_sync_status()
logger.info( logger.info(
f"QQMusic: got {status.value} lyrics for mid={mid} ({len(lrcdata)} lines)" f"QQMusic: got {status.value} lyrics for mid={mid} ({len(lrcdata)} lines)"
@@ -151,10 +193,6 @@ class QQMusicFetcher(BaseFetcher):
), ),
) )
except Exception as e:
logger.error(f"QQMusic: lyric fetch failed for mid={mid}: {e}")
return FetchResult.from_network_error()
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult: async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult:
if not self._auth.is_configured(): if not self._auth.is_configured():
logger.debug("QQMusic: skipped — Auth not configured") logger.debug("QQMusic: skipped — Auth not configured")
+2
View File
@@ -8,6 +8,8 @@ Description: Shared candidate-selection logic for search-based fetchers.
proximity, and sync status. proximity, and sync status.
""" """
from __future__ import annotations
from dataclasses import dataclass from dataclasses import dataclass
from typing import Generic, Optional, TypeVar from typing import Generic, Optional, TypeVar
+72 -84
View File
@@ -4,16 +4,68 @@ Date: 2026-03-25 10:43:21
Description: Spotify fetcher — obtains synced lyrics via Spotify's internal color-lyrics API. Description: Spotify fetcher — obtains synced lyrics via Spotify's internal color-lyrics API.
""" """
import httpx from __future__ import annotations
from loguru import logger from loguru import logger
from .base import BaseFetcher, FetchResult from .base import BaseFetcher, FetchResult
from ..authenticators.spotify import SpotifyAuthenticator, SPOTIFY_BASE_HEADERS from ..authenticators.spotify import SpotifyAuthenticator
from ..models import TrackMeta, LyricResult, CacheStatus from ..models import TrackMeta, LyricResult, CacheStatus
from ..lrc import LRCData from ..lrc import LRCData
from ..config import GeneralConfig, TTL_NOT_FOUND from ..config import GeneralConfig, TTL_NOT_FOUND
_SPOTIFY_LYRICS_URL = "https://spclient.wg.spotify.com/color-lyrics/v2/track/"
def _format_lrc_line(start_ms: int, words: str) -> str:
minutes = start_ms // 60000
seconds = (start_ms // 1000) % 60
centiseconds = round((start_ms % 1000) / 10.0)
return f"[{minutes:02d}:{seconds:02d}.{centiseconds:02.0f}]{words}"
def _is_truly_synced(lines: list[dict]) -> bool:
for line in lines:
try:
ms = int(line.get("startTimeMs", "0"))
if ms > 0:
return True
except (ValueError, TypeError):
continue
return False
def _parse_spotify_lyrics(data: dict) -> LRCData | None:
    """Convert a Spotify color-lyrics payload into LRCData.

    Returns None when the payload lacks a usable "lyrics" object or yields
    no renderable lines. Payloads that are not LINE_SYNCED (or whose
    timestamps are all zero) are emitted with a uniform "[00:00.00]" prefix.
    """
    lyrics = data.get("lyrics")
    if not isinstance(lyrics, dict):
        return None
    lines = lyrics.get("lines", [])
    if not isinstance(lines, list) or not lines:
        return None
    synced = lyrics.get("syncType", "") == "LINE_SYNCED" and _is_truly_synced(lines)
    rendered: list[str] = []
    for entry in lines:
        if not isinstance(entry, dict):
            continue
        words = entry.get("words", "")
        if not isinstance(words, str):
            continue
        if synced:
            try:
                start = int(entry.get("startTimeMs", "0"))
            except (ValueError, TypeError):
                start = 0
            rendered.append(_format_lrc_line(start, words))
        else:
            rendered.append(f"[00:00.00]{words}")
    if not rendered:
        return None
    return LRCData("\n".join(rendered))
class SpotifyFetcher(BaseFetcher): class SpotifyFetcher(BaseFetcher):
@@ -29,23 +81,14 @@ class SpotifyFetcher(BaseFetcher):
def is_available(self, track: TrackMeta) -> bool: def is_available(self, track: TrackMeta) -> bool:
return bool(track.trackid) and self._auth.is_configured() return bool(track.trackid) and self._auth.is_configured()
async def _api_lyrics(self, track: TrackMeta) -> dict | None:
    """Return the raw Spotify lyrics payload for one track via the authenticator.

    Returns None when the track has no trackid or the authenticator does not
    produce a dict payload.

    NOTE(review): this span contained diff-fused residue interleaving the
    superseded ``_format_lrc_line``/``_is_truly_synced`` staticmethods (now
    module-level helpers) with this method; only the new-side method is kept.
    """
    if not track.trackid:
        return None
    data = await self._auth.get_lyrics(track.trackid)
    if not isinstance(data, dict):
        return None
    return data
async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult: async def fetch(self, track: TrackMeta, bypass_cache: bool = False) -> FetchResult:
if not track.trackid: if not track.trackid:
@@ -54,71 +97,20 @@ class SpotifyFetcher(BaseFetcher):
logger.info(f"Spotify: fetching lyrics for trackid={track.trackid}") logger.info(f"Spotify: fetching lyrics for trackid={track.trackid}")
token = await self._auth.authenticate() data = await self._api_lyrics(track)
if not token: if data is None:
logger.error("Spotify: cannot fetch lyrics without a token") logger.debug(f"Spotify: no lyrics payload for trackid={track.trackid}")
return FetchResult.from_network_error()
url = f"{_SPOTIFY_LYRICS_URL}{track.trackid}?format=json&vocalRemoval=false&market=from_token"
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {token}",
**SPOTIFY_BASE_HEADERS,
}
try:
async with httpx.AsyncClient(timeout=self._general.http_timeout) as client:
res = await client.get(url, headers=headers)
if res.status_code == 404:
logger.debug(f"Spotify: 404 for trackid={track.trackid}")
return FetchResult.from_not_found() return FetchResult.from_not_found()
if res.status_code != 200: content = _parse_spotify_lyrics(data)
logger.error(f"Spotify: lyrics API returned {res.status_code}") if content is None:
return FetchResult.from_network_error() logger.debug("Spotify: response contained no parseable lyric lines")
data = res.json()
if not isinstance(data, dict) or "lyrics" not in data:
logger.error("Spotify: unexpected lyrics response structure")
return FetchResult.from_network_error()
lyrics_data = data["lyrics"]
sync_type = lyrics_data.get("syncType", "")
lines = lyrics_data.get("lines", [])
if not isinstance(lines, list) or len(lines) == 0:
logger.debug("Spotify: response contained no lyric lines")
return FetchResult.from_not_found() return FetchResult.from_not_found()
is_synced = sync_type == "LINE_SYNCED" and self._is_truly_synced(lines) status = content.detect_sync_status()
logger.info(f"Spotify: got {status.value} lyrics ({len(content)} lines)")
lrc_lines: list[str] = []
for line in lines:
words = line.get("words", "")
if not isinstance(words, str):
continue
try:
ms = int(line.get("startTimeMs", "0"))
except (ValueError, TypeError):
ms = 0
if is_synced:
lrc_lines.append(self._format_lrc_line(ms, words))
else:
lrc_lines.append(f"[00:00.00]{words}")
content = LRCData("\n".join(lrc_lines))
status = (
CacheStatus.SUCCESS_SYNCED
if is_synced
else CacheStatus.SUCCESS_UNSYNCED
)
logger.info(f"Spotify: got {status.value} lyrics ({len(lrc_lines)} lines)")
not_found = LyricResult(status=CacheStatus.NOT_FOUND, ttl=TTL_NOT_FOUND) not_found = LyricResult(status=CacheStatus.NOT_FOUND, ttl=TTL_NOT_FOUND)
if is_synced: if status == CacheStatus.SUCCESS_SYNCED:
return FetchResult( return FetchResult(
synced=LyricResult( synced=LyricResult(
status=CacheStatus.SUCCESS_SYNCED, status=CacheStatus.SUCCESS_SYNCED,
@@ -135,7 +127,3 @@ class SpotifyFetcher(BaseFetcher):
source=self.source_name, source=self.source_name,
), ),
) )
except Exception as e:
logger.error(f"Spotify: lyrics fetch failed: {e}")
return FetchResult.from_network_error()
+2 -33
View File
@@ -4,12 +4,12 @@ Date: 2026-03-25 21:54:01
Description: LRC parsing, modeling, and serialization helpers. Description: LRC parsing, modeling, and serialization helpers.
""" """
from __future__ import annotations
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from dataclasses import dataclass, field from dataclasses import dataclass, field
import re import re
from pathlib import Path
from typing import Optional from typing import Optional
from urllib.parse import unquote
from .models import CacheStatus from .models import CacheStatus
@@ -463,34 +463,3 @@ class LRCData:
""" """
normalized = self.normalize() normalized = self.normalize()
return self._serialize_lines(normalized._lines, include_word_sync=False) return self._serialize_lines(normalized._lines, include_word_sync=False)
def get_audio_path(audio_url: str, ensure_exists: bool = False) -> Optional[Path]:
"""Convert file:// URL to Path, return None if invalid or (if ensure_exists) file doesn't exist."""
if not audio_url.startswith("file://"):
return None
file_path = unquote(audio_url.replace("file://", "", 1))
path = Path(file_path)
if ensure_exists and not path.exists():
return None
return path
def get_sidecar_path(
audio_url: str,
ensure_audio_exists: bool = False,
ensure_exists: bool = False,
extension: str = ".lrc",
) -> Optional[Path]:
"""Given a file:// URL, return the corresponding .lrc sidecar path.
If ensure_audio_exists is True, return None if the audio file does not exist.
If ensure_exists is True, return None if the .lrc file does not exist.
"""
audio_path = get_audio_path(audio_url, ensure_exists=ensure_audio_exists)
if not audio_path:
return None
lrc_path = audio_path.with_suffix(extension)
if ensure_exists and not lrc_path.exists():
return None
return lrc_path
+52 -29
View File
@@ -4,6 +4,8 @@ Date: 2026-03-25 04:44:15
Description: MPRIS integration for fetching track metadata. Description: MPRIS integration for fetching track metadata.
""" """
from __future__ import annotations
import asyncio import asyncio
from dbus_next.aio.message_bus import MessageBus from dbus_next.aio.message_bus import MessageBus
from dbus_next.constants import BusType from dbus_next.constants import BusType
@@ -15,11 +17,8 @@ from .config import DEFAULT_PLAYER_BLACKLIST, DEFAULT_PREFERRED_PLAYER
from .models import TrackMeta from .models import TrackMeta
async def _list_mpris_players( async def _list_mpris_players(bus: MessageBus) -> List[str]:
bus: MessageBus, """List all MPRIS player bus names without any filtering."""
player_blacklist: tuple[str, ...],
) -> List[str]:
"""List all MPRIS player bus names, excluding blacklisted entries."""
try: try:
reply = await bus.call( reply = await bus.call(
Message( Message(
@@ -32,10 +31,7 @@ async def _list_mpris_players(
if not reply or not reply.body: if not reply or not reply.body:
return [] return []
return [ return [
name name for name in reply.body[0] if name.startswith("org.mpris.MediaPlayer2.")
for name in reply.body[0]
if name.startswith("org.mpris.MediaPlayer2.")
and not any(x.lower() in name.lower() for x in player_blacklist)
] ]
except Exception as e: except Exception as e:
logger.error(f"Failed to list DBus names: {e}") logger.error(f"Failed to list DBus names: {e}")
@@ -59,6 +55,32 @@ async def _get_playback_status(bus: MessageBus, player_name: str) -> Optional[st
return None return None
def pick_active_player(
    all_names: list[str],
    playing: list[str],
    preferred: str,
    last_active: str | None = None,
) -> str | None:
    """Choose which MPRIS player to follow.

    Precedence: a lone playing player wins outright; otherwise a candidate
    whose bus name contains the preferred keyword (checked among playing
    players first, falling back to every known player); then the previously
    active player for continuity; finally the first candidate. Returns None
    when no players are known at all.
    """
    if not all_names:
        return None
    if len(playing) == 1:
        return playing[0]
    pool = playing or all_names
    keyword = preferred.strip().lower()
    if keyword:
        match = next((name for name in pool if keyword in name.lower()), None)
        if match is not None:
            return match
    if last_active is not None and last_active in all_names:
        return last_active
    return pool[0]
async def _select_player( async def _select_player(
bus: MessageBus, bus: MessageBus,
specific_player: Optional[str], specific_player: Optional[str],
@@ -67,38 +89,39 @@ async def _select_player(
) -> Optional[str]: ) -> Optional[str]:
"""Select the best MPRIS player. """Select the best MPRIS player.
When specific_player is given, filter by name match. When specific_player is given, it bypasses player_blacklist and filters by name.
Otherwise: prefer the currently playing player. If multiple are playing, Otherwise: prefer the currently playing player. If multiple are playing,
prefer the one matching preferred_player (default: spotify). prefer the one matching preferred_player (default: spotify).
""" """
players = await _list_mpris_players(bus, player_blacklist) all_names = await _list_mpris_players(bus)
if not players: if not all_names:
return None return None
if specific_player: if specific_player:
players = [p for p in players if specific_player.lower() in p.lower()] # --player bypasses player_blacklist so the user can target any player
return players[0] if players else None matched = [p for p in all_names if specific_player.lower() in p.lower()]
return matched[0] if matched else None
# Check playback status for each player # auto-selection: apply blacklist before choosing
playing = [] # candidates = []
for p in players: # for p in all_names:
# if any(x.lower() in p.lower() for x in player_blacklist):
# logger.info(f"Excluding blacklisted player: {p}")
# else:
# candidates.append(p)
candidates = [
p
for p in all_names
if not any(x.lower() in p.lower() for x in player_blacklist)
]
playing: list[str] = []
for p in candidates:
status = await _get_playback_status(bus, p) status = await _get_playback_status(bus, p)
logger.debug(f"Player {p}: {status}") logger.debug(f"Player {p}: {status}")
if status == "Playing": if status == "Playing":
playing.append(p) playing.append(p)
candidates = playing if playing else players return pick_active_player(candidates, playing, preferred_player)
if len(candidates) == 1:
return candidates[0]
# Multiple candidates: prefer preferred_player
preferred = preferred_player.lower()
if preferred:
for p in candidates:
if preferred in p.lower():
return p
return candidates[0]
async def _fetch_metadata_dbus( async def _fetch_metadata_dbus(
+2
View File
@@ -5,6 +5,8 @@ Description: Shared text normalization utilities for fuzzy matching.
Used by cache key generation, cache search, and candidate selection scoring. Used by cache key generation, cache search, and candidate selection scoring.
""" """
from __future__ import annotations
import re import re
import unicodedata import unicodedata
@@ -1,14 +1,56 @@
"""Shared ranking rules for LyricResult selection. """
Author: Uyanide pywang0608@foxmail.com
This module centralizes how positive lyric results are compared so cache/core Date: 2026-04-10 17:06:37
and other callers use the same precedence and edge-case handling. Description: Utility functions
""" """
from __future__ import annotations from __future__ import annotations
from typing import Optional from typing import TYPE_CHECKING, Optional
from urllib.parse import unquote
from pathlib import Path
from .models import CacheStatus, LyricResult from .models import CacheStatus
if TYPE_CHECKING:
from .models import LyricResult
# Paths
def get_audio_path(audio_url: str, ensure_exists: bool = False) -> Optional[Path]:
    """Resolve a ``file://`` URL to a local filesystem Path.

    Returns None for non-file URLs and, when ``ensure_exists`` is True, for
    paths that are not present on disk. Percent-encoding is decoded.
    """
    if not audio_url.startswith("file://"):
        return None
    candidate = Path(unquote(audio_url[len("file://"):]))
    if ensure_exists and not candidate.exists():
        return None
    return candidate
def get_sidecar_path(
    audio_url: str,
    ensure_audio_exists: bool = False,
    ensure_exists: bool = False,
    extension: str = ".lrc",
) -> Optional[Path]:
    """Map a ``file://`` audio URL to its lyric sidecar path.

    The sidecar shares the audio file's stem with ``extension`` substituted
    for the audio suffix. Returns None for non-file URLs, when
    ``ensure_audio_exists`` is True and the audio file is missing, or when
    ``ensure_exists`` is True and the sidecar itself is missing.
    """
    audio = get_audio_path(audio_url, ensure_exists=ensure_audio_exists)
    if not audio:
        return None
    sidecar = audio.with_suffix(extension)
    if ensure_exists and not sidecar.exists():
        return None
    return sidecar
# Ranking
def is_positive_status(status: CacheStatus) -> bool: def is_positive_status(status: CacheStatus) -> bool:
+1 -1
View File
@@ -1,4 +1,4 @@
"""Watch subsystem public exports.""" from __future__ import annotations
from .session import WatchCoordinator from .session import WatchCoordinator
+13 -1
View File
@@ -1,4 +1,11 @@
"""Unix-socket control channel for communicating with a running watch session.""" """
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:14:58
Description: Unix-socket control channel for communicating with a running watch session.
"""
from __future__ import annotations
import asyncio import asyncio
import json import json
@@ -40,14 +47,17 @@ class ControlServer:
return True return True
try: try:
# probe the socket to distinguish a live session from a stale socket file
reader, writer = await asyncio.open_unix_connection(str(self._socket_path)) reader, writer = await asyncio.open_unix_connection(str(self._socket_path))
writer.close() writer.close()
await writer.wait_closed() await writer.wait_closed()
# connection succeeded → another watch session is actively listening
logger.error( logger.error(
"A watch session is already running. Use 'lrx watch ctl status'." "A watch session is already running. Use 'lrx watch ctl status'."
) )
return False return False
except Exception: except Exception:
# connection refused / file is stale → safe to remove and reuse
try: try:
self._socket_path.unlink(missing_ok=True) self._socket_path.unlink(missing_ok=True)
except Exception: except Exception:
@@ -136,6 +146,8 @@ def parse_delta(raw: str) -> tuple[bool, int | None, str | None]:
if value.startswith("+"): if value.startswith("+"):
return True, int(value[1:]), None return True, int(value[1:]), None
if value.startswith("-"): if value.startswith("-"):
# keep the sign by negating; bare int() would accept "-123" too but
# explicit split is clearer about intent and avoids double-negative edge cases
return True, -int(value[1:]), None return True, -int(value[1:]), None
return True, int(value), None return True, int(value), None
except ValueError: except ValueError:
+11 -1
View File
@@ -1,4 +1,11 @@
"""Debounced lyric fetch orchestration for watch session.""" """
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:14:41
Description: Debounced lyric fetch orchestration for watch session.
"""
from __future__ import annotations
import asyncio import asyncio
from typing import Awaitable, Callable, Optional from typing import Awaitable, Callable, Optional
@@ -50,6 +57,7 @@ class LyricFetcher:
"""Request lyrics for track with debounce collapsing.""" """Request lyrics for track with debounce collapsing."""
self._pending_track = track self._pending_track = track
if self._debounce_task is not None: if self._debounce_task is not None:
# cancel any pending debounce window — the new request supersedes it
self._debounce_task.cancel() self._debounce_task.cancel()
self._debounce_task = asyncio.create_task(self._debounce_then_fetch()) self._debounce_task = asyncio.create_task(self._debounce_then_fetch())
@@ -61,6 +69,7 @@ class LyricFetcher:
return return
if self._fetch_task is not None: if self._fetch_task is not None:
# abort any in-flight fetch for a previous track before starting the new one
self._fetch_task.cancel() self._fetch_task.cancel()
await asyncio.gather(self._fetch_task, return_exceptions=True) await asyncio.gather(self._fetch_task, return_exceptions=True)
@@ -68,6 +77,7 @@ class LyricFetcher:
async def _do_fetch(self, track: TrackMeta) -> None: async def _do_fetch(self, track: TrackMeta) -> None:
"""Execute fetch lifecycle callbacks and fetch lyrics for a track.""" """Execute fetch lifecycle callbacks and fetch lyrics for a track."""
# callbacks may be plain functions or coroutines — handle both
fetching_callback_result = self._on_fetching() fetching_callback_result = self._on_fetching()
if asyncio.iscoroutine(fetching_callback_result): if asyncio.iscoroutine(fetching_callback_result):
await fetching_callback_result await fetching_callback_result
+35 -39
View File
@@ -1,4 +1,11 @@
"""Player discovery, state monitoring, and active-player selection for watch mode.""" """
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:14:27
Description: Player discovery, state monitoring, and active-player selection for watch mode.
"""
from __future__ import annotations
from dataclasses import dataclass from dataclasses import dataclass
from typing import Callable, Optional from typing import Callable, Optional
@@ -10,6 +17,7 @@ from dbus_next.message import Message
from loguru import logger from loguru import logger
from ..models import TrackMeta from ..models import TrackMeta
from ..mpris import pick_active_player
def _variant_value(item: object) -> object | None: def _variant_value(item: object) -> object | None:
@@ -33,25 +41,6 @@ class PlayerTarget:
"""Constraint for choosing which players are visible to watch.""" """Constraint for choosing which players are visible to watch."""
hint: Optional[str] = None hint: Optional[str] = None
player_blacklist: tuple[str, ...] = ()
def validation_error(self) -> str | None:
"""Return validation message when hint conflicts with blacklist, else None."""
normalized_hint = self.normalized_hint
if not normalized_hint:
return None
for blocked in self.player_blacklist:
normalized_blocked = blocked.strip().lower()
if not normalized_blocked:
continue
if _keyword_match(normalized_hint, normalized_blocked) or _keyword_match(
normalized_blocked, normalized_hint
):
return (
f"Requested player '{self.hint}' is blocked by "
f"PLAYER_BLACKLIST entry '{blocked}'."
)
return None
@property @property
def normalized_hint(self) -> str: def normalized_hint(self) -> str:
@@ -96,7 +85,7 @@ class PlayerMonitor:
self._on_players_changed = on_players_changed self._on_players_changed = on_players_changed
self._on_seeked = on_seeked self._on_seeked = on_seeked
self._on_playback_status = on_playback_status self._on_playback_status = on_playback_status
self._target = target or PlayerTarget(player_blacklist=self._player_blacklist) self._target = target or PlayerTarget()
self.players: dict[str, PlayerState] = {} self.players: dict[str, PlayerState] = {}
self._bus: MessageBus | None = None self._bus: MessageBus | None = None
self._props_cache: dict[str, object] = {} self._props_cache: dict[str, object] = {}
@@ -162,7 +151,11 @@ class PlayerMonitor:
logger.debug(f"Failed to add DBus match rule {rule}: {e}") logger.debug(f"Failed to add DBus match rule {rule}: {e}")
async def _list_mpris_players(self) -> list[str]: async def _list_mpris_players(self) -> list[str]:
"""List visible MPRIS players after applying blacklist and target filter.""" """List visible MPRIS players after applying target filter and optional blacklist.
The blacklist is skipped when an explicit player hint is active so that
``--player`` can target any player regardless of PLAYER_BLACKLIST.
"""
if not self._bus: if not self._bus:
return [] return []
try: try:
@@ -177,10 +170,15 @@ class PlayerMonitor:
if not reply or not reply.body: if not reply or not reply.body:
return [] return []
out: list[str] = [] out: list[str] = []
hint_active = bool(self._target.normalized_hint)
for name in reply.body[0]: for name in reply.body[0]:
if not name.startswith("org.mpris.MediaPlayer2."): if not name.startswith("org.mpris.MediaPlayer2."):
continue continue
if any(x.lower() in name.lower() for x in self._player_blacklist): # --player bypasses the blacklist; only filter when no hint is given
if not hint_active and any(
x.lower() in name.lower() for x in self._player_blacklist
):
# logger.info(f"Excluding blacklisted player: {name}")
continue continue
if not self._target.allows(name): if not self._target.allows(name):
continue continue
@@ -219,6 +217,7 @@ class PlayerMonitor:
trackid = metadata.get("mpris:trackid") trackid = metadata.get("mpris:trackid")
if trackid is not None: if trackid is not None:
trackid = _variant_value(trackid) trackid = _variant_value(trackid)
# normalize Spotify track IDs — the raw MPRIS value varies by client version
if isinstance(trackid, str) and trackid.startswith("spotify:track:"): if isinstance(trackid, str) and trackid.startswith("spotify:track:"):
trackid = trackid.removeprefix("spotify:track:") trackid = trackid.removeprefix("spotify:track:")
elif isinstance(trackid, str) and trackid.startswith("/com/spotify/track/"): elif isinstance(trackid, str) and trackid.startswith("/com/spotify/track/"):
@@ -230,12 +229,14 @@ class PlayerMonitor:
length_ms = None length_ms = None
length_value = _variant_value(length) if length is not None else None length_value = _variant_value(length) if length is not None else None
if isinstance(length_value, int): if isinstance(length_value, int):
# MPRIS reports length in microseconds; convert to milliseconds
length_ms = length_value // 1000 length_ms = length_value // 1000
artist = metadata.get("xesam:artist") artist = metadata.get("xesam:artist")
artist_v = None artist_v = None
artist_value = _variant_value(artist) if artist is not None else None artist_value = _variant_value(artist) if artist is not None else None
if isinstance(artist_value, list) and artist_value: if isinstance(artist_value, list) and artist_value:
# xesam:artist is a list; take the first entry as primary artist
artist_v = artist_value[0] artist_v = artist_value[0]
title = metadata.get("xesam:title") title = metadata.get("xesam:title")
@@ -286,10 +287,14 @@ class PlayerMonitor:
async def _resolve_well_known_name(self, unique_sender: str) -> str | None: async def _resolve_well_known_name(self, unique_sender: str) -> str | None:
"""Map a DBus unique sender (e.g. :1.42) to a tracked MPRIS bus name.""" """Map a DBus unique sender (e.g. :1.42) to a tracked MPRIS bus name."""
if unique_sender in self.players: if unique_sender in self.players:
# sender is already a well-known name we track (unlikely but fast path)
return unique_sender return unique_sender
if not self._bus: if not self._bus:
return None return None
# Seeked signals arrive with the unique connection name (:1.N), not the
# well-known bus name (org.mpris.MediaPlayer2.X). Ask D-Bus which
# well-known name owns that unique name.
for bus_name in self.players: for bus_name in self.players:
try: try:
reply = await self._bus.call( reply = await self._bus.call(
@@ -325,6 +330,7 @@ class PlayerMonitor:
message.interface == "org.freedesktop.DBus" message.interface == "org.freedesktop.DBus"
and message.member == "NameOwnerChanged" and message.member == "NameOwnerChanged"
): ):
# a player appeared or disappeared — rescan the full player list
if message.body and str(message.body[0]).startswith( if message.body and str(message.body[0]).startswith(
"org.mpris.MediaPlayer2." "org.mpris.MediaPlayer2."
): ):
@@ -335,7 +341,9 @@ class PlayerMonitor:
message.interface == "org.freedesktop.DBus.Properties" message.interface == "org.freedesktop.DBus.Properties"
and message.member == "PropertiesChanged" and message.member == "PropertiesChanged"
): ):
# Message.sender is a DBus unique name, so match by path+iface. # message.sender is a unique connection name, not the well-known bus
# name, so we can't filter by sender here — match by object path and
# interface instead to scope it to MPRIS Player properties only
path_ok = message.path == "/org/mpris/MediaPlayer2" path_ok = message.path == "/org/mpris/MediaPlayer2"
iface = message.body[0] if message.body else None iface = message.body[0] if message.body else None
if path_ok and iface == "org.mpris.MediaPlayer2.Player": if path_ok and iface == "org.mpris.MediaPlayer2.Player":
@@ -348,6 +356,7 @@ class PlayerMonitor:
): ):
sender = message.sender or "" sender = message.sender or ""
if sender and message.body: if sender and message.body:
# MPRIS Seeked position is in microseconds; convert to ms
position_us = int(message.body[0]) position_us = int(message.body[0])
asyncio.create_task( asyncio.create_task(
self._handle_seeked_signal( self._handle_seeked_signal(
@@ -388,19 +397,6 @@ class ActivePlayerSelector:
"""Select active player by playing state, preferred keyword, and continuity.""" """Select active player by playing state, preferred keyword, and continuity."""
if not players: if not players:
return None return None
all_names = list(players.keys())
playing = [name for name, st in players.items() if st.status == "Playing"] playing = [name for name, st in players.items() if st.status == "Playing"]
if len(playing) == 1: return pick_active_player(all_names, playing, preferred_player, last_active)
return playing[0]
preferred = preferred_player.lower().strip()
candidates = playing if playing else list(players.keys())
if preferred:
for name in candidates:
if preferred in name.lower():
return name
if last_active and last_active in players:
return last_active
return candidates[0] if candidates else None
+47 -44
View File
@@ -1,10 +1,15 @@
"""Watch orchestration with explicit MVVM role boundaries. """
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:10:52
Description: Watch orchestration with explicit MVVM role boundaries.
- Model: WatchModel stores domain state. - Model: WatchModel stores domain state.
- ViewModel: WatchViewModel projects model to output-facing state/signature. - ViewModel: WatchViewModel projects model to output-facing state/signature.
- Coordinator: WatchCoordinator wires services and drives async workflows. - Coordinator: WatchCoordinator wires services and drives async workflows.
""" """
from __future__ import annotations
import asyncio import asyncio
from dataclasses import asdict from dataclasses import asdict
from typing import Optional from typing import Optional
@@ -17,7 +22,7 @@ from ..models import TrackMeta
from .control import ControlServer from .control import ControlServer
from .fetcher import LyricFetcher from .fetcher import LyricFetcher
from ..config import AppConfig from ..config import AppConfig
from .view import BaseOutput, LyricView, WatchState from .view import BaseOutput, LyricView, WatchState, WatchStatus
from .player import ActivePlayerSelector, PlayerMonitor, PlayerTarget from .player import ActivePlayerSelector, PlayerMonitor, PlayerTarget
from .tracker import PositionTracker from .tracker import PositionTracker
@@ -28,14 +33,14 @@ class WatchModel:
offset_ms: int offset_ms: int
active_player: str | None active_player: str | None
active_track_key: str | None active_track_key: str | None
status: str status: WatchStatus
lyrics: LyricView | None lyrics: LyricView | None
def __init__(self) -> None: def __init__(self) -> None:
self.offset_ms = 0 self.offset_ms = 0
self.active_player: str | None = None self.active_player: str | None = None
self.active_track_key: str | None = None self.active_track_key: str | None = None
self.status: str = "idle" self.status: WatchStatus = WatchStatus.IDLE
self.lyrics: LyricView | None = None self.lyrics: LyricView | None = None
def set_lyrics(self, lyrics: LRCData | None) -> None: def set_lyrics(self, lyrics: LRCData | None) -> None:
@@ -48,6 +53,8 @@ class WatchModel:
def state_signature(self, track: TrackMeta | None, position_ms: int) -> tuple: def state_signature(self, track: TrackMeta | None, position_ms: int) -> tuple:
"""Build dedupe signature from model state and current lyric cursor.""" """Build dedupe signature from model state and current lyric cursor."""
# prefer trackid when available; fall back to display name for players
# that don't expose a stable ID (e.g. some MPRIS implementations)
track_key = ( track_key = (
track.trackid track.trackid
if track and track.trackid if track and track.trackid
@@ -56,7 +63,8 @@ class WatchModel:
else None else None
) )
if self.status != "ok" or self.lyrics is None: if self.status != WatchStatus.OK or self.lyrics is None:
# non-OK states don't have cursor position — discriminate by status alone
return ("status", self.status, self.active_player, track_key) return ("status", self.status, self.active_player, track_key)
at_ms = position_ms + self.offset_ms at_ms = position_ms + self.offset_ms
cursor = self.lyrics.signature_cursor(at_ms) cursor = self.lyrics.signature_cursor(at_ms)
@@ -82,7 +90,7 @@ class WatchViewModel:
lyrics=self._model.lyrics, lyrics=self._model.lyrics,
position_ms=position_ms, position_ms=position_ms,
offset_ms=self._model.offset_ms, offset_ms=self._model.offset_ms,
status=self._model.status, # type: ignore[arg-type] status=self._model.status,
) )
@@ -121,10 +129,7 @@ class WatchCoordinator:
self._emit_scheduled = False self._emit_scheduled = False
self._calibration_task = None self._calibration_task = None
self._target = PlayerTarget( self._target = PlayerTarget(hint=player_hint)
hint=player_hint,
player_blacklist=self._config.general.player_blacklist,
)
self._control = ControlServer(socket_path=config.watch.socket_path) self._control = ControlServer(socket_path=config.watch.socket_path)
self._player_monitor = PlayerMonitor( self._player_monitor = PlayerMonitor(
@@ -148,11 +153,6 @@ class WatchCoordinator:
async def run(self) -> bool: async def run(self) -> bool:
"""Run watch workflow and return success flag.""" """Run watch workflow and return success flag."""
target_issue = self._target.validation_error()
if target_issue:
logger.error(target_issue)
return False
logger.info( logger.info(
"watch session starting (player filter: {})", "watch session starting (player filter: {})",
self._player_hint or "<none>", self._player_hint or "<none>",
@@ -164,7 +164,9 @@ class WatchCoordinator:
await self._player_monitor.start() await self._player_monitor.start()
await self._tracker.start() await self._tracker.start()
self._calibration_task = asyncio.create_task(self._calibration_loop()) self._calibration_task = asyncio.create_task(self._calibration_loop())
# emit once at startup so outputs don't sit blank until the first event
self._schedule_emit() self._schedule_emit()
# block forever; CancelledError from signal handler exits the loop cleanly
await asyncio.Event().wait() await asyncio.Event().wait()
return True return True
except asyncio.CancelledError: except asyncio.CancelledError:
@@ -206,8 +208,10 @@ class WatchCoordinator:
if track is None: if track is None:
return False return False
if self._model.lyrics is not None: if self._model.lyrics is not None:
# lyrics already loaded — nothing to fetch
return False return False
if self._model.status == "fetching": if self._model.status == WatchStatus.FETCHING:
# a fetch is already in flight — don't queue another
return False return False
logger.info("fetching lyrics for track ({}): {}", reason, track.display_name()) logger.info("fetching lyrics for track ({}): {}", reason, track.display_name())
self._fetcher.request(track) self._fetcher.request(track)
@@ -246,7 +250,7 @@ class WatchCoordinator:
) )
if selected is None: if selected is None:
self._model.status = "idle" self._model.status = WatchStatus.IDLE
self._model.active_track_key = None self._model.active_track_key = None
self._model.set_lyrics(None) self._model.set_lyrics(None)
self._schedule_emit() self._schedule_emit()
@@ -254,7 +258,7 @@ class WatchCoordinator:
state = self._player_monitor.players.get(selected) state = self._player_monitor.players.get(selected)
if state is None: if state is None:
self._model.status = "idle" self._model.status = WatchStatus.IDLE
self._model.active_track_key = None self._model.active_track_key = None
self._model.set_lyrics(None) self._model.set_lyrics(None)
self._schedule_emit() self._schedule_emit()
@@ -272,6 +276,7 @@ class WatchCoordinator:
track_changed = track_key != prev_track_key track_changed = track_key != prev_track_key
player_changed = selected != prev_player player_changed = selected != prev_player
if track_changed or player_changed: if track_changed or player_changed:
# clear stale lyrics immediately so the old track's lines don't flash
self._model.set_lyrics(None) self._model.set_lyrics(None)
self._model.active_track_key = track_key self._model.active_track_key = track_key
@@ -284,21 +289,20 @@ class WatchCoordinator:
) )
) )
if state.status != "Playing": # only fetch on identity change — calibration ticks must not re-trigger fetches
self._model.status = "paused"
self._schedule_emit()
return
started_fetch = False started_fetch = False
if track is not None and (player_changed or track_changed): if track is not None and (player_changed or track_changed):
started_fetch = self._request_fetch_for_active_track("track-changed") started_fetch = self._request_fetch_for_active_track("track-changed")
# derive status from what actually happened this tick; preserve FETCHING
# if an in-flight request was started before this snapshot arrived
if self._model.lyrics is not None: if self._model.lyrics is not None:
self._model.status = "ok" self._model.status = WatchStatus.OK
elif started_fetch: elif started_fetch:
self._model.status = "fetching" self._model.status = WatchStatus.FETCHING
elif self._model.status != "fetching": elif self._model.status != WatchStatus.FETCHING:
self._model.status = "no_lyrics" # don't overwrite FETCHING with NO_LYRICS while a request is in flight
self._model.status = WatchStatus.NO_LYRICS
self._schedule_emit() self._schedule_emit()
def _on_seeked(self, bus_name: str, position_ms: int) -> None: def _on_seeked(self, bus_name: str, position_ms: int) -> None:
@@ -306,29 +310,18 @@ class WatchCoordinator:
asyncio.create_task(self._tracker.on_seeked(bus_name, position_ms)) asyncio.create_task(self._tracker.on_seeked(bus_name, position_ms))
def _on_playback_status(self, bus_name: str, status: str) -> None: def _on_playback_status(self, bus_name: str, status: str) -> None:
"""React to playback status change and tracker sync.""" """Forward playback status change to position tracker."""
if bus_name == self._model.active_player:
if status == "Playing":
started_fetch = self._request_fetch_for_active_track("resume-playing")
if self._model.lyrics is not None:
self._model.status = "ok"
elif started_fetch:
self._model.status = "fetching"
elif self._model.status != "fetching":
self._model.status = "no_lyrics"
else:
self._model.status = "paused"
self._schedule_emit()
asyncio.create_task(self._tracker.on_playback_status(bus_name, status)) asyncio.create_task(self._tracker.on_playback_status(bus_name, status))
def _on_tracker_tick(self) -> None: def _on_tracker_tick(self) -> None:
"""Emit updates from tracker tick only while lyrics are actively rendering.""" """Emit updates from tracker tick only while lyrics are actively rendering."""
if self._model.status == "ok": if self._model.status == WatchStatus.OK and self._output.position_sensitive:
self._schedule_emit() self._schedule_emit()
def _schedule_emit(self) -> None: def _schedule_emit(self) -> None:
"""Coalesce frequent events into at most one in-flight emit task.""" """Coalesce frequent events into at most one in-flight emit task."""
if self._emit_scheduled: if self._emit_scheduled:
# a task is already queued; it will pick up the latest model state when it runs
return return
self._emit_scheduled = True self._emit_scheduled = True
asyncio.create_task(self._run_scheduled_emit()) asyncio.create_task(self._run_scheduled_emit())
@@ -338,17 +331,20 @@ class WatchCoordinator:
try: try:
await self._emit_state() await self._emit_state()
finally: finally:
# release the gate even on error so future events can still schedule
self._emit_scheduled = False self._emit_scheduled = False
async def _on_fetching(self) -> None: async def _on_fetching(self) -> None:
"""Mark model as fetching and emit state.""" """Mark model as fetching and emit state."""
self._model.status = "fetching" self._model.status = WatchStatus.FETCHING
await self._emit_state() await self._emit_state()
async def _on_lyrics_update(self, lyrics: Optional[LRCData]) -> None: async def _on_lyrics_update(self, lyrics: Optional[LRCData]) -> None:
"""Update model with fetched lyrics and emit state.""" """Update model with fetched lyrics and emit state."""
self._model.set_lyrics(lyrics) self._model.set_lyrics(lyrics)
self._model.status = "ok" if lyrics is not None else "no_lyrics" self._model.status = (
WatchStatus.OK if lyrics is not None else WatchStatus.NO_LYRICS
)
logger.info( logger.info(
"lyrics update result: {}", "lyrics update result: {}",
"found" if lyrics is not None else "not found", "found" if lyrics is not None else "not found",
@@ -359,10 +355,17 @@ class WatchCoordinator:
"""Emit output state only when semantic signature changes.""" """Emit output state only when semantic signature changes."""
player = self._player_monitor.players.get(self._model.active_player or "") player = self._player_monitor.players.get(self._model.active_player or "")
track = player.track if player else None track = player.track if player else None
position = await self._tracker.get_position_ms() # position=0 for non-position-sensitive outputs so the signature is stable
# across ticks and on_state fires at most once per track+status transition
position = (
await self._tracker.get_position_ms()
if self._output.position_sensitive
else 0
)
signature = self._view_model.signature(track, position) signature = self._view_model.signature(track, position)
if signature == self._last_emit_signature: if signature == self._last_emit_signature:
# state hasn't changed semantically — skip redundant render
return return
self._last_emit_signature = signature self._last_emit_signature = signature
state = self._view_model.state(track, position) state = self._view_model.state(track, position)
+18 -5
View File
@@ -1,4 +1,11 @@
"""Playback position tracking utilities for watch mode.""" """
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:13:35
Description: Playback position tracking utilities for watch mode.
"""
from __future__ import annotations
import asyncio import asyncio
import time import time
@@ -69,11 +76,13 @@ class PositionTracker:
self._is_playing = playback_status == "Playing" self._is_playing = playback_status == "Playing"
status_changed_to_playing = self._is_playing and not was_playing status_changed_to_playing = self._is_playing and not was_playing
if player_changed or track_changed: if player_changed or track_changed:
# reset to 0 so stale position from a previous track doesn't bleed through
self._position_ms = 0 self._position_ms = 0
should_calibrate_now = ( # poll MPRIS on any identity change (player, track, or resume) so a paused
self._is_playing # mid-song player gets its position anchored immediately; calibration-loop
and bool(self._active_player) # ticks are excluded because they pass the same player/track/status
and (player_changed or track_changed or status_changed_to_playing) should_calibrate_now = bool(self._active_player) and (
player_changed or track_changed or status_changed_to_playing
) )
self._track_key = track_key self._track_key = track_key
self._last_tick = time.monotonic() self._last_tick = time.monotonic()
@@ -97,6 +106,7 @@ class PositionTracker:
return return
was_playing = self._is_playing was_playing = self._is_playing
self._is_playing = playback_status == "Playing" self._is_playing = playback_status == "Playing"
# re-anchor last_tick when resuming so the gap while paused isn't counted
should_calibrate_now = self._is_playing and not was_playing should_calibrate_now = self._is_playing and not was_playing
self._last_tick = time.monotonic() self._last_tick = time.monotonic()
@@ -112,10 +122,13 @@ class PositionTracker:
async with self._lock: async with self._lock:
now = time.monotonic() now = time.monotonic()
if self._is_playing and self._active_player: if self._is_playing and self._active_player:
# accumulate elapsed wall-clock time as playback position;
# seek events and calibration snapshots correct drift periodically
delta_ms = int((now - self._last_tick) * 1000) delta_ms = int((now - self._last_tick) * 1000)
if delta_ms > 0: if delta_ms > 0:
self._position_ms += delta_ms self._position_ms += delta_ms
should_notify = True should_notify = True
# always update last_tick so paused time isn't counted on resume
self._last_tick = now self._last_tick = now
if should_notify and self._on_tick is not None: if should_notify and self._on_tick is not None:
+24 -2
View File
@@ -1,14 +1,24 @@
"""Output abstraction types for watch mode rendering.""" """Output abstraction types for watch mode rendering."""
from __future__ import annotations
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from bisect import bisect_right from bisect import bisect_right
from dataclasses import dataclass from dataclasses import dataclass
from typing import Literal, Optional from enum import Enum
from typing import Optional
from ...lrc import LRCData, LyricLine from ...lrc import LRCData, LyricLine
from ...models import TrackMeta from ...models import TrackMeta
class WatchStatus(str, Enum):
IDLE = "idle"
FETCHING = "fetching"
OK = "ok"
NO_LYRICS = "no_lyrics"
@dataclass(slots=True, frozen=True) @dataclass(slots=True, frozen=True)
class LyricView: class LyricView:
"""View-ready immutable lyric data projected from one normalized LRC object.""" """View-ready immutable lyric data projected from one normalized LRC object."""
@@ -29,13 +39,16 @@ class LyricView:
line_index = 0 line_index = 0
for line in normalized.lines: for line in normalized.lines:
if not isinstance(line, LyricLine): if not isinstance(line, LyricLine):
# skip metadata/tag lines that carry no renderable text
continue continue
text = line.text text = line.text
lines.append(text) lines.append(text)
# use first timestamp; clamp to 0 so bisect always works with non-negative ms
timestamp = line.line_times_ms[0] if line.line_times_ms else 0 timestamp = line.line_times_ms[0] if line.line_times_ms else 0
entries.append((max(0, timestamp), line_index)) entries.append((max(0, timestamp), line_index))
line_index += 1 line_index += 1
# extract timestamps into a flat tuple so bisect_right can binary-search it
timestamps = tuple(timestamp for timestamp, _ in entries) timestamps = tuple(timestamp for timestamp, _ in entries)
return LyricView( return LyricView(
normalized=normalized, normalized=normalized,
@@ -47,12 +60,16 @@ class LyricView:
def signature_cursor(self, at_ms: int) -> tuple: def signature_cursor(self, at_ms: int) -> tuple:
"""Build a stable cursor signature for dedupe decisions.""" """Build a stable cursor signature for dedupe decisions."""
if not self.timed_line_entries: if not self.timed_line_entries:
# untimed lyrics: signature is the full line set — changes only on track change
return ("plain", self.lines) return ("plain", self.lines)
first_ts = self.timed_line_entries[0][0] first_ts = self.timed_line_entries[0][0]
if at_ms < first_ts: if at_ms < first_ts:
# playback hasn't reached the first lyric yet; hold until it does
return ("before_first", first_ts) return ("before_first", first_ts)
# bisect_right gives the insertion point after equal timestamps, so -1 gives
# the last line whose timestamp <= at_ms (i.e. the currently active line)
idx = bisect_right(self.timestamps, at_ms) - 1 idx = bisect_right(self.timestamps, at_ms) - 1
if idx < 0: if idx < 0:
idx = 0 idx = 0
@@ -70,10 +87,15 @@ class WatchState:
lyrics: Optional[LyricView] lyrics: Optional[LyricView]
position_ms: int position_ms: int
offset_ms: int offset_ms: int
status: Literal["fetching", "ok", "no_lyrics", "paused", "idle"] status: WatchStatus
class BaseOutput(ABC): class BaseOutput(ABC):
# When False, the coordinator passes position=0 for signature computation and
# skips tracker-tick-driven emits, so on_state fires at most once per
# track+status transition rather than on every lyric cursor advance.
position_sensitive: bool = True
@abstractmethod @abstractmethod
async def on_state(self, state: WatchState) -> None: async def on_state(self, state: WatchState) -> None:
"""Render or deliver one watch state frame.""" """Render or deliver one watch state frame."""
+19 -9
View File
@@ -1,10 +1,16 @@
"""Pipe output implementation for watch mode.""" """
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:15:17
Description: Pipe output implementation for watch mode.
"""
from __future__ import annotations
from bisect import bisect_right from bisect import bisect_right
from dataclasses import dataclass from dataclasses import dataclass
import sys import sys
from . import BaseOutput, WatchState from . import BaseOutput, WatchState, WatchStatus
@dataclass(slots=True) @dataclass(slots=True)
@@ -13,6 +19,7 @@ class PipeOutput(BaseOutput):
before: int = 0 before: int = 0
after: int = 0 after: int = 0
no_newline: bool = False
def _window_size(self) -> int: def _window_size(self) -> int:
"""Return rendered lyric window size.""" """Return rendered lyric window size."""
@@ -37,12 +44,14 @@ class PipeOutput(BaseOutput):
effective_ms = state.position_ms + state.offset_ms effective_ms = state.position_ms + state.offset_ms
current_line_idx: int | None current_line_idx: int | None
if entries and effective_ms < entries[0][0]: if entries and effective_ms < entries[0][0]:
# Before first timestamp, current lyric is empty and after-window shows upcoming lines. # playback hasn't reached the first lyric yet; treat current slot as empty
# so the after-window can show upcoming lines without a "current" anchor
current_line_idx = None current_line_idx = None
else: else:
if not entries: if not entries:
current_line_idx = 0 current_line_idx = 0
else: else:
# bisect_right - 1 gives the last entry whose timestamp <= effective_ms
current_entry_idx = ( current_entry_idx = (
bisect_right(state.lyrics.timestamps, effective_ms) - 1 bisect_right(state.lyrics.timestamps, effective_ms) - 1
) )
@@ -53,6 +62,8 @@ class PipeOutput(BaseOutput):
out: list[str] = [] out: list[str] = []
for rel in range(-self.before, self.after + 1): for rel in range(-self.before, self.after + 1):
if current_line_idx is None: if current_line_idx is None:
# before-first-timestamp: before/current slots are empty; after slots
# show lines starting from index 0 (rel=1 → line 0, rel=2 → line 1, …)
if rel <= 0: if rel <= 0:
out.append("") out.append("")
continue continue
@@ -69,17 +80,16 @@ class PipeOutput(BaseOutput):
async def on_state(self, state: WatchState) -> None: async def on_state(self, state: WatchState) -> None:
"""Render and flush one frame for the latest watch state.""" """Render and flush one frame for the latest watch state."""
if state.status == "fetching": if state.status == WatchStatus.FETCHING:
lines = self._render_status("[fetching...]") lines = self._render_status("[fetching...]")
elif state.status == "no_lyrics": elif state.status == WatchStatus.NO_LYRICS:
lines = self._render_status("[no lyrics]") lines = self._render_status("[no lyrics]")
elif state.status == "paused": elif state.status == WatchStatus.IDLE:
lines = self._render_status("[paused]")
elif state.status == "idle":
lines = self._render_status("[idle]") lines = self._render_status("[idle]")
else: else:
lines = self._render_lyrics(state) lines = self._render_lyrics(state)
for line in lines: for line in lines:
print(line) # no_newline mode lets callers use \r to overwrite the previous frame in-place
sys.stdout.write(line + ("\n" if not self.no_newline else ""))
sys.stdout.flush() sys.stdout.flush()
+46
View File
@@ -0,0 +1,46 @@
"""
Author: Uyanide pywang0608@foxmail.com
Date: 2026-04-10 08:15:31
Description: Print output implementation for watch mode — one shot per track.
"""
from __future__ import annotations
import sys
from . import BaseOutput, WatchState, WatchStatus
class PrintOutput(BaseOutput):
"""Emit full lyrics to stdout once per track transition, then stay silent.
Deduplication is delegated to the coordinator via position_sensitive=False:
the coordinator uses a fixed position for signatures, so on_state fires at
most once per (status, track_key) transition rather than on every tick.
"""
# fixed position=0 in signatures → coordinator calls on_state only on
# track/status transitions, never on lyric cursor advances
position_sensitive = False
plain: bool
def __init__(self, plain: bool = False) -> None:
self.plain = plain
async def on_state(self, state: WatchState) -> None:
if state.status == WatchStatus.FETCHING or state.status == WatchStatus.IDLE:
return
if state.status == WatchStatus.NO_LYRICS:
# emit a blank line as a machine-readable sentinel for "track changed, no lyrics"
sys.stdout.write("\n")
sys.stdout.flush()
elif state.status == WatchStatus.OK and state.lyrics is not None:
lrc = state.lyrics.normalized
if self.plain:
text = lrc.to_plain()
else:
text = str(lrc)
sys.stdout.write(text + "\n")
sys.stdout.flush()
+4
View File
@@ -0,0 +1,4 @@
{
"syncedLyrics": "[00:01.00]s1\n[00:02.00]s2",
"plainLyrics": "p1\np2"
}
+20
View File
@@ -0,0 +1,20 @@
[
{
"id": 1,
"trackName": "My Love",
"artistName": "Westlife",
"albumName": "Coast To Coast",
"duration": 231.847,
"syncedLyrics": "[00:01.00]hello",
"plainLyrics": "hello"
},
{
"id": 2,
"trackName": "My Love (Live)",
"artistName": "Westlife",
"albumName": "Live",
"duration": 262.0,
"syncedLyrics": "",
"plainLyrics": "hello"
}
]
+28
View File
@@ -0,0 +1,28 @@
{
"message": {
"body": {
"macro_calls": {
"track.richsync.get": {
"message": {
"header": {
"status_code": 200
},
"body": {
"richsync": {
"richsync_body": "[{\"ts\": 1.2, \"x\": \"hello\"}, {\"ts\": 2.34, \"x\": \"world\"}]"
}
}
}
},
"track.subtitles.get": {
"message": {
"header": {
"status_code": 404
},
"body": {}
}
}
}
}
}
}
+32
View File
@@ -0,0 +1,32 @@
{
"message": {
"body": {
"macro_calls": {
"track.richsync.get": {
"message": {
"header": {
"status_code": 404
},
"body": {}
}
},
"track.subtitles.get": {
"message": {
"header": {
"status_code": 200
},
"body": {
"subtitle_list": [
{
"subtitle": {
"subtitle_body": "[{\"text\": \"hello\", \"time\": {\"total\": 1.1}}, {\"text\": \"world\", \"time\": {\"total\": 2.22}}]"
}
}
]
}
}
}
}
}
}
}
+20
View File
@@ -0,0 +1,20 @@
{
"message": {
"body": {
"track_list": [
{
"track": {
"commontrack_id": 123,
"track_length": 232,
"has_subtitles": 1,
"has_richsync": 0,
"track_name": "My Love",
"artist_name": "Westlife",
"album_name": "Coast To Coast",
"instrumental": 0
}
}
]
}
}
}
+5
View File
@@ -0,0 +1,5 @@
{
"lrc": {
"lyric": "[00:01.00]line1\n[00:02.00]line2"
}
}
+32
View File
@@ -0,0 +1,32 @@
{
"result": {
"songs": [
{
"id": 2080607,
"name": "My Love",
"dt": 231941,
"ar": [
{
"name": "Westlife"
}
],
"al": {
"name": "Unbreakable"
}
},
{
"id": 572412968,
"name": "My Love",
"dt": 231000,
"ar": [
{
"name": "Westlife"
}
],
"al": {
"name": "Pure... Love"
}
}
]
}
}
+6
View File
@@ -0,0 +1,6 @@
{
"code": 0,
"data": {
"lyric": "[00:01.00]hello\n[00:02.00]world"
}
}
+33
View File
@@ -0,0 +1,33 @@
{
"code": 0,
"data": {
"list": [
{
"mid": "mid1",
"interval": 232,
"name": "My Love",
"singer": [
{
"name": "Westlife"
}
],
"album": {
"name": "Coast To Coast"
}
},
{
"mid": "mid2",
"interval": 248,
"name": "My Love (Album Version)",
"singer": [
{
"name": "Little Texas"
}
],
"album": {
"name": "Greatest Hits"
}
}
]
}
}
+9
View File
@@ -0,0 +1,9 @@
{
"lyrics": {
"syncType": "LINE_SYNCED",
"lines": [
{"startTimeMs": "1000", "words": "hello"},
{"startTimeMs": "2500", "words": "world"}
]
}
}
+9
View File
@@ -0,0 +1,9 @@
{
"lyrics": {
"syncType": "UNSYNCED",
"lines": [
{"startTimeMs": "0", "words": "plain one"},
{"startTimeMs": "0", "words": "plain two"}
]
}
}
+2
View File
@@ -1,3 +1,5 @@
from __future__ import annotations
import pytest import pytest
from lrx_cli.config import AppConfig, CredentialConfig, WatchConfig, load_config from lrx_cli.config import AppConfig, CredentialConfig, WatchConfig, load_config
+463 -79
View File
@@ -1,19 +1,42 @@
from dataclasses import replace from __future__ import annotations
from pathlib import Path
from dataclasses import replace
import asyncio
import json
from pathlib import Path
from typing import Callable
import httpx
import pytest import pytest
from lrx_cli.authenticators import create_authenticators
from lrx_cli.cache import CacheEngine
from lrx_cli.config import AppConfig, load_config from lrx_cli.config import AppConfig, load_config
from lrx_cli.core import LrcManager from lrx_cli.core import LrcManager
from lrx_cli.fetchers import FetcherMethodType from lrx_cli.fetchers import FetcherMethodType, create_fetchers
from lrx_cli.models import TrackMeta from lrx_cli.fetchers.lrclib import LrclibFetcher, _parse_lrclib_response
from tests.marks import ( from lrx_cli.fetchers.lrclib_search import (
requires_musixmatch_token, LrclibSearchFetcher,
requires_qq_music, _parse_lrclib_search_results,
requires_spotify,
) )
from lrx_cli.fetchers.musixmatch import (
MusixmatchFetcher,
MusixmatchSpotifyFetcher,
_parse_mxm_macro,
_parse_mxm_search,
)
from lrx_cli.fetchers.netease import (
NeteaseFetcher,
_parse_netease_lyrics,
_parse_netease_search,
)
from lrx_cli.fetchers.qqmusic import QQMusicFetcher, _parse_qq_lyrics, _parse_qq_search
from lrx_cli.fetchers.spotify import SpotifyFetcher, _parse_spotify_lyrics
from lrx_cli.lrc import LRCData
from lrx_cli.models import CacheStatus, TrackMeta
from tests.marks import requires_musixmatch_token, requires_qq_music, requires_spotify
SAMPLE_SPOTIFY_TRACK: TrackMeta = TrackMeta( SAMPLE_TRACK = TrackMeta(
title="One Last Kiss", title="One Last Kiss",
artist="Hikaru Utada", artist="Hikaru Utada",
album="One Last Kiss", album="One Last Kiss",
@@ -22,86 +45,152 @@ SAMPLE_SPOTIFY_TRACK: TrackMeta = TrackMeta(
url="https://open.spotify.com/track/5RhWszHMSKzb7KiXk4Ae0M", url="https://open.spotify.com/track/5RhWszHMSKzb7KiXk4Ae0M",
) )
SAMPLE_SPOTIFY_TRACK_ALBUM_MODIFIED = replace(SAMPLE_SPOTIFY_TRACK, album="BADモード") SAMPLE_TRACK_ALBUM_MODIFIED = replace(SAMPLE_TRACK, album="BADモード")
SAMPLE_TRACK_ARTIST_MODIFIED = replace(SAMPLE_TRACK, artist="宇多田ヒカル")
SAMPLE_SPOTIFY_TRACK_ARTIST_MODIFIED = replace( SAMPLE_TRACK_ALBUM_ARTIST_MODIFIED = replace(
SAMPLE_SPOTIFY_TRACK, artist="宇多田ヒカル" SAMPLE_TRACK,
artist="宇多田ヒカル",
album="BADモード",
) )
SAMPLE_SPOTIFY_TRACK_ALBUM_ARTIST_MODIFIED = replace( _FIXTURE_DIR = Path(__file__).parent / "fixtures" / "fetchers"
SAMPLE_SPOTIFY_TRACK, artist="宇多田ヒカル", album="BADモード" _NETWORK_TIMEOUT = 20.0
)
ParserFunc = Callable[[dict], LRCData | None]
@pytest.fixture @pytest.fixture
def lrc_manager(tmp_path: Path) -> LrcManager: def lrc_manager(tmp_path: Path) -> LrcManager:
"""LrcManager with empty credentials (no auth required)."""
return LrcManager(str(tmp_path / "cache.db"), AppConfig()) return LrcManager(str(tmp_path / "cache.db"), AppConfig())
@pytest.fixture @pytest.fixture
def cred_lrc_manager(tmp_path: Path) -> LrcManager: def cred_lrc_manager(tmp_path: Path) -> LrcManager:
"""LrcManager with credentials from config.toml (for CI/network tests)."""
return LrcManager(str(tmp_path / "cache.db"), load_config()) return LrcManager(str(tmp_path / "cache.db"), load_config())
def _fetch_and_assert( @pytest.fixture
def fetcher_runtime_anonymous(tmp_path: Path):
cfg = AppConfig()
cache = CacheEngine(str(tmp_path / "network-anon-cache.db"))
authenticators = create_authenticators(cache, cfg)
fetchers = create_fetchers(cache, authenticators, cfg)
return fetchers, cfg
@pytest.fixture
def fetcher_runtime_credentialed(tmp_path: Path):
cfg = load_config()
cache = CacheEngine(str(tmp_path / "network-cred-cache.db"))
authenticators = create_authenticators(cache, cfg)
fetchers = create_fetchers(cache, authenticators, cfg)
return fetchers, cfg
def _load_fixture(name: str) -> dict | list:
return json.loads((_FIXTURE_DIR / name).read_text(encoding="utf-8"))
def _assert_shape(actual: object, fixture: object) -> None:
"""Assert actual payload contains fixture structure recursively.
- dict: all fixture keys must exist with matching nested shape
- list: actual must contain at least fixture length and each indexed shape must match
- scalar: runtime type must match fixture type
"""
if isinstance(fixture, dict):
assert isinstance(actual, dict)
for key, value in fixture.items():
assert key in actual
_assert_shape(actual[key], value)
return
if isinstance(fixture, list):
assert isinstance(actual, list)
assert len(actual) >= len(fixture)
for idx, value in enumerate(fixture):
_assert_shape(actual[idx], value)
return
if fixture is None:
return
assert isinstance(actual, type(fixture))
def _fetch_with_method(
lrc_manager: LrcManager, lrc_manager: LrcManager,
method: FetcherMethodType, method: FetcherMethodType,
expect_fail: bool = False, *,
bypass_cache: bool = True, bypass_cache: bool = False,
) -> None: ):
result = lrc_manager.fetch_for_track( return lrc_manager.fetch_for_track(
SAMPLE_SPOTIFY_TRACK, force_method=method, bypass_cache=bypass_cache SAMPLE_TRACK,
force_method=method,
bypass_cache=bypass_cache,
) )
if expect_fail:
# Cache-search fetcher behavior
def test_cache_search_no_cache_fails(lrc_manager: LrcManager):
result = _fetch_with_method(lrc_manager, "cache-search", bypass_cache=False)
assert result is None assert result is None
else:
def test_cache_search_exact_hit(lrc_manager: LrcManager):
expected = "[00:00.01]lyrics"
lrc_manager.manual_insert(SAMPLE_TRACK, expected)
result = lrc_manager.fetch_for_track(
SAMPLE_TRACK,
force_method="cache-search",
bypass_cache=False,
)
assert result is not None assert result is not None
assert result.status == "SUCCESS_SYNCED"
assert result.lyrics is not None assert result.lyrics is not None
assert result.lyrics.to_text() == expected
def test_cache_search_fetcher_without_cache(lrc_manager: LrcManager):
_fetch_and_assert(lrc_manager, "cache-search", expect_fail=True, bypass_cache=False)
@pytest.mark.parametrize( @pytest.mark.parametrize(
"query_track", "query_track",
[ [
pytest.param(SAMPLE_SPOTIFY_TRACK, id="exact_match"), pytest.param(SAMPLE_TRACK_ARTIST_MODIFIED, id="artist_modified"),
pytest.param(SAMPLE_SPOTIFY_TRACK_ARTIST_MODIFIED, id="artist_modified"), pytest.param(SAMPLE_TRACK_ALBUM_MODIFIED, id="album_modified"),
pytest.param(SAMPLE_SPOTIFY_TRACK_ALBUM_MODIFIED, id="album_modified"), pytest.param(SAMPLE_TRACK_ALBUM_ARTIST_MODIFIED, id="album_artist_modified"),
pytest.param(
SAMPLE_SPOTIFY_TRACK_ALBUM_ARTIST_MODIFIED, id="album_artist_modified"
),
], ],
) )
def test_cache_search_fetcher_with_fuzzy_metadata( def test_cache_search_fuzzy_hit(lrc_manager: LrcManager, query_track: TrackMeta):
lrc_manager: LrcManager, query_track: TrackMeta expected = "[00:00.01]lyrics"
): lrc_manager.manual_insert(SAMPLE_TRACK, expected)
expected_lrc = "[00:00.01]lyrics"
lrc_manager.manual_insert(SAMPLE_SPOTIFY_TRACK, expected_lrc)
result = lrc_manager.fetch_for_track( result = lrc_manager.fetch_for_track(
query_track, force_method="cache-search", bypass_cache=False query_track,
force_method="cache-search",
bypass_cache=False,
) )
assert result is not None assert result is not None
assert result.lyrics is not None assert result.lyrics is not None
assert result.lyrics.to_text() == expected_lrc assert result.lyrics.to_text() == expected
def test_cache_search_fetcher_prefer_better_match(lrc_manager: LrcManager): def test_cache_search_prefer_better_match(lrc_manager: LrcManager):
lrc_manager.manual_insert( lrc_manager.manual_insert(
SAMPLE_SPOTIFY_TRACK_ARTIST_MODIFIED, "[00:00.01]artist modified" SAMPLE_TRACK_ARTIST_MODIFIED,
"[00:00.01]artist modified",
) )
lrc_manager.manual_insert( lrc_manager.manual_insert(
SAMPLE_SPOTIFY_TRACK_ALBUM_ARTIST_MODIFIED, "[00:00.01]artist+album modified" SAMPLE_TRACK_ALBUM_ARTIST_MODIFIED,
"[00:00.01]artist+album modified",
) )
result = lrc_manager.fetch_for_track( result = lrc_manager.fetch_for_track(
SAMPLE_SPOTIFY_TRACK, force_method="cache-search", bypass_cache=False SAMPLE_TRACK,
force_method="cache-search",
bypass_cache=False,
) )
assert result is not None assert result is not None
@@ -109,52 +198,347 @@ def test_cache_search_fetcher_prefer_better_match(lrc_manager: LrcManager):
assert result.lyrics.to_text() == "[00:00.01]artist modified" assert result.lyrics.to_text() == "[00:00.01]artist modified"
# API response format for every fetcher
@pytest.mark.network @pytest.mark.network
@pytest.mark.parametrize( def test_api_lrclib_response_shape(fetcher_runtime_anonymous):
"method, expect_fail", fetchers, _cfg = fetcher_runtime_anonymous
[ fetcher = fetchers["lrclib"]
("lrclib", False), assert isinstance(fetcher, LrclibFetcher)
("lrclib-search", False),
("netease", False), async def _run() -> dict:
("spotify", True), # requires auth async with httpx.AsyncClient(timeout=_NETWORK_TIMEOUT) as client:
("qqmusic", True), # requires api response = await fetcher._api_get(client, SAMPLE_TRACK)
], assert response.status_code == 200
) payload = response.json()
def test_anonymous_remote_fetchers( assert isinstance(payload, dict)
lrc_manager: LrcManager, return payload
method: FetcherMethodType,
expect_fail: bool, payload = asyncio.run(_run())
): _assert_shape(payload, _load_fixture("lrclib_response.json"))
_fetch_and_assert(lrc_manager, method, expect_fail)
@pytest.mark.network
def test_api_lrclib_search_response_shape(fetcher_runtime_anonymous):
fetchers, _cfg = fetcher_runtime_anonymous
fetcher = fetchers["lrclib-search"]
assert isinstance(fetcher, LrclibSearchFetcher)
async def _run() -> list[dict]:
async with httpx.AsyncClient(timeout=_NETWORK_TIMEOUT) as client:
items, had_error = await fetcher._api_candidates(client, SAMPLE_TRACK)
assert had_error is False
return items
payload = asyncio.run(_run())
_assert_shape(payload, _load_fixture("lrclib_search_results.json"))
@pytest.mark.network
def test_api_netease_response_shape(fetcher_runtime_anonymous):
fetchers, _cfg = fetcher_runtime_anonymous
fetcher = fetchers["netease"]
assert isinstance(fetcher, NeteaseFetcher)
async def _run() -> tuple[dict, dict]:
async with httpx.AsyncClient(timeout=_NETWORK_TIMEOUT) as client:
search = await fetcher._api_search_track(client, SAMPLE_TRACK, 5)
lyric = await fetcher._api_lyric_track(client, SAMPLE_TRACK, 5)
assert isinstance(search, dict)
assert isinstance(lyric, dict)
return search, lyric
search_payload, lyric_payload = asyncio.run(_run())
_assert_shape(search_payload, _load_fixture("netease_search.json"))
_assert_shape(lyric_payload, _load_fixture("netease_lyrics.json"))
@pytest.mark.network @pytest.mark.network
@requires_spotify @requires_spotify
def test_spotify_fetcher(cred_lrc_manager: LrcManager): def test_api_spotify_response_shape(fetcher_runtime_credentialed):
_fetch_and_assert(cred_lrc_manager, "spotify") fetchers, _cfg = fetcher_runtime_credentialed
fetcher = fetchers["spotify"]
assert isinstance(fetcher, SpotifyFetcher)
async def _run() -> dict:
payload = await fetcher._api_lyrics(SAMPLE_TRACK)
assert isinstance(payload, dict)
return payload
payload = asyncio.run(_run())
_assert_shape(payload, _load_fixture("spotify_synced.json"))
@pytest.mark.network @pytest.mark.network
@requires_qq_music @requires_qq_music
def test_qqmusic_fetcher(cred_lrc_manager: LrcManager): def test_api_qqmusic_response_shape(fetcher_runtime_credentialed):
_fetch_and_assert(cred_lrc_manager, "qqmusic") fetchers, _cfg = fetcher_runtime_credentialed
fetcher = fetchers["qqmusic"]
assert isinstance(fetcher, QQMusicFetcher)
async def _run() -> tuple[dict, dict]:
search = await fetcher._api_search(SAMPLE_TRACK, 10)
lyric = await fetcher._api_lyric_track(SAMPLE_TRACK, 10)
assert isinstance(search, dict)
assert isinstance(lyric, dict)
return search, lyric
search_payload, lyric_payload = asyncio.run(_run())
_assert_shape(search_payload, _load_fixture("qq_search.json"))
_assert_shape(lyric_payload, _load_fixture("qq_lyrics.json"))
@pytest.mark.network @pytest.mark.network
def test_musixmatch_anonymous_fetcher(lrc_manager: LrcManager): def test_api_musixmatch_anonymous_response_shape(fetcher_runtime_anonymous):
# These fetchers should be tested in a single test to share the same usertoken """Anonymous musixmatch calls must share one cache/auth context in this test."""
# Otherwise the second may fail due to rate limits fetchers, _cfg = fetcher_runtime_anonymous
_fetch_and_assert(lrc_manager, "musixmatch", expect_fail=False) search_fetcher = fetchers["musixmatch"]
_fetch_and_assert(lrc_manager, "musixmatch-spotify", expect_fail=False) spotify_fetcher = fetchers["musixmatch-spotify"]
assert isinstance(search_fetcher, MusixmatchFetcher)
assert isinstance(spotify_fetcher, MusixmatchSpotifyFetcher)
async def _run() -> tuple[dict, dict, dict]:
search = await search_fetcher._api_search_track(SAMPLE_TRACK)
macro_from_search = await search_fetcher._api_macro_track(SAMPLE_TRACK)
macro_from_spotify = await spotify_fetcher._api_macro_track(SAMPLE_TRACK)
assert isinstance(search, dict)
assert isinstance(macro_from_search, dict)
assert isinstance(macro_from_spotify, dict)
return search, macro_from_search, macro_from_spotify
search_payload, macro_payload, spotify_macro_payload = asyncio.run(_run())
_assert_shape(search_payload, _load_fixture("musixmatch_search.json"))
_assert_shape(macro_payload, _load_fixture("musixmatch_macro_richsync.json"))
_assert_shape(
spotify_macro_payload, _load_fixture("musixmatch_macro_richsync.json")
)
@pytest.mark.network @pytest.mark.network
@requires_musixmatch_token @requires_musixmatch_token
def test_musixmatch_fetcher(cred_lrc_manager: LrcManager): def test_api_musixmatch_token_response_shape(fetcher_runtime_credentialed):
_fetch_and_assert(cred_lrc_manager, "musixmatch") fetchers, _cfg = fetcher_runtime_credentialed
_fetch_and_assert(cred_lrc_manager, "musixmatch-spotify") search_fetcher = fetchers["musixmatch"]
spotify_fetcher = fetchers["musixmatch-spotify"]
assert isinstance(search_fetcher, MusixmatchFetcher)
assert isinstance(spotify_fetcher, MusixmatchSpotifyFetcher)
async def _run() -> tuple[dict, dict, dict]:
search = await search_fetcher._api_search_track(SAMPLE_TRACK)
macro_from_search = await search_fetcher._api_macro_track(SAMPLE_TRACK)
macro_from_spotify = await spotify_fetcher._api_macro_track(SAMPLE_TRACK)
assert isinstance(search, dict)
assert isinstance(macro_from_search, dict)
assert isinstance(macro_from_spotify, dict)
return search, macro_from_search, macro_from_spotify
search_payload, macro_payload, spotify_macro_payload = asyncio.run(_run())
_assert_shape(search_payload, _load_fixture("musixmatch_search.json"))
_assert_shape(macro_payload, _load_fixture("musixmatch_macro_richsync.json"))
_assert_shape(
spotify_macro_payload, _load_fixture("musixmatch_macro_richsync.json")
)
def test_local_fetcher(lrc_manager: LrcManager): # Parse fixture JSON into real data structures
# Since this not a local track
_fetch_and_assert(lrc_manager, "local", True)
@pytest.mark.parametrize(
"fixture_name,parser,expected_status",
[
pytest.param(
"spotify_synced.json",
_parse_spotify_lyrics,
"SUCCESS_SYNCED",
id="spotify-synced",
),
pytest.param(
"spotify_unsynced.json",
_parse_spotify_lyrics,
"SUCCESS_UNSYNCED",
id="spotify-unsynced",
),
],
)
def test_parse_spotify_fixture(
fixture_name: str,
parser: ParserFunc,
expected_status: str,
):
payload = _load_fixture(fixture_name)
assert isinstance(payload, dict)
parsed = parser(payload)
assert parsed is not None
assert parsed.detect_sync_status().value == expected_status
if expected_status == "SUCCESS_SYNCED":
assert parsed.to_text() == "[00:01.00]hello\n[00:02.50]world"
else:
assert parsed.to_text() == "[00:00.00]plain one\n[00:00.00]plain two"
def test_parse_qq_search_fixture() -> None:
payload = _load_fixture("qq_search.json")
assert isinstance(payload, dict)
parsed = _parse_qq_search(payload)
assert len(parsed) == 2
assert parsed[0].item == "mid1"
assert parsed[0].title == "My Love"
assert parsed[0].artist == "Westlife"
assert parsed[0].duration_ms == 232000.0
assert parsed[0].album == "Coast To Coast"
assert parsed[1].item == "mid2"
assert parsed[1].title == "My Love (Album Version)"
assert parsed[1].artist == "Little Texas"
assert parsed[1].duration_ms == 248000.0
assert parsed[1].album == "Greatest Hits"
def test_parse_qq_lyrics_fixture() -> None:
payload = _load_fixture("qq_lyrics.json")
assert isinstance(payload, dict)
parsed = _parse_qq_lyrics(payload)
assert parsed is not None
assert len(parsed) == 2
assert parsed.detect_sync_status() == CacheStatus.SUCCESS_SYNCED
def test_parse_lrclib_response_fixture() -> None:
payload = _load_fixture("lrclib_response.json")
assert isinstance(payload, dict)
parsed = _parse_lrclib_response(payload)
assert parsed.synced is not None and parsed.synced.lyrics is not None
assert parsed.unsynced is not None and parsed.unsynced.lyrics is not None
assert parsed.synced.status == CacheStatus.SUCCESS_SYNCED
assert parsed.unsynced.status == CacheStatus.SUCCESS_UNSYNCED
assert parsed.synced.lyrics.to_text() == "[00:01.00]s1\n[00:02.00]s2"
assert parsed.unsynced.lyrics.to_text() == "[00:00.00]p1\n[00:00.00]p2"
def test_parse_lrclib_search_results_fixture() -> None:
payload = _load_fixture("lrclib_search_results.json")
assert isinstance(payload, list)
parsed = _parse_lrclib_search_results(payload)
assert len(parsed) == 2
assert parsed[0].item.get("id") == 1
assert parsed[0].duration_ms == 231847.0
assert parsed[0].is_synced is True
assert parsed[0].title == "My Love"
assert parsed[0].artist == "Westlife"
assert parsed[0].album == "Coast To Coast"
assert parsed[1].item.get("id") == 2
assert parsed[1].duration_ms == 262000.0
assert parsed[1].is_synced is False
assert parsed[1].title == "My Love (Live)"
assert parsed[1].artist == "Westlife"
assert parsed[1].album == "Live"
def test_parse_netease_search_fixture() -> None:
payload = _load_fixture("netease_search.json")
assert isinstance(payload, dict)
parsed = _parse_netease_search(payload)
assert len(parsed) == 2
assert parsed[0].item == 2080607
assert parsed[0].title == "My Love"
assert parsed[0].artist == "Westlife"
assert parsed[0].duration_ms == 231941.0
assert parsed[0].album == "Unbreakable"
assert parsed[1].item == 572412968
assert parsed[1].artist == "Westlife"
assert parsed[1].duration_ms == 231000.0
def test_parse_netease_lyrics_fixture() -> None:
payload = _load_fixture("netease_lyrics.json")
assert isinstance(payload, dict)
parsed = _parse_netease_lyrics(payload)
assert parsed is not None
assert len(parsed) == 2
assert parsed.detect_sync_status() == CacheStatus.SUCCESS_SYNCED
assert parsed.to_text() == "[00:01.00]line1\n[00:02.00]line2"
def test_parse_musixmatch_search_fixture() -> None:
payload = _load_fixture("musixmatch_search.json")
assert isinstance(payload, dict)
parsed = _parse_mxm_search(payload)
assert len(parsed) == 1
assert parsed[0].item == 123
assert parsed[0].is_synced is True
assert parsed[0].title == "My Love"
assert parsed[0].artist == "Westlife"
assert parsed[0].duration_ms == 232000.0
assert parsed[0].album == "Coast To Coast"
def test_parse_musixmatch_macro_fixture() -> None:
payload = _load_fixture("musixmatch_macro_richsync.json")
assert isinstance(payload, dict)
parsed = _parse_mxm_macro(payload)
assert parsed is not None
assert len(parsed) == 2
assert parsed.detect_sync_status() == CacheStatus.SUCCESS_SYNCED
def test_parse_musixmatch_macro_subtitle_fallback_fixture() -> None:
payload = _load_fixture("musixmatch_macro_subtitle.json")
assert isinstance(payload, dict)
parsed = _parse_mxm_macro(payload)
assert parsed is not None
assert len(parsed) == 2
assert parsed.detect_sync_status() == CacheStatus.SUCCESS_SYNCED
assert parsed.to_text() == "[00:01.10]hello\n[00:02.22]world"
# Empty / partial-error response handling
def test_parse_spotify_empty_or_invalid() -> None:
assert _parse_spotify_lyrics({}) is None
assert _parse_spotify_lyrics({"lyrics": {"lines": []}}) is None
def test_parse_qq_search_empty_or_error() -> None:
assert _parse_qq_search({}) == []
assert _parse_qq_search({"code": 1}) == []
assert _parse_qq_search({"code": 0, "data": {"list": []}}) == []
def test_parse_qq_lyrics_empty_or_error() -> None:
assert _parse_qq_lyrics({}) is None
assert _parse_qq_lyrics({"code": 1}) is None
assert _parse_qq_lyrics({"code": 0, "data": {"lyric": ""}}) is None
def test_parse_lrclib_response_empty_or_partial() -> None:
parsed = _parse_lrclib_response({})
assert parsed.synced is not None
assert parsed.unsynced is not None
assert parsed.synced.lyrics is None
assert parsed.unsynced.lyrics is None
parsed_partial = _parse_lrclib_response({"syncedLyrics": "[00:01.00]line"})
assert (
parsed_partial.synced is not None and parsed_partial.synced.lyrics is not None
)
assert parsed_partial.unsynced is not None
def test_parse_netease_empty_or_partial() -> None:
assert _parse_netease_search({}) == []
assert _parse_netease_search({"result": {"songs": []}}) == []
assert _parse_netease_lyrics({}) is None
assert _parse_netease_lyrics({"lrc": {"lyric": ""}}) is None
def test_parse_musixmatch_empty_or_partial() -> None:
assert _parse_mxm_search({}) == []
assert _parse_mxm_search({"message": {"body": {"track_list": []}}}) == []
assert _parse_mxm_macro({}) is None
assert _parse_mxm_macro({"message": {"body": []}}) is None
+490 -263
View File
@@ -2,20 +2,22 @@ from __future__ import annotations
import asyncio import asyncio
from pathlib import Path from pathlib import Path
from typing import Optional
from lrx_cli.lrc import LRCData from lrx_cli.lrc import LRCData
from lrx_cli.models import TrackMeta from lrx_cli.models import TrackMeta
from lrx_cli.watch.control import ControlClient, ControlServer, parse_delta from lrx_cli.watch.control import ControlClient, ControlServer, parse_delta
from lrx_cli.watch.view import BaseOutput, LyricView, WatchState from lrx_cli.watch.view import BaseOutput, LyricView, WatchState, WatchStatus
from lrx_cli.watch.view.pipe import PipeOutput from lrx_cli.watch.view.pipe import PipeOutput
from lrx_cli.watch.view.print import PrintOutput
from lrx_cli.watch.player import ActivePlayerSelector, PlayerState, PlayerTarget from lrx_cli.watch.player import ActivePlayerSelector, PlayerState, PlayerTarget
from lrx_cli.watch.fetcher import LyricFetcher
from lrx_cli.config import AppConfig from lrx_cli.config import AppConfig
from lrx_cli.watch.tracker import PositionTracker from lrx_cli.watch.tracker import PositionTracker
from lrx_cli.watch.session import WatchCoordinator from lrx_cli.watch.session import WatchCoordinator
TEST_CONFIG = AppConfig() TEST_CONFIG = AppConfig()
BUS = "org.mpris.MediaPlayer2.spotify"
def test_parse_delta_supports_plus_minus_and_reset() -> None: def test_parse_delta_supports_plus_minus_and_reset() -> None:
@@ -24,79 +26,88 @@ def test_parse_delta_supports_plus_minus_and_reset() -> None:
assert parse_delta("0") == (True, 0, None) assert parse_delta("0") == (True, 0, None)
# PlayerTarget
def test_player_target_allows_all_when_hint_empty() -> None: def test_player_target_allows_all_when_hint_empty() -> None:
target = PlayerTarget() target = PlayerTarget()
assert target.allows("org.mpris.MediaPlayer2.spotify") is True assert target.allows("org.mpris.MediaPlayer2.spotify") is True
assert target.allows("org.mpris.MediaPlayer2.mpd") is True assert target.allows("org.mpris.MediaPlayer2.mpd") is True
def test_player_target_filters_by_case_insensitive_substring() -> None: def test_player_target_filters_by_case_insensitive_substring() -> None:
target = PlayerTarget("Spot") target = PlayerTarget("Spot")
assert target.allows("org.mpris.MediaPlayer2.spotify") is True assert target.allows("org.mpris.MediaPlayer2.spotify") is True
assert target.allows("org.mpris.MediaPlayer2.mpd") is False assert target.allows("org.mpris.MediaPlayer2.mpd") is False
def test_player_target_reports_blacklisted_hint() -> None: def test_player_target_hint_allows_regardless_of_blacklist() -> None:
target = PlayerTarget("spot", player_blacklist=("spotify",)) # --player bypasses PLAYER_BLACKLIST; PlayerTarget.allows() reflects the hint only
assert target.validation_error() is not None target = PlayerTarget("spot")
assert target.allows("org.mpris.MediaPlayer2.spotify") is True
# ActivePlayerSelector
def _ps(bus: str, status: str = "Playing") -> PlayerState:
return PlayerState(bus_name=bus, status=status, track=TrackMeta(title="T"))
def test_active_player_selector_returns_none_when_no_players() -> None:
assert ActivePlayerSelector.select({}, None, "spotify") is None
def test_active_player_selector_prefers_single_playing() -> None: def test_active_player_selector_prefers_single_playing() -> None:
players = { players = {
"org.mpris.MediaPlayer2.foo": PlayerState( "org.mpris.MediaPlayer2.foo": _ps("org.mpris.MediaPlayer2.foo", "Paused"),
bus_name="org.mpris.MediaPlayer2.foo", "org.mpris.MediaPlayer2.bar": _ps("org.mpris.MediaPlayer2.bar", "Playing"),
status="Paused",
track=TrackMeta(title="A"),
),
"org.mpris.MediaPlayer2.bar": PlayerState(
bus_name="org.mpris.MediaPlayer2.bar",
status="Playing",
track=TrackMeta(title="B"),
),
} }
assert ( assert (
ActivePlayerSelector.select(players, None, TEST_CONFIG.general.preferred_player) ActivePlayerSelector.select(players, None, "spotify")
== "org.mpris.MediaPlayer2.bar" == "org.mpris.MediaPlayer2.bar"
) )
def test_active_player_selector_prefers_keyword_among_multiple_playing() -> None:
players = {
"org.mpris.MediaPlayer2.foo": _ps("org.mpris.MediaPlayer2.foo"),
"org.mpris.MediaPlayer2.spotify": _ps("org.mpris.MediaPlayer2.spotify"),
}
assert (
ActivePlayerSelector.select(players, None, "spotify")
== "org.mpris.MediaPlayer2.spotify"
)
def test_active_player_selector_uses_last_active_when_no_playing() -> None: def test_active_player_selector_uses_last_active_when_no_playing() -> None:
players = { players = {
"org.mpris.MediaPlayer2.foo": PlayerState( "org.mpris.MediaPlayer2.foo": _ps("org.mpris.MediaPlayer2.foo", "Paused"),
bus_name="org.mpris.MediaPlayer2.foo", "org.mpris.MediaPlayer2.bar": _ps("org.mpris.MediaPlayer2.bar", "Stopped"),
status="Paused",
track=TrackMeta(title="A"),
),
"org.mpris.MediaPlayer2.bar": PlayerState(
bus_name="org.mpris.MediaPlayer2.bar",
status="Stopped",
track=TrackMeta(title="B"),
),
} }
assert ( assert (
ActivePlayerSelector.select( ActivePlayerSelector.select(players, "org.mpris.MediaPlayer2.bar", "spotify")
players,
"org.mpris.MediaPlayer2.bar",
TEST_CONFIG.general.preferred_player,
)
== "org.mpris.MediaPlayer2.bar" == "org.mpris.MediaPlayer2.bar"
) )
def test_active_player_selector_falls_back_to_first_when_no_preference() -> None:
players = {
"org.mpris.MediaPlayer2.foo": _ps("org.mpris.MediaPlayer2.foo", "Paused"),
}
result = ActivePlayerSelector.select(players, None, "")
assert result == "org.mpris.MediaPlayer2.foo"
# PositionTracker
def test_position_tracker_seeked_calibrates_immediately() -> None: def test_position_tracker_seeked_calibrates_immediately() -> None:
async def _run() -> None: async def _run() -> None:
async def _poll(_bus: str): tracker = PositionTracker(lambda _: asyncio.sleep(0, result=1200), TEST_CONFIG)
return 1200
tracker = PositionTracker(_poll, TEST_CONFIG)
await tracker.start() await tracker.start()
await tracker.set_active_player( await tracker.set_active_player(BUS, "Playing", "track-A")
"org.mpris.MediaPlayer2.foo", "Playing", "track-A" await tracker.on_seeked(BUS, 3500)
)
await tracker.on_seeked("org.mpris.MediaPlayer2.foo", 3500)
pos = await tracker.get_position_ms() pos = await tracker.get_position_ms()
await tracker.stop() await tracker.stop()
assert pos >= 3500 assert pos >= 3500
@@ -104,74 +115,70 @@ def test_position_tracker_seeked_calibrates_immediately() -> None:
asyncio.run(_run()) asyncio.run(_run())
def test_position_tracker_playback_status_pause_stops_fast_growth() -> None: def test_position_tracker_pause_stops_position_growth() -> None:
async def _run() -> None: async def _run() -> None:
async def _poll(_bus: str): tracker = PositionTracker(lambda _: asyncio.sleep(0, result=0), TEST_CONFIG)
return 0
tracker = PositionTracker(_poll, TEST_CONFIG)
await tracker.start() await tracker.start()
await tracker.set_active_player( await tracker.set_active_player(BUS, "Playing", "track-A")
"org.mpris.MediaPlayer2.foo", "Playing", "track-A"
)
await asyncio.sleep(0.08) await asyncio.sleep(0.08)
before = await tracker.get_position_ms() before = await tracker.get_position_ms()
await tracker.on_playback_status(BUS, "Paused")
await tracker.on_playback_status("org.mpris.MediaPlayer2.foo", "Paused")
await asyncio.sleep(0.08) await asyncio.sleep(0.08)
after = await tracker.get_position_ms() after = await tracker.get_position_ms()
await tracker.stop() await tracker.stop()
assert before > 0 assert before > 0
assert after - before < 20 assert after - before < 20
asyncio.run(_run()) asyncio.run(_run())
def test_position_tracker_playback_status_playing_calibrates_once() -> None: def test_position_tracker_resume_via_playback_status_calibrates() -> None:
async def _run() -> None: async def _run() -> None:
async def _poll(_bus: str): tracker = PositionTracker(lambda _: asyncio.sleep(0, result=50000), TEST_CONFIG)
return 50000
tracker = PositionTracker(_poll, TEST_CONFIG)
await tracker.start() await tracker.start()
await tracker.set_active_player( await tracker.set_active_player(BUS, "Paused", "track-A")
"org.mpris.MediaPlayer2.foo", "Paused", "track-A" await tracker.on_playback_status(BUS, "Playing")
)
await tracker.on_playback_status("org.mpris.MediaPlayer2.foo", "Playing")
pos = await tracker.get_position_ms() pos = await tracker.get_position_ms()
await tracker.stop() await tracker.stop()
assert pos >= 50000 assert pos >= 50000
asyncio.run(_run()) asyncio.run(_run())
def test_position_tracker_set_active_player_playing_calibrates_on_resume() -> None: def test_position_tracker_paused_start_calibrates_initial_position() -> None:
async def _run() -> None: """set_active_player with Paused must still calibrate position — player may be mid-song."""
async def _poll(_bus: str):
return 42000
tracker = PositionTracker(_poll, TEST_CONFIG) async def _run() -> None:
tracker = PositionTracker(lambda _: asyncio.sleep(0, result=45000), TEST_CONFIG)
await tracker.start() await tracker.start()
await tracker.set_active_player( await tracker.set_active_player(BUS, "Paused", "track-A")
"org.mpris.MediaPlayer2.foo", "Paused", "track-A"
)
await tracker.set_active_player(
"org.mpris.MediaPlayer2.foo", "Playing", "track-A"
)
pos = await tracker.get_position_ms() pos = await tracker.get_position_ms()
await tracker.stop() await tracker.stop()
assert pos >= 45000
asyncio.run(_run())
def test_position_tracker_resume_via_set_active_player_calibrates() -> None:
async def _run() -> None:
tracker = PositionTracker(lambda _: asyncio.sleep(0, result=42000), TEST_CONFIG)
await tracker.start()
await tracker.set_active_player(BUS, "Paused", "track-A")
await tracker.set_active_player(BUS, "Playing", "track-A")
pos = await tracker.get_position_ms()
await tracker.stop()
assert pos >= 42000 assert pos >= 42000
asyncio.run(_run()) asyncio.run(_run())
# ControlServer and ControlClient
def test_control_server_and_client_roundtrip(tmp_path: Path) -> None: def test_control_server_and_client_roundtrip(tmp_path: Path) -> None:
async def _run() -> None: async def _run() -> None:
class _Session: class _Session:
def __init__(self): def __init__(self) -> None:
self.offset = 0 self.offset = 0
def handle_offset(self, delta: int) -> dict: def handle_offset(self, delta: int) -> dict:
@@ -183,14 +190,11 @@ def test_control_server_and_client_roundtrip(tmp_path: Path) -> None:
socket_path = tmp_path / "watch.sock" socket_path = tmp_path / "watch.sock"
server = ControlServer(socket_path=str(socket_path)) server = ControlServer(socket_path=str(socket_path))
session = _Session() await server.start(_Session()) # type: ignore
await server.start(session) # type: ignore
client = ControlClient(socket_path=str(socket_path)) client = ControlClient(socket_path=str(socket_path))
r1 = await client._send_async({"cmd": "offset", "delta": 200}) r1 = await client._send_async({"cmd": "offset", "delta": 200})
r2 = await client._send_async({"cmd": "status"}) r2 = await client._send_async({"cmd": "status"})
await server.stop() await server.stop()
assert r1 == {"ok": True, "offset_ms": 200} assert r1 == {"ok": True, "offset_ms": 200}
assert r2["ok"] is True assert r2["ok"] is True
assert r2["offset_ms"] == 200 assert r2["offset_ms"] == 200
@@ -198,260 +202,483 @@ def test_control_server_and_client_roundtrip(tmp_path: Path) -> None:
asyncio.run(_run()) asyncio.run(_run())
def test_pipe_output_prints_fixed_window_for_status(capsys) -> None: # PipeOutput
output = PipeOutput(before=1, after=1)
state = WatchState(
track=None, def _pipe_state(
lyrics=None, status: WatchStatus,
position_ms=0, lyrics: Optional[LRCData] = None,
offset_ms=0, position_ms: int = 0,
status="fetching", offset_ms: int = 0,
track: Optional[TrackMeta] = None,
) -> WatchState:
return WatchState(
track=track,
lyrics=LyricView.from_lrc(lyrics) if lyrics else None,
position_ms=position_ms,
offset_ms=offset_ms,
status=status,
) )
asyncio.run(output.on_state(state))
printed = capsys.readouterr().out def test_pipe_output_fetching_renders_status_window(capsys) -> None:
assert printed == "\n[fetching...]\n\n" asyncio.run(
PipeOutput(before=1, after=1).on_state(_pipe_state(WatchStatus.FETCHING))
def test_pipe_output_uses_context_window_for_lyrics(capsys) -> None:
output = PipeOutput(before=1, after=1)
lyrics = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
state = WatchState(
track=TrackMeta(title="Song"),
lyrics=LyricView.from_lrc(lyrics),
position_ms=2100,
offset_ms=0,
status="ok",
) )
assert capsys.readouterr().out == "\n[fetching...]\n\n"
asyncio.run(output.on_state(state))
printed = capsys.readouterr().out
assert printed == "a\nb\nc\n"
def test_pipe_output_shows_upcoming_lines_before_first_timestamp(capsys) -> None: def test_pipe_output_no_lyrics_renders_status_window(capsys) -> None:
output = PipeOutput(before=1, after=1) asyncio.run(
lyrics = LRCData("[00:02.00]a\n[00:03.00]b") PipeOutput(before=1, after=1).on_state(_pipe_state(WatchStatus.NO_LYRICS))
state = WatchState(
track=TrackMeta(title="Song"),
lyrics=LyricView.from_lrc(lyrics),
position_ms=0,
offset_ms=0,
status="ok",
) )
assert capsys.readouterr().out == "\n[no lyrics]\n\n"
asyncio.run(output.on_state(state))
printed = capsys.readouterr().out
assert printed == "\n\na\n"
def test_pipe_output_first_line_keeps_before_region_empty(capsys) -> None: def test_pipe_output_idle_renders_status_window(capsys) -> None:
output = PipeOutput(before=1, after=1) asyncio.run(PipeOutput(before=1, after=1).on_state(_pipe_state(WatchStatus.IDLE)))
lyrics = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c") assert capsys.readouterr().out == "\n[idle]\n\n"
state = WatchState(
track=TrackMeta(title="Song"),
lyrics=LyricView.from_lrc(lyrics), def test_pipe_output_no_newline_mode(capsys) -> None:
position_ms=1100, asyncio.run(
offset_ms=0, PipeOutput(before=0, after=0, no_newline=True).on_state(
status="ok", _pipe_state(WatchStatus.FETCHING)
) )
asyncio.run(output.on_state(state))
printed = capsys.readouterr().out
assert printed == "\na\nb\n"
def test_pipe_output_last_line_keeps_after_region_empty(capsys) -> None:
output = PipeOutput(before=1, after=1)
lyrics = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
state = WatchState(
track=TrackMeta(title="Song"),
lyrics=LyricView.from_lrc(lyrics),
position_ms=3100,
offset_ms=0,
status="ok",
) )
assert capsys.readouterr().out == "[fetching...]"
asyncio.run(output.on_state(state))
printed = capsys.readouterr().out def test_pipe_output_default_window_shows_current_line(capsys) -> None:
assert printed == "b\nc\n\n" lrc = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
asyncio.run(
PipeOutput().on_state(_pipe_state(WatchStatus.OK, lrc, position_ms=2100))
)
assert capsys.readouterr().out == "b\n"
def test_pipe_output_context_window(capsys) -> None:
lrc = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
asyncio.run(
PipeOutput(before=1, after=1).on_state(
_pipe_state(WatchStatus.OK, lrc, position_ms=2100)
)
)
assert capsys.readouterr().out == "a\nb\nc\n"
def test_pipe_output_before_region_empty_at_first_line(capsys) -> None:
lrc = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
asyncio.run(
PipeOutput(before=1, after=1).on_state(
_pipe_state(WatchStatus.OK, lrc, position_ms=1100)
)
)
assert capsys.readouterr().out == "\na\nb\n"
def test_pipe_output_after_region_empty_at_last_line(capsys) -> None:
lrc = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
asyncio.run(
PipeOutput(before=1, after=1).on_state(
_pipe_state(WatchStatus.OK, lrc, position_ms=3100)
)
)
assert capsys.readouterr().out == "b\nc\n\n"
def test_pipe_output_upcoming_lines_before_first_timestamp(capsys) -> None:
lrc = LRCData("[00:02.00]a\n[00:03.00]b")
asyncio.run(
PipeOutput(before=1, after=1).on_state(
_pipe_state(WatchStatus.OK, lrc, position_ms=0)
)
)
assert capsys.readouterr().out == "\n\na\n"
def test_pipe_output_offset_ms_shifts_effective_position(capsys) -> None:
lrc = LRCData("[00:01.00]a\n[00:02.00]b\n[00:03.00]c")
asyncio.run(
PipeOutput().on_state(
_pipe_state(WatchStatus.OK, lrc, position_ms=1000, offset_ms=1500)
)
)
# effective = 2500 ms → line b
assert capsys.readouterr().out == "b\n"
def test_pipe_output_repeated_text_uses_correct_timed_occurrence(capsys) -> None: def test_pipe_output_repeated_text_uses_correct_timed_occurrence(capsys) -> None:
output = PipeOutput(before=1, after=1) lrc = LRCData("[00:01.00]A\n[00:02.00]X\n[00:03.00]B\n[00:04.00]X\n[00:05.00]C")
lyrics = LRCData("[00:01.00]A\n[00:02.00]X\n[00:03.00]B\n[00:04.00]X\n[00:05.00]C") asyncio.run(
state = WatchState( PipeOutput(before=1, after=1).on_state(
track=TrackMeta(title="Song"), _pipe_state(WatchStatus.OK, lrc, position_ms=4100)
)
)
assert capsys.readouterr().out == "B\nX\nC\n"
# PrintOutput
def _ok_state(lyrics: LRCData, track: Optional[TrackMeta] = None) -> WatchState:
return WatchState(
track=track or TrackMeta(title="Song", artist="Artist"),
lyrics=LyricView.from_lrc(lyrics), lyrics=LyricView.from_lrc(lyrics),
position_ms=4100, position_ms=0,
offset_ms=0, offset_ms=0,
status="ok", status=WatchStatus.OK,
) )
def _status_state(status: WatchStatus, track: Optional[TrackMeta] = None) -> WatchState:
return WatchState(
track=track or TrackMeta(title="Song", artist="Artist"),
lyrics=None,
position_ms=0,
offset_ms=0,
status=status,
)
def test_print_output_emits_lrc_on_ok(capsys) -> None:
asyncio.run(
PrintOutput().on_state(_ok_state(LRCData("[00:01.00]Hello\n[00:02.00]World")))
)
assert capsys.readouterr().out.startswith("[00:01.00]")
def test_print_output_plain_strips_tags(capsys) -> None:
asyncio.run(
PrintOutput(plain=True).on_state(
_ok_state(LRCData("[00:01.00]Hello\n[00:02.00]World"))
)
)
out = capsys.readouterr().out
assert "[" not in out
assert "Hello" in out
def test_print_output_plain_with_unsynced_lyrics(capsys) -> None:
asyncio.run(PrintOutput(plain=True).on_state(_ok_state(LRCData("Hello\nWorld"))))
out = capsys.readouterr().out
assert "Hello" in out
assert "[" not in out
def test_print_output_no_lyrics_emits_blank_line(capsys) -> None:
asyncio.run(PrintOutput().on_state(_status_state(WatchStatus.NO_LYRICS)))
assert capsys.readouterr().out == "\n"
def test_print_output_fetching_emits_nothing(capsys) -> None:
asyncio.run(PrintOutput().on_state(_status_state(WatchStatus.FETCHING)))
assert capsys.readouterr().out == ""
def test_print_output_idle_emits_nothing(capsys) -> None:
asyncio.run(PrintOutput().on_state(_status_state(WatchStatus.IDLE)))
assert capsys.readouterr().out == ""
def test_print_output_is_stateless(capsys) -> None:
"""View has no internal deduplication — emits on every call."""
output = PrintOutput()
state = _ok_state(LRCData("[00:01.00]Hello"))
asyncio.run(output.on_state(state)) asyncio.run(output.on_state(state))
asyncio.run(output.on_state(state))
printed = capsys.readouterr().out lines = [ln for ln in capsys.readouterr().out.splitlines() if ln]
assert printed == "B\nX\nC\n" assert len(lines) == 2
def test_session_fetches_on_resume_playing_without_lyrics() -> None: def test_print_output_position_sensitive_is_false() -> None:
async def _run() -> None: assert PrintOutput.position_sensitive is False
class _Manager:
def fetch_for_track(self, *_args, **_kwargs):
return None
class _Output(BaseOutput):
async def on_state(self, state: WatchState) -> None:
return None
class _Fetcher(LyricFetcher): # WatchCoordinator
def __init__(self):
async def _fetch(_track: TrackMeta):
return None
async def _on_fetching() -> None:
return None
async def _on_result(_lyrics) -> None: class _CaptureFetcher:
return None def __init__(self) -> None:
self.requested: list[str] = []
super().__init__(
_fetch, _on_fetching, _on_result, TEST_CONFIG.watch.debounce_ms
)
self.requested = []
def request(self, track: TrackMeta) -> None: def request(self, track: TrackMeta) -> None:
self.requested.append(track.display_name()) self.requested.append(track.display_name())
async def stop(self) -> None:
pass
def _make_coordinator(output: Optional[BaseOutput] = None) -> WatchCoordinator:
class _Manager:
def fetch_for_track(self, *_a, **_kw):
return None
class _NullOutput(BaseOutput):
async def on_state(self, state: WatchState) -> None:
pass
session = WatchCoordinator( session = WatchCoordinator(
_Manager(), # type: ignore _Manager(), # type: ignore
_Output(), output or _NullOutput(),
player_hint=None, player_hint=None,
config=TEST_CONFIG, config=TEST_CONFIG,
) )
fake_fetcher = _Fetcher()
session._fetcher = fake_fetcher
session._tracker = PositionTracker( session._tracker = PositionTracker(
lambda _bus: asyncio.sleep(0, result=0), lambda _bus: asyncio.sleep(0, result=0),
TEST_CONFIG, TEST_CONFIG,
) )
return session
bus_name = "org.mpris.MediaPlayer2.spotify"
track = TrackMeta(title="Song", artist="Artist")
session._model.active_player = bus_name
session._player_monitor.players = {
bus_name: PlayerState(bus_name=bus_name, status="Playing", track=track)
}
session._model.set_lyrics(None)
session._model.status = "paused"
session._on_playback_status(bus_name, "Playing") def _pstate(status: str = "Playing", title: str = "Song") -> PlayerState:
return PlayerState(
bus_name=BUS,
status=status,
track=TrackMeta(title=title, artist="Artist"),
)
def test_coordinator_fetches_on_initial_player() -> None:
async def _run() -> None:
session = _make_coordinator()
fetcher = _CaptureFetcher()
session._fetcher = fetcher # type: ignore[assignment]
session._player_monitor.players = {BUS: _pstate("Playing")}
session._on_player_change()
await asyncio.sleep(0)
assert fetcher.requested == ["Artist - Song"]
assert session._model.status == WatchStatus.FETCHING
asyncio.run(_run())
def test_coordinator_fetches_while_paused() -> None:
"""Fetch starts immediately even when player is paused — no wait for resume."""
async def _run() -> None:
session = _make_coordinator()
fetcher = _CaptureFetcher()
session._fetcher = fetcher # type: ignore[assignment]
session._player_monitor.players = {BUS: _pstate("Paused")}
session._on_player_change()
await asyncio.sleep(0)
assert fetcher.requested == ["Artist - Song"]
asyncio.run(_run())
def test_coordinator_paused_start_emits_correct_line_after_fetch() -> None:
"""After fetch completes with a mid-song paused player, the current lyric line must render."""
async def _run() -> None:
received: list[WatchState] = []
class _CaptureOutput(BaseOutput):
position_sensitive = True
async def on_state(self, state: WatchState) -> None:
received.append(state)
class _Manager:
def fetch_for_track(self, *_a, **_kw):
return None
PAUSED_MS = 45000
lrc = LRCData("[00:43.00]a\n[00:44.00]b\n[00:46.00]c")
session = WatchCoordinator(
_Manager(), # type: ignore
_CaptureOutput(),
player_hint=None,
config=TEST_CONFIG,
)
session._tracker = PositionTracker(
lambda _bus: asyncio.sleep(0, result=PAUSED_MS),
TEST_CONFIG,
)
await session._tracker.start()
# Calibrate tracker directly (tracker-level behavior already covered by
# test_position_tracker_paused_start_calibrates_initial_position)
await session._tracker.set_active_player(BUS, "Paused", "Artist - Song")
# Put model in the state _on_player_change would have produced
session._model.active_player = BUS
session._model.active_track_key = "Artist - Song"
session._model.status = WatchStatus.FETCHING
session._player_monitor.players = {BUS: _pstate("Paused")}
session._last_emit_signature = (
"status",
WatchStatus.FETCHING,
BUS,
"Artist - Song",
)
await session._on_lyrics_update(lrc)
last_ok = next(
(s for s in reversed(received) if s.status == WatchStatus.OK), None
)
assert last_ok is not None, "no OK state emitted after lyrics loaded"
assert last_ok.position_ms >= PAUSED_MS
await session._tracker.stop()
asyncio.run(_run())
def test_coordinator_fetches_on_track_change() -> None:
async def _run() -> None:
session = _make_coordinator()
session._model.active_player = BUS
session._model.active_track_key = "Artist - Old Song"
session._model.set_lyrics(LRCData("[00:01.00]old"))
session._model.status = WatchStatus.OK
fetcher = _CaptureFetcher()
session._fetcher = fetcher # type: ignore[assignment]
session._player_monitor.players = {BUS: _pstate("Playing", title="New Song")}
session._on_player_change()
await asyncio.sleep(0) await asyncio.sleep(0)
assert fake_fetcher.requested == ["Artist - Song"] assert fetcher.requested == ["Artist - New Song"]
assert session._model.status == "fetching" assert session._model.lyrics is None
asyncio.run(_run()) asyncio.run(_run())
def test_session_emit_state_only_when_lyric_cursor_changes() -> None: def test_coordinator_no_refetch_on_calibration_no_lyrics() -> None:
"""Calibration with same player/track and no_lyrics must NOT trigger a second fetch."""
async def _run() -> None: async def _run() -> None:
class _Manager: session = _make_coordinator()
def fetch_for_track(self, *_args, **_kwargs): fetcher = _CaptureFetcher()
return None session._fetcher = fetcher # type: ignore[assignment]
session._player_monitor.players = {BUS: _pstate("Playing")}
session._on_player_change()
await asyncio.sleep(0)
assert len(fetcher.requested) == 1
class _Output(BaseOutput): session._model.status = WatchStatus.NO_LYRICS
def __init__(self): session._on_player_change()
self.count = 0 await asyncio.sleep(0)
assert len(fetcher.requested) == 1
async def on_state(self, state: WatchState) -> None: asyncio.run(_run())
self.count += 1
output = _Output()
session = WatchCoordinator(
_Manager(), # type: ignore
output,
player_hint=None,
config=TEST_CONFIG,
)
session._tracker = PositionTracker(
lambda _bus: asyncio.sleep(0, result=0),
TEST_CONFIG,
)
bus_name = "org.mpris.MediaPlayer2.spotify" def test_coordinator_no_fetch_when_lyrics_present() -> None:
track = TrackMeta(title="Song", artist="Artist") async def _run() -> None:
session._model.active_player = bus_name session = _make_coordinator()
session._model.active_player = BUS
session._model.active_track_key = "Artist - Song"
session._model.set_lyrics(LRCData("[00:01.00]line"))
session._model.status = WatchStatus.OK
fetcher = _CaptureFetcher()
session._fetcher = fetcher # type: ignore[assignment]
session._player_monitor.players = {BUS: _pstate("Playing")}
session._on_player_change()
await asyncio.sleep(0)
assert fetcher.requested == []
assert session._model.status == WatchStatus.OK
asyncio.run(_run())
def test_coordinator_player_disappears_goes_idle() -> None:
async def _run() -> None:
session = _make_coordinator()
session._model.active_player = BUS
session._model.active_track_key = "Artist - Song"
session._model.set_lyrics(LRCData("[00:01.00]line"))
session._model.status = WatchStatus.OK
session._player_monitor.players = {}
session._on_player_change()
await asyncio.sleep(0)
assert session._model.status == WatchStatus.IDLE
assert session._model.lyrics is None
assert session._model.active_player is None
asyncio.run(_run())
def test_coordinator_no_fetch_when_track_is_none() -> None:
"""Player present but reports no track metadata → no fetch, status NO_LYRICS."""
async def _run() -> None:
session = _make_coordinator()
fetcher = _CaptureFetcher()
session._fetcher = fetcher # type: ignore[assignment]
session._player_monitor.players = { session._player_monitor.players = {
bus_name: PlayerState(bus_name=bus_name, status="Playing", track=track) BUS: PlayerState(bus_name=BUS, status="Playing", track=None)
}
session._on_player_change()
await asyncio.sleep(0)
assert fetcher.requested == []
assert session._model.status == WatchStatus.NO_LYRICS
asyncio.run(_run())
def test_coordinator_emit_deduplicates_on_same_cursor() -> None:
async def _run() -> None:
counts = [0]
class _CountOutput(BaseOutput):
async def on_state(self, state: WatchState) -> None:
counts[0] += 1
session = _make_coordinator(_CountOutput())
track = TrackMeta(title="Song", artist="Artist")
session._model.active_player = BUS
session._player_monitor.players = {
BUS: PlayerState(bus_name=BUS, status="Playing", track=track)
} }
session._model.set_lyrics(LRCData("[00:01.00]a\n[00:03.00]b")) session._model.set_lyrics(LRCData("[00:01.00]a\n[00:03.00]b"))
session._model.status = "ok" session._model.status = WatchStatus.OK
await session._tracker.set_active_player( await session._tracker.set_active_player(BUS, "Playing", "Artist - Song")
bus_name,
"Playing",
"Artist - Song",
)
await session._emit_state() await session._emit_state() # emits
await session._emit_state() await session._emit_state() # same cursor → no emit
assert counts[0] == 1
await session._tracker.on_seeked(bus_name, 3200) await session._tracker.on_seeked(BUS, 3200)
await session._emit_state() await session._emit_state() # cursor advanced → emits
assert counts[0] == 2
assert output.count == 2
asyncio.run(_run()) asyncio.run(_run())
def test_session_emits_when_crossing_first_timestamp() -> None: def test_coordinator_position_insensitive_output_ignores_seeks() -> None:
"""With position_sensitive=False, seek events do not trigger re-emit."""
async def _run() -> None: async def _run() -> None:
class _Manager: counts = [0]
def fetch_for_track(self, *_args, **_kwargs):
return None
class _Output(BaseOutput):
def __init__(self):
self.count = 0
class _CountPrint(PrintOutput):
async def on_state(self, state: WatchState) -> None: async def on_state(self, state: WatchState) -> None:
self.count += 1 counts[0] += 1
output = _Output() session = _make_coordinator(_CountPrint())
session = WatchCoordinator(
_Manager(), # type: ignore
output,
player_hint=None,
config=TEST_CONFIG,
)
session._tracker = PositionTracker(
lambda _bus: asyncio.sleep(0, result=0),
TEST_CONFIG,
)
bus_name = "org.mpris.MediaPlayer2.spotify"
track = TrackMeta(title="Song", artist="Artist") track = TrackMeta(title="Song", artist="Artist")
session._model.active_player = bus_name session._model.active_player = BUS
session._player_monitor.players = { session._player_monitor.players = {
bus_name: PlayerState(bus_name=bus_name, status="Playing", track=track) BUS: PlayerState(bus_name=BUS, status="Playing", track=track)
} }
session._model.set_lyrics(LRCData("[00:02.00]a\n[00:03.00]b")) session._model.set_lyrics(LRCData("[00:01.00]a\n[00:03.00]b"))
session._model.status = "ok" session._model.status = WatchStatus.OK
await session._tracker.set_active_player(
bus_name,
"Playing",
"Artist - Song",
)
await session._emit_state() await session._emit_state() # emits once
await session._tracker.on_seeked(bus_name, 2500) assert counts[0] == 1
await session._emit_state()
assert output.count == 2 await session._tracker.on_seeked(BUS, 3200)
await session._emit_state() # position fixed at 0 → same signature → no re-emit
assert counts[0] == 1
asyncio.run(_run()) asyncio.run(_run())
Generated
+94 -1
View File
@@ -153,7 +153,7 @@ wheels = [
[[package]] [[package]]
name = "lrx-cli" name = "lrx-cli"
version = "0.7.1" version = "0.7.9"
source = { editable = "." } source = { editable = "." }
dependencies = [ dependencies = [
{ name = "cyclopts" }, { name = "cyclopts" },
@@ -166,6 +166,8 @@ dependencies = [
[package.dev-dependencies] [package.dev-dependencies]
dev = [ dev = [
{ name = "poethepoet" },
{ name = "pyright" },
{ name = "pytest" }, { name = "pytest" },
{ name = "ruff" }, { name = "ruff" },
] ]
@@ -182,6 +184,8 @@ requires-dist = [
[package.metadata.requires-dev] [package.metadata.requires-dev]
dev = [ dev = [
{ name = "poethepoet", specifier = ">=0.44.0" },
{ name = "pyright", specifier = ">=1.1.406" },
{ name = "pytest", specifier = ">=9.0.2" }, { name = "pytest", specifier = ">=9.0.2" },
{ name = "ruff", specifier = ">=0.15.8" }, { name = "ruff", specifier = ">=0.15.8" },
] ]
@@ -216,6 +220,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b0/7a/620f945b96be1f6ee357d211d5bf74ab1b7fe72a9f1525aafbfe3aee6875/mutagen-1.47.0-py3-none-any.whl", hash = "sha256:edd96f50c5907a9539d8e5bba7245f62c9f520aef333d13392a79a4f70aca719", size = 194391, upload-time = "2023-09-03T16:33:29.955Z" }, { url = "https://files.pythonhosted.org/packages/b0/7a/620f945b96be1f6ee357d211d5bf74ab1b7fe72a9f1525aafbfe3aee6875/mutagen-1.47.0-py3-none-any.whl", hash = "sha256:edd96f50c5907a9539d8e5bba7245f62c9f520aef333d13392a79a4f70aca719", size = 194391, upload-time = "2023-09-03T16:33:29.955Z" },
] ]
[[package]]
name = "nodeenv"
version = "1.10.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" },
]
[[package]] [[package]]
name = "packaging" name = "packaging"
version = "26.0" version = "26.0"
@@ -225,6 +238,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
] ]
[[package]]
name = "pastel"
version = "0.2.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/76/f1/4594f5e0fcddb6953e5b8fe00da8c317b8b41b547e2b3ae2da7512943c62/pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d", size = 7555, upload-time = "2020-09-16T19:21:12.43Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/aa/18/a8444036c6dd65ba3624c63b734d3ba95ba63ace513078e1580590075d21/pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364", size = 5955, upload-time = "2020-09-16T19:21:11.409Z" },
]
[[package]] [[package]]
name = "platformdirs" name = "platformdirs"
version = "4.9.6" version = "4.9.6"
@@ -243,6 +265,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
] ]
[[package]]
name = "poethepoet"
version = "0.44.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pastel" },
{ name = "pyyaml" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a1/a4/e487662f12a5ecd2ac4d77f7697e4bda481953bb80032b158e5ab55173d4/poethepoet-0.44.0.tar.gz", hash = "sha256:c2667b513621788fb46482e371cdf81c0b04344e0e0bcb7aa8af45f84c2fce7b", size = 96040, upload-time = "2026-04-06T19:40:58.908Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/80/b7/503b7d3a51b0de9a329f1323048d166e309a97bb31bdc60e6acd11d2c71f/poethepoet-0.44.0-py3-none-any.whl", hash = "sha256:36d3d834708ed069ac1e4f8ed77915c55265b7b6e01aeb2fe617c9fe9cfd524a", size = 122873, upload-time = "2026-04-06T19:40:57.369Z" },
]
[[package]] [[package]]
name = "pygments" name = "pygments"
version = "2.20.0" version = "2.20.0"
@@ -252,6 +287,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f4/7e/a72dd26f3b0f4f2bf1dd8923c85f7ceb43172af56d63c7383eb62b332364/pygments-2.20.0-py3-none-any.whl", hash = "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176", size = 1231151, upload-time = "2026-03-29T13:29:30.038Z" }, { url = "https://files.pythonhosted.org/packages/f4/7e/a72dd26f3b0f4f2bf1dd8923c85f7ceb43172af56d63c7383eb62b332364/pygments-2.20.0-py3-none-any.whl", hash = "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176", size = 1231151, upload-time = "2026-03-29T13:29:30.038Z" },
] ]
[[package]]
name = "pyright"
version = "1.1.408"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nodeenv" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" },
]
[[package]] [[package]]
name = "pytest" name = "pytest"
version = "9.0.3" version = "9.0.3"
@@ -268,6 +316,42 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d4/24/a372aaf5c9b7208e7112038812994107bc65a84cd00e0354a88c2c77a617/pytest-9.0.3-py3-none-any.whl", hash = "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", size = 375249, upload-time = "2026-04-07T17:16:16.13Z" }, { url = "https://files.pythonhosted.org/packages/d4/24/a372aaf5c9b7208e7112038812994107bc65a84cd00e0354a88c2c77a617/pytest-9.0.3-py3-none-any.whl", hash = "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", size = 375249, upload-time = "2026-04-07T17:16:16.13Z" },
] ]
[[package]]
name = "pyyaml"
version = "6.0.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" },
{ url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" },
{ url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" },
{ url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" },
{ url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" },
{ url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" },
{ url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" },
{ url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" },
{ url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" },
{ url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" },
{ url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
{ url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
{ url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
{ url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
{ url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
{ url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
{ url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
{ url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
{ url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
{ url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
{ url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
{ url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
{ url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
{ url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
{ url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
{ url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
{ url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
{ url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
]
[[package]] [[package]]
name = "rich" name = "rich"
version = "14.3.3" version = "14.3.3"
@@ -319,6 +403,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/58/ed/dea90a65b7d9e69888890fb14c90d7f51bf0c1e82ad800aeb0160e4bacfd/ruff-0.15.10-py3-none-win_arm64.whl", hash = "sha256:601d1610a9e1f1c2165a4f561eeaa2e2ea1e97f3287c5aa258d3dab8b57c6188", size = 11035607, upload-time = "2026-04-09T14:05:47.593Z" }, { url = "https://files.pythonhosted.org/packages/58/ed/dea90a65b7d9e69888890fb14c90d7f51bf0c1e82ad800aeb0160e4bacfd/ruff-0.15.10-py3-none-win_arm64.whl", hash = "sha256:601d1610a9e1f1c2165a4f561eeaa2e2ea1e97f3287c5aa258d3dab8b57c6188", size = 11035607, upload-time = "2026-04-09T14:05:47.593Z" },
] ]
[[package]]
name = "typing-extensions"
version = "4.15.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]
[[package]] [[package]]
name = "win32-setctime" name = "win32-setctime"
version = "1.2.0" version = "1.2.0"