[mod] data: implement a simple tracker URL (SQL) database

The tracker data is loaded on demand directly into the cache, so this data
no longer needs to be maintained via PRs.
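
Roughly, the resulting flow looks like this from a consumer's point of view (a
minimal sketch; the example URL is made up, the names come from the new
TrackerPatternsDB module added below):

from searx.data import TRACKER_PATTERNS

# First use triggers init()/load(): the ClearURL rule lists are fetched once
# and written to the cache; later calls only read from the cache.
cleaned = TRACKER_PATTERNS.clean_url("https://example.org/page?q=searx&utm_source=newsletter")
if isinstance(cleaned, str):
    url = cleaned  # tracker arguments were removed
# a return value of True means: keep the URL unchanged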

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2025-05-24 17:53:57 +02:00 committed by Bnyro
parent 58c10f758b
commit 2dd4f7b972
7 changed files with 168 additions and 2067 deletions

View file

@@ -33,7 +33,6 @@ jobs:
- update_engine_traits.py
- update_wikidata_units.py
- update_engine_descriptions.py
- update_tracker_patterns.py
permissions:
  contents: write

View file

@@ -10,6 +10,7 @@ from __future__ import annotations
__all__ = ["ExpireCacheCfg", "ExpireCacheStats", "ExpireCache", "ExpireCacheSQLite"]
import abc
from collections.abc import Iterator
import dataclasses
import datetime
import hashlib
@@ -396,6 +397,20 @@ class ExpireCacheSQLite(sqlitedb.SQLiteAppl, ExpireCache):
        return self.deserialize(row[0])

    def pairs(self, ctx: str) -> Iterator[tuple[str, typing.Any]]:
        """Iterate over key/value pairs from table given by argument ``ctx``.

        If ``ctx`` argument is ``None`` (the default), a table name is
        generated from the :py:obj:`ExpireCacheCfg.name`."""
        table = ctx
        self.maintenance()

        if not table:
            table = self.normalize_name(self.cfg.name)

        if table in self.table_names:
            for row in self.DB.execute(f"SELECT key, value FROM {table}"):
                yield row[0], self.deserialize(row[1])

    def state(self) -> ExpireCacheStats:
        cached_items = {}
        for table in self.table_names:
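
The new pairs() method is what TrackerPatternsDB (added below) uses to read its
rules back out of the cache. A minimal usage sketch, assuming an already
configured ExpireCacheSQLite instance named cache (context name and values are
made up):

# store two key/value pairs under a dedicated context (one table per context)
cache.set(key="^https?://example[.]com", value=([], ["utm_.*"]), ctx="demo_rules", expire=None)
cache.set(key="^https?://example[.]org", value=([], ["ref"]), ctx="demo_rules", expire=None)

# iterate over all key/value pairs of that context
for key, value in cache.pairs(ctx="demo_rules"):
    print(key, value)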

View file

@@ -13,6 +13,7 @@ import typing
from .core import log, data_dir
from .currencies import CurrenciesDB
from .tracker_patterns import TrackerPatternsDB
CURRENCIES: CurrenciesDB
USER_AGENTS: dict[str, typing.Any]
@@ -23,7 +24,7 @@ OSM_KEYS_TAGS: dict[str, typing.Any]
ENGINE_DESCRIPTIONS: dict[str, typing.Any]
ENGINE_TRAITS: dict[str, typing.Any]
LOCALES: dict[str, typing.Any]
TRACKER_PATTERNS: list[dict[str, typing.Any]]
TRACKER_PATTERNS: TrackerPatternsDB
lazy_globals = {
    "CURRENCIES": CurrenciesDB(),
@@ -35,7 +36,7 @@ lazy_globals = {
    "ENGINE_DESCRIPTIONS": None,
    "ENGINE_TRAITS": None,
    "LOCALES": None,
    "TRACKER_PATTERNS": None,
    "TRACKER_PATTERNS": TrackerPatternsDB(),
}
data_json_files = {
@@ -47,7 +48,6 @@ data_json_files = {
    "ENGINE_DESCRIPTIONS": "engine_descriptions.json",
    "ENGINE_TRAITS": "engine_traits.json",
    "LOCALES": "locales.json",
    "TRACKER_PATTERNS": "tracker_patterns.json",
}
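
With tracker_patterns.json dropped from data_json_files, searx.data.TRACKER_PATTERNS
is no longer a parsed JSON list but the TrackerPatternsDB object itself. A short
sketch of the consumer side (names taken from the diffs in this commit):

from searx.data import TRACKER_PATTERNS

# each rule is a (url_regexp, url_ignore, del_args) tuple, see RuleType below
for url_regexp, url_ignore, del_args in TRACKER_PATTERNS.rules():
    print(url_regexp, del_args)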

File diff suppressed because it is too large

View file

@@ -0,0 +1,142 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Simple implementation to store TrackerPatterns data in a SQL database."""

from __future__ import annotations
import typing

__all__ = ["TrackerPatternsDB"]

import re
import pathlib
from collections.abc import Iterator
from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

import httpx

from searx.data.core import get_cache, log

RuleType = tuple[str, list[str], list[str]]


class TrackerPatternsDB:
    # pylint: disable=missing-class-docstring

    ctx_name = "data_tracker_patterns"
    json_file = pathlib.Path(__file__).parent / "tracker_patterns.json"

    CLEAR_LIST_URL = [
        # ClearURL rule lists, the first one that responds HTTP 200 is used
        "https://rules1.clearurls.xyz/data.minify.json",
        "https://rules2.clearurls.xyz/data.minify.json",
        "https://raw.githubusercontent.com/ClearURLs/Rules/refs/heads/master/data.min.json",
    ]

    class Fields:
        # pylint: disable=too-few-public-methods, invalid-name

        url_regexp: typing.Final = 0  # URL (regular expression) match condition of the link
        url_ignore: typing.Final = 1  # URL (regular expression) to ignore
        del_args: typing.Final = 2  # list of URL arguments (regular expression) to delete

    def __init__(self):
        self.cache = get_cache()

    def init(self):
        if self.cache.properties("tracker_patterns loaded") != "OK":
            self.load()
            self.cache.properties.set("tracker_patterns loaded", "OK")
        # F I X M E:
        #   do we need maintenance here? .. remember: the database is stored
        #   in /tmp and will be rebuilt on reboot anyway

    def load(self):
        log.debug("init searx.data.TRACKER_PATTERNS")
        for rule in self.iter_clear_list():
            self.add(rule)

    def add(self, rule: RuleType):
        self.cache.set(
            key=rule[self.Fields.url_regexp],
            value=(
                rule[self.Fields.url_ignore],
                rule[self.Fields.del_args],
            ),
            ctx=self.ctx_name,
            expire=None,
        )

    def rules(self) -> Iterator[RuleType]:
        self.init()
        for key, value in self.cache.pairs(ctx=self.ctx_name):
            yield key, value[0], value[1]

    def iter_clear_list(self) -> Iterator[RuleType]:
        resp = None
        for url in self.CLEAR_LIST_URL:
            resp = httpx.get(url, timeout=3)
            if resp.status_code == 200:
                break
            log.warning(f"TRACKER_PATTERNS: ClearURL ignore HTTP {resp.status_code} {url}")

        if resp is None or resp.status_code != 200:
            log.error("TRACKER_PATTERNS: failed fetching ClearURL rule lists")
            return

        for rule in resp.json()["providers"].values():
            yield (
                rule["urlPattern"].replace("\\\\", "\\"),  # fix javascript regex syntax
                [exc.replace("\\\\", "\\") for exc in rule.get("exceptions", [])],
                rule.get("rules", []),
            )

    def clean_url(self, url: str) -> bool | str:
        """The URL arguments are normalized and cleaned of tracker parameters.

        Returns bool ``True`` to use URL unchanged (``False`` to ignore URL).
        If URL should be modified, the returned string is the new URL to use.
        """

        new_url = url
        parsed_new_url = urlparse(url=new_url)

        for rule in self.rules():

            if not re.match(rule[self.Fields.url_regexp], new_url):
                # no match / ignore pattern
                continue

            do_ignore = False
            for pattern in rule[self.Fields.url_ignore]:
                if re.match(pattern, new_url):
                    do_ignore = True
                    break

            if do_ignore:
                # pattern is in the list of exceptions / ignore pattern
                # HINT:
                #    we can't break the outer pattern loop since we have
                #    overlapping urlPattern like ".*"
                continue

            # remove tracker arguments from the url-query part
            query_args: list[tuple[str, str]] = list(parse_qsl(parsed_new_url.query))

            for name, val in query_args.copy():
                # remove URL arguments
                for pattern in rule[self.Fields.del_args]:
                    if re.match(pattern, name):
                        log.debug("TRACKER_PATTERNS: %s remove tracker arg: %s='%s'", parsed_new_url.netloc, name, val)
                        query_args.remove((name, val))

            parsed_new_url = parsed_new_url._replace(query=urlencode(query_args))
            new_url = urlunparse(parsed_new_url)

        if new_url != url:
            return new_url

        return True


if __name__ == "__main__":
    db = TrackerPatternsDB()
    for r in db.rules():
        print(r)
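
To make the RuleType layout concrete: a made-up, abbreviated ClearURL provider
entry and the tuple that iter_clear_list() turns it into (indices match
TrackerPatternsDB.Fields):

# hypothetical provider entry, after JSON parsing:
provider = {
    "urlPattern": "^https?://example[.]com",
    "exceptions": ["^https?://example[.]com/keep"],
    "rules": ["utm_source", "utm_medium"],
}

# the corresponding RuleType tuple yielded by iter_clear_list():
rule = (
    "^https?://example[.]com",            # Fields.url_regexp = 0
    ["^https?://example[.]com/keep"],     # Fields.url_ignore = 1
    ["utm_source", "utm_medium"],         # Fields.del_args   = 2
)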

View file

@@ -2,17 +2,15 @@
# pylint: disable=missing-module-docstring, unused-argument
from __future__ import annotations
import typing
import re
from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode
import logging
import typing
from flask_babel import gettext
from searx.data import TRACKER_PATTERNS
from . import Plugin, PluginInfo
from ._core import log
if typing.TYPE_CHECKING:
    from searx.search import SearchWithPlugins
@@ -21,13 +19,16 @@ if typing.TYPE_CHECKING:
from searx.plugins import PluginCfg
log = logging.getLogger("searx.plugins.tracker_url_remover")


class SXNGPlugin(Plugin):
    """Remove tracker arguments from the returned URL."""

    id = "tracker_url_remover"
    log = log.getChild(id)

    def __init__(self, plg_cfg: "PluginCfg") -> None:
        super().__init__(plg_cfg)
        self.info = PluginInfo(
            id=self.id,
@@ -47,42 +48,7 @@ class SXNGPlugin(Plugin):
        If URL should be modified, the returned string is the new URL to use."""

        if not url_src:
            cls.log.debug("missing a URL in field %s", field_name)
            log.debug("missing a URL in field %s", field_name)
            return True

        new_url = url_src
        parsed_new_url = urlparse(url=new_url)

        for rule in TRACKER_PATTERNS:

            if not re.match(rule["urlPattern"], new_url):
                # no match / ignore pattern
                continue

            in_exceptions = False
            for exception in rule["exceptions"]:
                if re.match(exception, new_url):
                    in_exceptions = True
                    break

            if in_exceptions:
                # pattern is in the list of exceptions / ignore pattern
                # hint: we can't break the outer pattern loop since we have
                # overlapping urlPattern like ".*"
                continue

            # remove tracker arguments from the url-query part
            query_args: list[tuple[str, str]] = list(parse_qsl(parsed_new_url.query))

            for name, val in query_args.copy():
                for reg in rule["trackerParams"]:
                    if re.match(reg, name):
                        cls.log.debug("%s remove tracker arg: %s='%s'", parsed_new_url.netloc, name, val)
                        query_args.remove((name, val))

            parsed_new_url = parsed_new_url._replace(query=urlencode(query_args))
            new_url = urlunparse(parsed_new_url)

        if new_url != url_src:
            return new_url

        return True
        return TRACKER_PATTERNS.clean_url(url=url_src)
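
The bool/str contract of the removed in-plugin implementation is unchanged, it
just moved into TrackerPatternsDB.clean_url(). A rough sketch of how a caller
could interpret the classmethod's return value (the call site and variable
names are hypothetical):

ret = SXNGPlugin.clean_url(url_src=result_url, field_name="url")
if ret is True:
    pass                 # keep result_url unchanged
elif isinstance(ret, str):
    result_url = ret     # tracker arguments were stripped from the URL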

View file

@@ -1,36 +0,0 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Fetch trackers"""

import json
import httpx

from searx.data import data_dir

DATA_FILE = data_dir / "tracker_patterns.json"
CLEAR_LIST_URL = "https://raw.githubusercontent.com/ClearURLs/Rules/refs/heads/master/data.min.json"


def fetch_clear_url_filters():
    resp = httpx.get(CLEAR_LIST_URL)
    if resp.status_code != 200:
        # pylint: disable=broad-exception-raised
        raise Exception(f"Error fetching ClearURL filter lists, HTTP code {resp.status_code}")

    providers = resp.json()["providers"]
    rules = []

    for rule in providers.values():
        rules.append(
            {
                "urlPattern": rule["urlPattern"].replace("\\\\", "\\"),  # fix javascript regex syntax
                "exceptions": [exc.replace("\\\\", "\\") for exc in rule["exceptions"]],
                "trackerParams": rule["rules"],
            }
        )

    return rules


if __name__ == '__main__':
    filter_list = fetch_clear_url_filters()

    with DATA_FILE.open("w", encoding='utf-8') as f:
        json.dump(filter_list, f, indent=4, sort_keys=True, ensure_ascii=False)