ai-content-maker/.venv/Lib/site-packages/spacy/lang/es/tokenizer_exceptions.py

from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS
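
# Each tokenizer exception maps a surface string to the list of sub-tokens it
# should be split into: ORTH gives the exact sub-token text and NORM an
# optional normalized form, so "pal" below tokenizes as "pa" + "l" (norm "el").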
_exc = {
    "pal": [{ORTH: "pa"}, {ORTH: "l", NORM: "el"}],
}

for exc_data in [
{ORTH: ""},
{ORTH: "°C"},
{ORTH: "aprox."},
{ORTH: "dna."},
{ORTH: "dpto."},
{ORTH: "ej."},
{ORTH: "esq."},
{ORTH: "pág."},
{ORTH: "p.ej."},
{ORTH: "Ud.", NORM: "usted"},
{ORTH: "Vd.", NORM: "usted"},
{ORTH: "Uds.", NORM: "ustedes"},
{ORTH: "Vds.", NORM: "ustedes"},
{ORTH: "vol.", NORM: "volúmen"},
]:
    _exc[exc_data[ORTH]] = [exc_data]

# Times
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m."}]
for h in range(1, 12 + 1):
for period in ["a.m.", "am"]:
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period}]
for period in ["p.m.", "pm"]:
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period}]
for orth in [
"a.C.",
"a.J.C.",
"d.C.",
"d.J.C.",
"apdo.",
"Av.",
"Avda.",
"Cía.",
"Dr.",
"Dra.",
"EE.UU.",
"Ee.Uu.",
"EE. UU.",
"Ee. Uu.",
"etc.",
"fig.",
"Gob.",
"Gral.",
"Ing.",
"J.C.",
"km/h",
"Lic.",
"m.n.",
"núm.",
"P.D.",
"Prof.",
"Profa.",
"q.e.p.d.",
"Q.E.P.D.",
"S.A.",
"S.L.",
"S.R.L.",
"s.s.s.",
"Sr.",
"Sra.",
"Srta.",
]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
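
# Usage sketch (illustrative; not part of the original module). The exceptions
# defined here are applied automatically when a Spanish pipeline is built:
#
#   import spacy
#   nlp = spacy.blank("es")
#   doc = nlp("Llegamos a las 3pm, Uds. ya saben.")
#   print([t.text for t in doc])
#   # "3pm" comes out as "3" + "pm"; "Uds." stays one token with norm "ustedes".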