# nltk/tokenize/destructive.py

# Natural Language Toolkit: NLTK's very own tokenizer.
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Liling Tan
# Tom Aarsen <> (modifications)
# URL: <https://www.nltk.org>
# For license information, see LICENSE.TXT
import re
import warnings
from typing import Iterator, List, Tuple
from nltk.tokenize.api import TokenizerI
from nltk.tokenize.util import align_tokens


class MacIntyreContractions:
    """
    List of contractions adapted from Robert MacIntyre's tokenizer.
    """

    CONTRACTIONS2 = [
        r"(?i)\b(can)(?#X)(not)\b",
        r"(?i)\b(d)(?#X)('ye)\b",
        r"(?i)\b(gim)(?#X)(me)\b",
        r"(?i)\b(gon)(?#X)(na)\b",
        r"(?i)\b(got)(?#X)(ta)\b",
        r"(?i)\b(lem)(?#X)(me)\b",
        r"(?i)\b(more)(?#X)('n)\b",
        r"(?i)\b(wan)(?#X)(na)(?=\s)",
    ]
    CONTRACTIONS3 = [r"(?i) ('t)(?#X)(is)\b", r"(?i) ('t)(?#X)(was)\b"]
    CONTRACTIONS4 = [r"(?i)\b(whad)(dd)(ya)\b", r"(?i)\b(wha)(t)(cha)\b"]


class NLTKWordTokenizer(TokenizerI):
    """
    The NLTK tokenizer that has improved upon the TreebankWordTokenizer.

    This is the tokenizer invoked by ``word_tokenize()``. It assumes that the
    text has already been segmented into sentences, e.g. using ``sent_tokenize()``.

    The tokenizer is "destructive" in that the regexes applied will munge the
    input string to a state beyond re-construction. It is possible to apply
    ``TreebankWordDetokenizer.detokenize`` to the tokenized outputs of
    ``NLTKWordTokenizer.tokenize``, but there is no guarantee that the original
    string will be recovered.
    """
    # Starting quotes.
    STARTING_QUOTES = [
        (re.compile("([«“‘„]|[`]+)", re.U), r" \1 "),
        (re.compile(r"^\""), r"``"),
        (re.compile(r"(``)"), r" \1 "),
        (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "),
        (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d|n)(\w)\b", re.U), r"\1 \2"),
    ]
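
    # Illustrative note (not in the original source): applied in order, these
    # rules turn an opening ASCII double quote into the Penn Treebank backtick
    # form, e.g. '"Hello' becomes ' `` Hello' before the later stages run.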
    # Ending quotes.
    ENDING_QUOTES = [
        (re.compile("([»”’])", re.U), r" \1 "),
        (re.compile(r"''"), " '' "),
        (re.compile(r'"'), " '' "),
        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
    ]
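
    # Illustrative note (not in the original source): besides converting closing
    # quotes to the Treebank '' form, these rules peel off clitics, e.g.
    # " the dog's " becomes " the dog 's " and " don't " becomes " do n't ".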

    # For improvements for starting/closing quotes from TreebankWordTokenizer,
    # see discussion on https://github.com/nltk/nltk/pull/1437
    # Adding to TreebankWordTokenizer, nltk.word_tokenize now splits on
    # - chevron quotes u'\xab' and u'\xbb'
    # - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d'
    # See https://github.com/nltk/nltk/issues/1995#issuecomment-376741608
    # Also, behavior of splitting on clitics now follows Stanford CoreNLP
    # - clitics covered (?!re|ve|ll|m|t|s|d)(\w)\b

    # Punctuation.
    PUNCTUATION = [
        (re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "),
        (re.compile(r"([:,])([^\d])"), r" \1 \2"),
        (re.compile(r"([:,])$"), r" \1 "),
        (
            re.compile(r"\.{2,}", re.U),
            r" \g<0> ",
        ),  # See https://github.com/nltk/nltk/pull/2322
        (re.compile(r"[;@#$%&]"), r" \g<0> "),
        (
            re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'),
            r"\1 \2\3 ",
        ),  # Handles the final period.
        (re.compile(r"[?!]"), r" \g<0> "),
        (re.compile(r"([^'])' "), r"\1 ' "),
        (
            re.compile(r"[*]", re.U),
            r" \g<0> ",
        ),  # See https://github.com/nltk/nltk/pull/2322
    ]
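
    # Illustrative note (not in the original source): these rules pad most
    # punctuation with spaces so that a plain split() separates it, e.g.
    # "Hello, world." becomes roughly "Hello , world . " before splitting.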

    # Pads parentheses.
    PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ")

    # Optionally convert parentheses and brackets to their PTB symbols.
    CONVERT_PARENTHESES = [
        (re.compile(r"\("), "-LRB-"),
        (re.compile(r"\)"), "-RRB-"),
        (re.compile(r"\["), "-LSB-"),
        (re.compile(r"\]"), "-RSB-"),
        (re.compile(r"\{"), "-LCB-"),
        (re.compile(r"\}"), "-RCB-"),
    ]
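
    # Illustrative note (not in the original source): with convert_parentheses=True
    # the tokenizer emits these Penn Treebank placeholders, e.g. "(roughly)" is
    # tokenized as ['-LRB-', 'roughly', '-RRB-'].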

    DOUBLE_DASHES = (re.compile(r"--"), r" -- ")

    # List of contractions adapted from Robert MacIntyre's tokenizer.
    _contractions = MacIntyreContractions()
    CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2))
    CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3))

    def tokenize(
        self, text: str, convert_parentheses: bool = False, return_str: bool = False
    ) -> List[str]:
        r"""Return a tokenized copy of `text`.

        >>> from nltk.tokenize import NLTKWordTokenizer
        >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.'''
        >>> NLTKWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE
        ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36',
        'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two',
        'of', 'them.', 'Thanks', '.']
        >>> NLTKWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE
        ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36',
        'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two',
        'of', 'them.', 'Thanks', '.']

        :param text: A string with a sentence or sentences.
        :type text: str
        :param convert_parentheses: if True, replace parentheses with PTB symbols,
            e.g. `(` to `-LRB-`. Defaults to False.
        :type convert_parentheses: bool, optional
        :param return_str: If True, return tokens as a space-separated string,
            defaults to False.
        :type return_str: bool, optional
        :return: List of tokens from `text`.
        :rtype: List[str]
        """
        if return_str:
            warnings.warn(
                "Parameter 'return_str' has been deprecated and should no "
                "longer be used.",
                category=DeprecationWarning,
                stacklevel=2,
            )

        for regexp, substitution in self.STARTING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp, substitution in self.PUNCTUATION:
            text = regexp.sub(substitution, text)

        # Handles parentheses.
        regexp, substitution = self.PARENS_BRACKETS
        text = regexp.sub(substitution, text)

        # Optionally convert parentheses.
        if convert_parentheses:
            for regexp, substitution in self.CONVERT_PARENTHESES:
                text = regexp.sub(substitution, text)

        # Handles double dash.
        regexp, substitution = self.DOUBLE_DASHES
        text = regexp.sub(substitution, text)

        # Add extra space to make things easier.
        text = " " + text + " "

        for regexp, substitution in self.ENDING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp in self.CONTRACTIONS2:
            text = regexp.sub(r" \1 \2 ", text)
        for regexp in self.CONTRACTIONS3:
            text = regexp.sub(r" \1 \2 ", text)

        # We are not using CONTRACTIONS4 since
        # they are also commented out in the SED scripts
        # for regexp in self._contractions.CONTRACTIONS4:
        #     text = regexp.sub(r' \1 \2 \3 ', text)

        return text.split()
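
    # Illustrative note (not in the original source): on quoted, contracted
    # input the full pipeline yields Treebank-style tokens, e.g.
    #   NLTKWordTokenizer().tokenize('She said, "I can\'t!"')
    #   -> ['She', 'said', ',', '``', 'I', 'ca', "n't", '!', "''"]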

    def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]:
        r"""
        Returns the spans of the tokens in ``text``.

        Uses the post-hoc ``nltk.tokenize.util.align_tokens`` to return the offset spans.

        >>> from nltk.tokenize import NLTKWordTokenizer
        >>> s = '''Good muffins cost $3.88\nin New (York).  Please (buy) me\ntwo of them.\n(Thanks).'''
        >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23),
        ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38),
        ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59),
        ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)]
        >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected
        True
        >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in',
        ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')',
        ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.']
        >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected
        True

        :param text: A string with a sentence or sentences.
        :type text: str
        :yield: Tuple[int, int]
        """
        raw_tokens = self.tokenize(text)

        # Convert converted quotes back to original double quotes.
        # Do this only if the original text contains double quote(s) or double
        # single-quotes (because '' might be transformed to `` if it is
        # treated as starting quotes).
        if ('"' in text) or ("''" in text):
            # Find double quotes and converted quotes.
            matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)]

            # Replace converted quotes back to double quotes.
            tokens = [
                matched.pop(0) if tok in ['"', "``", "''"] else tok
                for tok in raw_tokens
            ]
        else:
            tokens = raw_tokens

        yield from align_tokens(tokens, text)
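

# A minimal usage sketch (not part of the original module), assuming NLTK is
# installed; ``word_tokenize()`` normally wraps this class after sentence
# splitting with ``sent_tokenize()``.
if __name__ == "__main__":
    _tokenizer = NLTKWordTokenizer()
    _sample = "Good muffins cost $3.88 (roughly 3,36 euros) in New York."
    # Plain tokenization, then character offsets recovered via span_tokenize().
    print(_tokenizer.tokenize(_sample))
    print(list(_tokenizer.span_tokenize(_sample)))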