# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Donut.
"""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    r"""
    Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single
    processor.

    [`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
    [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and
    [`~DonutProcessor.decode`] for more information.

    Args:
        image_processor ([`DonutImageProcessor`], *optional*):
            An instance of [`DonutImageProcessor`]. The image processor is a required input.
        tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
            An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
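
    Example (a minimal usage sketch; the checkpoint name and image path below are illustrative, not part of this
    module):

    ```python
    >>> from PIL import Image
    >>> from transformers import DonutProcessor

    >>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
    >>> image = Image.open("document.png").convert("RGB")  # placeholder path
    >>> pixel_values = processor(image, return_tensors="pt").pixel_values
    ```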
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
        [`~AutoImageProcessor.__call__`] and returns its output. If used in the context manager
        [`~DonutProcessor.as_target_processor`], this method forwards all its arguments to DonutTokenizer's
        [`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
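
        Example (a minimal sketch; `processor` and `image` are assumed to be set up as in the class-level example,
        and the target string is illustrative):

        ```python
        >>> # images only: forwarded to the image processor
        >>> pixel_values = processor(image, return_tensors="pt").pixel_values

        >>> # images and text together: the tokenized text is attached as `labels`
        >>> batch = processor(image, text="<s_docvqa>", return_tensors="pt")
        >>> pixel_values, labels = batch["pixel_values"], batch["labels"]
        ```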
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
        docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Donut.
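
        Example (illustrative only, assuming `processor` is an instantiated [`DonutProcessor`]; prefer passing
        `text=` to `__call__` directly, since this context manager is deprecated):

        ```python
        >>> # deprecated pattern
        >>> with processor.as_target_processor():
        ...     labels = processor("<s_docvqa><s_question>What is shown?</s_question>").input_ids

        >>> # recommended replacement
        >>> labels = processor(text="<s_docvqa><s_question>What is shown?</s_question>").input_ids
        ```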
        """
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your image inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.
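
        Example (illustrative; assumes `processor` is a loaded [`DonutProcessor`] so that the tokenizer's added
        vocabulary is available):

        ```python
        >>> processor.token2json("<s_menu><s_nm>Pizza</s_nm></s_menu>")
        {'menu': {'nm': 'Pizza'}}
        ```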
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            # Fields are delimited by `<s_{key}>` ... `</s_{key}>` tags; parse the next opening tag.
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            key_escaped = re.escape(key)

            end_token = re.search(rf"</s_{key_escaped}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unclosed tag: drop it and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(
                    f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE | re.DOTALL
                )
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                # Continue parsing after the closing tag; `<sep/>` separates sibling nodes at this level.
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor