Update tokenizer.py #1093

Closed · wants to merge 9 commits
faster_whisper/tokenizer.py — 60 changes: 41 additions & 19 deletions
@@ -6,6 +6,10 @@
 import tokenizers
 
 
+class TokenizationError(Exception):
+    pass
+
+
 class Tokenizer:
     """Simple wrapper around a tokenizers.Tokenizer."""
 
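Together with the `decode` changes below, the new exception type gives callers a single thing to catch. A minimal caller-side sketch, assuming `tokenizers.Tokenizer.from_pretrained` can fetch Whisper's tokenizer from the Hub and that the `Tokenizer` constructor takes the arguments it does in the current repo (both are assumptions, not part of this diff):

```python
import tokenizers

from faster_whisper.tokenizer import Tokenizer, TokenizationError

# Assumed setup: pull Whisper's tokenizer.json from the Hugging Face Hub.
hf_tokenizer = tokenizers.Tokenizer.from_pretrained("openai/whisper-tiny")
tokenizer = Tokenizer(hf_tokenizer, multilingual=True, task="transcribe", language="en")

try:
    # -1 is not a valid token id and should trip the new validation.
    print(tokenizer.decode([50364, -1]))
except TokenizationError as exc:
    # Invalid values and failures from the underlying tokenizer both land here.
    print(f"decode failed: {exc}")
```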
@@ -87,23 +91,44 @@ def encode(self, text: str) -> List[int]:
         return self.tokenizer.encode(text, add_special_tokens=False).ids
 
     def decode(self, tokens: List[int]) -> str:
-        text_tokens = [token for token in tokens if token < self.eot]
-        return self.tokenizer.decode(text_tokens)
+        try:
+            text_tokens = [token for token in tokens if token < self.eot]
+            if not text_tokens:
+                return ""
+            if any(not isinstance(t, int) or t < 0 for t in text_tokens):
+                raise ValueError("Invalid token values detected")
+            return self.tokenizer.decode(text_tokens)
+        except Exception as e:
+            raise TokenizationError(f"Failed to decode tokens: {e}") from e
 
     def decode_with_timestamps(self, tokens: List[int]) -> str:
-        outputs = [[]]
-
-        for token in tokens:
-            if token >= self.timestamp_begin:
-                timestamp = f"<|{(token - self.timestamp_begin) * 0.02:.2f}|>"
-                outputs.append(timestamp)
-                outputs.append([])
-            else:
-                outputs[-1].append(token)
-
-        return "".join(
-            [s if isinstance(s, str) else self.tokenizer.decode(s) for s in outputs]
-        )
+        try:
+            if not tokens:
+                raise ValueError("Empty token sequence")
+            if any(not isinstance(t, int) or t < 0 for t in tokens):
+                raise ValueError("Invalid token values detected")
+
+            outputs = [[]]
+            for token in tokens:
+                if token >= self.timestamp_begin:
+                    timestamp = f"<|{(token - self.timestamp_begin) * 0.02:.2f}|>"
+                    outputs.append(timestamp)
+                    outputs.append([])
+                else:
+                    outputs[-1].append(token)
+
+            decoded = [
+                s if isinstance(s, str) else self.tokenizer.decode(s) for s in outputs
+            ]
+
+            if not any(decoded) and not any(isinstance(s, str) for s in outputs):
+                return ""
+
+            return "".join(decoded)
+        except Exception as e:
+            raise TokenizationError(
+                f"Failed to decode tokens with timestamps: {e}"
+            ) from e
 
     @cached_property
     def non_speech_tokens(self) -> Tuple[int]:
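A note on the `<|...|>` markers preserved above: a timestamp token `t` encodes `(t - timestamp_begin) * 0.02` seconds, i.e. one 20 ms step per token. A self-contained sketch of that arithmetic; the 50364 value is assumed to be the `<|0.00|>` token id in Whisper's multilingual vocabulary and is hard-coded here only for illustration (in the class above it comes from the tokenizer):

```python
# Illustrative only: timestamp_begin is a property of the Tokenizer class above.
TIMESTAMP_BEGIN = 50364  # assumed id of <|0.00|> in Whisper's multilingual vocab

def timestamp_marker(token: int) -> str:
    # Each step above TIMESTAMP_BEGIN is worth 0.02 s (20 ms).
    return f"<|{(token - TIMESTAMP_BEGIN) * 0.02:.2f}|>"

print(timestamp_marker(50364))  # <|0.00|>
print(timestamp_marker(50439))  # <|1.50|>, i.e. 75 steps * 0.02 s
```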
@@ -205,10 +230,7 @@ def split_tokens_on_spaces(
         return words, word_tokens
 
 
-_TASKS = (
-    "transcribe",
-    "translate",
-)
+_TASKS = ("transcribe", "translate")
 
 _LANGUAGE_CODES = (
     "af",