text chunking

This commit is contained in:
Karl 2025-09-23 15:49:06 +01:00
parent fd999ec1e6
commit 650369b06f

View File

@@ -8,12 +8,12 @@ class CLIPTextChunker:
     Using a conservative limit of 70 tokens to account for special tokens.
     """
-    def __init__(self, max_tokens: int = 70):
+    def __init__(self, max_tokens: int = 40):
         """
         Initialize the text chunker.
         Args:
-            max_tokens (int): Maximum number of tokens per chunk (default: 70 for CLIP, being conservative)
+            max_tokens (int): Maximum number of tokens per chunk (default: 40 for CLIP, being very conservative)
         """
         self.max_tokens = max_tokens
         self._tokenizer = None
@@ -24,8 +24,9 @@ class CLIPTextChunker:
         if self._tokenizer is None:
             try:
                 from transformers import CLIPTokenizer
-                self._tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
-            except ImportError:
+                # Use a simpler model that should be more reliable
+                self._tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32", local_files_only=False)
+            except Exception as e:
                 # Fallback to character-based estimation if transformers not available
                 self._tokenizer = None
         return self._tokenizer
@@ -42,7 +43,8 @@ class CLIPTextChunker:
         """
         if self.tokenizer is None:
             # Fallback to character count if tokenizer not available
-            return len(text)
+            # Use a very conservative estimate: ~0.6 characters per token for CLIP
+            return int(len(text) * 0.6)

         tokens = self.tokenizer(
             text,
@@ -153,14 +155,14 @@ class CLIPTextChunker:
             # Fallback to regular chunking
             return self.chunk_text(text)

-def chunk_prompt_for_clip(prompt: str, max_tokens: int = 70) -> List[str]:
+def chunk_prompt_for_clip(prompt: str, max_tokens: int = 40) -> List[str]:
     """
     Convenience function to chunk a prompt for CLIP processing.
-    Uses a conservative 70 token limit to be safe.
+    Uses a conservative 40 token limit to be safe.
     Args:
         prompt (str): The prompt to chunk
-        max_tokens (int): Maximum tokens per chunk (default: 70 for safety)
+        max_tokens (int): Maximum tokens per chunk (default: 40 for safety)
     Returns:
         List[str]: List of prompt chunks