# Importing necessary libraries
import torch
from transformers import Pipeline

# Imported so the custom architecture is registered/available alongside the
# pipeline; not referenced directly in this module.
from .modeling_gpt import GPTModelForTextGeneration


class GPT124MTextGenerationPipeline(Pipeline):
    """Text-generation pipeline for the custom 124M-parameter GPT model.

    Implements the four-method contract of ``transformers.Pipeline``:
    ``_sanitize_parameters`` splits call-time kwargs into per-stage kwarg
    dicts, ``preprocess`` tokenizes the prompt, ``_forward`` runs
    ``model.generate``, and ``postprocess`` decodes the generated ids.
    """

    def _sanitize_parameters(self, **kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) dicts.

        All sampling-related kwargs are routed to the forward stage with
        defaults applied; preprocess and postprocess take no parameters.
        """
        preprocess_kwargs = {}
        # Defaults are injected here so model.generate always receives a
        # complete sampling configuration.
        forward_kwargs = {
            "max_length": kwargs.get("max_length", 50),
            "do_sample": kwargs.get("do_sample", True),
            "top_k": kwargs.get("top_k", 50),
            "top_p": kwargs.get("top_p", 0.95),
            "temperature": kwargs.get("temperature", 0.9),
            "device": kwargs.get("device", self.device.type),
        }
        postprocess_kwargs = {}
        return preprocess_kwargs, forward_kwargs, postprocess_kwargs

    def preprocess(self, prompt_text: str, **preprocess_kwargs):
        """Tokenize ``prompt_text`` into a batched ``input_ids`` tensor.

        Raises:
            TypeError: if ``prompt_text`` is not a string.
            ValueError: if ``prompt_text`` is empty.
        """
        # Explicit raises instead of `assert`: assertions are stripped under
        # `python -O`, which would let invalid input through silently.
        if not isinstance(prompt_text, str):
            raise TypeError("prompt_text must be a non-empty string")
        if not prompt_text:
            raise ValueError("prompt_text must be a non-empty string")
        # Encode the input text
        input_ids = self.tokenizer.encode(prompt_text)
        # Convert to a PyTorch tensor with a leading batch dimension of 1.
        input_tensor = torch.tensor([input_ids])
        return {"input_ids": input_tensor}

    def _forward(self, model_inputs, **forward_kwargs):
        """Run generation; sampling parameters come from _sanitize_parameters."""
        return self.model.generate(**model_inputs, **forward_kwargs)

    def postprocess(self, model_output, **postprocess_kwargs):
        """Decode the generated token ids back into text.

        NOTE(review): assumes the custom model's ``generate`` returns a
        sequence the tokenizer can decode directly (not a batch-of-one
        tensor) — confirm against ``GPTModelForTextGeneration.generate``.
        """
        return self.tokenizer.decode(model_output)