from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from keras.models import load_model
import pickle
import numpy as np
from keras.preprocessing.sequence import pad_sequences

app = FastAPI()

max_sequence_length = 180

# Load the trained model
try:
    model = load_model('word_prediction_model.h5')
except Exception as e:
    print(f"Error loading the model: {str(e)}")
    model = None

# Load the tokenizer
try:
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
except Exception as e:
    print(f"Error loading the tokenizer: {str(e)}")
    tokenizer = None


class PredictionRequest(BaseModel):
    input_phrase: str
    top_n: int = 5


class PredictionResponse(BaseModel):
    top_words: list
    top_probabilities: list


@app.post("/predict", response_model=PredictionResponse)
def predict(request: PredictionRequest):
    if tokenizer is None or model is None:
        raise HTTPException(status_code=500, detail="Model or tokenizer not loaded")

    input_phrase = request.input_phrase
    top_n = request.top_n

    # Tokenize the input phrase and pad it to the length the model expects
    input_sequence = tokenizer.texts_to_sequences([input_phrase])[0]
    padded_sequence = pad_sequences([input_sequence], maxlen=max_sequence_length - 1, padding='pre')

    # Predict next-word probabilities and keep the top_n most likely words
    predicted_probs = model.predict(padded_sequence)[0]
    top_indices = predicted_probs.argsort()[-top_n:][::-1]
    top_words = [tokenizer.index_word[index] for index in top_indices]
    top_probabilities = predicted_probs[top_indices]

    return {"top_words": top_words, "top_probabilities": top_probabilities.tolist()}


@app.get("/")
def read_root():
    return {"message": "Hello from MDS Darija Prediction Team!"}