without algoI.txt
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch

# Initialize the FastAPI app
app = FastAPI()

# Load the tokenizer and model from the Hugging Face Transformers library
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")


class QuestionContext(BaseModel):
    question: str
    context: str


@app.get("/")
async def read_root():
    """A simple root path to ensure the API is working."""
    return {"Hello": "World"}


@app.post("/ask")
async def answer_question(item: QuestionContext):
    """POST endpoint to ask a question with provided context and get an answer."""
    inputs = tokenizer.encode_plus(item.question, item.context, add_special_tokens=True, return_tensors="pt")
    input_ids = inputs["input_ids"].tolist()[0]

    # Get model output; no_grad avoids building a gradient graph during inference
    with torch.no_grad():
        outputs = model(**inputs)
    answer_start_scores = outputs.start_logits
    answer_end_scores = outputs.end_logits

    # Find the positions of the start and end of the answer span in the input
    answer_start = torch.argmax(answer_start_scores).item()
    answer_end = torch.argmax(answer_end_scores).item() + 1

    # Convert the answer tokens back to the answer string
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
    return {"answer": answer}