Commit 9d9579a

add

rajdugad committed May 28, 2024
1 parent b8d6447
Showing 2 changed files with 59 additions and 22 deletions.
22 changes: 0 additions & 22 deletions app.py
@@ -59,28 +59,6 @@ def plot_top_repeated_words(text):
    fig = px.bar(x=words, y=counts, labels={'x': 'Words', 'y': 'Counts'}, title='Top 10 Most Repeated Words')
    st.plotly_chart(fig, use_container_width=True)


from transformers import pipeline

def is_generated_by_ai(paragraph):
    # Load the text classification pipeline
    text_classifier = pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")

    # Classify the input paragraph
    result = text_classifier(paragraph)

    # You can adjust this threshold based on experimentation
    confidence_threshold = 0.7

    # Check if the label is consistent with AI-generated text
    # label = result[0]['label']
    confidence = result[0]['score']
    if confidence >= confidence_threshold:
        st.write("Confidence Score:", result[0]['score'])
        return True
    else:
        return False

st.set_page_config(layout="wide")

st.title("GPT Shield: AI Plagiarism Detector")
59 changes: 59 additions & 0 deletions perplexity.py
@@ -0,0 +1,59 @@
import streamlit as st
from transformers import XLNetTokenizer, XLNetLMHeadModel
from transformers import GPT2Tokenizer, GPT2LMHeadModel
import torch
# NLTK / frequency-analysis helpers (imported here but not yet used below)
import nltk
from nltk.probability import FreqDist
from collections import Counter
from nltk.corpus import stopwords
import string
import numpy as np

# Load XLNet tokenizer and model
# tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
# model = XLNetLMHeadModel.from_pretrained('xlnet-base-cased')

# Load GPT-2 tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')

def calculate_perplexity(text):
    # encode(..., return_tensors='pt') returns input IDs of shape (1, seq_len)
    encoded_input = tokenizer.encode(text, add_special_tokens=False, return_tensors='pt')
    input_ids = encoded_input[0]

    with torch.no_grad():
        outputs = model(encoded_input)
        logits = outputs.logits[0]  # (seq_len, vocab_size)

    # Logits at position i predict token i+1, so shift by one token:
    # perplexity = exp(mean next-token cross-entropy)
    perplexity = torch.exp(torch.nn.functional.cross_entropy(logits[:-1], input_ids[1:]))
    return perplexity.item()

def generate_sample_data():
    # Generate sample data including both human-generated and AI-generated text
    human_text = ["we are a group of 5 engineering students, we aim to create an ai generated text detection project, that accurately marks the difference between human and GPT-generated content. This will help the audience know the origin of content, thus increase the transparency between the users."]

    ai_text = ["As a team of five engineering students, we want to develop an artificial intelligence project for text identification that can distinguish between content produced by GPT and that created by humans. This will improve user transparency by assisting the audience in understanding the source of the content."]

    return human_text, ai_text

def calculate_threshold(human_text, ai_text):
    # Calculate perplexity for human-generated text
    human_perplexities = [calculate_perplexity(text) for text in human_text]

    # Calculate perplexity for AI-generated text (computed for reference;
    # the threshold below is derived from the human samples only)
    ai_perplexities = [calculate_perplexity(text) for text in ai_text]

    # Threshold: mean human perplexity plus one standard deviation
    threshold = np.mean(human_perplexities) + np.std(human_perplexities)

    return threshold

# Generate sample data
human_text, ai_text = generate_sample_data()

# Calculate threshold
threshold = calculate_threshold(human_text, ai_text)

# Display the threshold
st.write("Perplexity Threshold:", threshold)
