import os
import random

import gradio as gr
import requests
from datasets import load_dataset
# Load the jokes dataset and drop rows containing NSFW keywords
dataset = load_dataset("ysharma/short_jokes", split="train")
NSFW_TERMS = ["warning", "porn", "blow", "fuck", "dead", "nsfw", "69", "sex",
              "prostitute", "prostitutes", "pedophiles", "pedophile"]
filtered_dataset = dataset.filter(
    lambda x: not any(term in x["Joke"].lower() for term in NSFW_TERMS)
)
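# Note: the filter shrinks the dataset below the raw row count, so any index
# math downstream should use len(filtered_dataset) rather than a hardcoded
# size (the driver function below does this).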
# Model: Sentence Transformer served via the HF Inference API
API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
HF_TOKEN = os.environ["HF_TOKEN"]  # set as a secret in the Space settings
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
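# Shape of the response (per the Inference API's sentence-similarity task;
# the example values are illustrative):
#   success: a list of floats, one similarity score per candidate sentence,
#            e.g. [0.42, 0.17, ...]
#   failure: a dict such as {"error": "Model ... is currently loading"},
#            which driver_fun checks for below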
# Driver function
def driver_fun(text):
    print("*********** Inside Driver ************")
    # Score the input against a random contiguous window of 4000 jokes.
    # Using len(filtered_dataset) keeps the window in range after filtering
    # (the raw dataset has 231,657 rows; the filtered one has fewer).
    total = len(filtered_dataset)
    window = 4000
    random_val = random.randrange(0, total)
    if random_val < total - window:
        lower_limit = random_val
        upper_limit = random_val + window
    else:
        lower_limit = random_val - window
        upper_limit = random_val
    print(f"lower_limit : upper_limit = {lower_limit} : {upper_limit}")
    dataset_subset = filtered_dataset['Joke'][lower_limit:upper_limit]
    data = query({"inputs": {"source_sentence": text, "sentences": dataset_subset}})
    if 'error' in data:
        print(f"Error is : {data}")
        # The interface has a single output, so return a single error string
        return 'Error in model inference - Run Again Please'
    print(f"type(data) : {type(data)}")
    # One similarity score per joke; pick the best match
    max_match_score = max(data)
    indx_score = data.index(max_match_score)
    joke = dataset_subset[indx_score]
    print(f"Joke is : {joke}")
    return joke
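# Quick smoke test (assumes HF_TOKEN is set in the environment):
#   print(driver_fun("cats"))  # should print a cat-themed joke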
demo = gr.Blocks()
with demo:
    gr.Markdown("<h1><center>Text-to-Joke</center></h1>")
    gr.Markdown(
        """<center>Enter a theme or context and the AI will find a joke for you on it.</center><br><center>If you see the message 'Error in model inference - Run Again Please', just press the button again!</center>
        """)
    with gr.Row():
        with gr.Column():
            in_text = gr.Textbox(label='Enter a theme or context for a joke')
            b1 = gr.Button("Get a Joke")
        with gr.Column():
            out_generated_joke = gr.Textbox(label='Joke returned!')
    b1.click(driver_fun, inputs=[in_text], outputs=[out_generated_joke])
    with gr.Row():
        gr.Markdown(
            """Built using [Sentence Transformers](https://huggingface.co/models?library=sentence-transformers&sort=downloads) and the [**Gradio Blocks API**](https://gradio.app/docs/#blocks).<br><br>A few caveats:<br>1. The joke might occasionally be NSFW. Filters are in place to avoid this, but they are not exhaustive.<br>2. The joke might not match your theme; please bear with the limited capabilities of free open-source ML prototypes.<br>3. Much like real life, sometimes the joke might just not land, haha!<br>4. Repeating this: if you see the message 'Error in model inference - Run Again Please', just press the button again!
            """)
demo.queue(concurrency_count=3)
demo.launch(debug=True)