import streamlit as st
from audio_recorder_streamlit import audio_recorder
from transformers import pipeline

# Cache both pipelines so Streamlit does not reload the models on every rerun;
# this also ensures pipe2 is defined before it is first used.
@st.cache_resource
def load_pipelines():
    asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
    llm = pipeline("text-generation", model="Cognitive-Lab/LLama3-Gaja-Hindi-8B-v0.1")
    return asr, llm

pipe, pipe2 = load_pipelines()
# Record audio in the browser; audio_recorder() returns WAV bytes,
# or None until the user has recorded something.
recorded_audio = audio_recorder()
if recorded_audio:
    # The recorder produces WAV data, so save it with a matching extension.
    audio_file = "audio.wav"
    with open(audio_file, "wb") as f:
        f.write(recorded_audio)

    # Transcribe the recording with Whisper.
    out = pipe(audio_file)
    transcribed_text = out["text"]
    st.write(transcribed_text)
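    # Whisper pipelines transcribe roughly 30 s of audio per pass; for longer
    # recordings, enable chunking when building the pipeline, e.g.
    # pipeline("automatic-speech-recognition", model="openai/whisper-tiny", chunk_length_s=30).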
    # Send the transcription to the chat model and display its output.
    messages = [
        {"role": "user", "content": transcribed_text},
    ]
    out2 = pipe2(messages)
    st.write(out2)
# Alternatively, accept typed input and send it to the same model.
text = st.text_area("enter some text")
if text:
    messages = [
        {"role": "user", "content": text},
    ]
    out2 = pipe2(messages)
    st.write(out2)
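# When a text-generation pipeline is given chat messages, it returns a list of
# dicts whose "generated_text" holds the whole conversation; a sketch, assuming
# that chat-style return format, for showing only the model's reply:
#   st.write(out2[0]["generated_text"][-1]["content"])
#
# Launch the app with (assuming this script is saved as app.py):
#   streamlit run app.py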