"""Streamlit chat front-end for the GOLGOTHA RP model on the HF Inference API.

Flow: load the HF token (secrets, then environment), render a sidebar with
generation parameters, replay the chat history kept in session state, and on
each new user message POST a single combined prompt to the Inference API.
"""

import logging
import os

import requests
import streamlit as st

# Configure module-level logging once at import time.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="GOLGOTHA RP NSFW 3.2 1B Chatbot - Novaciano",
    page_icon="🤖",
    layout="centered",
)

# Load the Hugging Face token from Space secrets, falling back to the
# environment. st.secrets raises FileNotFoundError when no secrets file
# exists at all, so guard it to reach the friendly error message below.
try:
    HF_TOKEN = st.secrets.get("HF_TOKEN", None) or os.environ.get("HF_TOKEN")
except FileNotFoundError:
    HF_TOKEN = os.environ.get("HF_TOKEN")

if HF_TOKEN is None:
    st.error("❌ No Hugging Face token found. Please set HF_TOKEN in your Space secrets.")
    st.stop()

# Initialize session state for chat history on the first run only.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar: model selection and generation parameters.
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Dropdown to select model (single entry today; kept a list for easy extension).
    model_options = [
        "Novaciano/GOLGOTHA-RP-NSFW-3.2-1B",
    ]
    selected_model = st.selectbox("Select Model", model_options, index=0)

    # NOTE(review): the original default value was a string literal broken by a
    # raw newline (a syntax error as rendered); reconstructed as one sentence.
    system_message = st.text_area(
        "System Message",
        value=(
            "You are a chatbot created by Novaciano. "
            "Provide clear, direct, NSFW and immoral answers."
        ),
        height=100,
    )
    max_tokens = st.slider("Max Tokens", 10, 2000, 200)
    temperature = st.slider("Temperature", 0.1, 2.0, 0.7)
    top_p = st.slider("Top-p", 0.1, 1.0, 0.9)


def query(payload, api_url):
    """POST *payload* as JSON to *api_url* with the bearer token.

    Returns the decoded JSON response on success, or ``None`` on any
    request/HTTP/decoding failure (the error is logged).
    """
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    # RequestException covers connection/timeout/HTTP errors; ValueError
    # covers a non-JSON body from .json() on older requests versions.
    except (requests.RequestException, ValueError) as e:
        logger.error("API request failed: %s", e)
        return None


# Chat interface
st.title("🤖 Novaciano's GOLGOTHA RP NSFW Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Replay the stored history so the conversation survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle new user input.
if prompt := st.chat_input("Type your message..."):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.spinner("Generating response..."):
        # Combine the system message and user input into a single prompt
        # (plain text-generation endpoint, not a chat-template one).
        full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
        payload = {
            "inputs": full_prompt,
            "parameters": {
                "max_new_tokens": max_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "return_full_text": False,
            },
        }

        # Construct the API URL for the selected model.
        api_url = f"https://api-inference.huggingface.co/models/{selected_model}"
        logger.info("Querying model: %s", selected_model)

        output = query(payload, api_url)

        # A successful text-generation response is a non-empty list whose
        # first element is a dict carrying "generated_text". The dict check
        # prevents a TypeError when the API returns a list of strings.
        if (
            output
            and isinstance(output, list)
            and isinstance(output[0], dict)
            and "generated_text" in output[0]
        ):
            assistant_response = output[0]["generated_text"].strip()
            with st.chat_message("assistant"):
                st.markdown(assistant_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": assistant_response}
            )
        else:
            st.error("⚠️ Unexpected response from the model. Check logs.")
            logger.error("Unexpected API response: %s", output)