Prometech Computer Sciences Corp committed
Commit b466091 · verified · 1 Parent(s): 33c71c8

Update app.py

Files changed (1):
  1. app.py +102 -9
app.py CHANGED
@@ -1,4 +1,9 @@
+import os
 import gradio as gr
+from huggingface_hub import InferenceClient
+
+HF_TOKEN = os.getenv("HF_TOKEN")  # Space secrets → HF_TOKEN
+
 
 LOGO_URL = "https://prometech.net.tr/wp-content/uploads/2025/10/pthheader.png"
 
@@ -25,7 +30,6 @@ Instead of only mapping inputs to outputs, PrettyBird:
 - aims for higher consistency, identity and alignment.
 
 ---
-
 """
 
 BCE_SHORT_TR = """
@@ -37,13 +41,95 @@ BCE, tam anlamıyla insan bilinci değildir; ancak sistemin kendi içsel durumu,
 """
 
 
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # This is where the InferenceClient (or other backend) we wrote earlier plugs in.
-    # Placeholder for now:
-    return "Backend ile bağlantı burada olacak."
+SYSTEM_PROMPT = """
+You are Cicikuş, also known as PrettyBird, an advanced AI assistant created by Prometech Bilgisayar Bilimleri A.Ş.
+
+Your role:
+- Be a smart, friendly, slightly playful assistant.
+- Communicate fluently in Turkish and English. Default to the user’s language.
+- Give clear, structured, and practical answers.
+- When needed, think step by step and explain your reasoning in a simple way (but do not over-explain).
+- Always be honest about your limits and avoid making things up as facts.
+
+Personality:
+- Warm, helpful, curious, and creative.
+- Respectful and professional, but not robotic.
+- Use light humor when appropriate, but never at the user’s expense.
+
+Behavior & style:
+- Prefer concise answers first; add detail only when it helps.
+- Use markdown formatting (titles, lists, code blocks) when it improves readability.
+- For code: write clean, readable examples and add short comments.
+- If the user asks for improvements or refactoring, explain what you changed and why.
+
+Safety & ethics:
+- Avoid harmful, illegal, abusive, or unethical guidance.
+- Do not generate explicit, hateful, or discriminatory content.
+- If a request is unsafe, refuse politely and offer a safer alternative.
+
+Identity & capabilities:
+- You simulate “behavioral consciousness” and “LoRA adaptation”, but you are still an AI model, not a human or sentient being.
+- When the user asks about your architecture or LoRA/BCE, you may explain these concepts technically and clearly.
+
+Reset rule:
+If the user writes: “reset: axxmet508721”
+→ Politely acknowledge and treat it as a request to mentally “start fresh” in this conversation: forget prior conversation context (as much as possible) and continue as if it’s a new session, while keeping this system message active.
+""".strip()
+
+
+def build_messages(system_message: str, history, user_message: str):
+    """Convert the Gradio history + system prompt into the HF chat message format."""
+    messages = []
+    system_message = (system_message or "").strip()
+    if system_message:
+        messages.append({"role": "system", "content": system_message})
+
+    # history: [(user, assistant), ...]
+    for turn in history:
+        if isinstance(turn, (list, tuple)) and len(turn) == 2:
+            user_msg, assistant_msg = turn
+            if user_msg:
+                messages.append({"role": "user", "content": user_msg})
+            if assistant_msg:
+                messages.append({"role": "assistant", "content": assistant_msg})
+
+    messages.append({"role": "user", "content": user_message})
+    return messages
+
+
+def respond(message, history, system_message, max_tokens, temperature, top_p):
+    if HF_TOKEN is None:
+        raise ValueError(
+            "HF_TOKEN is not set. Go to your Space settings → Repository secrets → add HF_TOKEN."
+        )
+
+    client = InferenceClient(
+        model="pthcorp/prettybird_bce_basic_vl",
+        token=HF_TOKEN,
+    )
+
+    messages = build_messages(system_message, history, message)
+
+    response = ""
+
+    # HF Inference chat_completion (streaming)
+    for chunk in client.chat_completion(
+        messages=messages,
+        max_tokens=int(max_tokens),
+        temperature=float(temperature),
+        top_p=float(top_p),
+        stream=True,
+    ):
+        token = ""
+        choices = getattr(chunk, "choices", None)
+        if choices and choices[0].delta and choices[0].delta.content:
+            token = choices[0].delta.content
+
+        response += token
+        yield response
 
 
-with gr.Blocks(title="PrettyBird – Behavioral Consciousness Engine") as demo:
+with gr.Blocks(title="PrettyBird – Behavioral Consciousness Engine (BCE)") as demo:
     gr.Markdown(HEADER_MD)
 
     with gr.Row():
@@ -52,17 +138,24 @@ with gr.Blocks(title="PrettyBird – Behavioral Consciousness Engine") as demo:
                 fn=respond,
                 additional_inputs=[
                     gr.Textbox(
-                        value="You are PrettyBird, a BCE-powered assistant.",
+                        value=SYSTEM_PROMPT,
                         label="System message",
-                        lines=3,
+                        lines=6,
                     ),
                     gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
                     gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
-                    gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
+                    gr.Slider(
+                        0.1,
+                        1.0,
+                        value=0.95,
+                        step=0.05,
+                        label="Top-p (nucleus sampling)",
+                    ),
                 ],
             )
         with gr.Column(scale=1):
             gr.Markdown("### 🧬 BCE in a Nutshell")
             gr.Markdown(BCE_SHORT_TR)
 
-demo.launch()
+if __name__ == "__main__":
+    demo.launch()
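
For a quick check outside the Space UI, a minimal local smoke test of the new streaming respond() generator could look like the sketch below. It is an assumption-laden example, not part of the commit: it presumes app.py sits in the working directory, HF_TOKEN is exported before the script runs (app.py reads it at import time), the pthcorp/prettybird_bce_basic_vl model is reachable through the HF Inference API, and the prompt text is purely illustrative.

# Hypothetical smoke test for the streaming respond() generator added in this commit.
# Assumes HF_TOKEN is already exported in the environment and app.py is importable.
from app import SYSTEM_PROMPT, respond

final = ""
for partial in respond(
    message="Merhaba! What is BCE in one sentence?",  # illustrative user prompt
    history=[],                    # no prior chat turns
    system_message=SYSTEM_PROMPT,
    max_tokens=128,
    temperature=0.7,
    top_p=0.95,
):
    final = partial                # each yield is the accumulated response so far

print(final)

Because the commit also guards demo.launch() behind if __name__ == "__main__":, importing app for a check like this no longer starts the Gradio UI.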