limloop committed
Commit 747a9f2 · verified · 1 Parent(s): d41c017

Update README.md

Files changed (1)
  1. README.md +7 -7
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 license: apache-2.0
 datasets:
-- loim/characters_dialogs
+- limloop/characters_dialogs
 - IlyaGusev/gpt_roleplay_realm
 - tamohannes/llm-roleplay
 - radce/communication_dataset
@@ -56,8 +56,8 @@ Mamba2Config(
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("loim/whiff-mamba2-20M")
-model = AutoModelForCausalLM.from_pretrained("loim/whiff-mamba2-20M")
+tokenizer = AutoTokenizer.from_pretrained("limloop/whiff-mamba2-20M")
+model = AutoModelForCausalLM.from_pretrained("limloop/whiff-mamba2-20M")
 
 def chat(messages, temp=0.5):
     inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
@@ -96,7 +96,7 @@ print(response)
 
 Sources:
 
-- `loim/characters_dialogs`
+- `limloop/characters_dialogs`
 - `IlyaGusev/gpt_roleplay_realm`
 - `tamohannes/llm-roleplay`
 - `radce/communication_dataset`
@@ -154,8 +154,8 @@ Mamba2Config(
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("loim/whiff-mamba2-20M")
-model = AutoModelForCausalLM.from_pretrained("loim/whiff-mamba2-20M")
+tokenizer = AutoTokenizer.from_pretrained("limloop/whiff-mamba2-20M")
+model = AutoModelForCausalLM.from_pretrained("limloop/whiff-mamba2-20M")
 
 def chat(messages, temp=0.5):
     inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
@@ -194,7 +194,7 @@ print(response)
 
 Sources:
 
-- `loim/characters_dialogs`
+- `limloop/characters_dialogs`
 - `IlyaGusev/gpt_roleplay_realm`
 - `tamohannes/llm-roleplay`
 - `radce/communication_dataset`
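
The README snippet touched by this diff is cut off right after the `apply_chat_template` call. A minimal sketch of how the renamed repo id is used end to end might look like the following; the loading lines and repo id `limloop/whiff-mamba2-20M` come from the updated README, while the generation body (`max_new_tokens`, sampling flags) and the example message are illustrative assumptions rather than the file's exact code.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Repo id taken from the updated README; everything past apply_chat_template
# is an assumed completion of the truncated snippet, not the file's exact code.
tokenizer = AutoTokenizer.from_pretrained("limloop/whiff-mamba2-20M")
model = AutoModelForCausalLM.from_pretrained("limloop/whiff-mamba2-20M")

def chat(messages, temp=0.5):
    # Render the conversation with the model's chat template and tokenize it.
    inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
    outputs = model.generate(
        inputs,
        max_new_tokens=256,   # assumed cap, not specified in the diff context
        do_sample=True,
        temperature=temp,
    )
    # Decode only the newly generated tokens, dropping the prompt.
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

messages = [{"role": "user", "content": "Hi! Who are you?"}]  # illustrative message
response = chat(messages)
print(response)
```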