# NoobAI-NAI-XL-Wildcards / randumbizer.py
# Uploaded by mimizukari — "Additions to sheet & lots of bug fixes for randumbizer.py."
# (Hugging Face commit f15f799, verified)
import random, pyperclip, requests, re
from bs4 import BeautifulSoup
from urllib.parse import quote, urlparse
def _norm(s):
    """Normalize a tag string for fuzzy matching.

    Lowercases, converts the common separator characters (underscore,
    hyphen, en dash, em dash) to spaces, and strips surrounding whitespace.
    """
    # One table-driven pass instead of four chained .replace() calls.
    return s.lower().translate(str.maketrans("_-\u2013\u2014", "    ")).strip()
def extract_tags_danbooru(soup):
    """Collect tag names from a parsed Danbooru post page.

    Returns character, copyright and artist tags in page order, followed
    by the general tags shuffled into a random order.
    """
    categories = ("character-tag-list", "copyright-tag-list", "artist-tag-list", "general-tag-list")
    grouped = {}
    for cat in categories:
        names = []
        tag_list = soup.find("ul", class_=cat)
        if tag_list:
            for item in tag_list.find_all("li"):
                if item.has_attr("data-tag-name"):
                    names.append(item["data-tag-name"])
        grouped[cat] = names
    # Only the general bucket gets randomized; the rest keep page order.
    random.shuffle(grouped["general-tag-list"])
    ordered = []
    for cat in categories:
        ordered.extend(grouped[cat])
    return ordered
def extract_tags_gelbooru(soup):
    """Collect tag names from a parsed Gelbooru post page.

    Tags are grouped by their "tag-type-*" CSS class; character, copyright
    and artist tags come first in page order, general tags last, shuffled.
    """
    order = ("character", "copyright", "artist", "general")
    grouped = {name: [] for name in order}
    for item in soup.find_all("li", class_=lambda x: x and x.startswith("tag-type-")):
        tag_type = item["class"][0].replace("tag-type-", "")
        if tag_type not in grouped:
            continue
        links = item.find_all("a")
        # The second anchor inside the <li> carries the tag text.
        if len(links) >= 2:
            grouped[tag_type].append(links[1].text.strip())
    random.shuffle(grouped["general"])
    return [tag for name in order for tag in grouped[name]]
class ContentGenerator:
    """Random prompt generator backed by two plain-text data files.

    ``artist_file`` holds one artist name per line.  ``sheet_file`` holds
    comma-separated character lines which may start with a count token
    ("1girl"/"1boy"/"1other") followed by name, source and free-form tags.
    Every generated result is printed and copied to the clipboard.
    """

    def __init__(self, artist_file="artist.txt", sheet_file="Sheet1.txt"):
        self.artist_lines = self._load(artist_file)
        self.sheet_lines = self._load(sheet_file)

    @staticmethod
    def _load(path):
        """Return the stripped, non-blank lines of a UTF-8 text file."""
        with open(path, encoding="utf-8") as f:
            return [ln.strip() for ln in f if ln.strip()]

    def _out(self, text):
        """Print *text* and copy it to the clipboard (best effort)."""
        # BUGFIX: a clipboard failure (e.g. headless session) used to crash
        # the program; the text is printed regardless, so swallow the error.
        try:
            pyperclip.copy(text)
        except Exception:
            pass
        print(text, "\n\nContent copied to clipboard.")

    def _pick_artists(self):
        """Return 1-4 distinct random artists joined with commas.

        The draw size is capped at the number of available artists, so a
        short artist file can no longer make random.sample raise ValueError.
        """
        k = random.randint(1, min(4, len(self.artist_lines)))
        return ", ".join(random.sample(self.artist_lines, k))

    @staticmethod
    def _header_len(fields):
        """Number of leading header fields: 3 with a count token, else 2."""
        return 3 if fields[0] in ("1girl", "1boy", "1other") else 2

    def get_random_artists(self):
        """Output and return a random artist combination."""
        res = self._pick_artists()
        self._out(res)
        return res

    def get_anime_screenshot(self):
        """Output a random sheet line with "anime screenshot" spliced in
        right after the header fields."""
        sp = random.choice(self.sheet_lines).split(", ")
        idx = self._header_len(sp)
        res = f"{', '.join(sp[:idx])}, anime screenshot, {', '.join(sp[idx:])}".rstrip(", ")
        self._out(res)
        return res

    def process_character_lines(self, n, tags=None):
        """Output a combined prompt built from up to *n* (max 5) sheet lines.

        When *tags* is given, one line is drawn per tag first, then further
        tag-matching lines, then arbitrary lines until *n* is reached.
        """
        n = min(n, 5)
        available = self.sheet_lines.copy()
        chosen = []
        if tags:
            normal_tags = [_norm(t) for t in tags if t.strip()]
            for t in normal_tags:
                matches = [ln for ln in available if t in _norm(ln)]
                if matches:
                    sel = random.choice(matches)
                    chosen.append(sel)
                    available.remove(sel)
            remaining = n - len(chosen)
            if remaining > 0:
                # BUGFIX: the old pool listed a line once per matching tag and
                # never removed picks from `available`, so the same sheet line
                # could appear several times in the result.  Dedupe + consume.
                pool = [ln for ln in available if any(t in _norm(ln) for t in normal_tags)]
                extra = random.sample(pool, min(remaining, len(pool)))
                chosen.extend(extra)
                for ln in extra:
                    available.remove(ln)
        missing = n - len(chosen)
        if missing > 0 and available:
            # Cap the draw so a short sheet file cannot raise ValueError.
            chosen.extend(random.sample(available, min(missing, len(available))))
        random.shuffle(chosen)
        # Distinct artists, consistent with the other generator methods
        # (previously random.choice in a loop could repeat the same artist).
        return self._format_characters(chosen, self._pick_artists())

    def _format_characters(self, lines, artists):
        """Build "<counts>, <sources>, <artists> | <char> | <char> ..." from
        the selected sheet *lines*; duplicate sources are listed once."""
        counts = {"girl": 0, "boy": 0, "other": 0}
        char_entries = []
        seen, sources = set(), []
        for ln in lines:
            fields = [x for x in ln.split(", ") if x != "no humans"]
            if not fields:
                continue  # defensive: line contained nothing but "no humans"
            if fields[0] in ("1girl", "1boy", "1other"):
                prefix = fields[0]
                counts[prefix[1:]] += 1  # "1girl" -> "girl", etc.
                rest = fields[1:]
            else:
                prefix = None
                counts["other"] += 1
                rest = fields
            # BUGFIX: short lines used to raise IndexError on the name field.
            name = rest[0] if rest else ""
            src = rest[1] if len(rest) > 1 else ""
            tags = [t for t in rest[2:] if t not in ("1girl", "1boy", "1other", "female", "male")]
            if src and src not in seen:
                seen.add(src)
                sources.append(src)
            entry_parts = ([prefix] if prefix else []) + ([name] if name else []) + tags
            char_entries.append(", ".join(entry_parts))
        count_part = ", ".join(f"{v}{k}{'s' if v > 1 else ''}" for k, v in counts.items() if v)
        # Skip empty header pieces so a missing source list can't leave ", ,".
        header = ", ".join(p for p in (count_part, ", ".join(sources), artists) if p)
        res = f"{header} | " + " | ".join(char_entries)
        self._out(res)
        return res

    def process_default(self, term=None):
        """Output a random sheet line (optionally one containing *term*
        verbatim) with random artists inserted after the header fields."""
        pool = [ln for ln in self.sheet_lines if term and term in ln] or self.sheet_lines
        sp = random.choice(pool).split(", ")
        idx = self._header_len(sp)
        res = f"{', '.join(sp[:idx])}, {self._pick_artists()}, {', '.join(sp[idx:])}".rstrip(", ")
        self._out(res)
        return res

    # Tags never emitted in the output (meta/text/layout noise), lowercase.
    _BLOCKED = frozenset([
        "speech bubble", "comic", "english text", "simplified chinese", "translated",
        "artist name", "side along", "subtitles", "text", "copyright text",
        "black border", "furigana", "politics", "english", "translation",
        "russian text", "web address",
    ])
    # Site tags renamed before output.
    _REPLACEMENTS = {
        "v": "peace sign", "double v": "double peace", "|_|": "bar eyes",
        "\\||/": "open \\m/", ":|": "neutral face", ";|": "neutral face",
        "eyepatch bikini": "square bikini", "tachi-e": "character image",
    }
    # Matches count tags such as "1girl" / "2boys"; they sort to the front.
    _COUNT_RX = re.compile(r"\d(?:boy|girl|other)s?$", re.I)

    def fetch_and_process_tags(self, site, term=None):
        """Fetch a random post from Danbooru (site="d") or Gelbooru, extract
        its tags, filter/rename them, and output the result.

        Any network or parsing failure is reported through the normal output
        channel instead of propagating.
        """
        try:
            if site == "d":
                base = "https://danbooru.donmai.us/posts/random"
                url = base + (f"?tags={quote(term.lower().replace(' ', '_'))}" if term else "")
            else:
                url = "https://gelbooru.com/index.php?page=post&s=random"
            r = requests.get(url, allow_redirects=True, timeout=10)
            r.raise_for_status()
            # BUGFIX: the redirect target was fetched a second time even
            # though `r` already holds the final page; parse it directly.
            soup = BeautifulSoup(r.text, "html.parser")
            domain = urlparse(r.url).netloc
            tags = extract_tags_danbooru(soup) if "danbooru" in domain else extract_tags_gelbooru(soup)
            keep = []
            for t in tags:
                # Very short tags (emoticons like ":|") keep their underscores.
                clean = t if len(t) <= 3 else t.replace("_", " ")
                if clean.lower() not in self._BLOCKED:
                    keep.append(self._REPLACEMENTS.get(clean, clean))
            keep.sort(key=lambda x: not self._COUNT_RX.search(x))
            self._out(", ".join(keep))
        except Exception as e:
            self._out(f"Error: {e}")
def main():
    """Interactive loop: print one default prompt, then dispatch commands.

    Commands: 'a' artists, 'c' anime screenshot, 'g' Gelbooru, 'd [tag]'
    Danbooru, '<n> [tags]' multi-character, count shortcuts, or a search term.
    """
    gen = ContentGenerator()
    gen.process_default()
    shortcuts = {"1g": "1girl", "1b": "1boy", "1o": "1other", "2g": "2girl", "2b": "2boy", "2o": "2other"}
    while True:
        raw = input("\nEnter 'a', 'c', '1-5 [+tags]', 'd [tag]', 'g', or search term: ").strip()
        if not raw:
            gen.process_default()
            continue
        lowered = raw.lower()
        if lowered == "a":
            gen.get_random_artists()
        elif lowered == "c":
            gen.get_anime_screenshot()
        elif lowered == "g":
            gen.fetch_and_process_tags("g")
        elif lowered.startswith("d "):
            # The tag keeps the user's original casing.
            gen.fetch_and_process_tags("d", raw[2:].strip())
        elif lowered == "d":
            gen.fetch_and_process_tags("d")
        else:
            counted = re.match(r"^(\d+)\s+(.+)$", raw)
            if counted:
                wanted = [t.strip() for t in counted.group(2).split(",") if t.strip()]
                gen.process_character_lines(int(counted.group(1)), wanted)
            elif re.fullmatch(r"\d+", lowered):
                gen.process_character_lines(int(lowered))
            elif lowered in shortcuts:
                gen.process_default(shortcuts[lowered])
            else:
                gen.process_default(raw)


if __name__ == "__main__":
    main()