# mahjong_vision / live_feed.py
# %%
import time
import sys
import cv2
from PIL import Image, ImageGrab
import pygetwindow as gw
import numpy as np
import pyautogui
from transformers import pipeline
from tools import make_prediction
from tools import translate_to_vision
from tools import create_reverse_translation_dict
import keyboard
import torch
import torch.nn as nn
from safetensors.torch import load_file
from PyQt5.QtWidgets import QApplication, QLabel, QVBoxLayout, QWidget
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QFont
# Transparent overlay window
class TransparentOverlay(QWidget):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # Window flags
        self.setWindowFlags(
            Qt.WindowStaysOnTopHint |   # always on top
            Qt.FramelessWindowHint |    # no window frame
            Qt.Tool                     # do not show in the taskbar
        )
        self.setAttribute(Qt.WA_TranslucentBackground)  # transparent background
        # Position and size (top-left corner of the screen)
        self.setGeometry(10, 10, 400, 150)
        # Layout
        layout = QVBoxLayout()
        layout.setContentsMargins(10, 10, 10, 10)
        # Hand label
        self.hand_label = QLabel("Hand: waiting for tiles to be dealt in Mahjong Soul...")
        self.hand_label.setFont(QFont("Yu Gothic UI", 12, QFont.Bold))
        self.hand_label.setStyleSheet("""
            QLabel {
                color: white;
                background-color: rgba(0, 0, 0, 180);
                padding: 8px;
                border-radius: 5px;
            }
        """)
        layout.addWidget(self.hand_label)
        # Recommended-discard label
        self.recommendation_label = QLabel("Recommended: -")
        self.recommendation_label.setFont(QFont("Yu Gothic UI", 16, QFont.Bold))
        self.recommendation_label.setStyleSheet("""
            QLabel {
                color: #FFD700;
                background-color: rgba(0, 0, 0, 180);
                padding: 10px;
                border-radius: 5px;
                border: 2px solid #FFD700;
            }
        """)
        layout.addWidget(self.recommendation_label)
        # Status label
        self.status_label = QLabel("✓ Ready | Space: auto-click | refresh: every 0.2 s")
        self.status_label.setFont(QFont("Yu Gothic UI", 9))
        self.status_label.setStyleSheet("""
            QLabel {
                color: #00FF00;
                background-color: rgba(0, 0, 0, 150);
                padding: 5px;
                border-radius: 3px;
            }
        """)
        layout.addWidget(self.status_label)
        self.setLayout(layout)

    def update_hand(self, tiles):
        """Update the hand display."""
        if tiles:
            self.hand_label.setText(f"Hand: {' '.join(tiles)}")

    def update_recommendation(self, tile):
        """Update the recommended-discard display."""
        if tile:
            self.recommendation_label.setText(f"Recommended: {tile}")
            self.recommendation_label.setStyleSheet("""
                QLabel {
                    color: #FF4444;
                    background-color: rgba(0, 0, 0, 200);
                    padding: 10px;
                    border-radius: 5px;
                    border: 3px solid #FF4444;
                }
            """)
        else:
            self.recommendation_label.setText("Recommended: -")
            self.recommendation_label.setStyleSheet("""
                QLabel {
                    color: #FFD700;
                    background-color: rgba(0, 0, 0, 180);
                    padding: 10px;
                    border-radius: 5px;
                    border: 2px solid #FFD700;
                }
            """)
class ImprovedNN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(ImprovedNN, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, 1024),
            nn.BatchNorm1d(1024),
            nn.PReLU(),
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.PReLU(),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.PReLU(),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.PReLU(),
            nn.Linear(128, 64),
            nn.BatchNorm1d(64),
            nn.PReLU(),
            nn.Linear(64, output_dim)
        )

    def forward(self, x):
        return self.model(x)
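# Illustrative sketch (comments only, not executed): the discard model maps the
# flattened 6x34 board encoding (204 inputs) to one score per tile kind (34 outputs).
# How the 6x34 tensor is flattened is assumed to be handled inside tools.make_prediction.
#
#   model = ImprovedNN(input_dim=204, output_dim=34).eval()  # eval so BatchNorm uses running stats
#   with torch.no_grad():
#       scores = model(torch.zeros((1, 204)))  # -> tensor of shape (1, 34)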
if torch.cuda.is_available():
    print("✓ CUDA available")
    device = torch.device("cuda")
else:
    print("⚠ CUDA not available - running on CPU")
    device = torch.device("cpu")
# Load the tile-recognition model (prefer the local cache)
print("Loading models...")
import os

local_model_path = "./vision_transformer_local"
model_name = "krmin/mahjong_soul_vision"

# Use the local copy if it exists, otherwise download from Hugging Face
if os.path.exists(local_model_path):
    print(f"  Using local model: {local_model_path}")
    pipe = pipeline("image-classification", model=local_model_path, device=device)
else:
    print(f"  Downloading from Hugging Face: {model_name}")
    print("  The first download takes about 30-60 seconds")
    pipe = pipeline("image-classification", model=model_name, device=device)
    # Save a local copy after downloading
    try:
        print("  Saving a local copy for next time...")
        pipe.model.save_pretrained(local_model_path)
        pipe.feature_extractor.save_pretrained(local_model_path)
        print(f"  ✓ Saved locally: {local_model_path}")
    except Exception as e:
        print(f"  ⚠ Failed to save locally: {e}")
print("  ✓ Vision Transformer loaded")
# Load the discard-prediction model
input_dim = 204
output_dim = 34
discard_model = ImprovedNN(input_dim=input_dim, output_dim=output_dim)
model_path = "model.safetensors"
state_dict = load_file(model_path)
discard_model.load_state_dict(state_dict)
print("  ✓ Discard-prediction model loaded")
discard_model.to(device)
discard_model.eval()  # inference mode so the BatchNorm layers use running statistics
global_debug = False
# Global variables
window = None
window_title = "雀魂"  # Mahjong Soul window title (kept in Japanese so the window lookup matches)
# Hand-tile coordinates in Mahjong Soul (verified against the actual screen)
# Window-relative coordinates: x=105, y=759, width=627, height=84
PLAYER_HAND_X = 105
PLAYER_HAND_Y = 759
PLAYER_HAND_W = 627
PLAYER_HAND_H = 84
PLAYER_PON_X = PLAYER_HAND_X + PLAYER_HAND_W
PLAYER_PON_Y = PLAYER_HAND_Y
PLAYER_PON_W = 200
PLAYER_PON_H = 84
PLAYER_THROW_X = 790
PLAYER_THROW_Y = 1048 - 490
PLAYER_THROW_W = 350
PLAYER_THROW_H = 250
RIGHT_PLAYER_THROW_X = 1125
RIGHT_PLAYER_THROW_Y = 1048 - 715
RIGHT_PLAYER_THROW_W = 280
RIGHT_PLAYER_THROW_H = 220
LEFT_PLAYER_THROW_X = 530
LEFT_PLAYER_THROW_Y = 1048 - 715
LEFT_PLAYER_THROW_W = 280
LEFT_PLAYER_THROW_H = 220
OPPOSITE_PLAYER_THROW_X = 820
OPPOSITE_PLAYER_THROW_Y = 1048 - 850
OPPOSITE_PLAYER_THROW_W = 300
OPPOSITE_PLAYER_THROW_H = 160
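# Note: all regions above are window-relative pixel rectangles measured against one
# particular Mahjong Soul client layout (the "1048 - ..." offsets suggest a client
# area roughly 1048 px tall). On a different window size or UI scale these values
# will likely need to be re-measured, e.g. using the 'd' debug capture below.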
use_gpu = torch.cuda.is_available()
ASPECT_RATIO_1 = 0.6
ASPECT_RATIO_2 = 1.5
BUFFER = 0.1
if use_gpu:
    print("Using CUDA")

def nothing(x):
    # No-op callback for the OpenCV trackbars
    pass
# Get the window by its title. Adjust this to the title of the window you want to capture.
print("Searching for the Mahjong Soul window...")
try:
    window = gw.getWindowsWithTitle(window_title)[0]
    print(f"  ✓ Window found: {window.title}")
except IndexError:
    print(f"  ✗ Error: no window with the title '{window_title}' was found")
    print("  Start Mahjong Soul and try again")
    raise Exception(f"No window with title '{window_title}' found.")
if global_debug:
    cv2.namedWindow('Trackbars')
    cv2.createTrackbar('Lower', 'Trackbars', 0, 255, nothing)
    cv2.createTrackbar('Upper', 'Trackbars', 255, 255, nothing)
def analyze_region(frame, x, y, w, h, lower=100, upper=255, debug=False):
    if global_debug:
        lower = cv2.getTrackbarPos('Lower', 'Trackbars')
        upper = cv2.getTrackbarPos('Upper', 'Trackbars')
    # Extract the region
    roi = frame[y:y + h, x:x + w]
    # Guard against an empty ROI (e.g. coordinates outside the captured frame)
    if roi is None or roi.size == 0:
        print(f"ROI is empty for coordinates x: {x}, y: {y}, w: {w}, h: {h}")
        return [], [], []
    roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    _, roi_threshed = cv2.threshold(roi_gray, lower, upper, cv2.THRESH_BINARY)
    rois = []        # collected tile crops (regions of interest)
    boxes_temp = []  # corresponding bounding-box coordinates
    contours, _ = cv2.findContours(roi_threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        area = cv2.contourArea(contour)
        # Minimum area lowered to 200 so small tiles are detected as well
        if area > 200:
            x_rect, y_rect, w_rect, h_rect = cv2.boundingRect(contour)
            aspect_ratio = w_rect / h_rect
            # Note: the trailing "or True" effectively disables the aspect-ratio filter
            if (ASPECT_RATIO_1 - BUFFER <= aspect_ratio <= ASPECT_RATIO_1 + BUFFER) or \
               (ASPECT_RATIO_2 - BUFFER <= aspect_ratio <= ASPECT_RATIO_2 + BUFFER) or True:
                # Extract the tile crop for classification
                roi_tile = roi[y_rect:y_rect + h_rect, x_rect:x_rect + w_rect]
                roi_img = Image.fromarray(roi_tile)
                if debug:
                    cv2.imshow("tile_watcher", roi_tile)
                rois.append(roi_img)
                boxes_temp.append([x + x_rect, y + y_rect, x + x_rect + w_rect, y + y_rect + h_rect])  # add the region offset
    boxes = []
    probs = []
    labels = []
    if rois:
        for idx, roi_img in enumerate(rois):
            predictions = pipe(roi_img)
            label = predictions[0]['label']
            prob = predictions[0]['score']
            # Keep only detections above 85% confidence (the Vision Transformer reports ~99.7% accuracy)
            if prob > 0.85:
                boxes.append(boxes_temp[idx])
                probs.append(prob)
                labels.append(label)
                # Debug output is limited to the hand (discards are not printed)
                # print(f"Recognized: {label} ({prob*100:.1f}%)", end=" ")
    return boxes, labels, probs
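# Example usage (illustrative, not executed): scan the hand region of a captured frame.
#
#   frame = cv2.cvtColor(np.array(ImageGrab.grab()), cv2.COLOR_RGB2BGR)
#   boxes, labels, probs = analyze_region(frame, PLAYER_HAND_X, PLAYER_HAND_Y,
#                                         PLAYER_HAND_W, PLAYER_HAND_H)
#   # boxes are frame-absolute [x1, y1, x2, y2]; labels/probs come from the ViT pipeline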
def translate_boxes_to_tensors(all_boxes):
    """Encode the recognized board state as a 6x34 tensor.

    Row 0: the player's hand, row 1: the player's melds,
    rows 2-5: the discard pools of self, right, opposite and left players.
    Each column counts how many copies of that tile kind were seen.
    """
    tensor_array = torch.zeros((6, 34))

    # Map a tile label to its index in the 34-tile encoding
    def label_to_index(label):
        return create_reverse_translation_dict().get(label, "Invalid label")

    # Player's hand
    player_hand_labels = all_boxes.get("player_hand_labels", [])
    for label in player_hand_labels:
        tensor_array[0, label_to_index(label)] += 1
    # Player's melds
    player_pon_labels = all_boxes.get("player_pon_labels", [])
    for label in player_pon_labels:
        tensor_array[1, label_to_index(label)] += 1
    # Discard pools of the four players
    pools_labels = [
        all_boxes.get("player_throw_labels", []),
        all_boxes.get("right_player_throw_labels", []),
        all_boxes.get("opposite_player_throw_labels", []),
        all_boxes.get("left_player_throw_labels", [])
    ]
    for i, pool_labels in enumerate(pools_labels):
        for label in pool_labels:
            tensor_array[2 + i, label_to_index(label)] += 1
    return tensor_array
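# Illustrative example (tile labels below are hypothetical placeholders, not executed;
# the real label strings come from create_reverse_translation_dict in tools):
#
#   t = translate_boxes_to_tensors({
#       "player_hand_labels": ["1m", "1m", "9p"],  # two copies of one tile kind, one of another
#       "player_pon_labels": ["5s", "5s", "5s"],   # a melded triplet
#   })
#   # t[0] counts the hand, t[1] the melds, rows t[2:6] stay zero here.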
def draw_transparent_box(image, box, color, alpha):
    # Draw a semi-transparent filled rectangle onto the image
    overlay = image.copy()
    cv2.rectangle(overlay, (box[0], box[1]), (box[2], box[3]), color, -1)
    cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)
def click_hand_tile(all_boxes, frame):
    player_hand_boxes = all_boxes.get("player_hand", [])
    player_hand_labels = all_boxes.get("player_hand_labels", [])
    player_pon_boxes = all_boxes.get("player_pon", [])
    player_pon_labels = all_boxes.get("player_pon_labels", [])
    player_throw_boxes = all_boxes.get("player_throw", [])
    player_throw_labels = all_boxes.get("player_throw_labels", [])
    right_player_throw_boxes = all_boxes.get("right_player_throw", [])
    right_player_throw_labels = all_boxes.get("right_player_throw_labels", [])
    left_player_throw_boxes = all_boxes.get("left_player_throw", [])
    left_player_throw_labels = all_boxes.get("left_player_throw_labels", [])
    opposite_player_throw_boxes = all_boxes.get("opposite_player_throw", [])
    opposite_player_throw_labels = all_boxes.get("opposite_player_throw_labels", [])
    if not player_hand_boxes or not player_hand_labels:
        return
    translated_tensor = translate_boxes_to_tensors(all_boxes)
    # make_prediction is expected to return the raw per-tile probabilities
    probs = make_prediction(discard_model, translated_tensor)
    # Sort the probabilities in descending order and keep the indices
    sorted_indices = probs.argsort(descending=True)
    sorted_indices = sorted_indices.squeeze()
    guess_count = 0
    # Walk through the predictions from most to least likely
    for idx in sorted_indices:
        idx = idx.item()
        predicted_tile_str = translate_to_vision(idx)
        # print("Player hand labels:", player_hand_labels)
        # print("Top 5 predicted indices:", sorted_indices[:5])
        # print("Top 5 predicted tiles:", [translate_to_vision(idx) for idx in sorted_indices[:5]])
        guess_count = guess_count + 1
        if predicted_tile_str in player_hand_labels:
            # Found a predicted tile that is actually in the hand: act on it and stop
            tile_idx = player_hand_labels.index(predicted_tile_str)
            box = player_hand_boxes[tile_idx]
            label = player_hand_labels[tile_idx]
            # Report the selected tile
            print(f"Selected tile: {label} at {guess_count} guess.")
            startX, startY, endX, endY = box
            # Compute the centre of the box in screen coordinates
            center_x = startX + (endX - startX) // 2 + window.left  # add the window's left coordinate
            center_y = startY + (endY - startY) // 2 + window.top   # add the window's top coordinate
            # Click only while the Space key is held down
            if keyboard.is_pressed('space'):
                pyautogui.moveTo(center_x, center_y)
                pyautogui.sleep(0.3)
                pyautogui.click(center_x, center_y)
            else:
                # Otherwise just highlight the recommended tile on the frame
                draw_transparent_box(frame, (startX, startY, endX, endY), (0, 0, 255), 0.3)
            break
    else:
        print("No matching tile found in player_hand_labels")
def get_color_based_on_probability(prob):
    # Map the probability to a BGR colour: prob=1 gives pure green,
    # lower probabilities shift toward blue
    return (int(255 * (1 - prob)), int(255 * prob), 0)
def draw_boxes(frame, boxes, labels, probs):
    for box, label, prob in zip(boxes, labels, probs):
        startX, startY, endX, endY = box
        color = get_color_based_on_probability(prob)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        text = f"{label} ({prob:.2f})"  # shows the label and probability, e.g. "label (0.99)"
        # Text background
        (text_width, text_height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
        cv2.rectangle(frame, (startX, startY - 20), (startX + text_width, startY - 20 + text_height), (0, 0, 0), -1)
        # Draw the text in white
        cv2.putText(frame, text, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
# Working copies of the meld-region coordinates, adjusted at runtime as melds appear
PLAYER_PON_X_TEMP = PLAYER_PON_X
PLAYER_PON_W_TEMP = PLAYER_PON_W

# Initialize the PyQt5 application
print("Initializing the UI...")
app = QApplication(sys.argv)
overlay = TransparentOverlay()
overlay.show()
print("  ✓ Transparent overlay window shown")

# Globals holding the recommendation state
current_recommendation = None
previous_hand_count = 0  # number of hand tiles seen on the previous frame

print("\n" + "="*60)
print("Ready!")
print("="*60)
print("The hand and the recommended discard are shown in the transparent window (top left)")
print("Space key: auto-click the recommended tile")
print("D key: save a screen capture for debugging")
print("Binarization threshold: 100 (lower) - detects bright tiles")
print("Recognition threshold: 85% - only high-confidence detections are used")
print("Close the window to quit")
print("="*60 + "\n")
def process_frame():
    """Process one captured frame and update the overlay UI."""
    global PLAYER_PON_X_TEMP, PLAYER_PON_W_TEMP, PLAYER_HAND_W_TEMP, current_recommendation, previous_hand_count, window

    # Re-query the window position every frame so the capture follows the window if it moves
    try:
        old_window = window
        window = gw.getWindowsWithTitle(window_title)[0]
        # Debug: report when the window has moved
        if old_window and (old_window.left != window.left or old_window.top != window.top):
            print(f"\nWindow moved: ({old_window.left}, {old_window.top}) → ({window.left}, {window.top})")
    except IndexError:
        print("\rMahjong Soul window not found          ", end="", flush=True)
        return

    screenshot = ImageGrab.grab(bbox=(window.left, window.top, window.right, window.bottom), all_screens=True)
    frame = np.array(screenshot)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

    # Debug: press 'd' to save the current capture
    if keyboard.is_pressed('d'):
        timestamp = int(time.time())
        filename = f"debug_capture_{timestamp}.png"
        cv2.imwrite(filename, frame)
        print(f"\n📷 Capture saved: {filename} (coordinates: left={window.left}, top={window.top}, right={window.right}, bottom={window.bottom})")
        # Also save the hand region
        roi = frame[PLAYER_HAND_Y:PLAYER_HAND_Y + PLAYER_HAND_H, PLAYER_HAND_X:PLAYER_HAND_X + PLAYER_HAND_W]
        cv2.imwrite(f"debug_hand_{timestamp}.png", roi)
        print(f"📷 Hand region saved: debug_hand_{timestamp}.png")
        time.sleep(0.5)  # avoid saving repeatedly while the key is held

    # Analyze the regions and collect boxes, labels and probabilities
    player_pon_boxes, player_pon_labels, player_pon_probs = analyze_region(frame, PLAYER_PON_X_TEMP, PLAYER_PON_Y,
                                                                           PLAYER_PON_W_TEMP, PLAYER_PON_H)
    # Widen the meld region and shrink the hand region for every melded tile detected
    pon_width_increase = len(player_pon_labels) * 60
    PLAYER_HAND_W_TEMP = PLAYER_HAND_W - pon_width_increase
    PLAYER_PON_X_TEMP = PLAYER_PON_X - pon_width_increase
    PLAYER_PON_W_TEMP = PLAYER_PON_W + pon_width_increase

    player_hand_boxes, player_hand_labels, player_hand_probs = analyze_region(frame, PLAYER_HAND_X, PLAYER_HAND_Y,
                                                                              PLAYER_HAND_W_TEMP, PLAYER_HAND_H)
    # Print the recognized hand tiles to the console
    if len(player_hand_labels) > 0:
        print("\nHand detected: ", end="")
        for i, label in enumerate(player_hand_labels):
            print(f"{label}({player_hand_probs[i]*100:.1f}%) ", end="")

    player_throw_boxes, player_throw_labels, player_throw_probs = analyze_region(frame, PLAYER_THROW_X, PLAYER_THROW_Y,
                                                                                 PLAYER_THROW_W, PLAYER_THROW_H)
    right_player_throw_boxes, right_player_throw_labels, right_player_throw_probs = analyze_region(frame,
                                                                                                   RIGHT_PLAYER_THROW_X,
                                                                                                   RIGHT_PLAYER_THROW_Y,
                                                                                                   RIGHT_PLAYER_THROW_W,
                                                                                                   RIGHT_PLAYER_THROW_H)
    left_player_throw_boxes, left_player_throw_labels, left_player_throw_probs = analyze_region(frame,
                                                                                                LEFT_PLAYER_THROW_X,
                                                                                                LEFT_PLAYER_THROW_Y,
                                                                                                LEFT_PLAYER_THROW_W,
                                                                                                LEFT_PLAYER_THROW_H)
    opposite_player_throw_boxes, opposite_player_throw_labels, opposite_player_throw_probs = analyze_region(frame,
                                                                                                            OPPOSITE_PLAYER_THROW_X,
                                                                                                            OPPOSITE_PLAYER_THROW_Y,
                                                                                                            OPPOSITE_PLAYER_THROW_W,
                                                                                                            OPPOSITE_PLAYER_THROW_H)

    # UI update: hand
    current_hand_count = len(player_hand_labels) + len(player_pon_labels)
    if len(player_hand_labels) > 0:
        overlay.update_hand(player_hand_labels)
        # Simple console display (with a timestamp)
        hand_str = " ".join(player_hand_labels)
        current_time = time.strftime("%H:%M:%S")
        # Report when the number of hand tiles changes
        if current_hand_count != previous_hand_count:
            if current_hand_count == 14:
                print("\n★ Tile drawn - hand is now 14 tiles", end="")
            print(f"\n[{current_time}] [{len(player_hand_labels)} tiles] {hand_str}   ", end="", flush=True)
            previous_hand_count = current_hand_count
        # else:
        #     print(f"\r[{len(player_hand_labels)} tiles] {hand_str}   ", end="", flush=True)
    else:
        # Debug info when no hand tiles were recognized
        print(f"\r⚠ No hand detected (region: x={PLAYER_HAND_X}, y={PLAYER_HAND_Y}, w={PLAYER_HAND_W_TEMP}, h={PLAYER_HAND_H}) binarization threshold=100   ", end="", flush=True)
        previous_hand_count = 0

    all_boxes = {
        "player_hand": player_hand_boxes,
        "player_hand_labels": player_hand_labels,
        "player_pon": player_pon_boxes,
        "player_pon_labels": player_pon_labels,
        "player_throw": player_throw_boxes,
        "player_throw_labels": player_throw_labels,
        "right_player_throw": right_player_throw_boxes,
        "right_player_throw_labels": right_player_throw_labels,
        "left_player_throw": left_player_throw_boxes,
        "left_player_throw_labels": left_player_throw_labels,
        "opposite_player_throw": opposite_player_throw_boxes,
        "opposite_player_throw_labels": opposite_player_throw_labels
    }

    # Compute and display the recommended discard
    if len(player_hand_labels) + len(player_pon_labels) >= 14:
        print(f"\nYour turn (hand:{len(player_hand_labels)} + melds:{len(player_pon_labels)} = {len(player_hand_labels)+len(player_pon_labels)} tiles)", end="")
        try:
            translated_tensor = translate_boxes_to_tensors(all_boxes)
            probs = make_prediction(discard_model, translated_tensor)
            # Tile indices sorted by predicted probability, highest first
            sorted_indices = probs.argsort(descending=True).squeeze()
            # Debug: show the model's top recommendations
            print("\nModel top-5 recommendations: ", end="")
            for i, idx in enumerate(sorted_indices[:5]):
                top_idx = int(idx.item())
                tile = translate_to_vision(top_idx)
                prob = probs[0][top_idx].item() * 100
                in_hand = "✓" if tile in player_hand_labels else "✗"
                print(f"{tile}({prob:.1f}%{in_hand}) ", end="")
            # Pick the highest-probability tile that is actually in the hand
            found_recommendation = False
            for idx in sorted_indices[:10]:  # check the top 10 predictions
                top_idx = int(idx.item())
                recommended_tile = translate_to_vision(top_idx)
                # Only recommend tiles that are present in the hand
                if recommended_tile in player_hand_labels:
                    current_recommendation = recommended_tile
                    overlay.update_recommendation(recommended_tile)
                    found_recommendation = True
                    print(f" → recommended: {recommended_tile}", end="")
                    # Auto-click while the Space key is held
                    if keyboard.is_pressed('space'):
                        overlay.status_label.setText("Clicking...")
                        overlay.status_label.setStyleSheet("""
                            QLabel {
                                color: #FF0000;
                                background-color: rgba(0, 0, 0, 150);
                                padding: 5px;
                                border-radius: 3px;
                            }
                        """)
                        # Perform the actual click
                        for i, label in enumerate(player_hand_labels):
                            if label == recommended_tile:
                                box = player_hand_boxes[i]
                                x, y = box[0] + (box[2] - box[0]) // 2, box[1] + (box[3] - box[1]) // 2
                                abs_x = window.left + x
                                abs_y = window.top + y
                                pyautogui.click(abs_x, abs_y)
                                print(" clicked!", end="")
                                break
                        time.sleep(0.5)
                        overlay.status_label.setText("✓ Ready | Space: auto-click | refresh: every 0.2 s")
                        overlay.status_label.setStyleSheet("""
                            QLabel {
                                color: #00FF00;
                                background-color: rgba(0, 0, 0, 150);
                                padding: 5px;
                                border-radius: 3px;
                            }
                        """)
                    break
            if not found_recommendation:
                overlay.update_recommendation(None)
                print("\n→ No recommendable tile found in the hand (none of the top 10 predictions is in the hand)", end="")
        except Exception as e:
            print(f"\nRecommendation error: {e}", end="")
            import traceback
            traceback.print_exc()
            overlay.update_recommendation(None)
    else:
        overlay.update_recommendation(None)
# Run process_frame on a timer (every 200 ms, i.e. frequent updates)
timer = QTimer()
timer.timeout.connect(process_frame)
timer.start(200)

# Run the application
sys.exit(app.exec_())
# %%