{
| "architectures": ["GPTNeoXForCausalLM"], | |
| "model_type": "gpt-neox", | |
| "base_model": "gpt-oss:20b", | |
| "base_model_size": "20B", | |
| "adapter_type": "lora", | |
| "lora_config": { | |
| "r": 16, | |
| "lora_alpha": 32, | |
| "lora_dropout": 0.1, | |
| "target_modules": [ | |
| "q_proj", | |
| "k_proj", | |
| "v_proj", | |
| "o_proj", | |
| "gate_proj", | |
| "up_proj", | |
| "down_proj" | |
| ] | |
| }, | |
| "mev_classifier": { | |
| "parameters": 315151, | |
| "input_dim": 240, | |
| "hidden_dim": 512, | |
| "dropout_rate": 0.3, | |
| "num_labels": 4, | |
| "label_map": { | |
| "0": "normal", | |
| "1": "arbitrage", | |
| "2": "sandwich", | |
| "3": "liquidation" | |
| } | |
| }, | |
| "training_info": { | |
| "dataset_size": 700805, | |
| "validation_accuracy": 0.993, | |
| "validation_loss": 0.0174, | |
| "device": "mps", | |
| "optimizer": "AdamW", | |
| "learning_rate": 0.001, | |
| "weight_decay": 0.01 | |
| }, | |
| "task": "mev-detection-generation", | |
| "capabilities": [ | |
| "arbitrage_detection", | |
| "sandwich_attack_detection", | |
| "liquidation_detection", | |
| "profit_estimation", | |
| "text_generation", | |
| "transaction_analysis" | |
| ], | |
| "total_model_size": "~13GB", | |
| "adapter_size": "1.2MB", | |
| "inference_requirements": { | |
| "min_ram": "16GB", | |
| "recommended_ram": "32GB", | |
| "quantization": "Q4_K_M" | |
| } | |
| } |
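
The `lora_config` block maps directly onto a Hugging Face PEFT configuration. Below is a minimal sketch, assuming the `openai/gpt-oss-20b` checkpoint corresponds to the `gpt-oss:20b` base model named above; the checkpoint name and loading details are assumptions, not part of this file.

```python
# Minimal sketch, assuming PEFT + Transformers; the checkpoint name is an assumption
# standing in for the "gpt-oss:20b" base model referenced in the config above.
from peft import LoraConfig, TaskType, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("openai/gpt-oss-20b")  # assumed checkpoint

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,                       # "r"
    lora_alpha=32,              # "lora_alpha"
    lora_dropout=0.1,           # "lora_dropout"
    target_modules=[            # "target_modules"
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)

model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # trainable LoRA weights vs. frozen 20B base
```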
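
The `mev_classifier` entry describes a small feed-forward head over 240-dimensional transaction features. The exact layer layout is not spelled out in the config (the stated 315,151 parameters imply structure beyond a plain two-layer MLP), so the following PyTorch snippet is only one plausible arrangement of the listed fields.

```python
# Hypothetical sketch of the "mev_classifier" head: a small MLP mapping a 240-dim
# transaction feature vector to the four MEV labels from "label_map".
import torch
import torch.nn as nn

LABEL_MAP = {0: "normal", 1: "arbitrage", 2: "sandwich", 3: "liquidation"}

class MEVClassifier(nn.Module):
    def __init__(self, input_dim=240, hidden_dim=512, num_labels=4, dropout_rate=0.3):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, num_labels),
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        return self.net(features)  # raw logits over the four labels

# Usage: classify a single 240-dim feature vector.
classifier = MEVClassifier()
logits = classifier(torch.randn(1, 240))
print(LABEL_MAP[int(logits.argmax(dim=-1))])
```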
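
`training_info` pins down the optimizer side: AdamW with learning rate 0.001 and weight decay 0.01, running on Apple-silicon `mps`. Below is a hedged sketch of one training step, reusing the `MEVClassifier` from the previous snippet; the data pipeline is not described in the config and is left abstract.

```python
# Sketch of a single training step under the hyperparameters listed in "training_info".
# MEVClassifier is the sketch above; features/labels come from an unspecified loader.
import torch
import torch.nn as nn

device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
classifier = MEVClassifier().to(device)
optimizer = torch.optim.AdamW(classifier.parameters(), lr=0.001, weight_decay=0.01)
loss_fn = nn.CrossEntropyLoss()

def train_step(features: torch.Tensor, labels: torch.Tensor) -> float:
    classifier.train()
    optimizer.zero_grad()
    loss = loss_fn(classifier(features.to(device)), labels.to(device))
    loss.backward()
    optimizer.step()
    return loss.item()
```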
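
For the generative capabilities (`text_generation`, `transaction_analysis`), the base model is addressed by an Ollama tag, so inference can go through the Ollama Python client. The sketch below is illustrative only: the config does not name a published tag for the fine-tuned or merged model, so the base `gpt-oss:20b` tag is used purely as a placeholder.

```python
# Illustrative only: queries the base "gpt-oss:20b" tag via the Ollama Python client,
# since the config above does not give a tag for the fine-tuned model.
import ollama

response = ollama.chat(
    model="gpt-oss:20b",
    messages=[{
        "role": "user",
        "content": "Analyze this swap sequence for sandwich-attack patterns: ...",
    }],
)
print(response["message"]["content"])
```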