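"""FastAPI server for the AI Text Humanizer.

Exposes AITextHumanizer from text_humanizer over HTTP: single and batch
humanization, plus health and stats endpoints.
"""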
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List
import time
import uvicorn
from text_humanizer import AITextHumanizer

# Initialize FastAPI app
app = FastAPI(
    title="AI Text Humanizer API",
    description="Transform AI-generated text to sound more natural and human-like",
    version="1.0.0"
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize the humanizer (this will load models on startup)
print("Initializing AI Text Humanizer...")
humanizer = AITextHumanizer()
print("Humanizer ready!")

# Request and response models
class HumanizeRequest(BaseModel):
    text: str
    style: Optional[str] = "natural"  # natural, casual, conversational
    intensity: Optional[float] = 0.7  # 0.0 to 1.0

class HumanizeResponse(BaseModel):
    original_text: str
    humanized_text: str
    similarity_score: float
    changes_made: List[str]
    processing_time_ms: float
    style: str
    intensity: float

class BatchHumanizeRequest(BaseModel):
    texts: List[str]
    style: Optional[str] = "natural"
    intensity: Optional[float] = 0.7

class BatchHumanizeResponse(BaseModel):
    results: List[HumanizeResponse]
    total_processing_time_ms: float
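
# Example /humanize request body matching HumanizeRequest (illustrative values only):
#   {"text": "Moreover, it is important to note that the results are significant.",
#    "style": "natural", "intensity": 0.7}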
@app.get("/")
async def root():
"""Root endpoint with API information"""
return {
"message": "AI Text Humanizer API",
"version": "1.0.0",
"endpoints": {
"humanize": "POST /humanize - Humanize a single text",
"batch_humanize": "POST /batch_humanize - Humanize multiple texts",
"health": "GET /health - Health check"
}
}
@app.get("/health")
async def health_check():
"""Health check endpoint"""
return {
"status": "healthy",
"timestamp": time.time(),
"models_loaded": {
"similarity_model": humanizer.similarity_model is not None,
"paraphraser": humanizer.paraphraser is not None
}
}
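
# Illustrative check once the server is running (assumes the `requests` package):
#   requests.get("http://localhost:8000/health").json()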
@app.post("/humanize", response_model=HumanizeResponse)
async def humanize_text(request: HumanizeRequest):
"""
Humanize a single piece of text
- **text**: The text to humanize
- **style**: Style of humanization (natural, casual, conversational)
- **intensity**: Intensity of humanization (0.0 to 1.0)
"""
if not request.text.strip():
raise HTTPException(status_code=400, detail="Text cannot be empty")
if request.intensity < 0.0 or request.intensity > 1.0:
raise HTTPException(status_code=400, detail="Intensity must be between 0.0 and 1.0")
if request.style not in ["natural", "casual", "conversational"]:
raise HTTPException(status_code=400, detail="Style must be one of: natural, casual, conversational")
try:
start_time = time.time()
# Humanize the text
result = humanizer.humanize_text(
text=request.text,
style=request.style,
intensity=request.intensity
)
processing_time = (time.time() - start_time) * 1000
return HumanizeResponse(
original_text=result["original_text"],
humanized_text=result["humanized_text"],
similarity_score=result["similarity_score"],
changes_made=result["changes_made"],
processing_time_ms=processing_time,
style=result["style"],
intensity=result["intensity"]
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Processing error: {str(e)}")
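
# Illustrative call (assumes a running server and the `requests` package;
# the text value is an example only):
#   requests.post(
#       "http://localhost:8000/humanize",
#       json={"text": "Furthermore, the system leverages advanced techniques.",
#             "style": "conversational", "intensity": 0.8},
#   ).json()["humanized_text"]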
@app.post("/batch_humanize", response_model=BatchHumanizeResponse)
async def batch_humanize_text(request: BatchHumanizeRequest):
"""
Humanize multiple pieces of text in batch
- **texts**: List of texts to humanize
- **style**: Style of humanization (natural, casual, conversational)
- **intensity**: Intensity of humanization (0.0 to 1.0)
"""
if not request.texts:
raise HTTPException(status_code=400, detail="Texts list cannot be empty")
if len(request.texts) > 50:
raise HTTPException(status_code=400, detail="Maximum 50 texts per batch request")
if request.intensity < 0.0 or request.intensity > 1.0:
raise HTTPException(status_code=400, detail="Intensity must be between 0.0 and 1.0")
if request.style not in ["natural", "casual", "conversational"]:
raise HTTPException(status_code=400, detail="Style must be one of: natural, casual, conversational")
try:
start_time = time.time()
results = []
for text in request.texts:
if text.strip(): # Only process non-empty texts
text_start_time = time.time()
result = humanizer.humanize_text(
text=text,
style=request.style,
intensity=request.intensity
)
text_processing_time = (time.time() - text_start_time) * 1000
results.append(HumanizeResponse(
original_text=result["original_text"],
humanized_text=result["humanized_text"],
similarity_score=result["similarity_score"],
changes_made=result["changes_made"],
processing_time_ms=text_processing_time,
style=result["style"],
intensity=result["intensity"]
))
else:
# Handle empty texts
results.append(HumanizeResponse(
original_text=text,
humanized_text=text,
similarity_score=1.0,
changes_made=[],
processing_time_ms=0.0,
style=request.style,
intensity=request.intensity
))
total_processing_time = (time.time() - start_time) * 1000
return BatchHumanizeResponse(
results=results,
total_processing_time_ms=total_processing_time
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Processing error: {str(e)}")
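
# Illustrative batch call (maximum 50 texts per request; values are examples only):
#   requests.post(
#       "http://localhost:8000/batch_humanize",
#       json={"texts": ["First AI-generated paragraph.", "Second one."],
#             "style": "natural", "intensity": 0.7},
#   ).json()["results"]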
@app.get("/stats")
async def get_stats():
"""Get API statistics and model information"""
return {
"models": {
"similarity_model": "all-MiniLM-L6-v2" if humanizer.similarity_model else None,
"paraphraser": "google/flan-t5-small" if humanizer.paraphraser else None
},
"features": {
"formal_word_replacement": True,
"contraction_addition": True,
"ai_transition_replacement": True,
"sentence_structure_variation": True,
"natural_imperfections": True,
"segment_paraphrasing": humanizer.paraphraser is not None,
"semantic_similarity": humanizer.similarity_model is not None
},
"supported_styles": ["natural", "casual", "conversational"],
"intensity_range": [0.0, 1.0]
}
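
# Illustrative: requests.get("http://localhost:8000/stats").json()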

if __name__ == "__main__":
    print("\nStarting AI Text Humanizer API Server...")
    print("API will be available at: http://localhost:8000")
    print("API documentation: http://localhost:8000/docs")
    print("Health check: http://localhost:8000/health")
    print("\n" + "="*50 + "\n")

    uvicorn.run(
        "fastapi_server:app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        log_level="info"
    )
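
# The server can also be started directly with uvicorn (illustrative command,
# assuming this file is saved as fastapi_server.py):
#   uvicorn fastapi_server:app --host 0.0.0.0 --port 8000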