# Standard library
import hashlib
import json
import logging
import time

# Third-party
import redis
from celery import Celery
from flask import Flask, request, jsonify
from flask_cors import CORS

# Local
from config import (CELERY_BROKER_URL, CELERY_RESULT_BACKEND, REDIS_HOST, REDIS_PORT, REDIS_DB, OUTPUT_DIR,
                    CELERY_WORKER_CONCURRENCY, CELERY_TASK_TIME_LIMIT, CELERY_TASK_MAX_RETRIES,
                    CELERY_TASK_RETRY_DELAY, NUMBER_GPUS, IP, PORT)
# your algorithms
from simulator import rainfall_simulation_gpu, rainfall_simulation_cpu
# Flask application setup. NOTE: this is order-dependent, side-effecting
# initialization — the Celery app and Redis client below read the values
# configured here.
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from browser clients
app.config.update(
CELERY_BROKER_URL=CELERY_BROKER_URL,
CELERY_RESULT_BACKEND=CELERY_RESULT_BACKEND,
CELERY_WORKER_CONCURRENCY=CELERY_WORKER_CONCURRENCY,
CELERY_TASK_TIME_LIMIT=CELERY_TASK_TIME_LIMIT,
JSON_AS_ASCII=False,  # emit non-ASCII characters verbatim in JSON responses
)
# Celery app shares the Flask app's name; broker/backend/concurrency/time-limit
# all come from config.py via app.config.
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(result_backend=app.config['CELERY_RESULT_BACKEND'], worker_concurrency=app.config['CELERY_WORKER_CONCURRENCY'],
task_time_limit=app.config['CELERY_TASK_TIME_LIMIT'])
# Redis client used directly for task de-duplication locks (see long_running_task).
redis_client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
@celery.task(bind=True, max_retries=CELERY_TASK_MAX_RETRIES, default_retry_delay=CELERY_TASK_RETRY_DELAY)
def long_running_task(self, task_info):
    """Run one rainfall simulation (GPU or CPU) for `task_info`.

    A Redis NX lock keyed on the payload prevents the same request from
    being executed twice within an hour. Returns a dict with a 'status'
    message and, on success, the 'save_path' of the results.
    """
    # Stable de-duplication key. hashlib digests are deterministic across
    # processes; the builtin hash() used originally is randomized per
    # interpreter (PYTHONHASHSEED), so duplicates submitted to different
    # workers — or after a restart — were never detected.
    unique_key = "task:" + hashlib.sha256(
        json.dumps(task_info, sort_keys=True, ensure_ascii=False).encode('utf-8')
    ).hexdigest()
    try:
        # SET NX succeeds only for the first worker to claim this payload;
        # the 1-hour TTL guarantees the lock cannot leak forever.
        if not redis_client.set(unique_key, self.request.id, nx=True, ex=3600):
            return {'status': 'Duplicate task, skipping execution'}
        logger.info("Processing task: %s", task_info)
        if task_info.get('GPU') == 1:
            simulation_status, save_path = rainfall_simulation_gpu(task_info, ngpus=NUMBER_GPUS, output_dir=OUTPUT_DIR)
        else:
            simulation_status, save_path = rainfall_simulation_cpu(task_info, output_dir=OUTPUT_DIR)
        if simulation_status:
            self.update_state(state='SUCCESS', meta={'progress': 50})
            return {'status': 'Task completed!', 'save_path': save_path}
        # On failure the simulator returns its error message in save_path.
        raise RuntimeError(save_path)
    except Exception as exc:
        logger.error("Error in task: %s", exc)
        if self.request.retries >= self.max_retries:
            # Out of retries: release the dedup lock so the payload can be
            # resubmitted later, then re-raise so Celery records a FAILURE
            # result. (The original `raise self.update_state(...)` raised
            # None — update_state() returns None — which is a TypeError.)
            redis_client.delete(unique_key)
            raise
        raise self.retry(exc=exc)
@app.route('/new_task_rainfall_simulation/', methods=['POST'])
def new_task_rainfall_simulation():
    """Accept a simulation request body (JSON) and enqueue it.

    Replies 202 Accepted with the Celery task id for later polling.
    """
    payload = json.loads(request.data.decode('utf-8'))
    async_result = long_running_task.apply_async(args=[payload])
    return jsonify({'task_id': async_result.id}), 202
@app.route('/task-status/<task_id>', methods=['GET'])
def task_status(task_id):
    """Report the Celery state of a task, plus its result/error when finished.

    Fix: the original route string ('/task-status/') had no <task_id>
    converter, so Flask could never supply the `task_id` argument.
    """
    task = long_running_task.AsyncResult(task_id)
    # Only expose task.info once the task has settled; while PENDING/STARTED
    # it may hold progress metadata rather than a result.
    result = task.info if task.state in ('SUCCESS', 'FAILURE') else None
    return jsonify({'state': task.state, 'result': result})
@app.route('/cancel-task/<task_id>', methods=['GET'])
def cancel_task(task_id):
    """Revoke a queued or running task, terminating its worker process.

    Fixes vs original: the route lacked the <task_id> converter; `signal`
    was referenced but never imported (Celery's revoke() accepts the signal
    name as a string); AsyncResult has no update_state() method; and
    jsonify() cannot serialize the set literal that was returned.
    """
    task = long_running_task.AsyncResult(task_id)
    task.revoke(terminate=True, signal='SIGTERM')
    return jsonify({'status': 'TASK CANCEL SUCCEEDED'}), 200
@app.route('/change-task-status/<task_id>/<status>', methods=['GET'])
def change_task_status(task_id, status):
    """Force the backend-recorded state of a task to `status`.

    Fixes vs original: the route string ('//') was missing both URL
    converters; AsyncResult has no update_state() — states are written via
    the task's update_state(task_id=...); jsonify() cannot serialize a set.
    """
    # NOTE(review): `status` is taken verbatim from the URL — callers should
    # pass a valid Celery state name (e.g. 'REVOKED', 'FAILURE').
    long_running_task.update_state(task_id=task_id, state=status, meta={})
    return jsonify({'status': 'TASK STATUS CHANGED'}), 200
if __name__ == '__main__':
    # Fix: the original line ended with a C-style `//` comment, which is a
    # Python SyntaxError. Server IP and port are configured in config.py.
    app.run(host=IP, port=PORT)
# --- config.py (the contents below belong in a separate config.py file) ---
# Redis configuration (also reused to build the Celery URLs below)
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
# Celery configuration: broker and result backend share the same Redis database
CELERY_BROKER_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
CELERY_RESULT_BACKEND = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
# General application settings
JSON_AS_ASCII = False  # emit non-ASCII characters verbatim in JSON responses
# Simulation configuration
OUTPUT_DIR = 'xxx'  # placeholder — set to the directory where simulation results are saved
# Celery worker configuration
CELERY_WORKER_CONCURRENCY = 6  # max concurrent tasks per worker; tune to your hardware
CELERY_TASK_MAX_RETRIES = 2  # retries allowed per task before it is marked FAILURE
CELERY_TASK_RETRY_DELAY = 10  # seconds to wait before retrying a failed task
CELERY_TASK_TIME_LIMIT = 432000  # hard per-task time limit in seconds (5 days)
# number of GPUs passed to rainfall_simulation_gpu
NUMBER_GPUS = 1
# Flask server bind address — placeholder, set to your server's IP and port
IP = 'xx.xx.xx.xx'
PORT = 5000