Python API Automation Testing Project
Below is a Python-based API automation testing framework built on pytest + requests with Allure reporting.
Project Structure
api_automation/
│
├── config/                    # Configuration
│   ├── __init__.py
│   ├── config.py              # Base configuration
│   └── env.py                 # Environment-specific configuration
│
├── testcases/                 # Test cases
│   ├── __init__.py
│   ├── test_demo_api.py       # Sample test cases
│   └── …
│
├── utils/                     # Utilities
│   ├── __init__.py
│   ├── request_util.py        # Request wrapper
│   ├── assert_util.py         # Assertion helpers
│   ├── logger.py              # Logging helper
│   ├── data_util.py           # Test-data helpers
│   └── …
│
├── data/                      # Test data
│   ├── __init__.py
│   ├── test_data.yaml         # YAML test data
│   └── …
│
├── reports/                   # Test reports
│   ├── allure-results/        # Raw Allure results
│   └── …
│
├── conftest.py                # pytest fixtures and hooks
├── pytest.ini                 # pytest configuration
├── requirements.txt           # Dependencies
└── README.md                  # Project readme
Core Code Implementation
1. Base configuration (config/config.py)
python
import os
from pathlib import Path

# Project root directory
BASE_DIR = Path(__file__).parent.parent


class Config:
    # Basic settings
    PROJECT_NAME = "API Automation"
    ENV = "test"  # default environment

    # Request settings
    TIMEOUT = 10
    RETRIES = 3

    # Logging settings
    LOG_LEVEL = "INFO"
    LOG_FILE = os.path.join(BASE_DIR, "logs", "api_test.log")

    # Report settings
    REPORT_DIR = os.path.join(BASE_DIR, "reports")
    ALLURE_RESULTS_DIR = os.path.join(REPORT_DIR, "allure-results")

    @classmethod
    def init_env(cls, env):
        cls.ENV = env
        # Load a different base URL per environment
        if env == "test":
            cls.BASE_URL = "https://test.api.example.com"
        elif env == "staging":
            cls.BASE_URL = "https://staging.api.example.com"
        else:
            cls.BASE_URL = "https://api.example.com"


# Initialize with the default environment so BASE_URL is always defined,
# even when the framework is used outside of pytest
Config.init_env(Config.ENV)
config = Config()
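The project tree lists config/env.py for environment-specific settings, but its contents are not shown above. A minimal sketch of what it might contain is below; the host names and the get_base_url helper are assumptions for illustration, and Config.init_env could look values up here instead of hard-coding them:
python
# config/env.py — hypothetical per-environment settings (sketch, not part of the original)
ENVIRONMENTS = {
    "test": {"base_url": "https://test.api.example.com"},
    "staging": {"base_url": "https://staging.api.example.com"},
    "prod": {"base_url": "https://api.example.com"},
}


def get_base_url(env: str) -> str:
    """Return the base URL for the given environment, falling back to prod."""
    return ENVIRONMENTS.get(env, ENVIRONMENTS["prod"])["base_url"]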
2. Request wrapper (utils/request_util.py)
python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from config.config import config
from utils.logger import logger


class RequestUtil:
    def __init__(self):
        self.session = requests.Session()
        # Automatically retry on common transient failures
        retry_strategy = Retry(
            total=config.RETRIES,
            backoff_factor=1,
            status_forcelist=[408, 429, 500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("https://", adapter)
        self.session.mount("http://", adapter)

    def send_request(self, method, url, **kwargs):
        """
        Send an HTTP request.
        :param method: HTTP method
        :param url: request URL; relative paths are prefixed with config.BASE_URL
        :param kwargs: any other arguments supported by requests
        :return: response object
        """
        full_url = f"{config.BASE_URL}{url}" if not url.startswith("http") else url
        logger.info(f"Request: {method.upper()} {full_url}")
        logger.debug(f"Request Params: {kwargs}")
        try:
            response = self.session.request(
                method=method.lower(),
                url=full_url,
                timeout=config.TIMEOUT,
                **kwargs
            )
            logger.info(f"Response Status: {response.status_code}")
            logger.debug(f"Response Body: {response.text}")
            return response
        except Exception as e:
            logger.error(f"Request failed: {str(e)}")
            raise

    def get(self, url, **kwargs):
        return self.send_request("get", url, **kwargs)

    def post(self, url, **kwargs):
        return self.send_request("post", url, **kwargs)

    def put(self, url, **kwargs):
        return self.send_request("put", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.send_request("delete", url, **kwargs)

    def patch(self, url, **kwargs):
        return self.send_request("patch", url, **kwargs)


request_util = RequestUtil()
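As a quick sanity check outside of pytest, the singleton can be used directly. A small sketch follows; the endpoint path and token are placeholders, not real API details:
python
# Hypothetical standalone usage of the request wrapper
from utils.request_util import request_util

resp = request_util.get(
    "/api/v1/users/123",  # placeholder endpoint
    headers={"Authorization": "Bearer test_token"},
)
print(resp.status_code, resp.json())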
3. Assertion helpers (utils/assert_util.py)
python
import json
from jsonschema import validate
from config.config import config
from utils.logger import logger
class AssertUtil:
    @staticmethod
    def assert_status_code(response, expected_code):
        """Assert the response status code."""
        assert response.status_code == expected_code, \
            f"Expected status code {expected_code}, but got {response.status_code}"

    @staticmethod
    def assert_schema(response, schema):
        """Assert that the response body matches the given JSON Schema."""
        try:
            validate(instance=response.json(), schema=schema)
        except Exception as e:
            logger.error(f"Schema validation failed: {str(e)}")
            raise AssertionError(f"Schema validation failed: {str(e)}")

    @staticmethod
    def assert_json_value(response, json_path, expected_value, comparator="eq"):
        """Assert a specific value in the JSON response, located via JSONPath."""
        import jsonpath
        actual_value = jsonpath.jsonpath(response.json(), json_path)
        if not actual_value:
            raise AssertionError(f"JSON path '{json_path}' not found in response")
        actual_value = actual_value[0]
        if comparator == "eq":
            assert actual_value == expected_value, \
                f"Expected {json_path} to be {expected_value}, but got {actual_value}"
        elif comparator == "contains":
            assert expected_value in actual_value, \
                f"Expected {json_path} to contain {expected_value}, but got {actual_value}"
        elif comparator == "startswith":
            assert str(actual_value).startswith(str(expected_value)), \
                f"Expected {json_path} to start with {expected_value}, but got {actual_value}"
        else:
            raise ValueError(f"Unsupported comparator: {comparator}")

    @staticmethod
    def assert_response_time(response, max_time):
        """Assert that the response time is below the given limit (in seconds)."""
        assert response.elapsed.total_seconds() <= max_time, \
            f"Response time {response.elapsed.total_seconds()}s exceeds {max_time}s"
4. Logging helper (utils/logger.py)
python
import logging
import os

from config.config import config


def get_logger(name=None):
    """Return a configured logger."""
    logger = logging.getLogger(name)
    logger.setLevel(config.LOG_LEVEL.upper())

    # Avoid adding duplicate handlers on repeated calls
    if logger.handlers:
        return logger

    # Make sure the log directory exists
    os.makedirs(os.path.dirname(config.LOG_FILE), exist_ok=True)

    # File handler
    file_handler = logging.FileHandler(config.LOG_FILE, encoding="utf-8")
    file_handler.setLevel(config.LOG_LEVEL.upper())

    # Console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(config.LOG_LEVEL.upper())

    # Formatting
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger


logger = get_logger("api_test")
5. Sample test cases (testcases/test_demo_api.py)
python
import pytest
from utils.request_util import request_util
from utils.assert_util import AssertUtil
from config.config import config
@pytest.mark.demo
class TestDemoAPI:
    """Sample API test class."""

    @pytest.fixture(autouse=True)
    def setup(self):
        """Per-test setup and teardown."""
        self.assert_util = AssertUtil()
        yield
        # Add per-test cleanup here if needed

    def test_get_user_info(self):
        """Fetch user info."""
        # Prepare test data
        user_id = "123"
        expected_status = 200
        expected_schema = {
            "type": "object",
            "properties": {
                "id": {"type": "string"},
                "name": {"type": "string"},
                "email": {"type": "string"},
                "status": {"type": "number"}
            },
            "required": ["id", "name", "email", "status"]
        }

        # Send the request
        response = request_util.get(
            f"/api/v1/users/{user_id}",
            headers={"Authorization": "Bearer test_token"}
        )

        # Assertions
        self.assert_util.assert_status_code(response, expected_status)
        self.assert_util.assert_schema(response, expected_schema)
        self.assert_util.assert_json_value(response, "$.id", user_id)
        self.assert_util.assert_json_value(response, "$.status", 1, "eq")

    @pytest.mark.parametrize("test_data", [
        {"username": "testuser", "password": "test123", "expected_code": 200},
        {"username": "", "password": "test123", "expected_code": 400},
        {"username": "nonexist", "password": "wrong", "expected_code": 401}
    ])
    def test_login(self, test_data):
        """Parametrized login tests."""
        response = request_util.post(
            "/api/v1/auth/login",
            json=test_data
        )
        self.assert_util.assert_status_code(response, test_data["expected_code"])
        if response.status_code == 200:
            # A successful login should return a non-empty token
            token = response.json().get("token")
            assert token, "Expected a non-empty token in the login response"
6. pytest hooks and fixtures (conftest.py)
python
import pytest

from config.config import config as global_config
from utils.logger import logger


def pytest_addoption(parser):
    """Register custom command-line options."""
    parser.addoption("--env", action="store", default="test", help="set test environment")


def pytest_configure(config):
    """Apply pytest configuration."""
    # Initialize the environment-specific settings
    env = config.getoption("--env")
    global_config.init_env(env)

    # Register custom markers
    config.addinivalue_line(
        "markers",
        "demo: mark test as demo test case"
    )


@pytest.fixture(scope="session", autouse=True)
def setup_session(request):
    """Session-level fixture."""
    logger.info(f"Starting test session with env: {global_config.ENV}")
    yield
    logger.info("Test session finished")


@pytest.fixture(autouse=True)
def setup_test(request):
    """Per-test setup and teardown."""
    logger.info(f"Starting test: {request.node.name}")
    yield
    logger.info(f"Finished test: {request.node.name}")
7. pytest configuration file (pytest.ini)
ini
[pytest]
testpaths = testcases
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts = -v --alluredir=./reports/allure-results --clean-alluredir
markers =
demo: mark test as demo test case
8. Dependencies (requirements.txt)
pytest==7.4.0
requests==2.31.0
pyyaml==6.0.1
jsonschema==4.19.0
jsonpath==0.82
allure-pytest==2.13.2
pytest-html==4.0.2
Usage
Install dependencies:
bash
pip install -r requirements.txt
Run tests:
bash
pytest                                  # run the full suite
pytest --env=staging                    # run against the staging environment
pytest -m demo                          # run only tests marked "demo"
pytest --html=./reports/report.html     # additionally generate a pytest-html report
Generate the Allure report:
bash
pytest --alluredir=./reports/allure-results
allure serve ./reports/allure-results
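If you prefer a static report directory instead of allure serve, the Allure CLI can also generate one; the output path below is just an example:
bash
allure generate ./reports/allure-results -o ./reports/allure-report --clean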
Extension Suggestions
Data-driven testing: combine pytest's parametrize with external data files (YAML/JSON/Excel); see the first sketch after this list
API dependencies: use fixtures to model dependencies between endpoints, e.g. logging in first to obtain a token; see the second sketch after this list
Database verification: add database queries to verify how endpoints affect persisted data
Performance testing: integrate locust or pytest-benchmark
CI/CD integration: integrate with CI tools such as Jenkins or GitHub Actions
Mock services: mock dependent third-party APIs with requests-mock or WireMock
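For the data-driven suggestion, here is a minimal sketch of loading cases from data/test_data.yaml and feeding them to parametrize. The YAML layout, the login_cases key, and the load_yaml helper are assumptions for illustration, not part of the framework above:
python
# Hypothetical data/test_data.yaml:
# login_cases:
#   - {username: "testuser", password: "test123", expected_code: 200}
#   - {username: "", password: "test123", expected_code: 400}
import os

import pytest
import yaml

from config.config import BASE_DIR  # module-level constant defined in config.py above
from utils.request_util import request_util


def load_yaml(filename, key):
    """Load one named block of test data from the data/ directory (hypothetical helper)."""
    path = os.path.join(BASE_DIR, "data", filename)
    with open(path, encoding="utf-8") as f:
        return yaml.safe_load(f)[key]


@pytest.mark.parametrize("case", load_yaml("test_data.yaml", "login_cases"))
def test_login_data_driven(case):
    """Each YAML entry becomes one parametrized test case."""
    response = request_util.post("/api/v1/auth/login", json=case)
    assert response.status_code == case["expected_code"]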
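For the API-dependency suggestion, a session-scoped fixture in conftest.py could log in once and share the token with dependent tests. This is only a sketch; the login endpoint, credentials, response shape, and the /api/v1/users/me path are the same placeholder assumptions used in the sample tests:
python
# Hypothetical conftest.py addition: obtain a token once per session
import pytest

from utils.request_util import request_util


@pytest.fixture(scope="session")
def auth_token():
    """Log in once and return the token for dependent tests."""
    response = request_util.post(
        "/api/v1/auth/login",
        json={"username": "testuser", "password": "test123"},
    )
    assert response.status_code == 200, "Login failed; cannot run dependent tests"
    return response.json()["token"]


def test_get_profile(auth_token):
    """Example of a test that depends on a prior login."""
    response = request_util.get(
        "/api/v1/users/me",  # placeholder endpoint
        headers={"Authorization": f"Bearer {auth_token}"},
    )
    assert response.status_code == 200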
This framework provides the basic structure for API automation testing; you can extend and customize it to fit the needs of your project.