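# Development Makefile for the Roboflow `inference` repository: code-style
# targets, disposable test-server containers, and PyPI wheel builds.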
.PHONY: style check_code_quality
.PHONY: start_test_docker_cpu start_test_docker_gpu start_test_docker_gpu_with_roboflow_staging start_test_docker_jetson stop_test_docker
.PHONY: create_wheels create_wheels_for_gpu_notebook upload_wheels
export PYTHONPATH = .
check_dirs := inference inference_sdk
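
# Auto-format the code base in place: black for layout, then isort for import ordering.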
style:
	python3 -m black $(check_dirs) --exclude '__init__\.py|node_modules'
	python3 -m isort $(check_dirs) --skip-glob '**/__init__.py' --skip-glob '**/node_modules/**'
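
# Verify formatting and import order without modifying files, then lint:
# the first flake8 pass fails the build, the second is report-only.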
check_code_quality:
	python3 -m black --check $(check_dirs) --exclude '__init__\.py|node_modules'
	python3 -m isort --check-only $(check_dirs) --skip-glob '**/__init__.py' --skip-glob '**/node_modules/**'
	# stop the build if there are Python syntax errors or undefined names
	flake8 $(check_dirs) --count --select=E9,F63,F7,F82 --show-source --statistics --exclude __init__.py,inference/inference/landing/node_modules
	# exit-zero treats all remaining errors as warnings; ignore E203 (whitespace before ':') and W503 (line break before binary operator), which conflict with black, and E501 (line too long), which black already manages
	flake8 $(check_dirs) --count --max-line-length=88 --exit-zero --ignore=D --extend-ignore=E203,E501,W503 --statistics --exclude __init__.py,inference/inference/landing/node_modules
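
# The start_test_docker_* targets run a disposable inference server container
# named inference-test for the test suite. PORT and INFERENCE_SERVER_REPO are
# expected from the caller; the values below are illustrative:
#   make start_test_docker_cpu PORT=9001 INFERENCE_SERVER_REPO=roboflow-inference-server-cpu
# CPU variant: no GPU flags.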
start_test_docker_cpu:
	docker run -d --rm -p $(PORT):$(PORT) -e PORT=$(PORT) -e MAX_BATCH_SIZE=17 --name inference-test roboflow/${INFERENCE_SERVER_REPO}:test
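
# GPU variant: exposes all host GPUs to the container.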
start_test_docker_gpu:
	docker run -d --rm -p $(PORT):$(PORT) -e PORT=$(PORT) -e MAX_BATCH_SIZE=17 --gpus=all --name inference-test roboflow/${INFERENCE_SERVER_REPO}:test
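
# GPU variant pointed at the Roboflow staging environment via PROJECT.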
start_test_docker_gpu_with_roboflow_staging:
	docker run -d --rm -p $(PORT):$(PORT) -e PORT=$(PORT) -e MAX_BATCH_SIZE=17 --gpus=all -e PROJECT=roboflow-staging --name inference-test roboflow/${INFERENCE_SERVER_REPO}:test
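
# Jetson variant: uses the NVIDIA container runtime and caps loaded models at one.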
start_test_docker_jetson:
	docker run -d --rm -p $(PORT):$(PORT) -e PORT=$(PORT) -e MAX_ACTIVE_MODELS=1 -e MAX_BATCH_SIZE=17 --runtime=nvidia --name inference-test roboflow/${INFERENCE_SERVER_REPO}:test
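
# Tear down the test container started by any of the targets above.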
stop_test_docker:
	docker rm -f inference-test
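
# Build distribution wheels for every package (core, cpu, gpu, inference, sdk,
# cli) into dist/; build/ is wiped between invocations so artifacts from one
# setup script do not leak into the next wheel.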
create_wheels:
	python -m pip install --upgrade pip
	python -m pip install wheel twine requests -r requirements/_requirements.txt -r requirements/requirements.cpu.txt -r requirements/requirements.http.txt -r requirements/requirements.sdk.http.txt
	rm -f dist/*
	rm -rf build/*
	python .release/pypi/inference.core.setup.py bdist_wheel
	rm -rf build/*
	python .release/pypi/inference.cpu.setup.py bdist_wheel
	rm -rf build/*
	python .release/pypi/inference.gpu.setup.py bdist_wheel
	rm -rf build/*
	python .release/pypi/inference.setup.py bdist_wheel
	rm -rf build/*
	python .release/pypi/inference.sdk.setup.py bdist_wheel
	rm -rf build/*
	python .release/pypi/inference.cli.setup.py bdist_wheel
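
# Slimmer wheel build used for the GPU notebook image: skips the requirements
# install and builds only the core, gpu, sdk, and cli wheels.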
create_wheels_for_gpu_notebook:
	python -m pip install --upgrade pip
	python -m pip install wheel twine requests
	rm -f dist/*
	python .release/pypi/inference.core.setup.py bdist_wheel
	python .release/pypi/inference.gpu.setup.py bdist_wheel
	python .release/pypi/inference.sdk.setup.py bdist_wheel
	python .release/pypi/inference.cli.setup.py bdist_wheel
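
# Publish all built wheels to PyPI; twine prompts for credentials unless
# TWINE_USERNAME / TWINE_PASSWORD (or an API token) are configured.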
upload_wheels:
	twine upload dist/*.whl