@superpigy
2023-06-19T10:16:06.000000Z
字数 4644
阅读 266
未分类
./ffmpeg -rtsp_transport tcp -re -i rtsp://admin:hik12345@192.168.100.33/h264/ch1/sub/av_stream -vcodec copy -an -movflags faststart -g 5 -f flv rtmp://stream.ai.hamuna.club/live/test
docker run -d --name npc --net=host --restart=always -v /home/hmcz/npc/conf:/conf ffdfgdfg/npc -config=/conf/npc.conf
在后台生成npc.conf拷贝到设备/home/hmcz/npc/conf目录内
docker run -it -p 1935:1935 -p 8080:8080 -p 4433:4433 -p 5544:5544 -p 8083:8083 -p 8084:8084 -p 30000-30100:30000-30100/udp q191201771/lal /lal/bin/lalserver -c /lal/conf/lalserver.conf.json
armbian-config
Create Slice File: /etc/systemd/system/docker_limit.slice
[Unit]
Description=Hamuna Docker Resources Limits
Before=slices.target
[Slice]
CPUAccounting=true
CPUQuota=390%
MemoryAccounting=true
# 注:MemoryLimit 为 cgroup v1 旧指令,新版 systemd(cgroup v2)建议改用 MemoryMax
MemoryLimit=3.5G
sudo nano /etc/docker/daemon.json
{
"registry-mirrors" : ["https://dockerproxy.com"],
"dns": ["8.8.8.8","114.114.114.114"],
"log-driver":"json-file",
"log-opts": {"max-size":"500m", "max-file":"1"}
}
sudo service docker restart
sudo docker run -d -p 9000:9000 -p 9001:9001 --name minio -e MINIO_ROOT_USER=hmcz -e MINIO_ROOT_PASSWORD=hmcz1234 -v /media/hdd0/minio:/data minio/minio:latest server /data --console-address :9001
docker run -d --name hamunadocker-updator --restart=always -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --interval 60
# CPU版本
docker run -d --name hamuna-inference-node -v /home/hmcz/deployment:/deployment --restart=always hamunadocker/inference-node:latest
# GPU版本
docker run -d --name hamuna-inference-node-gpu --net=host -v /home/hmcz/deployment:/deployment --restart=always --gpus all hamunadocker/inference-node-gpu:latest
sudo docker run -d -p 9000:9000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ce:latest (注意:9000 端口与上文 MinIO 的 9000 端口冲突,同机部署时需修改其中一个端口映射)
1. 安装Cuda驱动
2. 安装Cudnn驱动
3. nvidia-smi检查是否成功驱动显卡
4. 安装nvidia-docker2
curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/ubuntu20.04/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt update
sudo apt install nvidia-docker2
通讯器配置文件: {"PROJECT_ID":"","APP_ID":"","DEVICE_ID":"","SERVICES":["ai_door_count_cow"]}
计算单元配置文件(Nano): {"PROJECT_ID":"","APP_ID":"","DEVICE_ID":"","SERVICES":["ai_cow_count"], "EP":"tensorrt", "FP16": true}
计算单元配置文件(非Nano): {"PROJECT_ID":"","APP_ID":"","DEVICE_ID":"","SERVICES":["ai_cow_count"], "EP":"tensorrt", "FP16": false}
追踪配置文件: {"PROJECT_ID":"","APP_ID":"","DEVICE_ID":"","SERVICES":["ai_track_count_cow"], "EP":"tensorrt", "STREAM_CV_SHOW": false, "FP16": false}
firefly开发板配置 sudo apt-get install apparmor-profiles
通讯器部署方式: docker run -d --name hamuna-eye-task-node --net=host --restart=always -v /home/hmcz/hamuna_eye:/hamuna_eye hamunadocker/hamuna-eye-task-dispatcher:latest
计算单元部署方式: docker run -d --runtime=nvidia --name hamuna-eye-ai-node --restart=always -v /home/hmcz/hamuna_eye:/hamuna_eye hamunadocker/hamuna-eye-ai-node:latest
追踪点数部署方式: sudo docker run -d -v /home/hmcz/hamuna_eye:/hamuna_eye --restart=always --name hamuna-tracker --net=host --gpus all hamunadocker/hamuna-eye-tracker:latest
自动更新部署方式: docker run -d --name hamunadocker-updator --restart=always -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower -c --interval 60
看门狗部署: docker run -d --name hamunawatchdog --restart=always -v /home/hmcz/hamuna_eye:/hamuna_eye -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/usr/bin/docker --net=host hamunadocker/hamuna-eye-watchdog
半实时摄像头: sudo docker run -d --net=host --restart=always -m 800m --memory-swap 1300m --name=hamuna-eye-v2-commu-semi -v /home/hmcz/hamuna_eye:/hamuna_eye hamunadocker/hamuna-eye-v2-commu-service:latest sh ./sh/communi_hub_semi.sh
定时摄像头: sudo docker run -d --net=host --restart=always -m 800m --memory-swap 2000m --name=hamuna-eye-v2-commu-timing -v /home/hmcz/hamuna_eye:/hamuna_eye hamunadocker/hamuna-eye-v2-commu-service:latest sh ./sh/communi_hub_timing.sh
摄像头搜索: sudo docker run -d --net=host --restart=always -m 300m --memory-swap 300m --name=hamuna-eye-v2-commu-cam-search -v /home/hmcz/hamuna_eye:/hamuna_eye hamunadocker/hamuna-eye-v2-commu-service:latest python communi_hub.py --runtime=search_cameras
AI节点部署: docker run -d --runtime=nvidia --name hamuna-eye-ai-node-v2 --net=host --restart=always -v /home/hmcz/hamuna_eye:/hamuna_eye hamunadocker/hamuna-eye-v2-ai-service:latest sh ./sh/ai_service_hub.sh
实时部署方式: sudo docker run -d -v /home/hmcz/hamuna_eye:/hamuna_eye --restart=always --name hamuna-eye-v2-commu-realtime-gpu --net=host --gpus all hamunadocker/hamuna-eye-v2-commu-service-gpu:latest
环境安装:
pip install nvidia-pyindex
工具包安装 :
pip install nvidia-tensorrt==8.2.4.2
配置路径:
pip show nvidia-tensorrt
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:(填写上个步骤 pip show 展示的 Location 路径)
添加全局默认启动:
nano ~/.bashrc(将上一步的 export 命令追加到文件最后一行,使其对每个新 shell 生效)