Commit cf986887 by Yuhaibo

Merge branch 'main' into Wangbing1

parents 1fc99f7f fed05a05
...@@ -2,6 +2,16 @@ ...@@ -2,6 +2,16 @@
* *
# 但保留以下文件和文件夹 # 但保留以下文件和文件夹
!database/
!database/**
!hooks/
!hooks/**
!icons/
!icons/**
!labelme/
!labelme/**
!rules/
!rules/**
!widgets/ !widgets/
!widgets/** !widgets/**
!handlers/ !handlers/
......
...@@ -24,8 +24,6 @@ os.environ['ULTRALYTICS_CONFIG_DIR'] = os.path.join(current_dir, '.cache', 'ultr ...@@ -24,8 +24,6 @@ os.environ['ULTRALYTICS_CONFIG_DIR'] = os.path.join(current_dir, '.cache', 'ultr
# 修复 OpenMP 运行时冲突问题 # 修复 OpenMP 运行时冲突问题
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' # 允许多个OpenMP库共存(临时解决方案) os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' # 允许多个OpenMP库共存(临时解决方案)
print("[环境变量] ultralytics离线模式已启用")
print("[环境变量] OpenMP冲突已修复")
from qtpy import QtWidgets from qtpy import QtWidgets
......
...@@ -620,11 +620,10 @@ class MainWindow( ...@@ -620,11 +620,10 @@ class MainWindow(
if os.path.exists(icon_path): if os.path.exists(icon_path):
icon = QtGui.QIcon(icon_path) icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon) self.setWindowIcon(icon)
print(f"[主窗口] 窗口图标已设置: {icon_path}")
else: else:
print(f"[主窗口] 图标文件不存在: {icon_path}") pass
except Exception as e: except Exception as e:
print(f"[主窗口] 设置窗口图标失败: {e}") pass
def _loadDefaultConfig(self): def _loadDefaultConfig(self):
"""从 default_config.yaml 加载配置""" """从 default_config.yaml 加载配置"""
...@@ -772,7 +771,6 @@ class MainWindow( ...@@ -772,7 +771,6 @@ class MainWindow(
# 🔥 为每个通道面板的任务标签设置变量名(channel1mission, channel2mission, channel3mission, channel4mission) # 🔥 为每个通道面板的任务标签设置变量名(channel1mission, channel2mission, channel3mission, channel4mission)
mission_var_name = f'channel{i+1}mission' mission_var_name = f'channel{i+1}mission'
setattr(self, mission_var_name, channelPanel.taskLabel) setattr(self, mission_var_name, channelPanel.taskLabel)
print(f"[MainWindow] 已设置任务标签变量: {mission_var_name}")
if hasattr(self, '_connectChannelPanelSignals'): if hasattr(self, '_connectChannelPanelSignals'):
self._connectChannelPanelSignals(channelPanel) self._connectChannelPanelSignals(channelPanel)
...@@ -803,7 +801,6 @@ class MainWindow( ...@@ -803,7 +801,6 @@ class MainWindow(
history_panel.setObjectName(f"HistoryVideoPanel_{i+1}") history_panel.setObjectName(f"HistoryVideoPanel_{i+1}")
self.historyVideoPanels.append(history_panel) self.historyVideoPanels.append(history_panel)
print(f"[MainWindow] 已创建 {len(self.historyVideoPanels)} 个历史视频面板")
# 通过handler初始化通道面板数据 # 通过handler初始化通道面板数据
if hasattr(self, 'initializeChannelPanels'): if hasattr(self, 'initializeChannelPanels'):
...@@ -830,7 +827,6 @@ class MainWindow( ...@@ -830,7 +827,6 @@ class MainWindow(
# 🔥 设置曲线面板的任务选择下拉框变量名(curvemission) # 🔥 设置曲线面板的任务选择下拉框变量名(curvemission)
self.curvemission = self.curvePanel.curvemission self.curvemission = self.curvePanel.curvemission
print(f"[MainWindow] 已设置曲线任务变量: curvemission")
# 连接任务选择变化信号 # 连接任务选择变化信号
self.curvemission.currentTextChanged.connect(self._onCurveMissionChanged) self.curvemission.currentTextChanged.connect(self._onCurveMissionChanged)
...@@ -862,7 +858,6 @@ class MainWindow( ...@@ -862,7 +858,6 @@ class MainWindow(
self.videoLayoutStack.addWidget(layout_widget) self.videoLayoutStack.addWidget(layout_widget)
print(f"[MainWindow] 曲线模式布局已创建:左侧子布局栈(实时/历史) + 右侧共用CurvePanel")
def _createRealtimeCurveSubLayout(self): def _createRealtimeCurveSubLayout(self):
"""创建实时检测曲线子布局(索引0)- 左侧通道列表""" """创建实时检测曲线子布局(索引0)- 左侧通道列表"""
...@@ -906,7 +901,6 @@ class MainWindow( ...@@ -906,7 +901,6 @@ class MainWindow(
sublayout.addWidget(self.curve_scroll_area) sublayout.addWidget(self.curve_scroll_area)
self.curveLayoutStack.addWidget(sublayout_widget) self.curveLayoutStack.addWidget(sublayout_widget)
print(f"[MainWindow] 实时检测曲线子布局已创建(索引0)- 基于CSV文件的动态通道系统")
def _createHistoryCurveSubLayout(self): def _createHistoryCurveSubLayout(self):
"""创建历史回放曲线子布局(索引1)- 使用历史视频面板容器""" """创建历史回放曲线子布局(索引1)- 使用历史视频面板容器"""
...@@ -950,7 +944,6 @@ class MainWindow( ...@@ -950,7 +944,6 @@ class MainWindow(
sublayout.addWidget(self.history_scroll_area) sublayout.addWidget(self.history_scroll_area)
self.curveLayoutStack.addWidget(sublayout_widget) self.curveLayoutStack.addWidget(sublayout_widget)
print(f"[MainWindow] 历史回放曲线子布局已创建(索引1)- 历史视频面板容器系统")
def _onChannelCurveClicked(self, task_name): def _onChannelCurveClicked(self, task_name):
""" """
...@@ -959,15 +952,9 @@ class MainWindow( ...@@ -959,15 +952,9 @@ class MainWindow(
Args: Args:
task_name: 通道面板的任务名称 task_name: 通道面板的任务名称
""" """
print(f"🔄 [主窗口] 通道面板查看曲线按钮被点击,任务名称: {task_name}")
# 设置 curvemission 的值 # 设置 curvemission 的值
if hasattr(self, 'curvePanel') and self.curvePanel: if hasattr(self, 'curvePanel') and self.curvePanel:
success = self.curvePanel.setMissionFromTaskName(task_name) success = self.curvePanel.setMissionFromTaskName(task_name)
if success:
print(f"✅ [主窗口] 已设置 curvemission 为: {task_name}")
else:
print(f"⚠️ [主窗口] 设置 curvemission 失败: {task_name}")
# 切换到曲线模式 # 切换到曲线模式
self.toggleVideoPageMode() self.toggleVideoPageMode()
...@@ -1268,7 +1255,6 @@ class MainWindow( ...@@ -1268,7 +1255,6 @@ class MainWindow(
# ========== 通道管理按钮信号 ========== # ========== 通道管理按钮信号 ==========
# 🔥 已改为内嵌显示,由 MissionPanelHandler 处理,不再使用弹窗 # 🔥 已改为内嵌显示,由 MissionPanelHandler 处理,不再使用弹窗
# self.missionTable.channelManageClicked.connect(self.onChannelManage) # 旧的弹窗方式 # self.missionTable.channelManageClicked.connect(self.onChannelManage) # 旧的弹窗方式
print("[App] 通道管理已改为内嵌显示,不再使用弹窗")
# ========== 通道面板信号(为所有面板连接) ========== # ========== 通道面板信号(为所有面板连接) ==========
# 注意:channelConnected, channelDisconnected, channelEdited, amplifyClicked, channelNameChanged # 注意:channelConnected, channelDisconnected, channelEdited, amplifyClicked, channelNameChanged
...@@ -1517,15 +1503,12 @@ class MainWindow( ...@@ -1517,15 +1503,12 @@ class MainWindow(
def closeEvent(self, event): def closeEvent(self, event):
"""窗口关闭事件""" """窗口关闭事件"""
try: try:
print("[应用] 正在关闭应用...")
# 清理全局检测线程 # 清理全局检测线程
if hasattr(self, 'view_handler') and self.view_handler: if hasattr(self, 'view_handler') and self.view_handler:
video_handler = getattr(self.view_handler, 'video_handler', None) video_handler = getattr(self.view_handler, 'video_handler', None)
if video_handler: if video_handler:
thread_manager = getattr(video_handler, 'thread_manager', None) thread_manager = getattr(video_handler, 'thread_manager', None)
if thread_manager: if thread_manager:
print("[应用] 清理全局检测线程...")
thread_manager.cleanup_global_detection_thread() thread_manager.cleanup_global_detection_thread()
# 保存窗口状态 # 保存窗口状态
...@@ -1536,10 +1519,7 @@ class MainWindow( ...@@ -1536,10 +1519,7 @@ class MainWindow(
# 保存当前页面索引 # 保存当前页面索引
self.settings.setValue("window/last_page", self.getCurrentPageIndex()) self.settings.setValue("window/last_page", self.getCurrentPageIndex())
print("[应用] 应用关闭清理完成")
except Exception as e: except Exception as e:
print(f"[应用] 关闭清理失败: {e}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()
......
channel1:
annotation_count: 1
areas:
area_1:
height: 20mm
name: 通道1_区域1
boxes:
- - 617
- 415
- 192
fixed_bottoms:
- 482
fixed_tops:
- 387
last_updated: '2025-11-27 15:55:10'
channel2:
annotation_count: 1
areas:
area_1:
height: 20mm
name: 我去饿_区域1
boxes:
- - 643
- 558
- 160
fixed_bottoms:
- 616
fixed_tops:
- 534
last_updated: '2025-11-26 20:09:26'
channel3:
annotation_count: 1
areas:
area_1:
height: 20mm
name: 3_区域1
boxes:
- - 1365
- 915
- 128
fixed_bottoms:
- 939
fixed_tops:
- 886
last_updated: '2025-11-26 20:09:35'
channel4:
annotation_count: 1
areas:
area_1:
height: 20mm
name: asfdhuu_区域1
boxes:
- - 1689
- 884
- 96
fixed_bottoms:
- 908
fixed_tops:
- 860
last_updated: '2025-11-26 20:02:17'
通道1:
annotation_count: 2
areas:
area_1:
height: 20mm
name: 通道1_区域1
area_2:
height: 22mm
name: 通道1_
boxes:
- - 653
- 281
- 192
- - 337
- 520
- 160
fixed_bottoms:
- 204
- 579
fixed_tops:
- 300
- 456
last_updated: '2025-11-03 15:58:08'
channels:
1:
address: rtsp://admin:cei345678@192.168.0.127:8000/stream1
channel_id: 1
name: 不后悔1
2:
address: rtsp://admin:cei345678@192.168.0.127:8000/stream1
channel_id: 2
name: '2'
3:
address: rtsp://admin:cei345678@192.168.0.127:8000/stream1
channel_id: 3
name: '3'
4:
address: rtsp://admin:cei345678@192.168.0.127:8000/stream1
channel_id: 4
name: '4'
channel2:
general:
task_id: '123'
task_name: '21'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\123_21
channel3:
general:
task_id: '123'
task_name: '21'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\123_21
channel4:
general:
task_id: '1'
task_name: '1'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_1
channel1:
general:
task_id: '1'
task_name: '1'
area_count: 0
safe_low: 2.0mm
safe_high: 10.0mm
frequency: 25fps
video_format: AVI
push_address: ''
video_path: ''
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_1
areas:
area_1: 通道1_区域1
area_2: 通道1_区域2
area_heights:
area_1: 20mm
area_2: 20mm
model:
model_path: d:\restructure\liquid_level_line_detection_system\database\model\detection_model\5\best.dat
channel_1:
general:
task_id: '1'
task_name: '1'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_1
channel_2:
general:
task_id: '2'
task_name: '2'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\2_2
channel_3:
general:
task_id: '3'
task_name: '3'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\3_3
channel_4:
general:
task_id: '4'
task_name: '4'
save_liquid_data_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\4_4
task_id: '123'
task_name: '21'
status: 待配置
selected_channels:
- 通道2
- 通道3
created_time: '2025-11-26 14:55:31'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\123_21
task_id: '1'
task_name: '1'
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
- 通道4
created_time: '2025-11-26 19:53:35'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_1
task_id: '1'
task_name: '2'
status: 待配置
selected_channels:
- 通道1
- 通道2
created_time: '2025-11-26 16:24:36'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_2
task_id: '1'
task_name: '222'
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
- 通道4
created_time: '2025-11-26 19:58:15'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_222
task_id: '1'
task_name: test
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
- 通道4
created_time: '2025-11-26 19:46:34'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\1_test
task_id: '21'
task_name: '321'
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
- 通道4
created_time: '2025-11-26 20:08:46'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\21_321
task_id: '2'
task_name: test
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
- 通道4
created_time: '2025-11-26 20:01:16'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\2_test
task_id: 2恶趣味
task_name: q'we
status: 待配置
selected_channels:
- 通道2
created_time: '2025-11-26 14:56:26'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\2恶趣味_q'we
task_id: 吃个海鲜
task_name: 显示提醒他
status: 待配置
selected_channels:
- 通道4
created_time: '2025-11-27 11:06:03'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\吃个海鲜_显示提醒他
task_id: 大润发给
task_name: 上方
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
created_time: '2025-11-27 11:00:31'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\大润发给_上方
task_id: 的使得发
task_name: 如图微软
status: 待配置
selected_channels:
- 通道1
- 通道3
- 通道4
created_time: '2025-11-27 11:02:21'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\的使得发_如图微软
task_id: 的啊
task_name: 而突然
status: 待配置
selected_channels:
- 通道1
- 通道2
- 通道3
- 通道4
created_time: '2025-11-27 11:03:28'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\的啊_而突然
test_model:
areas:
area_1:
height: 20mm
name: test_model_区域1
area_2:
height: 20mm
name: test_model_区域2
bottoms:
- !!python/tuple
- 1082
- 1028
- !!python/tuple
- 1140
- 283
boxes:
- !!python/tuple
- 1078
- 934
- 192
- !!python/tuple
- 1141
- 222
- 128
tops:
- !!python/tuple
- 1076
- 839
- !!python/tuple
- 1140
- 159
# Physical zoom device configuration example
# Configures the physical (optical) zoom of Hikvision cameras
# Channel 1 - Hikvision PTZ dome camera
channel1_device:
ip: "192.168.1.100" # 设备IP地址
port: 8000 # 设备端口,默认8000
username: "admin" # 登录用户名
password: "admin123" # 登录密码
channel: 1 # 设备通道号
enable_physical_zoom: true # 是否启用物理变焦
zoom_capabilities:
min_zoom: 1.0 # 最小变焦倍数
max_zoom: 30.0 # 最大变焦倍数
zoom_step: 0.5 # 变焦步长
auto_focus: true # 是否支持自动聚焦
# Channel 2 - Hikvision bullet camera (no zoom support)
channel2_device:
ip: "192.168.1.101"
port: 8000
username: "admin"
password: "admin123"
channel: 1
enable_physical_zoom: false # bullet cameras usually do not support zoom
# Channel 3 - Hikvision PTZ dome camera
channel3_device:
ip: "192.168.1.102"
port: 8000
username: "admin"
password: "admin123"
channel: 1
enable_physical_zoom: true
zoom_capabilities:
min_zoom: 1.0
max_zoom: 20.0 # supported zoom range varies by model
zoom_step: 0.5
auto_focus: true
# Channel 4 - Hikvision PTZ dome camera
channel4_device:
ip: "192.168.1.103"
port: 8000
username: "admin"
password: "admin123"
channel: 1
enable_physical_zoom: true
zoom_capabilities:
min_zoom: 1.0
max_zoom: 25.0
zoom_step: 0.5
auto_focus: true
# Global settings
global_settings:
# Physical zoom priority: true = prefer physical zoom, false = prefer digital zoom
prefer_physical_zoom: true
# Connection timeout (seconds)
connection_timeout: 10
# Zoom operation timeout (seconds)
zoom_timeout: 15
# Auto reconnect interval (seconds)
reconnect_interval: 30
# Maximum retry count on errors
max_retry_count: 3
# Usage:
# 1. Rename this file to physical_zoom_config.yaml
# 2. Update the IP addresses, usernames, passwords, etc. for your actual devices
# 3. Make sure the devices support PTZ control
# 4. The following shortcuts are available in the zoom (amplify) window:
# - Mouse wheel: zoom in/out
# - R: reset zoom to 1x
# - D: show physical zoom status
# - F: auto focus
# - H: show/hide help
# - E: toggle sharpening
# - N: toggle denoising
# - C: toggle contrast enhancement
#
# Note: only physical zoom is supported; a Hikvision PTZ device is required
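A minimal sketch of reading this configuration once it has been renamed to `physical_zoom_config.yaml`; the helper below is illustrative and not part of the shipped handlers.

```python
# Minimal sketch: read physical_zoom_config.yaml and pull one channel's zoom settings.
# Key names follow the template above; the helper itself is illustrative only.
import yaml

def load_zoom_config(path="physical_zoom_config.yaml", channel_key="channel1_device"):
    with open(path, "r", encoding="utf-8") as f:
        config = yaml.safe_load(f) or {}
    device = config.get(channel_key, {})
    caps = device.get("zoom_capabilities", {})
    return {
        "ip": device.get("ip"),
        "enable_physical_zoom": device.get("enable_physical_zoom", False),
        "max_zoom": caps.get("max_zoom", 1.0),
        "connection_timeout": config.get("global_settings", {}).get("connection_timeout", 10),
    }

if __name__ == "__main__":
    print(load_zoom_config())
```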
{
"model_config": {
"model_type": "YOLOv8",
"model_size": "n",
"pretrained": true,
"input_size": [640, 640]
},
"training_config": {
"epochs": 100,
"batch_size": 16,
"learning_rate": 0.01,
"optimizer": "SGD",
"momentum": 0.937,
"weight_decay": 0.0005
},
"dataset_config": {
"num_classes": 3,
"class_names": ["liquid", "foam", "background"],
"data_yaml": "database/config/train_configs/data.yaml"
},
"augmentation_config": {
"hsv_h": 0.015,
"hsv_s": 0.7,
"hsv_v": 0.4,
"degrees": 0.0,
"translate": 0.1,
"scale": 0.5,
"shear": 0.0,
"perspective": 0.0,
"flipud": 0.0,
"fliplr": 0.5,
"mosaic": 1.0,
"mixup": 0.0
},
"validation_config": {
"val_interval": 1,
"save_period": 10,
"patience": 50
},
"device_config": {
"device": "cuda",
"workers": 8,
"amp": true
}
}
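A minimal sketch of how this JSON template could be mapped onto an ultralytics training call; the JSON file name below is an assumption, while the `train()` keyword names are standard ultralytics arguments.

```python
# Illustrative mapping of the JSON template above onto an ultralytics training call.
# The JSON file name is assumed; it is not confirmed by this diff.
import json
from ultralytics import YOLO

with open("database/config/train_configs/train_config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

model = YOLO(f"yolov8{cfg['model_config']['model_size']}.pt")  # e.g. yolov8n.pt
model.train(
    data=cfg["dataset_config"]["data_yaml"],
    epochs=cfg["training_config"]["epochs"],
    batch=cfg["training_config"]["batch_size"],
    lr0=cfg["training_config"]["learning_rate"],
    optimizer=cfg["training_config"]["optimizer"],
    imgsz=cfg["model_config"]["input_size"][0],
    device=0 if cfg["device_config"]["device"] == "cuda" else "cpu",
    workers=cfg["device_config"]["workers"],
    amp=cfg["device_config"]["amp"],
)
```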
# Template 1 - quick training configuration (lightweight)
# For quickly validating the model and dataset
task: segment
mode: train
model: database/model/detection_model/5/best.dat
data: database/dataset/data_template_1.yaml
epochs: 50
batch: 8
imgsz: 416
save: true
save_period: 10
cache: false
device: gpu
workers: 4
project: runs/train
name: template_1_exp
exist_ok: false
pretrained: false
optimizer: SGD
verbose: false
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: false
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: false
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.937
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
# Template 2 - standard training configuration (balanced)
# For regular training, balancing accuracy and speed
task: segment
mode: train
model: database/model/detection_model/5/best.dat
data: database/dataset/data_template_2.yaml
epochs: 100
batch: 16
imgsz: 640
save: true
save_period: 10
cache: false
device: gpu
workers: 4
project: runs/train
name: template_2_exp
exist_ok: false
pretrained: false
optimizer: SGD
verbose: false
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: false
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: false
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.937
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
# Template 3 - high-accuracy training configuration (accuracy first)
# For training runs that prioritize maximum accuracy
task: segment
mode: train
model: database/model/detection_model/5/best.dat
data: database/dataset/data_template_3.yaml
epochs: 200
batch: 32
imgsz: 768
save: true
save_period: 10
cache: false
device: gpu
workers: 4
project: runs/train
name: template_3_exp
exist_ok: false
pretrained: false
optimizer: Adam
verbose: false
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: true
close_mosaic: 10
resume: false
amp: false
fraction: 1.0
profile: false
freeze: null
multi_scale: true
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: false
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.937
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
task_id: 2恶趣味
task_name: q'we
status: 待配置
selected_channels:
- 通道2
created_time: '2025-11-26 14:56:26'
mission_result_folder_path: d:\restructure\liquid_level_line_detection_system\database\mission_result\2恶趣味_q'we
然后我去佛i好的食品发酵食品的
然后我去佛i好的食品发酵食品的
\ No newline at end of file
{
"d:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\recording_20251114_161804.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\recording_20251114_161804_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\recording_20251114_161804_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\recording_20251114_161804_区域3"
],
"timestamp": 1763623553.6413627
},
"d:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251115_213950.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251115_213950_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251115_213950_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251115_213950_区域3"
],
"timestamp": 1763623570.4085448
},
"d:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\11.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\11_区域1"
],
"timestamp": 1764162728.0737584
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\11.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\11_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\11_区域2"
],
"timestamp": 1763809441.0187602
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\recording_20251114_161804.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\recording_20251114_161804_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\recording_20251114_161804_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\recording_20251114_161804_区域3"
],
"timestamp": 1763652978.8164835
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251114_162238.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251114_162238_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251114_162238_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251114_162238_区域3"
],
"timestamp": 1763653387.158211
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251115_213950.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251115_213950_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251115_213950_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251115_213950_区域3"
],
"timestamp": 1763653027.766409
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251118_102818.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251118_102818_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251118_102818_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251118_102818_区域3"
],
"timestamp": 1763653036.4475722
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251120_152702.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251120_152702_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251120_152702_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251120_152702_区域3"
],
"timestamp": 1763653067.312616
},
"D:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251120_153508.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251120_153508_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251120_153508_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251120_153508_区域3"
],
"timestamp": 1763801356.5907655
},
"d:\\restructure\\liquid_level_line_detection_system\\database\\data\\111\\采集视频_20251118_102818.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251118_102818_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251118_102818_区域2",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251118_102818_区域3"
],
"timestamp": 1764210156.5055494
},
"d:\\restructure\\liquid_level_line_detection_system\\database\\data\\test2\\采集视频_20251127_094018.mp4": {
"save_path": "d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture",
"region_paths": [
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251127_094018_区域1",
"d:\\restructure\\liquid_level_line_detection_system\\database\\Corp_picture\\采集视频_20251127_094018_区域2"
],
"timestamp": 1764210266.1624653
}
}
\ No newline at end of file
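The registry above keys each entry by the absolute path of the source video, and the same video appears under both `d:\` and `D:\` spellings, so lookups should normalize case. A minimal sketch (the registry file name is assumed, not taken from the diff):

```python
# Sketch: look up the crop-region folders recorded for a given source video.
# The registry file name passed in is assumed; the structure matches the JSON above.
# Paths are normalized because the registry mixes "d:\" and "D:\" spellings of the same file.
import json
import os

def get_region_paths(registry_file, video_path):
    with open(registry_file, "r", encoding="utf-8") as f:
        registry = json.load(f)
    wanted = os.path.normcase(os.path.normpath(video_path))
    for key, entry in registry.items():
        if os.path.normcase(os.path.normpath(key)) == wanted:
            return entry.get("region_paths", [])
    return []
```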
...@@ -517,40 +517,26 @@ class ModelSetHandler: ...@@ -517,40 +517,26 @@ class ModelSetHandler:
try: try:
# 获取模型目录路径 # 获取模型目录路径
current_dir = Path(__file__).parent.parent.parent current_dir = Path(__file__).parent.parent.parent
model_dir = current_dir / "database" / "model" / "train_model" model_dir = current_dir / "database" / "model" / "detection_model"
print(f"[模型扫描] 扫描目录: {model_dir}")
if not model_dir.exists(): if not model_dir.exists():
print(f"[模型扫描] 目录不存在")
return models return models
print(f"[模型扫描] 目录存在: {model_dir.exists()}") # 扫描所有子目录(数字和非数字)
# 按数字排序子目录(降序,最新的在前)
all_subdirs = [d for d in model_dir.iterdir() if d.is_dir()] all_subdirs = [d for d in model_dir.iterdir() if d.is_dir()]
print(f"[模型扫描] 找到子目录数量: {len(all_subdirs)}")
print(f"[模型扫描] 子目录列表: {[d.name for d in all_subdirs]}")
# 分离数字目录和非数字目录
digit_subdirs = [d for d in all_subdirs if d.name.isdigit()] digit_subdirs = [d for d in all_subdirs if d.name.isdigit()]
print(f"[模型扫描] 数字子目录数量: {len(digit_subdirs)}") non_digit_subdirs = [d for d in all_subdirs if not d.name.isdigit()]
sorted_subdirs = sorted(digit_subdirs, key=lambda x: int(x.name), reverse=True) # 数字目录按数字降序排序,非数字目录按字母排序
print(f"[模型扫描] 数字子目录: {[d.name for d in sorted_subdirs]}") sorted_digit_subdirs = sorted(digit_subdirs, key=lambda x: int(x.name), reverse=True)
sorted_non_digit_subdirs = sorted(non_digit_subdirs, key=lambda x: x.name)
# 合并:数字目录在前,非数字目录在后
sorted_subdirs = sorted_digit_subdirs + sorted_non_digit_subdirs
for subdir in sorted_subdirs: for subdir in sorted_subdirs:
print(f"[模型扫描] 处理子目录: {subdir.name}")
# 尝试读取 config.yaml 获取详细信息
config_file = subdir / "config.yaml"
model_config = None
if config_file.exists():
try:
with open(config_file, 'r', encoding='utf-8') as f:
model_config = yaml.safe_load(f)
except Exception as e:
print(f"[模型扫描] 读取config.yaml失败: {e}")
# 检查是否有weights子目录(优先检查train/weights,然后weights) # 检查是否有weights子目录(优先检查train/weights,然后weights)
train_weights_dir = subdir / "train" / "weights" train_weights_dir = subdir / "train" / "weights"
...@@ -558,15 +544,10 @@ class ModelSetHandler: ...@@ -558,15 +544,10 @@ class ModelSetHandler:
if train_weights_dir.exists(): if train_weights_dir.exists():
search_dir = train_weights_dir search_dir = train_weights_dir
print(f"[模型扫描] 找到train/weights目录: {search_dir}")
elif weights_dir.exists(): elif weights_dir.exists():
search_dir = weights_dir search_dir = weights_dir
print(f"[模型扫描] 找到weights目录: {search_dir}")
else: else:
search_dir = subdir search_dir = subdir
print(f"[模型扫描] 使用根目录: {search_dir}")
print(f"[模型扫描] 搜索目录: {search_dir}")
# 按优先级查找模型文件:best > last > epoch1 # 按优先级查找模型文件:best > last > epoch1
# 支持的扩展名:.dat, .pt, .template_*, 无扩展名 # 支持的扩展名:.dat, .pt, .template_*, 无扩展名
...@@ -581,7 +562,6 @@ class ModelSetHandler: ...@@ -581,7 +562,6 @@ class ModelSetHandler:
# 检查文件名是否匹配模式 # 检查文件名是否匹配模式
if file.name.startswith('best.') and not file.name.endswith('.pt'): if file.name.startswith('best.') and not file.name.endswith('.pt'):
selected_model = file selected_model = file
print(f"[模型扫描] 找到best模型: {file.name}")
break break
# 优先级2: last模型(如果没有best) # 优先级2: last模型(如果没有best)
...@@ -589,7 +569,6 @@ class ModelSetHandler: ...@@ -589,7 +569,6 @@ class ModelSetHandler:
for file in search_dir.iterdir(): for file in search_dir.iterdir():
if file.is_file() and file.name.startswith('last.') and not file.name.endswith('.pt'): if file.is_file() and file.name.startswith('last.') and not file.name.endswith('.pt'):
selected_model = file selected_model = file
print(f"[模型扫描] 找到last模型: {file.name}")
break break
# 优先级3: epoch1模型(如果没有best和last) # 优先级3: epoch1模型(如果没有best和last)
...@@ -597,7 +576,6 @@ class ModelSetHandler: ...@@ -597,7 +576,6 @@ class ModelSetHandler:
for file in search_dir.iterdir(): for file in search_dir.iterdir():
if file.is_file() and file.name.startswith('epoch1.') and not file.name.endswith('.pt'): if file.is_file() and file.name.startswith('epoch1.') and not file.name.endswith('.pt'):
selected_model = file selected_model = file
print(f"[模型扫描] 找到epoch1模型: {file.name}")
break break
# 如果都没找到,尝试查找任何非.pt文件 # 如果都没找到,尝试查找任何非.pt文件
...@@ -605,22 +583,15 @@ class ModelSetHandler: ...@@ -605,22 +583,15 @@ class ModelSetHandler:
for file in search_dir.iterdir(): for file in search_dir.iterdir():
if file.is_file() and not file.name.endswith('.pt') and not file.name.endswith('.txt') and not file.name.endswith('.yaml'): if file.is_file() and not file.name.endswith('.pt') and not file.name.endswith('.txt') and not file.name.endswith('.yaml'):
selected_model = file selected_model = file
print(f"[模型扫描] 找到其他模型: {file.name}")
break break
# 如果找到了模型文件,添加到列表 # 如果找到了模型文件,添加到列表
if selected_model: if selected_model:
# 从 config.yaml 获取信息,或使用默认值 # 使用"文件夹名称/模型文件名"格式
if model_config: model_name = f"{subdir.name}/{selected_model.stem}"
model_name = model_config.get('name', f"训练模型-{subdir.name}") description = f"来自目录 {subdir.name}"
description = model_config.get('description', '') training_date = ''
training_date = model_config.get('training_date', '') epochs = ''
epochs = model_config.get('epochs', '')
else:
model_name = selected_model.stem # 使用文件名(不含扩展名)作为模型名
description = f"来自目录 {subdir.name}"
training_date = ''
epochs = ''
# 获取文件格式 # 获取文件格式
file_ext = selected_model.suffix.lstrip('.') file_ext = selected_model.suffix.lstrip('.')
...@@ -643,16 +614,11 @@ class ModelSetHandler: ...@@ -643,16 +614,11 @@ class ModelSetHandler:
'file_name': selected_model.name 'file_name': selected_model.name
} }
models.append(model_info) models.append(model_info)
print(f"[模型扫描] 添加模型: {model_name} ({selected_model.name})")
else:
print(f"[模型扫描] 子目录 {subdir.name} 中未找到有效模型")
except Exception as e: except Exception as e:
import traceback import traceback
traceback.print_exc() traceback.print_exc()
print(f"[模型扫描] 扫描异常: {e}")
print(f"[模型扫描] 总共找到 {len(models)} 个模型")
return models return models
def _mergeModelInfo(self, channel_models, scanned_models): def _mergeModelInfo(self, channel_models, scanned_models):
......
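The scan above selects one weights file per subdirectory with the priority best > last > epoch1, skipping `.pt`/`.txt`/`.yaml` files. A condensed, standalone sketch of that selection rule (illustrative only, not the handler code):

```python
# Condensed sketch of the selection rule used above: best > last > epoch1,
# skipping .pt files. Standalone illustration, not the handler code.
from pathlib import Path

def pick_model_file(search_dir: Path):
    files = [f for f in search_dir.iterdir() if f.is_file()]
    for prefix in ("best.", "last.", "epoch1."):
        for f in files:
            if f.name.startswith(prefix) and not f.name.endswith(".pt"):
                return f
    # Fallback: any file that is not a weights/.txt/.yaml artifact
    for f in files:
        if not f.name.endswith((".pt", ".txt", ".yaml")):
            return f
    return None
```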
# Auto Dot Module Usage Guide
## Overview
The `auto_dot.py` module implements automatic point marking based on YOLO segmentation masks. It detects the top and bottom of each container automatically, replacing manual point annotation.
## Key Features
- **Input**: image + detection boxes
- **Output**: point positions + annotated image
- **Detection methods**:
1. **liquid bottom + air top** (most reliable)
2. **liquid bottom + liquid top** (second choice)
3. **air bottom + air top** (fallback)
## Standalone Debugging
### 1. Prepare test data
Place the test image at:
```
D:\restructure\liquid_level_line_detection_system\test_data\test_image.jpg
```
### 2. Configure the detection boxes
Edit the `test_auto_dot()` function in `auto_dot.py` and modify the `boxes` parameter:
```python
# Format 1: [x1, y1, x2, y2]
boxes = [
[100, 200, 300, 600], # first container
[400, 200, 600, 600], # second container
]
# Format 2: [cx, cy, size]
boxes = [
[200, 400, 400], # center (200, 400), size 400
]
```
### 3. Run the test
```bash
cd D:\restructure\liquid_level_line_detection_system\handlers\videopage
python auto_dot.py
```
### 4. Check the results
- **Console output**: detailed detection process and results
- **Annotated image**: `D:\restructure\liquid_level_line_detection_system\test_output\auto_dot_result.jpg`
## API Usage Example
```python
from handlers.videopage.auto_dot import AutoDotDetector
import cv2
# 1. Create the detector
detector = AutoDotDetector(
model_path="path/to/model.dat",
device='cuda' # or 'cpu'
)
# 2. Load the image
image = cv2.imread("test_image.jpg")
# 3. Define the detection boxes
boxes = [
[100, 200, 300, 600], # [x1, y1, x2, y2]
]
# 4. Run detection
result = detector.detect_container_boundaries(
image=image,
boxes=boxes,
conf_threshold=0.5
)
# 5. Read the results
if result['success']:
for container in result['containers']:
print(f"Container {container['index']}:")
print(f"  Top: ({container['top_x']}, {container['top']})")
print(f"  Bottom: ({container['bottom_x']}, {container['bottom']})")
print(f"  Height: {container['height']}px")
print(f"  Confidence: {container['confidence']:.3f}")
# Save the annotated image
cv2.imwrite("result.jpg", result['annotated_image'])
```
## Output Data Structure
```python
{
'success': bool, # whether detection succeeded
'containers': [
{
'index': int, # container index
'top': int, # top y coordinate
'bottom': int, # bottom y coordinate
'top_x': int, # top x coordinate
'bottom_x': int, # bottom x coordinate
'height': int, # container height (pixels)
'confidence': float, # detection confidence
'method': str # detection method used
},
...
],
'annotated_image': np.ndarray # annotated image
}
```
## Detection Methods
### Method 1: liquid_air (most reliable)
- **Container bottom**: lowest point of the liquid mask
- **Container top**: highest point of the air mask
- **When used**: both liquid and air are detected
### Method 2: liquid_only (second choice)
- **Container bottom**: lowest point of the liquid mask
- **Container top**: highest point of the liquid mask
- **When used**: only liquid is detected, no air
### Method 3: air_only (fallback)
- **Container bottom**: lowest point of the air mask
- **Container top**: highest point of the air mask
- **When used**: only air is detected, no liquid
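A condensed sketch of the fallback order described above; the `*_top` / `*_bottom` arguments are assumed to be y coordinates extracted from the masks, or `None` when that class was not detected.

```python
# Condensed sketch of the method fallback described above.
def pick_boundaries(liquid_top, liquid_bottom, air_top, air_bottom):
    if liquid_bottom is not None and air_top is not None:
        return air_top, liquid_bottom, "liquid_air"      # most reliable
    if liquid_top is not None and liquid_bottom is not None:
        return liquid_top, liquid_bottom, "liquid_only"  # second choice
    if air_top is not None and air_bottom is not None:
        return air_top, air_bottom, "air_only"           # fallback
    return None, None, "failed"
```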
## Visualization
The annotated image contains:
- **Green dots**: container tops
- **Red dots**: container bottoms
- **Cyan lines**: container heights
- **Horizontal reference lines**: top and bottom levels
- **Text labels**: Top-N, Bottom-N, height values
## Notes
1. **Model path**: make sure the model file exists and is accessible
2. **Box placement**: each detection box should cover the full container region
3. **Confidence threshold**: defaults to 0.5; adjust it for your footage
4. **GPU acceleration**: CUDA is recommended for faster detection
## Debugging Tips
1. **Read the console output**: detailed detection process logs
2. **Inspect the annotated image**: verify the accuracy of the detection results
3. **Adjust the detection boxes**: if detection fails, try moving or resizing the boxes
4. **Lower the confidence**: if no masks are detected, try lowering `conf_threshold`
## Integrating into the Main System
Once standalone debugging succeeds, call it from the main system:
```python
from handlers.videopage.auto_dot import AutoDotDetector
# Add an "Auto Dot" button to the annotation page
# On click, call detector.detect_container_boundaries()
# Fill the returned top/bottom coordinates into the annotation point positions
```
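A hedged sketch of that integration, assuming the annotation page exposes the current frame, the drawn boxes, and a point setter; `annotation_widget.setPoints`, `current_frame` and `current_boxes` below are placeholders, not confirmed APIs.

```python
# Hedged sketch: wire an "Auto Dot" button to the detector and push the
# returned coordinates into the annotation widget.
from handlers.videopage.auto_dot import AutoDotDetector

def on_auto_dot_clicked(annotation_widget, current_frame, current_boxes, model_path):
    detector = AutoDotDetector(model_path=model_path, device="cuda")
    result = detector.detect_container_boundaries(
        image=current_frame, boxes=current_boxes, conf_threshold=0.5
    )
    if not result["success"]:
        return
    for c in result["containers"]:
        # setPoints is a hypothetical setter on the annotation page
        annotation_widget.setPoints(c["index"], top=c["top"], bottom=c["bottom"])
```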
...@@ -1909,7 +1909,6 @@ class ChannelPanelHandler: ...@@ -1909,7 +1909,6 @@ class ChannelPanelHandler:
config_path = os.path.join(project_root, 'database', 'config', 'default_config.yaml') config_path = os.path.join(project_root, 'database', 'config', 'default_config.yaml')
if not os.path.exists(config_path): if not os.path.exists(config_path):
print(f"[ConfigWatcher] 配置文件不存在: {config_path}")
return return
# 创建文件系统监控器 # 创建文件系统监控器
...@@ -1919,18 +1918,13 @@ class ChannelPanelHandler: ...@@ -1919,18 +1918,13 @@ class ChannelPanelHandler:
# 连接文件变化信号 # 连接文件变化信号
self._config_watcher.fileChanged.connect(self._onConfigFileChanged) self._config_watcher.fileChanged.connect(self._onConfigFileChanged)
print(f"[ConfigWatcher] 已开始监控配置文件: {config_path}")
except Exception as e: except Exception as e:
print(f"[ConfigWatcher] 初始化配置文件监控器失败: {e}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()
def _onConfigFileChanged(self, path): def _onConfigFileChanged(self, path):
"""配置文件变化时的回调""" """配置文件变化时的回调"""
try: try:
print(f"🔄 [ConfigWatcher] 检测到配置文件变化: {path}")
# 延迟一小段时间,确保文件写入完成 # 延迟一小段时间,确保文件写入完成
# 修复:检查 QApplication 是否存在 # 修复:检查 QApplication 是否存在
if QtWidgets.QApplication.instance() is not None: if QtWidgets.QApplication.instance() is not None:
...@@ -1940,45 +1934,32 @@ class ChannelPanelHandler: ...@@ -1940,45 +1934,32 @@ class ChannelPanelHandler:
self._reloadChannelConfig() self._reloadChannelConfig()
except Exception as e: except Exception as e:
print(f"[ConfigWatcher] 处理配置文件变化失败: {e}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()
def _reloadChannelConfig(self): def _reloadChannelConfig(self):
"""重新加载通道配置""" """重新加载通道配置"""
try: try:
print("🔄 [ConfigWatcher] 开始重新加载通道配置...")
# 获取配置文件路径 # 获取配置文件路径
project_root = get_project_root() project_root = get_project_root()
config_path = os.path.join(project_root, 'database', 'config', 'default_config.yaml') config_path = os.path.join(project_root, 'database', 'config', 'default_config.yaml')
print(f" 📂 [ConfigWatcher] 配置文件路径: {config_path}")
print(f" 📂 [ConfigWatcher] 文件是否存在: {os.path.exists(config_path)}")
if not os.path.exists(config_path): if not os.path.exists(config_path):
print(f"[ConfigWatcher] 配置文件不存在: {config_path}")
return return
# 读取配置文件 # 读取配置文件
with open(config_path, 'r', encoding='utf-8') as f: with open(config_path, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f) or {} config = yaml.safe_load(f) or {}
print(f" 📄 [ConfigWatcher] 配置文件内容键: {list(config.keys())}")
print(f" 🗺️ [ConfigWatcher] 通道面板映射: {list(self._channel_panels_map.keys())}")
# 🔥 关键修复:更新 self._config,这样 _getChannelConfigFromFile 才能读取到最新配置 # 🔥 关键修复:更新 self._config,这样 _getChannelConfigFromFile 才能读取到最新配置
old_config = self._config old_config = self._config
self._config = config self._config = config
print(f" 🔄 [ConfigWatcher] 已更新内部配置缓存 (self._config)")
# 更新每个通道面板的名称和地址信息 # 更新每个通道面板的名称和地址信息
for i in range(1, 5): for i in range(1, 5):
channel_id = f'channel{i}' channel_id = f'channel{i}'
channel_key = f'channel{i}' channel_key = f'channel{i}'
print(f" [ConfigWatcher] 处理 {channel_id}...")
# 获取通道面板 # 获取通道面板
panel = self._channel_panels_map.get(channel_id) panel = self._channel_panels_map.get(channel_id)
if not panel: if not panel:
...@@ -2024,15 +2005,10 @@ class ChannelPanelHandler: ...@@ -2024,15 +2005,10 @@ class ChannelPanelHandler:
# 重新添加监控(因为某些编辑器保存文件时会删除再创建,导致监控失效) # 重新添加监控(因为某些编辑器保存文件时会删除再创建,导致监控失效)
if hasattr(self, '_config_watcher'): if hasattr(self, '_config_watcher'):
monitored_files = self._config_watcher.files() monitored_files = self._config_watcher.files()
print(f" 👀 [ConfigWatcher] 当前监控的文件: {monitored_files}")
if config_path not in monitored_files: if config_path not in monitored_files:
self._config_watcher.addPath(config_path) self._config_watcher.addPath(config_path)
print(f" 🔄 [ConfigWatcher] 重新添加文件监控: {config_path}")
print("[ConfigWatcher] 通道配置重新加载完成(包括地址配置)")
except Exception as e: except Exception as e:
print(f"[ConfigWatcher] 重新加载通道配置失败: {e}")
import traceback import traceback
traceback.print_exc() traceback.print_exc()
......
...@@ -117,6 +117,7 @@ class GeneralSetPanelHandler: ...@@ -117,6 +117,7 @@ class GeneralSetPanelHandler:
widget.annotationEngineRequested.connect(self._handleAnnotationEngineRequest) widget.annotationEngineRequested.connect(self._handleAnnotationEngineRequest)
widget.frameLoadRequested.connect(self._handleFrameLoadRequest) widget.frameLoadRequested.connect(self._handleFrameLoadRequest)
widget.annotationDataRequested.connect(self._handleAnnotationDataRequest) widget.annotationDataRequested.connect(self._handleAnnotationDataRequest)
widget.liveFrameRequested.connect(self._handleLiveFrameRequest)
def _handleRefreshModelList(self, model_widget=None): def _handleRefreshModelList(self, model_widget=None):
"""处理刷新模型列表请求""" """处理刷新模型列表请求"""
...@@ -264,7 +265,6 @@ class GeneralSetPanelHandler: ...@@ -264,7 +265,6 @@ class GeneralSetPanelHandler:
if self.general_set_panel: if self.general_set_panel:
self.general_set_panel.setTaskIdOptions(task_ids) self.general_set_panel.setTaskIdOptions(task_ids)
print(f"[Handler] 已加载 {len(task_ids)} 个任务编号选项")
except Exception as e: except Exception as e:
print(f"[Handler] 加载任务ID选项失败: {e}") print(f"[Handler] 加载任务ID选项失败: {e}")
import traceback import traceback
...@@ -405,10 +405,6 @@ class GeneralSetPanelHandler: ...@@ -405,10 +405,6 @@ class GeneralSetPanelHandler:
model_config = default_config.get('model', {}).copy() model_config = default_config.get('model', {}).copy()
model_config['model_path'] = absolute_path model_config['model_path'] = absolute_path
print(f"[Handler] 加载通道 {channel_id} 的模型配置:")
print(f" 相对路径: {channel_model_path}")
print(f" 绝对路径: {absolute_path}")
# 调用widget的方法应用配置 # 调用widget的方法应用配置
if self.general_set_panel: if self.general_set_panel:
self.general_set_panel.applyModelConfigFromHandler( self.general_set_panel.applyModelConfigFromHandler(
...@@ -693,26 +689,15 @@ class GeneralSetPanelHandler: ...@@ -693,26 +689,15 @@ class GeneralSetPanelHandler:
channel_frame = None channel_frame = None
if self.general_set_panel and self.general_set_panel.channel_id: if self.general_set_panel and self.general_set_panel.channel_id:
channel_frame = self.getLatestFrame(self.general_set_panel.channel_id) channel_frame = self.getLatestFrame(self.general_set_panel.channel_id)
if channel_frame is not None:
pass
# 如果没有获取到通道画面,使用测试图像 # 如果没有获取到通道画面,弹出提示框并返回
if channel_frame is None: if channel_frame is None:
pass QtWidgets.QMessageBox.warning(
import numpy as np self.main_window,
"获取画面失败",
channel_frame = np.zeros((720, 1280, 3), dtype=np.uint8) "获取通道画面失败,请先连接通道"
channel_frame[:] = (100, 120, 140) # 灰色背景 )
return
# 添加文字说明
cv2.putText(channel_frame, "Test Annotation Frame", (50, 50),
cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 2)
cv2.putText(channel_frame, "Draw detection areas and mark liquid levels", (50, 100),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200, 200, 200), 1)
# 添加测试区域
cv2.rectangle(channel_frame, (200, 200), (400, 400), (0, 255, 0), 2)
cv2.rectangle(channel_frame, (500, 300), (700, 500), (0, 0, 255), 2)
# 2. 保存原始帧用于标注结果显示 # 2. 保存原始帧用于标注结果显示
self._annotation_source_frame = channel_frame.copy() if channel_frame is not None else None self._annotation_source_frame = channel_frame.copy() if channel_frame is not None else None
...@@ -724,6 +709,9 @@ class GeneralSetPanelHandler: ...@@ -724,6 +709,9 @@ class GeneralSetPanelHandler:
if self.general_set_panel and self.general_set_panel.channel_name: if self.general_set_panel and self.general_set_panel.channel_name:
annotation_widget.setChannelName(self.general_set_panel.channel_name) annotation_widget.setChannelName(self.general_set_panel.channel_name)
# 3.5. 初始化物理变焦控制器
self._initPhysicalZoomForAnnotation(annotation_widget)
# 4. 连接标注完成信号 # 4. 连接标注完成信号
def on_annotation_completed(boxes, bottoms, tops): def on_annotation_completed(boxes, bottoms, tops):
print(f"\n[DEBUG] ========== 标注完成回调 ==========") print(f"\n[DEBUG] ========== 标注完成回调 ==========")
...@@ -789,6 +777,9 @@ class GeneralSetPanelHandler: ...@@ -789,6 +777,9 @@ class GeneralSetPanelHandler:
# 5. 加载图像并显示标注界面 # 5. 加载图像并显示标注界面
if annotation_widget.loadFrame(channel_frame): if annotation_widget.loadFrame(channel_frame):
# 启用实时画面预览
annotation_widget.enableLivePreview(True)
# 🔥 关键修复:延迟显示窗口,确保全屏应用后再显示 # 🔥 关键修复:延迟显示窗口,确保全屏应用后再显示
# 这样可以确保标注帧在全屏模式下立即显示 # 这样可以确保标注帧在全屏模式下立即显示
QtCore.QTimer.singleShot(150, annotation_widget.show) QtCore.QTimer.singleShot(150, annotation_widget.show)
...@@ -1246,6 +1237,73 @@ class GeneralSetPanelHandler: ...@@ -1246,6 +1237,73 @@ class GeneralSetPanelHandler:
if self.annotation_widget: if self.annotation_widget:
self.annotation_widget.showAnnotationError(f"获取标注数据失败: {str(e)}") self.annotation_widget.showAnnotationError(f"获取标注数据失败: {str(e)}")
def _handleLiveFrameRequest(self):
"""处理实时画面请求"""
try:
# 获取通道最新画面
if self.general_set_panel and self.general_set_panel.channel_id:
channel_frame = self.getLatestFrame(self.general_set_panel.channel_id)
# 更新标注界面的画面
if channel_frame is not None and self.annotation_widget:
self.annotation_widget.updateLiveFrame(channel_frame)
except Exception as e:
pass
def _initPhysicalZoomForAnnotation(self, annotation_widget):
"""为标注界面初始化物理变焦控制器"""
try:
# 尝试导入物理变焦控制器
try:
from handlers.videopage.physical_zoom_controller import PhysicalZoomController
except ImportError:
try:
from physical_zoom_controller import PhysicalZoomController
except ImportError:
return
# 获取通道配置
if not self.general_set_panel or not self.general_set_panel.channel_id:
return
channel_id = self.general_set_panel.channel_id
# 从配置文件获取设备IP
config = self._getChannelConfig(channel_id)
if not config:
return
device_ip = config.get('address', '')
if not device_ip or 'rtsp://' not in device_ip:
return
# 提取IP地址
import re
match = re.search(r'@(\d+\.\d+\.\d+\.\d+)', device_ip)
if not match:
return
device_ip = match.group(1)
# 创建物理变焦控制器
physical_zoom_controller = PhysicalZoomController(
device_ip=device_ip,
username='admin',
password='cei345678',
channel=1
)
# 尝试连接设备
if physical_zoom_controller.connect_device():
# 设置到标注界面
annotation_widget.setPhysicalZoomController(physical_zoom_controller)
print(f"[标注界面] 物理变焦已启用 ({device_ip})")
else:
print(f"[标注界面] 物理变焦设备连接失败")
except Exception as e:
print(f"[标注界面] 初始化物理变焦失败: {e}")
def showGeneralSetPanel(self): def showGeneralSetPanel(self):
"""显示常规设置面板""" """显示常规设置面板"""
from widgets.videopage.general_set import GeneralSetPanel from widgets.videopage.general_set import GeneralSetPanel
......
...@@ -144,10 +144,6 @@ class ModelSettingHandler: ...@@ -144,10 +144,6 @@ class ModelSettingHandler:
model_config['model_path'] = absolute_path model_config['model_path'] = absolute_path
config_source = f"default_config.yaml → {channel_model_key} + model (全局参数)" config_source = f"default_config.yaml → {channel_model_key} + model (全局参数)"
print(f"[Handler] 加载通道 {channel_id} 的模型配置")
print(f" 相对路径: {channel_model_path}")
print(f" 绝对路径: {absolute_path}")
print(f" model_config['model_path'] = {model_config.get('model_path', 'None')}")
else: else:
# 使用全局模型配置 # 使用全局模型配置
model_config = default_config.get('model', {}).copy() model_config = default_config.get('model', {}).copy()
......
"""
Embed resource files as base64 into generated Python modules so they can be bundled
into the packaged exe (_internal) instead of shipping as plain files.
"""
import os
import base64
from pathlib import Path
def embed_file_to_python(file_path, output_path, var_name):
"""
Encode a file as base64 and write it into a generated Python module.
Args:
file_path: source file to embed
output_path: path of the generated Python module
var_name: variable/function name used inside the generated module
"""
with open(file_path, 'rb') as f:
data = f.read()
encoded = base64.b64encode(data).decode('utf-8')
# Generate the Python module source
code = f'''"""
Auto-generated embedded resource. Source file: {file_path}
"""
import base64
import io
# Base64-encoded file content
_{var_name}_data = """{encoded}"""
def get_{var_name}():
""""""
return base64.b64decode(_{var_name}_data)
def get_{var_name}_path():
""""""
import tempfile
import os
data = get_{var_name}()
#
ext = os.path.splitext("{os.path.basename(file_path)}")[1]
#
fd, temp_path = tempfile.mkstemp(suffix=ext, prefix='embedded_resource_')
try:
with os.fdopen(fd, 'wb') as f:
f.write(data)
return temp_path
except:
os.close(fd)
raise
'''
with open(output_path, 'w', encoding='utf-8') as f:
f.write(code)
print(f" : {file_path} -> {output_path} (: {var_name})")
def embed_directory_to_python(dir_path, output_dir, max_size_mb=1):
"""
Embed every file in a directory into generated Python modules.
Args:
dir_path: directory to embed
output_dir: directory for the generated modules
max_size_mb: skip files larger than this size (MB)
"""
os.makedirs(output_dir, exist_ok=True)
embedded_files = []
skipped_files = []
for root, dirs, files in os.walk(dir_path):
for file in files:
file_path = os.path.join(root, file)
rel_path = os.path.relpath(file_path, dir_path)
#
size_mb = os.path.getsize(file_path) / (1024 * 1024)
if size_mb > max_size_mb:
skipped_files.append((rel_path, size_mb))
continue
#
var_name = rel_path.replace(os.sep, '_').replace('.', '_').replace('-', '_')
#
output_path = os.path.join(output_dir, f"embedded_{var_name}.py")
try:
embed_file_to_python(file_path, output_path, var_name)
embedded_files.append((rel_path, file_path))
except Exception as e:
print(f" : {file_path} - {e}")
skipped_files.append((rel_path, size_mb))
print(f"\n:")
print(f" : {len(embedded_files)} ")
print(f" : {len(skipped_files)} {max_size_mb}MB")
return embedded_files, skipped_files
if __name__ == '__main__':
# Embed the icons directory
project_root = os.path.abspath('.')
icons_dir = os.path.join(project_root, 'icons')
output_dir = os.path.join(project_root, 'hooks', 'embedded_resources')
if os.path.exists(icons_dir):
print("icons...")
embed_directory_to_python(icons_dir, output_dir, max_size_mb=0.5) # 0.5MB
else:
print(f": {icons_dir}")
# -*- coding: utf-8 -*-
"""
PyInstaller hook for encodings module
Ensures the encodings package and all its submodules are bundled.
"""
from PyInstaller.utils.hooks import collect_submodules
# Collect all encodings submodules
hiddenimports = collect_submodules('encodings')
# No extra data files are needed here: encodings is handled in exe.spec
# and normally ships inside base_library.zip
datas = []
"""
PyArmor Hook for PyInstaller
Ensures the PyArmor runtime is bundled when packaging with PyInstaller.
"""
from PyInstaller.utils.hooks import collect_data_files, collect_submodules
# PyArmor runtime modules that must be importable at run time
hiddenimports = [
'pyarmor',
'pyarmor.pyarmor_runtime',
'pyarmor.pyarmor_runtime_000000',
'pyarmor.pytransform',
'pytransform',
]
# Collect any additional pyarmor submodules
try:
hiddenimports += collect_submodules('pyarmor')
except:
pass
# PyArmor data files
datas = []
try:
datas += collect_data_files('pyarmor')
except:
pass
# Collect the PyArmor dynamic libraries (.so/.dll) used by the runtime hook
binaries = []
try:
from PyInstaller.utils.hooks import collect_dynamic_libs
binaries += collect_dynamic_libs('pyarmor')
except:
pass
from PyInstaller.utils.hooks import collect_submodules, collect_dynamic_libs
# Notes:
# - torch .py sources are shipped under _internal/torch
# - DLL/PYD binaries are collected here; pure-Python modules end up in the PYZ as .pyc
hiddenimports = collect_submodules('torch')
binaries = collect_dynamic_libs('torch')
datas = []
import os
import sys
import faulthandler
import traceback
from datetime import datetime
def _log_dir() -> str:
# onedir build: sys.executable points to dist/exe/exe.exe
base_dir = os.path.dirname(getattr(sys, "executable", sys.argv[0]))
path = os.path.join(base_dir, "logs")
try:
os.makedirs(path, exist_ok=True)
except Exception:
#
path = os.path.join(os.path.abspath(os.getcwd()), "logs")
os.makedirs(path, exist_ok=True)
return path
def _open_log_file():
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
log_path = os.path.join(_log_dir(), f"app_{timestamp}.log")
# Open the log file line-buffered.
# The encodings module may not be initialized yet in a frozen build, so fall back below.
try:
return open(log_path, mode="a", encoding="utf-8", buffering=1)
except (LookupError, NameError):
# encodings not available yet; open without an explicit encoding
return open(log_path, mode="a", buffering=1)
def _install_handlers():
log_fh = _open_log_file()
#
print_fn = lambda *args: log_fh.write(" ".join(str(a) for a in args) + "\n")
print_fn("===== Application Start =====")
print_fn("Executable:", getattr(sys, "executable", sys.argv[0]))
print_fn("CWD:", os.getcwd())
print_fn("Args:", " ".join(sys.argv))
# Redirect stdout/stderr to the log file
sys.stdout = log_fh
sys.stderr = log_fh
# Enable faulthandler so native crashes are dumped to the log
try:
faulthandler.enable(log_fh)
except Exception:
pass
#
def _excepthook(exc_type, exc, tb):
log_fh.write("===== Uncaught Exception =====\n")
traceback.print_exception(exc_type, exc, tb, file=log_fh)
log_fh.flush()
sys.excepthook = _excepthook
# Delay installation until Python's import machinery is ready
# (sys.modules must already contain encodings in a frozen build)
def _delayed_install():
try:
# Make sure encodings is importable before opening files
import encodings
_install_handlers()
except Exception:
# encodings failed to import; try to install the handlers anyway
try:
_install_handlers()
except Exception:
#
pass
#
import sys
if hasattr(sys, '_getframe'):
# sys._getframe exists, so the interpreter is initialized enough to install now
try:
_delayed_install()
except:
pass
else:
#
try:
_delayed_install()
except:
pass
import os
import sys
import json
import tempfile
import shutil
# Runtime hook: restore encrypted binaries before the main program runs.
# - Executed by PyInstaller before any application Python code
# - Reads the encrypted .bin payloads under _internal/encrypted and rewrites the original DLLs
# - The AES key/IV below must match the values used in exe.spec
AES256_KEY = b'0123456789ABCDEF0123456789ABCDEF' # must match exe.spec
AES_IV = b'ABCDEF0123456789' # must match exe.spec
PROTECT_KEYWORDS = ['torch', 'torchvision', 'torchaudio']
def _try_import_crypto():
try:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
return Cipher, algorithms, modes, default_backend
except Exception:
return None, None, None, None
def _decrypt_bytes(data: bytes) -> bytes:
Cipher, algorithms, modes, default_backend = _try_import_crypto()
if Cipher is not None:
cipher = Cipher(algorithms.AES(AES256_KEY), modes.CTR(AES_IV), backend=default_backend())
decryptor = cipher.decryptor()
return decryptor.update(data) + decryptor.finalize()
# Fallback: simple XOR "decryption" when the cryptography package is unavailable
key = AES256_KEY
out = bytearray(len(data))
for i, b in enumerate(data):
out[i] = b ^ key[i % len(key)]
return bytes(out)
def _get_base_dir():
# In a PyInstaller onedir build, _MEIPASS (when set) points at the _internal directory.
# Prefer _MEIPASS if it is available.
if hasattr(sys, '_MEIPASS') and sys._MEIPASS:
return sys._MEIPASS
# Otherwise derive the directory from the exe location:
exe_dir = os.path.dirname(sys.executable)
# dist/exe/exe.exe sits next to dist/exe/_internal,
# and a.datas places the encrypted payloads under _internal/encrypted
return os.path.join(exe_dir, '_internal')
def _load_manifest(encrypted_root):
manifest_path = os.path.join(encrypted_root, 'manifest.json')
if not os.path.exists(manifest_path):
return []
try:
with open(manifest_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception:
return []
def _prepare_temp_dir():
base = tempfile.gettempdir()
target = os.path.join(base, 'lllds_decrypt_bins')
os.makedirs(target, exist_ok=True)
return target
def _add_search_path(path):
# Windows: register an additional DLL search directory
try:
if hasattr(os, 'add_dll_directory'):
os.add_dll_directory(path)
except Exception:
pass
if path not in os.environ.get('PATH', ''):
os.environ['PATH'] = path + os.pathsep + os.environ.get('PATH', '')
if path not in sys.path:
sys.path.insert(0, path)
def _main():
try:
base_dir = _get_base_dir()
encrypted_root = os.path.join(base_dir, 'encrypted')
if not os.path.isdir(encrypted_root):
return
manifest = _load_manifest(encrypted_root)
if not manifest:
return
out_dir = _prepare_temp_dir()
wrote_any = False
for item in manifest:
name = item.get('name')
cipher_rel = item.get('cipher_path')
if not name or not cipher_rel:
continue
#
low = name.lower()
if not any(k in low for k in PROTECT_KEYWORDS):
continue
cipher_abs = os.path.join(base_dir, cipher_rel.replace('/', os.sep))
if not os.path.exists(cipher_abs):
continue
with open(cipher_abs, 'rb') as f:
enc = f.read()
raw = _decrypt_bytes(enc)
out_path = os.path.join(out_dir, name)
with open(out_path, 'wb') as wf:
wf.write(raw)
wrote_any = True
if wrote_any:
_add_search_path(out_dir)
except Exception:
#
pass
_main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PyInstaller Hook - ultralytics
Sets up the ultralytics config/cache directories and disables telemetry for the packaged app.
"""
import os
import sys
import tempfile
from pathlib import Path
def setup_ultralytics_environment():
""" ultralytics """
try:
# Determine the application root directory
if getattr(sys, 'frozen', False):
# Running from the packaged executable
app_dir = Path(sys.executable).parent
else:
# Running from source
app_dir = Path(__file__).parent.parent
# Point the YOLO config directory at the bundled database/config
os.environ['YOLO_CONFIG_DIR'] = str(app_dir / 'database' / 'config')
# Use a temporary directory for the ultralytics config/cache
temp_dir = tempfile.mkdtemp(prefix='ultralytics_')
os.environ['ULTRALYTICS_CONFIG_DIR'] = temp_dir
# Silence ultralytics logging and analytics
os.environ['YOLO_VERBOSE'] = 'False'
os.environ['ULTRALYTICS_ANALYTICS'] = 'False'
print(f"ultralytics environment configured: {app_dir}")
except Exception as e:
print(f"Failed to configure ultralytics environment: {e}")
#
setup_ultralytics_environment()
"""
PyArmor Runtime Hook
Ensures the PyArmor runtime can be imported from a PyInstaller bundle.
"""
import os
import sys
def _init_pyarmor_runtime():
"""
Locate the PyArmor runtime inside the bundle and make it importable.
Failing to find it is not fatal; the code may simply not be obfuscated.
"""
try:
# Find the base directory: _MEIPASS for onefile, _internal next to the exe for onedir
if hasattr(sys, '_MEIPASS'):
# onefile mode
base_dir = sys._MEIPASS
else:
# onedir mode: exe.exe sits next to _internal
exe_dir = os.path.dirname(sys.executable)
base_dir = os.path.join(exe_dir, '_internal')
# Make sure the bundle directory is on sys.path
if base_dir not in sys.path:
sys.path.insert(0, base_dir)
# Try to import the PyArmor runtime if it was bundled
try:
import pyarmor.pyarmor_runtime # type: ignore
except ImportError:
# PyArmor runtime not present; the code may not be obfuscated
pass
except Exception:
#
pass
#
_init_pyarmor_runtime()
File added
# flake8: noqa
import logging
import sys
from qtpy import QT_VERSION
__appname__ = "labelme"
# Semantic Versioning 2.0.0: https://semver.org/
# 1. MAJOR version when you make incompatible API changes;
# 2. MINOR version when you add functionality in a backwards-compatible manner;
# 3. PATCH version when you make backwards-compatible bug fixes.
# e.g., 1.0.0a0, 1.0.0a1, 1.0.0b0, 1.0.0rc0, 1.0.0, 1.0.0.post0
__version__ = "5.2.1"
QT4 = QT_VERSION[0] == "4"
QT5 = QT_VERSION[0] == "5"
del QT_VERSION
PY2 = sys.version[0] == "2"
PY3 = sys.version[0] == "3"
del sys
from labelme.label_file import LabelFile
from labelme import testing
from labelme import utils
import argparse
import codecs
import logging
import os
import os.path as osp
import sys
import yaml
# Make sure the project parent directory is on the Python path
parent_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
if parent_dir not in sys.path:
sys.path.insert(0, parent_dir)
from qtpy import QtCore
from qtpy import QtWidgets
from labelme import __appname__
from labelme import __version__
from labelme.app import MainWindow
from labelme.config import get_config
from labelme.logger import logger
from labelme.utils import newIcon
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--version", "-V", action="store_true", help="show version"
)
parser.add_argument(
"--reset-config", action="store_true", help="reset qt config"
)
parser.add_argument(
"--logger-level",
default="info",
choices=["debug", "info", "warning", "fatal", "error"],
help="logger level",
)
parser.add_argument("filename", nargs="?", help="image or label filename")
parser.add_argument(
"--output",
"-O",
"-o",
help="output file or directory (if it ends with .json it is "
"recognized as file, else as directory)",
)
default_config_file = os.path.join(os.path.expanduser("~"), ".labelmerc")
parser.add_argument(
"--config",
dest="config",
help="config file or yaml-format string (default: {})".format(
default_config_file
),
default=default_config_file,
)
# config for the gui
parser.add_argument(
"--nodata",
dest="store_data",
action="store_false",
help="stop storing image data to JSON file",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--autosave",
dest="auto_save",
action="store_true",
help="auto save",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--nosortlabels",
dest="sort_labels",
action="store_false",
help="stop sorting labels",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--flags",
help="comma separated list of flags OR file containing flags",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--labelflags",
dest="label_flags",
help=r"yaml string of label specific flags OR file containing json "
r"string of label specific flags (ex. {person-\d+: [male, tall], "
r"dog-\d+: [black, brown, white], .*: [occluded]})", # NOQA
default=argparse.SUPPRESS,
)
parser.add_argument(
"--labels",
help="comma separated list of labels OR file containing labels",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--validatelabel",
dest="validate_label",
choices=["exact"],
help="label validation types",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--keep-prev",
action="store_true",
help="keep annotation of previous frame",
default=argparse.SUPPRESS,
)
parser.add_argument(
"--epsilon",
type=float,
help="epsilon to find nearest vertex on canvas",
default=argparse.SUPPRESS,
)
args = parser.parse_args()
if args.version:
print("{0} {1}".format(__appname__, __version__))
sys.exit(0)
logger.setLevel(getattr(logging, args.logger_level.upper()))
if hasattr(args, "flags"):
if os.path.isfile(args.flags):
with codecs.open(args.flags, "r", encoding="utf-8") as f:
args.flags = [line.strip() for line in f if line.strip()]
else:
args.flags = [line for line in args.flags.split(",") if line]
if hasattr(args, "labels"):
if os.path.isfile(args.labels):
with codecs.open(args.labels, "r", encoding="utf-8") as f:
args.labels = [line.strip() for line in f if line.strip()]
else:
args.labels = [line for line in args.labels.split(",") if line]
if hasattr(args, "label_flags"):
if os.path.isfile(args.label_flags):
with codecs.open(args.label_flags, "r", encoding="utf-8") as f:
args.label_flags = yaml.safe_load(f)
else:
args.label_flags = yaml.safe_load(args.label_flags)
config_from_args = args.__dict__
config_from_args.pop("version")
reset_config = config_from_args.pop("reset_config")
filename = config_from_args.pop("filename")
output = config_from_args.pop("output")
config_file_or_yaml = config_from_args.pop("config")
config = get_config(config_file_or_yaml, config_from_args)
if not config["labels"] and config["validate_label"]:
logger.error(
"--labels must be specified with --validatelabel or "
"validate_label: true in the config file "
"(ex. ~/.labelmerc)."
)
sys.exit(1)
output_file = None
output_dir = None
if output is not None:
if output.endswith(".json"):
output_file = output
else:
output_dir = output
translator = QtCore.QTranslator()
translator.load(
QtCore.QLocale.system().name(),
osp.dirname(osp.abspath(__file__)) + "/translate",
)
app = QtWidgets.QApplication(sys.argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("icon"))
app.installTranslator(translator)
win = MainWindow(
config=config,
filename=filename,
output_file=output_file,
output_dir=output_dir,
)
if reset_config:
logger.info("Resetting Qt config: %s" % win.settings.fileName())
win.settings.clear()
sys.exit(0)
win.show()
win.raise_()
sys.exit(app.exec_())
# this main block is required to generate executable by pyinstaller
if __name__ == "__main__":
main()
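For reference, the argument handling above can be exercised without a shell by setting sys.argv before calling main(); the file names and labels below are hypothetical, and the call opens the GUI:
# Hypothetical invocation of the entry point above; launches the labelme window.
import sys
from labelme.__main__ import main

sys.argv = [
    "labelme",
    "example.jpg",          # positional: image or label filename
    "-O", "example.json",   # ends with .json -> treated as an output file
    "--labels", "cat,dog",  # comma separated list OR a file containing labels
    "--nodata",             # do not embed image data in the saved JSON
]
main()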
# flake8: noqa
from . import draw_json
from . import draw_label_png
from . import json_to_dataset
from . import on_docker
#!/usr/bin/env python
import argparse
import sys
import imgviz
import matplotlib.pyplot as plt
from labelme.label_file import LabelFile
from labelme import utils
PY2 = sys.version_info[0] == 2
def main():
parser = argparse.ArgumentParser()
parser.add_argument("json_file")
args = parser.parse_args()
label_file = LabelFile(args.json_file)
img = utils.img_data_to_arr(label_file.imageData)
label_name_to_value = {"_background_": 0}
for shape in sorted(label_file.shapes, key=lambda x: x["label"]):
label_name = shape["label"]
if label_name in label_name_to_value:
label_value = label_name_to_value[label_name]
else:
label_value = len(label_name_to_value)
label_name_to_value[label_name] = label_value
lbl, _ = utils.shapes_to_label(
img.shape, label_file.shapes, label_name_to_value
)
label_names = [None] * (max(label_name_to_value.values()) + 1)
for name, value in label_name_to_value.items():
label_names[value] = name
lbl_viz = imgviz.label2rgb(
lbl,
imgviz.asgray(img),
label_names=label_names,
font_size=30,
loc="rb",
)
plt.subplot(121)
plt.imshow(img)
plt.subplot(122)
plt.imshow(lbl_viz)
plt.show()
if __name__ == "__main__":
main()
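This script and json_to_dataset below build the label-to-id mapping identically: labels are visited in sorted order and assigned consecutive ids after the background. A small illustration with hypothetical shapes:
# Hypothetical shapes; shows the consecutive ids assigned in sorted label order.
shapes = [{"label": "dog"}, {"label": "cat"}, {"label": "dog"}]
label_name_to_value = {"_background_": 0}
for shape in sorted(shapes, key=lambda x: x["label"]):
    if shape["label"] not in label_name_to_value:
        label_name_to_value[shape["label"]] = len(label_name_to_value)
# label_name_to_value == {"_background_": 0, "cat": 1, "dog": 2}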
import argparse
import imgviz
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
from labelme.logger import logger
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("label_png", help="label PNG file")
args = parser.parse_args()
lbl = np.asarray(PIL.Image.open(args.label_png))
logger.info("label shape: {}".format(lbl.shape))
logger.info("unique label values: {}".format(np.unique(lbl)))
lbl_viz = imgviz.label2rgb(lbl)
plt.imshow(lbl_viz)
plt.show()
if __name__ == "__main__":
main()
import argparse
import base64
import json
import os
import os.path as osp
import imgviz
import PIL.Image
from labelme.logger import logger
from labelme import utils
def main():
logger.warning(
"This script is aimed to demonstrate how to convert the "
"JSON file to a single image dataset."
)
logger.warning(
"It won't handle multiple JSON files to generate a "
"real-use dataset."
)
parser = argparse.ArgumentParser()
parser.add_argument("json_file")
parser.add_argument("-o", "--out", default=None)
args = parser.parse_args()
json_file = args.json_file
if args.out is None:
out_dir = osp.basename(json_file).replace(".", "_")
out_dir = osp.join(osp.dirname(json_file), out_dir)
else:
out_dir = args.out
if not osp.exists(out_dir):
os.mkdir(out_dir)
data = json.load(open(json_file))
imageData = data.get("imageData")
if not imageData:
imagePath = os.path.join(os.path.dirname(json_file), data["imagePath"])
with open(imagePath, "rb") as f:
imageData = f.read()
imageData = base64.b64encode(imageData).decode("utf-8")
img = utils.img_b64_to_arr(imageData)
label_name_to_value = {"_background_": 0}
for shape in sorted(data["shapes"], key=lambda x: x["label"]):
label_name = shape["label"]
if label_name in label_name_to_value:
label_value = label_name_to_value[label_name]
else:
label_value = len(label_name_to_value)
label_name_to_value[label_name] = label_value
lbl, _ = utils.shapes_to_label(
img.shape, data["shapes"], label_name_to_value
)
label_names = [None] * (max(label_name_to_value.values()) + 1)
for name, value in label_name_to_value.items():
label_names[value] = name
lbl_viz = imgviz.label2rgb(
lbl, imgviz.asgray(img), label_names=label_names, loc="rb"
)
PIL.Image.fromarray(img).save(osp.join(out_dir, "img.png"))
utils.lblsave(osp.join(out_dir, "label.png"), lbl)
PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, "label_viz.png"))
with open(osp.join(out_dir, "label_names.txt"), "w") as f:
for lbl_name in label_names:
f.write(lbl_name + "\n")
logger.info("Saved to: {}".format(out_dir))
if __name__ == "__main__":
main()
#!/usr/bin/env python
from __future__ import print_function
import argparse
import distutils.spawn
import json
import os
import os.path as osp
import platform
import shlex
import subprocess
import sys
def get_ip():
dist = platform.platform().split("-")[0]
if dist == "Linux":
return ""
elif dist == "Darwin":
cmd = "ifconfig en0"
output = subprocess.check_output(shlex.split(cmd))
if str != bytes: # Python3
output = output.decode("utf-8")
for row in output.splitlines():
cols = row.strip().split(" ")
if cols[0] == "inet":
ip = cols[1]
return ip
else:
raise RuntimeError("No ip is found.")
else:
raise RuntimeError("Unsupported platform.")
def labelme_on_docker(in_file, out_file):
ip = get_ip()
cmd = "xhost + %s" % ip
subprocess.check_output(shlex.split(cmd))
if out_file:
out_file = osp.abspath(out_file)
if osp.exists(out_file):
raise RuntimeError("File exists: %s" % out_file)
else:
open(osp.abspath(out_file), "w")
cmd = (
"docker run -it --rm"
" -e DISPLAY={0}:0"
" -e QT_X11_NO_MITSHM=1"
" -v /tmp/.X11-unix:/tmp/.X11-unix"
" -v {1}:{2}"
" -w /home/developer"
)
in_file_a = osp.abspath(in_file)
in_file_b = osp.join("/home/developer", osp.basename(in_file))
cmd = cmd.format(
ip,
in_file_a,
in_file_b,
)
if out_file:
out_file_a = osp.abspath(out_file)
out_file_b = osp.join("/home/developer", osp.basename(out_file))
cmd += " -v {0}:{1}".format(out_file_a, out_file_b)
cmd += " wkentaro/labelme labelme {0}".format(in_file_b)
if out_file:
cmd += " -O {0}".format(out_file_b)
subprocess.call(shlex.split(cmd))
if out_file:
try:
json.load(open(out_file))
return out_file
except Exception:
if open(out_file).read() == "":
os.remove(out_file)
raise RuntimeError("Annotation is cancelled.")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("in_file", help="Input file or directory.")
parser.add_argument("-O", "--output")
args = parser.parse_args()
if not distutils.spawn.find_executable("docker"):
print("Please install docker", file=sys.stderr)
sys.exit(1)
try:
out_file = labelme_on_docker(args.in_file, args.output)
if out_file:
print("Saved to: %s" % out_file)
except RuntimeError as e:
sys.stderr.write(e.__str__() + "\n")
sys.exit(1)
if __name__ == "__main__":
main()
import os.path as osp
import shutil
import yaml
from labelme.logger import logger
here = osp.dirname(osp.abspath(__file__))
def update_dict(target_dict, new_dict, validate_item=None):
for key, value in new_dict.items():
if validate_item:
validate_item(key, value)
if key not in target_dict:
logger.warn("Skipping unexpected key in config: {}".format(key))
continue
if isinstance(target_dict[key], dict) and isinstance(value, dict):
update_dict(target_dict[key], value, validate_item=validate_item)
else:
target_dict[key] = value
# -----------------------------------------------------------------------------
def get_default_config():
# Load the default config shipped with labelme
config_file = osp.join(here, "default_config.yaml")
with open(config_file, encoding='utf-8') as f:
config = yaml.safe_load(f)
# save default config to ~/.labelmerc
user_config_file = osp.join(osp.expanduser("~"), ".labelmerc")
if not osp.exists(user_config_file):
try:
shutil.copy(config_file, user_config_file)
except Exception:
logger.warn("Failed to save config: {}".format(user_config_file))
return config
def validate_config_item(key, value):
if key == "validate_label" and value not in [None, "exact"]:
raise ValueError(
"Unexpected value for config key 'validate_label': {}".format(
value
)
)
if key == "shape_color" and value not in [None, "auto", "manual"]:
raise ValueError(
"Unexpected value for config key 'shape_color': {}".format(value)
)
if key == "labels" and value is not None and len(value) != len(set(value)):
raise ValueError(
"Duplicates are detected for config key 'labels': {}".format(value)
)
def get_config(config_file_or_yaml=None, config_from_args=None):
# 1. default config
config = get_default_config()
# 2. specified as file or yaml
if config_file_or_yaml is not None:
config_from_yaml = yaml.safe_load(config_file_or_yaml)
if not isinstance(config_from_yaml, dict):
with open(config_from_yaml, encoding='utf-8') as f:
logger.info(
"Loading config file from: {}".format(config_from_yaml)
)
config_from_yaml = yaml.safe_load(f)
update_dict(
config, config_from_yaml, validate_item=validate_config_item
)
# 3. command line argument or specified config file
if config_from_args is not None:
update_dict(
config, config_from_args, validate_item=validate_config_item
)
return config
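To make the three-layer merge above concrete, a small sketch of calling get_config() directly; the override values are hypothetical:
# Defaults from default_config.yaml, then a YAML string (or file path), then
# CLI-style overrides; later layers win for keys that already exist.
from labelme.config import get_config

config = get_config(
    "auto_save: true\nepsilon: 12.0",  # yaml string instead of a config file path
    {"sort_labels": False},            # e.g. produced by --nosortlabels
)
assert config["auto_save"] is True and config["epsilon"] == 12.0
assert config["sort_labels"] is False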
auto_save: true
display_label_popup: true
store_data: true
keep_prev: false
keep_prev_scale: false
keep_prev_brightness: false
keep_prev_contrast: false
logger_level: info
flags: null
label_flags: null
labels: null
file_search: null
sort_labels: true
validate_label: null
default_shape_color: [0, 255, 0]
shape_color: auto # null, 'auto', 'manual'
shift_auto_shape_color: 0
label_colors: null
shape:
# drawing
line_color: [0, 255, 0, 128]
fill_color: [0, 255, 0, 0] # transparent
vertex_fill_color: [0, 255, 0, 255]
# selecting / hovering
select_line_color: [255, 255, 255, 255]
select_fill_color: [0, 255, 0, 155]
hvertex_fill_color: [255, 255, 255, 255]
point_size: 8
# main
flag_dock:
show: true
closable: true
movable: true
floatable: true
label_dock:
show: true
closable: true
movable: true
floatable: true
shape_dock:
show: true
closable: true
movable: true
floatable: true
file_dock:
show: true
closable: true
movable: true
floatable: true
# label_dialog
show_label_text_field: true
label_completion: startswith
fit_to_content:
column: true
row: false
# canvas
epsilon: 10.0
canvas:
# None: do nothing
# close: close polygon
double_click: close
# The max number of edits we can undo
num_backups: 10
# show crosshair
crosshair:
polygon: false
rectangle: true
circle: false
line: false
point: false
linestrip: false
shortcuts:
close: Ctrl+W
open: Ctrl+O
open_dir: Ctrl+U
quit: Ctrl+Q
save: Ctrl+S
save_as: Ctrl+Shift+S
save_to: null
delete_file: Ctrl+Delete
open_next: [D, Ctrl+Shift+D]
open_prev: [A, Ctrl+Shift+A]
zoom_in: [Ctrl++, Ctrl+=]
zoom_out: Ctrl+-
zoom_to_original: Ctrl+0
fit_window: Ctrl+F
fit_width: Ctrl+Shift+F
create_polygon: Ctrl+N
create_rectangle: Ctrl+R
create_circle: null
create_line: null
create_point: null
create_linestrip: null
edit_polygon: Ctrl+J
delete_polygon: Delete
duplicate_polygon: Ctrl+D
copy_polygon: Ctrl+C
paste_polygon: Ctrl+V
undo: Ctrl+Z
undo_last_point: Ctrl+Z
add_point_to_edge: Ctrl+Shift+P
edit_label: Ctrl+E
toggle_keep_prev_mode: Ctrl+P
remove_selected_point: [Meta+H, Backspace]