A minimal PyTorch implementation of yolov3-tiny

2023-05-16

  • Reference: https://github.com/bubbliiiing/yolo3-pytorch
  • train.py flow: load data (dataloader.py) -> forward pass (tiny.py) -> loss and backward pass (loss.py)
  • dataloader.py
import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset

class YoloDataset(Dataset):
    def __init__(self, annotation_lines, input_shape, train):
        super(YoloDataset, self).__init__()
        self.annotation_lines   = annotation_lines
        self.input_shape        = input_shape
        self.length             = len(self.annotation_lines)
        self.train              = train

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        index       = index % self.length
        image, box  = self.get_random_data(self.annotation_lines[index], self.input_shape[0:2], random = self.train)
        # Normalize to [0, 1] (divide by 255) and transpose HWC -> CHW
        image       = np.transpose(np.array(image, dtype=np.float32)/255.0, (2, 0, 1))
        # Boxes are in pixel corner (x1, y1, x2, y2) form here
        box         = np.array(box, dtype=np.float32)
        
        if len(box) != 0:
            # Convert to normalized coordinates
            box[:, [0, 2]] = box[:, [0, 2]] / self.input_shape[1]
            box[:, [1, 3]] = box[:, [1, 3]] / self.input_shape[0]
            # Convert to center (cx, cy) + width/height form
            box[:, 2:4] = box[:, 2:4] - box[:, 0:2]
            box[:, 0:2] = box[:, 0:2] + box[:, 2:4] / 2
        return image, box

    def rand(self, a=0, b=1):
        return np.random.rand()*(b-a) + a

    def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.02, sat=1.5, val=1.5, random=True):
        line    = annotation_line.split()
        # Labels: darknet-format .txt next to the image, normalized center + width/height
        label_line = line[0][:-4]+'.txt'
        boxes = []
        for lin in open(label_line):
            t = lin.split()
            boxes.append([t[1], t[2], t[3], t[4], t[0]])
        box = np.array(boxes, dtype=np.float32)
        # Image
        image   = Image.open(line[0])
        iw, ih  = image.size
        h, w    = input_shape
        if len(box) > 0:
            # Convert to pixel coordinates
            box[:, [0,2]] = box[:, [0,2]] * iw
            box[:, [1,3]] = box[:, [1,3]] * ih
            # Convert to corner (x1, y1, x2, y2) form
            box[:, 0:2] = box[:, 0:2] - box[:, 2:4] / 2
            box[:, 2:4] = box[:, 0:2] + box[:, 2:4]
        # Validation path: deterministic letterbox, no augmentation
        if not random:
            # Scale that fits the image into the input size while keeping aspect ratio
            scale = min(w/iw, h/ih)
            nw = int(iw*scale)
            nh = int(ih*scale)
            # Offsets that center the resized image on the canvas
            dx = (w-nw)//2
            dy = (h-nh)//2
            # Resize and paste onto a gray canvas
            image       = image.resize((nw,nh), Image.BICUBIC)
            new_image   = Image.new('RGB', (w,h), (128,128,128))
            new_image.paste(image, (dx, dy))
            image_data  = np.array(new_image, np.float32)
            if len(box)>0:
                np.random.shuffle(box)
                # Map the boxes onto the letterboxed image
                box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
                box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
                # Clamp top-left corners to 0
                box[:, 0:2][box[:, 0:2]<0] = 0
                # Clamp bottom-right corners to the canvas size
                box[:, 2][box[:, 2]>w] = w
                box[:, 3][box[:, 3]>h] = h
                # Keep only boxes wider and taller than one pixel
                box_w = box[:, 2] - box[:, 0]
                box_h = box[:, 3] - box[:, 1]
                box = box[np.logical_and(box_w>1, box_h>1)]
            # Boxes returned in pixel corner (x1, y1, x2, y2) form
            return image_data, box
                
        # Training path: random scale/aspect jitter, random placement, flip, HSV jitter
        # Jittered aspect ratio
        new_ar = w/h * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)
        # Scale relative to the input size
        scale = self.rand(.25, 2)
        if new_ar < 1:
            nh = int(scale*h)
            nw = int(nh*new_ar)
        else:
            nw = int(scale*w)
            nh = int(nw/new_ar)
        # Resize
        image = image.resize((nw,nh), Image.BICUBIC)
        # Random placement on the canvas (may crop if nw > w or nh > h)
        dx = int(self.rand(0, w-nw))
        dy = int(self.rand(0, h-nh))
        new_image = Image.new('RGB', (w,h), (128,128,128))
        new_image.paste(image, (dx, dy))
        image = new_image
        # Random horizontal flip
        flip = self.rand()<.5
        if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
        # HSV color jitter
        hue = self.rand(-hue, hue)
        sat = self.rand(1, sat) if self.rand()<.5 else 1/self.rand(1, sat)
        val = self.rand(1, val) if self.rand()<.5 else 1/self.rand(1, val)
        # For float32 input in [0, 1], OpenCV gives H in [0, 360) and S, V in [0, 1]
        x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
        x[..., 0] += hue*360
        # Wrap hue around the 0-360 circle
        x[..., 0][x[..., 0]>360] -= 360
        x[..., 0][x[..., 0]<0] += 360
        x[..., 1] *= sat
        x[..., 2] *= val
        x[:, :, 1:][x[:, :, 1:]>1] = 1
        x[x<0] = 0
        image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
        if len(box)>0:
            np.random.shuffle(box)
            # Map the boxes onto the augmented image
            box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
            box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
            if flip: box[:, [0,2]] = w - box[:, [2,0]]
            # Clamp top-left corners to 0
            box[:, 0:2][box[:, 0:2]<0] = 0
            # Clamp bottom-right corners to the canvas size
            box[:, 2][box[:, 2]>w] = w
            box[:, 3][box[:, 3]>h] = h
            # Keep only boxes wider and taller than one pixel
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w>1, box_h>1)]
        # Boxes returned in pixel corner (x1, y1, x2, y2) form
        return image_data, box
    
# Used as collate_fn in the DataLoader: box counts vary per image, so boxes stay in a list
def yolo_dataset_collate(batch):
    images = []
    bboxes = []
    for img, box in batch:
        images.append(img)
        bboxes.append(box)
    images = np.array(images)
    return images, bboxes
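
For reference, a short sketch of the data layout this loader assumes, matching the code above (the img/ path and the numbers are illustrative, not from the original post): the annotation file lists one image path per line, and each image has a sibling darknet-format .txt label file.

# all.txt: one image path per line, e.g.
#   /home/lwd/data/img/0001.jpg
# 0001.txt next to the image: one object per line, normalized darknet format:
#   <class_id> <cx> <cy> <w> <h>
#   0 0.512 0.430 0.210 0.380
dataset = YoloDataset(open('/home/lwd/data/all.txt').readlines(), (416, 416), True)
image, box = dataset[0]
# image: float32, 3 x 416 x 416, values in [0, 1]
# box:   N x 5, normalized (cx, cy, w, h, class)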


  • tiny.py is linked from the original post; a hedged sketch of the interface train.py expects follows below
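
Since tiny.py is only linked, here is a minimal sketch, assuming the standard yolov3-tiny layout, of what train.py relies on: a Tiny module with a backbone attribute, init() and load_darknet() methods, and two outputs with 3*(5+classes) channels at strides 32 and 16. The layer list and both method bodies are assumptions, not the author's code; load_darknet is left as a stub.

import torch
import torch.nn as nn

def conv(cin, cout, k):
    # darknet basic block: conv + BN + LeakyReLU
    return nn.Sequential(
        nn.Conv2d(cin, cout, k, 1, k // 2, bias=False),
        nn.BatchNorm2d(cout),
        nn.LeakyReLU(0.1))

class Tiny(nn.Module):
    def __init__(self, classes=80):
        super().__init__()
        out_ch = 3 * (5 + classes)
        # stride-16 part of the backbone; its output is routed to the 26x26 head
        self.b1 = nn.Sequential(
            conv(3, 16, 3), nn.MaxPool2d(2, 2),
            conv(16, 32, 3), nn.MaxPool2d(2, 2),
            conv(32, 64, 3), nn.MaxPool2d(2, 2),
            conv(64, 128, 3), nn.MaxPool2d(2, 2),
            conv(128, 256, 3))
        # stride-32 part (the stride-1 maxpool needs right/bottom padding)
        self.b2 = nn.Sequential(
            nn.MaxPool2d(2, 2),
            conv(256, 512, 3),
            nn.ZeroPad2d((0, 1, 0, 1)), nn.MaxPool2d(2, 1),
            conv(512, 1024, 3),
            conv(1024, 256, 1))
        self.backbone = nn.ModuleList([self.b1, self.b2])  # what train.py freezes
        self.head1 = nn.Sequential(conv(256, 512, 3), nn.Conv2d(512, out_ch, 1))
        self.up = nn.Sequential(conv(256, 128, 1), nn.Upsample(scale_factor=2))
        self.head2 = nn.Sequential(conv(384, 256, 3), nn.Conv2d(256, out_ch, 1))

    def forward(self, x):
        c = self.b1(x)                                    # b x 256 x 26 x 26 for a 416 input
        p = self.b2(c)                                    # b x 256 x 13 x 13
        out1 = self.head1(p)                              # b x 3*(5+classes) x 13 x 13
        out2 = self.head2(torch.cat([self.up(p), c], 1))  # b x 3*(5+classes) x 26 x 26
        return out1, out2

    def init(self):
        # assumed init scheme; the original post does not show it
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)

    def load_darknet(self, path):
        # parsing darknet's yolov3-tiny.conv.15 weight file is omitted in this sketch
        pass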
  • loss.py
import torch, math
import numpy as np
import torch.nn as nn

def iou(a,b):
	A=len(a)
	B=len(b)
	area1=a[:,2]*a[:,3]
	area1=area1.unsqueeze(1).expand(A,B)
	area2=b[:,2]*b[:,3]
	area2=area2.unsqueeze(0).expand(A,B)
	aa=torch.zeros_like(a)
	aa[:,0:2]=a[:,0:2]-a[:,2:4]/2
	aa[:,2:4]=aa[:,0:2]+a[:,2:4]
	aa=aa.unsqueeze(1).expand(A,B,4)
	bb=torch.zeros_like(b)
	bb[:,0:2]=b[:,0:2]-b[:,2:4]/2
	bb[:,2:4]=bb[:,0:2]+b[:,2:4]
	bb=bb.unsqueeze(0).expand(A,B,4)
	lt=torch.max(aa[:,:,0:2], bb[:,:,0:2])
	rb=torch.min(aa[:,:,2:4], bb[:,:,2:4])
	inter=torch.clamp((rb-lt), min=0)
	inter=inter[:,:,0]*inter[:,:,1]
	return inter/(area1+area2-inter)
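
# Quick sanity check of iou(), added for illustration (boxes are cx, cy, w, h):
#   a = torch.tensor([[0., 0., 2., 2.]])   # corners (-1,-1)..(1,1)
#   b = torch.tensor([[1., 1., 2., 2.]])   # corners (0,0)..(2,2)
#   iou(a, b)  ->  tensor([[0.1429]])      # inter 1, union 4+4-1=7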

def clip(a):
	mi=1e-7
	ma=1-mi
	b=(a>=mi).float()*a+(a<mi).float()*mi
	b=(b<=ma).float()*b+(b>ma).float()*ma
	return b
	
def BCELoss(pred, target):
	p=clip(pred)
	return -target*torch.log(p)-(1-target)*torch.log(1-p)
	
def MSELoss(pred, target):
	return torch.pow((pred-target), 2)
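
# Note (added): clip() is an element-wise clamp to [1e-7, 1-1e-7], i.e.
# torch.clamp(a, 1e-7, 1 - 1e-7), and BCELoss/MSELoss here match
# nn.BCELoss / nn.MSELoss with reduction='none', e.g.
#   BCELoss(torch.tensor(0.9), torch.tensor(1.0))  ->  -log(0.9) ~ 0.1054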

class Loss(nn.Module):
	def __init__(self, input_size, anchors, classes, anchors_mask=[[0,1,2], [3,4,5]]):
		super().__init__()
		self.input_size = input_size
		self.anchors = anchors
		self.bbox_attrs = 5 + classes
		self.anchors_mask = anchors_mask
		self.ignore_threshold = 0.5
		
	'''
	l:            index of the anchors_mask group (which output head)
	out:          one network output, b x (3*(5+classes)) x h x w
	target:       list of b tensors, each N x 5: normalized (cx, cy, w, h, class)
	'''
	def forward(self, l, out, target):
		b = out.size(0)
		in_h = out.size(2)
		in_w = out.size(3)
		s = self.input_size[0] // in_w
		scaled_anchors = [(aw/s, ah/s) for aw,ah in self.anchors]
		# Positive samples: build the training targets
		y_true, no_obj, scale = self.get_target(l, target, scaled_anchors, in_h, in_w)
		# 2 - normalized gt area: weights small boxes more heavily
		scale = 2 - scale
		# Reshape to b x 3 x h x w x (5+classes)
		out = out.view(b, 3, self.bbox_attrs, in_h, in_w).permute(0,1,3,4,2)
		x = torch.sigmoid(out[...,0])
		y = torch.sigmoid(out[...,1])
		w = out[...,2]
		h = out[...,3]
		# Don't forget the sigmoid on objectness
		c = torch.sigmoid(out[...,4])
		cl=torch.sigmoid(out[...,5:])
		# Negative samples: ignore predictions whose best IoU with any gt exceeds the threshold
		no_obj = self.get_ignore(l,x,y,h,w,target, scaled_anchors, in_h, in_w, no_obj)
		if x.is_cuda:
			y_true = y_true.cuda()
			no_obj = no_obj.cuda()
			scale = scale.cuda()
		# loss
		xloss=torch.sum(BCELoss(x, y_true[...,0])*y_true[...,4]*scale)
		yloss=torch.sum(BCELoss(y, y_true[...,1])*y_true[...,4]*scale)
		wloss=torch.sum(MSELoss(w, y_true[...,2])*y_true[...,4]*scale*0.5)
		hloss=torch.sum(MSELoss(h, y_true[...,3])*y_true[...,4]*scale*0.5)
		closs=torch.sum(BCELoss(c, y_true[...,4])*y_true[...,4] + BCELoss(c, y_true[...,4])*no_obj)
		clsloss=torch.sum(BCELoss(cl[y_true[...,4]==1], y_true[...,5:][y_true[...,4]==1]))
		loss = xloss + yloss + wloss + hloss + closs + clsloss
		# Normalize by the number of positive samples (at least one)
		num = torch.sum(y_true[...,4])
		num = torch.max(num, torch.ones_like(num))
		return loss, num
		
	'''
	l:            index of the anchors_mask group (which output head)
	targets:      list of b tensors, each N x 5, normalized gt
	anchors:      all anchors, already scaled to this feature map
	in_h, in_w:   feature-map height and width
	For each image in the batch:
		compute IoU between the N gt boxes and the anchors (both centered at the origin)
		for each gt, take the anchor with the highest IoU:
			if it is not in the current mask: continue
			otherwise: the gt center cell and the anchor index pick the location; write the targets
	'''
	def get_target(self, l, targets, anchors, in_h, in_w):
		b = len(targets)
		c = len(self.anchors_mask[l])
		y_true = torch.zeros(b,c,in_h, in_w,self.bbox_attrs,requires_grad = False)
		no_obj = torch.ones(b,c,in_h, in_w,requires_grad = False)
		scale = torch.zeros(b,c,in_h, in_w,requires_grad = False)
		for bi in range(b):
			if(len(targets[bi]) == 0): continue
			# Match gt and anchors centered at (0,0): only w and h matter for anchor assignment
			batch_target = torch.zeros(len(targets[bi]), 4)
			batch_target[:,2] = targets[bi][:,2] * in_w
			batch_target[:,3] = targets[bi][:,3] * in_h
			anchor4 = torch.zeros(len(anchors), 4)
			anchor4[:,2:] = torch.FloatTensor(anchors)
			ious = iou(batch_target, anchor4)  # N x num_anchors
			bests = torch.argmax(ious, dim=1)  # best anchor index per gt
			# Pitfall 1 from the original post: forgetting to assign the centers here
			batch_target[:,0] = targets[bi][:,0] * in_w
			batch_target[:,1] = targets[bi][:,1] * in_h
			for it, best in enumerate(bests):
				if best not in self.anchors_mask[l]:
					continue
				c = self.anchors_mask[l].index(best)  # index within the mask, 0..2
				# Grid cell containing the gt center
				i = torch.floor(batch_target[it,0]).long()
				j = torch.floor(batch_target[it,1]).long()
				# Write the targets
				no_obj[bi,c,j,i] = 0
				y_true[bi,c,j,i,0] = batch_target[it,0] - i.float()
				y_true[bi,c,j,i,1] = batch_target[it,1] - j.float()
				# Pitfall 2 from the original post: accidentally using the unscaled self.anchors here
				y_true[bi,c,j,i,2] = math.log(batch_target[it,2]/anchors[best][0])
				y_true[bi,c,j,i,3] = math.log(batch_target[it,3]/anchors[best][1])
				y_true[bi,c,j,i,4] = 1
				clss = targets[bi][it][4].long()
				y_true[bi,c,j,i,5+clss] = 1
				scale[bi,c,j,i] = batch_target[it,2]*batch_target[it,3]/in_h/in_w
		return y_true, no_obj, scale
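
	# Worked example of the encoding above (added for illustration), on a 13x13 map:
	#   gt (normalized) = (0.40, 0.60, 0.20, 0.30) -> grid units: cx=5.2, cy=7.8, w=2.6, h=3.9
	#   center cell: i=5, j=7; suppose the best scaled anchor is (2.8, 3.4):
	#     y_true[...,0] = 5.2 - 5 = 0.2            (x offset, sigmoid space)
	#     y_true[...,1] = 7.8 - 7 = 0.8            (y offset, sigmoid space)
	#     y_true[...,2] = log(2.6 / 2.8) ~ -0.074  (w, log space)
	#     y_true[...,3] = log(3.9 / 3.4) ~  0.137  (h, log space)
	#     scale = 2.6 * 3.9 / 13 / 13 ~ 0.060, so the box weight 2 - scale ~ 1.94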
		
	'''
	l:            index of the anchors_mask group (which output head)
	x, y, h, w:   b x 3 x h x w network outputs; x and y already passed through sigmoid
	targets:      list of b tensors, each N x 5, normalized gt
	anchors:      all anchors, already scaled to this feature map
	in_h, in_w:   feature-map height and width
	no_obj:       b x 3 x h x w mask marking negative samples
	Spread this mask's anchors over every grid cell (shape b x 3 x h x w x 2),
	decode x, y, h, w against them, and concat into b x 3 x h x w x 4 predictions.
	For each image in the batch:
		compute IoU between predictions and gt
		take each prediction's highest IoU over all gt
		predictions above the threshold are ignored: their no_obj entries are set to 0
	'''
	def get_ignore(self, l, x, y, h, w, targets, anchors, in_h, in_w, no_obj):
		ft = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
		b = len(targets)
		# Decode w, h
		anchor_l = np.array(anchors)[self.anchors_mask[l]]
		anchor_w = ft(anchor_l[:,0:1])
		anchor_h = ft(anchor_l[:,1:])
		anchor_w = anchor_w.repeat(1,in_h*in_w).repeat(b,1).view(b,3,in_h,in_w)
		anchor_h = anchor_h.repeat(1,in_h*in_w).repeat(b,1).view(b,3,in_h,in_w)
		tw = (torch.exp(w.data)*anchor_w).unsqueeze(-1)
		th = (torch.exp(h.data)*anchor_h).unsqueeze(-1)
		# Decode x, y (row index first, so this also works for non-square maps)
		grid_y, grid_x = torch.meshgrid(torch.arange(in_h), torch.arange(in_w))
		# .type(ft) also moves the tensor onto the right device
		grid_x = grid_x.repeat(b,3,1,1).type(ft)
		grid_y = grid_y.repeat(b,3,1,1).type(ft)
		tx = (x.data + grid_x).unsqueeze(-1)
		ty = (y.data + grid_y).unsqueeze(-1)
		# concat
		pred = torch.cat([tx, ty, tw, th], -1)
		for bi in range(b):
			if(len(targets[bi]) == 0): continue
			# IoU between this image's predictions and its gts
			pre = pred[bi].view(-1,4)
			# zeros_like copies shape, dtype and device
			gt = torch.zeros_like(targets[bi])
			gt[:,[0,2]] = targets[bi][:,[0,2]] * in_w
			gt[:,[1,3]] = targets[bi][:,[1,3]] * in_h
			gt = gt[:,:4]
			ious = iou(gt, pre)
			# Each prediction's best IoU over all gt; above the threshold -> ignored
			maxx, _ = torch.max(ious, dim=0)
			maxx = maxx.view(3,in_h,in_w)
			no_obj[bi][maxx > self.ignore_threshold] = 0
		return no_obj
  • train.py
from tiny import Tiny
from loss import Loss
from torch.utils.data import DataLoader
from PIL import Image, ImageDraw
import numpy as np
import torch, cv2
import torch.optim as optim
from dataloader import YoloDataset, yolo_dataset_collate

def show_batch(image, label):
	for i in range(len(image)):
		im = np.transpose(image[i]*255.0,(1,2,0)).astype('uint8')[:,:,[2,1,0]]
		ih, iw = np.shape(im)[0:2]
		cv2.imshow("im", im)
		cv2.waitKey(0)
		# for lab in label[i]:
		# 	print(lab)

# data
batch_size = 2
data_txt='/home/lwd/data/all.txt'
with open(data_txt) as f:
	train_lines = f.readlines()
train_dataset=YoloDataset(train_lines, (416, 416), True)
train_data = DataLoader(train_dataset, shuffle = True, batch_size = batch_size, pin_memory=True, drop_last=True, collate_fn=yolo_dataset_collate)
test_txt='/home/lwd/data/test.txt'
with open(test_txt) as f:
	test_lines = f.readlines()
test_dataset=YoloDataset(test_lines, (416, 416), False)
test_data = DataLoader(test_dataset, shuffle = False, batch_size = batch_size, pin_memory=True, drop_last=True, collate_fn=yolo_dataset_collate)
train_step = len(train_lines) // batch_size
val_step = len(test_lines) // batch_size
# net
model_path=''
net=Tiny()
net.init()
net.load_darknet('/home/lwd/code/darknet/yolov3-tiny.conv.15')
net = net.cuda()

if len(model_path) > 1:
	paras=torch.load(model_path, map_location='cuda')
	net.load_state_dict(paras)
# hyperparameters
# 6 anchors in input-image pixels; with anchors_mask [[0,1,2],[3,4,5]] the first
# (larger) three go to the 13x13 head and the last three to the 26x26 head
anchors = [[44, 43],  [87, 39],  [64,102], [20, 18],  [43, 21],  [28, 34]]
los = Loss((416, 416), anchors, 80)
lr = 1e-4
optimizer = optim.Adam(net.parameters(), lr, weight_decay = 5e-4)
#lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.94)
#lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100, eta_min=1e-4)
# training loop
i = 1
vl_last = 9
# Freeze the pretrained backbone at the start (unfrozen again at epoch 400)
for param in net.backbone.parameters():
	param.requires_grad = False
while True:
	net.train()
	# if i % 111 == 0 and lr > 1e-4:
	# 	lr *= 0.1
	# 	for param_group in optimizer.param_groups:
	# 		param_group["lr"] = lr
	# Unfreeze the backbone after 400 epochs
	if i == 400:
	# 	optimizer = optim.Adam(net.parameters(), 1e-4, weight_decay = 5e-4)
	# 	lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.94)
		for param in net.backbone.parameters():
			param.requires_grad = True
	train_loss = 0
	for bi, (batch_image, batch_label) in enumerate(train_data):
		loss = 0
		number = 0
		#show_batch(batch_image, batch_label)
		batch_image  = torch.from_numpy(batch_image).type(torch.FloatTensor).cuda()
		batch_label = [torch.from_numpy(ann).type(torch.FloatTensor).cuda() for ann in batch_label]
		optimizer.zero_grad()
		outputs = net(batch_image)
		for oi, output in enumerate(outputs):
			loss_item, num_pos = los(oi, output, batch_label)
			loss += loss_item
			number += num_pos
		loss_value = loss / number
		loss_value.backward()
		optimizer.step()
		train_loss += loss_value.item()
	net.eval()
	val_loss = 0
	with torch.no_grad():
		for bi, (batch_image, batch_label) in enumerate(test_data):
			loss = 0
			number = 0
			# show_batch(batch_image, batch_label)
			batch_image  = torch.from_numpy(batch_image).type(torch.FloatTensor).cuda()
			batch_label = [torch.from_numpy(ann).type(torch.FloatTensor).cuda() for ann in batch_label]
			outputs = net(batch_image)
			for oi, output in enumerate(outputs):
				loss_item, num_pos = los(oi, output, batch_label)
				loss += loss_item
				number += num_pos
			loss_value = loss / number
			val_loss += loss_value.item()
	vl = val_loss / val_step
	print('epoch: ', i, ' ------ train_loss:', train_loss / train_step, '   val_loss:', vl)
	print(optimizer.param_groups[0]['lr'])
		
	if vl < vl_last: 
		torch.save(net.state_dict(), 'result/model/'+str(i)+':'+str(vl)[:5]+'.pth')
		vl_last = vl
		#break
	# lr_scheduler.step()
	if i > 999: 
		break
	i += 1
  • Notes
    • Trained from the darknet pretrained weights (yolov3-tiny.conv.15)
    • Learning rate fixed at 1e-4
    • The pretrained backbone is frozen for the first 400 epochs
    • A usable model appears at a validation loss of about 5.5
    • Training from scratch was also tried: the loss plateaued around 9, probably because the training set is too small