javaee论坛

普通会员

225648

帖子

341

回复

355

积分

楼主
发表于 2019-10-30 17:44:31 | 查看: 310 | 回复: 0

DL之Yolov3:基于深度学习Yolov3算法实现视频目标检测之对《我要打篮球》视频段进行实时目标检测

 

 

 

 

目录

输出结果

设计思路

核心代码

 

 

 

 

相关文章:成功解决 AttributeError: 'NoneType' object has no attribute '__array_interface__'

输出结果

更新……

 

 

 

 

设计思路

DL之YoloV3:YoloV3算法的简介(论文介绍)、架构详解、案例应用等配图集合之详细攻略

 

 

 

 

核心代码

更新……

Yolov3代码实现的时候,是将输入的视频分为帧图像进行目标检测的!

1、yolo.py文件#-*-coding:utf-8-*-"""ClassdefinitionofYOLO_v3styledetectionmodelonimageandvideo"""importcolorsysimportosfromtimeitimportdefault_timerastimerimportnumpyasnpfromkerasimportbackendasKfromkeras.modelsimportload_modelfromkeras.layersimportInputfromPILimportImage,ImageFont,ImageDrawfromyolo3.modelimportyolo_eval,yolo_body,tiny_yolo_bodyfromyolo3.utilsimportletterbox_imageimportosfromkeras.utilsimportmulti_gpu_modelclassYOLO(object):_defaults={"model_path":'model_data/yolo.h5',"anchors_path":'model_data/yolo_anchors.txt',"classes_path":'model_data/coco_classes.txt',"score":0.3,"iou":0.45,"model_image_size":(416,416),"gpu_num":1,}@classmethoddefget_defaults(cls,n):ifnincls._defaults:returncls._defaults[n]else:return"Unrecognizedattributename'"+n+"'"def__init__(self,**kwargs):self.__dict__.update(self._defaults)#setupdefaultvaluesself.__dict__.update(kwargs)#andupdatewithuseroverridesself.class_names=self._get_class()self.anchors=self._get_anchors()self.sess=K.get_session()self.boxes,self.scores,self.classes=self.generate()def_get_class(self):classes_path=os.path.expanduser(self.classes_path)withopen(classes_path)asf:class_names=f.readlines()class_names=[c.strip()forcinclass_names]returnclass_namesdef_get_anchors(self):anchors_path=os.path.expanduser(self.anchors_path)withopen(anchors_path)asf:anchors=f.readline()anchors=[float(x)forxinanchors.split(',')]returnnp.array(anchors).reshape(-1,2)defgenerate(self):model_path=os.path.expanduser(self.model_path)assertmodel_path.endswith('.h5'),'Kerasmodelorweightsmustbea.h5file.'#Loadmodel,orconstructmodelandloadweights.num_anchors=len(self.anchors)num_classes=len(self.class_names)is_tiny_version=num_anchors==6#defaultsettingtry:self.yolo_model=load_model(model_path,compile=False)except:self.yolo_model=tiny_yolo_body(Input(shape=(None,None,3)),num_anchors//2,num_classes)\ifis_tiny_versionelseyolo_body(Input(shape=(None,None,3)),num_anchors//3,num_classes)self.yolo_model.load_weights(self.model_path)#makesuremodel,anchors
andclassesmatchelse:assertself.yolo_model.layers[-1].output_shape[-1]==\num_anchors/len(self.yolo_model.output)*(num_classes+5),\'Mismatchbetweenmodelandgivenanchorandclasssizes'print('{}model,anchors,andclassesloaded.'.format(model_path))#Generatecolorsfordrawingboundingboxes.hsv_tuples=[(x/len(self.class_names),1.,1.)forxinrange(len(self.class_names))]self.colors=list(map(lambdax:colorsys.hsv_to_rgb(*x),hsv_tuples))self.colors=list(map(lambdax:(int(x[0]*255),int(x[1]*255),int(x[2]*255)),self.colors))np.random.seed(10101)#Fixedseedforconsistentcolorsacrossruns.np.random.shuffle(self.colors)#Shufflecolorstodecorrelateadjacentclasses.np.random.seed(None)#Resetseedtodefault.#Generateoutputtensortargetsforfilteredboundingboxes.self.input_image_shape=K.placeholder(shape=(2,))ifself.gpu_num>=2:self.yolo_model=multi_gpu_model(self.yolo_model,gpus=self.gpu_num)boxes,scores,classes=yolo_eval(self.yolo_model.output,self.anchors,len(self.class_names),self.input_image_shape,score_threshold=self.score,iou_threshold=self.iou)returnboxes,scores,classesdefdetect_image(self,image):start=timer()ifself.model_image_size!=(None,None):assertself.model_image_size[0]%32==0,'Multiplesof32required'assertself.model_image_size[1]%32==0,'Multiplesof32required'boxed_image=letterbox_image(image,tuple(reversed(self.model_image_size)))else:new_image_size=(image.width-(image.width%32),image.height-(image.height%32))boxed_image=letterbox_image(image,new_image_size)image_data=np.array(boxed_image,dtype='float32')print(image_data.shape)image_data/=255.image_data=np.expand_dims(image_data,0)#Addbatchdimension.out_boxes,out_scores,out_classes=self.sess.run([self.boxes,self.scores,self.classes],feed_dict={self.yolo_model.input:image_data,self.input_image_shape:[image.size[1],image.size[0]],K.learning_phase():0})print('Found{}boxesfor{}'.format(len(out_boxes),'img'))font=ImageFont.truetype(font='font/FiraMono-Medium.otf',size=np.floor(3e-2*image.size[1]+0.5).astype('int32'))thickness=(image.size[0]+image.
size[1])//300fori,cinreversed(list(enumerate(out_classes))):predicted_class=self.class_names[c]box=out_boxes[i]score=out_scores[i]label='{}{:.2f}'.format(predicted_class,score)draw=ImageDraw.Draw(image)label_size=draw.textsize(label,font)top,left,bottom,right=boxtop=max(0,np.floor(top+0.5).astype('int32'))left=max(0,np.floor(left+0.5).astype('int32'))bottom=min(image.size[1],np.floor(bottom+0.5).astype('int32'))right=min(image.size[0],np.floor(right+0.5).astype('int32'))print(label,(left,top),(right,bottom))iftop-label_size[1]>=0:text_origin=np.array([left,top-label_size[1]])else:text_origin=np.array([left,top+1])#Mykingdomforagoodredistributableimagedrawinglibrary.foriinrange(thickness):draw.rectangle([left+i,top+i,right-i,bottom-i],outline=self.colors[c])draw.rectangle([tuple(text_origin),tuple(text_origin+label_size)],fill=self.colors[c])draw.text(text_origin,label,fill=(0,0,0),font=font)deldrawend=timer()print(end-start)returnimagedefclose_session(self):self.sess.close()defdetect_video(yolo,video_path,output_path=""):importcv2vid=cv2.VideoCapture(video_path)ifnotvid.isOpened():raiseIOError("Couldn'topenwebcamorvideo")video_FourCC=int(vid.get(cv2.CAP_PROP_FOURCC))video_fps=vid.get(cv2.CAP_PROP_FPS)video_size=(int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))isOutput=Trueifoutput_path!=""elseFalseifisOutput:print("!!!TYPE:",type(output_path),type(video_FourCC),type(video_fps),type(video_size))out=cv2.VideoWriter(output_path,video_FourCC,video_fps,video_size)accum_time=0curr_fps=0fps="FPS:??"prev_time=timer()whileTrue:return_value,frame=vid.read()image=Image.fromarray(frame)image=yolo.detect_image(image)result=np.asarray(image)curr_time=timer()exec_time=curr_time-prev_timeprev_time=curr_timeaccum_time=accum_time+exec_timecurr_fps=curr_fps+1ifaccum_time>1:accum_time=accum_time-1fps="FPS:"+str(curr_fps)curr_fps=0cv2.putText(result,text=fps,org=(3,15),fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale=0.50,color=(255,0,0),thickness=2)cv2.namedWi
ndow("result",cv2.WINDOW_NORMAL)cv2.imshow("result",result)ifisOutput:out.write(result)ifcv2.waitKey(1)&0xFF==ord('q'):breakyolo.close_session()2、model.py文件

   上边是网络模型的代码定义,比如 DarknetConv2D、DarknetConv2D_BN_Leaky、resblock_body、darknet_body、make_last_layers、yolo_body、yolo_head、yolo_eval 等函数。其中 preprocess_true_boxes() 函数找出人工标定框(GT框),通过计算候选框和GT框的IOU,才会找出 best_anchor。可以参考网络模型图去理解。详见 DL之Yolov3:基于深度学习Yolov3算法实现视频目标检测之对《我要打篮球》视频段进行实时目标检测 的model.py文件。

"""YOLO_v3ModelDefinedinKeras."""fromfunctoolsimportwrapsimportnumpyasnpimporttensorflowastffromkerasimportbackendasKfromkeras.layersimportConv2D,Add,ZeroPadding2D,UpSampling2D,Concatenate,MaxPooling2Dfromkeras.layers.advanced_activationsimportLeakyReLUfromkeras.layers.normalizationimportBatchNormalizationfromkeras.modelsimportModelfromkeras.regularizersimportl2fromyolo3.utilsimportcompose@wraps(Conv2D)defDarknetConv2D(*args,**kwargs):"""WrappertosetDarknetparametersforConvolution2D."""darknet_conv_kwargs={'kernel_regularizer':l2(5e-4)}darknet_conv_kwargs['padding']='valid'ifkwargs.get('strides')==(2,2)else'same'darknet_conv_kwargs.update(kwargs)returnConv2D(*args,**darknet_conv_kwargs)defDarknetConv2D_BN_Leaky(*args,**kwargs):"""DarknetConvolution2DfollowedbyBatchNormalizationandLeakyReLU."""no_bias_kwargs={'use_bias':False}no_bias_kwargs.update(kwargs)returncompose(DarknetConv2D(*args,**no_bias_kwargs),BatchNormalization(),LeakyReLU(alpha=0.1))defresblock_body(x,num_filters,num_blocks):'''AseriesofresblocksstartingwithadownsamplingConvolution2D'''#Darknetusesleftandtoppaddinginsteadof'same'modex=ZeroPadding2D(((1,0),(1,0)))(x)x=DarknetConv2D_BN_Leaky(num_filters,(3,3),strides=(2,2))(x)foriinrange(num_blocks):y=compose(DarknetConv2D_BN_Leaky(num_filters//2,(1,1)),DarknetConv2D_BN_Leaky(num_filters,(3,3)))(x)x=Add()([x,y])returnxdefdarknet_body(x):'''Darknentbodyhaving52Convolution2Dlayers'''x=DarknetConv2D_BN_Leaky(32,(3,3))(x)x=resblock_body(x,64,1)x=resblock_body(x,128,2)x=resblock_body(x,256,8)x=resblock_body(x,512,8)x=resblock_body(x,1024,4)returnxdefmake_last_layers(x,num_filters,out_filters):'''6Conv2D_BN_LeakylayersfollowedbyaConv2D_linearlayer'''x=compose(DarknetConv2D_BN_Leaky(num_filters,(1,1)),DarknetConv2D_BN_Leaky(num_filters*2,(3,3)),DarknetConv2D_BN_Leaky(num_filters,(1,1)),DarknetConv2D_BN_Leaky(num_filters*2,(3,3)),DarknetConv2D_BN_Leaky(num_filters,(1,1)))(x)y=compose(DarknetConv2D_BN_Leaky(num_filters*2,(3,3)),DarknetConv2D(out_filters,(1,1)))(x)
returnx,ydefyolo_body(inputs,num_anchors,num_classes):"""CreateYOLO_V3modelCNNbodyinKeras."""darknet=Model(inputs,darknet_body(inputs))x,y1=make_last_layers(darknet.output,512,num_anchors*(num_classes+5))x=compose(DarknetConv2D_BN_Leaky(256,(1,1)),UpSampling2D(2))(x)x=Concatenate()([x,darknet.layers[152].output])x,y2=make_last_layers(x,256,num_anchors*(num_classes+5))x=compose(DarknetConv2D_BN_Leaky(128,(1,1)),UpSampling2D(2))(x)x=Concatenate()([x,darknet.layers[92].output])x,y3=make_last_layers(x,128,num_anchors*(num_classes+5))returnModel(inputs,[y1,y2,y3])deftiny_yolo_body(inputs,num_anchors,num_classes):'''CreateTinyYOLO_v3modelCNNbodyinkeras.'''x1=compose(DarknetConv2D_BN_Leaky(16,(3,3)),MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same'),DarknetConv2D_BN_Leaky(32,(3,3)),MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same'),DarknetConv2D_BN_Leaky(64,(3,3)),MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same'),DarknetConv2D_BN_Leaky(128,(3,3)),MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same'),DarknetConv2D_BN_Leaky(256,(3,3)))(inputs)x2=compose(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same'),DarknetConv2D_BN_Leaky(512,(3,3)),MaxPooling2D(pool_size=(2,2),strides=(1,1),padding='same'),DarknetConv2D_BN_Leaky(1024,(3,3)),DarknetConv2D_BN_Leaky(256,(1,1)))(x1)y1=compose(DarknetConv2D_BN_Leaky(512,(3,3)),DarknetConv2D(num_anchors*(num_classes+5),(1,1)))(x2)x2=compose(DarknetConv2D_BN_Leaky(128,(1,1)),UpSampling2D(2))(x2)y2=compose(Concatenate(),DarknetConv2D_BN_Leaky(256,(3,3)),DarknetConv2D(num_anchors*(num_classes+5),(1,1)))([x2,x1])returnModel(inputs,[y1,y2])defyolo_head(feats,anchors,num_classes,input_shape,calc_loss=False):"""Convertfinallayerfeaturestoboundingboxparameters."""num_anchors=len(anchors)#Reshapetobatch,height,width,num_anchors,box_params.anchors_tensor=K.reshape(K.constant(anchors),[1,1,1,num_anchors,2])grid_shape=K.shape(feats)[1:3]#height,widthgrid_y=K.tile(K.reshape(K.arange(0,stop=grid_shape[0]),[-1,1,1,1]),[
1,grid_shape[1],1,1])grid_x=K.tile(K.reshape(K.arange(0,stop=grid_shape[1]),[1,-1,1,1]),[grid_shape[0],1,1,1])grid=K.concatenate([grid_x,grid_y])grid=K.cast(grid,K.dtype(feats))feats=K.reshape(feats,[-1,grid_shape[0],grid_shape[1],num_anchors,num_classes+5])#Adjustpreditionstoeachspatialgridpointandanchorsize.box_xy=(K.sigmoid(feats[...,:2])+grid)/K.cast(grid_shape[::-1],K.dtype(feats))box_wh=K.exp(feats[...,2:4])*anchors_tensor/K.cast(input_shape[::-1],K.dtype(feats))box_confidence=K.sigmoid(feats[...,4:5])box_class_probs=K.sigmoid(feats[...,5:])ifcalc_loss==True:returngrid,feats,box_xy,box_whreturnbox_xy,box_wh,box_confidence,box_class_probsdefyolo_correct_boxes(box_xy,box_wh,input_shape,image_shape):'''Getcorrectedboxes'''box_yx=box_xy[...,::-1]box_hw=box_wh[...,::-1]input_shape=K.cast(input_shape,K.dtype(box_yx))image_shape=K.cast(image_shape,K.dtype(box_yx))new_shape=K.round(image_shape*K.min(input_shape/image_shape))offset=(input_shape-new_shape)/2./input_shapescale=input_shape/new_shapebox_yx=(box_yx-offset)*scalebox_hw*=scalebox_mins=box_yx-(box_hw/2.)box_maxes=box_yx+(box_hw/2.)boxes=K.concatenate([box_mins[...,0:1],#y_minbox_mins[...,1:2],#x_minbox_maxes[...,0:1],#y_maxbox_maxes[...,1:2]#x_max])#Scaleboxesbacktooriginalimageshape.boxes*=K.concatenate([image_shape,image_shape])returnboxesdefyolo_boxes_and_scores(feats,anchors,num_classes,input_shape,image_shape):'''ProcessConvlayeroutput'''box_xy,box_wh,box_confidence,box_class_probs=yolo_head(feats,anchors,num_classes,input_shape)boxes=yolo_correct_boxes(box_xy,box_wh,input_shape,image_shape)boxes=K.reshape(boxes,[-1,4])box_scores=box_confidence*box_class_probsbox_scores=K.reshape(box_scores,[-1,num_classes])returnboxes,box_scoresdefyolo_eval(yolo_outputs,anchors,num_classes,image_shape,max_boxes=20,score_threshold=.6,iou_threshold=.5):"""EvaluateYOLOmodelongiveninputandreturnfilteredboxes."""num_layers=len(yolo_outputs)anchor_mask=[[6,7,8],[3,4,5],[0,1,2]]ifnum_layers==3else[[3,4,5],[1,2,3]]#defaultsettin
ginput_shape=K.shape(yolo_outputs[0])[1:3]*32boxes=[]box_scores=[]forlinrange(num_layers):_boxes,_box_scores=yolo_boxes_and_scores(yolo_outputs[l],anchors[anchor_mask[l]],num_classes,input_shape,image_shape)boxes.append(_boxes)box_scores.append(_box_scores)boxes=K.concatenate(boxes,axis=0)box_scores=K.concatenate(box_scores,axis=0)mask=box_scores>=score_thresholdmax_boxes_tensor=K.constant(max_boxes,dtype='int32')boxes_=[]scores_=[]classes_=[]forcinrange(num_classes):#TODO:usekerasbackendinsteadoftf.class_boxes=tf.boolean_mask(boxes,mask[:,c])class_box_scores=tf.boolean_mask(box_scores[:,c],mask[:,c])nms_index=tf.image.non_max_suppression(class_boxes,class_box_scores,max_boxes_tensor,iou_threshold=iou_threshold)class_boxes=K.gather(class_boxes,nms_index)class_box_scores=K.gather(class_box_scores,nms_index)classes=K.ones_like(class_box_scores,'int32')*cboxes_.append(class_boxes)scores_.append(class_box_scores)classes_.append(classes)boxes_=K.concatenate(boxes_,axis=0)scores_=K.concatenate(scores_,axis=0)classes_=K.concatenate(classes_,axis=0)returnboxes_,scores_,classes_defpreprocess_true_boxes(true_boxes,input_shape,anchors,num_classes):'''PreprocesstrueboxestotraininginputformatParameters----------true_boxes:array,shape=(m,T,5)Absolutex_min,y_min,x_max,y_max,class_idrelativetoinput_shape.input_shape:array-like,hw,multiplesof32anchors:array,shape=(N,2),whnum_classes:integerReturns-------y_true:listofarray,shapelikeyolo_outputs,xywharereletivevalue'''assert(true_boxes[...,4]<num_classes).all(),'classidmustbelessthannum_classes'num_layers=len(anchors)//3#defaultsettinganchor_mask=[[6,7,8],[3,4,5],[0,1,2]]ifnum_layers==3else[[3,4,5],[1,2,3]]true_boxes=np.array(true_boxes,dtype='float32')input_shape=np.array(input_shape,dtype='int32')boxes_xy=(true_boxes[...,0:2]+true_boxes[...,2:4])//2boxes_wh=true_boxes[...,2:4]-true_boxes[...,0:2]true_boxes[...,0:2]=boxes_xy/input_shape[::-1]true_boxes[...,2:4]=boxes_wh/input_shape[::-1]m=true_boxes.shape[0]grid_shapes=[input_shape//
{0:32,1:16,2:8}[l]forlinrange(num_layers)]y_true=[np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),dtype='float32')forlinrange(num_layers)]#Expanddimtoapplybroadcasting.anchors=np.expand_dims(anchors,0)anchor_maxes=anchors/2.anchor_mins=-anchor_maxesvalid_mask=boxes_wh[...,0]>0forbinrange(m):#Discardzerorows.wh=boxes_wh[b,valid_mask[b]]iflen(wh)==0:continue#Expanddimtoapplybroadcasting.wh=np.expand_dims(wh,-2)box_maxes=wh/2.box_mins=-box_maxesintersect_mins=np.maximum(box_mins,anchor_mins)intersect_maxes=np.minimum(box_maxes,anchor_maxes)intersect_wh=np.maximum(intersect_maxes-intersect_mins,0.)intersect_area=intersect_wh[...,0]*intersect_wh[...,1]box_area=wh[...,0]*wh[...,1]anchor_area=anchors[...,0]*anchors[...,1]iou=intersect_area/(box_area+anchor_area-intersect_area)#Findbestanchorforeachtrueboxbest_anchor=np.argmax(iou,axis=-1)fort,ninenumerate(best_anchor):forlinrange(num_layers):ifninanchor_mask[l]:i=np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')j=np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')k=anchor_mask[l].index(n)c=true_boxes[b,t,4].astype('int32')y_true[l][b,j,i,k,0:4]=true_boxes[b,t,0:4]y_true[l][b,j,i,k,4]=1y_true[l][b,j,i,k,5+c]=1returny_truedefbox_iou(b1,b2):'''ReturnioutensorParameters----------b1:tensor,shape=(i1,...,iN,4),xywhb2:tensor,shape=(j,4),xywhReturns-------iou:tensor,shape=(i1,...,iN,j)'''#Expanddimtoapplybroadcasting.b1=K.expand_dims(b1,-2)b1_xy=b1[...,:2]b1_wh=b1[...,2:4]b1_wh_half=b1_wh/2.b1_mins=b1_xy-b1_wh_halfb1_maxes=b1_xy+b1_wh_half#Expanddimtoapplybroadcasting.b2=K.expand_dims(b2,0)b2_xy=b2[...,:2]b2_wh=b2[...,2:4]b2_wh_half=b2_wh/2.b2_mins=b2_xy-b2_wh_halfb2_maxes=b2_xy+b2_wh_halfintersect_mins=K.maximum(b1_mins,b2_mins)intersect_maxes=K.minimum(b1_maxes,b2_maxes)intersect_wh=K.maximum(intersect_maxes-intersect_mins,0.)intersect_area=intersect_wh[...,0]*intersect_wh[...,1]b1_area=b1_wh[...,0]*b1_wh[...,1]b2_area=b2_wh[...,0]*b2_wh[...,1]iou=intersect_area/(b1_area+b
2_area-intersect_area)returnioudefyolo_loss(args,anchors,num_classes,ignore_thresh=.5,print_loss=False):'''Returnyolo_losstensorParameters----------yolo_outputs:listoftensor,theoutputofyolo_bodyortiny_yolo_bodyy_true:listofarray,theoutputofpreprocess_true_boxesanchors:array,shape=(N,2),whnum_classes:integerignore_thresh:float,theiouthresholdwhethertoignoreobjectconfidencelossReturns-------loss:tensor,shape=(1,)'''num_layers=len(anchors)//3#defaultsettingyolo_outputs=args[:num_layers]y_true=args[num_layers:]anchor_mask=[[6,7,8],[3,4,5],[0,1,2]]ifnum_layers==3else[[3,4,5],[1,2,3]]input_shape=K.cast(K.shape(yolo_outputs[0])[1:3]*32,K.dtype(y_true[0]))grid_shapes=[K.cast(K.shape(yolo_outputs[l])[1:3],K.dtype(y_true[0]))forlinrange(num_layers)]loss=0m=K.shape(yolo_outputs[0])[0]#batchsize,tensormf=K.cast(m,K.dtype(yolo_outputs[0]))forlinrange(num_layers):object_mask=y_true[l][...,4:5]true_class_probs=y_true[l][...,5:]grid,raw_pred,pred_xy,pred_wh=yolo_head(yolo_outputs[l],anchors[anchor_mask[l]],num_classes,input_shape,calc_loss=True)pred_box=K.concatenate([pred_xy,pred_wh])#Darknetrawboxtocalculateloss.raw_true_xy=y_true[l][...,:2]*grid_shapes[l][::-1]-gridraw_true_wh=K.log(y_true[l][...,2:4]/anchors[anchor_mask[l]]*input_shape[::-1])raw_true_wh=K.switch(object_mask,raw_true_wh,K.zeros_like(raw_true_wh))#avoidlog(0)=-infbox_loss_scale=2-y_true[l][...,2:3]*y_true[l][...,3:4]#Findignoremask,iterateovereachofbatch.ignore_mask=tf.TensorArray(K.dtype(y_true[0]),size=1,dynamic_size=True)object_mask_bool=K.cast(object_mask,'bool')defloop_body(b,ignore_mask):true_box=tf.boolean_mask(y_true[l][b,...,0:4],object_mask_bool[b,...,0])iou=box_iou(pred_box[b],true_box)best_iou=K.max(iou,axis=-1)ignore_mask=ignore_mask.write(b,K.cast(best_iou<ignore_thresh,K.dtype(true_box)))returnb+1,ignore_mask_,ignore_mask=K.control_flow_ops.while_loop(lambdab,*args:b<m,loop_body,[0,ignore_mask])ignore_mask=ignore_mask.stack()ignore_mask=K.expand_dims(ignore_mask,-1)#K.binary_crossentropyishelpfulto
avoidexpoverflow.xy_loss=object_mask*box_loss_scale*K.binary_crossentropy(raw_true_xy,raw_pred[...,0:2],from_logits=True)wh_loss=object_mask*box_loss_scale*0.5*K.square(raw_true_wh-raw_pred[...,2:4])confidence_loss=object_mask*K.binary_crossentropy(object_mask,raw_pred[...,4:5],from_logits=True)+\(1-object_mask)*K.binary_crossentropy(object_mask,raw_pred[...,4:5],from_logits=True)*ignore_maskclass_loss=object_mask*K.binary_crossentropy(true_class_probs,raw_pred[...,5:],from_logits=True)xy_loss=K.sum(xy_loss)/mfwh_loss=K.sum(wh_loss)/mfconfidence_loss=K.sum(confidence_loss)/mfclass_loss=K.sum(class_loss)/mfloss+=xy_loss+wh_loss+confidence_loss+class_lossifprint_loss:loss=tf.Print(loss,[loss,xy_loss,wh_loss,confidence_loss,class_loss,K.sum(ignore_mask)],message='loss:')returnloss3、训练train.py文件

图像尺寸也是416*416,训练过程中,也可以保存权重。

"""RetraintheYOLOmodelforyourowndataset."""importnumpyasnpimportkeras.backendasKfromkeras.layersimportInput,Lambdafromkeras.modelsimportModelfromkeras.optimizersimportAdamfromkeras.callbacksimportTensorBoard,ModelCheckpoint,ReduceLROnPlateau,EarlyStoppingfromyolo3.modelimportpreprocess_true_boxes,yolo_body,tiny_yolo_body,yolo_lossfromyolo3.utilsimportget_random_datadef_main():annotation_path='train.txt'log_dir='logs/000/'classes_path='model_data/voc_classes.txt'anchors_path='model_data/yolo_anchors.txt'class_names=get_classes(classes_path)num_classes=len(class_names)anchors=get_anchors(anchors_path)input_shape=(416,416)#multipleof32,hwis_tiny_version=len(anchors)==6#defaultsettingifis_tiny_version:model=create_tiny_model(input_shape,anchors,num_classes,freeze_body=2,weights_path='model_data/tiny_yolo_weights.h5')else:model=create_model(input_shape,anchors,num_classes,freeze_body=2,weights_path='model_data/yolo_weights.h5')#makesureyouknowwhatyoufreezelogging=TensorBoard(log_dir=log_dir)checkpoint=ModelCheckpoint(log_dir+'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',monitor='val_loss',save_weights_only=True,save_best_only=True,period=3)reduce_lr=ReduceLROnPlateau(monitor='val_loss',factor=0.1,patience=3,verbose=1)early_stopping=EarlyStopping(monitor='val_loss',min_delta=0,patience=10,verbose=1)val_split=0.1withopen(annotation_path)asf:lines=f.readlines()np.random.seed(10101)np.random.shuffle(lines)np.random.seed(None)num_val=int(len(lines)*val_split)num_train=len(lines)-num_val#Trainwithfrozenlayersfirst,togetastableloss.#Adjustnumepochstoyourdataset.Thisstepisenoughtoobtainanotbadmodel.ifTrue:model.compile(optimizer=Adam(lr=1e-3),loss={#usecustomyolo_lossLambdalayer.'yolo_loss':lambday_true,y_pred:y_pred})batch_size=32print('Trainon{}samples,valon{}samples,withbatchsize{}.'.format(num_train,num_val,batch_size))model.fit_generator(data_generator_wrapper(lines[:num_train],batch_size,input_shape,anchors,num_classes),steps_per_epoch=max(1,num_train//batch_siz
e),validation_data=data_generator_wrapper(lines[num_train:],batch_size,input_shape,anchors,num_classes),validation_steps=max(1,num_val//batch_size),epochs=50,initial_epoch=0,callbacks=[logging,checkpoint])model.save_weights(log_dir+'trained_weights_stage_1.h5')#Unfreezeandcontinuetraining,tofine-tune.#Trainlongeriftheresultisnotgood.ifTrue:foriinrange(len(model.layers)):model.layers[i].trainable=Truemodel.compile(optimizer=Adam(lr=1e-4),loss={'yolo_loss':lambday_true,y_pred:y_pred})#recompiletoapplythechangeprint('Unfreezeallofthelayers.')batch_size=32#notethatmoreGPUmemoryisrequiredafterunfreezingthebodyprint('Trainon{}samples,valon{}samples,withbatchsize{}.'.format(num_train,num_val,batch_size))model.fit_generator(data_generator_wrapper(lines[:num_train],batch_size,input_shape,anchors,num_classes),steps_per_epoch=max(1,num_train//batch_size),validation_data=data_generator_wrapper(lines[num_train:],batch_size,input_shape,anchors,num_classes),validation_steps=max(1,num_val//batch_size),epochs=100,initial_epoch=50,callbacks=[logging,checkpoint,reduce_lr,early_stopping])model.save_weights(log_dir+'trained_weights_final.h5')#Furthertrainingifneeded.defget_classes(classes_path):'''loadstheclasses'''withopen(classes_path)asf:class_names=f.readlines()class_names=[c.strip()forcinclass_names]returnclass_namesdefget_anchors(anchors_path):'''loadstheanchorsfromafile'''withopen(anchors_path)asf:anchors=f.readline()anchors=[float(x)forxinanchors.split(',')]returnnp.array(anchors).reshape(-1,2)defcreate_model(input_shape,anchors,num_classes,load_pretrained=True,freeze_body=2,weights_path='model_data/yolo_weights.h5'):'''createthetrainingmodel'''K.clear_session()#getanewsessionimage_input=Input(shape=(None,None,3))h,w=input_shapenum_anchors=len(anchors)y_true=[Input(shape=(h//{0:32,1:16,2:8}[l],w//{0:32,1:16,2:8}[l],\num_anchors//3,num_classes+5))forlinrange(3)]model_body=yolo_body(image_input,num_anchors//3,num_classes)print('CreateYOLOv3modelwith{}anchorsand{}classes.'.format(n
um_anchors,num_classes))ifload_pretrained:model_body.load_weights(weights_path,by_name=True,skip_mismatch=True)print('Loadweights{}.'.format(weights_path))iffreeze_bodyin[1,2]:#Freezedarknet53bodyorfreezeallbut3outputlayers.num=(185,len(model_body.layers)-3)[freeze_body-1]foriinrange(num):model_body.layers[i].trainable=Falseprint('Freezethefirst{}layersoftotal{}layers.'.format(num,len(model_body.layers)))model_loss=Lambda(yolo_loss,output_shape=(1,),name='yolo_loss',arguments={'anchors':anchors,'num_classes':num_classes,'ignore_thresh':0.5})([*model_body.output,*y_true])model=Model([model_body.input,*y_true],model_loss)returnmodeldefcreate_tiny_model(input_shape,anchors,num_classes,load_pretrained=True,freeze_body=2,weights_path='model_data/tiny_yolo_weights.h5'):'''createthetrainingmodel,forTinyYOLOv3'''K.clear_session()#getanewsessionimage_input=Input(shape=(None,None,3))h,w=input_shapenum_anchors=len(anchors)y_true=[Input(shape=(h//{0:32,1:16}[l],w//{0:32,1:16}[l],\num_anchors//2,num_classes+5))forlinrange(2)]model_body=tiny_yolo_body(image_input,num_anchors//2,num_classes)print('CreateTinyYOLOv3modelwith{}anchorsand{}classes.'.format(num_anchors,num_classes))ifload_pretrained:model_body.load_weights(weights_path,by_name=True,skip_mismatch=True)print('Loadweights{}.'.format(weights_path))iffreeze_bodyin[1,2]:#Freezethedarknetbodyorfreezeallbut2outputlayers.num=(20,len(model_body.layers)-2)[freeze_body-1]foriinrange(num):model_body.layers[i].trainable=Falseprint('Freezethefirst{}layersoftotal{}layers.'.format(num,len(model_body.layers)))model_loss=Lambda(yolo_loss,output_shape=(1,),name='yolo_loss',arguments={'anchors':anchors,'num_classes':num_classes,'ignore_thresh':0.7})([*model_body.output,*y_true])model=Model([model_body.input,*y_true],model_loss)returnmodeldefdata_generator(annotation_lines,batch_size,input_shape,anchors,num_classes):'''datageneratorforfit_generator'''n=len(annotation_lines)i=0whileTrue:image_data=[]box_data=[]forbinrange(batch_size):ifi==0:
np.random.shuffle(annotation_lines)image,box=get_random_data(annotation_lines[i],input_shape,random=True)image_data.append(image)box_data.append(box)i=(i+1)%nimage_data=np.array(image_data)box_data=np.array(box_data)y_true=preprocess_true_boxes(box_data,input_shape,anchors,num_classes)yield[image_data,*y_true],np.zeros(batch_size)defdata_generator_wrapper(annotation_lines,batch_size,input_shape,anchors,num_classes):n=len(annotation_lines)ifn==0orbatch_size<=0:returnNonereturndata_generator(annotation_lines,batch_size,input_shape,anchors,num_classes)if__name__=='__main__':_main()

 

 

 

 

 

 

 

 

 

 

 

 

 

 


您需要登录后才可以回帖 登录 | 立即注册

触屏版| 电脑版

技术支持 历史网 V2.0 © 2016-2017