Last active
June 30, 2021 06:04
-
-
Save northeastsquare/bb44c1e14e3f3cbcbe9c0df18ca1bb12 to your computer and use it in GitHub Desktop.
Revisions
-
northeastsquare revised this gist
Sep 9, 2020 . 1 changed file with 9 additions and 2 deletions.There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters. Learn more about bidirectional Unicode charactersOriginal file line number Diff line number Diff line change @@ -315,6 +315,7 @@ def __init__(self, cfg_path): self.mean_file = [] self.norm_type = [] self.data_scale = [] self.image_type = 1 for line in cfg_data_lines: if( line.strip() == ""): @@ -338,6 +339,8 @@ def __init__(self, cfg_path): print("image_file:", self.image_file) if "[mean_file]" == line.split()[0].strip(): self.mean_file.append(line.split()[1].strip()) if "[image_type]" == line.split()[0].strip(): self.image_type = int(line.split()[1].strip()) if "[norm_type]" == line.split()[0].strip(): if(False == isValidNormType(line.split()[1].strip())): print("Error: Input parameter normType is not valid, range is 0 to 5 and it should be integer") @@ -429,12 +432,15 @@ def load_data_from_image(data_layer, cfg, i, net, output_dir): image_to_bgr(img_filename, net.blobs[data_layer].data.shape[1:], output_dir) color = True img = cv2.imdecode(np.fromfile(img_filename, dtype=np.uint8), -1) height, width = net.blobs[data_layer].data.shape[2:] img = cv2.resize(img, (height, width)) img = img[..., ::-1] #转换成rgb #img = cv2.imread(img_filename) inputs = img print("inputs:", inputs) transformer = caffe.io.Transformer({data_layer: net.blobs[data_layer].data.shape}) if net.blobs[data_layer].data.shape[1]==3: # 通道转换c,h,w transformer.set_transpose(data_layer, (2,0,1)) if norm_type == '1' or norm_type == '4' and os.path.isfile(meanfile): # (sub mean by meanfile): if net.blobs[data_layer].data.shape[1]==3: @@ -463,7 +469,8 @@ def load_data_from_image(data_layer, cfg, i, net, output_dir): inputs = inputs - np.array(list(map(float, [lmeanfile[0]]))) elif norm_type == '3': #inputs = inputs * float(data_scale) transformer.set_input_scale(data_layer, data_scale) ''' if 
img_filename.endswith('.txt') or img_filename.endswith('.float') or img_filename.endswith('.hex'): print (inputs.shape) -
northeastsquare created this gist
Sep 9, 2020. There are no files selected for viewing.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters. Learn more about bidirectional Unicode charactersOriginal file line number Diff line number Diff line change @@ -0,0 +1,641 @@ #from __future__ import print_function import sys sys.path.insert(0, 'D:\\hisi\\30\\HiSVP_PC_V1.1.3.0\\tools\\nnie\\windows\\ruyi_env_setup-2.0.38\\python35\\Lib\\site-packages\\caffe\\python') import caffe import pickle from datetime import datetime import numpy as np import struct import sys, getopt import cv2, os, re import pickle as p import matplotlib.pyplot as pyplot import ctypes import codecs import caffe.proto.caffe_pb2 as caffe_pb2 import google.protobuf as caffe_protobuf import google.protobuf.text_format import platform import math cpu_supported_layers=[ "Convolution", "Deconvolution", "Pooling", "InnerProduct", "LRN", "BatchNorm", "Scale", "Bias", "Eltwise", "ReLU", "PReLU", "AbsVal", "TanH", "Sigmoid", "BNLL", "ELU", "LSTM", "RNN", "Softmax", "Exp", "Log", "Reshape", "Flatten", "Split", "Slice", "Concat", "SPP", "Power", "Threshold", "MVN", "Parameter", "Reduction", "Input", "Dropout", "ROIPooling", "Upsample", "Normalize", "Permute", "PSROIPooling", "PassThrough", "Python"] cuda_supported_layers=["Convolution", "Deconvolution", "Pooling", "InnerProduct", "LRN", "BatchNorm", "Scale", "Bias", "Eltwise", "ReLU", "PReLU", "AbsVal", "TanH", "Sigmoid", "BNLL", "ELU", "LSTM", "RNN", "Softmax", "Exp", "Log", "Reshape", "Flatten", "Split", "Slice", "Concat", "SPP", "Power", "Threshold", "MVN", "Parameter", "Reduction", "Input", "Dropout"] def isValidNormType(normType): if (normType == '0' or normType == '1' or normType == '2' or normType == '3' or normType == '4' or normType == '5') : return True return False def isSupportedLayer(layer_type, cuda_flag): if '1' == cuda_flag: for type in cuda_supported_layers: if(layer_type == type): 
return True else: for type in cpu_supported_layers: if(layer_type == type): return True return False def image_to_array(img_file, shape_c_h_w, output_dir): result = np.array([]) print("converting begins ...") resizeimage = cv2.resize(cv2.imread(img_file), (shape_c_h_w[2],shape_c_h_w[1])) b,g,r = cv2.split(resizeimage ) height, width, channels = resizeimage.shape length = height*width #print(channels ) r_arr = np.array(r).reshape(length) g_arr = np.array(g).reshape(length) b_arr = np.array(b).reshape(length) image_arr = np.concatenate((r_arr, g_arr, b_arr)) result = image_arr.reshape((1, length*3)) print("converting finished ...") file_path = os.path.join(output_dir, "test_input_img_%d_%d_%d.bin"%(channels,height,width)) with open(file_path, mode='wb') as f: p.dump(result, f) print("save bin file success") def image_to_rgb(img_file,shape_c_h_w, output_dir): print("converting begins ...", img_file) #image = cv2.imread(img_file) #print("image.shape", image.shape) image = cv2.imdecode(np.fromfile(img_file, dtype=np.uint8), 1) image = cv2.resize(image, (shape_c_h_w[2],shape_c_h_w[1])) image = image.astype('uint8') height = image.shape[0] width = image.shape[1] channels = image.shape[2] file_path = os.path.join(output_dir, "test_input_img_%d_%d_%d.rgb"%(channels,height,width)) fileSave = open(file_path,'wb') for step in range(0,height): for step2 in range (0, width): fileSave.write(image[step,step2,2]) for step in range(0,height): for step2 in range (0, width): fileSave.write(image[step,step2,1]) for step in range(0,height): for step2 in range (0, width): fileSave.write(image[step,step2,0]) fileSave.close() print("converting finished ...") def image_to_bin(img_file,shape_c_h_w, output_dir): print("converting begins ...") #image = cv2.imread(img_file) image = cv2.imdecode(np.fromfile(img_file, dtype=np.uint8), cv2.IMREAD_GRAYSCALE) image = image.astype('uint8') height = image.shape[0] width = image.shape[1] file_path = os.path.join(output_dir, 
"test_input_img_%d_%d_%d.bin"%(1,height,width)) fileSave = open(file_path,'wb') for step in range(0,height): for step2 in range (0, width): fileSave.write(image[step,step2]) fileSave.close() print("converting finished ...") def image_to_bgr(img_file,shape_c_h_w, output_dir): print("image_to_bgr converting begins ...", img_file) image = cv2.imread(img_file) #image = cv2.imdecode(np.fromfile(img_file, dtype=np.uint8), -1) image = cv2.resize(image, (shape_c_h_w[2],shape_c_h_w[1])) image = image.astype('uint8') b,g,r = cv2.split(image) height = image.shape[0] width = image.shape[1] channels = image.shape[2] file_path = os.path.join(output_dir, "test_input_img_%d_%d_%d.bgr"%(channels,height,width)) fileSave = open(file_path,'wb') for step in range(0,height): for step2 in range (0, width): fileSave.write(b[step,step2]) for step in range(0,height): for step2 in range (0, width): fileSave.write(g[step,step2]) for step in range(0,height): for step2 in range (0, width): fileSave.write(r[step,step2]) fileSave.close() print("converting finished ...") def bin_to_image(bin_file,shape_c_h_w): #fileReader = open(bin_file,'rb', encoding='utf-8') if(platform.system()=="Linux"): fileReader = open(bin_file,'rb') else: fileReader = open(bin_file.encode('gbk'),'rb') height = shape_c_h_w[1] width = shape_c_h_w[2] channel = shape_c_h_w[0] imageRead = np.zeros((shape_c_h_w[1], shape_c_h_w[2], shape_c_h_w[0]), np.uint8) for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,2] = a[0] for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,1] = a[0] for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,0] = a[0] fileReader.close() return imageRead def isfloat(value): try: float(value) return True except ValueError: return False def isValidDataScale(value): if(True == isfloat(value)): 
if(float(value) >= 0.000244140625 and float(value) <= 4294967296.0): return True else: return False else: return False def get_float_numbers(floatfile): mat = [] if(platform.system()=="Linux"): with open(floatfile, 'rb') as input_file: for line in input_file: line = line.strip() for number in line.split(): if isfloat(number): mat.append(float(number)) else: with open(floatfile.encode('gbk'), 'rb') as input_file: for line in input_file: line = line.strip() for number in line.split(): if isfloat(number): mat.append(float(number)) return mat def isHex(value): try: int(value,16) return True except ValueError: return False def isHex_old(value): strvalue=str(value) length = len(strvalue) if length == 0: return False i = 0 while(i < length): if not (strvalue[i] >= 'a' and strvalue[i] <= 'e' or strvalue[i] >= 'A' and strvalue[i] <= 'E' or strvalue[i] >= '0' and strvalue[i] <= '9'): return False i += 1 return True def get_hex_numbers(hexfile): mat = [] if(platform.system()=="Linux"): with open(hexfile) as input_file: for line in input_file: line = line.strip() for number in line.split(): if isHex(number): mat.append(1.0*ctypes.c_int32(int(number,16)).value/4096) else: with open(hexfile.encode("gbk")) as input_file: for line in input_file: line = line.strip() for number in line.split(): if isHex(number): mat.append(1.0*ctypes.c_int32(int(number,16)).value/4096) return mat def print_CNNfeaturemap(net, output_dir): params = list(net.blobs.keys()) print (params) for pr in params[0:]: print (pr) res = net.blobs[pr].data[...] 
pr = pr.replace('/', '_').replace('-','_') print (res.shape) for index in range(0,res.shape[0]): if len(res.shape) == 4: filename = os.path.join(output_dir, "%s_output%d_%d_%d_%d_caffe.linear.float"%(pr,index,res.shape[1],res.shape[2],res.shape[3])) elif len(res.shape) == 3: filename = os.path.join(output_dir, "%s_output%d_%d_%d_caffe.linear.float"%(pr, index,res.shape[1],res.shape[2])) elif len(res.shape) == 2: filename = os.path.join(output_dir, "%s_output%d_%d_caffe.linear.float"%(pr,index,res.shape[1])) elif len(res.shape) == 1: filename = os.path.join(output_dir, "%s_output%d_caffe.linear.float"%(pr,index)) f = open(filename, 'wb') np.savetxt(f, list(res.reshape(-1, 1))) # save result by layer name def save_result(train_net, net, output_dir): #logging.debug(net_param) max_len = len(train_net.layer) # input data layer index = 0 for input in train_net.input: layer_data = net.blobs[input].data[...] layer_name=input.replace("/", "_").replace("-","_"); shape_str= str(layer_data.shape) shape_str=shape_str[shape_str.find(", ") + 1:].replace("(", "").replace(")", "").replace(" ", "").replace(",", "_"); filename = os.path.join(output_dir, "%s_output%d_%s_caffe.linear.float"%(layer_name, index, shape_str)) np.savetxt(filename, layer_data.reshape(-1, 1)) index = index + 1 # other layer i = 0 for layer in train_net.layer: index = 0 for top in layer.top: # ignore inplace layer if 1 == len(layer.top) and 1 == len(layer.bottom) and layer.top[0] == layer.bottom[0]: break layer_data = net.blobs[top].data[...] 
layer_name=layer.name.replace("/", "_").replace("-","_"); shape_str= str(layer_data.shape) shape_str=shape_str[shape_str.find(", ") + 1:].replace("(", "").replace(")", "").replace(" ", "").replace(",", "_"); filename = os.path.join(output_dir, "%s_output%d_%s_caffe.linear.float"%(layer_name, index, shape_str)) np.savetxt(filename, layer_data.reshape(-1, 1)) index = index + 1 # update the process_bar i = i + 1 k = i * 100 / max_len process_str = ">" * int(k) + " " * (100 - int(k)) sys.stdout.write('\r'+ process_str +'[%s%%]'%(k)) sys.stdout.flush() sys.stdout.write("\n") sys.stdout.flush() def check_arg_num(argv): if len(argv) < 6: print ('CNN_convert_bin_and_print_featuremap.py -i <ini_file> -m <model_file> -w <weight_file> -o <output_dir> -c <0 or 1>') print ('-i <cfg_file>: .cfg, store the information of input data') print ('-m <model_file>: .prototxt, batch num should be 1') print ('-w <weight_file>: .caffemodel') print ('-o <output_dir>: optional, if not set, there will be a directory named output created in current dir>') print ('-c <0 or 1> 1, gpu, 0 cpu') print ('any parameter only need one input') return False return True def hint(): print ('CNN_convert_bin_and_print_featuremap.py -m <model_file> -w <weight_file> -i <img_file or bin_file or float_file> -p <"104","117","123" or "ilsvrc_2012_mean.npy">') print ('-i <cfg_file>: .cfg, store the information of input data') print ('-m <model_file>: .prototxt, batch num should be 1') print ('-w <weight_file>: .caffemodel') #print ('-n <norm_type>: 0(default): no process, 1: sub img-val and please give the img path in the parameter p, 2: sub channel mean value and please give each channel value in the parameter p in BGR order, 3: dividing 256, 4: sub mean image file and dividing 256, 5: sub channel mean value and dividing 256') #print ('-s <data_scale>: optional, if not set, 0.003906 is set by default') #print ('-p <"104", "117", "123", "ilsvrc_2012_mean.npy" or "xxx.binaryproto">: -p "104", "117", "123" is sub 
channel-mean-val, -p "ilsvrc_2012_mean.npy" is sub img-val and need a ilsvrc_2012_mean.npy') print ('-o <output_dir: optional, if not set, there will be a directory named output created in current dir>') print ('-c <0 or 1> 1, gpu, 0 cpu') print ('any parameter only need one input') class CfgParser: def __init__(self, cfg_path): fread = open(cfg_path, "r") cfg_data_lines = fread.readlines() self.image_file = [] self.mean_file = [] self.norm_type = [] self.data_scale = [] for line in cfg_data_lines: if( line.strip() == ""): continue if "[image_list]" == line.split()[0].strip(): image_list_file = line.split()[1].strip() with open(image_list_file, 'r') as ilf: lines = ilf.readlines() last_image_file = None for ln in lines[::-1]: ln = ln.strip() if not ln: continue last_image_file = ln break if last_image_file: self.image_file.append(last_image_file) else: print("must specify image_list file") assert(False) print("image_file:", self.image_file) if "[mean_file]" == line.split()[0].strip(): self.mean_file.append(line.split()[1].strip()) if "[norm_type]" == line.split()[0].strip(): if(False == isValidNormType(line.split()[1].strip())): print("Error: Input parameter normType is not valid, range is 0 to 5 and it should be integer") sys.exit(2) self.norm_type.append(line.split()[1].strip()) if "[data_scale]" == line.split()[0].strip(): if(True == isValidDataScale(line.split()[1].strip())): self.data_scale = float(line.split()[1].strip()) else: self.data_scale = -1 def __str__(self): return str(self.__class__) + '\n' + '\n'.join((str(item) + ' = ' + str(self.__dict__[item]) for item in sorted(self.__dict__))) def judge_supported_layer(model_filename, train_net, cuda_flag): if(platform.system()=="Linux"): f=open(model_filename, 'rb') else: f=open(model_filename.encode('gbk'), 'rb') train_str = f.read() caffe_protobuf.text_format.Parse(train_str, train_net) f.close() layers = train_net.layer for layer in layers: if(False == isSupportedLayer(layer.type, cuda_flag)): print("Layer 
" + layer.name + " with type " + layer.type + " is not supported, please refer to chapter 3.1.4 and FAQ of \"HiSVP Development Guide.pdf\" to extend caffe!") sys.exit(1) def print_log_info(model_filename, weight_filename, cfg, output_dir): print('model file is: ' + model_filename) print('weight file is: ' + weight_filename) print('output dir is: ' + output_dir) for i in range(len(cfg.image_file)): print('data number: ' + str(i)) print('image file is: ' + cfg.image_file[i]) print('image preprocessing method is: ' + str(cfg.norm_type[i])) print('data_scale is: ' + str(cfg.data_scale)) def load_data_from_text(image_file, shape, net): if(image_file.endswith('.float')): data = np.asarray(get_float_numbers(image_file)) inputs = data inputs= np.reshape(inputs, net.blobs[list(net.blobs.keys())[0]].data.shape) return inputs elif(image_file.endswith('.hex')): data = np.asarray(get_hex_numbers(image_file)) inputs = data inputs= np.reshape(inputs,net.blobs[list(net.blobs.keys())[0]].data.shape) return inputs #elif(image_file.endswith('.txt')): # input_data = np.loadtxt(x.replace(b',',b' ') for x in f).reshape(shape) def load_data_from_image(data_layer, cfg, i, net, output_dir): #preprocess img_filename = cfg.image_file[i] norm_type = cfg.norm_type[i] meanfile = cfg.mean_file[i] data_scale = cfg.data_scale if norm_type == '1' or norm_type == '4': if not os.path.isfile(meanfile): print("Please give the mean image file path") sys.exit(1) if meanfile.endswith('.binaryproto'): meanfileBlob = caffe.proto.caffe_pb2.BlobProto() if(platform.system()=="Linux"): meanfileData = open(meanfile, 'rb').read() else: meanfileData = open(meanfile.encode('gbk'), 'rb').read() meanfileBlob.ParseFromString(meanfileData) arr = np.array(caffe.io.blobproto_to_array(meanfileBlob)) out = arr[0] np.save('transMean.npy', out) meanfile = 'transMean.npy' if(img_filename.endswith('.bgr')): inputs = bgr_to_image(img_filename, net.blobs[data_layer].data.shape[1:]) elif(img_filename.endswith('.rgb')): inputs = 
rgb_to_image(img_filename, net.blobs[data_layer].data.shape[1:]) else: if net.blobs[data_layer].data.shape[1]==1: color = False image_to_bin(img_filename, net.blobs[data_layer].data.shape[1:], output_dir) #transform into raw data elif net.blobs[data_layer].data.shape[1]==3: image_to_rgb(img_filename, net.blobs[data_layer].data.shape[1:], output_dir) image_to_bgr(img_filename, net.blobs[data_layer].data.shape[1:], output_dir) color = True img = cv2.imdecode(np.fromfile(img_filename, dtype=np.uint8), -1) img = img[..., ::-1] #img = cv2.imread(img_filename) inputs = img print("inputs:", inputs) transformer = caffe.io.Transformer({data_layer: net.blobs[data_layer].data.shape}) if net.blobs[data_layer].data.shape[1]==3: transformer.set_transpose(data_layer, (2,0,1)) if norm_type == '1' or norm_type == '4' and os.path.isfile(meanfile): # (sub mean by meanfile): if net.blobs[data_layer].data.shape[1]==3: transformer.set_mean(data_layer,np.load(meanfile).mean(1).mean(1)) elif net.blobs[data_layer].data.shape[1]==1: tempMeanValue = np.load(meanfile).mean(1).mean(1) tempa = list(tempMeanValue) inputs = inputs - np.array(list(map(float, [tempa[0]]))) elif norm_type == '2' or norm_type == '5': if net.blobs[data_layer].data.shape[1]==3: mean_value = np.loadtxt(meanfile) print('mean channel value: ', mean_value) if len(mean_value) != 3: print("Please give the channel mean value in BGR order with 3 values, like 112,113,120") sys.exit(1) if not isfloat(mean_value[0]) or not isfloat(mean_value[1]) or not isfloat(mean_value[2]): print("Please give the channel mean value in BGR order") sys.exit(1) else: transformer.set_mean(data_layer, mean_value) elif net.blobs[data_layer].data.shape[1]==1: with open(meanfile, 'r') as f: lmeanfile = f.read().splitlines() print(lmeanfile) if isfloat(lmeanfile[0]): # (sub mean by channel) inputs = inputs - np.array(list(map(float, [lmeanfile[0]]))) elif norm_type == '3': inputs = inputs * float(data_scale) ''' if img_filename.endswith('.txt') or 
img_filename.endswith('.float') or img_filename.endswith('.hex'): print (inputs.shape) data = inputs else: data = np.asarray([transformer.preprocess('data', inputs)]) ''' data = inputs if norm_type == '4' or norm_type == '5': #data = data * float(data_scale) transformer.set_input_scale(data_layer, data_scale) # set im_info for RCNN net if 'im_info' in net.blobs: data_shape = net.blobs[net.inputs[0]].data.shape im_shape = data.shape #logging.debug("data shape:" + str(data_shape)) #logging.debug("image shape:" + str(im_shape)) im_scale_height = float(data_shape[2])/float(im_shape[0]) im_scale_width = float(data_shape[3])/float(im_shape[1]) #if math.fabs(im_scale_height - im_scale_width) > 0.1: # logging.warning("im_scale_height[%f] is not equal to im_scale_width[%f].\nPlease reshape data input layer to (%d, %d) in prototxt, otherwise it may detect failed."%(im_scale_height, im_scale_width, im_shape[0], im_shape[1])) # im_scale = data(w,h) / image(w,h) im_scale = im_scale_height if im_scale_height > im_scale_width else im_scale_width im_info_data = np.array([[data_shape[2], data_shape[3], im_scale]], dtype=np.float32).reshape(net.blobs['im_info'].data.shape) np.set_printoptions(suppress=True) #logging.debug("im_info:" + str(im_info_data)) net.blobs['im_info'].data[...] 
= im_info_data data = transformer.preprocess(data_layer, data) print("after pre:", data) return data def rgb_to_image(bin_file,shape_c_h_w): #fileReader = open(bin_file,'rb', encoding='utf-8') if(platform.system()=="Linux"): fileReader = open(bin_file,'rb') else: fileReader = open(bin_file.encode('gbk'),'rb') height = shape_c_h_w[1] width = shape_c_h_w[2] channel = shape_c_h_w[0] imageRead = np.zeros((shape_c_h_w[1], shape_c_h_w[2], shape_c_h_w[0]), np.uint8) for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,2] = a[0] for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,1] = a[0] for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,0] = a[0] fileReader.close() return imageRead def bgr_to_image(bin_file, shape_c_h_w): #fileReader = open(bin_file,'rb', encoding='utf-8') if(platform.system()=="Linux"): fileReader = open(bin_file,'rb') else: fileReader = open(bin_file.encode('gbk'),'rb') height = shape_c_h_w[1] width = shape_c_h_w[2] channel = shape_c_h_w[0] imageRead = np.zeros((shape_c_h_w[1], shape_c_h_w[2], shape_c_h_w[0]), np.uint8) for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,0] = a[0] for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,1] = a[0] for step in range(0,height): for step2 in range (0, width): a = struct.unpack("B", fileReader.read(1)) imageRead[step,step2,2] = a[0] fileReader.close() return imageRead def preprocess_data(cfg, net, output_dir, train_net): input_layers = [] for layer in train_net.layer: if(layer.type == 'Input'): input_layers.append(layer) #for top in layer.top: j = 0 for i in range(len(cfg.image_file)): if i < len(net.inputs): data_layer = net.inputs[i] if 
(cfg.image_file[i].endswith('.float') or cfg.image_file[i].endswith('.hex')): input_data = load_data_from_text(cfg.image_file[i], net.blobs[data_layer].data.shape, net) else: input_data = load_data_from_image(data_layer, cfg, i, net, output_dir) net.blobs[data_layer].data[...] = input_data else: for top in input_layers[j].top: if (cfg.image_file[i].endswith('.float') or cfg.image_file[i].endswith('.hex')): input_data = load_data_from_text(cfg.image_file[i], net.blobs[top].data.shape, net) else: input_data = load_data_from_image(top, cfg, i, net, output_dir) net.blobs[top].data[...] = input_data j = j + 1 def main(argv): #check if the input args meet the format if(False == check_arg_num(argv)): sys.exit(2) output_dir = 'output/' cuda_flag = 0 opts, args = getopt.getopt(argv, "hm:w:i:o:c:") for opt, arg in opts: if opt == '-h': hint() sys.exit() elif opt == "-m": model_filename = arg elif opt == "-w": weight_filename = arg elif opt == "-i": cfg_filename = arg elif opt == "-o": output_dir = arg elif opt == "-c": cuda_flag = arg if('1' == cuda_flag): caffe.set_mode_gpu() caffe.set_device(0) #parse image params cfg = CfgParser(cfg_filename) if(False == isValidDataScale(cfg.data_scale)): print("The datascale in cfg file is invalid, it should be no less than 0.000244140625 and no more than 4294967296.0") sys.exit(2) #parse prototxt train_net = caffe_pb2.NetParameter() #judge if the prototxt has unspporterd layers judge_supported_layer(model_filename, train_net, cuda_flag) #print debug info print_log_info(model_filename, weight_filename, cfg, output_dir) #load caffe prototxt and caffe model if(platform.system()=="Linux"): net = caffe.Net(model_filename, weight_filename, caffe.TEST) else: net = caffe.Net(model_filename.encode('gbk'), weight_filename.encode('gbk'), caffe.TEST) print ('model load success') if not os.path.isdir(output_dir): os.mkdir(output_dir) #data preprocess ####################################################### data = preprocess_data(cfg, net, output_dir, 
train_net)##### ####################################################### #data_reshape= np.reshape(data,net.blobs[list(net.blobs.keys())[0]].data.shape) #net.blobs[list(net.blobs.keys())[0]].data[...] = data_reshape.astype('float') out = net.forward() save_result(train_net, net, output_dir) #print_CNNfeaturemap(net, output_dir) sys.exit(0) if __name__=='__main__': main(sys.argv[1:])