Commit 115162ac authored by Hashim Sharif

Merging

parents fc92225a 4ae62ea4
@@ -37,10 +37,6 @@ VAL_SIZE = 100
-def relu6(x):
-    return K.relu(x, max_value=6)
 def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
     channel_axis = 1
     filters = int(filters * alpha)
@@ -51,7 +47,7 @@ def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
                use_bias=False,
                strides=strides)(x)
     x = BatchNormalization(axis=channel_axis)(x)
-    return Activation(relu6)(x)
+    return Activation('relu')(x)
 def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
@@ -70,14 +66,14 @@ def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
                         strides=strides,
                         use_bias=False)(x)
     x = BatchNormalization(axis=channel_axis)(x)
-    x = Activation(relu6)(x)
+    x = Activation('relu')(x)
     x = Conv2D(pointwise_conv_filters, (1, 1),
                padding='same',
                use_bias=False,
                strides=(1, 1))(x)
     x = BatchNormalization(axis=channel_axis)(x)
-    return Activation(relu6)(x)
+    return Activation('relu')(x)
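
The three hunks above drop the custom relu6 helper and switch every Activation(relu6) call to the built-in 'relu' string. The only behavioral difference is the upper clamp at 6 that relu6 applied. A minimal sketch, not part of the commit and assuming the same standalone Keras backend alias K used by this script, of the two activations:

# Sketch only: compares the removed relu6 behavior with the built-in ReLU.
import numpy as np
from keras import backend as K

x = K.constant(np.array([-2.0, 3.0, 8.0], dtype=np.float32))
print(K.eval(K.relu(x)))               # [0. 3. 8.] -> 'relu', unbounded above
print(K.eval(K.relu(x, max_value=6)))  # [0. 3. 6.] -> what relu6 computed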
@@ -210,6 +206,52 @@ y_true = np.array(y_true)
+# def train_helper(x):
+#     try:
+#         x = x.decode('utf-8')
+#     except:
+#         pass
+#     image = load_image(x)
+#     y = np.zeros(1000, dtype=np.uint8)
+#     y[synset_to_keras_idx[x.split('/')[-2]]] = 1
+#     return image, y
+
+# train_images = glob.glob(IMAGENET_DIR + 'train/*/*')
+# random.shuffle(train_images)
+
+# dataset = tf.data.Dataset().from_tensor_slices(train_images)
+# dataset = dataset.map(
+#     lambda x: tf.py_func(train_helper, [x], [tf.float32, tf.uint8]),
+#     num_parallel_calls=16
+# )
+# dataset = dataset.shuffle(buffer_size=1000)
+# dataset = dataset.batch(32)
+# dataset = dataset.repeat()
+
+# next_element = dataset.make_one_shot_iterator().get_next()
+
+# sess = tf.Session()
+# def generate():
+#     while True:
+#         yield sess.run(next_element)
+
+# model.compile(optimizer=keras.optimizers.Adam(lr=0.00001), loss='categorical_crossentropy', metrics=['acc'])
+# model.fit_generator(generate(), steps_per_epoch=1000, validation_data=(X_test, to_categorical(y_true, num_classes=1000)), epochs=7)
 
 translate_to_approxhpvm(model, OUTPUT_DIR, X_test[:VAL_SIZE], y_true[:VAL_SIZE], 1000)
 dumpCalibrationData(OUTPUT_DIR + 'test_input.bin', X_test, OUTPUT_DIR + 'test_labels.bin', y_true)
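
The commented-out block added in the last hunk sketches a TF 1.x tf.data input pipeline for fine-tuning the model on ImageNet before it is handed to translate_to_approxhpvm. As written it relies on tf.data.Dataset() being instantiable, whereas from_tensor_slices is normally called on the class itself. A hedged, self-contained sketch of the same idea follows; load_image, synset_to_keras_idx, IMAGENET_DIR, and model are assumed to come from the surrounding script, and TF 1.x graph mode plus standalone Keras are assumed.

# Sketch only, not part of the commit. Assumes TF 1.x, standalone Keras, and
# the script's own load_image, synset_to_keras_idx, IMAGENET_DIR, and model.
import glob
import random
import numpy as np
import tensorflow as tf
import keras

def train_helper(path):
    # tf.py_func passes strings in as bytes; decode before building the path.
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    image = load_image(path)                                # assumed helper
    label = np.zeros(1000, dtype=np.uint8)
    label[synset_to_keras_idx[path.split('/')[-2]]] = 1     # class from dir name
    return image.astype(np.float32), label

train_images = glob.glob(IMAGENET_DIR + 'train/*/*')
random.shuffle(train_images)

dataset = tf.data.Dataset.from_tensor_slices(train_images)  # class method, no ()
dataset = dataset.map(
    lambda x: tf.py_func(train_helper, [x], [tf.float32, tf.uint8]),
    num_parallel_calls=16)
dataset = dataset.shuffle(buffer_size=1000).batch(32).repeat()

next_element = dataset.make_one_shot_iterator().get_next()
sess = tf.Session()

def generate():
    # Yield (images, labels) batches for Keras' fit_generator.
    while True:
        yield sess.run(next_element)

model.compile(optimizer=keras.optimizers.Adam(lr=1e-5),
              loss='categorical_crossentropy', metrics=['acc'])
model.fit_generator(generate(), steps_per_epoch=1000, epochs=7)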