diff --git a/README.md b/README.md
index 9536700..43ad95d 100644
--- a/README.md
+++ b/README.md
@@ -10,11 +10,11 @@ A Python 3 and Keras 2 implementation of MSCNN for people countingand provide tr
 ## MSCNN and MSB architectures
 
 **MSCNN**
 
-![MSCNN](/images/mscnn.png)
+![MSCNN](images/mscnn.png)
 
 **MSB**
 
-![MSB](/images/msb.png)
+![MSB](images/msb.png)
 
 ## Experiment
@@ -24,7 +24,7 @@ A Python 3 and Keras 2 implementation of MSCNN for people countingand provide tr
 
 Generate density_map from data:
 
-![density map](/images/density_map.png)
+![density map](images/density_map.png)
 
 **train**
 
@@ -38,7 +38,7 @@ python train.py --size 224 --batch 16 --epochs 100
 real count:30
 pred count:27
 ```
 
-![res](/images/res1.png)
+![res](images/res1.png)
 
 ## Reference
@@ -52,4 +52,4 @@
 ## Copyright
 
 See [LICENSE](LICENSE) for details.
-
+
diff --git a/model.py b/model.py
index b97a5f6..4d8596a 100644
--- a/model.py
+++ b/model.py
@@ -16,7 +16,7 @@ def MSB(filters):
     Returns:
         f: function, layer func.
     """
-    params = {'activation': 'relu', 'padding': 'same',
+    params = {'activation': 'elu', 'padding': 'same',
               'kernel_regularizer': l2(5e-4)}
 
     def f(x):
@@ -26,7 +26,7 @@
         x4 = Conv2D(filters, 3, **params)(x)
         x = concatenate([x1, x2, x3, x4])
         x = BatchNormalization()(x)
-        x = Activation('relu')(x)
+        x = Activation('elu')(x)
         return x
 
     return f
@@ -43,7 +43,7 @@
     """
 
     inputs = Input(shape=input_shape)
-    x = Conv2D(64, 9, activation='relu', padding='same')(inputs)
+    x = Conv2D(64, 9, activation='elu', padding='same')(inputs)
     x = MSB(4 * 16)(x)
     x = MaxPooling2D()(x)
     x = MSB(4 * 32)(x)
@@ -51,8 +51,8 @@
     x = MaxPooling2D()(x)
     x = MSB(3 * 64)(x)
     x = MSB(3 * 64)(x)
-    x = Conv2D(1000, 1, activation='relu', kernel_regularizer=l2(5e-4))(x)
-    x = Conv2D(1, 1, activation='relu')(x)
+    x = Conv2D(1000, 1, activation='elu', kernel_regularizer=l2(5e-4))(x)
+    x = Conv2D(1, 1, activation='elu')(x)
 
     model = Model(inputs=inputs, outputs=x)
 
diff --git a/test.py b/test.py
index 9466864..b8b2b57 100644
--- a/test.py
+++ b/test.py
@@ -38,5 +38,5 @@ def eva_regress(y_true, y_pred):
     dmap = model.predict(img)[0][:, :, 0]
     dmap = cv2.GaussianBlur(dmap, (15, 15), 0)
 
-    visualization(img[0], dmap)
+    visualization(img[0], np.expand_dims(dmap, axis=-1))
     print('count:', int(np.sum(dmap)))
diff --git a/train.py b/train.py
index bd6de3b..738229d 100644
--- a/train.py
+++ b/train.py
@@ -53,13 +53,13 @@ def train(batch, epochs, size):
     lr = ReduceLROnPlateau(monitor='loss', min_lr=1e-7)
 
     indices = list(range(1500))
-    train, test = train_test_split(indices, test_size=0.25)
+    train_ids, test_ids = train_test_split(indices, test_size=0.25)
 
     hist = model.fit_generator(
-        generator(train, batch, size),
-        validation_data=generator(test, batch, size),
-        steps_per_epoch=len(train) // batch,
-        validation_steps=len(test) // batch,
+        generator(train_ids, batch, size),
+        validation_data=generator(test_ids, batch, size),
+        steps_per_epoch=len(train_ids) // batch,
+        validation_steps=len(test_ids) // batch,
         epochs=epochs,
         callbacks=[lr])