# train.prototxt (https://github.com/briqr/CSPN)
name: "CSPN"

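# ImageData input: 321x321 images from PASCAL VOC 2012 with per-channel (BGR) mean
# subtraction; the second top, image_ids, lets the Python layers below look up the
# per-image annotations.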
layer {
  name: "Input"
  type: "ImageData"
  top: "images"
  top: "image_ids"
  transform_param {
    mirror: false
    mean_value: 104
    mean_value: 117
    mean_value: 123
  }
  image_data_param {
    root_folder: "/media/datasets/VOC2012/JPEGImages/"
    source: "/home/briq/libs/CSPN/training/input_list.txt"
    batch_size: 15
    new_height: 321
    new_width: 321
    shuffle: true
  }
}

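# Python layer that returns the weak (image-level) labels and the localization cues
# for each image id; no gradient flows back into the data layer (propagate_down: 0).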
layer {
  type: "Python"
  name: "Annotation"
  bottom: "image_ids"
  top: "labels"
  top: "cues"
  python_param {
    module: "pylayers"
    layer: "AnnotationLayer"
  }
  propagate_down: 0
}

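# VGG-16 convolutional backbone (conv1_1 .. conv5_3). pool4/pool5 use stride 1 and the
# conv5_* layers use dilation 2, so the feature maps keep a relatively fine resolution.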
layer {
  bottom: "images"
  top: "conv1_1"
  name: "conv1_1"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv1_1"
  top: "conv1_1"
  name: "relu1_1"
  type: "ReLU"
}

layer {
  bottom: "conv1_1"
  top: "conv1_2"
  name: "conv1_2"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv1_2"
  top: "conv1_2"
  name: "relu1_2"
  type: "ReLU"
}

layer {
  bottom: "conv1_2"
  top: "pool1"
  name: "pool1"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 1
  }
}

layer {
  bottom: "pool1"
  top: "conv2_1"
  name: "conv2_1"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv2_1"
  top: "conv2_1"
  name: "relu2_1"
  type: "ReLU"
}

layer {
  bottom: "conv2_1"
  top: "conv2_2"
  name: "conv2_2"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv2_2"
  top: "conv2_2"
  name: "relu2_2"
  type: "ReLU"
}

layer {
  bottom: "conv2_2"
  top: "pool2"
  name: "pool2"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 1
  }
}

layer {
  bottom: "pool2"
  top: "conv3_1"
  name: "conv3_1"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv3_1"
  top: "conv3_1"
  name: "relu3_1"
  type: "ReLU"
}

layer {
  bottom: "conv3_1"
  top: "conv3_2"
  name: "conv3_2"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv3_2"
  top: "conv3_2"
  name: "relu3_2"
  type: "ReLU"
}

layer {
  bottom: "conv3_2"
  top: "conv3_3"
  name: "conv3_3"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 } 
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv3_3"
  top: "conv3_3"
  name: "relu3_3"
  type: "ReLU"
}

layer {
  bottom: "conv3_3"
  top: "pool3"
  name: "pool3"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
    pad: 1
  }
}

layer {
  bottom: "pool3"
  top: "conv4_1"
  name: "conv4_1" 
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv4_1"
  top: "conv4_1"
  name: "relu4_1"
  type: "ReLU"
}

layer {
  bottom: "conv4_1"
  top: "conv4_2"
  name: "conv4_2"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv4_2"
  top: "conv4_2"
  name: "relu4_2"
  type: "ReLU"
}

layer {
  bottom: "conv4_2"
  top: "conv4_3"
  name: "conv4_3"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
  }
}

layer {
  bottom: "conv4_3"
  top: "conv4_3"
  name: "relu4_3"
  type: "ReLU"
}

layer {
  bottom: "conv4_3"
  top: "pool4"
  name: "pool4"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 3
    pad: 1
    stride: 1
  }
}

layer {
  bottom: "pool4"
  top: "conv5_1"
  name: "conv5_1"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 512
    pad: 2
    dilation: 2
    kernel_size: 3
  }
}

layer {
  bottom: "conv5_1"
  top: "conv5_1"
  name: "relu5_1"
  type: "ReLU"
}

layer {
  bottom: "conv5_1"
  top: "conv5_2"
  name: "conv5_2"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 512
    pad: 2
    dilation: 2
    kernel_size: 3
  }
}

layer {
  bottom: "conv5_2"
  top: "conv5_2"
  name: "relu5_2"
  type: "ReLU"
}

layer {
  bottom: "conv5_2"
  top: "conv5_3"
  name: "conv5_3"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 512
    pad: 2
    dilation: 2
    kernel_size: 3
  }
}

layer {
  bottom: "conv5_3"
  top: "conv5_3"
  name: "relu5_3"
  type: "ReLU"
}

layer {
  bottom: "conv5_3"
  top: "pool5"
  name: "pool5"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

layer {
  bottom: "pool5"
  top: "pool5a"
  name: "pool5a"
  type: "Pooling"
  pooling_param {
    pool: AVE
    kernel_size: 3
    stride: 1
    pad: 1
  }
}

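# fc6/fc7 reimplemented as convolutions; fc6 is atrous (dilation 12), giving the large
# field of view of DeepLab-LargeFOV.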
layer {
  bottom: "pool5a"
  top: "fc6"
  name: "fc6"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 1024
    pad: 12
    dilation: 12
    kernel_size: 3
  }
}

layer {
  bottom: "fc6"
  top: "fc6"
  name: "relu6"
  type: "ReLU"
}

layer {
  bottom: "fc6"
  top: "fc6"
  name: "drop6"
  type: "Dropout"
  dropout_param {
    dropout_ratio: 0.5
  }
}

layer {
  bottom: "fc6"
  top: "fc7"
  name: "fc7"
  type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }  
  convolution_param {
    num_output: 1024
    kernel_size: 1
  }
}

layer {
  bottom: "fc7"
  top: "fc7"
  name: "relu7"
  type: "ReLU"
}

layer {
  bottom: "fc7"
  top: "fc7"
  name: "drop7"
  type: "Dropout"
  dropout_param {
    dropout_ratio: 0.5
  }
}

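# Classification head: 1x1 convolution producing 21 score maps (presumably 20 PASCAL VOC
# classes plus background), trained from scratch with 10x/20x learning rate multipliers.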
layer {
  bottom: "fc7"
  top: "fc8-SEC"
  name: "fc8-SEC"
  type: "Convolution"
  param { lr_mult: 10 decay_mult: 1 }
  param { lr_mult: 20 decay_mult: 0 }  
  convolution_param {
    num_output: 21
    kernel_size: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

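# Per-pixel softmax over the 21 score maps, implemented as a Python layer.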
layer {
  type: "Python"
  name: "Softmax"
  bottom: "fc8-SEC"
  top: "fc8-SEC-Softmax"
  python_param {
    module: "pylayers"
    layer: "SoftmaxLayer"
  }
  propagate_down: 1
}

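# All "Python" layers in this file implement Caffe's python layer interface
# (setup / reshape / forward / backward on a caffe.Layer subclass). As a rough,
# illustrative sketch only (not the repo's actual pylayers code), a channel-wise
# softmax layer could look like this:
#
#   import caffe
#   import numpy as np
#
#   class SoftmaxLayer(caffe.Layer):
#       def setup(self, bottom, top):
#           pass                                   # no parameters to parse
#
#       def reshape(self, bottom, top):
#           top[0].reshape(*bottom[0].data.shape)  # same N x C x H x W shape
#
#       def forward(self, bottom, top):
#           s = bottom[0].data
#           s = s - s.max(axis=1, keepdims=True)   # stabilise the exponentials
#           e = np.exp(s)
#           top[0].data[...] = e / e.sum(axis=1, keepdims=True)
#
#       def backward(self, top, propagate_down, bottom):
#           if propagate_down[0]:
#               p, g = top[0].data, top[0].diff
#               bottom[0].diff[...] = p * (g - (g * p).sum(axis=1, keepdims=True))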

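# CRF smoothing of the class scores conditioned on the input image; the resulting
# log-probabilities feed the constrain loss below. Gradients flow only into fc8-SEC.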
layer {
  type: "Python"
  name: "CRF"
  bottom: "fc8-SEC"
  bottom: "images"
  top: "fc8-SEC-CRF-log"
  python_param {
    module: "pylayers"
    layer: "CRFLayer"
  }
  propagate_down: 1
  propagate_down: 0
}




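# CSPN simplex-projection layer: projects the softmax output onto the simplex constrained
# by the weak image-level labels, producing the target distribution simplex_proj.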
layer {
  type: "Python"
  name: "simplex"
  bottom: "fc8-SEC-Softmax"
  bottom: "labels"      # the weak labels
  bottom: "image_ids"
  bottom: "fc8-SEC"
  top: "simplex_proj"
  python_param {
    module: "projection_layer_CSPN"
    layer: "SimplexProjectionLayer"
  }
}


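# Converts the projected distribution into hard per-pixel labels via an argmax.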
layer {
  bottom: "simplex_proj"
  top: "label_proj"
  name: "argmax"
  type: "Python"
  python_param {
    module: "argmax_layer_CSPN"
    layer: "ArgmaxLayer"
  }
}


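# Softmax cross-entropy between the raw scores and the projected hard labels
# (label 255 is ignored).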
layer {
  name: "loss_hard"
  type: "SoftmaxWithLoss"
  bottom: "fc8-SEC"
  bottom: "label_proj"
  top: "softmax-loss-argmax"
  loss_param {
    ignore_label: 255
  }
  loss_weight: 1.0
  include: { phase: TRAIN }
}



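# Disabled alternative to loss_hard: a soft loss comparing the softmax output directly
# with the projected distribution.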
#layer {
#  name: "loss_soft"
#  type: "Python"
#  bottom: "fc8-SEC-Softmax"
#  bottom: "simplex_proj"
#  top: "softmax-loss-soft"
#  python_param {
#    module: "softmax_loss_CSPN"
#    layer: "ProjectionSoftmax"
#  }
#  loss_weight: 1.0
#  include: { phase: TRAIN }
#}




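# SEC seeding loss: encourages the softmax output to agree with the localization cues.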
layer {
  bottom: "fc8-SEC-Softmax"
  bottom: "cues"
  top: "loss-Seed"
  name: "loss-Seed"
  type: "Python"
  python_param {
    module: "pylayers"
    layer: "SeedLossLayer"
  }
  loss_weight: 1
}

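# SEC constrain-to-boundary loss: penalizes divergence between the softmax output and the
# CRF-smoothed distribution.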
layer {
  bottom: "fc8-SEC-Softmax"
  bottom: "fc8-SEC-CRF-log"
  top: "loss-Constrain"
  name: "loss-Constrain"
  type: "Python"
  python_param {
    module: "pylayers"
    layer: "ConstrainLossLayer"
  }
  loss_weight: 1
}


