#!/bin/bash
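# run_gpu: set up hyperparameters and launch training of a DSR or DQN agent
# on the given game via `th train_agent.lua` (see the usage message below for arguments).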
if [ -z "$1" ]; then
  echo "Usage: ./run_gpu <game_name> <seed> <dsr|dqn> <gpu_id> <learn_start> <eval_steps> <num_color_channels> <feature_dim> <collect_data 0/1>"
  echo "e.g.   ./run_gpu MovingGoalsMedium 8000 dsr 1 5000 500 1 256 0"
  # 1-game_name 2-seed 3-dsr/dqn 4-gpu_id 5-learn_start 6-eval_steps 7-color channels 8-sr_dims 9-collect_sr
  # doom => ./run_gpu doom 8000 dsr 1 30000 10000 3 512 0
  exit 1
fi

ENV=$1
game_name=$1
if [ "$game_name" = "doom" ]; then
	FRAMEWORK="../games/init.lua" # "../mbwrap/init.lua" #"alewrap"
else
	FRAMEWORK="../mbwrap/init.lua" #"alewrap"
fi	
num_samples=50000
sample_collect=$9 #if 1 then collects num_samples samples of (SR, raw state) in hdf5
agent_folder=$3 #'dqn' or 'dsr'
game_path=$PWD"/roms/"
env_params="useRGB=true"
agent="NeuralQLearner"
n_replay=1
netfile="\"convnet_atari3\""
update_freq=4
actrep=4
discount=0.99
seed=$2
learn_start=$5 #50000
pool_frms_type="\"max\""
pool_frms_size=2
initial_priority="false"
replay_memory=1000000
eps_end=0.1
eps_endt=100000 # 1000000
lr=0.00025
srdims=$8
agent_type="DQN3_0_1"
preproc_net="\"net_downsample_2x_full_y\""
agent_name=$agent_folder"_"$1"_"$2"_"$3"_"$4
ncols=$7
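# Note: the expression below is stored as a literal string (e.g. "7056*1") and passed
# through agent_params; it is assumed to be evaluated on the Lua side when parsed.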
state_dim=7056*${ncols}
agent_params="num_samples="$num_samples",sample_collect="$sample_collect",lr="$lr",ep=1,ep_end="$eps_end",srdims="$srdims",ep_endt="$eps_endt",discount="$discount",hist_len=1,learn_start="$learn_start",replay_memory="$replay_memory",update_freq="$update_freq",n_replay="$n_replay",network="$netfile",preproc="$preproc_net",state_dim="$state_dim",minibatch_size=64,rescale_r=1,ncols="$ncols",bufferSize=128,valid_size=64,target_q=5000,clip_delta=1,min_reward=-1,max_reward=1"
steps=50000000
eval_freq=3000 #250000
eval_steps=$6 #125000
prog_freq=5000
save_freq=10000
gpu=$4
random_starts=0
pool_frms="type="$pool_frms_type",size="$pool_frms_size
num_threads=4
reconstruction_steps=1

# (dsr and dqn currently use the same argument list)
args="-game_name $game_name -framework $FRAMEWORK -game_path $game_path -name $agent_name"
args+=" -env $ENV -env_params $env_params -agent $agent -agent_params $agent_params"
args+=" -steps $steps -eval_freq $eval_freq -eval_steps $eval_steps -prog_freq $prog_freq"
args+=" -save_freq $save_freq -actrep $actrep -gpu $gpu -random_starts $random_starts"
args+=" -pool_frms $pool_frms -seed $seed -threads $num_threads"
echo $args

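# Run training from inside the selected agent directory ('dqn' or 'dsr').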
cd "$agent_folder" || exit 1
th train_agent.lua $args
# qlua train_agent.lua $args