diff --git a/.travis.yml b/.travis.yml index 2ce8731d6c8..aeeeef03cc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,6 +21,7 @@ addons: - gfortran-4.9 - liblapack-dev - clang-3.8 + - shellcheck branches: only: diff --git a/egs/wsj/s5/local/append_utterances.sh b/egs/wsj/s5/local/append_utterances.sh index e94c19d5cb7..7daa5cff76c 100755 --- a/egs/wsj/s5/local/append_utterances.sh +++ b/egs/wsj/s5/local/append_utterances.sh @@ -7,10 +7,10 @@ pad_silence=0.5 # End configuration section. -echo "$0 $@" +echo "$0 $*" [ -f ./path.sh ] && . ./path.sh -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -ne 2 ]; then echo "Usage: $0 [options] " @@ -27,8 +27,12 @@ for f in spk2gender spk2utt text utt2spk wav.scp; do done # Checks if sox is on the path. -sox=`which sox` -[ $? -ne 0 ] && "sox: command not found." && exit 1; +#sox=`which sox` +#[ $? -ne 0 ] && "sox: command not found." && exit 1; +if ! sox=`which sox`; then + echo "sox: command not found"; + exit 1; +fi sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe [ ! -x $sph2pipe ] && "sph2pipe: command not found." 
&& exit 1; diff --git a/egs/wsj/s5/local/chain/compare_wer.sh b/egs/wsj/s5/local/chain/compare_wer.sh index edfefad547f..a403af8b689 100755 --- a/egs/wsj/s5/local/chain/compare_wer.sh +++ b/egs/wsj/s5/local/chain/compare_wer.sh @@ -59,7 +59,7 @@ set_names() { echo -n "# System " -for x in $*; do printf "% 10s" " $(basename $x)"; done +for x in "$@"; do printf "% 10s" " $(basename $x)"; done echo strings=( @@ -74,7 +74,7 @@ strings=( for n in 0 1 2 3 4 5 6 7; do echo -n "${strings[$n]}" - for x in $*; do + for x in "$@"; do set_names $x # sets $dirname and $epoch_infix decode_names=(tgpr_dev93 tg_dev93 bd_tgpr_dev93 bd_tgpr_dev93_fg tgpr_eval92 tg_eval92 bd_tgpr_eval92 bd_tgpr_eval92_fg) @@ -84,7 +84,7 @@ for n in 0 1 2 3 4 5 6 7; do echo if $include_looped; then echo -n "# [looped:] " - for x in $*; do + for x in "$@"; do set_names $x # sets $dirname and $epoch_infix wer=$(cat $dirname/decode_looped_${decode_names[$n]}/scoring_kaldi/best_wer | utils/best_wer.sh | awk '{print $2}') printf "% 10s" $wer @@ -93,7 +93,7 @@ for n in 0 1 2 3 4 5 6 7; do fi if $include_online; then echo -n "# [online:] " - for x in $*; do + for x in "$@"; do set_names $x # sets $dirname and $epoch_infix wer=$(cat ${dirname}_online/decode_${decode_names[$n]}/scoring_kaldi/best_wer | utils/best_wer.sh | awk '{print $2}') printf "% 10s" $wer @@ -109,28 +109,28 @@ fi echo -n "# Final train prob " -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -v xent | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final valid prob " -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -v xent | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final train prob (xent)" -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -w xent | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final valid prob (xent)" 
-for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -w xent | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done diff --git a/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1a.sh b/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1a.sh index ceca428f5c1..3198f76b256 100755 --- a/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1a.sh +++ b/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1a.sh @@ -66,7 +66,7 @@ remove_egs=true test_online_decoding=false # if true, it will run the last decoding stage. # End configuration section. -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . ./cmd.sh @@ -219,7 +219,7 @@ fi if [ $stage -le 16 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-$(date +'%m_%d_%H_%M')/s5_r2/$dir/egs/storage $dir/egs/storage + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-"$(date +'%m_%d_%H_%M')"/s5_r2/$dir/egs/storage $dir/egs/storage fi steps/nnet3/chain/train.py --stage=$train_stage \ diff --git a/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1b.sh b/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1b.sh index a3a747ed743..ec66f205741 100755 --- a/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1b.sh +++ b/egs/wsj/s5/local/chain/tuning/run_cnn_tdnn_1b.sh @@ -69,7 +69,7 @@ remove_egs=true test_online_decoding=false # if true, it will run the last decoding stage. # End configuration section. -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . ./cmd.sh @@ -222,7 +222,7 @@ fi if [ $stage -le 16 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-$(date +'%m_%d_%H_%M')/s5_r2/$dir/egs/storage $dir/egs/storage + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-"$(date +'%m_%d_%H_%M')"/s5_r2/$dir/egs/storage $dir/egs/storage fi steps/nnet3/chain/train.py --stage=$train_stage \ diff --git a/egs/wsj/s5/local/chain/tuning/run_tdnn_1a.sh b/egs/wsj/s5/local/chain/tuning/run_tdnn_1a.sh index 10a9c608811..73d2cb096da 100755 --- a/egs/wsj/s5/local/chain/tuning/run_tdnn_1a.sh +++ b/egs/wsj/s5/local/chain/tuning/run_tdnn_1a.sh @@ -82,7 +82,7 @@ remove_egs=true test_online_decoding=false # if true, it will run the last decoding stage. # End configuration section. -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . ./cmd.sh @@ -226,7 +226,7 @@ fi if [ $stage -le 16 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-"$(date +'%m_%d_%H_%M')"/s5/$dir/egs/storage $dir/egs/storage fi steps/nnet3/chain/train.py --stage=$train_stage \ diff --git a/egs/wsj/s5/local/chain/tuning/run_tdnn_1b.sh b/egs/wsj/s5/local/chain/tuning/run_tdnn_1b.sh index a2bb7e93388..efbd775f6af 100755 --- a/egs/wsj/s5/local/chain/tuning/run_tdnn_1b.sh +++ b/egs/wsj/s5/local/chain/tuning/run_tdnn_1b.sh @@ -57,7 +57,7 @@ remove_egs=true test_online_decoding=false # if true, it will run the last decoding stage. # End configuration section. -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . ./cmd.sh @@ -201,7 +201,7 @@ fi if [ $stage -le 16 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-"$(date +'%m_%d_%H_%M')"/s5/$dir/egs/storage $dir/egs/storage fi steps/nnet3/chain/train.py --stage=$train_stage \ diff --git a/egs/wsj/s5/local/chain/tuning/run_tdnn_lstm_1a.sh b/egs/wsj/s5/local/chain/tuning/run_tdnn_lstm_1a.sh index 4b752a55a4b..f4949d1e689 100755 --- a/egs/wsj/s5/local/chain/tuning/run_tdnn_lstm_1a.sh +++ b/egs/wsj/s5/local/chain/tuning/run_tdnn_lstm_1a.sh @@ -80,7 +80,7 @@ remove_egs=true test_online_decoding=false # if true, it will run the last decoding stage. # End configuration section. -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . ./cmd.sh @@ -225,7 +225,7 @@ fi if [ $stage -le 16 ]; then if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-$(date +'%m_%d_%H_%M')/s5_r2/$dir/egs/storage $dir/egs/storage + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/tedlium-"$(date +'%m_%d_%H_%M')"/s5_r2/$dir/egs/storage $dir/egs/storage fi steps/nnet3/chain/train.py --stage=$train_stage \ diff --git a/egs/wsj/s5/local/cstr_wsj_data_prep.sh b/egs/wsj/s5/local/cstr_wsj_data_prep.sh index 755edda9fed..0014e99be35 100755 --- a/egs/wsj/s5/local/cstr_wsj_data_prep.sh +++ b/egs/wsj/s5/local/cstr_wsj_data_prep.sh @@ -9,7 +9,7 @@ set -e # - Arnab Ghoshal, 29/05/12 if [ $# -ne 1 ]; then - printf "\nUSAGE: %s \n\n" `basename $0` + printf "\nUSAGE: %s \n\n" "`basename $0`" echo "The argument should be a the top-level WSJ corpus directory." echo "It is assumed that there will be a 'wsj0' and a 'wsj1' subdirectory" echo "within the top-level corpus directory." @@ -169,7 +169,7 @@ prune-lm --threshold=1e-7 $lmdir/lm_tg_5k.arpa.gz $lmdir/lm_tgpr_5k.arpa || exit gzip -f $lmdir/lm_tgpr_5k.arpa || exit 1; -if [ ! 
-f wsj0-train-spkrinfo.txt ] || [ `cat wsj0-train-spkrinfo.txt | wc -l` -ne 134 ]; then +if [ ! -f wsj0-train-spkrinfo.txt ] || [ "`cat wsj0-train-spkrinfo.txt | wc -l`" -ne 134 ]; then rm -f wsj0-train-spkrinfo.txt wget https://catalog.ldc.upenn.edu/docs/LDC93S6A/wsj0-train-spkrinfo.txt \ || ( echo "Getting wsj0-train-spkrinfo.txt from backup location" && \ diff --git a/egs/wsj/s5/local/cstr_wsj_extend_dict.sh b/egs/wsj/s5/local/cstr_wsj_extend_dict.sh index 8004db1d924..295d7660a39 100755 --- a/egs/wsj/s5/local/cstr_wsj_extend_dict.sh +++ b/egs/wsj/s5/local/cstr_wsj_extend_dict.sh @@ -14,7 +14,7 @@ dict_suffix= -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . utils/parse_options.sh || exit 1; if [ $# -ne 1 ]; then @@ -57,7 +57,7 @@ echo "Getting training data [this should take at least a few seconds; if not, th # oov.counts below (before adding this rule). touch $dir/cleaned.gz -if [ `du -m $dir/cleaned.gz | cut -f 1` -eq 73 ]; then +if [ "`du -m $dir/cleaned.gz | cut -f 1`" -eq 73 ]; then echo "Not getting cleaned data in $dir/cleaned.gz again [already exists]"; else gunzip -c $srcdir/lng_modl/lm_train/np_data/{87,88,89}/*.z \ @@ -126,7 +126,7 @@ reverse_dict.pl $dir/f/oovs > $dir/b/oovs # that it finds. 
for d in $dir/f $dir/b; do ( - cd $d + cd $d || exit 1; cat dict | get_rules.pl 2>get_rules.log >rules get_rule_hierarchy.pl rules >hierarchy awk '{print $1}' dict | get_candidate_prons.pl rules dict | \ @@ -165,7 +165,7 @@ head $dir/oovlist.not_handled.counts echo "Count of OOVs we handled is `awk '{x+=$1} END{print x}' $dir/oovlist.handled.counts`" echo "Count of OOVs we couldn't handle is `awk '{x+=$1} END{print x}' $dir/oovlist.not_handled.counts`" echo "Count of OOVs we didn't handle due to low count is" \ - `awk -v thresh=$mincount '{if ($1 < thresh) x+=$1; } END{print x;}' $dir/oov.counts` + "`awk -v thresh=$mincount '{if ($1 < thresh) x+=$1; } END{print x;}' $dir/oov.counts`" # The two files created above are for humans to look at, as diagnostics. cat < $dir/lexicon.txt diff --git a/egs/wsj/s5/local/nnet3/compare_wer.sh b/egs/wsj/s5/local/nnet3/compare_wer.sh index 7a2fbd8a123..39f766eb6c5 100755 --- a/egs/wsj/s5/local/nnet3/compare_wer.sh +++ b/egs/wsj/s5/local/nnet3/compare_wer.sh @@ -59,7 +59,7 @@ set_names() { echo -n "# System " -for x in $*; do printf "% 10s" " $(basename $x)"; done +for x in "$@"; do printf "% 10s" " $(basename $x)"; done echo strings=( @@ -74,7 +74,7 @@ strings=( for n in 0 1 2 3 4 5 6 7; do echo -n "${strings[$n]}" - for x in $*; do + for x in "$@"; do set_names $x # sets $dirname and $epoch_infix decode_names=(tgpr_dev93 tg_dev93 bd_tgpr_dev93 bd_tgpr_dev93_fg tgpr_eval92 tg_eval92 bd_tgpr_eval92 bd_tgpr_eval92_fg) @@ -84,7 +84,7 @@ for n in 0 1 2 3 4 5 6 7; do echo if $include_looped; then echo -n "# [looped:] " - for x in $*; do + for x in "$@"; do set_names $x # sets $dirname and $epoch_infix wer=$(cat $dirname/decode_looped_${decode_names[$n]}$epoch_infix/scoring_kaldi/best_wer | utils/best_wer.sh | awk '{print $2}') printf "% 10s" $wer @@ -93,7 +93,7 @@ for n in 0 1 2 3 4 5 6 7; do fi if $include_online; then echo -n "# [online:] " - for x in $*; do + for x in "$@"; do set_names $x # sets $dirname and $epoch_infix wer=$(cat 
${dirname}_online/decode_${decode_names[$n]}$epoch_infix/scoring_kaldi/best_wer | utils/best_wer.sh | awk '{print $2}') printf "% 10s" $wer @@ -109,28 +109,28 @@ fi echo -n "# Final train prob " -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_train.{final,combined}.log 2>/dev/null | grep log-like | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final valid prob " -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_valid.{final,combined}.log 2>/dev/null | grep log-like | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final train acc " -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_train.{final,combined}.log 2>/dev/null | grep accuracy | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done echo echo -n "# Final valid acc " -for x in $*; do +for x in "$@"; do prob=$(grep Overall $x/log/compute_prob_valid.{final,combined}.log 2>/dev/null | grep accuracy | awk '{printf("%.4f", $8)}') printf "% 10s" $prob done diff --git a/egs/wsj/s5/local/nnet3/run_ivector_common.sh b/egs/wsj/s5/local/nnet3/run_ivector_common.sh index eb63754d23f..fab444227b4 100755 --- a/egs/wsj/s5/local/nnet3/run_ivector_common.sh +++ b/egs/wsj/s5/local/nnet3/run_ivector_common.sh @@ -57,7 +57,7 @@ if [ $stage -le 2 ]; then # them overwrite each other. mfccdir=data/${train_set}_sp_hires/data if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then - utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage + utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/egs/wsj-"$(date +'%m_%d_%H_%M')"/s5/$mfccdir/storage $mfccdir/storage fi for datadir in ${train_set}_sp ${test_sets}; do @@ -84,7 +84,7 @@ if [ $stage -le 3 ]; then # train a diagonal UBM using a subset of about a quarter of the data num_utts_total=$(wc -l [decode-dir3 ... 
] +help_message="Usage: $(basename $0) [options] [decode-dir3 ... ] Options: --cmd (run.pl|queue.pl...) # specify how to run the sub-processes. --min-lmwt INT # minumum LM-weight for lattice rescoring @@ -38,10 +38,10 @@ Options: "; [ -f ./path.sh ] && . ./path.sh -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -lt 5 ]; then - printf "$help_message\n"; + printf "%s\n" "$help_message"; exit 1; fi @@ -49,7 +49,7 @@ data=$1 graphdir=$2 odir=${@: -1} # last argument to the script shift 2; -decode_dirs=( $@ ) # read the remaining arguments into an array +decode_dirs=( "$@" ) # read the remaining arguments into an array unset decode_dirs[${#decode_dirs[@]}-1] # 'pop' the last argument which is odir num_sys=${#decode_dirs[@]} # number of systems to combine @@ -60,7 +60,7 @@ symtab=$graphdir/words.txt mkdir -p $odir/log -for i in `seq 0 $[num_sys-1]`; do +for i in `seq 0 $((num_sys-1))`; do model=${decode_dirs[$i]}/../final.mdl # model one level up from decode dir for f in $model ${decode_dirs[$i]}/lat.1.gz ; do [ ! 
-f $f ] && echo "$0: expecting file $f to exist" && exit 1; @@ -75,13 +75,13 @@ cat $data/text | sed 's:::g' | sed 's:::g' \ if [ -z "$lat_weights" ]; then $cmd LMWT=$min_lmwt:$max_lmwt $odir/log/combine_lats.LMWT.log \ - lattice-combine --inv-acoustic-scale=LMWT ${lats[@]} ark:- \| \ + lattice-combine --inv-acoustic-scale=LMWT "${lats[@]}" ark:- \| \ lattice-mbr-decode --word-symbol-table=$symtab ark:- \ ark,t:$odir/scoring/LMWT.tra || exit 1; else $cmd LMWT=$min_lmwt:$max_lmwt $odir/log/combine_lats.LMWT.log \ lattice-combine --inv-acoustic-scale=LMWT --lat-weights=$lat_weights \ - ${lats[@]} ark:- \| \ + "${lats[@]}" ark:- \| \ lattice-mbr-decode --word-symbol-table=$symtab ark:- \ ark,t:$odir/scoring/LMWT.tra || exit 1; fi diff --git a/egs/wsj/s5/local/score_mbr.sh b/egs/wsj/s5/local/score_mbr.sh index 04b84ccce5a..9d02869d91d 100755 --- a/egs/wsj/s5/local/score_mbr.sh +++ b/egs/wsj/s5/local/score_mbr.sh @@ -11,7 +11,7 @@ max_lmwt=17 #end configuration section. [ -f ./path.sh ] && . ./path.sh -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -ne 3 ]; then echo "Usage: local/score_mbr.sh [--cmd (run.pl|queue.pl...)] " diff --git a/egs/wsj/s5/local/wsj_data_prep.sh b/egs/wsj/s5/local/wsj_data_prep.sh index 04f2f6390d8..fa7bc68bd22 100755 --- a/egs/wsj/s5/local/wsj_data_prep.sh +++ b/egs/wsj/s5/local/wsj_data_prep.sh @@ -36,15 +36,15 @@ if ! command -v prune-lm >/dev/null 2>&1 ; then exit 1 fi -cd $dir +cd $dir || exit 1; # Make directory of links to the WSJ disks such as 11-13.1. This relies on the command # line arguments being absolute pathnames. rm -r links/ 2>/dev/null mkdir links/ -ln -s $* links +ln -s "$@" links # Do some basic checks that we have what we expected. -if [ ! -d links/11-13.1 -o ! -d links/13-34.1 -o ! -d links/11-2.1 ]; then +if [ ! -d links/11-13.1 ] || [ ! -d links/13-34.1 ] || [ ! 
-d links/11-2.1 ]; then echo "wsj_data_prep.sh: Spot check of command line arguments failed" echo "Command line arguments must be absolute pathnames to WSJ directories" echo "with names like 11-13.1." @@ -56,7 +56,7 @@ fi # This version for SI-84 cat links/11-13.1/wsj0/doc/indices/train/tr_s_wv1.ndx | \ - $local/ndx2flist.pl $* | sort | \ + $local/ndx2flist.pl "$@" | sort | \ grep -v -i 11-2.1/wsj0/si_tr_s/401 > train_si84.flist nl=`cat train_si84.flist | wc -l` @@ -65,7 +65,7 @@ nl=`cat train_si84.flist | wc -l` # This version for SI-284 cat links/13-34.1/wsj1/doc/indices/si_tr_s.ndx \ links/11-13.1/wsj0/doc/indices/train/tr_s_wv1.ndx | \ - $local/ndx2flist.pl $* | sort | \ + $local/ndx2flist.pl "$@" | sort | \ grep -v -i 11-2.1/wsj0/si_tr_s/401 > train_si284.flist nl=`cat train_si284.flist | wc -l` @@ -85,32 +85,32 @@ nl=`cat train_si284.flist | wc -l` # These index files have a slightly different format; # have to add .wv1 cat links/11-13.1/wsj0/doc/indices/test/nvp/si_et_20.ndx | \ - $local/ndx2flist.pl $* | awk '{printf("%s.wv1\n", $1)}' | \ + $local/ndx2flist.pl "$@" | awk '{printf("%s.wv1\n", $1)}' | \ sort > test_eval92.flist # Nov'92 (330 utts, 5k vocab) cat links/11-13.1/wsj0/doc/indices/test/nvp/si_et_05.ndx | \ - $local/ndx2flist.pl $* | awk '{printf("%s.wv1\n", $1)}' | \ + $local/ndx2flist.pl "$@" | awk '{printf("%s.wv1\n", $1)}' | \ sort > test_eval92_5k.flist # Nov'93: (213 utts) # Have to replace a wrong disk-id. 
cat links/13-32.1/wsj1/doc/indices/wsj1/eval/h1_p0.ndx | \ sed s/13_32_1/13_33_1/ | \ - $local/ndx2flist.pl $* | sort > test_eval93.flist + $local/ndx2flist.pl "$@" | sort > test_eval93.flist # Nov'93: (213 utts, 5k) cat links/13-32.1/wsj1/doc/indices/wsj1/eval/h2_p0.ndx | \ sed s/13_32_1/13_33_1/ | \ - $local/ndx2flist.pl $* | sort > test_eval93_5k.flist + $local/ndx2flist.pl "$@" | sort > test_eval93_5k.flist # Dev-set for Nov'93 (503 utts) cat links/13-34.1/wsj1/doc/indices/h1_p0.ndx | \ - $local/ndx2flist.pl $* | sort > test_dev93.flist + $local/ndx2flist.pl "$@" | sort > test_dev93.flist # Dev-set for Nov'93 (513 utts, 5k vocab) cat links/13-34.1/wsj1/doc/indices/h2_p0.ndx | \ - $local/ndx2flist.pl $* | sort > test_dev93_5k.flist + $local/ndx2flist.pl "$@" | sort > test_dev93_5k.flist # Dev-set Hub 1,2 (503, 913 utterances) @@ -118,12 +118,12 @@ cat links/13-34.1/wsj1/doc/indices/h2_p0.ndx | \ # Note: the ???'s below match WSJ and SI_DT, or wsj and si_dt. # Sometimes this gets copied from the CD's with upcasing, don't know # why (could be older versions of the disks). -find `readlink links/13-16.1`/???1/??_??_20 -print | grep -i ".wv1" | sort > dev_dt_20.flist -find `readlink links/13-16.1`/???1/??_??_05 -print | grep -i ".wv1" | sort > dev_dt_05.flist +find "`readlink links/13-16.1`"/???1/??_??_20 -print | grep -i ".wv1" | sort > dev_dt_20.flist +find "`readlink links/13-16.1`"/???1/??_??_05 -print | grep -i ".wv1" | sort > dev_dt_05.flist # Finding the transcript files: -for x in $*; do find -L $x -iname '*.dot'; done > dot_files.flist +for x in "$@"; do find -L $x -iname '*.dot'; done > dot_files.flist # Convert the transcripts into our format (no normalization yet) for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do @@ -184,7 +184,7 @@ prune-lm --threshold=1e-7 $lmdir/lm_tg_5k.arpa.gz $lmdir/lm_tgpr_5k.arpa || exit gzip -f $lmdir/lm_tgpr_5k.arpa || exit 1; -if [ ! 
-f wsj0-train-spkrinfo.txt ] || [ `cat wsj0-train-spkrinfo.txt | wc -l` -ne 134 ]; then +if [ ! -f wsj0-train-spkrinfo.txt ] || [ "`cat wsj0-train-spkrinfo.txt | wc -l`" -ne 134 ]; then rm wsj0-train-spkrinfo.txt ! wget https://catalog.ldc.upenn.edu/docs/LDC93S6A/wsj0-train-spkrinfo.txt && \ echo "Getting wsj0-train-spkrinfo.txt from backup location" && \ diff --git a/egs/wsj/s5/local/wsj_extend_dict.sh b/egs/wsj/s5/local/wsj_extend_dict.sh index c2b11b8dc8b..fb993ddc6a9 100755 --- a/egs/wsj/s5/local/wsj_extend_dict.sh +++ b/egs/wsj/s5/local/wsj_extend_dict.sh @@ -14,7 +14,7 @@ dict_suffix= -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . utils/parse_options.sh || exit 1; if [ $# -ne 1 ]; then @@ -62,7 +62,7 @@ echo "Getting training data [this should take at least a few seconds; if not, th # oov.counts below (before adding this rule). touch $dir/cleaned.gz -if [ `du -m $dir/cleaned.gz | cut -f 1` -eq 73 ]; then +if [ "`du -m $dir/cleaned.gz | cut -f 1`" -eq 73 ]; then echo "Not getting cleaned data in $dir/cleaned.gz again [already exists]"; else gunzip -c $srcdir/wsj1/doc/lng_modl/lm_train/np_data/{87,88,89}/*.z \ @@ -131,7 +131,7 @@ reverse_dict.pl $dir/f/oovs > $dir/b/oovs # that it finds. 
for d in $dir/f $dir/b; do ( - cd $d + cd $d || exit 1; cat dict | get_rules.pl 2>get_rules.log >rules get_rule_hierarchy.pl rules >hierarchy awk '{print $1}' dict | get_candidate_prons.pl rules dict | \ @@ -170,7 +170,7 @@ head $dir/oovlist.not_handled.counts echo "Count of OOVs we handled is `awk '{x+=$1} END{print x}' $dir/oovlist.handled.counts`" echo "Count of OOVs we couldn't handle is `awk '{x+=$1} END{print x}' $dir/oovlist.not_handled.counts`" echo "Count of OOVs we didn't handle due to low count is" \ - `awk -v thresh=$mincount '{if ($1 < thresh) x+=$1; } END{print x;}' $dir/oov.counts` + "`awk -v thresh=$mincount '{if ($1 < thresh) x+=$1; } END{print x;}' $dir/oov.counts`" # The two files created above are for humans to look at, as diagnostics. cat < $dir/lexicon.txt diff --git a/egs/wsj/s5/local/wsj_format_data.sh b/egs/wsj/s5/local/wsj_format_data.sh index 897b904db83..e83ba5c73e5 100755 --- a/egs/wsj/s5/local/wsj_format_data.sh +++ b/egs/wsj/s5/local/wsj_format_data.sh @@ -15,7 +15,7 @@ lang_suffix= -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . utils/parse_options.sh || exit 1; . ./path.sh || exit 1; diff --git a/egs/wsj/s5/local/wsj_format_local_lms.sh b/egs/wsj/s5/local/wsj_format_local_lms.sh index c415a806fff..f875694a357 100755 --- a/egs/wsj/s5/local/wsj_format_local_lms.sh +++ b/egs/wsj/s5/local/wsj_format_local_lms.sh @@ -5,7 +5,7 @@ lang_suffix= -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . ./path.sh . utils/parse_options.sh || exit 1; diff --git a/egs/wsj/s5/local/wsj_prepare_dict.sh b/egs/wsj/s5/local/wsj_prepare_dict.sh index c644f91bc6e..edd8454cb92 100755 --- a/egs/wsj/s5/local/wsj_prepare_dict.sh +++ b/egs/wsj/s5/local/wsj_prepare_dict.sh @@ -31,7 +31,7 @@ # run this from ../ dict_suffix= -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . 
utils/parse_options.sh || exit 1; dir=data/local/dict${dict_suffix} diff --git a/egs/wsj/s5/local/wsj_train_lms.sh b/egs/wsj/s5/local/wsj_train_lms.sh index 0807210be18..891af985307 100755 --- a/egs/wsj/s5/local/wsj_train_lms.sh +++ b/egs/wsj/s5/local/wsj_train_lms.sh @@ -8,7 +8,7 @@ dict_suffix= -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging . utils/parse_options.sh || exit 1; dir=data/local/local_lm @@ -34,7 +34,7 @@ export PATH=$KALDI_ROOT/tools/kaldi_lm:$PATH -if [ ! -f $srcdir/cleaned.gz -o ! -f $srcdir/lexicon.txt ]; then +if [ ! -f $srcdir/cleaned.gz ] || [ ! -f $srcdir/lexicon.txt ]; then echo "Expecting files $srcdir/cleaned.gz and $srcdir/lexicon.txt to exist"; echo "You need to run local/wsj_extend_dict.sh before running this script." exit 1; @@ -194,8 +194,8 @@ gunzip -c $srcdir/cleaned.gz | tail -n +$heldout_sent | add-start-end.sh | \ gzip -c > $idir/train.gz dict -i=WSJ.cleaned.irstlm.txt -o=dico -f=y -sort=no - cat dico | gawk 'BEGIN{while (getline<"vocab.20k.nooov") v[$1]=1; print "DICTIONARY 0 "length(v);}FNR>1{if ($1 in v)\ -{print $0;}}' > vocab.irstlm.20k + cat dico | gawk 'BEGIN{while (getline<"vocab.20k.nooov") v[$1]=1; print "DICTIONARY 0 "length(v);}FNR>1{if ($1 in v)'\ +'{print $0;}}' > vocab.irstlm.20k build-lm.sh -i "gunzip -c $idir/train.gz" -o $idir/lm_3gram.gz -p yes \ diff --git a/egs/wsj/s5/local/wsj_train_rnnlms.sh b/egs/wsj/s5/local/wsj_train_rnnlms.sh index 4472c1c52ca..781b71b21f4 100755 --- a/egs/wsj/s5/local/wsj_train_rnnlms.sh +++ b/egs/wsj/s5/local/wsj_train_rnnlms.sh @@ -43,7 +43,7 @@ $KALDI_ROOT/tools/extras/check_for_rnnlm.sh "$rnnlm_ver" || exit 1 export PATH=$KALDI_ROOT/tools/$rnnlm_ver:$PATH -if [ ! -f $srcdir/cleaned.gz -o ! -f $srcdir/lexicon.txt ]; then +if [ ! -f $srcdir/cleaned.gz ] || [ ! 
-f $srcdir/lexicon.txt ]; then echo "Expecting files $srcdir/cleaned.gz and $srcdir/wordlist.final to exist"; echo "You need to run local/wsj_extend_dict.sh before running this script." exit 1; diff --git a/egs/wsj/s5/steps/scoring/score_kaldi_wer.sh b/egs/wsj/s5/steps/scoring/score_kaldi_wer.sh index 9988c941441..5944c92330f 100755 --- a/egs/wsj/s5/steps/scoring/score_kaldi_wer.sh +++ b/egs/wsj/s5/steps/scoring/score_kaldi_wer.sh @@ -18,9 +18,9 @@ max_lmwt=17 iter=final #end configuration section. -echo "$0 $@" # Print the command line for logging +echo "$0 $*" # Print the command line for logging [ -f ./path.sh ] && . ./path.sh -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -ne 3 ]; then echo "Usage: $0 [--cmd (run.pl|queue.pl...)] " diff --git a/egs/wsj/s5/utils/combine_data.sh b/egs/wsj/s5/utils/combine_data.sh index d0c754f71b5..e9aeeac3c74 100755 --- a/egs/wsj/s5/utils/combine_data.sh +++ b/egs/wsj/s5/utils/combine_data.sh @@ -16,7 +16,7 @@ skip_fix=false # skip the fix_data_dir.sh in the end echo "$0 $@" # Print the command line for logging if [ -f path.sh ]; then . ./path.sh; fi -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -lt 2 ]; then echo "Usage: combine_data.sh [--extra-files 'file1 file2'] ..." @@ -28,14 +28,12 @@ fi dest=$1; shift; -first_src=$1; - rm -r $dest 2>/dev/null mkdir -p $dest; export LC_ALL=C -for dir in $*; do +for dir in "$@"; do if [ ! -f $dir/utt2spk ]; then echo "$0: no such file $dir/utt2spk" exit 1; @@ -46,7 +44,7 @@ done # it is not compulsary for it to exist in src directories, but if it exists in # even one it should exist in all. We will create the files where necessary has_utt2uniq=false -for in_dir in $*; do +for in_dir in "$@"; do if [ -f $in_dir/utt2uniq ]; then has_utt2uniq=true break @@ -55,7 +53,7 @@ done if $has_utt2uniq; then # we are going to create an utt2uniq file in the destdir - for in_dir in $*; do + for in_dir in "$@"; do if [ ! 
-f $in_dir/utt2uniq ]; then # we assume that utt2uniq is a one to one mapping cat $in_dir/utt2spk | awk '{printf("%s %s\n", $1, $1);}' @@ -73,7 +71,7 @@ extra_files=$(echo "$extra_files"|sed -e "s/utt2uniq//g") # segments are treated similarly to utt2uniq. If it exists in some, but not all # src directories, then we generate segments where necessary. has_segments=false -for in_dir in $*; do +for in_dir in "$@"; do if [ -f $in_dir/segments ]; then has_segments=true break @@ -81,7 +79,7 @@ for in_dir in $*; do done if $has_segments; then - for in_dir in $*; do + for in_dir in "$@"; do if [ ! -f $in_dir/segments ]; then echo "$0 [info]: will generate missing segments for $in_dir" 1>&2 utils/data/get_segments_for_data.sh $in_dir @@ -97,7 +95,7 @@ fi for file in utt2spk utt2lang utt2dur feats.scp text cmvn.scp reco2file_and_channel wav.scp spk2gender $extra_files; do exists_somewhere=false absent_somewhere=false - for d in $*; do + for d in "$@"; do if [ -f $d/$file ]; then exists_somewhere=true else @@ -107,7 +105,7 @@ for file in utt2spk utt2lang utt2dur feats.scp text cmvn.scp reco2file_and_chann if ! $absent_somewhere; then set -o pipefail - ( for f in $*; do cat $f/$file; done ) | sort -k1 > $dest/$file || exit 1; + ( for f in "$@"; do cat $f/$file; done ) | sort -k1 > $dest/$file || exit 1; set +o pipefail echo "$0: combined $file" else diff --git a/egs/wsj/s5/utils/convert_slf_parallel.sh b/egs/wsj/s5/utils/convert_slf_parallel.sh index 1b242ed2c38..4f2edb534a2 100755 --- a/egs/wsj/s5/utils/convert_slf_parallel.sh +++ b/egs/wsj/s5/utils/convert_slf_parallel.sh @@ -14,7 +14,7 @@ word_to_node=false # Words in arcs or nodes? [default:arcs] echo "$0 $@" [ -f ./path.sh ] && . ./path.sh -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -ne 3 ]; then echo "Usage: $0 [options] " @@ -42,7 +42,6 @@ done echo "$0: Converting lattices into '$dir/$dirname'" # Words in arcs or nodes? 
[default:nodes] -word_to_link_arg= $word_to_node && word_to_node_arg="--word-to-node" nj=$(cat $dir/num_jobs) @@ -56,7 +55,7 @@ $cmd $parallel_opts JOB=1:$nj $dir/$dirname/log/lat_convert.JOB.log \ utils/convert_slf.pl $word_to_node_arg - $dir/$dirname/JOB/ || exit 1 # make list of lattices -find -L $PWD/$dir/$dirname -name *.lat.gz > $dir/$dirname/lat_htk.scp || exit 1 +find -L $PWD/$dir/$dirname -name "*.lat.gz" > $dir/$dirname/lat_htk.scp || exit 1 # check number of lattices: nseg=$(cat $data/segments | wc -l) diff --git a/egs/wsj/s5/utils/format_lm.sh b/egs/wsj/s5/utils/format_lm.sh index 7d1ec0e6eef..6f1131c4b8a 100755 --- a/egs/wsj/s5/utils/format_lm.sh +++ b/egs/wsj/s5/utils/format_lm.sh @@ -39,7 +39,6 @@ for f in phones.txt words.txt topo L.fst L_disambig.fst phones/ oov.int oov.txt; cp -r $lang_dir/$f $out_dir done -lm_base=$(basename $lm '.gz') gunzip -c $lm \ | arpa2fst --disambig-symbol=#0 \ --read-symbol-table=$out_dir/words.txt - $out_dir/G.fst diff --git a/egs/wsj/s5/utils/format_lm_sri.sh b/egs/wsj/s5/utils/format_lm_sri.sh index 4ef31d925ca..d3cbc92b782 100755 --- a/egs/wsj/s5/utils/format_lm_sri.sh +++ b/egs/wsj/s5/utils/format_lm_sri.sh @@ -40,6 +40,8 @@ fi if [ $# -eq 4 ] ; then lang_dir=$1 lm=$2 + # shellcheck disable=2034 + # This unused variable is here for backwards compatibility reasons lexicon=$3 out_dir=$4 else @@ -73,7 +75,6 @@ trap 'rm -rf "$tmpdir"' EXIT mkdir -p $out_dir cp -r $lang_dir/* $out_dir || exit 1; -lm_base=$(basename $lm '.gz') awk '{print $1}' $out_dir/words.txt > $tmpdir/voc || exit 1; # Change the LM vocabulary to be the intersection of the current LM vocabulary diff --git a/egs/wsj/s5/utils/lang/make_phone_bigram_lang.sh b/egs/wsj/s5/utils/lang/make_phone_bigram_lang.sh index dcb77bb1342..141ebdcc03d 100755 --- a/egs/wsj/s5/utils/lang/make_phone_bigram_lang.sh +++ b/egs/wsj/s5/utils/lang/make_phone_bigram_lang.sh @@ -16,7 +16,7 @@ echo "$0 $@" # Print the command line for logging [ -f ./path.sh ] && . 
./path.sh; # source the path. -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# != 3 ]; then diff --git a/egs/wsj/s5/utils/make_absolute.sh b/egs/wsj/s5/utils/make_absolute.sh index 523e19ac975..c14b30ec3d2 100755 --- a/egs/wsj/s5/utils/make_absolute.sh +++ b/egs/wsj/s5/utils/make_absolute.sh @@ -4,13 +4,13 @@ # It turns a pathname into an absolute pathname, including following soft links. target_file=$1 -cd $(dirname $target_file) +cd $(dirname $target_file) || exit 1 target_file=$(basename $target_file) # Iterate down a (possible) chain of symlinks while [ -L "$target_file" ]; do target_file=$(readlink $target_file) - cd $(dirname $target_file) + cd $(dirname $target_file) || exit 1 target_file=$(basename $target_file) done diff --git a/egs/wsj/s5/utils/mkgraph.sh b/egs/wsj/s5/utils/mkgraph.sh index 1becfc45be3..23da2e4ea6d 100755 --- a/egs/wsj/s5/utils/mkgraph.sh +++ b/egs/wsj/s5/utils/mkgraph.sh @@ -13,6 +13,8 @@ # (this is compiled from this repository using Doxygen, # the source for this part is in src/doc/graph_recipe_test.dox) +# shellcheck disable=2064 + set -o pipefail tscale=1.0 @@ -20,8 +22,8 @@ loopscale=0.1 remove_oov=false -for x in `seq 4`; do - [ "$1" == "--mono" -o "$1" == "--left-biphone" -o "$1" == "--quinphone" ] && shift && \ +for _ in `seq 4`; do + [ "$1" == "--mono" ] || [ "$1" == "--left-biphone" ] || [ "$1" == "--quinphone" ] && shift && \ echo "WARNING: the --mono, --left-biphone and --quinphone options are now deprecated and ignored." [ "$1" == "--remove-oov" ] && remove_oov=true && shift; [ "$1" == "--transition-scale" ] && tscale=$2 && shift 2; @@ -137,7 +139,7 @@ if [[ ! 
-s $dir/HCLG.fst || $dir/HCLG.fst -ot $dir/HCLGa.fst ]]; then add-self-loops --self-loop-scale=$loopscale --reorder=true \ $model < $dir/HCLGa.fst | fstconvert --fst_type=const > $dir/HCLG.fst.$$ || exit 1; mv $dir/HCLG.fst.$$ $dir/HCLG.fst - if [ $tscale == 1.0 -a $loopscale == 1.0 ]; then + if [ $tscale == 1.0 ] && [ $loopscale == 1.0 ]; then # No point doing this test if transition-scale not 1, as it is bound to fail. fstisstochastic $dir/HCLG.fst || echo "[info]: final HCLG is not stochastic." fi diff --git a/egs/wsj/s5/utils/parse_options.sh b/egs/wsj/s5/utils/parse_options.sh index fdc8a362843..2315660d612 100755 --- a/egs/wsj/s5/utils/parse_options.sh +++ b/egs/wsj/s5/utils/parse_options.sh @@ -46,11 +46,12 @@ done ### while true; do [ -z "${1:-}" ] && break; # break if there are no arguments + # shellcheck disable=2154 case "$1" in # If the enclosing script is called with --help option, print the help # message and exit. Scripts should put help messages in $help_message --help|-h) if [ -z "$help_message" ]; then echo "No help found." 1>&2; - else printf "$help_message\n" 1>&2 ; fi; + else printf "%s\n" "$help_message" 1>&2 ; fi; exit 0 ;; --*=*) echo "$0: options to scripts must be of the form --name value, got '$1'" exit 1 ;; @@ -63,6 +64,7 @@ while true; do # The test [ -z ${foo_bar+xxx} ] will return true if the variable foo_bar # is undefined. We then have to wrap this test inside "eval" because # foo_bar is itself inside a variable ($name). 
+ # shellcheck disable=2016 eval '[ -z "${'$name'+xxx}" ]' && echo "$0: invalid option $1" 1>&2 && exit 1; oldval="`eval echo \\$$name`"; diff --git a/egs/wsj/s5/utils/prepare_online_nnet_dist_build.sh b/egs/wsj/s5/utils/prepare_online_nnet_dist_build.sh index adc2cefbe42..9ad9d524f85 100755 --- a/egs/wsj/s5/utils/prepare_online_nnet_dist_build.sh +++ b/egs/wsj/s5/utils/prepare_online_nnet_dist_build.sh @@ -12,7 +12,7 @@ ivec_extractor_files="final.dubm final.ie final.mat global_cmvn.stats online_cmv echo "$0 $@" # Print the command line for logging [ -f path.sh ] && . ./path.sh; -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# -ne 3 ]; then echo "Usage: $0 " diff --git a/egs/wsj/s5/utils/remove_data_links.sh b/egs/wsj/s5/utils/remove_data_links.sh index 8ec68f91bc9..908d97fd461 100755 --- a/egs/wsj/s5/utils/remove_data_links.sh +++ b/egs/wsj/s5/utils/remove_data_links.sh @@ -27,7 +27,7 @@ if [ $# == 0 ]; then echo " With --dry-run, just prints what it would do." fi -for dir in $*; do +for dir in "$@"; do if [ ! -d $dir ]; then echo "$0: not a directory: $dir" ret=1 diff --git a/egs/wsj/s5/utils/split_data.sh b/egs/wsj/s5/utils/split_data.sh index bc5894e7551..ed5133bf91d 100755 --- a/egs/wsj/s5/utils/split_data.sh +++ b/egs/wsj/s5/utils/split_data.sh @@ -50,10 +50,6 @@ else fi n=0; -feats="" -wavs="" -utt2spks="" -texts="" nu=`cat $data/utt2spk | wc -l` nf=`cat $data/feats.scp 2>/dev/null | wc -l` diff --git a/egs/wsj/s5/utils/subset_data_dir_tr_cv.sh b/egs/wsj/s5/utils/subset_data_dir_tr_cv.sh index 1bf3951b9dc..468d401eae4 100755 --- a/egs/wsj/s5/utils/subset_data_dir_tr_cv.sh +++ b/egs/wsj/s5/utils/subset_data_dir_tr_cv.sh @@ -32,12 +32,13 @@ uttbase=true; # by default, we choose last 10% utterances for CV if [ "$1" == "--cv-spk-percent" ]; then uttbase=false; - spkbase=true; + # Otherwise, do splitting based on speakers, such that the same + # speaker won't be in both train and cross validation sets. fi [ -f path.sh ] && . 
./path.sh; -. parse_options.sh || exit 1; +. utils/parse_options.sh || exit 1; if [ $# != 3 ]; then echo "Usage: $0 [--cv-spk-percent P|--cv-utt-percent P] " diff --git a/tools/extras/travis_script.sh b/tools/extras/travis_script.sh index 6acc805abda..a6d222eb0d6 100755 --- a/tools/extras/travis_script.sh +++ b/tools/extras/travis_script.sh @@ -66,16 +66,24 @@ CCC=$(mtoken CXX "$CXX") #fi echo "Building tools..." [Time: $(date)] -runvx cd tools -runvx make -j$MAXPAR openfst "$CCC" CXXFLAGS="$CF" \ - OPENFST_CONFIGURE="--disable-static --enable-shared --disable-bin --disable-dependency-tracking" -cd .. +# runvx cd tools +# runvx make -j$MAXPAR openfst "$CCC" CXXFLAGS="$CF" \ +# OPENFST_CONFIGURE="--disable-static --enable-shared --disable-bin --disable-dependency-tracking" +# runvx cd .. runvx cd src -runvx touch base/.depend.mk # Fool make depend into skipping the dependency step. -runvx touch .short_version # Make version short, or else ccache will miss everything. -runvx "$CCC" CXXFLAGS="$CF" LDFLAGS="$LDF" ./configure --shared --use-cuda=no "$DPF" --mathlib=OPENBLAS --openblas-root="$XROOT/usr" -runvx make -j$MAXPAR $CI_TARGETS CI_NOLINKBINARIES=1 +# runvx touch base/.depend.mk # Fool make depend into skipping the dependency step. +# runvx touch .short_version # Make version short, or else ccache will miss everything. +# runvx "$CCC" CXXFLAGS="$CF" LDFLAGS="$LDF" ./configure --shared --use-cuda=no "$DPF" --mathlib=OPENBLAS --openblas-root="$XROOT/usr" +# runvx make -j$MAXPAR $CI_TARGETS CI_NOLINKBINARIES=1 + +runvx cd ../egs/wsj/s5 +#runvx find ./ -mindepth 1 -name "*.sh" -exec shellcheck --exclude=SC1090,SC2002,SC2006, --shell=bash --external-sources '{}' ';' +runvx find ./utils ./steps -name "*.sh" -exec shellcheck --exclude=SC1090,SC2002,SC2006 --shell=bash --external-sources '{}' ';' +# for bash_file in $(find ../egs -name "*.sh"); do +# runvx shellcheck -s bash $bash_file +# done + # Travis has a 10k line log limit, so use smaller CI_TARGETS when logging. 
if [ -r "$CCACHE_LOGFILE" ]; then