Original here. This basically recreates this blog post, but with different test sets, and on Kaggle, where setting up Kaldi is a little more involved than usual.

Results (WER %):

                 test-clean  test-other
tgsmall LM:            7.13       17.92
RNNLM rescored:        5.85       15.98

Unpack Kaldi

# --- Unpack a prebuilt Kaldi tree into /opt (built in a separate Kaggle notebook) ---
%cd /opt
/opt
%%capture
!tar xvf /kaggle/input/extract-prebuilt-kaldi-from-docker/kaldi.tar
import os
# Kaldi binaries link against conda's libs, OpenFst 1.6.7, and libkaldi.
# NOTE(review): the trailing ':' leaves an empty LD_LIBRARY_PATH entry
# (current dir) -- harmless here, but presumably unintentional.
os.environ['LD_LIBRARY_PATH'] = '/opt/conda/lib:/opt/kaldi/tools/openfst-1.6.7/lib:/opt/kaldi/src/lib:'
# Remember the notebook's original PATH so later cells can extend, not clobber, it.
EXISTING_PATH = os.environ['PATH']
# --- Unpack the matching CUDA 10.0 runtime; extracted at / (tar presumably
# contains usr/local/... paths -- TODO confirm) ---
%cd /
/
%%capture
!tar xvf /kaggle/input/extract-cuda-from-kaldi-docker/cuda.tar
import os
os.environ['LD_LIBRARY_PATH'] = f'{os.environ["LD_LIBRARY_PATH"]}:/usr/local/cuda-10.0/targets/x86_64-linux/lib/'
%cd /opt/kaldi/egs
/opt/kaldi/egs

Install flac

%%capture
!apt install -y flac

Create a work directory

# Create a recipe workdir under egs/ so Kaldi's relative-path conventions hold.
!mkdir -p usels/s5
%cd usels/s5
/opt/kaldi/egs/usels/s5
# Keep the bulky data/ and exp/ outputs on Kaggle's writable /kaggle/working
# disk and symlink them into the recipe dir.
!mkdir /kaggle/working/data
!mkdir /kaggle/working/exp
!ln -s /kaggle/working/data
!ln -s /kaggle/working/exp
# Borrow the standard steps/ and utils/ helpers from the wsj recipe and the
# LibriSpeech-specific local/ scripts from the librispeech recipe.
!ln -s ../../wsj/s5/steps
!ln -s ../../wsj/s5/utils
!ln -s ../../librispeech/s5/local
!mkdir conf
%%writefile conf/mfcc_hires.conf
# config for high-resolution MFCC features, intended for neural network training
# Note: we keep all cepstra, so it has the same info as filterbank features,
# but MFCC is more easily compressible (because less correlated) which is why 
# we prefer this method.
--use-energy=false   # use average of log energy, not energy.
--num-mel-bins=40     # similar to Google's setup.
--num-ceps=40     # there is no dimensionality reduction.
--low-freq=20     # low cutoff frequency for mel bins... this is high-bandwidth data, so
                  # there might be some information at the low end.
--high-freq=-400 # high cutoff frequency, relative to Nyquist of 8000 (=7600)
Writing conf/mfcc_hires.conf

Setting up paths

(In the scripts, you just source path.sh)

%env KALDI_ROOT=/opt/kaldi
env: KALDI_ROOT=/opt/kaldi
# path.sh is what a normal recipe would 'source'; each IPython '!' line runs in
# a fresh shell, so its effect is replicated in the environment/Python below.
!cat ../../wsj/s5/path.sh
export KALDI_ROOT=`pwd`/../../..
[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh
export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH
[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1
. $KALDI_ROOT/tools/config/common_path.sh
export LC_ALL=C
%env LC_ALL=C
#PWD = !pwd
# Hardcoded to match the %cd above; the commented-out '!pwd' would return an
# IPython SList rather than a plain string.
PWD = '/opt/kaldi/egs/usels/s5'
KALDI_ROOT = '/opt/kaldi'
# Python-side equivalent of path.sh's PATH export, appended to the original PATH.
WSJ_PATH = f'{PWD}/utils/:{KALDI_ROOT}/tools/openfst/bin:{PWD}:{EXISTING_PATH}'
env: LC_ALL=C
# Dump common_path.sh for reference; the next cell parses it to rebuild the
# same list of Kaldi binary directories in Python.
!cat $KALDI_ROOT/tools/config/common_path.sh
# we assume KALDI_ROOT is already defined
[ -z "$KALDI_ROOT" ] && echo >&2 "The variable KALDI_ROOT must be already defined" && exit 1
# The formatting of the path export command is intentionally weird, because
# this allows for easy diff'ing
export PATH=\
${KALDI_ROOT}/src/bin:\
${KALDI_ROOT}/src/chainbin:\
${KALDI_ROOT}/src/featbin:\
${KALDI_ROOT}/src/fgmmbin:\
${KALDI_ROOT}/src/fstbin:\
${KALDI_ROOT}/src/gmmbin:\
${KALDI_ROOT}/src/ivectorbin:\
${KALDI_ROOT}/src/kwsbin:\
${KALDI_ROOT}/src/latbin:\
${KALDI_ROOT}/src/lmbin:\
${KALDI_ROOT}/src/nnet2bin:\
${KALDI_ROOT}/src/nnet3bin:\
${KALDI_ROOT}/src/nnetbin:\
${KALDI_ROOT}/src/online2bin:\
${KALDI_ROOT}/src/onlinebin:\
${KALDI_ROOT}/src/rnnlmbin:\
${KALDI_ROOT}/src/sgmm2bin:\
${KALDI_ROOT}/src/sgmmbin:\
${KALDI_ROOT}/src/tfrnnlmbin:\
${KALDI_ROOT}/src/cudadecoderbin:\
$PATH
raw_kaldi_paths=!cat $KALDI_ROOT/tools/config/common_path.sh|grep '/src/'|awk -F':' '{print $1}'|awk -F'/' '{print "/opt/kaldi/src/"$NF}'
KALDI_PATHS=raw_kaldi_paths.nlstr.replace('\n',':')
!cat $KALDI_ROOT/tools/env.sh
export PATH=/opt/kaldi/tools/python:${PATH}
export PHONETISAURUS="/tmp/output/opt/kaldi/tools/phonetisaurus-g2p"
export PATH="$PATH:${PHONETISAURUS}:${PHONETISAURUS}/src/scripts"
PHONETISAURUS = "/tmp/output/opt/kaldi/tools/phonetisaurus-g2p"
TOOLS_PATH = f'/opt/kaldi/tools/python:{PHONETISAURUS}:{PHONETISAURUS}/src/scripts'
%env PATH = f"{WSJ_PATH}:{KALDI_PATHS}:{TOOLS_PATH}"
env: PATH=f"/opt/kaldi/egs/usels/s5/utils/:/opt/kaldi/tools/openfst/bin:/opt/kaldi/egs/usels/s5:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kaldi/src/bin:/opt/kaldi/src/chainbin:/opt/kaldi/src/featbin:/opt/kaldi/src/fgmmbin:/opt/kaldi/src/fstbin:/opt/kaldi/src/gmmbin:/opt/kaldi/src/ivectorbin:/opt/kaldi/src/kwsbin:/opt/kaldi/src/latbin:/opt/kaldi/src/lmbin:/opt/kaldi/src/nnet2bin:/opt/kaldi/src/nnet3bin:/opt/kaldi/src/nnetbin:/opt/kaldi/src/online2bin:/opt/kaldi/src/onlinebin:/opt/kaldi/src/rnnlmbin:/opt/kaldi/src/sgmm2bin:/opt/kaldi/src/sgmmbin:/opt/kaldi/src/tfrnnlmbin:/opt/kaldi/src/cudadecoderbin:/opt/kaldi/tools/python:/tmp/output/opt/kaldi/tools/phonetisaurus-g2p:/tmp/output/opt/kaldi/tools/phonetisaurus-g2p/src/scripts"
# cmd.sh defines the job-launcher wrappers used by all Kaldi scripts; shown for
# reference.  We have no grid engine on Kaggle, so queue.pl is replaced with
# run.pl (single-machine execution) via %env below.
!cat ../../wsj/s5/cmd.sh
# you can change cmd.sh depending on what type of queue you are using.
# If you have no queueing system and want to run on a local machine, you
# can change all instances 'queue.pl' to run.pl (but be careful and run
# commands one by one: most recipes will exhaust the memory on your
# machine).  queue.pl works with GridEngine (qsub).  slurm.pl works
# with slurm.  Different queues are configured differently, with different
# queue names and different ways of specifying things like memory;
# to account for these differences you can create and edit the file
# conf/queue.conf to match your queue's configuration.  Search for
# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information,
# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl.

export train_cmd=queue.pl
export decode_cmd="queue.pl --mem 2G"
# the use of cuda_cmd is deprecated, used only in 'nnet1',
export cuda_cmd="queue.pl --gpu 1"

if [ "$(hostname -d)" == "fit.vutbr.cz" ]; then
  queue_conf=$HOME/queue_conf/default.conf # see example /homes/kazi/iveselyk/queue_conf/default.conf,
  export train_cmd="queue.pl --config $queue_conf --mem 2G --matylda 0.2"
  export decode_cmd="queue.pl --config $queue_conf --mem 3G --matylda 0.1"
  export cuda_cmd="queue.pl --config $queue_conf --gpu 1 --mem 10G --tmp 40G"
fi
%env train_cmd=run.pl
%env decode_cmd=run.pl
env: train_cmd=run.pl
env: decode_cmd=run.pl
!ln -s ../../wsj/s5/cmd.sh
!ln -s ../../wsj/s5/path.sh
# NOTE(review): the two links below are created and then immediately deleted by
# 'rm *.pl', so their net effect is nil -- presumably leftover experimentation.
# The scripts find run.pl via utils/ on PATH anyway.  Confirm intent.
!ln -s utils/queue.pl
!ln -s utils/run.pl
!rm *.pl

Data prep

# Build Kaldi data dirs (wav.scp, text, utt2spk, ...) from the raw LibriSpeech
# test sets using the librispeech recipe's data_prep.sh.
!local/data_prep.sh /kaggle/input/librispeech-test-clean-and-other/LibriSpeech/test-other data/test-other
!local/data_prep.sh /kaggle/input/librispeech-test-clean-and-other/LibriSpeech/test-clean data/test-clean
utils/validate_data_dir.sh: Successfully validated data-directory data/test-other
local/data_prep.sh: successfully prepared data in data/test-other
utils/validate_data_dir.sh: Successfully validated data-directory data/test-clean
local/data_prep.sh: successfully prepared data in data/test-clean
# The nnet3 chain model consumes high-resolution (40-dim) MFCCs, so clone each
# data dir into a *_hires variant before feature extraction.
!utils/copy_data_dir.sh data/test-clean data/test-clean_hires
!utils/copy_data_dir.sh data/test-other data/test-other_hires
utils/copy_data_dir.sh: copied data from data/test-clean to data/test-clean_hires
utils/validate_data_dir.sh: Successfully validated data-directory data/test-clean_hires
utils/copy_data_dir.sh: copied data from data/test-other to data/test-other_hires
utils/validate_data_dir.sh: Successfully validated data-directory data/test-other_hires
# NOTE(review): presumably make_mfcc.sh expects parse_options.sh in the current
# dir -- confirm; utils/parse_options.sh is already on PATH via utils/.
!ln -s utils/parse_options.sh
# Extract hires MFCCs (conf/mfcc_hires.conf written above), compute per-speaker
# CMVN stats, and fix/sort the data dirs so validation passes.
!steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf --cmd "$train_cmd" data/test-clean_hires
!steps/compute_cmvn_stats.sh data/test-clean_hires
!utils/fix_data_dir.sh data/test-clean_hires
!steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf --cmd "$train_cmd" data/test-other_hires
!steps/compute_cmvn_stats.sh data/test-other_hires
!utils/fix_data_dir.sh data/test-other_hires
steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf --cmd run.pl data/test-clean_hires
utils/validate_data_dir.sh: Successfully validated data-directory data/test-clean_hires
steps/make_mfcc.sh: [info]: no segments file exists: assuming wav.scp indexed by utterance.
steps/make_mfcc.sh: Succeeded creating MFCC features for test-clean_hires
steps/compute_cmvn_stats.sh data/test-clean_hires
Succeeded creating CMVN stats for test-clean_hires
fix_data_dir.sh: kept all 2620 utterances.
fix_data_dir.sh: old files are kept in data/test-clean_hires/.backup
steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf --cmd run.pl data/test-other_hires
utils/validate_data_dir.sh: Successfully validated data-directory data/test-other_hires
steps/make_mfcc.sh: [info]: no segments file exists: assuming wav.scp indexed by utterance.
steps/make_mfcc.sh: Succeeded creating MFCC features for test-other_hires
steps/compute_cmvn_stats.sh data/test-other_hires
Succeeded creating CMVN stats for test-other_hires
fix_data_dir.sh: kept all 2939 utterances.
fix_data_dir.sh: old files are kept in data/test-other_hires/.backup

Extract i-vectors

!ln -s /kaggle/input/kaldi-librispeech-model/exp/nnet3_cleaned/ exp/nnet3_cleaned
!ln -s /kaggle/input/kaldi-librispeech-model/exp/chain_cleaned/ exp/chain_cleaned
%env nspk=$(wc -l <data/test-clean_hires/spk2utt)
!steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj "${nspk}" data/test-clean_hires exp/nnet3_cleaned/extractor exp/nnet3_cleaned_out/ivectors_test-clean_hires
%env nspk=$(wc -l <data/test-other_hires/spk2utt)
!steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj "${nspk}" data/test-other_hires exp/nnet3_cleaned/extractor exp/nnet3_cleaned_out/ivectors_test-other_hires
env: nspk=$(wc -l <data/test-clean_hires/spk2utt)
steps/online/nnet2/extract_ivectors_online.sh --cmd run.pl --nj $(wc -l <data/test-clean_hires/spk2utt) data/test-clean_hires exp/nnet3_cleaned/extractor exp/nnet3_cleaned_out/ivectors_test-clean_hires
steps/online/nnet2/extract_ivectors_online.sh: extracting iVectors
steps/online/nnet2/extract_ivectors_online.sh: combining iVectors across jobs
steps/online/nnet2/extract_ivectors_online.sh: done extracting (online) iVectors to exp/nnet3_cleaned_out/ivectors_test-clean_hires using the extractor in exp/nnet3_cleaned/extractor.
env: nspk=$(wc -l <data/test-other_hires/spk2utt)
steps/online/nnet2/extract_ivectors_online.sh --cmd run.pl --nj $(wc -l <data/test-other_hires/spk2utt) data/test-other_hires exp/nnet3_cleaned/extractor exp/nnet3_cleaned_out/ivectors_test-other_hires
steps/online/nnet2/extract_ivectors_online.sh: extracting iVectors
steps/online/nnet2/extract_ivectors_online.sh: combining iVectors across jobs
steps/online/nnet2/extract_ivectors_online.sh: done extracting (online) iVectors to exp/nnet3_cleaned_out/ivectors_test-other_hires using the extractor in exp/nnet3_cleaned/extractor.

Build decoding graph

Just linking this directory won't work, because the Kaldi scripts expect to be able to write into it (mkgraph.sh creates a tmp/ subdirectory inside the lang dir), so we copy it instead.

# Copy (not symlink) the tgsmall lang dir: mkgraph.sh writes into it.
!cp -r /kaggle/input/kaldi-librispeech-model/data/lang_test_tgsmall data
%env tdnndir=exp/chain_cleaned/tdnn_1d_sp
%env graph_dir=exp/chain_cleaned_out/graph_tgsmall
# Compose the HCLG decoding graph against the small trigram LM.
# --self-loop-scale 1.0 is the standard setting for chain models;
# --remove-oov drops out-of-vocabulary arcs from the graph.
!utils/mkgraph.sh --self-loop-scale 1.0 --remove-oov data/lang_test_tgsmall $tdnndir $graph_dir
env: tdnndir=exp/chain_cleaned/tdnn_1d_sp
env: graph_dir=exp/chain_cleaned_out/graph_tgsmall
tree-info exp/chain_cleaned/tdnn_1d_sp/tree 
tree-info exp/chain_cleaned/tdnn_1d_sp/tree 
fstpushspecial 
fstdeterminizestar --use-log=true 
fstminimizeencoded 
fsttablecompose data/lang_test_tgsmall/L_disambig.fst data/lang_test_tgsmall/G.fst 
fstisstochastic data/lang_test_tgsmall/tmp/LG.fst 
-0.0459745 -0.0466771
[info]: LG not stochastic.
fstcomposecontext --context-size=2 --central-position=1 --read-disambig-syms=data/lang_test_tgsmall/phones/disambig.int --write-disambig-syms=data/lang_test_tgsmall/tmp/disambig_ilabels_2_1.int data/lang_test_tgsmall/tmp/ilabels_2_1.113735 data/lang_test_tgsmall/tmp/LG.fst 
fstisstochastic data/lang_test_tgsmall/tmp/CLG_2_1.fst 
-0.0459745 -0.0466771
[info]: CLG not stochastic.
make-h-transducer --disambig-syms-out=exp/chain_cleaned_out/graph_tgsmall/disambig_tid.int --transition-scale=1.0 data/lang_test_tgsmall/tmp/ilabels_2_1 exp/chain_cleaned/tdnn_1d_sp/tree exp/chain_cleaned/tdnn_1d_sp/final.mdl 
fstdeterminizestar --use-log=true 
fsttablecompose exp/chain_cleaned_out/graph_tgsmall/Ha.fst 'fstrmsymbols --remove-arcs=true --apply-to-output=true data/lang_test_tgsmall/oov.int data/lang_test_tgsmall/tmp/CLG_2_1.fst|' 
fstminimizeencoded 
fstrmsymbols exp/chain_cleaned_out/graph_tgsmall/disambig_tid.int 
fstrmepslocal 
fstrmsymbols --remove-arcs=true --apply-to-output=true data/lang_test_tgsmall/oov.int data/lang_test_tgsmall/tmp/CLG_2_1.fst 
fstisstochastic exp/chain_cleaned_out/graph_tgsmall/HCLGa.fst 
3.39453 -0.209239
HCLGa is not stochastic
add-self-loops --self-loop-scale=1.0 --reorder=true exp/chain_cleaned/tdnn_1d_sp/final.mdl exp/chain_cleaned_out/graph_tgsmall/HCLGa.fst 
fstisstochastic exp/chain_cleaned_out/graph_tgsmall/HCLG.fst 
3.05078 -0.127788
[info]: final HCLG is not stochastic.

Decode

# decode.sh writes into the model dir, and the pretrained dataset dir is
# read-only -- so mirror it with per-file symlinks inside a fresh writable dir.
!mkdir exp/tdnn_1d_sp
%pushd exp/tdnn_1d_sp
!for i in /kaggle/input/kaldi-librispeech-model/exp/chain_cleaned/tdnn_1d_sp/*;do ln -s $i;done
%popd
/kaggle/working/exp/tdnn_1d_sp
/opt/kaldi/egs/usels/s5
popd -> /opt/kaldi/egs/usels/s5
%env tdnndir=exp/tdnn_1d_sp
# Chain-model decoding: acoustic weight 1.0 during decoding, rescaled by 10
# afterwards (--post-decode-acwt) so downstream lattice tools see conventional
# scales; online i-vectors supply the speaker adaptation input.
!steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 --nj 8 --cmd "$decode_cmd" --online-ivector-dir exp/nnet3_cleaned_out/ivectors_test-clean_hires $graph_dir data/test-clean_hires $tdnndir/decode_test-clean_tgsmall
!steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 --nj 8 --cmd "$decode_cmd" --online-ivector-dir exp/nnet3_cleaned_out/ivectors_test-other_hires $graph_dir data/test-other_hires $tdnndir/decode_test-other_tgsmall
env: tdnndir=exp/tdnn_1d_sp
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 --nj 8 --cmd run.pl --online-ivector-dir exp/nnet3_cleaned_out/ivectors_test-clean_hires exp/chain_cleaned_out/graph_tgsmall data/test-clean_hires exp/tdnn_1d_sp/decode_test-clean_tgsmall
steps/nnet3/decode.sh: feature type is raw
steps/diagnostic/analyze_lats.sh --cmd run.pl --iter final exp/chain_cleaned_out/graph_tgsmall exp/tdnn_1d_sp/decode_test-clean_tgsmall
steps/diagnostic/analyze_lats.sh: see stats in exp/tdnn_1d_sp/decode_test-clean_tgsmall/log/analyze_alignments.log
Overall, lattice depth (10,50,90-percentile)=(1,2,5) and mean=2.8
steps/diagnostic/analyze_lats.sh: see stats in exp/tdnn_1d_sp/decode_test-clean_tgsmall/log/analyze_lattice_depth_stats.log
score best paths
score confidence and timing with sclite
Decoding done.
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 --nj 8 --cmd run.pl --online-ivector-dir exp/nnet3_cleaned_out/ivectors_test-other_hires exp/chain_cleaned_out/graph_tgsmall data/test-other_hires exp/tdnn_1d_sp/decode_test-other_tgsmall
steps/nnet3/decode.sh: feature type is raw
steps/diagnostic/analyze_lats.sh --cmd run.pl --iter final exp/chain_cleaned_out/graph_tgsmall exp/tdnn_1d_sp/decode_test-other_tgsmall
steps/diagnostic/analyze_lats.sh: see stats in exp/tdnn_1d_sp/decode_test-other_tgsmall/log/analyze_alignments.log
Overall, lattice depth (10,50,90-percentile)=(1,3,13) and mean=6.3
steps/diagnostic/analyze_lats.sh: see stats in exp/tdnn_1d_sp/decode_test-other_tgsmall/log/analyze_lattice_depth_stats.log
score best paths
score confidence and timing with sclite
Decoding done.

Score

# Standard Kaldi WER scoring over a grid of LM weights and word insertion
# penalties; best_wer reports the best (LMWT, penalty) combination.
!steps/score_kaldi.sh --cmd "run.pl" data/test-clean_hires $graph_dir $tdnndir/decode_test-clean_tgsmall
!steps/score_kaldi.sh --cmd "run.pl" data/test-other_hires $graph_dir $tdnndir/decode_test-other_tgsmall
steps/score_kaldi.sh --cmd run.pl data/test-clean_hires exp/chain_cleaned_out/graph_tgsmall exp/tdnn_1d_sp/decode_test-clean_tgsmall
steps/score_kaldi.sh: scoring with word insertion penalty=0.0,0.5,1.0
steps/score_kaldi.sh --cmd run.pl data/test-other_hires exp/chain_cleaned_out/graph_tgsmall exp/tdnn_1d_sp/decode_test-other_tgsmall
steps/score_kaldi.sh: scoring with word insertion penalty=0.0,0.5,1.0
!cat exp/tdnn_1d_sp/decode_test-clean_tgsmall/scoring_kaldi/best_wer
!cat exp/tdnn_1d_sp/decode_test-other_tgsmall/scoring_kaldi/best_wer
%WER 7.13 [ 3747 / 52576, 648 ins, 242 del, 2857 sub ] exp/tdnn_1d_sp/decode_test-clean_tgsmall/wer_17_0.5
%WER 17.92 [ 9378 / 52343, 1384 ins, 723 del, 7271 sub ] exp/tdnn_1d_sp/decode_test-other_tgsmall/wer_17_1.0

Rescoring

# Copy the pretrained LSTM RNNLM into exp/ (copied, not linked -- presumably
# the rescoring scripts need write access; TODO confirm) and link in Kaldi's
# rnnlm helper scripts.
!cp -r /kaggle/input/kaldi-librispeech-model/exp/rnnlm_lstm_1a/ exp
!ln -s /opt/kaldi/scripts/rnnlm
# Pruned lattice rescoring: RNNLM interpolation weight 0.45, with the RNNLM
# history approximated by a 4-gram (--max-ngram-order 4).
%env decode_dir=exp/tdnn_1d_sp/decode_test-clean_tgsmall
!rnnlm/lmrescore_pruned.sh --cmd "$decode_cmd" --weight 0.45 --max-ngram-order 4 data/lang_test_tgsmall exp/rnnlm_lstm_1a data/test-clean_hires ${decode_dir} $tdnndir/decode_test-clean_rescore
%env decode_dir=exp/tdnn_1d_sp/decode_test-other_tgsmall
!rnnlm/lmrescore_pruned.sh --cmd "$decode_cmd" --weight 0.45 --max-ngram-order 4 data/lang_test_tgsmall exp/rnnlm_lstm_1a data/test-other_hires ${decode_dir} $tdnndir/decode_test-other_rescore
env: decode_dir=exp/tdnn_1d_sp/decode_test-clean_tgsmall
rnnlm/lmrescore_pruned.sh --cmd run.pl --weight 0.45 --max-ngram-order 4 data/lang_test_tgsmall exp/rnnlm_lstm_1a data/test-clean_hires exp/tdnn_1d_sp/decode_test-clean_tgsmall exp/tdnn_1d_sp/decode_test-clean_rescore
local/score.sh --cmd run.pl data/test-clean_hires data/lang_test_tgsmall exp/tdnn_1d_sp/decode_test-clean_rescore
env: decode_dir=exp/tdnn_1d_sp/decode_test-other_tgsmall
rnnlm/lmrescore_pruned.sh --cmd run.pl --weight 0.45 --max-ngram-order 4 data/lang_test_tgsmall exp/rnnlm_lstm_1a data/test-other_hires exp/tdnn_1d_sp/decode_test-other_tgsmall exp/tdnn_1d_sp/decode_test-other_rescore
local/score.sh --cmd run.pl data/test-other_hires data/lang_test_tgsmall exp/tdnn_1d_sp/decode_test-other_rescore
# Re-run the same WER scoring on the RNNLM-rescored lattices for comparison
# against the tgsmall-only baseline above.
!steps/score_kaldi.sh --cmd "run.pl" data/test-clean_hires $graph_dir $tdnndir/decode_test-clean_rescore
!steps/score_kaldi.sh --cmd "run.pl" data/test-other_hires $graph_dir $tdnndir/decode_test-other_rescore
steps/score_kaldi.sh --cmd run.pl data/test-clean_hires exp/chain_cleaned_out/graph_tgsmall exp/tdnn_1d_sp/decode_test-clean_rescore
steps/score_kaldi.sh: scoring with word insertion penalty=0.0,0.5,1.0
steps/score_kaldi.sh --cmd run.pl data/test-other_hires exp/chain_cleaned_out/graph_tgsmall exp/tdnn_1d_sp/decode_test-other_rescore
steps/score_kaldi.sh: scoring with word insertion penalty=0.0,0.5,1.0
!cat $tdnndir/decode_test-clean_rescore/scoring_kaldi/best_wer
!cat $tdnndir/decode_test-other_rescore/scoring_kaldi/best_wer
%WER 5.85 [ 3078 / 52576, 617 ins, 198 del, 2263 sub ] exp/tdnn_1d_sp/decode_test-clean_rescore/wer_17_0.5
%WER 15.98 [ 8362 / 52343, 1381 ins, 588 del, 6393 sub ] exp/tdnn_1d_sp/decode_test-other_rescore/wer_17_1.0