<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="http://index.cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="zh-cn">
		<id>http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Liuyibo</id>
		<title>cslt Wiki - 用户贡献 [zh-cn]</title>
		<link rel="self" type="application/atom+xml" href="http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Liuyibo"/>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E7%89%B9%E6%AE%8A:%E7%94%A8%E6%88%B7%E8%B4%A1%E7%8C%AE/Liuyibo"/>
		<updated>2026-05-06T09:44:59Z</updated>
		<subtitle>用户贡献</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf</id>
		<title>文件:Technical report - poetry generation.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf"/>
				<updated>2019-11-08T23:39:19Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Technical report - poetry generation.pdf”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf</id>
		<title>文件:Technical report - poetry generation.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf"/>
				<updated>2019-11-08T23:36:57Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Technical report - poetry generation.pdf”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Publication-trp</id>
		<title>Publication-trp</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Publication-trp"/>
				<updated>2019-11-08T23:34:29Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f4/Technical_report_-_poetry_generation.pdf TRP-20190001 A Rhythm Model for Chinese Poetry Generation, Yibo Liu, Dong Wang]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/b3/ASR_trp.pdf TRP-20180002 A Research of ASR, Jiayao Wu]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e9/TRP-20180001_Review_of_LID%2C_Zhaodi_Qi.pdf TRP-20180001 Review of LID, Zhaodi Qi]&lt;br /&gt;
[[文件:hedge.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/26/Dynamic_Hedge_using_RNN.pdf TRP-20170005 Dynamic Hedge Using RNN, Xin Jing ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/5f/Neural_sparseness.pdf TRP-20170004 Neural Sparseness in Speech Recognition Based on Kaldi (in Chinese), Yanqing Wang, Zhiyuan Tang, Dong Wang ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f0/Long-term_dropconnect.pdf TRP-20170003 Long-term DropConnect in Speech Recognition (in Chinese), Yanqing Wang, Zhiyuan Tang, Dong Wang ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/ff/Connection_sparseness.pdf TRP-20170002 Connection Sparseness in Speech Recognition Based on Kaldi (in Chinese), Yanqing Wang, Zhiyuan Tang, Dong Wang ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e3/TRP-201700011.pdf TRP-20170001 重叠语音与原始语音关系的研究与分析 唐辉]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/53/TRP-20160039.pdf TRP-20160039 Speaker Segmentation Using Deep Speaker Vectors for Fast Speaker Change Scenarios, Renyu Wang, Mingliang Gu, Lantian Li, Mingxing Xu, Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/06/TRP-20160038.pdf TRP-20160038 生物特征识别技术综述, Thomas Fang Zheng, Askar Rozi, Renyu Wang, Lantian Li]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/3d/TRP-20160037.pdf TRP-20160037 声纹识别技术及其应用现状, Thomas Fang Zheng, Lantian Li, Hui Zhang, Askar Rozi]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/5f/Dtq.pdf TRP-20160036 Deep Q-trading, Yang Wang, Dong Wang, Shiyue Zhang,Yang Feng, Shiyao Li,and Qiang Zhou]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/27/Mose_User_Guide.pdf TRP-20160035  Moses中文操作手册, 冯洋]&lt;br /&gt;
&lt;br /&gt;
[[文件:future.jpg|300px]]&lt;br /&gt;
*[[媒体文件:CCF-ASR.pdf|TRP-20160034: The Present and Future of Speech Recognition, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Memoryless.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a2/Memory.pdf TRP-20160033  Memoryless Document Vector, Dongxu Zhang, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
[[文件:Turing-test.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7a/Turing.pdf TRP-20160032 Can Machine Generate Traditional Chinese Poetry? A Turing Test, Qixin Wang, Tianyi Luo, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
[[文件:Mix-lingual.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ce/TRP-20160031.pdf TRP-20160031 OC16-CE80: A Chinese-English Mixlingual Database and A Speech Recognition Baseline, Dong Wang, Zhiyuan Tang, Difei Tang and Qing Chen]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9e/TRP-20160030.pdf TRP-20160030 Collaborative Joint Training with Multi-task Recurrent Model for Speech and Speaker Recognition, Zhiyuan Tang, Lantian Li, Dong Wang and Ravichander Vipperla]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/09/TRP-20160029.pdf TRP-20160029 Multi-task Recurrent Model for Speech and Speaker Recognition, Zhiyuan Tang, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/55/TRP-20160028.pdf TRP-20160028 Multi-task Recurrent Model for True Multilingual Speech Recognition, Zhiyuan Tang, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/27/TRP-20160027.pdf TRP-20160027 Collaborative Learning for Language and Speaker Recognition, Lantian Li, Zhiyuan Tang, Dong Wang, Yang Feng and Shiyue Zhang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9e/TRP-20160026.pdf TRP-20160026 Weakly Supervised PLDA Training, Lantian Li, Dong Wang, Yixiang Chen and Chenghui Zhao]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/4/4c/TRP-20160025.pdf TRP-20160025 Local Training for PLDA in Speaker Verification, Chenghui Zhao, Lantian Li, Dong Wang and  April Pu]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c5/TRP-20160024.pdf TRP-20160024 Decision Making Based on Cohort Scores for Speaker Verification, Lantian Li, Renyu Wang, Gang Wang, Caixia Wang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
[[文件:Multi-lingual.jpg|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f0/TRP-20160023.pdf TRP-20160023 AP16-OL7: A Multilingual Database for Oriental Languages and A Language Recognition Baseline, Dong Wang, Lantian Li, Difei Tang and Qing Chen]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f8/TRP-20160022.pdf TRP-20160022 System Combination for Short Utterance Speaker Recognition, Lantian Li, Dong Wang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/93/TRP-20160021.pdf TRP-20160021 Improving Short Utterance Speaker Recognition by Modeling Speech Unit Classes, Lantian Li, Dong Wang, Chenhao Zhang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/76/TRP-20160020.pdf TRP-20160020 Feature Transformation For Speaker Verification Under Speaking Rate Mismatch Condition, Askar Rozi, Lantian Li, Dong Wang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c9/TRP-20160019.pdf TRP-20160019 Language-aware PLDA for Multilingual Speaker Recognition, Askar Rozi, Dong Wang, Lantian Li and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/b5/Ijcai16.pdf TRP-20160018 Chinese Song Iambics Generation with Neural Attention-based Model, Qixin Wang, Tianyi Luo, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://tangzy.cslt.org/files/trp_nnet3_config.pdf TRP-20160017 How to Config Kaldi nnet3 (in Chinese), Zhiyuan Tang and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e6/Joint_training_config.pdf TRP-20160016 How to deploy joint training in Kaldi (in Chinese), Hang Luo, Zhiyuan Tang and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7d/Kazak_speech_recognition_chinese.pdf TRP-20160015 How to run ASR system for Kazak (in Chinese), Ying Shi, Zhiyuan Tang，Nurbolat and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/58/TRP-20160014.pdf TRP-20160014 Exploring The Role of Deep Speaker Features for Speaker Verification, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/15/Sda.pdf TRP-20160013 Sparse Discriminative Analysis and Its Application in Distraction Classification, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/20/Ivector.pdf TRP-20160012 i-vector system in Kaldi (in Chinese) Yixang Chen, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9c/Replay.pdf TRP-20160011 基于说话人信道相关的录音重放检测若干方法探究 Lantian Li, Yixiang Chen and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/95/Dkeyword.pdf TRP-20160010 Highly Restricted Keyword Selection Based on Sparse Analysis for Uyghur Text Categorization, Dong Wang, Askar Humdulla, Rayilam Parhat, Javier Tejedor]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/2b/RNNG_Code_Use_Guide_simplified.pdf TRP-20160009: RNNG Code User Guide, Shiyue Zhang and Yang Feng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c4/Memory-atten-model-public.pdf TRP-20160008: Different styles of poetry generation based on memory model, Jiyuan Zhang,Yang Feng and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:dis.png|300px]]&lt;br /&gt;
*[[媒体文件:sda.pdf|TRP-20160013: Sparse Discriminative Analysis and Its Application in Distraction Classification, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:dkeyword.png|300px]]&lt;br /&gt;
*[[媒体文件:dkeyword.pdf|TRP-20160010: Highly Restricted Keyword Selection Based on Sparse Analysis for Uyghur Text Categorization, Dong Wang, Askar Humdulla, Rayilam Parhat, Javier Tejedor]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Rnng user guide.png|300px]]&lt;br /&gt;
*[[媒体文件:RNNG Code Use Guide simplified.pdf|TRP-20160009: RNNG Code User Guide, Shiyue Zhang and Yang Feng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Memory_attention_model.jpg|300px]]&lt;br /&gt;
*[[媒体文件:Memory-atten-model-public.pdf|TRP-20160008: Different styles of poetry generation based on memory model, Jiyuan Zhang,Yang Feng and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Distraction.png|300px]]&lt;br /&gt;
*[[媒体文件:Distraction.pdf|TRP-20160007: Distraction Detection Using Sparse Discriminative Analysis, Dong Wang and Guozhen Zhao]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Visual_fig.png|200px]]&lt;br /&gt;
*[[媒体文件:Visual.pdf|TRP-20160006: Visualization Analysis for Recurrent Networks, Zhiyuan Tang, Ying Shi and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件: FIG-TRP2016.png |200px]]&lt;br /&gt;
*[[媒体文件:Distributed_Representation_Learning_for_Knowledge_Graphs_with_Entity_Descriptions.pdf| TRP-20160005: Distributed Representation Learning for Knowledge Graphs with Entity Descriptions; Miao Fan, Qiang Zhou, Thomas Fang Zheng, Ralph Grishman]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Aikefu.bmp|200px]]&lt;br /&gt;
*[[媒体文件:Cslt-trp-template.pdf|TRP-20160004: A Review of Neural QA, Tianyi Luo and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:simpair.png|200px]]&lt;br /&gt;
*[[媒体文件:Trp20160003.pdf|TRP-20160003: A study of Similar Word Model for Unfrequent Word Enhancement in Speech Recognition, Xi Ma, Dong Wang and Javier Tejedor]]&lt;br /&gt;
&lt;br /&gt;
[[文件:low-freq.png|200px]]&lt;br /&gt;
*[[媒体文件:How to deal with low frequency words.pdf|TRP-20160002: Low-Frequency Words Embedding, Chao Xing, Yiqiao Pan, Dong Wang]]&lt;br /&gt;
[[文件:maxmargin.png|200px]]&lt;br /&gt;
*[[媒体文件:Max-margin.pdf|TRP-20160001: Max-margin metric learning for speaker recognition, Lantian Li, Chao Xing, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:lowv.png|200px]]&lt;br /&gt;
*[[媒体文件:Lowv.pdf|TRP-20150033: Learning Ordered Word Representations, Xiaoxi Wang, Chao Xing, Dong Wang, Rong Liu and Yiqiao Pan]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Adamax.png|200px]]&lt;br /&gt;
*[[媒体文件:Adamax Online Training for Speech Recognition.pdf|TRP-20150032: Adamax Online Training for Speech Recognition, Xiangyu Zeng, Zhiyong Zhang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Ptrnets.png|200px]]&lt;br /&gt;
*[[媒体文件: Ptrnets.pdf|TRP-20150031: An implementation of Pointer-Networks with Extensions, Xiaoxi Wang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:dvad.png|200px]]&lt;br /&gt;
*[[媒体文件:dvad.pdf|TRP-20150030: DNN-based Voice Activity Detection for Speaker Recognition, Fanhu Bie, Zhiyong Zhang, Dong Wang, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:uyghur.jpg|200px]]&lt;br /&gt;
*[[媒体文件:urghur.pdf|TRP-20150029: THUYG-20：A Free Uyghur Speech Database, Askar Rozi, Shi Yin, Zhiyong Zhang, Dong Wang,  Askar Hamdulla]]&lt;br /&gt;
&lt;br /&gt;
[[文件:nnpre.jpg|200px]]&lt;br /&gt;
*[[媒体文件:nnpre.pdf|TRP-20150028: Knowledge Transfer Pre-training, Zhiyuang Tang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:mmargin.png|200px]]&lt;br /&gt;
*[[媒体文件:mmargin.pdf|TRP-20150027: Max-Margin Metric Learning for Speaker Recognition, Lantian Li, Chao Xing, Dong Wang, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:binary.jpg|200px]]&lt;br /&gt;
*[[媒体文件:binary.pdf|TRP-20150026: Binary Speaker Embedding, Lantian Li, Chao Xing, Dong Wang, Kaimin Yu, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:rnnrl.png|200px]]&lt;br /&gt;
*[[媒体文件:rnnrl.pdf|TRP-20150025: Relation Classification via Recurrent Neural Network, Dong Xu Zhang, Dong Wang, Rong Liu]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:dplda.png|200px]]&lt;br /&gt;
*[[媒体文件:dplda.pdf|TRP-20150024: Learning from LDA using Deep Neural Networks, Dongxu Zhang, Tianyi Luo and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:jsrl.png|200px]]&lt;br /&gt;
*[[媒体文件:jsrl.pdf|TRP-20150023: Joint Semantic Relevance Learning with Text Data and Graph Knowledge, Dongxu Zhang, Bin Yuan, Dong Wang, Rong Liu]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:listnet.png|200px]]&lt;br /&gt;
*[[媒体文件:listnet.pdf|TRP-20150022: Stochastic Top-k ListNet, Tianyi Luo, Dong Wang, Rong Liu, Yiqiao Pan]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:segvector.png|200px]]&lt;br /&gt;
*[[媒体文件:segvector.pdf|TRP-20150021: Improved Deep Speaker Feature Learning for Text-Dependent Speaker Recognition, Lantian Li, Yiye Lin, Zhiyong Zhang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:Vmclass.png|200px]]&lt;br /&gt;
*[[媒体文件:Vmclass.pdf|TRP-20150020: Document Classification with Spherical Word Vectors, Yiqiao Pan, Chao Xing, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:Tlearn.png|200px]]&lt;br /&gt;
*[[媒体文件:Tlearn.pdf|TRP-20150019: Transfer Learning for Speech and Language Processing, Dong Wang and Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Songcisample.png|200px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7a/Cslt20150018_revisedversion.pdf TRP-20150018:Chinese Song Iambics Generation with Neural Attention-based Model, Qixin Wang, Tianyi Luo, Dong Wang, Chao Xing]&lt;br /&gt;
&lt;br /&gt;
[[文件:database.jpg|200px]]&lt;br /&gt;
*[[媒体文件:Thuyg20-sre.pdf|TRP-20150017: AN OPEN/FREE DATABASE AND BENCHMARK FOR UYGHUR SPEAKER RECOGNITION, Askar Rozi, Dong Wang, Zhiyong Zhang, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Thchs.png|200px]]&lt;br /&gt;
*[[媒体文件:Thchs30.pdf|TRP-20150016: THCHS-30 : A Free Chinese Speech Corpus, Dong Wang and Xuewei Zhang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:Su.jpg|200px]]&lt;br /&gt;
*[[媒体文件:SUSR.pdf|TRP-20150015: Improving Short Utterance Speaker Recognition by Modeling Speech Unit Classes, Chenhao Zhang Dong Wang, Lantian Li and Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:dv.png|200px]]&lt;br /&gt;
*[[媒体文件:Dvector.pdf|TRP-20150014: Deep Speaker Vectors for Semi Text-independent Speaker Verification, Lantian Li, Dong Wang, Zhiyong Zhang and Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:dark.png|200px]]&lt;br /&gt;
*[[媒体文件:Dark.pdf|TRP-20150013: Recurrent Neural Network Training with Dark Knowledge Transfer, Dong Wang, Chao Liu, Zhiyuan Tang, Zhiyong Zhang, Mengyuan Zhao]]&lt;br /&gt;
&lt;br /&gt;
[[文件:PBE.png|200px]]&lt;br /&gt;
*[[媒体文件:Probabilistic_Belief_Embedding_for_Knowledge_Population_(TRP).pdf|TRP-20150012: Probabilistic Belief Embedding for Large-scale Knowledge Population. Miao Fan, Qiang Zhou, Andrew Abel, Thomas Fang Zheng and Ralph Grishman]]&lt;br /&gt;
&lt;br /&gt;
[[文件:fst-fw.png|200px]]&lt;br /&gt;
*[[媒体文件:wpair.pdf|TRP-20150011: Recognize Foreign Low-Frequency Words with Similar Pairs, Xi Ma, Xiaoxi Wang and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Cdae.png|200px]]&lt;br /&gt;
*[[媒体文件:Music.pdf|TRP-20150010: Music Removal by Denoising Autoencoder in Speech Recognition. Mengyuan Zhao, Dong Wang, Zhiyong Zhang and Xuewei Zhang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:vmfsne.png|200px]]&lt;br /&gt;
*[[媒体文件:Cslt-trp-template-vmfsne.pdf|TRP-20150009: VMF-SNE: Embedding for Spherical Data. Mian Wang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:ros.png|200px]]&lt;br /&gt;
*[[媒体文件:Ros.pdf|TRP-20150008: Learning Speech Rate in Speech Recognition. Xiangyu Zeng, Shi Yin.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Dnnvadstru.png|200px]]&lt;br /&gt;
*[[媒体文件:DNNVADTRP.pdf|TRP-20150007: Voice Activity Detection Based on Deep Neural Networks. Shi Yin.]] ([[媒体文件:Vad.pdf|Paper submitted to Tsinghua Xuebao]])&lt;br /&gt;
&lt;br /&gt;
[[文件:Uyghur-training.png|200px]]&lt;br /&gt;
*[[媒体文件:UyghurTRP.pdf|TRP-20150006: Low-resource Uyghur Acoustic Model Training based on Cross-lingual Features. Shi Yin.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Beam-forming.png|200px]]&lt;br /&gt;
*[[媒体文件:Multi-Microphones_Reverberation_Cancellation_for_Distant_Speech_Recognition.pdf|TRP-20150005: Multi-Microphones_Reverberation_Cancellation_for_Distant_Speech_Recognition. Xuewei Zhang.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Clipping-speaker.png|200px]]&lt;br /&gt;
*[[媒体文件:Clip.pdf|TRP-20150004: Detection and Reconstruction of Clipped Speech in Speaker Recognition. Fanhu Bie et al.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Semi-dynamic-embedding.png|200px]]&lt;br /&gt;
*[[媒体文件:Taglm.pdf|TRP-20150003: Semi-Dynamic Graph Embedding for Large Scale Language Model Adaptation. Bin Yuan et al.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Speaker-discriminative-score.png|200px]]&lt;br /&gt;
*[[媒体文件:DNN-based Discriminative Scoring for Speaker.pdf|TRP-20150002: DNN-based Discriminative Scoring for Speaker Recognition Based on i-vector. Jun Wang et al. ]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Noisy-traiing.png|200px]]&lt;br /&gt;
*[[媒体文件:Noisy Training for Deep Neural Networks in.pdf|TRP-20150001: Noisy Training for Deep Neural Networks in Speech Recognition. Shi Yin et al.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:English-scroing.png|200px]]&lt;br /&gt;
*[[媒体文件:AutomaticScoringforEnglishUtterances.pdf|TRP-20140001: Automatic Scoring for English Utterances. Bo Hu.]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[媒体文件:Template.rar|CSLT-TRP latex template]]&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf</id>
		<title>文件:Technical report - poetry generation.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf"/>
				<updated>2019-11-08T23:32:57Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Technical report - poetry generation.pdf”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Poem_example.png</id>
		<title>文件:Poem example.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Poem_example.png"/>
				<updated>2019-11-08T23:26:11Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Publication-trp</id>
		<title>Publication-trp</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Publication-trp"/>
				<updated>2019-11-08T23:19:12Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f4/Technical_report_-_poetry_generation.pdf TRP-20190001 A Rhythm Model for Chinese Poetry Generation, Yibo Liu]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/b3/ASR_trp.pdf TRP-20180002 A Research of ASR, Jiayao Wu]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e9/TRP-20180001_Review_of_LID%2C_Zhaodi_Qi.pdf TRP-20180001 Review of LID, Zhaodi Qi]&lt;br /&gt;
[[文件:hedge.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/26/Dynamic_Hedge_using_RNN.pdf TRP-20170005 Dynamic Hedge Using RNN, Xin Jing ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/5f/Neural_sparseness.pdf TRP-20170004 Neural Sparseness in Speech Recognition Based on Kaldi (in Chinese), Yanqing Wang, Zhiyuan Tang, Dong Wang ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f0/Long-term_dropconnect.pdf TRP-20170003 Long-term DropConnect in Speech Recognition (in Chinese), Yanqing Wang, Zhiyuan Tang, Dong Wang ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/ff/Connection_sparseness.pdf TRP-20170002 Connection Sparseness in Speech Recognition Based on Kaldi (in Chinese), Yanqing Wang, Zhiyuan Tang, Dong Wang ]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e3/TRP-201700011.pdf TRP-20170001 重叠语音与原始语音关系的研究与分析 唐辉]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/53/TRP-20160039.pdf TRP-20160039 Speaker Segmentation Using Deep Speaker Vectors for Fast Speaker Change Scenarios, Renyu Wang, Mingliang Gu, Lantian Li, Mingxing Xu, Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/06/TRP-20160038.pdf TRP-20160038 生物特征识别技术综述, Thomas Fang Zheng, Askar Rozi, Renyu Wang, Lantian Li]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/3d/TRP-20160037.pdf TRP-20160037 声纹识别技术及其应用现状, Thomas Fang Zheng, Lantian Li, Hui Zhang, Askar Rozi]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/5f/Dtq.pdf TRP-20160036 Deep Q-trading, Yang Wang, Dong Wang, Shiyue Zhang,Yang Feng, Shiyao Li,and Qiang Zhou]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/27/Mose_User_Guide.pdf TRP-20160035  Moses中文操作手册, 冯洋]&lt;br /&gt;
&lt;br /&gt;
[[文件:future.jpg|300px]]&lt;br /&gt;
*[[媒体文件:CCF-ASR.pdf|TRP-20160034: The Present and Future of Speech Recognition, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Memoryless.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a2/Memory.pdf TRP-20160033  Memoryless Document Vector, Dongxu Zhang, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
[[文件:Turing-test.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7a/Turing.pdf TRP-20160032 Can Machine Generate Traditional Chinese Poetry? A Turing Test, Qixin Wang, Tianyi Luo, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
[[文件:Mix-lingual.png|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ce/TRP-20160031.pdf TRP-20160031 OC16-CE80: A Chinese-English Mixlingual Database and A Speech Recognition Baseline, Dong Wang, Zhiyuan Tang, Difei Tang and Qing Chen]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9e/TRP-20160030.pdf TRP-20160030 Collaborative Joint Training with Multi-task Recurrent Model for Speech and Speaker Recognition, Zhiyuan Tang, Lantian Li, Dong Wang and Ravichander Vipperla]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/09/TRP-20160029.pdf TRP-20160029 Multi-task Recurrent Model for Speech and Speaker Recognition, Zhiyuan Tang, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/55/TRP-20160028.pdf TRP-20160028 Multi-task Recurrent Model for True Multilingual Speech Recognition, Zhiyuan Tang, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/27/TRP-20160027.pdf TRP-20160027 Collaborative Learning for Language and Speaker Recognition, Lantian Li, Zhiyuan Tang, Dong Wang, Yang Feng and Shiyue Zhang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9e/TRP-20160026.pdf TRP-20160026 Weakly Supervised PLDA Training, Lantian Li, Dong Wang, Yixiang Chen and Chenghui Zhao]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/4/4c/TRP-20160025.pdf TRP-20160025 Local Training for PLDA in Speaker Verification, Chenghui Zhao, Lantian Li, Dong Wang and  April Pu]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c5/TRP-20160024.pdf TRP-20160024 Decision Making Based on Cohort Scores for Speaker Verification, Lantian Li, Renyu Wang, Gang Wang, Caixia Wang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
[[文件:Multi-lingual.jpg|300px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f0/TRP-20160023.pdf TRP-20160023 AP16-OL7: A Multilingual Database for Oriental Languages and A Language Recognition Baseline, Dong Wang, Lantian Li, Difei Tang and Qing Chen]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f8/TRP-20160022.pdf TRP-20160022 System Combination for Short Utterance Speaker Recognition, Lantian Li, Dong Wang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/93/TRP-20160021.pdf TRP-20160021 Improving Short Utterance Speaker Recognition by Modeling Speech Unit Classes, Lantian Li, Dong Wang, Chenhao Zhang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/76/TRP-20160020.pdf TRP-20160020 Feature Transformation For Speaker Verification Under Speaking Rate Mismatch Condition, Askar Rozi, Lantian Li, Dong Wang and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c9/TRP-20160019.pdf TRP-20160019 Language-aware PLDA for Multilingual Speaker Recognition, Askar Rozi, Dong Wang, Lantian Li and Thomas Fang Zheng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/b5/Ijcai16.pdf TRP-20160018 Chinese Song Iambics Generation with Neural Attention-based Model, Qixin Wang, Tianyi Luo, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://tangzy.cslt.org/files/trp_nnet3_config.pdf TRP-20160017 How to Config Kaldi nnet3 (in Chinese), Zhiyuan Tang and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e6/Joint_training_config.pdf TRP-20160016 How to deploy joint training in Kaldi (in Chinese), Hang Luo, Zhiyuan Tang and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7d/Kazak_speech_recognition_chinese.pdf TRP-20160015 How to run ASR system for Kazak (in Chinese), Ying Shi, Zhiyuan Tang，Nurbolat and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/58/TRP-20160014.pdf TRP-20160014 Exploring The Role of Deep Speaker Features for Speaker Verification, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/15/Sda.pdf TRP-20160013 Sparse Discriminative Analysis and Its Application in Distraction Classification, Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/20/Ivector.pdf TRP-20160012 i-vector system in Kaldi (in Chinese) Yixang Chen, Lantian Li and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9c/Replay.pdf TRP-20160011 基于说话人信道相关的录音重放检测若干方法探究 Lantian Li, Yixiang Chen and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/95/Dkeyword.pdf TRP-20160010 Highly Restricted Keyword Selection Based on Sparse Analysis for Uyghur Text Categorization, Dong Wang, Askar Humdulla, Rayilam Parhat, Javier Tejedor]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/2b/RNNG_Code_Use_Guide_simplified.pdf TRP-20160009: RNNG Code User Guide, Shiyue Zhang and Yang Feng]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c4/Memory-atten-model-public.pdf TRP-20160008: Different styles of poetry generation based on memory model, Jiyuan Zhang,Yang Feng and Dong Wang]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:dis.png|300px]]&lt;br /&gt;
*[[媒体文件:sda.pdf|TRP-20160013: Sparse Discriminative Analysis and Its Application in Distraction Classification, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:dkeyword.png|300px]]&lt;br /&gt;
*[[媒体文件:dkeyword.pdf|TRP-20160010: Highly Restricted Keyword Selection Based on Sparse Analysis for Uyghur Text Categorization, Dong Wang, Askar Humdulla, Rayilam Parhat, Javier Tejedor]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Rnng user guide.png|300px]]&lt;br /&gt;
*[[媒体文件:RNNG Code Use Guide simplified.pdf|TRP-20160009: RNNG Code User Guide, Shiyue Zhang and Yang Feng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Memory_attention_model.jpg|300px]]&lt;br /&gt;
*[[媒体文件:Memory-atten-model-public.pdf|TRP-20160008: Different styles of poetry generation based on memory model, Jiyuan Zhang,Yang Feng and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Distraction.png|300px]]&lt;br /&gt;
*[[媒体文件:Distraction.pdf|TRP-20160007: Distraction Detection Using Sparse Discriminative Analysis, Dong Wang and Guozhen Zhao]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Visual_fig.png|200px]]&lt;br /&gt;
*[[媒体文件:Visual.pdf|TRP-20160006: Visualization Analysis for Recurrent Networks, Zhiyuan Tang, Ying Shi and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件: FIG-TRP2016.png |200px]]&lt;br /&gt;
*[[媒体文件:Distributed_Representation_Learning_for_Knowledge_Graphs_with_Entity_Descriptions.pdf| TRP-20160005: Distributed Representation Learning for Knowledge Graphs with Entity Descriptions; Miao Fan, Qiang Zhou, Thomas Fang Zheng, Ralph Grishman]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Aikefu.bmp|200px]]&lt;br /&gt;
*[[媒体文件:Cslt-trp-template.pdf|TRP-20160004: A Review of Neural QA, Tianyi Luo and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:simpair.png|200px]]&lt;br /&gt;
*[[媒体文件:Trp20160003.pdf|TRP-20160003: A study of Similar Word Model for Unfrequent Word Enhancement in Speech Recognition, Xi Ma, Dong Wang and Javier Tejedor]]&lt;br /&gt;
&lt;br /&gt;
[[文件:low-freq.png|200px]]&lt;br /&gt;
*[[媒体文件:How to deal with low frequency words.pdf|TRP-20160002: Low-Frequency Words Embedding, Chao Xing, Yiqiao Pan, Dong Wang]]&lt;br /&gt;
[[文件:maxmargin.png|200px]]&lt;br /&gt;
*[[媒体文件:Max-margin.pdf|TRP-20160001: Max-margin metric learning for speaker recognition, Lantian Li, Chao Xing, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:lowv.png|200px]]&lt;br /&gt;
*[[媒体文件:Lowv.pdf|TRP-20150033: Learning Ordered Word Representations, Xiaoxi Wang, Chao Xing, Dong Wang, Rong Liu and Yiqiao Pan]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Adamax.png|200px]]&lt;br /&gt;
*[[媒体文件:Adamax Online Training for Speech Recognition.pdf|TRP-20150032: Adamax Online Training for Speech Recognition, Xiangyu Zeng, Zhiyong Zhang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Ptrnets.png|200px]]&lt;br /&gt;
*[[媒体文件: Ptrnets.pdf|TRP-20150031: An implementation of Pointer-Networks with Extensions, Xiaoxi Wang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:dvad.png|200px]]&lt;br /&gt;
*[[媒体文件:dvad.pdf|TRP-20150030: DNN-based Voice Activity Detection for Speaker Recognition, Fanhu Bie, Zhiyong Zhang, Dong Wang, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:uyghur.jpg|200px]]&lt;br /&gt;
*[[媒体文件:urghur.pdf|TRP-20150029: THUYG-20：A Free Uyghur Speech Database, Askar Rozi, Shi Yin, Zhiyong Zhang, Dong Wang,  Askar Hamdulla]]&lt;br /&gt;
&lt;br /&gt;
[[文件:nnpre.jpg|200px]]&lt;br /&gt;
*[[媒体文件:nnpre.pdf|TRP-20150028: Knowledge Transfer Pre-training, Zhiyuan Tang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:mmargin.png|200px]]&lt;br /&gt;
*[[媒体文件:mmargin.pdf|TRP-20150027: Max-Margin Metric Learning for Speaker Recognition, Lantian Li, Chao Xing, Dong Wang, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:binary.jpg|200px]]&lt;br /&gt;
*[[媒体文件:binary.pdf|TRP-20150026: Binary Speaker Embedding, Lantian Li, Chao Xing, Dong Wang, Kaimin Yu, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:rnnrl.png|200px]]&lt;br /&gt;
*[[媒体文件:rnnrl.pdf|TRP-20150025: Relation Classification via Recurrent Neural Network, Dong Xu Zhang, Dong Wang, Rong Liu]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:dplda.png|200px]]&lt;br /&gt;
*[[媒体文件:dplda.pdf|TRP-20150024: Learning from LDA using Deep Neural Networks, Dongxu Zhang, Tianyi Luo and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:jsrl.png|200px]]&lt;br /&gt;
*[[媒体文件:jsrl.pdf|TRP-20150023: Joint Semantic Relevance Learning with Text Data and Graph Knowledge, Dongxu Zhang, Bin Yuan, Dong Wang, Rong Liu]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:listnet.png|200px]]&lt;br /&gt;
*[[媒体文件:listnet.pdf|TRP-20150022: Stochastic Top-k ListNet, Tianyi Luo, Dong Wang, Rong Liu, Yiqiao Pan]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:segvector.png|200px]]&lt;br /&gt;
*[[媒体文件:segvector.pdf|TRP-20150021: Improved Deep Speaker Feature Learning for Text-Dependent Speaker Recognition, Lantian Li, Yiye Lin, Zhiyong Zhang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:Vmclass.png|200px]]&lt;br /&gt;
*[[媒体文件:Vmclass.pdf|TRP-20150020: Document Classification with Spherical Word Vectors, Yiqiao Pan, Chao Xing, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:Tlearn.png|200px]]&lt;br /&gt;
*[[媒体文件:Tlearn.pdf|TRP-20150019: Transfer Learning for Speech and Language Processing, Dong Wang and Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Songcisample.png|200px]]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7a/Cslt20150018_revisedversion.pdf TRP-20150018:Chinese Song Iambics Generation with Neural Attention-based Model, Qixin Wang, Tianyi Luo, Dong Wang, Chao Xing]&lt;br /&gt;
&lt;br /&gt;
[[文件:database.jpg|200px]]&lt;br /&gt;
*[[媒体文件:Thuyg20-sre.pdf|TRP-20150017: AN OPEN/FREE DATABASE AND BENCHMARK FOR UYGHUR SPEAKER RECOGNITION, Askar Rozi, Dong Wang, Zhiyong Zhang, Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Thchs.png|200px]]&lt;br /&gt;
*[[媒体文件:Thchs30.pdf|TRP-20150016: THCHS-30 : A Free Chinese Speech Corpus, Dong Wang and Xuewei Zhang]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[文件:Su.jpg|200px]]&lt;br /&gt;
*[[媒体文件:SUSR.pdf|TRP-20150015: Improving Short Utterance Speaker Recognition by Modeling Speech Unit Classes, Chenhao Zhang, Dong Wang, Lantian Li and Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:dv.png|200px]]&lt;br /&gt;
*[[媒体文件:Dvector.pdf|TRP-20150014: Deep Speaker Vectors for Semi Text-independent Speaker Verification, Lantian Li, Dong Wang, Zhiyong Zhang and Thomas Fang Zheng]]&lt;br /&gt;
&lt;br /&gt;
[[文件:dark.png|200px]]&lt;br /&gt;
*[[媒体文件:Dark.pdf|TRP-20150013: Recurrent Neural Network Training with Dark Knowledge Transfer, Dong Wang, Chao Liu, Zhiyuan Tang, Zhiyong Zhang, Mengyuan Zhao]]&lt;br /&gt;
&lt;br /&gt;
[[文件:PBE.png|200px]]&lt;br /&gt;
*[[媒体文件:Probabilistic_Belief_Embedding_for_Knowledge_Population_(TRP).pdf|TRP-20150012: Probabilistic Belief Embedding for Large-scale Knowledge Population. Miao Fan, Qiang Zhou, Andrew Abel, Thomas Fang Zheng and Ralph Grishman]]&lt;br /&gt;
&lt;br /&gt;
[[文件:fst-fw.png|200px]]&lt;br /&gt;
*[[媒体文件:wpair.pdf|TRP-20150011: Recognize Foreign Low-Frequency Words with Similar Pairs, Xi Ma, Xiaoxi Wang and Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Cdae.png|200px]]&lt;br /&gt;
*[[媒体文件:Music.pdf|TRP-20150010: Music Removal by Denoising Autoencoder in Speech Recognition. Mengyuan Zhao, Dong Wang, Zhiyong Zhang and Xuewei Zhang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:vmfsne.png|200px]]&lt;br /&gt;
*[[媒体文件:Cslt-trp-template-vmfsne.pdf|TRP-20150009: VMF-SNE: Embedding for Spherical Data. Mian Wang, Dong Wang]]&lt;br /&gt;
&lt;br /&gt;
[[文件:ros.png|200px]]&lt;br /&gt;
*[[媒体文件:Ros.pdf|TRP-20150008: Learning Speech Rate in Speech Recognition. Xiangyu Zeng, Shi Yin.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Dnnvadstru.png|200px]]&lt;br /&gt;
*[[媒体文件:DNNVADTRP.pdf|TRP-20150007: Voice Activity Detection Based on Deep Neural Networks. Shi Yin.]] ([[媒体文件:Vad.pdf|Paper submitted to Tsinghua Xuebao]])&lt;br /&gt;
&lt;br /&gt;
[[文件:Uyghur-training.png|200px]]&lt;br /&gt;
*[[媒体文件:UyghurTRP.pdf|TRP-20150006: Low-resource Uyghur Acoustic Model Training based on Cross-lingual Features. Shi Yin.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Beam-forming.png|200px]]&lt;br /&gt;
*[[媒体文件:Multi-Microphones_Reverberation_Cancellation_for_Distant_Speech_Recognition.pdf|TRP-20150005: Multi-Microphones_Reverberation_Cancellation_for_Distant_Speech_Recognition. Xuewei Zhang.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Clipping-speaker.png|200px]]&lt;br /&gt;
*[[媒体文件:Clip.pdf|TRP-20150004: Detection and Reconstruction of Clipped Speech in Speaker Recognition. Fanhu Bie et al.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Semi-dynamic-embedding.png|200px]]&lt;br /&gt;
*[[媒体文件:Taglm.pdf|TRP-20150003: Semi-Dynamic Graph Embedding for Large Scale Language Model Adaptation. Bin Yuan et al.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Speaker-discriminative-score.png|200px]]&lt;br /&gt;
*[[媒体文件:DNN-based Discriminative Scoring for Speaker.pdf|TRP-20150002: DNN-based Discriminative Scoring for Speaker Recognition Based on i-vector. Jun Wang et al. ]]&lt;br /&gt;
&lt;br /&gt;
[[文件:Noisy-traiing.png|200px]]&lt;br /&gt;
*[[媒体文件:Noisy Training for Deep Neural Networks in.pdf|TRP-20150001: Noisy Training for Deep Neural Networks in Speech Recognition. Shi Yin et al.]]&lt;br /&gt;
&lt;br /&gt;
[[文件:English-scroing.png|200px]]&lt;br /&gt;
*[[媒体文件:AutomaticScoringforEnglishUtterances.pdf|TRP-20140001: Automatic Scoring for English Utterances. Bo Hu.]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[媒体文件:Template.rar|CSLT-TRP latex template]]&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf</id>
		<title>文件:Technical report - poetry generation.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Technical_report_-_poetry_generation.pdf"/>
				<updated>2019-11-08T23:13:46Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E5%BE%AE%E4%BF%A1%E6%88%AA%E5%9B%BE_20190802145503.png</id>
		<title>文件:微信截图 20190802145503.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E5%BE%AE%E4%BF%A1%E6%88%AA%E5%9B%BE_20190802145503.png"/>
				<updated>2019-08-02T05:39:08Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_2d.jpg</id>
		<title>文件:Lv 2d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_2d.jpg"/>
				<updated>2019-07-26T06:45:25Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_2d.jpg</id>
		<title>文件:Lm 2d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_2d.jpg"/>
				<updated>2019-07-26T06:44:57Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_07240456_12.png</id>
		<title>文件:Lv 07240456 12.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_07240456_12.png"/>
				<updated>2019-07-25T08:21:23Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv3.png</id>
		<title>文件:Lv3.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv3.png"/>
				<updated>2019-07-25T08:09:08Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg</id>
		<title>文件:Lv 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg"/>
				<updated>2019-07-24T12:36:46Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg</id>
		<title>文件:Lv 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg"/>
				<updated>2019-07-24T12:29:50Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg</id>
		<title>文件:Lv 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg"/>
				<updated>2019-07-24T12:28:42Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg</id>
		<title>文件:Lv 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg"/>
				<updated>2019-07-24T12:28:07Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg</id>
		<title>文件:Lv 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg"/>
				<updated>2019-07-24T12:26:04Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d_3.jpg</id>
		<title>文件:Yun 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d_3.jpg"/>
				<updated>2019-07-24T12:25:32Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Yun 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d_3.jpg</id>
		<title>文件:Lm 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d_3.jpg"/>
				<updated>2019-07-24T12:24:08Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lm 3d 3.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d_3.jpg</id>
		<title>文件:Lm 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d_3.jpg"/>
				<updated>2019-07-24T11:18:16Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d_3.jpg</id>
		<title>文件:Yun 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d_3.jpg"/>
				<updated>2019-07-24T11:17:54Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg</id>
		<title>文件:Lv 3d 3.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_3.jpg"/>
				<updated>2019-07-24T11:16:07Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d_2.jpg</id>
		<title>文件:Lm 3d 2.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d_2.jpg"/>
				<updated>2019-07-24T11:05:55Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_2.jpg</id>
		<title>文件:Lv 3d 2.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d_2.jpg"/>
				<updated>2019-07-24T11:05:29Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d_2.jpg</id>
		<title>文件:Yun 3d 2.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d_2.jpg"/>
				<updated>2019-07-24T11:04:59Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d.jpg</id>
		<title>文件:Lm 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d.jpg"/>
				<updated>2019-07-24T04:46:39Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lm 3d.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d.jpg</id>
		<title>文件:Lm 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d.jpg"/>
				<updated>2019-07-24T04:46:28Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lm 3d.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d.jpg</id>
		<title>文件:Lv 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d.jpg"/>
				<updated>2019-07-24T04:45:54Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d.jpg</id>
		<title>文件:Lv 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d.jpg"/>
				<updated>2019-07-24T04:45:39Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Lv 3d.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d.jpg</id>
		<title>文件:Yun 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d.jpg"/>
				<updated>2019-07-24T04:44:41Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Yun 3d.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d.jpg</id>
		<title>文件:Yun 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d.jpg"/>
				<updated>2019-07-24T04:44:13Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Liuyibo上传“文件:Yun 3d.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d.jpg</id>
		<title>文件:Lm 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lm_3d.jpg"/>
				<updated>2019-07-23T12:22:36Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d.jpg</id>
		<title>文件:Lv 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Lv_3d.jpg"/>
				<updated>2019-07-23T12:22:22Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d.jpg</id>
		<title>文件:Yun 3d.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Yun_3d.jpg"/>
				<updated>2019-07-23T12:22:01Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Heatmap.png</id>
		<title>文件:Heatmap.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Heatmap.png"/>
				<updated>2019-07-05T06:21:13Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Abstract_for_poem_new.doc</id>
		<title>文件:Abstract for poem new.doc</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Abstract_for_poem_new.doc"/>
				<updated>2019-06-19T07:30:56Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Abstract_for_poem_new.doc&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Abstract_for_poem_new.doc&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Abstract_for_poem.doc</id>
		<title>文件:Abstract for poem.doc</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Abstract_for_poem.doc"/>
				<updated>2019-06-18T08:32:13Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：Abstract for poem generation&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Abstract for poem generation&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E8%AF%97%E8%AF%8D_%E5%85%B3%E9%94%AE%E8%AF%8Dplan%E8%BF%87%E7%A8%8B_%E5%8E%9F%E7%89%88.pdf</id>
		<title>文件:诗词 关键词plan过程 原版.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E8%AF%97%E8%AF%8D_%E5%85%B3%E9%94%AE%E8%AF%8Dplan%E8%BF%87%E7%A8%8B_%E5%8E%9F%E7%89%88.pdf"/>
				<updated>2019-06-06T05:19:58Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：诗词&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;诗词&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kw_plan_for_test.txt</id>
		<title>文件:Kw plan for test.txt</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kw_plan_for_test.txt"/>
				<updated>2019-06-06T02:56:59Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：诗词生成的中间文件&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;诗词生成的中间文件&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2018</id>
		<title>2018</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2018"/>
				<updated>2019-06-05T04:30:56Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;2018-2019&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
== ASR ==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:语音识别系统.pdf  | 181107-吴嘉瑶-Overview of ASR]]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/38/Unsupervised_pre-training_for_speech_recognition.pdf  190515-董文伟-Unsupervised_pre-training_for_speech_recognition]&lt;br /&gt;
&lt;br /&gt;
==VPR==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181107-SRE-YJW.pptx | 181107-于嘉威-Overview of VPR]]&lt;br /&gt;
&lt;br /&gt;
*[https://arxiv.org/pdf/1808.00158.pdf 181114-VPR from raw waveform]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:190306-zy-report.pptx | 190306-张阳 experiments report]]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/82/I-vector_representation_based_on_GMM_and_DNN.pdf 190418-齐诏娣-I-vector_representation_based_on_GMM_and_DNN]&lt;br /&gt;
&lt;br /&gt;
==LRE==&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/80/Zero-resource_LID.pdf  190529-于嘉威-Zero-resource-LID]&lt;br /&gt;
&lt;br /&gt;
==Scoring==&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/5c/190117-DWW-Scoring.pptx  190117-董文伟-Overview of Scoring]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/bc/Kandeng-English-scoring.pdf  190425-邓侃-English Evaluation techniques]&lt;br /&gt;
&lt;br /&gt;
==Text generation==&lt;br /&gt;
&lt;br /&gt;
*[https://arxiv.org/pdf/1803.07133.pdf Overview-2018-Neural Text Generation: Past, Present and Beyond]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Conversational system==&lt;br /&gt;
*[https://arxiv.org/pdf/1809.08267.pdf Overview-2018-Neural Approaches to Conversational AI: Question Answering, Task-Oriented Dialogue and Chatbots: A Unified View] [https://www.microsoft.com/en-us/research/uploads/prod/2018/07/neural-approaches-to-conversational-AI.pdf slides]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Deep architecture and mechanism==&lt;br /&gt;
&lt;br /&gt;
*[https://arxiv.org/pdf/1510.00149.pdf 181114-deep compression]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:Tensor factorization neural net.pdf | 181212-何丹-Tensor factorization neural net]]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:Ensemble_2019.5.8.pdf | 190508-吴嘉瑶-ensemble of NN]]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:Knowledge_distillation_19.5.29.pdf | 190529-吴嘉瑶-knowledge distillation]]&lt;br /&gt;
&lt;br /&gt;
==Learning theory==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181205 Meta-Learning and Zero-Shot Learning JXQ.pdf | 181205 姜修齐 Meta-Learning and Zero-Shot Learning]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Platform and tool==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181116-张阳-Conda_&amp;amp;_Python.pdf | 181116-张阳-Conda &amp;amp; Python]]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181117-张阳-Linux.pdf | 181117-张阳-Linux]]&lt;br /&gt;
&lt;br /&gt;
*[https://pan.baidu.com/s/13qf-GqOSE4DK7q5VjbtWNA    PyTorch 1.0 - Bringing research and production together Presentation]&lt;br /&gt;
&lt;br /&gt;
==NLP language model==&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/07/Bert%E7%AE%80%E4%BB%8B.pdf   Bert模型简介]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/6f/Punc_prediction_%E6%80%BB%E7%BB%93.pdf  bert based punctuation_prediction 实验总结]&lt;br /&gt;
&lt;br /&gt;
==Medical Image==&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/b1/%E7%AD%94%E8%BE%A92.pdf   190522-刘逸博-基于人工智能的乳腺癌诊断]&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2018</id>
		<title>2018</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2018"/>
				<updated>2019-06-05T04:30:24Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;2018-2019&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
== ASR ==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:语音识别系统.pdf  | 181107-吴嘉瑶-Overview of ASR]]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/38/Unsupervised_pre-training_for_speech_recognition.pdf  190515-董文伟-Unsupervised_pre-training_for_speech_recognition]&lt;br /&gt;
&lt;br /&gt;
==VPR==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181107-SRE-YJW.pptx | 181107-于嘉威-Overview of VPR]]&lt;br /&gt;
&lt;br /&gt;
*[https://arxiv.org/pdf/1808.00158.pdf 181114-VPR from raw waveform]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:190306-zy-report.pptx | 190306-张阳 experiments report]]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/82/I-vector_representation_based_on_GMM_and_DNN.pdf 190418-齐诏娣-I-vector_representation_based_on_GMM_and_DNN]&lt;br /&gt;
&lt;br /&gt;
==LRE==&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/80/Zero-resource_LID.pdf  190529-于嘉威-Zero-resource-LID]&lt;br /&gt;
&lt;br /&gt;
==Scoring==&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/5c/190117-DWW-Scoring.pptx  190117-董文伟-Overview of Scoring]&lt;br /&gt;
&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/bc/Kandeng-English-scoring.pdf  190425-邓侃-English Evaluation techniques]&lt;br /&gt;
&lt;br /&gt;
==Text generation==&lt;br /&gt;
&lt;br /&gt;
*[https://arxiv.org/pdf/1803.07133.pdf Overview-2018-Neural Text Generation: Past, Present and Beyond]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Conversational system==&lt;br /&gt;
*[https://arxiv.org/pdf/1809.08267.pdf Overview-2018-Neural Approaches to Conversational AI: Question Answering, Task-Oriented Dialogue and Chatbots: A Unified View] [https://www.microsoft.com/en-us/research/uploads/prod/2018/07/neural-approaches-to-conversational-AI.pdf slides]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Deep architecture and mechanism==&lt;br /&gt;
&lt;br /&gt;
*[https://arxiv.org/pdf/1510.00149.pdf 181114-deep compression]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:Tensor factorization neural net.pdf | 181212-何丹-Tensor factorization neural net]]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:Ensemble_2019.5.8.pdf | 190508-吴嘉瑶-ensemble of NN]]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:Knowledge_distillation_19.5.29.pdf | 190529-吴嘉瑶-knowledge distillation]]&lt;br /&gt;
&lt;br /&gt;
==Learning theory==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181205 Meta-Learning and Zero-Shot Learning JXQ.pdf | 181205 姜修齐 Meta-Learning and Zero-Shot Learning]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
==Platform and tool==&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181116-张阳-Conda_&amp;amp;_Python.pdf | 181116-张阳-Conda &amp;amp; Python]]&lt;br /&gt;
&lt;br /&gt;
*[[媒体文件:181117-张阳-Linux.pdf | 181117-张阳-Linux]]&lt;br /&gt;
&lt;br /&gt;
*[https://pan.baidu.com/s/13qf-GqOSE4DK7q5VjbtWNA    PyTorch 1.0 - Bringing research and production together Presentation]&lt;br /&gt;
&lt;br /&gt;
==NLP language model==&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/07/Bert%E7%AE%80%E4%BB%8B.pdf   Bert模型简介]&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/6f/Punc_prediction_%E6%80%BB%E7%BB%93.pdf  bert based punctuation_prediction 实验总结]&lt;br /&gt;
&lt;br /&gt;
==Medical Image==&lt;br /&gt;
*[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/b1/%E7%AD%94%E8%BE%A92.pdf   190522-刘逸博-基于人工智能的乳腺癌诊断]&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E7%AD%94%E8%BE%A92.pdf</id>
		<title>文件:答辩2.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:%E7%AD%94%E8%BE%A92.pdf"/>
				<updated>2019-06-05T04:19:22Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：5月22日周会分享-刘逸博&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;5月22日周会分享-刘逸博&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2019-05-24</id>
		<title>2019-05-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2019-05-24"/>
				<updated>2019-05-24T00:15:43Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yibo Liu&lt;br /&gt;
|| &lt;br /&gt;
* Released a version on github https://github.com/CSLT-THU/Vivi_3.0 . Added Readme.&lt;br /&gt;
||&lt;br /&gt;
* Do some experiments to solve the problem of bad effect on the last 2 sentences, e.g. modify training method, modify loss function.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiuqi Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Compiling the data of planning and training set, unifying data formats&lt;br /&gt;
|| &lt;br /&gt;
* Re-plan training set using planning results&lt;br /&gt;
* Doing summary work&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiayao Wu&lt;br /&gt;
|| &lt;br /&gt;
* did further research on sentence-level resample boosting and tried a frame-level method.&lt;br /&gt;
||&lt;br /&gt;
* prepare for the weekly report.&lt;br /&gt;
* keep on doing frame-level research.&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhaodi Qi&lt;br /&gt;
|| &lt;br /&gt;
* Do experiment on Multi-language BN &lt;br /&gt;
|| &lt;br /&gt;
* Summary of the experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiawei Yu&lt;br /&gt;
|| &lt;br /&gt;
* Done part of ivector and dvector in-set LID experiment.&lt;br /&gt;
* Experimented with different test utterance lengths (1s, 3s, full length) for zero-resource LID.&lt;br /&gt;
|| &lt;br /&gt;
* Test the in-set languages with different test utterance lengths (1s, 3s).&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Wenwei Dong&lt;br /&gt;
||&lt;br /&gt;
* -&lt;br /&gt;
|| &lt;br /&gt;
* Try speaker level correlation.&lt;br /&gt;
* Collect different language background speaker's English data to train info-GAN.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xueyi Wang&lt;br /&gt;
||&lt;br /&gt;
* reproduced the experiments of v-vector and c-vector; the result is close to the one in the paper&lt;br /&gt;
|| &lt;br /&gt;
* get down to unsupervised PLDA&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ziya Zhou&lt;br /&gt;
|| &lt;br /&gt;
* Downloaded videos of 258 celebrities &lt;br /&gt;
* Complete the downloading task by next Monday&lt;br /&gt;
||&lt;br /&gt;
* Edit the videos&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|kaicheng li&lt;br /&gt;
||&lt;br /&gt;
* Collect celebrity audio data (100)&lt;br /&gt;
|| &lt;br /&gt;
* Continue collecting celebrity audio data&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haolin Chen&lt;br /&gt;
||&lt;br /&gt;
* Maths: statistical inference, integer programming&lt;br /&gt;
|| &lt;br /&gt;
* Continue learning statistics&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
||&lt;br /&gt;
* Completed the testing table of asr engine&lt;br /&gt;
|| &lt;br /&gt;
* Test the concurrence of asr engine&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Vocab.txt</id>
		<title>文件:Vocab.txt</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Vocab.txt"/>
				<updated>2019-03-28T07:07:35Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:58k_lr%3D1_batchsize%3D80_epoch%3D7.jpg</id>
		<title>文件:58k lr=1 batchsize=80 epoch=7.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:58k_lr%3D1_batchsize%3D80_epoch%3D7.jpg"/>
				<updated>2019-03-27T09:01:51Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:58k_lr%3D1_batchsize%3D80_epoch%3D29.jpg</id>
		<title>文件:58k lr=1 batchsize=80 epoch=29.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:58k_lr%3D1_batchsize%3D80_epoch%3D29.jpg"/>
				<updated>2019-03-25T09:00:13Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Poetry_baselines_investigation.pdf</id>
		<title>文件:Poetry baselines investigation.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Poetry_baselines_investigation.pdf"/>
				<updated>2019-03-13T05:25:38Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2019-03-13</id>
		<title>2019-03-13</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2019-03-13"/>
				<updated>2019-03-13T05:24:42Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：以“{| class=&amp;quot;wikitable&amp;quot; !People !! Last Week !! This Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;) |-     |- |Yibo Liu ||  * || *  || *   |-     |- |Xiuqi J...”为内容创建页面&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! Last Week !! This Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yibo Liu&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiuqi Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiayao Wu&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhaodi Qi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiawei Yu&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Dan He&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yang Zhang&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Wenwei Dong&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Status_report</id>
		<title>Status report</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Status_report"/>
				<updated>2019-03-13T05:23:50Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;[[2019-03-13]]&lt;br /&gt;
&lt;br /&gt;
[[2019-02-27]]&lt;br /&gt;
&lt;br /&gt;
[[2019-02-20]]&lt;br /&gt;
&lt;br /&gt;
[[2019-01-23]]&lt;br /&gt;
&lt;br /&gt;
[[2019-01-16]]&lt;br /&gt;
&lt;br /&gt;
[[2019-01-09]]&lt;br /&gt;
&lt;br /&gt;
[[2019-01-02]]&lt;br /&gt;
&lt;br /&gt;
[[2018-12-26]]&lt;br /&gt;
&lt;br /&gt;
[[2018-12-19]]&lt;br /&gt;
&lt;br /&gt;
[[2018-12-12]]&lt;br /&gt;
&lt;br /&gt;
[[2018-12-05]]&lt;br /&gt;
&lt;br /&gt;
[[2018-11-28]]&lt;br /&gt;
&lt;br /&gt;
[[Past]]&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2019-02-27</id>
		<title>2019-02-27</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2019-02-27"/>
				<updated>2019-03-04T07:03:38Z</updated>
		
		<summary type="html">&lt;p&gt;Liuyibo：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! Last Week !! This Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yibo Liu&lt;br /&gt;
|| &lt;br /&gt;
* Trained the model.&lt;br /&gt;
||&lt;br /&gt;
* Reconstruct the model according to Jiyuan's work.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Xiuqi Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiayao Wu&lt;br /&gt;
|| &lt;br /&gt;
* did some experiments on the small model &amp;amp; the node_sparse model.&lt;br /&gt;
* researched different layers' importance in a noisy environment on a simple DNN using prune_node_method, and am waiting for the results.&lt;br /&gt;
||&lt;br /&gt;
* change the environment and research layers' importance under different factors.&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zhaodi Qi&lt;br /&gt;
|| &lt;br /&gt;
* Did some experiments on xvector&lt;br /&gt;
|| &lt;br /&gt;
* Adjust training data and continue experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiawei Yu&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Dan He&lt;br /&gt;
|| &lt;br /&gt;
*Study code about TTlayer&lt;br /&gt;
*Collect the latest information about tensor train decompositions&lt;br /&gt;
|| &lt;br /&gt;
*Directly perform a TT-decomposition on a fully connected layer trained parameter&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yang Zhang&lt;br /&gt;
||&lt;br /&gt;
* read two papers&lt;br /&gt;
* build a vae in tensorflow&lt;br /&gt;
* trained some model, and did some test in d/x -vector&lt;br /&gt;
|| &lt;br /&gt;
* continue to train and test my model&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Wenwei Dong&lt;br /&gt;
||&lt;br /&gt;
* did some GAN experiment&lt;br /&gt;
|| &lt;br /&gt;
*test model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liuyibo</name></author>	</entry>

	</feed>