<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="http://index.cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="zh-cn">
		<id>http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Sunhaoran</id>
		<title>cslt Wiki - 用户贡献 [zh-cn]</title>
		<link rel="self" type="application/atom+xml" href="http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Sunhaoran"/>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E7%89%B9%E6%AE%8A:%E7%94%A8%E6%88%B7%E8%B4%A1%E7%8C%AE/Sunhaoran"/>
		<updated>2026-04-08T08:57:28Z</updated>
		<subtitle>用户贡献</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-06-13</id>
		<title>2022-06-13</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-06-13"/>
				<updated>2022-06-13T10:55:24Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Valid test, INFO)&lt;br /&gt;
* Glance at ICASSP 2022 (1/2)&lt;br /&gt;
* Proof reading&lt;br /&gt;
||&lt;br /&gt;
* PUFA delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* school project proposal&lt;br /&gt;
* baseline: Few shot kws&lt;br /&gt;
||&lt;br /&gt;
* continue on kws baseline &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some for journal paper&lt;br /&gt;
* speaker embedding code&lt;br /&gt;
* i-vector for VCTK&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review cnc code, write documents&lt;br /&gt;
||&lt;br /&gt;
* ICASSP review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* PUFA report, code, model&lt;br /&gt;
||&lt;br /&gt;
* Augment and mask&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* datasets-CMU_Wilderness&lt;br /&gt;
* Preparation for Paper Sharing&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Unsupervised ASR experiments&lt;br /&gt;
||&lt;br /&gt;
* Unsupervised ASR experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Test on face verification&lt;br /&gt;
||&lt;br /&gt;
* Go on test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* prepare for the final exam&lt;br /&gt;
||&lt;br /&gt;
* prepare for the final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* Research on new projects&lt;br /&gt;
||&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-06-06</id>
		<title>2022-06-06</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-06-06"/>
				<updated>2022-06-06T10:57:06Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AIGraph-4 (1/2)&lt;br /&gt;
* AIShell course recording&lt;br /&gt;
* TASLP paper ready for submission&lt;br /&gt;
||&lt;br /&gt;
* TASLP submission&lt;br /&gt;
* AIGraph-4 done&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (CN-Celeb3)&lt;br /&gt;
* PUFA report (1/2)&lt;br /&gt;
||&lt;br /&gt;
* PUFA delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Location guided attention kws (Samsung's work in interspeech 2021)&lt;br /&gt;
* CN3&lt;br /&gt;
||&lt;br /&gt;
* kws baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* quantitative evaluation for speaker embedding&lt;br /&gt;
* cycleflow code&lt;br /&gt;
||&lt;br /&gt;
* more explorations for speaker embedding&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb data&lt;br /&gt;
||&lt;br /&gt;
* Update CNCeleb dataset toolkits &amp;amp; docs&lt;br /&gt;
* Course pres&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* PUFA report&lt;br /&gt;
* CN3&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Preparation for Paper Sharing&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Generate phone-level alignment labels&lt;br /&gt;
* CNC3&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb3 data&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* CN3&lt;br /&gt;
|| &lt;br /&gt;
* aishell KALDI course&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-06-06</id>
		<title>2022-06-06</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-06-06"/>
				<updated>2022-06-06T10:55:05Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AIGraph-4 (1/2)&lt;br /&gt;
* AIShell course recording&lt;br /&gt;
* TASLP paper ready for submission&lt;br /&gt;
||&lt;br /&gt;
* TASLP submission&lt;br /&gt;
* AIGraph-4 done&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Location guided attention kws (Samsung's work in interspeech 2021)&lt;br /&gt;
* CN3&lt;br /&gt;
||&lt;br /&gt;
* kws baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* quantitative evaluation for speaker embedding&lt;br /&gt;
* cycleflow code&lt;br /&gt;
||&lt;br /&gt;
* more explorations for speaker embedding&lt;br /&gt;
* TASLP submission&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb data&lt;br /&gt;
||&lt;br /&gt;
* Update CNCeleb dataset toolkits &amp;amp; docs&lt;br /&gt;
* Course pres&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* PUFA report&lt;br /&gt;
* CN3&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Generate phone-level alignment labels&lt;br /&gt;
* CNC3&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb3 data&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* CN3&lt;br /&gt;
|| &lt;br /&gt;
* aishell KALDI course&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-05-23</id>
		<title>2022-05-23</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-05-23"/>
				<updated>2022-05-23T10:57:42Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI-Graph Part3 (1/2) completed&lt;br /&gt;
* Keep on review for interpretation in ASR/SID&lt;br /&gt;
||&lt;br /&gt;
* AI-Graph Part3 (2/2) &lt;br /&gt;
* Keep on review for interpretation in ASR/SID&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (CN-Celeb3)&lt;br /&gt;
* Release SegTool v1.0&lt;br /&gt;
* Postdoc stuff&lt;br /&gt;
||&lt;br /&gt;
* Finish PUFA project delivery&lt;br /&gt;
* CN-Celeb3&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* graduation reply&lt;br /&gt;
||&lt;br /&gt;
* subjective evaluation&lt;br /&gt;
* quantitative experiments for speaker embedding&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Course projects&lt;br /&gt;
* 1of3 website&lt;br /&gt;
* CNCeleb pipeline&lt;br /&gt;
||&lt;br /&gt;
* CNCeleb&lt;br /&gt;
* Course projects&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*Some project task&lt;br /&gt;
*PUFA report&lt;br /&gt;
||&lt;br /&gt;
*Finish PUFA report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Tibetan speech recognition experiment based on WAV2VEC feature(debug)&lt;br /&gt;
* Read paper Improving Speech Recognition for Indic Languages using Language Model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Train supervised ASR using w2v features&lt;br /&gt;
* Count occurrences of each word&lt;br /&gt;
||&lt;br /&gt;
* Count occurrences of each word&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Face verification test&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
* Go on test&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* concurrent testing of speaker-diarization&lt;br /&gt;
* calculate RT of speaker-diarization&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-05-16</id>
		<title>2022-05-16</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-05-16"/>
				<updated>2022-05-16T11:04:38Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Graph-AI book&lt;br /&gt;
* Review paper for interpretability for speech processing&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* reproduce Google attention RNN-T system&lt;br /&gt;
||&lt;br /&gt;
* continue work on attention RNN-T&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* vending machines projects&lt;br /&gt;
* experiments for speaker embedding&lt;br /&gt;
||&lt;br /&gt;
* graduation reply&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers&lt;br /&gt;
* Prepare for weekly reading&lt;br /&gt;
||&lt;br /&gt;
* Review papers&lt;br /&gt;
* Finish course projects&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*Some project task&lt;br /&gt;
||&lt;br /&gt;
*Finish PUFA report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Tibetan speech recognition experiment based on MFCC&lt;br /&gt;
* Tibetan speech recognition experiment based on WAV2VEC feature&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Remove speaker information from features&lt;br /&gt;
* Do max-pooling for feature&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Finish speaker verification test&lt;br /&gt;
* Data preparation for face verification&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* deliver the first version of segTool&lt;br /&gt;
* setup the experiment environment of LSH&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* do course projects&lt;br /&gt;
||&lt;br /&gt;
* do course projects&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* aishell KALDI course&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
* Continue aishell KALDI course&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* Research on abusive sound detection&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-25</id>
		<title>2022-04-25</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-25"/>
				<updated>2022-04-25T10:50:19Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Postdoc report (4/5)&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Template)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report (Done)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* investigate CNN channel [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=854 cvss]&lt;br /&gt;
||&lt;br /&gt;
* continue work on CNN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* l1 adv loss&lt;br /&gt;
||&lt;br /&gt;
* subjective evaluation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Pre-process LRW-1000 dataset and test pretrain model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Noss experimental system construction&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
* Generate lexicon  with 10 Clusters&lt;br /&gt;
||&lt;br /&gt;
* Remove the speaker information for phonetic classification&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* continue organizing the code structure for speaker diarization&lt;br /&gt;
* add time license&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-18</id>
		<title>2022-04-18</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-18"/>
				<updated>2022-04-18T10:53:38Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review paper on speech visualization&lt;br /&gt;
||&lt;br /&gt;
* Keep on visualization review paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Toolkit/QA/ECAPA)&lt;br /&gt;
* Postdoc report (2/5)&lt;br /&gt;
* ICASSP 2022 presentation&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Template)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report (Done)&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* School project proposal&lt;br /&gt;
||&lt;br /&gt;
* back to zeus/kws&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* The dissertation&lt;br /&gt;
||&lt;br /&gt;
* Cycle paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
||&lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Project(1/3)&lt;br /&gt;
* study attention&lt;br /&gt;
||&lt;br /&gt;
* Project&lt;br /&gt;
* study methods of pooling related to attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* ABX score for Librispeech&lt;br /&gt;
||&lt;br /&gt;
* Generate lexicon with CTC&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Organize the code structure for speaker diarization&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-21</id>
		<title>2022-03-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-21"/>
				<updated>2022-03-21T10:59:24Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Rewrite the TASLP RC paper&lt;br /&gt;
* Design new architecture for speech engrave&lt;br /&gt;
||&lt;br /&gt;
* Interspeech paper polishing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system and Leaderboard Open)&lt;br /&gt;
* PUFA project delivery [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=832 cvss]&lt;br /&gt;
* Sunine update&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* experiments for cycle loss&lt;br /&gt;
* website of cycleflow&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
||&lt;br /&gt;
* Review Mandarin Lip Reading Datasets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* more Experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* Interspeech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* Go on dictionary generation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-14</id>
		<title>2022-03-14</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-14"/>
				<updated>2022-03-14T10:09:15Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* results of CycleVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/05/CycleVCd128.pdf pdf]&lt;br /&gt;
* tools for vc&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare report of Lip Reading &amp;amp; AVSR&lt;br /&gt;
||&lt;br /&gt;
* Reproduce experiments of reviewed methods&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* redo for MI and CKA&lt;br /&gt;
* achieve dePLDA&lt;br /&gt;
||&lt;br /&gt;
* improve dePLDA&lt;br /&gt;
* do some experiment for score fusion&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:CycleVCd128.pdf</id>
		<title>文件:CycleVCd128.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:CycleVCd128.pdf"/>
				<updated>2022-03-14T10:08:24Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-07</id>
		<title>2022-03-07</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-07"/>
				<updated>2022-03-07T10:49:43Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Odyssey papers done&lt;br /&gt;
* Trans RC paper draft done&lt;br /&gt;
||&lt;br /&gt;
* More literature review for the Trans TC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* the odyssey paper&lt;br /&gt;
* cycle and adversarial training on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/14/CycleVC.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* CycleVC adjustment&lt;br /&gt;
* Cycle loss plus adversarial loss on AutoVC&lt;br /&gt;
* experiments for RC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Do experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Try finetune &amp;amp; pretrain of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* check and restructure experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do experiments on MOSES&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* test binary embedding performance(mAP,speed) on speaker retrieval task&lt;br /&gt;
||&lt;br /&gt;
* do some tests on AE&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* learn and try to use dePLDA&lt;br /&gt;
||&lt;br /&gt;
* finish task of dePLDA&lt;br /&gt;
* prepare for sharing&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:CycleVC.pdf</id>
		<title>文件:CycleVC.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:CycleVC.pdf"/>
				<updated>2022-03-07T10:45:07Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-28</id>
		<title>2022-02-28</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-28"/>
				<updated>2022-02-28T10:59:41Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Investigation on IB/VC &lt;br /&gt;
* Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
* Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* NCFS project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Update Sunine)&lt;br /&gt;
* Finish ASVSpoof response&lt;br /&gt;
* Polish C-P Map&lt;br /&gt;
||&lt;br /&gt;
* Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* Autovc debug&lt;br /&gt;
||&lt;br /&gt;
* AutoVC with adversarial training&lt;br /&gt;
* odyssey paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Done: Prepare data &amp;amp; environment for experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Do experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Asr experiments on different layers of multilingual W2V model &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Find and learn the Moses system&lt;br /&gt;
||&lt;br /&gt;
* Train and test the Moses system&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Train the CN-Celeb baseline&lt;br /&gt;
||&lt;br /&gt;
* Go on training&lt;br /&gt;
* Prepare data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* finish speaker diarization interface and generate diarization figure&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* test and adjust single-modality model&lt;br /&gt;
||&lt;br /&gt;
* learn and use decoupled PLDA for cross-modal test&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-21</id>
		<title>2022-02-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-21"/>
				<updated>2022-02-21T10:58:31Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Experiment on IB control with conditional model [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangd&amp;amp;step=view_request&amp;amp;cvssid=847], rough conclusions were obtained. &lt;br /&gt;
* Refine the AV speaker recognition theoretical part.&lt;br /&gt;
* Review for ICME.&lt;br /&gt;
||&lt;br /&gt;
* Complete ICME review&lt;br /&gt;
* VQMIVC reproduction, update with random mask&lt;br /&gt;
* Some missing papers treatment: (1) true nonlinear LDA (2) CycleFlow (3) Thermal-visual database&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*NSFC Application&lt;br /&gt;
*Materials inverse design investigation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Data release and SR baseline)&lt;br /&gt;
* Submit Tencent AI Lab project&lt;br /&gt;
* Submit M2ASR concluding report&lt;br /&gt;
* Write ASVSpoof response&lt;br /&gt;
||&lt;br /&gt;
* Submit ASVSpoof response&lt;br /&gt;
* Finish Draft of C-P Map paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
* M2ASR final report&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* autoVC with cycle loss [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/16/Autovc-cyc.pdf pdf] [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/66/Pre.rar demo]&lt;br /&gt;
||&lt;br /&gt;
* cycle loss after adversarial training&lt;br /&gt;
* VQMIVC&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Prepare data &amp;amp; environment for experiments of AV-Hubert&lt;br /&gt;
||&lt;br /&gt;
* &amp;lt;-- keep doing these tasks&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* collated the visualization methods that have been reproduced&lt;br /&gt;
* some scripts for baseline(cncsrc)&lt;br /&gt;
||&lt;br /&gt;
* study feature aggregation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* 3~6 spk cycle loss models on wav2vec+seq2seq model&lt;br /&gt;
* Rewrite paper and focus on cycle loss&lt;br /&gt;
||&lt;br /&gt;
* Finish paper framework&lt;br /&gt;
* Push test on WER scoring&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Multi-language W2V model features were used for ASR experiments and compared with traditional MFCC features&lt;br /&gt;
||&lt;br /&gt;
* Asr experiments on different layers of multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Make an experiment plan&lt;br /&gt;
* Read the HuBERT paper and code&lt;br /&gt;
||&lt;br /&gt;
* Finish the hubert-U framework&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*Find the baseline for CN-Celeb speaker identification&lt;br /&gt;
||&lt;br /&gt;
*Train this baseline and find more face recognition baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Check CKA&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Pre.rar</id>
		<title>文件:Pre.rar</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Pre.rar"/>
				<updated>2022-02-21T10:55:56Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Autovc-cyc.pdf</id>
		<title>文件:Autovc-cyc.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Autovc-cyc.pdf"/>
				<updated>2022-02-21T10:55:37Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-24</id>
		<title>2022-01-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-24"/>
				<updated>2022-01-24T09:32:30Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Reschedule the cycleFlow paper&lt;br /&gt;
* Keep on investigation for multi-modality information fusion&lt;br /&gt;
||&lt;br /&gt;
* Rewrite cycleFlow&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some experiments on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/1c/Autovc.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* more experiments for cycle loss&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* study representation learning(self-supervised learning)&lt;br /&gt;
* Using mel-spectrum and standard softmax on small data, models are trained&lt;br /&gt;
* Data preprocessing&lt;br /&gt;
||&lt;br /&gt;
* Visualization on a small models(mel-spectrum &amp;amp; MFCC)&lt;br /&gt;
* Implement RELAX&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Autovc.pdf</id>
		<title>文件:Autovc.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Autovc.pdf"/>
				<updated>2022-01-24T09:29:56Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-17</id>
		<title>2022-01-17</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-17"/>
				<updated>2022-01-17T10:59:21Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* OLR workshop preparation&lt;br /&gt;
* CNSRC plan: metric design&lt;br /&gt;
* Capsulate review: emotion recognition&lt;br /&gt;
* Visual-audio SID experiment plan design&lt;br /&gt;
||&lt;br /&gt;
* Keep on multi-modal review &amp;amp; information fusion&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*Low-light image simulation and data preparation&lt;br /&gt;
*capsule network review&lt;br /&gt;
*Research plan guidance for interns&lt;br /&gt;
*Conceptual model of intelligent sensor&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Polish CNCSRC plan&lt;br /&gt;
* Write hard trials paper&lt;br /&gt;
||&lt;br /&gt;
* Finish hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Analysis FA of Speech engrave&lt;br /&gt;
* Speech engrave with position embedding [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* Analysis Speech engrave with position embedding &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* experiments for rebuttal&lt;br /&gt;
* update CycleFlow code&lt;br /&gt;
* AutoVC with cycle loss&lt;br /&gt;
||&lt;br /&gt;
* More training and evaluation on AutoVC&lt;br /&gt;
* More attempts on cycle loss&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Read Paper (RELAX,RISE); Finish Demo about RISE&lt;br /&gt;
* Evaluate the importance maps  with Deletion and Insertion&lt;br /&gt;
||&lt;br /&gt;
* Test MFCC&lt;br /&gt;
* Finish RELAX&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Paper sharing on any-to-any VC&lt;br /&gt;
* Finish training wav2vec model&lt;br /&gt;
* Ongoing wav2vec+AE model&lt;br /&gt;
||&lt;br /&gt;
* Finish all model training &lt;br /&gt;
* Test and comparison analysis&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Write code for extracting wav2vec features in Kaldi&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Extract MFCC features for GAN&lt;br /&gt;
* Write a project application&lt;br /&gt;
||&lt;br /&gt;
* Go on GAN experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*Complete the data partition&lt;br /&gt;
*Prepare group report&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Read the UIS-RNN paper&lt;br /&gt;
* Run some tests on speaker diarization&lt;br /&gt;
||&lt;br /&gt;
* Try to train the UIS-RNN model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do three experiments for PMI.&lt;br /&gt;
* Do something for programming TA.&lt;br /&gt;
||&lt;br /&gt;
* Finish experiments for PMI.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-03</id>
		<title>2022-01-03</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-03"/>
				<updated>2022-01-03T10:59:49Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review multi-modal and multi-view literature &lt;br /&gt;
* Design lip2wav models (possibly based on GCN)&lt;br /&gt;
* Continuous construct theory for noisy linear flow and PCA&lt;br /&gt;
||&lt;br /&gt;
* Keep on multi-modal design&lt;br /&gt;
* CNSRC test plan&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC plan&lt;br /&gt;
* Update Sunine toolkit [https://gitlab.com/csltstu/sunine]&lt;br /&gt;
||&lt;br /&gt;
* Go on hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some modification on x-vector&lt;br /&gt;
||&lt;br /&gt;
* introduce bottleneck into it&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish final exam&lt;br /&gt;
* Finish course projects&lt;br /&gt;
||&lt;br /&gt;
* 3 small course thesis&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do wav2vec-u in Swedish&lt;br /&gt;
* Prepare bi-weekly report&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Recalculate FAR&lt;br /&gt;
* Divide the data into several parts&lt;br /&gt;
||&lt;br /&gt;
* Calculate the FAR of the divided data&lt;br /&gt;
* To complete the data partition&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
* Finish the remaining homework&lt;br /&gt;
* Do some work&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-27</id>
		<title>2021-12-27</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-27"/>
				<updated>2021-12-27T10:57:06Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Hard trials paper revised&lt;br /&gt;
* Investigate link between AE and flow in the linear case [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/91/Linear.png]&lt;br /&gt;
* Investigate audio visual pretraining&lt;br /&gt;
||&lt;br /&gt;
* Keep on neural PCA theory&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* AE baseline&lt;br /&gt;
* Alignet &amp;amp; AE joint training for Infra to Visible domain transfer&lt;br /&gt;
* Gatenet for Visible and Infra latent code fusion&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Plan CNCSRC stuffs.&lt;br /&gt;
* Maintain Sunine.&lt;br /&gt;
* Deliver projects.&lt;br /&gt;
* Modify patents.&lt;br /&gt;
||&lt;br /&gt;
* Go on Hard trials paper.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* m2asr Kirgiz check&lt;br /&gt;
||&lt;br /&gt;
* continue on speech engrave (text attend speech; HMM based attention)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some contrast experiment&lt;br /&gt;
||&lt;br /&gt;
* design the timbre encoder with a design similar to x-vector&lt;br /&gt;
* insert the new encoder into our four-encoder structure&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do course project&lt;br /&gt;
* Organize Kirgiz data&lt;br /&gt;
||&lt;br /&gt;
* Do course project&lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Debug bugs encountered when using the fine-tuning model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Adjust the parameters to use thchs30 training&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Sampling out pictures compute FAR&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do course project&lt;br /&gt;
||&lt;br /&gt;
* Do course project&lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-20</id>
		<title>2021-12-20</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-20"/>
				<updated>2021-12-20T11:07:36Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Hard trials paper revision - ongoing&lt;br /&gt;
* ICASSP paper review done&lt;br /&gt;
||&lt;br /&gt;
* Hard trials paper revision - keep on&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Upload AI course exam questions.&lt;br /&gt;
* Merge ECAPA-TDNN and Auto-LR into Sunine.&lt;br /&gt;
* Submit ETM TASLP response.&lt;br /&gt;
||&lt;br /&gt;
* Clean up PUFA data and build cross-channel system.&lt;br /&gt;
* Take charge of CNCSRC.&lt;br /&gt;
* Exps of hard trials.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Some analysis of fncmd and speech engrave [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 cvss]&lt;br /&gt;
* Prepare huawei retrain&lt;br /&gt;
||&lt;br /&gt;
* Consider about HMM-based attention mechanism&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* add contrastive loss&lt;br /&gt;
||&lt;br /&gt;
* short-term ib dimension adjustment&lt;br /&gt;
* speaker encoder adjustment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* a little analysis about aamsoftmax on cam &amp;amp;&amp;amp; saliency map on image domain&lt;br /&gt;
* homework&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Preparation for sharing about capsule&lt;br /&gt;
* Revise the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Unsupervised experiment of thchs30&lt;br /&gt;
* Revise the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Compute the cosine distance&lt;br /&gt;
* Revise the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-13</id>
		<title>2021-12-13</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-13"/>
				<updated>2021-12-13T10:52:48Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Spoof paper refined&lt;br /&gt;
* Start the hard trials paper&lt;br /&gt;
||&lt;br /&gt;
* Hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Test fncmd and speech engrave on huawei_cross_channel data [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/82/Speech_engrave_fncmd_huawei_cross.png here]&lt;br /&gt;
||&lt;br /&gt;
* Retrain speech engrave model (make speech engrave and fncmd comparable on far field test set)&lt;br /&gt;
** Huawei cross channel data&lt;br /&gt;
** Score margin&lt;br /&gt;
** Discriminative training&lt;br /&gt;
* Retrain fncmd model with huawei data.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some analysis on c-vector&lt;br /&gt;
* training processing of c-vector&lt;br /&gt;
||&lt;br /&gt;
* remove f0 decoder of c-vector&lt;br /&gt;
* an easier model with only content and speaker encoders based on long-short term assumption&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* perform kmeans and pca on wav2vec result&lt;br /&gt;
* check GAN&lt;br /&gt;
||&lt;br /&gt;
* fix bug of uasr_model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Verifying the correctness of a series of CAM methods&lt;br /&gt;
* reproduce the method of Layer-CAM on classification&lt;br /&gt;
||&lt;br /&gt;
* more experiment and analysis on this method&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish training for not-ever-seen speaker on baseline AE and cycle model&lt;br /&gt;
* Build the framework of wav2vec model&lt;br /&gt;
||&lt;br /&gt;
* Full test on baseline &amp;amp; cycle model&lt;br /&gt;
* More details need to be discussed on wav2vec model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Fine-tune the wav2vec model on dev-other &lt;br /&gt;
* Test the effect of Tibetan adjusted model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare the effects of TIMIT and Tibetan fine-tune&lt;br /&gt;
||&lt;br /&gt;
* More comparative experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval_lda_100.png</id>
		<title>文件:Ivec eval lda 100.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval_lda_100.png"/>
				<updated>2021-12-13T09:57:34Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval_lda.png</id>
		<title>文件:Ivec eval lda.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval_lda.png"/>
				<updated>2021-12-13T09:57:07Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev_lda_100.png</id>
		<title>文件:Ivec dev lda 100.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev_lda_100.png"/>
				<updated>2021-12-13T09:56:34Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev_lda.png</id>
		<title>文件:Ivec dev lda.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev_lda.png"/>
				<updated>2021-12-13T09:56:08Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval_100.png</id>
		<title>文件:Ivec eval 100.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval_100.png"/>
				<updated>2021-12-13T09:55:45Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval.png</id>
		<title>文件:Ivec eval.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_eval.png"/>
				<updated>2021-12-13T09:54:30Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev_100.png</id>
		<title>文件:Ivec dev 100.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev_100.png"/>
				<updated>2021-12-13T09:54:04Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev.png</id>
		<title>文件:Ivec dev.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Ivec_dev.png"/>
				<updated>2021-12-13T09:53:37Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval_100_lda.png</id>
		<title>文件:Cvec vad eval 100 lda.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval_100_lda.png"/>
				<updated>2021-12-13T09:52:59Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval_lda.png</id>
		<title>文件:Cvec vad eval lda.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval_lda.png"/>
				<updated>2021-12-13T09:52:35Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev_100_lda.png</id>
		<title>文件:Cvec vad dev 100 lda.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev_100_lda.png"/>
				<updated>2021-12-13T09:51:59Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev_lda.png</id>
		<title>文件:Cvec vad dev lda.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev_lda.png"/>
				<updated>2021-12-13T09:51:37Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval_100.png</id>
		<title>文件:Cvec vad eval 100.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval_100.png"/>
				<updated>2021-12-13T09:51:03Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval.png</id>
		<title>文件:Cvec vad eval.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_eval.png"/>
				<updated>2021-12-13T09:50:29Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev_100.png</id>
		<title>文件:Cvec vad dev 100.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev_100.png"/>
				<updated>2021-12-13T09:49:48Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev.png</id>
		<title>文件:Cvec vad dev.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vad_dev.png"/>
				<updated>2021-12-13T09:49:11Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vox.png</id>
		<title>文件:Cvec vox.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cvec_vox.png"/>
				<updated>2021-12-13T09:48:21Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss_z.png</id>
		<title>文件:Loss z.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss_z.png"/>
				<updated>2021-12-13T09:47:42Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss_x.png</id>
		<title>文件:Loss x.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss_x.png"/>
				<updated>2021-12-13T09:47:17Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss_f0.png</id>
		<title>文件:Loss f0.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss_f0.png"/>
				<updated>2021-12-13T09:46:49Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss.png</id>
		<title>文件:Loss.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Loss.png"/>
				<updated>2021-12-13T09:45:53Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-06</id>
		<title>2021-12-06</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-06"/>
				<updated>2021-12-06T10:55:52Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Refine spoof paper&lt;br /&gt;
* Prepare talk for information theory in NN&lt;br /&gt;
* Prepare talk for representation investigation.&lt;br /&gt;
||&lt;br /&gt;
* Finish spoof paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
|| &lt;br /&gt;
*review papers about CQDs&lt;br /&gt;
*Verify the deconvolution of infrared and visible faces&lt;br /&gt;
*Verify infrared and visible image fusion based on GLOW model&lt;br /&gt;
*Arrange research plans for interns&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Finish course on AI.&lt;br /&gt;
* Study speaker separation and think about structural embedding. &lt;br /&gt;
||&lt;br /&gt;
* Finish ETM response.&lt;br /&gt;
* Exps of hard trials.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Report about e2e kws&lt;br /&gt;
* speech engrave (garbage node, sil training data, text to speech attention)&lt;br /&gt;
* analyse fenyinta test data&lt;br /&gt;
||&lt;br /&gt;
* more analysis about speech engrave(speech to text attention)&lt;br /&gt;
* speech engrave (text to speech attention)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some tests on our model&lt;br /&gt;
||&lt;br /&gt;
* make some more efficient attempts&lt;br /&gt;
* ——remove rhythm and pitch encoders&lt;br /&gt;
* ——increase distance between speakers&lt;br /&gt;
* ——improve content encoder&lt;br /&gt;
* ——make use of speaker label&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* pre-process audio data &amp;amp; train GAN with wav2vec2 output data directly&lt;br /&gt;
||&lt;br /&gt;
* use kmeans and pca clustering wav2vec2 output to build better segment representation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* reproduce a series of CAM method on speaker classification&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish the first version on improved exemplar autoencoder with cycle loss&lt;br /&gt;
* Rethink the theory analysis part&lt;br /&gt;
||&lt;br /&gt;
* Test on never-before-seen speaker conversion&lt;br /&gt;
* Review the code of wav2vec, StarGAN and PPG based GAN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Face sampling in CNCeleb dataset&lt;br /&gt;
* Filter videos without the target's face&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_meeting</id>
		<title>Weekly meeting</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_meeting"/>
				<updated>2021-11-29T13:12:34Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;*Location: FIT-1-304&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials !! On duty&lt;br /&gt;
|-&lt;br /&gt;
| 2012/08/27  ||Dong Wang  || Heterogeneous Convolutive Non-negative Sparse Coding ||[[媒体文件:Heterogeneous_convolutive_non-negative_sparse_coding.pdf|slides]] [http://homepages.inf.ed.ac.uk/v1dwang2/public/pdf/inerspeech2012-hetero.pdf paper] ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/09/03  ||NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/09/10  || NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/09/17  ||WALEED ABDULLA||Auditory Based Feature Vectors for Speech Recognition ||[[媒体文件:AuditoryBasedFeatureVectors.pdf|slides]]||范淼&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;2&amp;quot;|2012/09/24  ||刘超|| N-gram FST indexing for Spoken Term Detection || [[媒体文件:120924-N_gram_FST_indexing_for_Spoken_Term_Detection-LC-0.pdf|slides]] ||尹聪&lt;br /&gt;
|-&lt;br /&gt;
|范淼||Micro-blogging, Wikipedia, Folksonomy, What's Next? ||[[媒体文件:120924-Micro-blogging, Wikipedia, Folksonomy, What's Next-FM--01-FM-.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| 2012/10/08 ||NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
| 2012/10/15  ||NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/10/22||Wu Xiaojun||speaker recognition in CSLT ||[[媒体文件:VPR_in_CSLT.pdf|slides]]||卡尔&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/10/29  ||王军||An overview of Automatic Speaker Diarization Systems || [[媒体文件:121027-Speaker Diarization-WJ.pdf|slides]] ||别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/05  ||别凡虎||Experiments on Emotional Speaker Recognition||[[媒体文件:121104-Experiments_on_Emotional_Speaker_Recognition-BFH.pdf|slides]] ||刘超&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/12  ||唐国瑜||Statistical Word Sense Improves Document Clustering ||[[媒体文件:121112_Statistical_Word_Sense_Improves_Document_Clustering_TGY.pdf‎ |slides]]||邱晗&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/19  ||张陈昊||TDSR with Long-term Features Based on Functional Data Analysis||[[媒体文件:121118-ISCSLP-FDA_SR-ZCH.pdf|slides]] ||王俊俊&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/26  ||王琳琳||Time-Varying Speaker Recognition: An Introduction||[[媒体文件:121126-Time_Varying_Speaker_Recognition_I-Wll.pdf‎|slides]] ||龚宬&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/12/03  ||No meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/12/10  ||No meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/12/17  ||No meeting|| || ||&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/01/07  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/01/07  ||王军||基于DF-MAP的说话人模型训练方法||[[媒体文件:130107-基于DFMAP的说话人模型训练方法-WJ.pdf|slides]] ||唐国瑜&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/01/14  ||王东|| Computing in CSLT ||[[媒体文件:Computing_in_CSLT.pdf|slides]] ||王琳琳&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/04  ||王军||Sequential Adaptive Learning for Speaker Verification ||[[媒体文件:130301-Sequential adaptive learning for speaker verification-WJ.pdf|slides]] ||别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/11  || Du Jinle|| VAD stuff || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/18  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/25  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/01  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/08  || 张陈昊|| A Fishervoice based Feature Fusion Method for SUSR ||[[媒体文件:130408-FisherVoice-ZCH.pdf|slides]] ||谢仲达&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/15  ||龚宬|| An Exploration on Influence Factors of VAD's Performance in Speaker Recognition ||[[媒体文件:130415-An_Exploration_on_Influence_Factors_of_VAD-GC.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/22  ||王俊俊 || Understanding the Query: THCIB and THUIS at NTCIR-10 Intent Task ||[[媒体文件:130422-Understanding_the_Query-WJJ.pdf|slides‎]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/29  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/06  ||别凡虎 ||MLLR on Emotional Speaker Recognition ||[[媒体文件:130506-MLLR on Emotional Speaker Recognition-BFH.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/13  ||刘超 || The Use of Deep Neural Network for Speech Recognition || [[媒体文件:130513-the_use_of_dnn_for_asr-lc.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/20  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/27  ||王琳琳|| 说话人识别中的时变鲁棒性问题研究 || [[媒体文件:130527-TVSV-Wll.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/03  ||王俊俊|| 汉语搜索结果聚类系统研究与实现 || [[媒体文件:130601-毕业答辩-02-WJJ.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/10  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/17  ||范淼 || Relation Extraction ||[[媒体文件:130617-relation_extraction-fm.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/24  ||唐国瑜 || Incorporating Statistical Word Senses in Topic Model  ||[[媒体文件:130624_Incorporating Statistical Word Senses in Topic Model_TGY.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/07/01  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/07/08  ||  || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/07/15  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/09  ||王东 || Research Frontier in Speech Technology||[[媒体文件:Research Frontier in Speech Technology.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/16  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/23  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/30  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/07  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/14  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/21  ||范淼 ||Transduction Classification with Matrix Completion （中文报告）||[[媒体文件: Transduction_Classifiction_with_Matrix_Completion.pdf‎|slides]] [http://pages.cs.wisc.edu/~jerryzhu/pub/mc4ssl_FINAL.pdf paper]|| 李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/28  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/04  || 王军 || 基于i-vector的intersession补偿及打分方法(综述) || [[媒体文件:131104-ivecto下intersession补偿及打分方法--01-WJ-.pdf‎|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/11  ||张陈昊 ||PLDA介绍及PLDA在说话人识别中的应用 ||[[媒体文件:PLDA.pdf|slides]] || 唐国瑜&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/18  ||别凡虎 ||i-vector理论介绍（讨论）||[[媒体文件:131118-i-vector_and_GMM-UBM-BFH.pdf|slides]]‎  ||王军&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/25  ||刘超 || Pruning Neural Networks By Optimal Brain Damage(综述)||[[媒体文件:131125-OBD-LC-01.pdf|slides]] ||范淼&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/02  ||范淼 ||Distant Supervision for Relation Extraction with Matrix Completion （英文报告）||[[媒体文件:131202-DRMC-FM-01.pdf|slides]] || 李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/09  || Dong Wang|| Introduction to the HMM-based speech synthesis||[http://hts.sp.nitech.ac.jp/archives/2.2/HTS_Slides.zip slides] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/16  ||张陈昊 ||语音研究中的基元介绍 ||[[媒体文件:131215-Phonology-ZCH.pdf|slides]]  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/23  || Dong Wang|| Introduction to the HMM-based speech synthesis (2)||[http://hts.sp.nitech.ac.jp/archives/2.2/HTS_Slides.zip slides] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/23  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/30  ||刘荣 || continuous space language model||[[媒体文件:Cslm-cslt.pdf|slides]]  ||刘超&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/01/06  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/01/13  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/01/20  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/02/24  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/03  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/10  ||范淼|| Distant Supervision for Information Extraction (英文报告)|| || 李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/17  ||唐国瑜 || Topic Models Incorporating Statistical Word Senses || [[媒体文件:TMISWS_For_CICLing2014.pdf|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/24  ||孟祥涛 || Noisy training for Deep Neural Networks|| ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/31  ||范淼|| Translating Embeddings for Modeling Multi-relational Data （中文报告） || [https://www.hds.utc.fr/everest/lib/exe/fetch.php?id=en%3Atranse&amp;amp;cache=cache&amp;amp;media=en:cr_paper_nips13.pdf paper]||李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/07  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/14  || Wang Jun|| I-vector and PLDA in depth ||[[媒体文件:131104-ivector-microsoft-wj.pdf|slides]]  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/21  || 邱晗||汉语事件句式规范化处理 ||[[媒体文件:140421-汉语事件句式规范化-QH.pdf‎|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/28  || 唐国瑜|| Some papers in　CICLing2014 ||[[媒体文件:Some_papers_in_CICling2014.pdf|slides]]  ||刘超&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/05/05  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/05/12  || 卡尔|| paper introduction || [[媒体文件:Acoustic Factor Analysis.pdf|slides]] || 邱晗&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;2&amp;quot;|2014/05/19  || 邱晗|| 汉语事件句式CCG推导树重构 ||[[媒体文件:140519-CCG_reConstruction.pdf‎|slides]]‎|| 卡尔&lt;br /&gt;
|-&lt;br /&gt;
|Liu Chao|| master proposal: sparse and deep neural networks || [[媒体文件:140519-proposal-LC-01.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| || Liu Chao|| 2nd master proposal: sparse and deep neural networks|| ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/06/16  || 别凡虎 || Truncated Wave based VPR and Some Recent Work || [[媒体文件:140614-Truncated_Speech_based_VPR.pdf‎|slides]]‎ || 别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/06/23  || 别凡虎 || Block-wise training for I-vector || [[媒体文件:140623-Block-wise training for I-vector.pdf‎|slides]]‎ || 别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/07/07||王军 ||Discriminative Scoring for Speaker Recognition Based on I-vectors || [[媒体文件:140707-work_report.pdf|slides]]|| 王军&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/09/01|| || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/09/09 ||别凡虎 ||Reseach on Truncated Wave based VPR||[[媒体文件:140909-Truncated Speech based VPR.pdf|slides]] || 别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/09/15|| || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/09/22  || Miao Fan|| Large-scale Entity Relation Extraction based on Low-dimensional Representations (中文报告，博士开题)&lt;br /&gt;
||[[媒体文件:基于低维表示的大规模实体关系挖掘技术.pdf‎|slides]] || Lan TianLi&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/09/29 || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/10/13  || Miao Fan|| The Frontier of Knowledge Embedding （英文报告）|| [[媒体文件:The_Frontier_of_Knowledge_Embedding.pdf‎|slides]]|| Lan TianLi&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/10/20  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/10/27  || Li Yi || Phonemes, Features, and Syllables: Converting Onset and Rime Inventories to Consonants and Vowels||[[媒体文件:Lanzhou Phonemes, Features, and Syllables- fianl.pdf|paper]] [[媒体文件:Syllables and phonemes - 20141027.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/3   || 米吉提|| Automatic Speech Recognition of Agglutinative Language based on Lexicon Optimization||[[媒体文件:Mijit-slides-清华大学-2014-11-3.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/10  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/17  ||Dong Wang || Highly restricted keyword spotting for Uyghur using sparse analysis|| [[媒体文件:Highly Restricted Keyword Selection Based on Sparse Analysis.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/24  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/1  ||ZhongDa Xie ||Incorporating Fine-Grained Ontological Relations in Medical Document Ranking || [[媒体文件:Fine-grained_relations.pdf|slides]]|| Lantian Li &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/8  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/15  || 唐国瑜 || 跨语言话题分析关键技术研究 ||[[媒体文件:141205-答辩-TGY.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/22  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/29  || Askar || Language Mismatch in Speaker Recognition System||[[媒体文件:141229--askar.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/5  ||Lantian Li || Deep Neural Networks for Speaker Recognition || [[媒体文件:150104_Deep_Neural_Networks_for_Speaker_Recognition_LLT.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/12  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/19  || Dong Wang || Machine Learning Paradigms for Speech Recognition||[[媒体文件:Machine Learning Paradigms for Speech Recognition.pdf|slides]]  [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6423821 paper] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/26  || Chen Guorong || Information Transmission and Distribution on Web ||[[媒体文件:An_introduction_of_complex_network1.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot; |2015/3/9 || Dong Wang || Joint Deep Learning || [[媒体文件:Joint Deep Learning.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/3/16  || Dongxu Zhang || Knowledge learning from text data and knowledge bases || [[媒体文件:Joint Deep Learning.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/4/13  || Xuewei Zhang || Lasso-based Reverberation Suppression In Automatic Speech Recognition || [[媒体文件:Lasso-based Reverberation Suppression In Automatic Speech Recognition.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/5/11  || Dong Wang ||ASR and SID Research Frontier ||[[媒体文件:ASR and SID Research Frontier.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/11/23  || Zhiyuan Tang|| CTC learning|| [[媒体文件:CTC.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/11/30  || Mengyuan Zhao|| CNN-based music removal|| [[媒体文件:Music Removal by Convolutional Denoising.pdf | slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/3  || Zhiyuan Tang|| Networks of Memory|| [[媒体文件:Memory_net.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/7  || Yiqiao Pan|| Document Classification with Spherical Word Vectors||[[媒体文件:Document Classification with Spherical Word Vectors.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/14  || Dong Wang || Transfer Learning for Speech and Language Processing ||[[媒体文件:Transfer_Learning_for_Speech_and_Language_Processing.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/21  || Qixin Wang || Attention for poem generation ||[[媒体文件:Ijcai 2016.pptx|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/28  || Lantian Li || Max-margin metric learning for speaker recognition || [[媒体文件:Max-margin-Metric-Learning.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/1/4  || Zhiyong Zhang || Parallel training,MPE and natural gradient||[[媒体文件:20160104_张之勇_Large-scale Parallel Training in Speech Recognition.pdf|slides]]||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/1/18  || Dongxu Zhang || Memoryless Document Vector ||[[媒体文件:Memoryless_document_vector.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/3/14  || Zhiyuan Tang|| Oral presentation for &amp;quot;vMF-SNE: Embedding for Spherical Data&amp;quot;|| [[媒体文件:embedding.pdf|slides]] ||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/3/28  || Tianyi Luo || Review for Neural QA || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/29/CSLT_Weekly_Report--20160328.pdf slides] ||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/4/11  || Rong Liu || Recommendation in Youku || [http://cslt.riit.tsinghua.edu.cn/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cslt%E5%AE%9E%E9%AA%8C%E5%AE%A4%E4%BA%A4%E6%B5%81.pptx slides] ||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/09 || Miao Fan || Learning contextual embeddings of knowledge base with entity descriptions.|| [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9c/Techreport_CSLT_2016_M.F..pdf slides]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/16 || Yang Wang || Research on conversation thread detection. || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/bb/%E6%B1%AA%E6%B4%8B-%E6%AF%95%E8%AE%BE-CSLT.pdf slides]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/20 || Yang Wang &amp;amp;  Maoning Wang || Research on portfolio selection. || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/89/%E6%B1%AA%E6%B4%8B-%E9%87%91%E8%9E%8D%E7%AC%AC%E4%B8%80%E6%AC%A1%E5%88%86%E4%BA%AB.pdf slides1]  [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/bb/%E6%B1%87%E6%8A%A5_%E8%B5%84%E4%BA%A7%E7%BB%84%E5%90%88%E4%B8%AD%E5%87%A0%E4%B8%AA%E8%AF%84%E4%BB%B7%E6%8C%87%E6%A0%87%E7%9A%84%E8%A7%A3%E9%87%8A.pdf slides2]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/20  || Zhiyuan Tang || ICASSP 2016 summary || [[媒体文件:Note icassp16.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/23 || Dong Wang || graphical model and neural model || [[媒体文件:Graphic Model and Neural Model.pdf|slides]] [[媒体文件:Generative-Pdf.rar|papers]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/8/02 || Zhiyuan Tang || Visualizing, Measuring and Understanding Neural Networks: A Brief Survey|| [[媒体文件:Nn analysis.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/8/03 || Yang Wang || Neural networks and genetic programming for financial forecasting || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/79/GeneticNN.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/05 || Yang Wang || Reinforcement Learning Models and Simulations || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ca/RRL_and_sim.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/08 || April Pu || SOFTWARE DEVELOPMENT METHODOLOGIES || [http://wangd.cslt.org/talks/pdf/april_software.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/12 || Yang Wang || Generative Adversarial Nets || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c9/Generative_adversarial_network.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/22 || Zhiyuan Tang || INTERSPEECH 2016 summary || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/65/Interspeech16_review.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/30 || Dong Wang || Deep and sparse learning in speech and language: an overview || [http://wangd.cslt.org/talks/pdf/bics2016.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/2/17 || Yang Wang || Review understanding deep learning requires rethinking generalization || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/3b/Review_understanding_deep_learning_requires_rethinking_generalization.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/5 || Dong Wang || Deep speech factorization || [http://wangd.cslt.org/talks/pdf/Deep-Speech-Factorization.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/8 || Shiyue Zhang || Convolutional Sequence to Sequence Learning  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f3/Conv_seq2seq.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/12 || Shiyue Zhang || Memory-augmented Neural Machine Translation || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/36/Memory-augmented_Neural_Machine_Translation_.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/21 || Shiyue Zhang || Attention Is All You Need  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/68/Attention_is_all_you_need.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/26 || Jiyuan Zhang || Chinese poem generation using neural model  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/50/Flexible_and_Creative_Chinese_Poetry_Generation_Using_Neural_Memory_.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/21 || Miao Zhang || Speaker recognition on cough, laugh and wei  || &lt;br /&gt;
[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f6/Zm_cough.pdf slides]  &lt;br /&gt;
||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/7/10 || Aodong Li || Enhanced Neural Machine Translation by Learning from Draft  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ca/Learning_from_draft.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/7/17 || Lantian Li || Study on Speaker Recognition  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/ec/170716-Study_on_SRE.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2018/12/6 || Xiuqi Jiang ||  Meta-Learning and Zero-Shot Learning  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/18/181205_Meta-Learning_and_Zero-Shot_Learning_JXQ.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2018/12/12 || Dan He ||  Tensor factorization neural net  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/3d/Tensor_factorization_neural_net.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2018/12/26 || Dong Wang || Towards deep statistical speaker representation  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/4/48/V.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/01/04 || Dong Wang || Speech in NIPS 2017/2018  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c8/Speech_in_NIPS_2017.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/07/17 || Dong Wang || Deep Feature Learning and Normalization for Speaker Recognition  || [http://wangd.cslt.org/talks/pdf/india.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/08/19 || Sitong Cheng &amp;amp; Pengyuan Zhang || Periodic Report of Celebrity Video Data Collection.   || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/08/C-STAR.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/08/19 || Dong Wang|| Continuous Learning for Neural Nets || [[媒体文件:Continuous Learning for Neural Nets.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/09/11 || Dong Wang || Language Recognition in ICASSP 2019   || [http://wangd.cslt.org/talks/pdf/LRE-ICASSP-2019.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/09/11 || Sitong Cheng || Language Recognition in Interspeech 2019   || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a9/Language_Recognition_in_Interspeech_2019.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/10/14 || Haoran Sun || Dimension Reduction  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7b/DimensionReduction.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/10/27 || Dong Wang || Back to Matrix  || [[媒体文件:Back to Matrix.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/11/11 || Dong Wang || Helmholtz Machine &amp;amp; The ML criterion  || [[媒体文件:Helmholtz Machine &amp;amp; The ML criterion.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/02 || Jiawen Kang || GAN Latent Space Manipulation &amp;amp; Flow Application Papers  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ca/GAN_Lantent_Space_manunipulation_%26_Flow_Application.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/09 || Dong Wang || Style transfer and information factorization || [[媒体文件:Style Transfer with Generative Models.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/16 || Zhiyuan Tang ||  Conditional Generative Flow  ||  [[媒体文件:Conditional GLow.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/23 || Lantian Li ||  Deep Generative Model in Speaker Recognition || [[媒体文件:Deep Generative Model in Speaker Recognition.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/30 || Wenqiang Du ||  Cross-bandwidth Train || [[媒体文件:Cross-bandwidth_Train.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/01/06 || Yunqi Cai ||  Do Deep Generative Models Know What They Don't Know ?|| [[媒体文件:2020.1.6_group_meeting.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/01/10 || Haoran Sun ||  Flow++: Improving Flow-Based Generative Models with Variational Dequantization and Architecture Design || [[媒体文件:Flow++.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/01/13 || Ying Shi ||  Deep Generative Model Energy Based Model || [[媒体文件:Deep_Generative_Model.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/02/10 || Dong Wang ||  Deep Generative Models for Discriminative Tasks || [[媒体文件:Re-Thinking for Discriminative and Generative Models.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/02/17 || Zhiyuan Tang ||  Unsupervised Learning of Disentangled Representations  || [[媒体文件:20200217 Unsupervised disentanglement.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/02/24 || Lantian Li ||  Weakly- &amp;amp; Self-Supervised Learning || [[媒体文件:Weakly-_%26_Self-Supervised_Learning.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/02 || Yunqi Cai ||  Deep Normalization for Speaker Vectors|| [[媒体文件:Deep_Normalization_for_Speaker_Vectors_.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/09 || Ying Shi ||  Speech Enhancement base on Double Flow || [[媒体文件:Speech_Enhancement_base_on_Double_Flow.pdf|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/16 || Dong Wang ||  Bayesian scoring and uncertainty manipulation || [[媒体文件:Uncertainty Propagation.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/23 || Zhiyuan Tang || Classifier involves Energy Based Model  || [[媒体文件:200323 energy model.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/30 || Lantian Li ||  Bayesian scoring in speaker verification || Temporarily held for security || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/04/06 || Yunqi Cai ||  Posterior Collapse|| [[媒体文件:Posterior_Collapse.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/04/13 || Lantian Li || NDA in ASV || Temporarily held for security [cvss 761] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/04/20 || Ying Shi ||  Speech_Enhancement_base_on_Flow ||[[媒体文件:Speech_Enhancement_base_on_Flow.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/11 || Dong Wang  ||  Real DNF || [[媒体文件:Real_DNF.pdf|Slide]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Sitong Cheng ||  ASR-Free Pronunciation Assessment || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9a/ASR-Free_Pronunciation_Assessment.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Jiawen Kang ||  RobustMAML || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/8e/RobustMAML.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Jiawen Kang ||  Domain adaptation review || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/6d/Presentation-Meta-learning.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Jiawen Kang ||  SOTA models for VPR || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/d/d2/SOTA_models_for_VPR.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/01 || Dong Wang || How MAML succeeded?  || [https://arxiv.org/pdf/1909.09157.pdf][https://pdfs.semanticscholar.org/e6e9/c9d50b11ced939faf42f1c65bf9360eefd73.pdf][https://arxiv.org/pdf/1706.05806.pdf] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/09 || Zhiyuan Tang  ||  Flow Wheels || [[媒体文件:20200408 flow wheels.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/15 || Lantian Li  ||  Uncertainty Modeling and Inference || [[媒体文件:200615-Uncertainty.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/22 || Lantian Li  ||  Gaussians in High Dimension || [[媒体文件:High-dimensioaln-Gaussian.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/22 || Dong Wang  ||  Self training for SE and ASR || [[媒体文件:Self-Training.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/29 || Ying Shi  ||  Speech enhancement &amp;amp; separation || [[媒体文件:Speech-Separation-and-Enhancement.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/07/06 || Haolin Chen  ||  Self-supervised Learning in Speech Processing || [[媒体文件:Self-Supervised.pptx|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/07/13 || Zhiyuan Tang  || Exploding inverse in INN || [[媒体文件:20200713 dig into flow.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/07/20 || Lantian Li  || Principle Solution for Enroll-Test Mismatch || [[媒体文件:200720-mismatch.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/08/17 || Dong Wang  || Decoupled scoring || [[媒体文件:Decoupled.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/08/24 || Zhiyuan Tang || G &amp;amp; D Acoustic model ||  [[媒体文件:20200824 flow asr.pdf | slides]]   || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/09/01 || Lantian Li || Decoupled NL ||     || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/09/07 || Yunqi Cai ||Deep generative model based Anomaly detection||[[媒体文件:Anomaly_detection.pdf | slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/09/14 || Dong Wang || How we factorize speech? || [[媒体文件:Factorization.pdf|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/05 || Dong Wang || Remarks on DNF || [[媒体文件:Remakrs on DNF.pptx|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/12 || Dong Wang || Paper Reading: Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations || [[媒体文件:Challenge-disentanglement.pptx|slides]]  [http://proceedings.mlr.press/v97/locatello19a/locatello19a.pdf paper link]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/19 || Haoran Sun || Informational Speech Factorization by Factorial Discriminative Normalization Flow || [[媒体文件:Informational_Speech_Factorization.pdf|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/27 || Jiao Han || Experimental report mainly based on DNF models || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e9/Experimental_report_mainly_based_on_DNF_models.pdf slides]    ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/02 || Lantian Li || INTERSPEECH 2020 (SRE) || [[媒体文件:201102-INTERSPEECH_2020-SRE-LLT.pdf|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/09 || Yunqi Cai || Deep normalization_V1 || [[媒体文件:Deep_norm_trilogy_v1.pdf|slides]] [http://caiyq.cslt.org/doc/deepnorm_v1.mp4 video]    || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/16 || Yunqi Cai || Deep normalization_V2 || [http://caiyq.cslt.org/doc/deep-norm-trilogy_v2.pptx slides] [http://caiyq.cslt.org/doc/deepnorm_v2.mp4 video]     || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/17 || Di Wang || Statistics decomposition for NL Scoring || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/97/Statistics_decomposition_for_NL_Scoring.pdf slides]    || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/23 || Yunqi Cai || Deep normalization_V3 || [http://caiyq.cslt.org/doc/deep-norm-trilogy_v3.pptx slides] [http://caiyq.cslt.org/doc/deepnorm_v3.mp4 video]    || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/08 || Yunqi Cai || From materials science to perceptual intelligence || [http://caiyq.cslt.org/doc/perceptual_intelligence.pptx slides] [http://caiyq.cslt.org/doc/**.mp4 video]    ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/08 || Dong Wang || From noise injection to Bayes PLDA || [[媒体文件:Bayes-plda.ppt|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/21 || Lantian Li || Speech in NIPS 2019/2020 || [[媒体文件:Speech in NIPS 19&amp;amp;20.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/28 || Pengqi Li || Domain generalization via robust optimization || [[媒体文件:201228-Device_Generalization.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/01/07 || Dong Wang || What we believe || [[媒体文件:What we believe.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/01/14 || Dong Wang || Reparametric trick || [[媒体文件:Reparametric.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/02/01 || Dong Wang || Data augmentation as regularization || [[媒体文件:Data-augmentation.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/02/22 || Lantian Li || Ensemble and Distillation || [[媒体文件:2012.09816.pdf|paper]] [[媒体文件:Ensemble_And_Distillation.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/08 || Dong Wang || HIERARCHICAL GENERATIVE MODELING FOR CONTROLLABLE SPEECH SYNTHESIS || [https://arxiv.org/pdf/1810.07217.pdf paper] [[媒体文件:HIERARCHICALGENERATIVEMODELING FORCONTROLLABLESPEECHSYNTHESIS.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/15 || Dong Wang || 第三代人工智能 || [http://scis.scichina.com/cn/2020/SSI-2020-0204.pdf  paper] [[媒体文件:第三代人工智能.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/22 || Chao Xing || Complexity neural net in speech enhancement || [http://web.cse.ohio-state.edu/~wang.77/papers/WWW.taslp20.pdf paper1][https://openreview.net/pdf?id=SkeRTsAcYm paper2] [https://arxiv.org/pdf/2008.00264.pdf paper3] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/29 || Ying Shi || Some methods about speech enhancement || [[媒体文件:SPEECH ENHANCMENGT.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/05 || Jiyuan Zhang || 推理 &amp;amp; 知识推理调研 || [[媒体文件:知识推理相关调研.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/12 || Zicheng Qiu || Some work on minorlingual speech recognition||  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/19 || Shiyue Zhang || Text summarization||  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/26 || Dong Wang || Paper reading: Metadata normalization || [[媒体文件:Meta normalization.pdf|slides]] [https://arxiv.org/pdf/2104.09052.pdf paper]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/05/10 || Lantian Li || Explainable ML || [[媒体文件:Explainable_ML.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/05/17 || Jie Li || Tea cake Re-identification || [[媒体文件:Tea-cake.pptx|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/05/24 || Dong Wang || Deep speech prior|| [[媒体文件:Deep speech prior.pptx|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/06/28 || Haoran Sun || Information Bottleneck and Deep Learning || [[媒体文件:Information_bottleneck.pdf|slides]] [[媒体文件:IBpapers.pdf|papers]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/07/05 || Chen Chen || Speech Enhancement Overview || [[媒体文件:SpeechEnhancementOverview.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/07/19 || JiaWen Kang || Disease detection from speech signal ||  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/07/26 || 毛丽旦 || Characters of Uyghur in learning Chinese || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/08/02 || ChaoXing ||  Multi-Modal Speech Interaction||  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/08/16 || Dong Wang || Training Mutual Information|| [[媒体文件:Information max.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/09/06 || Jie Li || Detection of object falling  || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/09/13 || Zhiyuan Tang|| Construction of Ke speech database || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/09/20 || Shiyue Zhang || Objective evaluation for text summarization || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/10/11 || Chao Xing  || Knowledge-driven small footprint backbone || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/10/18 || Haoran Sun || CycleFlow: PURIFY INFORMATION FACTORS BY CYCLE LOSS || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/10/25 || Lantian Li || Real Additive Margin Softmax for Speaker Verification || [[媒体文件:RAM-Softmax.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/01 || Ying Shi || End-to-End kws || [[媒体文件:Kws.pdf | slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/08 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/15 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/22 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/29 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/06 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/13 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/20 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/27 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[http://cslt.riit.tsinghua.edu.cn/mediawiki/index.php/Weekly_reading See more related]&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_meeting</id>
		<title>Weekly meeting</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_meeting"/>
				<updated>2021-11-29T13:12:00Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;*Location: FIT-1-304&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials !! On duty&lt;br /&gt;
|-&lt;br /&gt;
| 2012/08/27  ||Dong Wang  || Heterogeneous Convolutive Non-negative Sparse Coding ||[[媒体文件:Heterogeneous_convolutive_non-negative_sparse_coding.pdf|slides]] [http://homepages.inf.ed.ac.uk/v1dwang2/public/pdf/inerspeech2012-hetero.pdf paper] ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/09/03  ||NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/09/10  || NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/09/17  ||WALEED ABDULLA||Auditory Based Feature Vectors for Speech Recognition ||[[媒体文件:AuditoryBasedFeatureVectors.pdf|slides]]||范淼&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;2&amp;quot;|2012/09/24  ||刘超|| N-gram FST indexing for Spoken Term Detection || [[媒体文件:120924-N_gram_FST_indexing_for_Spoken_Term_Detection-LC-0.pdf|slides]] ||尹聪&lt;br /&gt;
|-&lt;br /&gt;
|范淼||Micro-blogging, Wikipedia, Folksonomy, What's Next? ||[[媒体文件:120924-Micro-blogging, Wikipedia, Folksonomy, What's Next-FM--01-FM-.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| 2012/10/08 ||NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
| 2012/10/15  ||NO Meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
|2012/10/22||Wu Xiaojun||speaker recognition in CSLT ||[[媒体文件:VPR_in_CSLT.pdf|slides]]||卡尔&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/10/29  ||王军||An overview of Automatic Speaker Diarization Systems || [[媒体文件:121027-Speaker Diarization-WJ.pdf|slides]] ||别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/05  ||别凡虎||Experiments on Emotional Speaker Recognition||[[媒体文件:121104-Experiments_on_Emotional_Speaker_Recognition-BFH.pdf|slides]] ||刘超&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/12  ||唐国瑜||Statistical Word Sense Improves Document Clustering ||[[媒体文件:121112_Statistical_Word_Sense_Improves_Document_Clustering_TGY.pdf‎ |slides]]||邱晗&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/19  ||张陈昊||TDSR with Long-term Features Based on Functional Data Analysis||[[媒体文件:121118-ISCSLP-FDA_SR-ZCH.pdf|slides]] ||王俊俊&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/11/26  ||王琳琳||Time-Varying Speaker Recognition: An Introduction||[[媒体文件:121126-Time_Varying_Speaker_Recognition_I-Wll.pdf‎|slides]] ||龚宬&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/12/03  ||No meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/12/10  ||No meeting|| || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2012/12/17  ||No meeting|| || ||&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/01/07  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
|2013/01/07  ||王军||基于DF-MAP的说话人模型训练方法||[[媒体文件:130107-基于DFMAP的说话人模型训练方法-WJ.pdf|slides]] ||唐国瑜&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/01/14  ||王东|| Computing in CSLT ||[[媒体文件:Computing_in_CSLT.pdf|slides]] ||王琳琳&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/04  ||王军||Sequential Adaptive Learning for Speaker Verification ||[[媒体文件:130301-Sequential adaptive learning for speaker verification-WJ.pdf|slides]] ||别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/11  || Du Jinle|| VAD stuff || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/18  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/03/25  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/01  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/08  || 张陈昊|| A Fishervoice based Feature Fusion Method for SUSR ||[[媒体文件:130408-FisherVoice-ZCH.pdf|slides]] ||谢仲达&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/15  ||龚宬|| An Exploration on Influence Factors of VAD's Performance in Speaker Recognition ||[[媒体文件:130415-An_Exploration_on_Influence_Factors_of_VAD-GC.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/22  ||王俊俊 || Understanding the Query: THCIB and THUIS at NTCIR-10 Intent Task ||[[媒体文件:130422-Understanding_the_Query-WJJ.pdf|slides‎]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/04/29  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/06  ||别凡虎 ||MLLR on Emotional Speaker Recognition ||[[媒体文件:130506-MLLR on Emotional Speaker Recognition-BFH.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/13  ||刘超 || The Use of Deep Neural Network for Speech Recognition || [[媒体文件:130513-the_use_of_dnn_for_asr-lc.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/20  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/05/27  ||王琳琳|| 说话人识别中的时变鲁棒性问题研究 || [[媒体文件:130527-TVSV-Wll.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/03  ||王俊俊|| 汉语搜索结果聚类系统研究与实现 || [[媒体文件:130601-毕业答辩-02-WJJ.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/10  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/17  ||范淼 || Relation Extraction ||[[媒体文件:130617-relation_extraction-fm.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/06/24  ||唐国瑜 || Incorporating Statistical Word Senses in Topic Model  ||[[媒体文件:130624_Incorporating Statistical Word Senses in Topic Model_TGY.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/07/01  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/07/08  ||  || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/07/15  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/09  ||王东 || Research Frontier in Speech Technology||[[媒体文件:Research Frontier in Speech Technology.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/16  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/23  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/09/30  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/07  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/14  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/21  ||范淼 ||Transduction Classification with Matrix Completion （中文报告）||[[媒体文件: Transduction_Classifiction_with_Matrix_Completion.pdf‎|slides]] [http://pages.cs.wisc.edu/~jerryzhu/pub/mc4ssl_FINAL.pdf paper]|| 李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/10/28  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/04  || 王军 || 基于i-vector的intersession补偿及打分方法(综述) || [[媒体文件:131104-ivecto下intersession补偿及打分方法--01-WJ-.pdf‎|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/11  ||张陈昊 ||PLDA介绍及PLDA在说话人识别中的应用 ||[[媒体文件:PLDA.pdf|slides]] || 唐国瑜&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/18  ||别凡虎 ||i-vector理论介绍（讨论）||[[媒体文件:131118-i-vector_and_GMM-UBM-BFH.pdf|slides]]‎  ||王军&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/11/25  ||刘超 || Pruning Neural Networks By Optimal Brain Damage(综述)||[[媒体文件:131125-OBD-LC-01.pdf|slides]] ||范淼&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/02  ||范淼 ||Distant Supervision for Relation Extraction with Matrix Completion （英文报告）||[[媒体文件:131202-DRMC-FM-01.pdf|slides]] || 李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/09  || Dong Wang|| Introduction to the HMM-based speech synthesis||[http://hts.sp.nitech.ac.jp/archives/2.2/HTS_Slides.zip slides] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/16  ||张陈昊 ||语音研究中的基元介绍 ||[[媒体文件:131215-Phonology-ZCH.pdf|slides]]  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/23  || Dong Wang|| Introduction to the HMM-based speech synthesis (2)||[http://hts.sp.nitech.ac.jp/archives/2.2/HTS_Slides.zip slides] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/23  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2013/12/30  ||刘荣 || continuous space language model||[[媒体文件:Cslm-cslt.pdf|slides]]  ||刘超&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/01/06  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/01/13  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/01/20  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/02/24  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/03  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/10  ||范淼|| Distant Supervision for Information Extraction (英文报告)|| || 李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/17  ||唐国瑜 || Topic Models Incorporating Statistical Word Senses || [[媒体文件:TMISWS_For_CICLing2014.pdf|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/24  ||孟祥涛 || Noisy training for Deep Neural Networks|| ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/03/31  ||范淼|| Translating Embeddings for Modeling Multi-relational Data （中文报告） || [https://www.hds.utc.fr/everest/lib/exe/fetch.php?id=en%3Atranse&amp;amp;cache=cache&amp;amp;media=en:cr_paper_nips13.pdf paper]||李蓝天&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/07  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/14  || Wang Jun|| I-vector and PLDA in depth ||[[媒体文件:131104-ivector-microsoft-wj.pdf|slides]]  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/21  || 邱晗||汉语事件句式规范化处理 ||[[媒体文件:140421-汉语事件句式规范化-QH.pdf‎|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/04/28  || 唐国瑜|| Some papers in　CICLing2014 ||[[媒体文件:Some_papers_in_CICling2014.pdf|slides]]  ||刘超&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/05/05  || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/05/12  || 卡尔|| paper introduction || [[媒体文件:Acoustic Factor Analysis.pdf|slides]] || 邱晗&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;2&amp;quot;|2014/05/19  || 邱晗|| 汉语事件句式CCG推导树重构 ||[[媒体文件:140519-CCG_reConstruction.pdf‎|slides]]‎|| 卡尔&lt;br /&gt;
|-&lt;br /&gt;
|Liu Chao|| master proposal: sparse and deep neural networks || [[媒体文件:140519-proposal-LC-01.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| || Liu Chao|| 2nd master proposal: sparse and deep neural networks|| ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/06/16  || 别凡虎 || Truncated Wave based VPR and Some Recent Work || [[媒体文件:140614-Truncated_Speech_based_VPR.pdf‎|slides]]‎ || 别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/06/23  || 别凡虎 || Block-wise training for I-vector || [[媒体文件:140623-Block-wise training for I-vector.pdf‎|slides]]‎ || 别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/07/07||王军 ||Discriminative Scoring for Speaker Recognition Based on I-vectors || [[媒体文件:140707-work_report.pdf|slides]]|| 王军&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/09/01|| || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/09/09 ||别凡虎 ||Research on Truncated Wave based VPR||[[媒体文件:140909-Truncated Speech based VPR.pdf|slides]] || 别凡虎&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/09/15|| || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/09/22  || Miao Fan|| Large-scale Entity Relation Extraction based on Low-dimensional Representations (中文报告，博士开题)&lt;br /&gt;
||[[媒体文件:基于低维表示的大规模实体关系挖掘技术.pdf‎|slides]] || Lantian Li&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;| 2014/09/29 || || || ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/10/13  || Miao Fan|| The Frontier of Knowledge Embedding （英文报告）|| [[媒体文件:The_Frontier_of_Knowledge_Embedding.pdf‎|slides]]|| Lantian Li&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/10/20  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/10/27  || Li Yi || Phonemes, Features, and Syllables: Converting Onset and Rime Inventories to Consonants and Vowels||[[媒体文件:Lanzhou Phonemes, Features, and Syllables- fianl.pdf|paper]] [[媒体文件:Syllables and phonemes - 20141027.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/3   || 米吉提|| Automatic Speech Recognition of Agglutinative Language based on Lexicon Optimization||[[媒体文件:Mijit-slides-清华大学-2014-11-3.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/10  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/17  ||Dong Wang || Highly restricted keyword spotting for Uyghur using sparse analysis|| [[媒体文件:Highly Restricted Keyword Selection Based on Sparse Analysis.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/11/24  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/1  ||ZhongDa Xie ||Incorporating Fine-Grained Ontological Relations in Medical Document Ranking || [[媒体文件:Fine-grained_relations.pdf|slides]]|| Lantian Li &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/8  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/15  || 唐国瑜 || 跨语言话题分析关键技术研究 ||[[媒体文件:141205-答辩-TGY.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/22  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2014/12/29  || Askar || Language Mismatch in Speaker Recognition System||[[媒体文件:141229--askar.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/5  ||Lantian Li || Deep Neural Networks for Speaker Recognition || [[媒体文件:150104_Deep_Neural_Networks_for_Speaker_Recognition_LLT.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/12  || || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/19  || Dong Wang || Machine Learning Paradigms for Speech Recognition||[[媒体文件:Machine Learning Paradigms for Speech Recognition.pdf|slides]]  [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6423821 paper] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/1/26  || Chen Guorong || Information Transmission and Distribution on Web ||[[媒体文件:An_introduction_of_complex_network1.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot; |2015/3/9 || Dong Wang || Joint Deep Learning || [[媒体文件:Joint Deep Learning.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/3/16  || Dongxu Zhang || Knowledge learning from text data and knowledge bases || [[媒体文件:Joint Deep Learning.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/4/13  || Xuewei Zhang || Lasso-based Reverberation Suppression In Automatic Speech Recognition || [[媒体文件:Lasso-based Reverberation Suppression In Automatic Speech Recognition.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/5/11  || Dong Wang ||ASR and SID Research Frontier ||[[媒体文件:ASR and SID Research Frontier.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/11/23  || Zhiyuan Tang|| CTC learning|| [[媒体文件:CTC.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/11/30  || Mengyuan Zhao|| CNN-based music removal|| [[媒体文件:Music Removal by Convolutional Denoising.pdf | slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/3  || Zhiyuan Tang|| Networks of Memory|| [[媒体文件:Memory_net.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/7  || Yiqiao Pan|| Document Classification with Spherical Word Vectors||[[媒体文件:Document Classification with Spherical Word Vectors.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/14  || Dong Wang || Transfer Learning for Speech and Language Processing ||[[媒体文件:Transfer_Learning_for_Speech_and_Language_Processing.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/21  || Qixin Wang || Attention for poem generation ||[[媒体文件:Ijcai 2016.pptx|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2015/12/28  || Lantian Li || Max-margin metric learning for speaker recognition || [[媒体文件:Max-margin-Metric-Learning.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/1/4  || Zhiyong Zhang || Parallel training, MPE and natural gradient||[[媒体文件:20160104_张之勇_Large-scale Parallel Training in Speech Recognition.pdf|slides]]||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/1/18  || Dongxu Zhang || Memoryless Document Vector ||[[媒体文件:Memoryless_document_vector.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/3/14  || Zhiyuan Tang|| Oral presentation for &amp;quot;vMF-SNE: Embedding for Spherical Data&amp;quot;|| [[媒体文件:embedding.pdf|slides]] ||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/3/28  || Tianyi Luo || Review for Neural QA || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/2/29/CSLT_Weekly_Report--20160328.pdf slides] ||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/4/11  || Rong Liu || Recommendation in Youku || [http://cslt.riit.tsinghua.edu.cn/mediawiki/index.php/%E6%96%87%E4%BB%B6:Cslt%E5%AE%9E%E9%AA%8C%E5%AE%A4%E4%BA%A4%E6%B5%81.pptx slides] ||  &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/09 || Miao Fan || Learning contextual embeddings of knowledge base with entity descriptions.|| [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9c/Techreport_CSLT_2016_M.F..pdf slides]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/16 || Yang Wang || Research on conversation thread detection. || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/bb/%E6%B1%AA%E6%B4%8B-%E6%AF%95%E8%AE%BE-CSLT.pdf slides]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/20 || Yang Wang &amp;amp;  Maoning Wang || Research on portfolio selection. || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/89/%E6%B1%AA%E6%B4%8B-%E9%87%91%E8%9E%8D%E7%AC%AC%E4%B8%80%E6%AC%A1%E5%88%86%E4%BA%AB.pdf slides1]  [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/b/bb/%E6%B1%87%E6%8A%A5_%E8%B5%84%E4%BA%A7%E7%BB%84%E5%90%88%E4%B8%AD%E5%87%A0%E4%B8%AA%E8%AF%84%E4%BB%B7%E6%8C%87%E6%A0%87%E7%9A%84%E8%A7%A3%E9%87%8A.pdf slides2]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/20  || Zhiyuan Tang || ICASSP 2016 summary || [[媒体文件:Note icassp16.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/5/23 || Dong Wang || graphical model and neural model || [[媒体文件:Graphic Model and Neural Model.pdf|slides]] [[媒体文件:Generative-Pdf.rar|papers]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/8/02 || Zhiyuan Tang || Visualizing, Measuring and Understanding Neural Networks: A Brief Survey|| [[媒体文件:Nn analysis.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/8/03 || Yang Wang || Neural networks and genetic programming for financial forecasting || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/79/GeneticNN.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/05 || Yang Wang || Reinforcement Learning Models and Simulations || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ca/RRL_and_sim.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/08 || April Pu || SOFTWARE DEVELOPMENT METHODOLOGIES || [http://wangd.cslt.org/talks/pdf/april_software.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/12 || Yang Wang || Generative Adversarial Nets || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c9/Generative_adversarial_network.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/22 || Zhiyuan Tang || INTERSPEECH 2016 summary || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/65/Interspeech16_review.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2016/11/30 || Dong Wang || Deep and sparse learning in speech and language: an overview || [http://wangd.cslt.org/talks/pdf/bics2016.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/2/17 || Yang Wang || Review understanding deep learning requires rethinking generalization || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/3b/Review_understanding_deep_learning_requires_rethinking_generalization.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/5 || Dong Wang || Deep speech factorization || [http://wangd.cslt.org/talks/pdf/Deep-Speech-Factorization.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/8 || Shiyue Zhang || Convolutional Sequence to Sequence Learning  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f3/Conv_seq2seq.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/12 || Shiyue Zhang || Memory-augmented Neural Machine Translation || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/36/Memory-augmented_Neural_Machine_Translation_.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/21 || Shiyue Zhang || Attention Is All You Need  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/68/Attention_is_all_you_need.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/26 || Jiyuan Zhang || Chinese poem generation using neural model  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/5/50/Flexible_and_Creative_Chinese_Poetry_Generation_Using_Neural_Memory_.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/6/21 || Miao Zhang || Speaker recognition on cough,laugh and wei  || &lt;br /&gt;
[http://cslt.riit.tsinghua.edu.cn/mediawiki/images/f/f6/Zm_cough.pdf slides]  &lt;br /&gt;
||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/7/10 || Aodong Li || Enhanced Neural Machine Translation by Learning from Draft  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ca/Learning_from_draft.pptx slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2017/7/17 || Lantian Li || Study on Speaker Recognition  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/ec/170716-Study_on_SRE.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2018/12/6 || Xiuqi Jiang ||  Meta-Learning and Zero-Shot Learning  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/18/181205_Meta-Learning_and_Zero-Shot_Learning_JXQ.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2018/12/12 || Dan He ||  Tensor factorization neural net  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/3/3d/Tensor_factorization_neural_net.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2018/12/26 || Dong Wang || Towards deep statistical speaker representation  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/4/48/V.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/01/04 || Dong Wang || Speech in NIPS 2017/2018  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/c8/Speech_in_NIPS_2017.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/07/17 || Dong Wang || Deep Feature Learning and Normalization for Speaker Recognition  || [http://wangd.cslt.org/talks/pdf/india.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/08/19 || Sitong Cheng &amp;amp; Pengyuan Zhang || Periodic Report of Celebrity Video Data Collection.   || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/08/C-STAR.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/08/19 || Dong Wang|| Continuous Learning for Neural Nets || [[媒体文件:Continuous Learning for Neural Nets.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/09/11 || Dong Wang || Language Recognition in ICASSP 2019   || [http://wangd.cslt.org/talks/pdf/LRE-ICASSP-2019.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/09/11 || Sitong Cheng || Language Recognition in Interspeech 2019   || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/a/a9/Language_Recognition_in_Interspeech_2019.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/10/14 || Haoran Sun || Dimension Reduction  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/7/7b/DimensionReduction.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/10/27 || Dong Wang || Back to Matrix  || [[媒体文件:Back to Matrix.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/11/11 || Dong Wang || Helmholtz Machine &amp;amp; The ML criterion  || [[媒体文件:Helmholtz Machine &amp;amp; The ML criterion.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/02 || Jiawen Kang || GAN Latent Space Manipulation &amp;amp; Flow Application Papers  || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/c/ca/GAN_Lantent_Space_manunipulation_%26_Flow_Application.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/09 || Dong Wang || Style transfer and information factorization || [[媒体文件:Style Transfer with Generative Models.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/16 || Zhiyuan Tang ||  Conditional Generative Flow  ||  [[媒体文件:Conditional GLow.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/23 || Lantian Li ||  Deep Generative Model in Speaker Recognition || [[媒体文件:Deep Generative Model in Speaker Recognition.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2019/12/30 || Wenqiang Du ||  Cross-bandwidth Train || [[媒体文件:Cross-bandwidth_Train.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/01/06 || Yunqi Cai ||  Do Deep Generative Models Know What They Don't Know ?|| [[媒体文件:2020.1.6_group_meeting.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/01/10 || Haoran Sun ||  Flow++: Improving Flow-Based Generative Models with Variational Dequantization and Architecture Design || [[媒体文件:Flow++.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/01/13 || Ying Shi ||  Deep Generative Model Energy Based Model || [[媒体文件:Deep_Generative_Model.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/02/10 || Dong Wang ||  Deep Generative Models for Discriminative Tasks || [[媒体文件:Re-Thinking for Discriminative and Generative Models.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/02/17 || Zhiyuan Tang ||  Unsupervised Learning of Disentangled Representations  || [[媒体文件:20200217 Unsupervised disentanglement.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/02/24 || Lantian Li ||  Weakly- &amp;amp; Self-Supervised Learning || [[媒体文件:Weakly-_%26_Self-Supervised_Learning.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/02 || Yunqi Cai ||  Deep Normalization for Speaker Vectors|| [[媒体文件:Deep_Normalization_for_Speaker_Vectors_.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/09 || Ying Shi ||  Speech Enhancement based on Double Flow || [[媒体文件:Speech_Enhancement_base_on_Double_Flow.pdf|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/16 || Dong Wang ||  Bayesian scoring and uncertainty manipulation || [[媒体文件:Uncertainty Propagation.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/23 || Zhiyuan Tang || Classifier involves Energy Based Model  || [[媒体文件:200323 energy model.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/03/30 || Lantian Li ||  Bayesian scoring in speaker verification || Temporarily held for security || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/04/06 || Yunqi Cai ||  Posterior Collapse|| [[媒体文件:Posterior_Collapse.pdf|slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/04/13 || Lantian Li || NDA in ASV || Temporarily held for security [cvss 761] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/04/20 || Ying Shi ||  Speech_Enhancement_base_on_Flow ||[[媒体文件:Speech_Enhancement_base_on_Flow.pdf|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/11 || Dong Wang  ||  Real DNF || [[媒体文件:Real_DNF.pdf|Slide]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Sitong Cheng ||  ASR-Free Pronunciation Assessment || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/9a/ASR-Free_Pronunciation_Assessment.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Jiawen Kang ||  RobustMAML || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/8/8e/RobustMAML.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Jiawen Kang ||  Domain adaptation review || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/6d/Presentation-Meta-learning.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/05/26 || Jiawen Kang ||  SOTA models for VPR || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/d/d2/SOTA_models_for_VPR.pdf slides] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/01 || Dong Wang || How MAML succeeded?  || [https://arxiv.org/pdf/1909.09157.pdf][https://pdfs.semanticscholar.org/e6e9/c9d50b11ced939faf42f1c65bf9360eefd73.pdf][https://arxiv.org/pdf/1706.05806.pdf] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/09 || Zhiyuan Tang  ||  Flow Wheels || [[媒体文件:20200408 flow wheels.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/15 || Lantian Li  ||  Uncertainty Modeling and Inference || [[媒体文件:200615-Uncertainty.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/22 || Lantian Li  ||  Gaussians in High Dimension || [[媒体文件:High-dimensioaln-Gaussian.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/22 || Dong Wang  ||  Self training for SE and ASR || [[媒体文件:Self-Training.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/06/29 || Ying Shi  ||  Speech enhancement &amp;amp; separation || [[媒体文件:Speech-Separation-and-Enhancement.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/07/06 || Haolin Chen  ||  Self-supervised Learning in Speech Processing || [[媒体文件:Self-Supervised.pptx|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/07/13 || Zhiyuan Tang  || Exploding inverse in INN || [[媒体文件:20200713 dig into flow.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/07/20 || Lantian Li  || Principle Solution for Enroll-Test Mismatch || [[媒体文件:200720-mismatch.pdf|slides]]  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/08/17 || Dong Wang  || Decoupled scoring || [[媒体文件:Decoupled.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/08/24 || Zhiyuan Tang || G &amp;amp; D Acoustic model ||  [[媒体文件:20200824 flow asr.pdf | slides]]   || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/09/01 || Lantian Li || Decoupled NL ||     || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/09/07 || Yunqi Cai ||Deep generative model based Anomaly detection||[[媒体文件:Anomaly_detection.pdf | slides]]|| &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/09/14 || Dong Wang || How we factorize speech? || [[媒体文件:Factorization.pdf|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/05 || Dong Wang || Remarks on DNF || [[媒体文件:Remakrs on DNF.pptx|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/12 || Dong Wang || Paper Reading: Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations || [[媒体文件:Challenge-disentanglement.pptx|slides]]  [http://proceedings.mlr.press/v97/locatello19a/locatello19a.pdf paper link]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/19 || Haoran Sun || Informational Speech Factorization by Factorial Discriminative Normalization Flow || [[媒体文件:Informational_Speech_Factorization.pdf|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/10/27 || Jiao Han || Experimental report mainly based on DNF models || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/e/e9/Experimental_report_mainly_based_on_DNF_models.pdf slides]    ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/02 || Lantian Li || INTERSPEECH 2020 (SRE) || [[媒体文件:201102-INTERSPEECH_2020-SRE-LLT.pdf|slides]]      || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/09 || Yunqi Cai || Deep normalization_V1 || [[媒体文件:Deep_norm_trilogy_v1.pdf|slides]] [http://caiyq.cslt.org/doc/deepnorm_v1.mp4 video]    || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/16 || Yunqi Cai || Deep normalization_V2 || [http://caiyq.cslt.org/doc/deep-norm-trilogy_v2.pptx slides] [http://caiyq.cslt.org/doc/deepnorm_v2.mp4 video]     || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/17 || Di Wang || Statistics decomposition for NL Scoring || [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/97/Statistics_decomposition_for_NL_Scoring.pdf slides]    || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/11/23 || Yunqi Cai || Deep normalization_V3 || [http://caiyq.cslt.org/doc/deep-norm-trilogy_v3.pptx slides] [http://caiyq.cslt.org/doc/deepnorm_v3.mp4 video]    || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/08 || Yunqi Cai || From materials science to perceptual intelligence || [http://caiyq.cslt.org/doc/perceptual_intelligence.pptx slides] [http://caiyq.cslt.org/doc/**.mp4 video]    ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/08 || Dong Wang || From noise injection to Bayes PLDA || [[媒体文件:Bayes-plda.ppt|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/21 || Lantian Li || Speech in NIPS 2019/2020 || [[媒体文件:Speech in NIPS 19&amp;amp;20.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2020/12/28 || Pengqi Li || Domain generalization via robust optimization || [[媒体文件:201228-Device_Generalization.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/01/07 || Dong Wang || What we believe || [[媒体文件:What we believe.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/01/14 || Dong Wang || Reparametric trick || [[媒体文件:Reparametric.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/02/01 || Dong Wang || Data augmentation as regularization || [[媒体文件:Data-augmentation.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/02/22 || Lantian Li || Ensemble and Distillation || [[媒体文件:2012.09816.pdf|paper]] [[媒体文件:Ensemble_And_Distillation.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/08 || Dong Wang || HIERARCHICAL GENERATIVE MODELING FOR CONTROLLABLE SPEECH SYNTHESIS || [https://arxiv.org/pdf/1810.07217.pdf paper] [[媒体文件:HIERARCHICALGENERATIVEMODELING FORCONTROLLABLESPEECHSYNTHESIS.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/15 || Dong Wang || 第三代人工智能 || [http://scis.scichina.com/cn/2020/SSI-2020-0204.pdf  paper] [[媒体文件:第三代人工智能.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/22 || Chao Xing || Complexity neural net in speech enhancement || [http://web.cse.ohio-state.edu/~wang.77/papers/WWW.taslp20.pdf paper1][https://openreview.net/pdf?id=SkeRTsAcYm paper2] [https://arxiv.org/pdf/2008.00264.pdf paper3] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/03/29 || Ying Shi || Some methods about speech enhancement || [[媒体文件:SPEECH ENHANCMENGT.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/05 || Jiyuan Zhang || 推理 &amp;amp; 知识推理调研 || [[媒体文件:知识推理相关调研.pdf|slides]]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/12 || Zicheng Qiu || Some work on minority-language speech recognition||  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/19 || Shiyue Zhang || Text summarization||  ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/04/26 || Dong Wang || Paper reading: Metadata normalization || [[媒体文件:Meta normalization.pdf|slides]] [https://arxiv.org/pdf/2104.09052.pdf paper]   ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/05/10 || Lantian Li || Explainable ML || [[媒体文件:Explainable_ML.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/05/17 || Jie Li || Tea cake Re-identification || [[媒体文件:Tea-cake.pptx|slides]] ||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/05/24 || Dong Wang || Deep speech prior|| [[媒体文件:Deep speech prior.pptx|slides]]||&lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/06/28 || Haoran Sun || Information Bottleneck and Deep Learning || [[媒体文件:Information_bottleneck.pdf|slides]] [[媒体文件:IBpapers.pdf|papers]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/07/05 || Chen Chen || Speech Enhancement Overview || [[媒体文件:SpeechEnhancementOverview.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/07/19 || JiaWen Kang || Disease detection from speech signal ||  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/07/26 || 毛丽旦 || Characters of Uyghur in learning Chinese || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/08/02 || Chao Xing ||  Multi-Modal Speech Interaction||  || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/08/16 || Dong Wang || Training Mutual Information|| [[媒体文件:Information max.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/09/06 || Jie Li || Detection of object falling  || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/09/13 || Zhiyuan Tang|| Construction of Ke speech database || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/09/20 || Shiyue Zhang || Objective evaluation for text summarization || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/10/11 || Chao Xing  || Knowledge-driven small footprint backbone || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/10/18 || Haoran Sun || CycleFlow: PURIFY INFORMATION FACTORS BY CYCLE LOSS || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/10/25 || Lantian Li || Real Additive Margin Softmax for Speaker Verification || [[媒体文件:RAM-Softmax.pdf|slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/01 || Ying Shi || End-to-End kws || [[Kws.pdf | slides]] || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/08 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/15 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/22 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/11/29 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/06 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/13 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/20 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
| rowspan=&amp;quot;1&amp;quot;|2021/12/27 ||  || || || &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[http://cslt.riit.tsinghua.edu.cn/mediawiki/index.php/Weekly_reading See more related]&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kws.pdf</id>
		<title>文件:Kws.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Kws.pdf"/>
				<updated>2021-11-29T13:07:28Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：Sunhaoran上传“文件:Kws.pdf”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-11-15</id>
		<title>2021-11-15</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-11-15"/>
				<updated>2021-11-15T10:54:21Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Spoof paper almost done&lt;br /&gt;
||&lt;br /&gt;
* Spoof paper cleaning&lt;br /&gt;
* Hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Completed THS2021 data preprocessing, image feature extraction, and baseline&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Complete hard trials paper v1.&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* Go on preparing my defence.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* make a comparison between fncmd and new methods&lt;br /&gt;
* find some cross-modality methods(Cross modality attention) &lt;br /&gt;
||&lt;br /&gt;
* Implement Cross modality attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* speaker and robustness test for CycleFlow-spk&lt;br /&gt;
||&lt;br /&gt;
* some improvement&lt;br /&gt;
* some other exploration for the model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Modify wav2vec-u gan network code and output intermediate results&lt;br /&gt;
* Training wav2vec2 model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare the training process of TIMIT and Tibetan&lt;br /&gt;
||&lt;br /&gt;
* Fine-tune the Tibetan wav2vec model&lt;br /&gt;
* Prepare the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:En_codes_t_lda_e.png</id>
		<title>文件:En codes t lda e.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:En_codes_t_lda_e.png"/>
				<updated>2021-11-03T14:32:07Z</updated>
		
		<summary type="html">&lt;p&gt;Sunhaoran：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Sunhaoran</name></author>	</entry>

	</feed>