<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="http://index.cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="zh-cn">
		<id>http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Liangwd</id>
		<title>cslt Wiki - 用户贡献 [zh-cn]</title>
		<link rel="self" type="application/atom+xml" href="http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Liangwd"/>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E7%89%B9%E6%AE%8A:%E7%94%A8%E6%88%B7%E8%B4%A1%E7%8C%AE/Liangwd"/>
		<updated>2026-04-07T07:13:47Z</updated>
		<subtitle>用户贡献</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-11</id>
		<title>2022-04-11</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-11"/>
				<updated>2022-04-11T10:44:41Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (refresh SOTA model)&lt;br /&gt;
* Complete Spoof overview&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Zeus/kws&lt;br /&gt;
** CNN ASR&lt;br /&gt;
** IBM Double AE&lt;br /&gt;
** THU energy model&lt;br /&gt;
** Google Rnn-t (in progress)&lt;br /&gt;
** QbE (in progress)&lt;br /&gt;
** 4 kaldi kws recipe(in progress)&lt;br /&gt;
||&lt;br /&gt;
* Verify CNN &lt;br /&gt;
* continue on zeus/kws &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* update cycleflow website&lt;br /&gt;
* review unsupervised AVSR papers&lt;br /&gt;
||&lt;br /&gt;
* write review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* paper's web and code&lt;br /&gt;
||&lt;br /&gt;
* project&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* web and code for paper&lt;br /&gt;
* Presentation &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Wav2vec paper review preparation&lt;br /&gt;
* Cos sim experimental data collation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* continue the test on LSH&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* do some work about OCR&lt;br /&gt;
||&lt;br /&gt;
* continue the task on OCR&lt;br /&gt;
* read papers&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-28</id>
		<title>2022-03-28</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-28"/>
				<updated>2022-03-28T11:07:30Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Interspeech refinement&lt;br /&gt;
* Keyword spotting review&lt;br /&gt;
||&lt;br /&gt;
* Keyword spotting review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Reports writing&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Spoof overview&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Continue work on Speech engrave (TXT convolution)&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare ASR/OCR model for ICPRMSR&lt;br /&gt;
* Reading paper of lip2wav&lt;br /&gt;
||&lt;br /&gt;
* Review recent papers about Lip Reading &amp;amp; AVSR&lt;br /&gt;
* Prepare ASR/OCR model for ICPRMSR&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* INTERSPEECH&lt;br /&gt;
||&lt;br /&gt;
* code, homepage for paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*  INTERSPEECH&lt;br /&gt;
||&lt;br /&gt;
* code update&lt;br /&gt;
* homepage building&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Cos sim experiment of WAV2VEC feature and MFCC feature&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Dictionary generation for true and pseudo-labeled&lt;br /&gt;
||&lt;br /&gt;
* Downstream ASR Task&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Test on SOTA model&lt;br /&gt;
* Learn score normalization&lt;br /&gt;
||&lt;br /&gt;
* Go on test&lt;br /&gt;
* Test with s-norm&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Build experiment environment on the server&lt;br /&gt;
* Extract embeddings of 2793 speakers&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* weighted score fusion&lt;br /&gt;
* score-weight score fusion&lt;br /&gt;
* SNR-weight score fusion&lt;br /&gt;
||&lt;br /&gt;
* OCR&lt;br /&gt;
* layer-SNR-weight score fusion&lt;br /&gt;
* find some research about recognition in information incompleteness&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.jpg</id>
		<title>文件:Map.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.jpg"/>
				<updated>2022-03-23T08:35:15Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：Liangwd上传“文件:Map.jpg”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.jpg</id>
		<title>文件:Map.jpg</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.jpg"/>
				<updated>2022-03-23T08:32:03Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png</id>
		<title>文件:Map.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png"/>
				<updated>2022-03-23T08:29:50Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：Liangwd上传“文件:Map.png”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png</id>
		<title>文件:Map.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png"/>
				<updated>2022-03-23T07:38:17Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：Liangwd上传“文件:Map.png”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png</id>
		<title>文件:Map.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png"/>
				<updated>2022-03-23T07:35:28Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：Liangwd上传“文件:Map.png”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png</id>
		<title>文件:Map.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png"/>
				<updated>2022-03-23T07:30:08Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：Liangwd上传“文件:Map.png”的新版本&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png</id>
		<title>文件:Map.png</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Map.png"/>
				<updated>2022-03-23T07:29:13Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Active_Projects</id>
		<title>Active Projects</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Active_Projects"/>
				<updated>2022-03-23T07:13:26Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Active projects=&lt;br /&gt;
&lt;br /&gt;
[[Deep_Speech_Factorization-2|Deep Speech Factorization (Phase 2)]]&lt;br /&gt;
&lt;br /&gt;
[[Asr-project-nsfc|Multilingual Minorilanguage Automatic Speech Recognition (M2ASR)]]&lt;br /&gt;
&lt;br /&gt;
[[AI 100 QA]]&lt;br /&gt;
&lt;br /&gt;
=Paused projects=&lt;br /&gt;
&lt;br /&gt;
[[Vivi-poem-generation|Vivi Poem]]&lt;br /&gt;
&lt;br /&gt;
=Completed projects=&lt;br /&gt;
&lt;br /&gt;
==Speech Processing==&lt;br /&gt;
[[Enhanced exemplar autoencoder with cycle consistency loss in any-to-one voice conversion]]&lt;br /&gt;
&lt;br /&gt;
[[Deep Generative Factorization For Speech Signal(ICASSP21)]]&lt;br /&gt;
&lt;br /&gt;
[[Flow-based Speech Analysis|Unsupervised speech factorization (ICASSP20)]]&lt;br /&gt;
&lt;br /&gt;
[[CN-Celeb|CN-Celeb (ICASSP20)]]&lt;br /&gt;
&lt;br /&gt;
[[Deep_Speaker_Feature_Learning|Deep Speaker Feature Learning]]&lt;br /&gt;
&lt;br /&gt;
[[Deep_Speech_Factorization|Deep Speech Factorization (Phase I)]]&lt;br /&gt;
&lt;br /&gt;
[[Speaker_Recognition_on_Trivial_events|Speaker Recognition on Trivial Events]]&lt;br /&gt;
&lt;br /&gt;
[[Phonetic_Temporal_Neural_LID|Phonetic Temporal Neural Model for Language Identification]]&lt;br /&gt;
&lt;br /&gt;
[[Asr-project-segment|Deep speaker segmentation]]&lt;br /&gt;
&lt;br /&gt;
[[TTS-project-synthesis|Flexible speech synthesis]]&lt;br /&gt;
&lt;br /&gt;
==Language Processing==&lt;br /&gt;
&lt;br /&gt;
[[Vivi-translation|Vivi Neural translation]]&lt;br /&gt;
&lt;br /&gt;
==Financial Processing==&lt;br /&gt;
&lt;br /&gt;
[[racorn-k|Risk Aversion Corn-K]]&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Active_Projects</id>
		<title>Active Projects</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Active_Projects"/>
				<updated>2022-03-23T07:13:13Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：/* Speech Processing */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=Active projects=&lt;br /&gt;
&lt;br /&gt;
[[Deep_Speech_Factorization-2|Deep Speech Factorization (Phase 2)]]&lt;br /&gt;
&lt;br /&gt;
[[Asr-project-nsfc|Multilingual Minorilanguage Automatic Speech Recognition (M2ASR)]]&lt;br /&gt;
&lt;br /&gt;
[[AI 100 QA]]&lt;br /&gt;
&lt;br /&gt;
=Paused projects=&lt;br /&gt;
&lt;br /&gt;
[[Vivi-poem-generation|Vivi Poem]]&lt;br /&gt;
&lt;br /&gt;
=Completed projects=&lt;br /&gt;
&lt;br /&gt;
==Speech Processing==&lt;br /&gt;
[[Enhanced exemplar autoencoder with cycle consistency loss in any-to-one voice conversion]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Deep Generative Factorization For Speech Signal(ICASSP21)]]&lt;br /&gt;
&lt;br /&gt;
[[Flow-based Speech Analysis|Unsupervised speech factorization (ICASSP20)]]&lt;br /&gt;
&lt;br /&gt;
[[CN-Celeb|CN-Celeb (ICASSP20)]]&lt;br /&gt;
&lt;br /&gt;
[[Deep_Speaker_Feature_Learning|Deep Speaker Feature Learning]]&lt;br /&gt;
&lt;br /&gt;
[[Deep_Speech_Factorization|Deep Speech Factorization (Phase I)]]&lt;br /&gt;
&lt;br /&gt;
[[Speaker_Recognition_on_Trivial_events|Speaker Recognition on Trivial Events]]&lt;br /&gt;
&lt;br /&gt;
[[Phonetic_Temporal_Neural_LID|Phonetic Temporal Neural Model for Language Identification]]&lt;br /&gt;
&lt;br /&gt;
[[Asr-project-segment|Deep speaker segmentation]]&lt;br /&gt;
&lt;br /&gt;
[[TTS-project-synthesis|Flexible speech synthesis]]&lt;br /&gt;
&lt;br /&gt;
==Language Processing==&lt;br /&gt;
&lt;br /&gt;
[[Vivi-translation|Vivi Neural translation]]&lt;br /&gt;
&lt;br /&gt;
==Financial Processing==&lt;br /&gt;
&lt;br /&gt;
[[racorn-k|Risk Aversion Corn-K]]&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-21</id>
		<title>2022-03-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-21"/>
				<updated>2022-03-21T11:07:28Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Rewrite the TASLP RC paper&lt;br /&gt;
* Design new architecture for speech engrave&lt;br /&gt;
||&lt;br /&gt;
* Interspeech paper polishment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* continue proposals&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system and Leaderboard Open)&lt;br /&gt;
* PUFA project delivery [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=832 cvss]&lt;br /&gt;
* Sunine update&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on MNIST&lt;br /&gt;
* Speech engrave on Speech data&lt;br /&gt;
||&lt;br /&gt;
* INTERSPEECH&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* experiments for cycle loss&lt;br /&gt;
* website of cycleflow&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
||&lt;br /&gt;
* Review Mandarin Lip Reading Datasets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* more Experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* Interspeech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* paper on CycleAE&lt;br /&gt;
||&lt;br /&gt;
* website update&lt;br /&gt;
* code upload&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Speaker recognition experiment was performed using WAV2VEC&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* dictionary generation and ASR task&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Collect SOTA pre-training model and test on AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Go on test and collect&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* finished plda model&lt;br /&gt;
||&lt;br /&gt;
* test the SOTA model for face recognition&lt;br /&gt;
* explore the means to calculate the confidence of audio&lt;br /&gt;
* do some experiments for different confidence to fuse the audio and face&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-21</id>
		<title>2022-03-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-21"/>
				<updated>2022-03-21T11:06:42Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Rewrite the TASLP RC paper&lt;br /&gt;
* Design new architecture for speech engrave&lt;br /&gt;
||&lt;br /&gt;
* Interspeech paper polishment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* continue proposals&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system and Leaderboard Open)&lt;br /&gt;
* PUFA project delivery [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=832 cvss]&lt;br /&gt;
* Sunine update&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on MNIST&lt;br /&gt;
* Speech engrave on Speech data&lt;br /&gt;
||&lt;br /&gt;
* INTERSPEECH&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* experiments for cycle loss&lt;br /&gt;
* website of cycleflow&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
||&lt;br /&gt;
* Review Mandarin Lip Reading Datasets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* more Experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* Interspeech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* paper on CycleVC&lt;br /&gt;
||&lt;br /&gt;
* website update&lt;br /&gt;
* code upload&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Speaker recognition experiment was performed using WAV2VEC&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* dictionary generation and ASR task&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Collect SOTA pre-training model and test on AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Go on test and collect&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* finished plda model&lt;br /&gt;
||&lt;br /&gt;
* test the SOTA model for face recognition&lt;br /&gt;
* explore the means to calculate the confidence of audio&lt;br /&gt;
* do some experiments for different confidence to fuse the audio and face&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-14</id>
		<title>2022-03-14</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-14"/>
				<updated>2022-03-14T11:06:07Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Keep on revising the TASLP paper&lt;br /&gt;
* More review on disentanglement&lt;br /&gt;
||&lt;br /&gt;
* Finish review disentanglement&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* revise and submit NSFC project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system, Leaderboard and Evaluation toolkit)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
* Release CNCSRC Submission and Leaderboard&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave with alignment&lt;br /&gt;
* Speech engrave with enhancement loss [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/61/Engrave_recover_result.pdf here]&lt;br /&gt;
||&lt;br /&gt;
* Continue work on speech engrave: Design more smart mask mechanisms&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* results of CycleVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/05/CycleVCd128.pdf pdf]&lt;br /&gt;
* tools for vc&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare report of Lip Reading &amp;amp; AVSR&lt;br /&gt;
||&lt;br /&gt;
* Reproduce experiments of reviewed methods&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* do more experiments on visualization&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* various tests with GOP SCA MOSNet and CER scoring&lt;br /&gt;
* 4 Spk VC done&lt;br /&gt;
* modify paper&lt;br /&gt;
||&lt;br /&gt;
* try 6 Spk VC&lt;br /&gt;
* submit paper &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* The WAV2VEC model trained by Librispeech dataset was used for Chinese Asr experiments&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* Improve phoneme translation system&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Data statistics of AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Design SOTA model test experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Test top-N and speed on binary embedding&lt;br /&gt;
||&lt;br /&gt;
* Prepare paper sharing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* redo for MI and CKA&lt;br /&gt;
* achieve dePLDA&lt;br /&gt;
||&lt;br /&gt;
* improve dePLDA&lt;br /&gt;
* do some experiment for score fusion&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-14</id>
		<title>2022-03-14</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-14"/>
				<updated>2022-03-14T11:02:19Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Keep on revising the TASLP paper&lt;br /&gt;
* More review on disentanglement&lt;br /&gt;
||&lt;br /&gt;
* Finish review disentanglement&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* revise and submit NSFC project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system, Leaderboard and Evaluation toolkit)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
* Release CNCSRC Submission and Leaderboard&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave with alignment&lt;br /&gt;
* Speech engrave with enhancement loss [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/61/Engrave_recover_result.pdf here]&lt;br /&gt;
||&lt;br /&gt;
* Continue work on speech engrave: Design more smart mask mechanisms&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* results of CycleVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/05/CycleVCd128.pdf pdf]&lt;br /&gt;
* tools for vc&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare report of Lip Reading &amp;amp; AVSR&lt;br /&gt;
||&lt;br /&gt;
* Reproduce experiments of reviewed methods&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* do more experiments on visualization&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* various tests with GOP SCA MOSNet and CER scoring&lt;br /&gt;
* 4 Spk VC done&lt;br /&gt;
* modify paper&lt;br /&gt;
||&lt;br /&gt;
* try 6 Spk VC&lt;br /&gt;
* submit paper for Interspeech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* The WAV2VEC model trained by Librispeech dataset was used for Chinese Asr experiments&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* Improve phoneme translation system&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Data statistics of AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Design SOTA model test experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Test top-N and speed on binary embedding&lt;br /&gt;
||&lt;br /&gt;
* Prepare paper sharing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* redo for MI and CKA&lt;br /&gt;
* achieve dePLDA&lt;br /&gt;
||&lt;br /&gt;
* improve dePLDA&lt;br /&gt;
* do some experiment for score fusion&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-07</id>
		<title>2022-03-07</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-07"/>
				<updated>2022-03-07T11:11:16Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Odyssey papers done&lt;br /&gt;
* Trans RC paper draft done&lt;br /&gt;
||&lt;br /&gt;
* More literature review for the Trans TC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* NSCF revising&lt;br /&gt;
||&lt;br /&gt;
*write popular science project  &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Release SR.eval and C-P map)&lt;br /&gt;
* Finish Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Attention based Speech engrave&lt;br /&gt;
** adversarial learning&lt;br /&gt;
** Gaussian graver&lt;br /&gt;
** Garbage node training&lt;br /&gt;
* Speech engrave with alignment&lt;br /&gt;
** garbage node training&lt;br /&gt;
||&lt;br /&gt;
* Test Speech engrave with alignment&lt;br /&gt;
** search decoding&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* the odyssey paper&lt;br /&gt;
* cycle and adversarial training on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/14/CycleVC.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* CycleVC adjustment&lt;br /&gt;
* Cycle loss plus adversarial loss on AutoVC&lt;br /&gt;
* experiments for RC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Do experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Try finetune &amp;amp; pretrain of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* check and reconstruct experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* experiments on CycleVAE&lt;br /&gt;
* Check and finish paper framework [http://166.111.134.19:7777/liangwd/paper_final.html]&lt;br /&gt;
||&lt;br /&gt;
* finish Multi-speaker test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Asr experiments on different layers of multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do experiments on MOSES system&lt;br /&gt;
||&lt;br /&gt;
* Prepare phrase-based MT training data&lt;br /&gt;
* train phrase-based MT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Finish speaker recognition training on AV-CNCeleb &lt;br /&gt;
||&lt;br /&gt;
* Do face recognition training on AV-CNCeleb &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* test binary embedding performance (mAP, speed) on speaker retrieval task&lt;br /&gt;
||&lt;br /&gt;
* do some tests on AE&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* learn and try to use dePLDA&lt;br /&gt;
||&lt;br /&gt;
* finish task of dePLDA&lt;br /&gt;
* prepare for sharing&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-07</id>
		<title>2022-03-07</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-07"/>
				<updated>2022-03-07T11:04:28Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Odyssey papers done&lt;br /&gt;
* Trans RC paper draft done&lt;br /&gt;
||&lt;br /&gt;
* More literature review for the Trans TC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* NSCF revising&lt;br /&gt;
||&lt;br /&gt;
*write popular science project  &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Release SR.eval and C-P map)&lt;br /&gt;
* Finish Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Attention based Speech engrave&lt;br /&gt;
** adversarial learning&lt;br /&gt;
** Gaussian graver&lt;br /&gt;
** Garbage node training&lt;br /&gt;
* Speech engrave with alignment&lt;br /&gt;
** garbage node training&lt;br /&gt;
||&lt;br /&gt;
* Test Speech engrave with alignment&lt;br /&gt;
** search decoding&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* the odyssey paper&lt;br /&gt;
* cycle and adversarial training on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/14/CycleVC.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* CycleVC adjustment&lt;br /&gt;
* Cycle loss plus adversarial loss on AutoVC&lt;br /&gt;
* experiments for RC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Do experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Try finetune &amp;amp; pretrain of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* check and reconstruct experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* experiments on CycleVAE&lt;br /&gt;
* Check and finish paper framework&lt;br /&gt;
||&lt;br /&gt;
* finish Multi-speaker test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Asr experiments on different layers of multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do experiments on MOSES system&lt;br /&gt;
||&lt;br /&gt;
* Prepare phrase-based MT training data&lt;br /&gt;
* train phrase-based MT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Finish speaker recognition training on AV-CNCeleb &lt;br /&gt;
||&lt;br /&gt;
* Do face recognition training on AV-CNCeleb &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* test binary embedding performance (mAP, speed) on speaker retrieval task&lt;br /&gt;
||&lt;br /&gt;
* do some tests on AE&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* learn and try to use dePLDA&lt;br /&gt;
||&lt;br /&gt;
* finish task of dePLDA&lt;br /&gt;
* prepare for sharing&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-21</id>
		<title>2022-02-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-21"/>
				<updated>2022-02-21T11:31:01Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Experiment on IB control with conditional model [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangd&amp;amp;step=view_request&amp;amp;cvssid=847], rough conclusions were obtained. &lt;br /&gt;
* Refine the AV speaker recognition theoretical part.&lt;br /&gt;
* Review for ICME.&lt;br /&gt;
||&lt;br /&gt;
* Complete ICME review&lt;br /&gt;
* VQMIVC reproduction, update with random mask&lt;br /&gt;
* Some missing papers treatment: (1) true nonlinear LDA (2) CycleFlow (3) Thermal-visual database&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*NSFC Application&lt;br /&gt;
*Materials inverse design investigation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Data release and SR baseline)&lt;br /&gt;
* Submit Tencent AI Lab project&lt;br /&gt;
* Submit M2ASR concluding report&lt;br /&gt;
* Write ASVSpoof response&lt;br /&gt;
||&lt;br /&gt;
* Submit ASVSpoof response&lt;br /&gt;
* Finish Draft of C-P Map paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
* M2ASR final report&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* autoVC with cycle loss [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/16/Autovc-cyc.pdf pdf] [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/66/Pre.rar demo]&lt;br /&gt;
||&lt;br /&gt;
* cycle loss after adversarial training&lt;br /&gt;
* VQMIVC&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Prepare data &amp;amp; environment for experiments of AV-Hubert&lt;br /&gt;
||&lt;br /&gt;
* &amp;lt;-- keep doing these tasks&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* collated the visualization methods that have been reproduced&lt;br /&gt;
* some scripts for baseline(cncsrc)&lt;br /&gt;
||&lt;br /&gt;
* study feature aggregation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Never-before-seen test [http://166.111.134.19:7777/liangwd/paper.html]&lt;br /&gt;
* 3~6 spk cycle loss models on wav2vec+seq2seq model&lt;br /&gt;
* Rewrite paper and focus on cycle loss&lt;br /&gt;
||&lt;br /&gt;
* Finish paper framework&lt;br /&gt;
* Push test on WER scoring&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Multi-language W2V model features were used for ASR experiments and compared with traditional MFCC features&lt;br /&gt;
||&lt;br /&gt;
* Asr experiments on different layers of multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Make an experiment plan&lt;br /&gt;
* Read the HuBERT paper and code&lt;br /&gt;
||&lt;br /&gt;
* Finish the hubert-U framework&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*Find the baseline for CN-Celeb speaker identification&lt;br /&gt;
||&lt;br /&gt;
*Train this baseline and find face recognition baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Check CKA&lt;br /&gt;
* Do experiment for gender&lt;br /&gt;
||&lt;br /&gt;
* Do experiment for cross-modal PLDA&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-21</id>
		<title>2022-02-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-21"/>
				<updated>2022-02-21T10:38:27Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Data release and SR baseline)&lt;br /&gt;
* Submit Tencent AI Lab project&lt;br /&gt;
* Submit M2ASR concluding report&lt;br /&gt;
* Write ASVSpoof response&lt;br /&gt;
||&lt;br /&gt;
* Submit ASVSpoof response&lt;br /&gt;
* Finish Draft of C-P Map paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
* M2ASR final report&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Prepare data &amp;amp; environment for experiments of AV-Hubert&lt;br /&gt;
||&lt;br /&gt;
* &amp;lt;-- keep doing these tasks&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* 3~6 spk cycle loss models on wav2vec+seq2seq model&lt;br /&gt;
* Rewrite paper and focus on cycle loss&lt;br /&gt;
||&lt;br /&gt;
* Finish paper framework&lt;br /&gt;
* Push test on WER scoring&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Multi-language W2V model features were used for ASR experiments and compared with traditional MFCC features&lt;br /&gt;
||&lt;br /&gt;
* Asr experiments on different layers of multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Check CKA&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-24</id>
		<title>2022-01-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-24"/>
				<updated>2022-01-24T10:58:27Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Reschedule the cycleFlow paper&lt;br /&gt;
* Keep on investigation for multi-modality information fusion&lt;br /&gt;
||&lt;br /&gt;
* Rewrite cycleFlow&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*Investigate railway Bureau; the practical application scenario of intelligent inspection robot &lt;br /&gt;
*Prepare a report for Gusu Lab&lt;br /&gt;
||&lt;br /&gt;
* keep on intelligent sensor investigation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Submit final papers&lt;br /&gt;
* Prepare hard trials paper&lt;br /&gt;
||&lt;br /&gt;
* Go on hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* investigate forward attention [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on forward attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some experiments on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/1c/Autovc.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* more experiments for cycle loss&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Experiments on kmeans and use label for clustering &lt;br /&gt;
* Experiments on # of phn kinds&lt;br /&gt;
* [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=cchen&amp;amp;step=view_request&amp;amp;cvssid=846 cvss]&lt;br /&gt;
||&lt;br /&gt;
* Check the experiment of label clustering&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* study representation learning(self-supervised learning)&lt;br /&gt;
* Using mel-spectrum and standard softmax on small data, models are trained&lt;br /&gt;
* Data preprocessing&lt;br /&gt;
||&lt;br /&gt;
* Visualization on a small models(mel-spectrum &amp;amp; MFCC)&lt;br /&gt;
* Implement RELAX&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Conduct wav2vec test&lt;br /&gt;
* Adjust and run wav2vec+decoder model (training 40k/50k)&lt;br /&gt;
* Add experiment details to paper&lt;br /&gt;
||&lt;br /&gt;
* paper submission to Arxiv&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Training supervised speech recognition models using wav2vec features&lt;br /&gt;
||&lt;br /&gt;
* Experiments with more data sets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Extract MFCC features for GAN&lt;br /&gt;
* Learning Clustering Algorithms&lt;br /&gt;
||&lt;br /&gt;
* go on GAN experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Preprocess data for the training of UIS-RNN&lt;br /&gt;
* Train the UIS-RNN model on small dataset&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish MI test.&lt;br /&gt;
* Finish CKA test.&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-17</id>
		<title>2022-01-17</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-17"/>
				<updated>2022-01-17T10:38:19Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*Low-light image simulation and data preparation&lt;br /&gt;
*capsule network review&lt;br /&gt;
*Research plan guidance for interns&lt;br /&gt;
*Conceptual model of intelligent sensor&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Polish CNCSRC plan&lt;br /&gt;
* Write hard trials paper&lt;br /&gt;
||&lt;br /&gt;
* Finish hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Analysis FA of Speech engrave&lt;br /&gt;
* Speech engrave with position embedding [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* Analysis Speech engrave with position embedding &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Read Paper (RELAX,RISE); Finish Demo about RISE&lt;br /&gt;
* Evaluate the importance maps  with Deletion and Insertion&lt;br /&gt;
||&lt;br /&gt;
* Test MFCC&lt;br /&gt;
* Finish RELAX&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Paper sharing on any-to-any VC&lt;br /&gt;
* Finish training wav2vec model&lt;br /&gt;
* Ongoing wav2vec+AE model&lt;br /&gt;
||&lt;br /&gt;
* Finish all model training &lt;br /&gt;
* Test and comparison analysis&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Extract MFCC features for GAN&lt;br /&gt;
* Write a project application&lt;br /&gt;
||&lt;br /&gt;
* Go on GAN experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Read the UIS-RNN paper&lt;br /&gt;
* Run some tests on speaker diarization&lt;br /&gt;
||&lt;br /&gt;
* Try to train the UIS-RNN model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-10</id>
		<title>2022-01-10</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-10"/>
				<updated>2022-01-10T11:12:11Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Finish CNSRC challenge plan&lt;br /&gt;
* Deal with ICASSP response&lt;br /&gt;
* Keep on survey for multi-modal information fusion&lt;br /&gt;
||&lt;br /&gt;
* Keep on multi-modal technique review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Capsnet generative model investigation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC plan&lt;br /&gt;
* Make Real AM rebuttal&lt;br /&gt;
||&lt;br /&gt;
* Go on hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave (text attend speech)&lt;br /&gt;
* attention visualization [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave v2&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish course thesis&lt;br /&gt;
* Test UASR with different phone symbol &lt;br /&gt;
||&lt;br /&gt;
* Figure out reason of the influence of symbol &lt;br /&gt;
* Test w2v2.0 feats with other NN instead of GAN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish wav2vec+decoder model and do self-evaluation test&lt;br /&gt;
* evaluate the scale of network&lt;br /&gt;
||&lt;br /&gt;
* Finish the never-before seen test&lt;br /&gt;
* Paper sharing on Wednesday&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Thchs30 speech recognition using Kaldi&lt;br /&gt;
* Try using the WAV2VEc feature in Kaldi speech recognition, modify make_mfcc.sh&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare the effect of wav2vec parameter tuning&lt;br /&gt;
* Prepare group report&lt;br /&gt;
||&lt;br /&gt;
* Extract Chinese features for GAN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Calculate the acc of each segment score&lt;br /&gt;
||&lt;br /&gt;
* Complete data merge&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish course task&lt;br /&gt;
||&lt;br /&gt;
* Finish pmi&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-10</id>
		<title>2022-01-10</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-10"/>
				<updated>2022-01-10T11:04:53Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Finish CNSRC challenge plan&lt;br /&gt;
* Deal with ICASSP response&lt;br /&gt;
* Keep on survey for multi-modal information fusion&lt;br /&gt;
||&lt;br /&gt;
* Keep on multi-modal technique review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Capsnet generative model investigation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC plan&lt;br /&gt;
* Make Real AM rebuttal&lt;br /&gt;
||&lt;br /&gt;
* Go on hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave (text attend speech)&lt;br /&gt;
* attention visualization [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave v2&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish course thesis&lt;br /&gt;
* Test UASR with different phone symbol &lt;br /&gt;
||&lt;br /&gt;
* Figure out reason of the influence of symbol &lt;br /&gt;
* Test w2v2.0 feats with other NN instead of GAN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish wav2vec+decoder model and do self-evaluation test&lt;br /&gt;
* evaluate the scale of network&lt;br /&gt;
||&lt;br /&gt;
* Finish the never-before seen test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Thchs30 speech recognition using Kaldi&lt;br /&gt;
* Try using the WAV2VEc feature in Kaldi speech recognition, modify make_mfcc.sh&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare the effect of wav2vec parameter tuning&lt;br /&gt;
* Prepare group report&lt;br /&gt;
||&lt;br /&gt;
* Extract Chinese features for GAN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Calculate the acc of each segment score&lt;br /&gt;
||&lt;br /&gt;
* Complete data merge&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
||&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-03</id>
		<title>2022-01-03</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-03"/>
				<updated>2022-01-03T11:20:44Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review multi-modal and multi-view literature &lt;br /&gt;
* Design lip2wav models (possibly based on GCN)&lt;br /&gt;
* Continuous construct theory for noisy linear flow and PCA&lt;br /&gt;
||&lt;br /&gt;
* Keep on multi-modal design&lt;br /&gt;
* CNSRC test plan&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC plan&lt;br /&gt;
* Update Sunine toolkit [https://gitlab.com/csltstu/sunine]&lt;br /&gt;
||&lt;br /&gt;
* Go on hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* m2asr&lt;br /&gt;
* speech engrave text attend speech&lt;br /&gt;
||&lt;br /&gt;
* speech engrave text attend speech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some modification on x-vector&lt;br /&gt;
||&lt;br /&gt;
* introduce bottleneck into it&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish final exam&lt;br /&gt;
* Finish course projects&lt;br /&gt;
||&lt;br /&gt;
* 3 small course thesis&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Prepare for final exam&lt;br /&gt;
||&lt;br /&gt;
* Final exam for a week&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Modify wav2vec+decoder model&lt;br /&gt;
||&lt;br /&gt;
* Finish training model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Try supervised speech recognition based on WAV2VEC&lt;br /&gt;
||&lt;br /&gt;
* Wav2vec supervised speech recognition system was built&lt;br /&gt;
* Multilingual WAV2VEC supervised recognition experiment was conducted&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do wav2vec-u in Swedish&lt;br /&gt;
* Prepare bi-weekly report&lt;br /&gt;
||&lt;br /&gt;
* go on wav2vec-u&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Recalculate FAR&lt;br /&gt;
* Divide the data into several parts&lt;br /&gt;
||&lt;br /&gt;
* Calculate the FAR of the divided data&lt;br /&gt;
* To complete the data partition&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
* Finish the remaining homework&lt;br /&gt;
* Do some work&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-03</id>
		<title>2022-01-03</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-03"/>
				<updated>2022-01-03T11:13:46Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review multi-modal and multi-view literature &lt;br /&gt;
* Design lip2wav models (possibly based on GCN)&lt;br /&gt;
* Continuous construct theory for noisy linear flow and PCA&lt;br /&gt;
||&lt;br /&gt;
* Keep on multi-modal design&lt;br /&gt;
* CNSRC test plan&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC plan&lt;br /&gt;
* Update Sunine toolkit [https://gitlab.com/csltstu/sunine]&lt;br /&gt;
||&lt;br /&gt;
* Go on hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* m2asr&lt;br /&gt;
* speech engrave text attend speech&lt;br /&gt;
||&lt;br /&gt;
* speech engrave text attend speech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some modification on x-vector&lt;br /&gt;
||&lt;br /&gt;
* introduce bottleneck into it&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish final exam&lt;br /&gt;
* Finish course projects&lt;br /&gt;
||&lt;br /&gt;
* 3 small course thesis&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Prepare for final exam&lt;br /&gt;
||&lt;br /&gt;
* Final exam for a week&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Modify wav2vec+decoder model&lt;br /&gt;
||&lt;br /&gt;
* Finish training model&lt;br /&gt;
* Paper sharing on Wednesday&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Try supervised speech recognition based on WAV2VEC&lt;br /&gt;
||&lt;br /&gt;
* Wav2vec supervised speech recognition system was built&lt;br /&gt;
* Multilingual WAV2VEC supervised recognition experiment was conducted&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do wav2vec-u in Swedish&lt;br /&gt;
* Prepare bi-weekly report&lt;br /&gt;
||&lt;br /&gt;
* go on wav2vec-u&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Recalculate FAR&lt;br /&gt;
* Divide the data into several parts&lt;br /&gt;
||&lt;br /&gt;
* Calculate the FAR of the divided data&lt;br /&gt;
* To complete the data partition&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
*&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
* Finish the remaining homework&lt;br /&gt;
* Do some work&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-27</id>
		<title>2021-12-27</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-27"/>
				<updated>2021-12-27T11:13:47Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Hard trials paper revised&lt;br /&gt;
* Investigate link between AE and flow in the linear case [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/91/Linear.png]&lt;br /&gt;
* Investigate audio visual pretraining&lt;br /&gt;
||&lt;br /&gt;
* Keep on neural PCA theory&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* AE baseline&lt;br /&gt;
* Alignet &amp;amp; AE joint training for Infra to Visible domain transfer&lt;br /&gt;
* Gatenet for Visible and Infra latent code fusion [https://qw4sbuvrmj.feishu.cn/docs/doccnNaBy0EAyUTskPQvvN7kgWo]&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Plan CNCSRC stuffs.&lt;br /&gt;
* Maintain Sunine.&lt;br /&gt;
* Deliver projects.&lt;br /&gt;
* Modify patents.&lt;br /&gt;
||&lt;br /&gt;
* Go on Hard trials paper.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* m2asr Kirgiz check&lt;br /&gt;
||&lt;br /&gt;
* continue on speech engrave (text attend speech; HMM based attention)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some contrast experiment&lt;br /&gt;
||&lt;br /&gt;
* design the timbre encoder with a design similar to x-vector&lt;br /&gt;
* insert the new encoder into our four-encoder structure&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do course project&lt;br /&gt;
* Organize Kirgiz data&lt;br /&gt;
||&lt;br /&gt;
* Do course project&lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish patent disclosure&lt;br /&gt;
* Update the project website and upload never_before_seen result[http://166.111.134.19:7777/liangwd/never_before_seen.html]&lt;br /&gt;
* Go on with seq2seq test&lt;br /&gt;
||&lt;br /&gt;
* Finish seq2seq test&lt;br /&gt;
* Update paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Debug bugs encountered when using the fine-tuning model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Adjust the parameters to use thchs30 training&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Sampling out pictures compute FAR&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do course project&lt;br /&gt;
||&lt;br /&gt;
* Do course project&lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-27</id>
		<title>2021-12-27</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-27"/>
				<updated>2021-12-27T11:09:16Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Hard trials paper revised&lt;br /&gt;
* Investigate link between AE and flow in the linear case [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/9/91/Linear.png]&lt;br /&gt;
* Investigate audio visual pretraining&lt;br /&gt;
||&lt;br /&gt;
* Keep on neural PCA theory&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* AE baseline&lt;br /&gt;
* Alignet &amp;amp; AE joint training for Infra to Visible domain transfer&lt;br /&gt;
* Gatenet for Visible and Infra latent code fusion [https://qw4sbuvrmj.feishu.cn/docs/doccnNaBy0EAyUTskPQvvN7kgWo]&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Plan CNCSRC stuffs.&lt;br /&gt;
* Maintain Sunine.&lt;br /&gt;
* Deliver projects.&lt;br /&gt;
* Modify patents.&lt;br /&gt;
||&lt;br /&gt;
* Go on Hard trials paper.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* m2asr Kirgiz check&lt;br /&gt;
||&lt;br /&gt;
* continue on speech engrave (text attend speech; HMM based attention)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some contrast experiment&lt;br /&gt;
||&lt;br /&gt;
* design the timbre encoder with a design similar to x-vector&lt;br /&gt;
* insert the new encoder into our four-encoder structure&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do course project&lt;br /&gt;
* Organize Kirgiz data&lt;br /&gt;
||&lt;br /&gt;
* Do course project&lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish patent disclosure&lt;br /&gt;
* Update the project website and upload never_before_seen result&lt;br /&gt;
* Go on with seq2seq test&lt;br /&gt;
||&lt;br /&gt;
* Finish seq2seq test&lt;br /&gt;
* Update paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Debug bugs encountered when using the fine-tuning model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Adjust the parameters to use thchs30 training&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Sampling out pictures compute FAR&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do course project&lt;br /&gt;
||&lt;br /&gt;
* Do course project&lt;br /&gt;
* Prepare for stochastic process final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-20</id>
		<title>2021-12-20</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-20"/>
				<updated>2021-12-20T11:08:19Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Hard trials paper revision - ongoing&lt;br /&gt;
* ICASSP paper review done&lt;br /&gt;
||&lt;br /&gt;
* Hard trials paper revision - keep on&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Upload AI course exam questions.&lt;br /&gt;
* Merge ECAPA-TDNN and Auto-LR into Sunine.&lt;br /&gt;
* Submit ETM TASLP response.&lt;br /&gt;
||&lt;br /&gt;
* Clean up PUFA data and build cross-channel system.&lt;br /&gt;
* Take charge of CNCSRC.&lt;br /&gt;
* Exps of hard trials.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Some analyse about fncmd and speech engrave [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 cvss]&lt;br /&gt;
* Prepare huawei retrain&lt;br /&gt;
||&lt;br /&gt;
* Consider about HMM-based attention mechanism&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* add contrastive loss&lt;br /&gt;
||&lt;br /&gt;
* short-term ib dimension adjustment&lt;br /&gt;
* speaker encoder adjustment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* a little analysis about aamsoftmax on cam &amp;amp;&amp;amp; saliency map on image domain&lt;br /&gt;
* homework&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* baseline result for not-before seen test&lt;br /&gt;
* framework for wav2vec model&lt;br /&gt;
||&lt;br /&gt;
* test for wave2vec model&lt;br /&gt;
* patent writing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Preparation for sharing about capsule&lt;br /&gt;
* Revise the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Unsupervised experiment of thchs30&lt;br /&gt;
* Revise the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Compute the cosine distance&lt;br /&gt;
* Revise the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-13</id>
		<title>2021-12-13</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-13"/>
				<updated>2021-12-12T16:52:40Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish training for not-ever-seen speaker on baseline AE and cycle model&lt;br /&gt;
* Build the framework of wav2vec model&lt;br /&gt;
||&lt;br /&gt;
* Full test on baseline &amp;amp; cycle model&lt;br /&gt;
* More details need to be discussed on wav2vec model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-12-06</id>
		<title>2021-12-06</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-12-06"/>
				<updated>2021-12-05T17:01:28Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
|| &lt;br /&gt;
*review papers about CQDs&lt;br /&gt;
*Verify the deconvolution of infrared and visible faces&lt;br /&gt;
*Verify infrared and visible image fusion based on GLOW model&lt;br /&gt;
*Arrange research plans for interns&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Finish the first version on improved exemplar autoencoder with cycle loss&lt;br /&gt;
* Rethink the theory analysis part&lt;br /&gt;
||&lt;br /&gt;
* Test on never-before-seen speaker conversion&lt;br /&gt;
* Review the code of wav2vec, StarGAN and PPG based GAN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-11-29</id>
		<title>2021-11-29</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-11-29"/>
				<updated>2021-11-29T01:33:25Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Anti-spoof paper&lt;br /&gt;
* Some paper review&lt;br /&gt;
||&lt;br /&gt;
* Complete anti-spoof paper&lt;br /&gt;
* Prepare a talk on over-tuning&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Prepare frame alignment project plans for interns&lt;br /&gt;
* Intelligent infrared detector working platform&lt;br /&gt;
* CQDs infrared detector investigation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* SOTA models for hard trials (ECAPA-TDNN and GE2E).&lt;br /&gt;
* Study AI/ML book.&lt;br /&gt;
||&lt;br /&gt;
* Exps of hard trials.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave train&amp;amp;test&lt;br /&gt;
||&lt;br /&gt;
* improve speech engrave&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Speaker Classification Test&lt;br /&gt;
||&lt;br /&gt;
* Further test on classification&lt;br /&gt;
* First version of the expected paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Comparative experiment of WSJ, Tibetan and Thchs30&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Preparing the enroll data set&lt;br /&gt;
* Preparing the Tracker tool&lt;br /&gt;
||&lt;br /&gt;
* Sampling of CNCeleb dataset&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-11-15</id>
		<title>2021-11-15</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-11-15"/>
				<updated>2021-11-15T11:10:07Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Spoof paper almost done&lt;br /&gt;
||&lt;br /&gt;
* Spoof paper cleaning&lt;br /&gt;
* Hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Completed THS2021 data preprocessing, image feature extraction, and baseline&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Complete hard trials paper v1.&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* Go on preparing my defence.&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* make a comparison  between fncmd with new methods&lt;br /&gt;
* find some cross-modality methods(Cross modality attention) &lt;br /&gt;
||&lt;br /&gt;
* Implement Cross modality attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* speaker and robustness test for CycleFlow-spk&lt;br /&gt;
||&lt;br /&gt;
* some improvement&lt;br /&gt;
* some other exploration for the model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Renew the project website and upload relevant speech sequences&lt;br /&gt;
* Draw various loss curves&lt;br /&gt;
* Prepare test data for content-relevant tests&lt;br /&gt;
||&lt;br /&gt;
* Quantitative test&lt;br /&gt;
* Prepare test data for speaker separation tests&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|- &lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Modify wav2vec-u gan network code and output intermediate results&lt;br /&gt;
* Training wav2vec2 model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare the training process of TIMIT and Tibetan&lt;br /&gt;
||&lt;br /&gt;
* Fine-tune the Tibetan wav2vec model&lt;br /&gt;
* Prepare the thesis opening report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-11-01</id>
		<title>2021-11-01</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-11-01"/>
				<updated>2021-11-01T01:35:06Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Anti-spoof chapter&lt;br /&gt;
||&lt;br /&gt;
* Anti-spoof chapter&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* THS dataset experiments and paper &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Submit CNC response&lt;br /&gt;
* Complete project deliveries&lt;br /&gt;
||&lt;br /&gt;
* Hard trials&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Continue on KWS&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* egs of thc30 base sunine&lt;br /&gt;
* reproduce bubble noise on speech recognition&lt;br /&gt;
||&lt;br /&gt;
* audio important maps based bubble noise on speaker recognition&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* several models tested including model without cycle loss, models with different bottleneck&lt;br /&gt;
||&lt;br /&gt;
* finetune these models decoders&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* analyse reason of the problem of wav2vec-u in Tibetan&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Gan network training code&lt;br /&gt;
* Mongolian recording project&lt;br /&gt;
||&lt;br /&gt;
* Adjust Tibetan wav2vec-u parameters&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Voxceleb2 and Voxceleb1 data set collation&lt;br /&gt;
||&lt;br /&gt;
* single mode sota system training&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-10-25</id>
		<title>2021-10-25</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-10-25"/>
				<updated>2021-10-25T01:32:10Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Neural PCA paper&lt;br /&gt;
||&lt;br /&gt;
* Neural PCA paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* CNC response&lt;br /&gt;
* Project delivery&lt;br /&gt;
||&lt;br /&gt;
* Hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Tiankai Zhi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jingxin Shen&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* audio important maps based mask&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Publish the baseline test data on the website&lt;br /&gt;
* Realize the cycle-loss model&lt;br /&gt;
||&lt;br /&gt;
* More models with different layers and parameters need to be tested&lt;br /&gt;
* Test whether the current model is better than baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-09-06</id>
		<title>2021-09-06</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-09-06"/>
				<updated>2021-09-06T05:33:44Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* KWS &lt;br /&gt;
||&lt;br /&gt;
* Prepare for ICASSP 2022&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiao Han&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Di Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Tiankai Zhi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jingxin Shen&lt;br /&gt;
||  &lt;br /&gt;
* Thermal face experiment with  DNF&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Approach two accomplished&lt;br /&gt;
||&lt;br /&gt;
* Collect data and Build the project website&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-08-30</id>
		<title>2021-08-30</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-08-30"/>
				<updated>2021-08-30T06:07:29Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Neural PCA experiments on FashionMNIST and kMNIST, [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangd&amp;amp;step=view_request&amp;amp;cvssid=830 link]&lt;br /&gt;
* Some variants of NPCA, in terms of sampling scheme, loss function, etc.&lt;br /&gt;
* Some work for thermal paper review &lt;br /&gt;
||&lt;br /&gt;
* TASLP paper response&lt;br /&gt;
* Continue the NPCA and paper review. &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* TASLP paper experiments and response&lt;br /&gt;
* Thermal Speaking project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Prepare multi-modal datasets of CN-Celeb.&lt;br /&gt;
* Monitor cvss 808 and 831.&lt;br /&gt;
||&lt;br /&gt;
* Paper of `Hard trials'&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* emotion recognition on MSP-IMPROV&lt;br /&gt;
||&lt;br /&gt;
* subjective evaluation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiao Han&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Di Wang&lt;br /&gt;
|| &lt;br /&gt;
* Complete cross-channel experiment doc.&lt;br /&gt;
||&lt;br /&gt;
* Data processing for CNC1 and CNC2.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Tiankai Zhi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* read paper about SN-net(two branches DAE)&lt;br /&gt;
* attend graduate freshman lectures&lt;br /&gt;
||&lt;br /&gt;
* run UASR code&lt;br /&gt;
* attend graduate freshman lectures&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jingxin Shen&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare MAML and R-MAML on MiniImagenet for Fast Adaptation &lt;br /&gt;
||&lt;br /&gt;
* study of [808]&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Try a two-step training to improve the baseline exemplar encoder&lt;br /&gt;
* Collect test data of the method&lt;br /&gt;
||&lt;br /&gt;
* Continue another possible method on multi-step training.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2021-08-30</id>
		<title>2021-08-30</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2021-08-30"/>
				<updated>2021-08-30T06:06:27Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Neural PCA experiments on FashionMNIST and kMNIST, [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangd&amp;amp;step=view_request&amp;amp;cvssid=830 link]&lt;br /&gt;
* Some variants of NPCA, in terms of sampling scheme, loss function, etc.&lt;br /&gt;
* Some work for thermal paper review &lt;br /&gt;
||&lt;br /&gt;
* TASLP paper response&lt;br /&gt;
* Continue the NPCA and paper review. &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* TASLP paper experiments and response&lt;br /&gt;
* Thermal Speaking project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Prepare multi-modal datasets of CN-Celeb.&lt;br /&gt;
* Monitor cvss 808 and 831.&lt;br /&gt;
||&lt;br /&gt;
* Paper of `Hard trials'&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* emotion recognition on MSP-IMPROV&lt;br /&gt;
||&lt;br /&gt;
* subjective evaluation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jiao Han&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Di Wang&lt;br /&gt;
|| &lt;br /&gt;
* Complete cross-channel experiment doc.&lt;br /&gt;
||&lt;br /&gt;
* Data processing for CNC1 and CNC2.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Tiankai Zhi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* read paper about SN-net(two branches DAE)&lt;br /&gt;
* attend graduate freshman lectures&lt;br /&gt;
||&lt;br /&gt;
* run UASR code&lt;br /&gt;
* attend graduate freshman lectures&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Jingxin Shen&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Compare MAML and R-MAML on MiniImagenet for Fast Adaptation &lt;br /&gt;
||&lt;br /&gt;
* study of [808]&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Qingyang Zhu&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Try a two-step training to improve the baseline exemplar encoder&lt;br /&gt;
* Collect test data of the method&lt;br /&gt;
* Continue another possible method.&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2021-08-13T12:56:29Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''清华大学语音语言中心内部学习会&lt;br /&gt;
&lt;br /&gt;
'''时间： 每周四晚19:30'''&lt;br /&gt;
&lt;br /&gt;
'''地点： 1区303'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun  || Zeus code regularization ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen   || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang  || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments about thermal to RGB face synthesis with cycleGan and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang ||  Unsupervised Audio-Visual Synthesis via Exemplar Autoencoders  ||  [[媒体文件:Bi-weekly_report_Liangwd.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/26  ||Jingxin Shen ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jiao Han  ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/16  ||Haoran Sun ||  ||&lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2021-08-13T12:55:30Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''清华大学语音语言中心内部学习会&lt;br /&gt;
&lt;br /&gt;
'''时间： 每周四晚19:30'''&lt;br /&gt;
&lt;br /&gt;
'''地点： 1区303'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun  || Zeus code regularization ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen   || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang  || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments about thermal to RGB face synthesis with cycleGan and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang ||  Unsupervised Audio-visual Synthesis via Exemplar Autoencoders  ||  [[媒体文件:Bi-weekly_report_Liangwd.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/26  ||Jingxin Shen ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jiao Han  ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/16  ||Haoran Sun ||  ||&lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2021-08-13T12:53:56Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''清华大学语音语言中心内部学习会&lt;br /&gt;
&lt;br /&gt;
'''时间： 每周四晚19:30'''&lt;br /&gt;
&lt;br /&gt;
'''地点： 1区303'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun  || Zeus code regularization ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen   || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang  || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments about thermal to RGB face synthesis with cycleGan and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang ||  UNSUPERVISED AUDIOVISUAL SYNTHESIS VIA EXEMPLAR AUTOENCODERS || &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/26  ||Jingxin Shen ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jiao Han  ||  ||&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/16  ||Haoran Sun ||  ||&lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:UNSUPERVISED_AUDIOVISUAL_SYNTHESIS_VIA_EXEMPLAR_AUTOENCODERS.pdf</id>
		<title>文件:UNSUPERVISED AUDIOVISUAL SYNTHESIS VIA EXEMPLAR AUTOENCODERS.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:UNSUPERVISED_AUDIOVISUAL_SYNTHESIS_VIA_EXEMPLAR_AUTOENCODERS.pdf"/>
				<updated>2021-08-13T06:51:30Z</updated>
		
		<summary type="html">&lt;p&gt;Liangwd：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Liangwd</name></author>	</entry>

	</feed>